Coverage for drivers/LVMSR.py: 47%
1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# LVMSR: VHD and QCOW2 on LVM storage repository
19#
21from sm_typing import Dict, List, override
23import SR
24from SR import deviceCheck
25import VDI
26import SRCommand
27import util
28import lvutil
29import lvmcache
30import scsiutil
31import lock
32import os
33import sys
34import time
35import errno
36import xs_errors
37import cleanup
38import blktap2
39from journaler import Journaler
40from refcounter import RefCounter
41from ipc import IPCFlag
42from constants import NS_PREFIX_LVM, VG_LOCATION, VG_PREFIX, CBT_BLOCK_SIZE
43from cowutil import CowUtil, getCowUtil, getImageStringFromVdiType, getVdiTypeFromImageFormat
44from lvmcowutil import LV_PREFIX, LvmCowUtil
45from lvmanager import LVActivator
46from vditype import VdiType
47import XenAPI # pylint: disable=import-error
48import re
49from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
50 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
51 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
52 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
53 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
54from metadata import retrieveXMLfromFile, _parseXML
55from xmlrpc.client import DateTime
56import glob
57from constants import CBTLOG_TAG
58from fairlock import Fairlock
59DEV_MAPPER_ROOT = os.path.join('/dev/mapper', VG_PREFIX)
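# Device-mapper escapes '-' in VG/LV names by doubling it, so the entries matched
# under DEV_MAPPER_ROOT carry doubled dashes; delete() and detach() below undo that
# escaping with fileName.replace('-', '/').replace('//', '-') to recover the plain
# LV name and its symlink under the VG directory.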
61geneology: Dict[str, List[str]] = {}
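# geneology maps a parent VDI uuid to the list of its child VDI uuids; _loadvdis()
# populates it and uses it to mark parents read-only and to drop hidden leaf nodes
# that the GC would remove anyway.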
62CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
63 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
64 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
65 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
66 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
68CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
70DRIVER_INFO = {
71 'name': 'Local VHD and QCOW2 on LVM',
72 'description': 'SR plugin which represents disks as VHD and QCOW2 disks on ' + \
73 'Logical Volumes within a locally-attached Volume Group',
74 'vendor': 'XenSource Inc',
75 'copyright': '(C) 2008 XenSource Inc',
76 'driver_version': '1.0',
77 'required_api_version': '1.0',
78 'capabilities': CAPABILITIES,
79 'configuration': CONFIGURATION
80 }
82CREATE_PARAM_TYPES = {
83 "raw": VdiType.RAW,
84 "vhd": VdiType.VHD,
85 "qcow2": VdiType.QCOW2
86}
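# CREATE_PARAM_TYPES translates the "image-format" (or legacy "type") value passed in
# vdi_sm_config at vdi_create time into a VdiType: for example, a hypothetical
# vdi_sm_config of {"image-format": "qcow2"} selects VdiType.QCOW2, while an unknown
# value makes LVMVDI.load() fail with VDICreate ('bad image format').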
88OPS_EXCLUSIVE = [
89 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
90 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
91 "vdi_clone"]
93# Log if snapshot pauses VM for more than this many seconds
94LONG_SNAPTIME = 60
96class LVMSR(SR.SR):
97 DRIVER_TYPE = 'lvhd'
99 PROVISIONING_TYPES = ["thin", "thick"]
100 PROVISIONING_DEFAULT = "thick"
101 THIN_PLUGIN = "lvhd-thin"
103 PLUGIN_ON_SLAVE = "on-slave"
105 FLAG_USE_VHD = "use_vhd"
106 MDVOLUME_NAME = "MGT"
108 ALLOCATION_QUANTUM = "allocation_quantum"
109 INITIAL_ALLOCATION = "initial_allocation"
111 LOCK_RETRY_INTERVAL = 3
112 LOCK_RETRY_ATTEMPTS = 10
114 TEST_MODE_KEY = "testmode"
115 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
116 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
117 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
118 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
119 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
120 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
121 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
123 ENV_VAR_VHD_TEST = {
124 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
125 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
126 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
127 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
128 TEST_MODE_VHD_FAIL_REPARENT_END:
129 "VHD_UTIL_TEST_FAIL_REPARENT_END",
130 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
131 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
132 TEST_MODE_VHD_FAIL_RESIZE_DATA:
133 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
134 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
135 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
136 TEST_MODE_VHD_FAIL_RESIZE_END:
137 "VHD_UTIL_TEST_FAIL_RESIZE_END"
138 }
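# When other_config[TEST_MODE_KEY] matches one of the keys above, _prepareTestMode()
# exports the corresponding environment variable (set to "yes"); the variables are
# presumably consumed by the underlying vhd-util test hooks.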
139 testMode = ""
141 legacyMode = True
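# legacyMode starts out True and is cleared in load()/_checkMetadataVolume() once the
# SR is known to support COW volumes: either sm_config[FLAG_USE_VHD] is "true" or the
# MGT metadata volume already exists.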
143 @override
144 @staticmethod
145 def handles(type) -> bool:
146 """Returns True if this SR class understands the given dconf string"""
147 # we can pose as LVMSR or EXTSR for compatibility purposes
148 if __name__ == '__main__':
149 name = sys.argv[0]
150 else:
151 name = __name__
152 if name.endswith("LVMSR"):
153 return type == "lvm"
154 elif name.endswith("EXTSR"):
155 return type == "ext"
156 return type == LVMSR.DRIVER_TYPE
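# Illustrative behaviour of handles(): invoked under the name LVMSR it accepts only
# the "lvm" dconf type, under EXTSR only "ext", and under any other name only
# DRIVER_TYPE ("lvhd").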
158 def __init__(self, srcmd, sr_uuid):
159 SR.SR.__init__(self, srcmd, sr_uuid)
160 self._init_preferred_image_formats()
162 @override
163 def load(self, sr_uuid) -> None:
164 self.ops_exclusive = OPS_EXCLUSIVE
166 self.isMaster = False
167 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
168 self.isMaster = True
170 self.lock = lock.Lock(lock.LOCK_TYPE_SR, self.uuid)
171 self.sr_vditype = SR.DEFAULT_TAP
172 self.uuid = sr_uuid
173 self.vgname = VG_PREFIX + self.uuid
174 self.path = os.path.join(VG_LOCATION, self.vgname)
175 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
176 self.provision = self.PROVISIONING_DEFAULT
178 has_sr_ref = self.srcmd.params.get("sr_ref")
179 if has_sr_ref:
180 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
181 else:
182 self.other_conf = None
184 self.lvm_conf = None
185 if self.other_conf:
186 self.lvm_conf = self.other_conf.get('lvm-conf')
188 try:
189 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
190 except:
191 raise xs_errors.XenError('SRUnavailable', \
192 opterr='Failed to initialise the LVMCache')
193 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
194 self.journaler = Journaler(self.lvmCache)
195 if not has_sr_ref:
196 return # must be a probe call
197 # Test for thick vs thin provisioning conf parameter
198 if 'allocation' in self.dconf:  [198 ↛ 199: condition on line 198 was never true]
199 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
200 self.provision = self.dconf['allocation']
201 else:
202 raise xs_errors.XenError('InvalidArg', \
203 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
205 if self.other_conf.get(self.TEST_MODE_KEY):  [205 ↛ 209: condition on line 205 was never false]
206 self.testMode = self.other_conf[self.TEST_MODE_KEY]
207 self._prepareTestMode()
209 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
210 # sm_config flag overrides PBD, if any
211 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
212 self.provision = self.sm_config.get('allocation')
214 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
215 self.legacyMode = False
217 if lvutil._checkVG(self.vgname):
218 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",  [218 ↛ 221: condition on line 218 was never false]
219 "vdi_activate", "vdi_deactivate"]:
220 self._undoAllJournals()
221 if not self.cmd in ["sr_attach", "sr_probe"]:
222 self._checkMetadataVolume()
224 self.mdexists = False
226 # get a VDI -> TYPE map from the storage
227 contains_uuid_regex = \
228 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
229 self.storageVDIs = {}
231 for key in self.lvmCache.lvs.keys():  [231 ↛ 233: loop on line 231 never started]
232 # if the lvname has a uuid in it
233 type = None
234 vdi = None
235 if contains_uuid_regex.search(key) is not None:
236 for vdi_type, prefix in LV_PREFIX.items():
237 if key.startswith(prefix):
238 vdi = key[len(prefix):]
239 self.storageVDIs[vdi] = vdi_type
240 break
242 # check if metadata volume exists
243 try:
244 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
245 except:
246 pass
248 @override
249 def cleanup(self) -> None:
250 # we don't need to hold the lock to dec refcounts of activated LVs
251 if not self.lvActivator.deactivateAll():  [251 ↛ 252: condition on line 251 was never true]
252 raise util.SMException("failed to deactivate LVs")
254 def updateSRMetadata(self, allocation):
255 try:
256 # Add SR specific SR metadata
257 sr_info = \
258 {ALLOCATION_TAG: allocation,
259 UUID_TAG: self.uuid,
260 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
261 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
262 }
264 vdi_info = {}
265 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
266 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
268 vdi_type = self.session.xenapi.VDI.get_sm_config(vdi).get('vdi_type')
269 if not vdi_type:
270 raise xs_errors.XenError('MetadataError', opterr=f"Missing `vdi_type` for VDI {vdi_uuid}")
272 # Create the VDI entry in the SR metadata
273 vdi_info[vdi_uuid] = \
274 {
275 UUID_TAG: vdi_uuid,
276 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
277 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
278 IS_A_SNAPSHOT_TAG: \
279 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
280 SNAPSHOT_OF_TAG: \
281 self.session.xenapi.VDI.get_snapshot_of(vdi),
282 SNAPSHOT_TIME_TAG: \
283 self.session.xenapi.VDI.get_snapshot_time(vdi),
284 TYPE_TAG: \
285 self.session.xenapi.VDI.get_type(vdi),
286 VDI_TYPE_TAG: \
287 vdi_type,
288 READ_ONLY_TAG: \
289 int(self.session.xenapi.VDI.get_read_only(vdi)),
290 METADATA_OF_POOL_TAG: \
291 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
292 MANAGED_TAG: \
293 int(self.session.xenapi.VDI.get_managed(vdi))
294 }
295 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
297 except Exception as e:
298 raise xs_errors.XenError('MetadataError', \
299 opterr='Error upgrading SR Metadata: %s' % str(e))
301 def syncMetadataAndStorage(self):
302 try:
303 # if a VDI is present in the metadata but not in the storage
304 # then delete it from the metadata
305 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
306 for vdi in list(vdi_info.keys()):
307 update_map = {}
308 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):  [308 ↛ 315: condition on line 308 was never false]
309 # delete this from metadata
310 LVMMetadataHandler(self.mdpath). \
311 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
312 else:
313 # search for this in the metadata, compare types
314 # self.storageVDIs is a map of vdi_uuid to vdi_type
315 if vdi_info[vdi][VDI_TYPE_TAG] != \
316 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
317 # storage type takes authority
318 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
319 = METADATA_OBJECT_TYPE_VDI
320 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
321 update_map[VDI_TYPE_TAG] = \
322 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
323 LVMMetadataHandler(self.mdpath) \
324 .updateMetadata(update_map)
325 else:
326 # This should never happen
327 pass
329 except Exception as e:
330 raise xs_errors.XenError('MetadataError', \
331 opterr='Error syncing SR Metadata and storage: %s' % str(e))
333 def syncMetadataAndXapi(self):
334 try:
335 # get metadata
336 (sr_info, vdi_info) = \
337 LVMMetadataHandler(self.mdpath, False).getMetadata()
339 # First sync SR parameters
340 self.update(self.uuid)
342 # Now update the VDI information in the metadata if required
343 for vdi_offset in vdi_info.keys():
344 try:
345 vdi_ref = \
346 self.session.xenapi.VDI.get_by_uuid( \
347 vdi_info[vdi_offset][UUID_TAG])
348 except:
349 # maybe the VDI is not in XAPI yet, don't bother
350 continue
352 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
353 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
355 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
356 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
357 new_name_description:
358 update_map = {}
359 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
360 METADATA_OBJECT_TYPE_VDI
361 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
362 update_map[NAME_LABEL_TAG] = new_name_label
363 update_map[NAME_DESCRIPTION_TAG] = new_name_description
364 LVMMetadataHandler(self.mdpath) \
365 .updateMetadata(update_map)
366 except Exception as e:
367 raise xs_errors.XenError('MetadataError', \
368 opterr='Error syncing SR Metadata and XAPI: %s' % str(e))
370 def _checkMetadataVolume(self):
371 util.SMlog("Entering _checkMetadataVolume")
372 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
373 if self.isMaster:  [373 ↛ 389: condition on line 373 was never false]
374 if self.mdexists and self.cmd == "sr_attach":
375 try:
376 # activate the management volume
377 # will be deactivated at detach time
378 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
379 self._synchSmConfigWithMetaData()
380 util.SMlog("Sync SR metadata and the state on the storage.")
381 self.syncMetadataAndStorage()
382 self.syncMetadataAndXapi()
383 except Exception as e:
384 util.SMlog("Exception in _checkMetadataVolume, " \
385 "Error: %s." % str(e))
386 elif not self.mdexists and not self.legacyMode:  [386 ↛ 389: condition on line 386 was never false]
387 self._introduceMetaDataVolume()
389 if self.mdexists:
390 self.legacyMode = False
392 def _synchSmConfigWithMetaData(self):
393 util.SMlog("Syncing sm-config with metadata volume")
395 try:
396 # get SR info from metadata
397 sr_info = {}
398 map = {}
399 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
401 if sr_info == {}:  [401 ↛ 402: condition on line 401 was never true]
402 raise Exception("Failed to get SR information from metadata.")
404 if "allocation" in sr_info:  [404 ↛ 408: condition on line 404 was never false]
405 self.provision = sr_info.get("allocation")
406 map['allocation'] = sr_info.get("allocation")
407 else:
408 raise Exception("Allocation key not found in SR metadata. "
409 "SR info found: %s" % sr_info)
411 except Exception as e:
412 raise xs_errors.XenError(
413 'MetadataError',
414 opterr='Error reading SR params from '
415 'metadata Volume: %s' % str(e))
416 try:
417 map[self.FLAG_USE_VHD] = 'true'
418 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
419 except:
420 raise xs_errors.XenError(
421 'MetadataError',
422 opterr='Error updating sm_config key')
424 def _introduceMetaDataVolume(self):
425 util.SMlog("Creating Metadata volume")
426 try:
427 config = {}
428 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
430 # activate the management volume, will be deactivated at detach time
431 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
433 name_label = util.to_plain_string( \
434 self.session.xenapi.SR.get_name_label(self.sr_ref))
435 name_description = util.to_plain_string( \
436 self.session.xenapi.SR.get_name_description(self.sr_ref))
437 config[self.FLAG_USE_VHD] = "true"
438 config['allocation'] = self.provision
439 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
441 # Add the SR metadata
442 self.updateSRMetadata(self.provision)
443 except Exception as e:
444 raise xs_errors.XenError('MetadataError', \
445 opterr='Error introducing Metadata Volume: %s' % str(e))
447 def _removeMetadataVolume(self):
448 if self.mdexists:
449 try:
450 self.lvmCache.remove(self.MDVOLUME_NAME)
451 except:
452 raise xs_errors.XenError('MetadataError', \
453 opterr='Failed to delete MGT Volume')
455 def _refresh_size(self):
456 """
457 Refreshes the size of the backing device.
458 Returns True if all paths/devices agree on the same size.
459 """
460 if hasattr(self, 'SCSIid'):  [460 ↛ 462: condition on line 460 was never true]
461 # LVMoHBASR, LVMoISCSISR
462 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
463 else:
464 # LVMSR
465 devices = self.dconf['device'].split(',')
466 scsiutil.refreshdev(devices)
467 return True
469 def _expand_size(self):
470 """
471 Expands the size of the SR by growing into additional available
472 space, if extra space is available on the backing device.
473 Needs to be called after a successful call of _refresh_size.
474 """
475 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
476 # We are comparing PV- with VG-sizes that are aligned. Need a threshold
477 resizethreshold = 100 * 1024 * 1024 # 100MB
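# Illustrative example: if the PVs now report 110 GiB in total while the VG's
# physical_size is still 100 GiB, the 100 MB threshold is exceeded and every PV in
# the VG is grown via lvutil.resizePV().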
478 devices = self.dconf['device'].split(',')
479 totaldevicesize = 0
480 for device in devices:
481 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
482 if totaldevicesize >= (currentvgsize + resizethreshold):
483 try:
484 if hasattr(self, 'SCSIid'):  [484 ↛ 486: condition on line 484 was never true]
485 # LVMoHBASR, LVMoISCSISR might have slaves
486 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
487 getattr(self, 'SCSIid'))
488 util.SMlog("LVMSR._expand_size for %s will resize the pv." %
489 self.uuid)
490 for pv in lvutil.get_pv_for_vg(self.vgname):
491 lvutil.resizePV(pv)
492 except:
493 util.logException("LVMSR._expand_size for %s failed to resize"
494 " the PV" % self.uuid)
496 @override
497 @deviceCheck
498 def create(self, uuid, size) -> None:
499 util.SMlog("LVMSR.create for %s" % self.uuid)
500 if not self.isMaster:
501 util.SMlog('sr_create blocked for non-master')
502 raise xs_errors.XenError('LVMMaster')
504 if lvutil._checkVG(self.vgname):
505 raise xs_errors.XenError('SRExists')
507 # Check none of the devices already in use by other PBDs
508 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
509 raise xs_errors.XenError('SRInUse')
511 # Check serial number entry in SR records
512 for dev in self.dconf['device'].split(','):
513 if util.test_scsiserial(self.session, dev):
514 raise xs_errors.XenError('SRInUse')
516 lvutil.createVG(self.dconf['device'], self.vgname)
518 #Update serial number string
519 scsiutil.add_serial_record(self.session, self.sr_ref, \
520 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
522 # since this is an SR.create turn off legacy mode
523 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
524 self.FLAG_USE_VHD, 'true')
526 @override
527 def delete(self, uuid) -> None:
528 util.SMlog("LVMSR.delete for %s" % self.uuid)
529 if not self.isMaster:
530 raise xs_errors.XenError('LVMMaster')
531 cleanup.gc_force(self.session, self.uuid)
533 success = True
534 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
535 if util.extractSRFromDevMapper(fileName) != self.uuid:
536 continue
538 if util.doesFileHaveOpenHandles(fileName):
539 util.SMlog("LVMSR.delete: The dev mapper entry %s has open " \
540 "handles" % fileName)
541 success = False
542 continue
544 # Now attempt to remove the dev mapper entry
545 if not lvutil.removeDevMapperEntry(fileName, False):
546 success = False
547 continue
549 try:
550 lvname = os.path.basename(fileName.replace('-', '/'). \
551 replace('//', '-'))
552 lpath = os.path.join(self.path, lvname)
553 os.unlink(lpath)
554 except OSError as e:
555 if e.errno != errno.ENOENT:
556 util.SMlog("LVMSR.delete: failed to remove the symlink for " \
557 "file %s. Error: %s" % (fileName, str(e)))
558 success = False
560 if success:
561 try:
562 if util.pathexists(self.path):
563 os.rmdir(self.path)
564 except Exception as e:
565 util.SMlog("LVMSR.delete: failed to remove the symlink " \
566 "directory %s. Error: %s" % (self.path, str(e)))
567 success = False
569 self._removeMetadataVolume()
570 self.lvmCache.refresh()
571 if LvmCowUtil.getVolumeInfo(self.lvmCache):
572 raise xs_errors.XenError('SRNotEmpty')
574 if not success:
575 raise Exception("LVMSR delete failed, please refer to the log " \
576 "for details.")
578 lvutil.removeVG(self.dconf['device'], self.vgname)
579 self._cleanup()
581 @override
582 def attach(self, uuid) -> None:
583 util.SMlog("LVMSR.attach for %s" % self.uuid)
585 self._cleanup(True) # in case of host crashes, if detach wasn't called
587 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):  [587 ↛ 588: condition on line 587 was never true]
588 raise xs_errors.XenError('SRUnavailable', \
589 opterr='no such volume group: %s' % self.vgname)
591 # Refresh the metadata status
592 self._checkMetadataVolume()
594 refreshsizeok = self._refresh_size()
596 if self.isMaster:  [596 ↛ 607: condition on line 596 was never false]
597 if refreshsizeok:  [597 ↛ 601: condition on line 597 was never false]
598 self._expand_size()
600 # Update SCSIid string
601 util.SMlog("Calling devlist_to_serial")
602 scsiutil.add_serial_record(
603 self.session, self.sr_ref,
604 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
606 # Test Legacy Mode Flag and update if COW volumes exist
607 if self.isMaster and self.legacyMode:  [607 ↛ 608: condition on line 607 was never true]
608 vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
609 for uuid, info in vdiInfo.items():
610 if VdiType.isCowImage(info.vdiType):
611 self.legacyMode = False
612 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
613 self._introduceMetaDataVolume()
614 break
616 # Set the block scheduler
617 for dev in self.dconf['device'].split(','):
618 self.block_setscheduler(dev)
620 @override
621 def detach(self, uuid) -> None:
622 util.SMlog("LVMSR.detach for %s" % self.uuid)
623 cleanup.abort(self.uuid)
625 # Do a best effort cleanup of the dev mapper entries
626 # go through all devmapper entries for this VG
627 success = True
628 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
629 if util.extractSRFromDevMapper(fileName) != self.uuid:  [629 ↛ 630: condition on line 629 was never true]
630 continue
632 with Fairlock('devicemapper'):
633 # check if any file has open handles
634 if util.doesFileHaveOpenHandles(fileName):
635 # if yes, log this and signal failure
636 util.SMlog(
637 f"LVMSR.detach: The dev mapper entry {fileName} has "
638 "open handles")
639 success = False
640 continue
642 # Now attempt to remove the dev mapper entry
643 if not lvutil.removeDevMapperEntry(fileName, False):  [643 ↛ 644: condition on line 643 was never true]
644 success = False
645 continue
647 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
648 try:
649 lvname = os.path.basename(fileName.replace('-', '/'). \
650 replace('//', '-'))
651 lvname = os.path.join(self.path, lvname)
652 util.force_unlink(lvname)
653 except Exception as e:
654 util.SMlog("LVMSR.detach: failed to remove the symlink for " \
655 "file %s. Error: %s" % (fileName, str(e)))
656 success = False
658 # now remove the directory where the symlinks are
659 # this should pass as the directory should be empty by now
660 if success:
661 try:
662 if util.pathexists(self.path):  [662 ↛ 663: condition on line 662 was never true]
663 os.rmdir(self.path)
664 except Exception as e:
665 util.SMlog("LVMSR.detach: failed to remove the symlink " \
666 "directory %s. Error: %s" % (self.path, str(e)))
667 success = False
669 if not success:
670 raise Exception("SR detach failed, please refer to the log " \
671 "for details.")
673 # Don't delete lock files on the master as it will break the locking
674 # between SM and any GC thread that survives through SR.detach.
675 # However, we should still delete lock files on slaves as it is the
676 # only place to do so.
677 self._cleanup(self.isMaster)
679 @override
680 def forget_vdi(self, uuid) -> None:
681 if not self.legacyMode:
682 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
683 super(LVMSR, self).forget_vdi(uuid)
685 @override
686 def scan(self, uuid) -> None:
687 activated_lvs = set()
688 try:
689 util.SMlog("LVMSR.scan for %s" % self.uuid)
690 if not self.isMaster:  [690 ↛ 691: condition on line 690 was never true]
691 util.SMlog('sr_scan blocked for non-master')
692 raise xs_errors.XenError('LVMMaster')
694 if self._refresh_size():  [694 ↛ 696: condition on line 694 was never false]
695 self._expand_size()
696 self.lvmCache.refresh()
697 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
698 self._loadvdis()
699 stats = lvutil._getVGstats(self.vgname)
700 self.physical_size = stats['physical_size']
701 self.physical_utilisation = stats['physical_utilisation']
703 # Now check if there are any VDIs in the metadata, which are not in
704 # XAPI
705 if self.mdexists:  [705 ↛ 816: condition on line 705 was never false]
706 vdiToSnaps: Dict[str, List[str]] = {}
707 # get VDIs from XAPI
708 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
709 vdi_uuids = set([])
710 for vdi in vdis:
711 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
713 info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
715 for vdi in list(info.keys()):
716 vdi_uuid = info[vdi][UUID_TAG]
717 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):  [717 ↛ 718: condition on line 717 was never true]
718 if info[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
719 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
720 else:
721 vdiToSnaps[info[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
723 if vdi_uuid not in vdi_uuids:
724 util.SMlog("Introduce VDI %s as it is present in " \
725 "metadata and not in XAPI." % vdi_uuid)
726 vdi_type = info[vdi][VDI_TYPE_TAG]
727 sm_config = {}
728 sm_config['vdi_type'] = vdi_type
729 lvname = "%s%s" % (LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
730 self.lvActivator.activate(
731 vdi_uuid, lvname, LVActivator.NORMAL)
732 activated_lvs.add(vdi_uuid)
733 lvPath = os.path.join(self.path, lvname)
735 if not VdiType.isCowImage(vdi_type):  [735 ↛ 736: condition on line 735 was never true]
736 size = self.lvmCache.getSize(LV_PREFIX[vdi_type] + vdi_uuid)
737 utilisation = \
738 util.roundup(lvutil.LVM_SIZE_INCREMENT,
739 int(size))
740 else:
741 cowutil = getCowUtil(vdi_type)
742 lvmcowutil = LvmCowUtil(cowutil)
744 parent = cowutil.getParentNoCheck(lvPath)
746 if parent is not None:  [746 ↛ 747: condition on line 746 was never true]
747 sm_config['vhd-parent'] = parent[parent.find('-') + 1:]
748 size = cowutil.getSizeVirt(lvPath)
749 if self.provision == "thin":  [749 ↛ 750: condition on line 749 was never true]
750 utilisation = util.roundup(
751 lvutil.LVM_SIZE_INCREMENT,
752 cowutil.calcOverheadEmpty(max(size, cowutil.getDefaultPreallocationSizeVirt()))
753 )
754 else:
755 utilisation = lvmcowutil.calcVolumeSize(int(size))
757 vdi_ref = self.session.xenapi.VDI.db_introduce(
758 vdi_uuid,
759 info[vdi][NAME_LABEL_TAG],
760 info[vdi][NAME_DESCRIPTION_TAG],
761 self.sr_ref,
762 info[vdi][TYPE_TAG],
763 False,
764 bool(int(info[vdi][READ_ONLY_TAG])),
765 {},
766 vdi_uuid,
767 {},
768 sm_config)
770 self.session.xenapi.VDI.set_managed(vdi_ref,
771 bool(int(info[vdi][MANAGED_TAG])))
772 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
773 str(size))
774 self.session.xenapi.VDI.set_physical_utilisation( \
775 vdi_ref, str(utilisation))
776 self.session.xenapi.VDI.set_is_a_snapshot( \
777 vdi_ref, bool(int(info[vdi][IS_A_SNAPSHOT_TAG])))
778 if bool(int(info[vdi][IS_A_SNAPSHOT_TAG])):  [778 ↛ 779: condition on line 778 was never true]
779 self.session.xenapi.VDI.set_snapshot_time( \
780 vdi_ref, DateTime(info[vdi][SNAPSHOT_TIME_TAG]))
781 if info[vdi][TYPE_TAG] == 'metadata':  [781 ↛ 782: condition on line 781 was never true]
782 self.session.xenapi.VDI.set_metadata_of_pool( \
783 vdi_ref, info[vdi][METADATA_OF_POOL_TAG])
785 # Update CBT status of disks either just added
786 # or already in XAPI
787 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
788 if cbt_logname in cbt_vdis:  [788 ↛ 789: condition on line 788 was never true]
789 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
790 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
791 # For existing VDIs, update local state too
792 # Scan in base class SR updates existing VDIs
793 # again based on local states
794 if vdi_uuid in self.vdis:
795 self.vdis[vdi_uuid].cbt_enabled = True
796 cbt_vdis.remove(cbt_logname)
798 # Now set the snapshot statuses correctly in XAPI
799 for srcvdi in vdiToSnaps.keys():  [799 ↛ 800: loop on line 799 never started]
800 try:
801 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
802 except:
803 # the source VDI no longer exists, continue
804 continue
806 for snapvdi in vdiToSnaps[srcvdi]:
807 try:
808 # this might fail in cases where its already set
809 snapref = \
810 self.session.xenapi.VDI.get_by_uuid(snapvdi)
811 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
812 except Exception as e:
813 util.SMlog("Setting snapshot failed. " \
814 "Error: %s" % str(e))
816 if cbt_vdis:  [816 ↛ 827: condition on line 816 was never false]
817 # If we have items remaining in this list,
818 # they are cbt_metadata VDI that XAPI doesn't know about
819 # Add them to self.vdis and they'll get added to the DB
820 for cbt_vdi in cbt_vdis:  [820 ↛ 821: loop on line 820 never started]
821 cbt_uuid = cbt_vdi.split(".")[0]
822 new_vdi = self.vdi(cbt_uuid)
823 new_vdi.ty = "cbt_metadata"
824 new_vdi.cbt_enabled = True
825 self.vdis[cbt_uuid] = new_vdi
827 super(LVMSR, self).scan(uuid)
828 self._kickGC()
830 finally:
831 for vdi in activated_lvs:
832 self.lvActivator.deactivate(
833 vdi, LVActivator.NORMAL, False)
835 @override
836 def update(self, uuid) -> None:
837 if not lvutil._checkVG(self.vgname):  [837 ↛ 838: condition on line 837 was never true]
838 return
839 self._updateStats(uuid, 0)
841 if self.legacyMode:  [841 ↛ 842: condition on line 841 was never true]
842 return
844 # sync name_label in metadata with XAPI
845 update_map = {}
846 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
847 METADATA_OBJECT_TYPE_SR,
848 NAME_LABEL_TAG: util.to_plain_string( \
849 self.session.xenapi.SR.get_name_label(self.sr_ref)),
850 NAME_DESCRIPTION_TAG: util.to_plain_string( \
851 self.session.xenapi.SR.get_name_description(self.sr_ref))
852 }
853 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
855 def _updateStats(self, uuid, virtAllocDelta):
856 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
857 self.virtual_allocation = valloc + virtAllocDelta
858 util.SMlog("Setting virtual_allocation of SR %s to %d" %
859 (uuid, self.virtual_allocation))
860 stats = lvutil._getVGstats(self.vgname)
861 self.physical_size = stats['physical_size']
862 self.physical_utilisation = stats['physical_utilisation']
863 self._db_update()
865 @override
866 @deviceCheck
867 def probe(self) -> str:
868 return lvutil.srlist_toxml(
869 lvutil.scan_srlist(VG_PREFIX, self.dconf['device']),
870 VG_PREFIX,
871 ('metadata' in self.srcmd.params['sr_sm_config'] and \
872 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
874 @override
875 def vdi(self, uuid) -> VDI.VDI:
876 return LVMVDI(self, uuid)
878 def _loadvdis(self):
879 self.virtual_allocation = 0
880 self.vdiInfo = LvmCowUtil.getVDIInfo(self.lvmCache)
881 self.allVDIs = {}
883 for uuid, info in self.vdiInfo.items():
884 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):  [884 ↛ 885: condition on line 884 was never true]
885 continue
886 if info.scanError:  [886 ↛ 887: condition on line 886 was never true]
887 raise xs_errors.XenError('VDIUnavailable', \
888 opterr='Error scanning VDI %s' % uuid)
889 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
890 if not self.vdis[uuid].hidden:  [890 ↛ 883: condition on line 890 was never false]
891 self.virtual_allocation += self.vdis[uuid].utilisation
893 for uuid, vdi in self.vdis.items():
894 if vdi.parent:  [894 ↛ 895: condition on line 894 was never true]
895 if vdi.parent in self.vdis:
896 self.vdis[vdi.parent].read_only = True
897 if vdi.parent in geneology:
898 geneology[vdi.parent].append(uuid)
899 else:
900 geneology[vdi.parent] = [uuid]
902 # Now remove all hidden leaf nodes to avoid introducing records that
903 # will be GC'ed
904 for uuid in list(self.vdis.keys()):
905 if uuid not in geneology and self.vdis[uuid].hidden:  [905 ↛ 906: condition on line 905 was never true]
906 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
907 del self.vdis[uuid]
909 def _ensureSpaceAvailable(self, amount_needed):
910 space_available = lvutil._getVGstats(self.vgname)['freespace']
911 if (space_available < amount_needed):
912 util.SMlog("Not enough space! free space: %d, need: %d" % \
913 (space_available, amount_needed))
914 raise xs_errors.XenError('SRNoSpace')
916 def _handleInterruptedCloneOps(self):
917 entries = self.journaler.getAll(LVMVDI.JRN_CLONE)
918 for uuid, val in entries.items():  [918 ↛ 919: loop on line 918 never started]
919 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
920 self._handleInterruptedCloneOp(uuid, val)
921 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
922 self.journaler.remove(LVMVDI.JRN_CLONE, uuid)
924 def _handleInterruptedCoalesceLeaf(self):
925 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
926 if len(entries) > 0:  [926 ↛ 927: condition on line 926 was never true]
927 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
928 cleanup.gc_force(self.session, self.uuid)
929 self.lvmCache.refresh()
931 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
932 """Either roll back or finalize the interrupted snapshot/clone
933 operation. Rolling back is unsafe if the leaf images have already been
934 in use and written to. However, it is always safe to roll back while
935 we're still in the context of the failed snapshot operation since the
936 VBD is paused for the duration of the operation"""
937 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
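# The journal value encodes "<baseUuid>_<clonUuid>" (clonUuid may be empty when no
# separate clone leaf was created); it is split just below to drive the revert or
# completion of the interrupted operation.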
938 lvs = LvmCowUtil.getVolumeInfo(self.lvmCache)
939 baseUuid, clonUuid = jval.split("_")
941 # is there a "base copy" VDI?
942 if not lvs.get(baseUuid):
943 # no base copy: make sure the original is there
944 if lvs.get(origUuid):
945 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
946 return
947 raise util.SMException("base copy %s not present, " \
948 "but no original %s found" % (baseUuid, origUuid))
950 vdis = LvmCowUtil.getVDIInfo(self.lvmCache)
951 base = vdis[baseUuid]
952 cowutil = getCowUtil(base.vdiType)
954 if forceUndo:
955 util.SMlog("Explicit revert")
956 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
957 return
959 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
960 util.SMlog("One or both leaves missing => revert")
961 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
962 return
964 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
965 util.SMlog("One or both leaves invalid => revert")
966 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
967 return
969 orig = vdis[origUuid]
970 self.lvActivator.activate(baseUuid, base.lvName, False)
971 self.lvActivator.activate(origUuid, orig.lvName, False)
972 if orig.parentUuid != baseUuid:
973 parent = vdis[orig.parentUuid]
974 self.lvActivator.activate(parent.uuid, parent.lvName, False)
975 origPath = os.path.join(self.path, orig.lvName)
977 if cowutil.check(origPath) != CowUtil.CheckResult.Success:
978 util.SMlog("Orig image invalid => revert")
979 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
980 return
982 if clonUuid:
983 clon = vdis[clonUuid]
984 clonPath = os.path.join(self.path, clon.lvName)
985 self.lvActivator.activate(clonUuid, clon.lvName, False)
986 if cowutil.check(clonPath) != CowUtil.CheckResult.Success:
987 util.SMlog("Clon image invalid => revert")
988 self._undoCloneOp(cowutil, lvs, origUuid, baseUuid, clonUuid)
989 return
991 util.SMlog("Snapshot appears valid, will not roll back")
992 self._completeCloneOp(cowutil, vdis, origUuid, baseUuid, clonUuid)
994 def _undoCloneOp(self, cowutil, lvs, origUuid, baseUuid, clonUuid):
995 base = lvs[baseUuid]
996 basePath = os.path.join(self.path, base.name)
998 # make the parent RW
999 if base.readonly:
1000 self.lvmCache.setReadonly(base.name, False)
1002 ns = NS_PREFIX_LVM + self.uuid
1003 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
1004 origRefcountNormal = 0
1006 # un-hide the parent
1007 if VdiType.isCowImage(base.vdiType):
1008 self.lvActivator.activate(baseUuid, base.name, False)
1009 origRefcountNormal = 1
1010 imageInfo = cowutil.getInfo(basePath, LvmCowUtil.extractUuid, False)
1011 if imageInfo.hidden:
1012 cowutil.setHidden(basePath, False)
1013 elif base.hidden:
1014 self.lvmCache.setHidden(base.name, False)
1016 # remove the child nodes
1017 if clonUuid and lvs.get(clonUuid):
1018 if not VdiType.isCowImage(lvs[clonUuid].vdiType):
1019 raise util.SMException("clone %s not a COW image" % clonUuid)
1020 self.lvmCache.remove(lvs[clonUuid].name)
1021 if self.lvActivator.get(clonUuid, False):
1022 self.lvActivator.remove(clonUuid, False)
1023 if lvs.get(origUuid):
1024 self.lvmCache.remove(lvs[origUuid].name)
1026 # inflate the parent to fully-allocated size
1027 if VdiType.isCowImage(base.vdiType):
1028 lvmcowutil = LvmCowUtil(cowutil)
1029 fullSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1030 lvmcowutil.inflate(self.journaler, self.uuid, baseUuid, base.vdiType, fullSize)
1032 # rename back
1033 origLV = LV_PREFIX[base.vdiType] + origUuid
1034 self.lvmCache.rename(base.name, origLV)
1035 RefCounter.reset(baseUuid, ns)
1036 if self.lvActivator.get(baseUuid, False):
1037 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1038 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1040 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1041 # flag to facilitate vm deactivate
1042 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1043 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1045 # update LVM metadata on slaves
1046 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1047 LvmCowUtil.refreshVolumeOnSlaves(self.session, self.uuid, self.vgname,
1048 origLV, origUuid, slaves)
1050 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1052 def _completeCloneOp(self, cowutil, vdis, origUuid, baseUuid, clonUuid):
1053 """Finalize the interrupted snapshot/clone operation. This must not be
1054 called from the live snapshot op context because we attempt to pause/
1055 unpause the VBD here (the VBD is already paused during snapshot, so it
1056 would cause a deadlock)"""
1057 base = vdis[baseUuid]
1058 clon = None
1059 if clonUuid:
1060 clon = vdis[clonUuid]
1062 cleanup.abort(self.uuid)
1064 # make sure the parent is hidden and read-only
1065 if not base.hidden:
1066 if not VdiType.isCowImage(base.vdiType):
1067 self.lvmCache.setHidden(base.lvName)
1068 else:
1069 basePath = os.path.join(self.path, base.lvName)
1070 cowutil.setHidden(basePath)
1071 if not base.lvReadonly:
1072 self.lvmCache.setReadonly(base.lvName, True)
1074 # NB: since this snapshot-preserving call is only invoked outside the
1075 # snapshot op context, we assume the LVM metadata on the involved slave
1076 # has by now been refreshed and do not attempt to do it here
1078 # Update the original record
1079 try:
1080 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1081 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1082 type = self.session.xenapi.VDI.get_type(vdi_ref)
1083 sm_config["vdi_type"] = vdis[origUuid].vdiType
1084 sm_config['vhd-parent'] = baseUuid
1085 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1086 except XenAPI.Failure:
1087 util.SMlog("ERROR updating the orig record")
1089 # introduce the new VDI records
1090 if clonUuid:
1091 try:
1092 clon_vdi = VDI.VDI(self, clonUuid)
1093 clon_vdi.read_only = False
1094 clon_vdi.location = clonUuid
1095 clon_vdi.utilisation = clon.sizeLV
1096 clon_vdi.sm_config = {
1097 "vdi_type": clon.vdiType,
1098 "vhd-parent": baseUuid}
1100 if not self.legacyMode:
1101 LVMMetadataHandler(self.mdpath). \
1102 ensureSpaceIsAvailableForVdis(1)
1104 clon_vdi_ref = clon_vdi._db_introduce()
1105 util.SMlog("introduced clon VDI: %s (%s)" % \
1106 (clon_vdi_ref, clonUuid))
1108 vdi_info = {UUID_TAG: clonUuid,
1109 NAME_LABEL_TAG: clon_vdi.label,
1110 NAME_DESCRIPTION_TAG: clon_vdi.description,
1111 IS_A_SNAPSHOT_TAG: 0,
1112 SNAPSHOT_OF_TAG: '',
1113 SNAPSHOT_TIME_TAG: '',
1114 TYPE_TAG: type,
1115 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1116 READ_ONLY_TAG: int(clon_vdi.read_only),
1117 MANAGED_TAG: int(clon_vdi.managed),
1118 METADATA_OF_POOL_TAG: ''
1119 }
1121 if not self.legacyMode:
1122 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1124 except XenAPI.Failure:
1125 util.SMlog("ERROR introducing the clon record")
1127 try:
1128 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1129 base_vdi.label = "base copy"
1130 base_vdi.read_only = True
1131 base_vdi.location = baseUuid
1132 base_vdi.size = base.sizeVirt
1133 base_vdi.utilisation = base.sizeLV
1134 base_vdi.managed = False
1135 base_vdi.sm_config = {
1136 "vdi_type": base.vdiType,
1137 "vhd-parent": baseUuid}
1139 if not self.legacyMode:
1140 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1142 base_vdi_ref = base_vdi._db_introduce()
1143 util.SMlog("introduced base VDI: %s (%s)" % \
1144 (base_vdi_ref, baseUuid))
1146 vdi_info = {UUID_TAG: baseUuid,
1147 NAME_LABEL_TAG: base_vdi.label,
1148 NAME_DESCRIPTION_TAG: base_vdi.description,
1149 IS_A_SNAPSHOT_TAG: 0,
1150 SNAPSHOT_OF_TAG: '',
1151 SNAPSHOT_TIME_TAG: '',
1152 TYPE_TAG: type,
1153 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1154 READ_ONLY_TAG: int(base_vdi.read_only),
1155 MANAGED_TAG: int(base_vdi.managed),
1156 METADATA_OF_POOL_TAG: ''
1157 }
1159 if not self.legacyMode:
1160 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1161 except XenAPI.Failure:
1162 util.SMlog("ERROR introducing the base record")
1164 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1166 def _undoAllJournals(self):
1167 """Undo all COW image & SM interrupted journaled operations. This call must
1168 be serialized with respect to all operations that create journals"""
1169 # undoing interrupted inflates must be done first, since undoing COW image
1170 # ops might require inflations
1171 self.lock.acquire()
1172 try:
1173 self._undoAllInflateJournals()
1174 self._undoAllCowJournals()
1175 self._handleInterruptedCloneOps()
1176 self._handleInterruptedCoalesceLeaf()
1177 finally:
1178 self.lock.release()
1179 self.cleanup()
1181 def _undoAllInflateJournals(self):
1182 entries = self.journaler.getAll(LvmCowUtil.JOURNAL_INFLATE)
1183 if len(entries) == 0:
1184 return
1185 self._loadvdis()
1186 for uuid, val in entries.items():
1187 vdi = self.vdis.get(uuid)
1188 if vdi:  [1188 ↛ 1208: condition on line 1188 was never false]
1189 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1190 (uuid, vdi.path, val))
1191 if vdi.readonly:  [1191 ↛ 1192: condition on line 1191 was never true]
1192 self.lvmCache.setReadonly(vdi.lvname, False)
1193 self.lvActivator.activate(uuid, vdi.lvname, False)
1194 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1196 cowutil = getCowUtil(vdi.vdi_type)
1197 lvmcowutil = LvmCowUtil(cowutil)
1199 footer_size = cowutil.getFooterSize()
1200 util.zeroOut(vdi.path, currSizeLV - footer_size, footer_size)
1201 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(val))
1202 if vdi.readonly:  [1202 ↛ 1203: condition on line 1202 was never true]
1203 self.lvmCache.setReadonly(vdi.lvname, True)
1204 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref):  [1204 ↛ 1205: condition on line 1204 was never true]
1205 LvmCowUtil.refreshVolumeOnAllSlaves(
1206 self.session, self.uuid, self.vgname, vdi.lvname, uuid
1207 )
1208 self.journaler.remove(LvmCowUtil.JOURNAL_INFLATE, uuid)
1209 delattr(self, "vdiInfo")
1210 delattr(self, "allVDIs")
1212 def _undoAllCowJournals(self):
1213 """
1214 Check if there are COW journals in existence and revert them.
1215 """
1216 journals = LvmCowUtil.getAllResizeJournals(self.lvmCache)
1217 if len(journals) == 0:  [1217 ↛ 1219: condition on line 1217 was never false]
1218 return
1219 self._loadvdis()
1221 for uuid, jlvName in journals:
1222 vdi = self.vdis[uuid]
1223 util.SMlog("Found COW journal %s, reverting %s" % (uuid, vdi.path))
1224 cowutil = getCowUtil(vdi.vdi_type)
1225 lvmcowutil = LvmCowUtil(cowutil)
1227 self.lvActivator.activate(uuid, vdi.lvname, False)
1228 self.lvmCache.activateNoRefcount(jlvName)
1229 fullSize = lvmcowutil.calcVolumeSize(vdi.size)
1230 lvmcowutil.inflate(self.journaler, self.uuid, vdi.uuid, vdi.vdi_type, fullSize)
1231 try:
1232 jFile = os.path.join(self.path, jlvName)
1233 cowutil.revert(vdi.path, jFile)
1234 except util.CommandException:
1235 util.logException("COW journal revert")
1236 cowutil.check(vdi.path)
1237 util.SMlog("COW image revert failed but COW image ok: removing journal")
1238 # Attempt to reclaim unused space
1241 imageInfo = cowutil.getInfo(vdi.path, LvmCowUtil.extractUuid, False)
1242 NewSize = lvmcowutil.calcVolumeSize(imageInfo.sizeVirt)
1243 if NewSize < fullSize:
1244 lvmcowutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1245 LvmCowUtil.refreshVolumeOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid)
1246 self.lvmCache.remove(jlvName)
1247 delattr(self, "vdiInfo")
1248 delattr(self, "allVDIs")
1250 def call_on_slave(self, args, host_refs, message: str):
1251 master_ref = util.get_this_host_ref(self.session)
1252 for hostRef in host_refs:
1253 if hostRef == master_ref:  [1253 ↛ 1254: condition on line 1253 was never true]
1254 continue
1255 util.SMlog(f"{message} on slave {hostRef}")
1256 rv = self.session.xenapi.host.call_plugin(
1257 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1258 util.SMlog("call-plugin returned: %s" % rv)
1259 if not rv:  [1259 ↛ 1260: condition on line 1259 was never true]
1260 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1262 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1263 baseUuid, baseLV):
1264 """We need to reactivate the original LV on each slave (note that the
1265 name for the original LV might change), as well as init the refcount
1266 for the base LV"""
1267 args = {"vgName": self.vgname,
1268 "action1": "refresh",
1269 "lvName1": origLV,
1270 "action2": "activate",
1271 "ns2": NS_PREFIX_LVM + self.uuid,
1272 "lvName2": baseLV,
1273 "uuid2": baseUuid}
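# The on-slave plugin's "multi" call runs numbered actions in order: here action1
# refreshes the (possibly renamed) original LV and action2 activates the base LV in
# the SR's LVM refcount namespace; the helpers below use the same
# actionN/lvNameN/nsN/uuidN convention.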
1275 message = f"Updating {origOldLV}, {origLV}, {baseLV}"
1276 self.call_on_slave(args, hostRefs, message)
1278 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1279 """Reactivate and refresh CBT log file on slaves"""
1280 args = {"vgName": self.vgname,
1281 "action1": "deactivateNoRefcount",
1282 "lvName1": cbtlog,
1283 "action2": "refresh",
1284 "lvName2": cbtlog}
1286 message = f"Updating {cbtlog}"
1287 self.call_on_slave(args, hostRefs, message)
1289 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1290 """Tell the slave we deleted the base image"""
1291 args = {"vgName": self.vgname,
1292 "action1": "cleanupLockAndRefcount",
1293 "uuid1": baseUuid,
1294 "ns1": NS_PREFIX_LVM + self.uuid}
1296 message = f"Cleaning locks for {baseLV}"
1297 self.call_on_slave(args, hostRefs, message)
1299 def _deactivateOnSlave(self, hostRefs, lvname):
1300 """Tell the slave we need to deactivate the base image"""
1301 args = {
1302 "vgName": self.vgname,
1303 "action1": "deactivateNoRefcount",
1304 "lvName1": lvname}
1306 message = f"Deactivating {lvname}"
1307 self.call_on_slave(args, hostRefs, message)
1309 def _cleanup(self, skipLockCleanup=False):
1310 """delete stale refcounter, flag, and lock files"""
1311 RefCounter.resetAll(NS_PREFIX_LVM + self.uuid)
1312 IPCFlag(self.uuid).clearAll()
1313 if not skipLockCleanup:  [1313 ↛ 1314: condition on line 1313 was never true]
1314 lock.Lock.cleanupAll(self.uuid)
1315 lock.Lock.cleanupAll(NS_PREFIX_LVM + self.uuid)
1317 def _prepareTestMode(self):
1318 util.SMlog("Test mode: %s" % self.testMode)
1319 if self.ENV_VAR_VHD_TEST.get(self.testMode):  [1319 ↛ 1320: condition on line 1319 was never true]
1320 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1321 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1323 def _kickGC(self):
1324 util.SMlog("Kicking GC")
1325 cleanup.start_gc_service(self.uuid)
1327 def ensureCBTSpace(self, virtual_size=0):
1328 # Ensure we have space for at least one LV
1329 size = max(util.roundup(CBT_BLOCK_SIZE, virtual_size//CBT_BLOCK_SIZE), self.journaler.LV_SIZE)
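# The reservation scales with the number of CBT blocks covered by virtual_size,
# rounded up to CBT_BLOCK_SIZE, and is never smaller than the journal LV size.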
1330 self._ensureSpaceAvailable(size)
1333class LVMVDI(VDI.VDI):
1335 JRN_CLONE = "clone" # journal entry type for the clone operation
1337 @override
1338 def load(self, vdi_uuid) -> None:
1339 self.lock = self.sr.lock
1340 self.lvActivator = self.sr.lvActivator
1341 self.loaded = False
1342 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):  [1342 ↛ 1344: condition on line 1342 was never false]
1343 self._setType(VdiType.RAW)
1344 self.uuid = vdi_uuid
1345 self.location = self.uuid
1346 self.exists = True
1348 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1349 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1350 if self.parent:  [1350 ↛ 1351: condition on line 1350 was never true]
1351 self.sm_config_override['vhd-parent'] = self.parent
1352 else:
1353 self.sm_config_override['vhd-parent'] = None
1354 return
1356 # scan() didn't run: determine the type of the VDI manually
1357 if self._determineType():  [1357 ↛ 1361: condition on line 1357 was never false]
1358 return
1360 # the VDI must be in the process of being created
1361 self.exists = False
1363 vdi_sm_config = self.sr.srcmd.params.get("vdi_sm_config")
1364 if vdi_sm_config:
1365 image_format = vdi_sm_config.get("image-format") or vdi_sm_config.get("type")
1366 if image_format:
1367 try:
1368 self._setType(CREATE_PARAM_TYPES[image_format])
1369 except:
1370 raise xs_errors.XenError('VDICreate', opterr='bad image format')
1371 if self.sr.legacyMode and self.sr.cmd == 'vdi_create' and VdiType.isCowImage(self.vdi_type):
1372 raise xs_errors.XenError('VDICreate', opterr='Cannot create COW type disk in legacy mode')
1374 if not self.vdi_type:
1375 size = int(self.sr.srcmd.params['args'][0])
1376 # In the case of vdi_create, the first parameter is size.
1377 # We need it to validate the vdi_type choice
1378 for image_format in self.sr.preferred_image_formats:
1379 vdi_type = getVdiTypeFromImageFormat(image_format)
1380 self._setType(vdi_type)
1381 try:
1382 self.cowutil.validateAndRoundImageSize(size)
1383 break
1384 except xs_errors.SROSError:
1385 util.SMlog(f"We won't be able to create the VDI with format {vdi_type}.")
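# The loop above keeps the first preferred image format whose size validation passes;
# if none passes, the last attempted vdi_type remains set and a subsequent create()
# will fail its own validateAndRoundImageSize() check.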
1387 self.lvname = "%s%s" % (LV_PREFIX[self.vdi_type], vdi_uuid)
1388 self.path = os.path.join(self.sr.path, self.lvname)
1390 @override
1391 def create(self, sr_uuid, vdi_uuid, size) -> str:
1392 util.SMlog("LVMVDI.create for %s" % self.uuid)
1393 if not self.sr.isMaster:
1394 raise xs_errors.XenError('LVMMaster')
1395 if self.exists:
1396 raise xs_errors.XenError('VDIExists')
1398 size = self.cowutil.validateAndRoundImageSize(int(size))
1400 util.SMlog("LVMVDI.create: type = %s, %s (size=%s)" % \
1401 (self.vdi_type, self.path, size))
1402 lvSize = 0
1403 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1404 if not VdiType.isCowImage(self.vdi_type):
1405 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1406 else:
1407 if self.sr.provision == "thin":
1408 lvSize = util.roundup(
1409 lvutil.LVM_SIZE_INCREMENT,
1410 self.cowutil.calcOverheadEmpty(max(size, self.cowutil.getDefaultPreallocationSizeVirt()))
1411 )
1412 elif self.sr.provision == "thick":
1413 lvSize = self.lvmcowutil.calcVolumeSize(int(size))
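# Illustrative sizing: a non-COW (raw) VDI gets an LV rounded up to the LVM size
# increment; a thin-provisioned COW VDI only reserves the empty-image overhead, while
# a thick-provisioned COW VDI reserves the fully-inflated volume size up front.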
1415 self.sr._ensureSpaceAvailable(lvSize)
1417 try:
1418 self.sr.lvmCache.create(self.lvname, lvSize)
1419 if not VdiType.isCowImage(self.vdi_type):
1420 self.size = self.sr.lvmCache.getSize(self.lvname)
1421 else:
1422 self.cowutil.create(
1423 self.path, int(size), False, self.cowutil.getDefaultPreallocationSizeVirt()
1424 )
1425 self.size = self.cowutil.getSizeVirt(self.path)
1426 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1427 except util.CommandException as e:
1428 util.SMlog("Unable to create VDI")
1429 self.sr.lvmCache.remove(self.lvname)
1430 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1432 self.utilisation = lvSize
1433 self.sm_config["vdi_type"] = self.vdi_type
1434 self.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1436 if not self.sr.legacyMode:
1437 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1439 self.ref = self._db_introduce()
1440 self.sr._updateStats(self.sr.uuid, self.size)
1442 vdi_info = {UUID_TAG: self.uuid,
1443 NAME_LABEL_TAG: util.to_plain_string(self.label),
1444 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1445 IS_A_SNAPSHOT_TAG: 0,
1446 SNAPSHOT_OF_TAG: '',
1447 SNAPSHOT_TIME_TAG: '',
1448 TYPE_TAG: self.ty,
1449 VDI_TYPE_TAG: self.vdi_type,
1450 READ_ONLY_TAG: int(self.read_only),
1451 MANAGED_TAG: int(self.managed),
1452 METADATA_OF_POOL_TAG: ''
1453 }
1455 if not self.sr.legacyMode:
1456 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1458 return VDI.VDI.get_params(self)
1460 @override
1461 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None:
1462 util.SMlog("LVMVDI.delete for %s" % self.uuid)
1463 try:
1464 self._loadThis()
1465 except xs_errors.SRException as e:
1466 # Catch 'VDI doesn't exist' exception
1467 if e.errno == 46:
1468 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1469 raise
1471 vdi_ref = self.sr.srcmd.params['vdi_ref']
1472 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1473 raise xs_errors.XenError("VDIDelete", \
1474 opterr="Deleting non-leaf node not permitted")
1476 if not self.hidden:
1477 self._markHidden()
1479 if not data_only:
1480 # Remove from XAPI and delete from MGT
1481 self._db_forget()
1482 else:
1483 # If this is a data_destroy call, don't remove from XAPI db
1484 # Only delete from MGT
1485 if not self.sr.legacyMode:
1486 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1488 # deactivate here because it might be too late to do it in the "final"
1489 # step: GC might have removed the LV by then
1490 if self.sr.lvActivator.get(self.uuid, False):
1491 self.sr.lvActivator.deactivate(self.uuid, False)
1493 try:
1494 self.sr.lvmCache.remove(self.lvname)
1495 self.sr.lock.cleanup(vdi_uuid, NS_PREFIX_LVM + sr_uuid)
1496 self.sr.lock.cleanupAll(vdi_uuid)
1497 except xs_errors.SRException as e:
1498 util.SMlog(
1499 "Failed to remove the volume (maybe it is leaf coalescing) "
1500 "for %s err:%d" % (self.uuid, e.errno))
1502 self.sr._updateStats(self.sr.uuid, -self.size)
1503 self.sr._kickGC()
1504 return super(LVMVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1506 @override
1507 def attach(self, sr_uuid, vdi_uuid) -> str:
1508 util.SMlog("LVMVDI.attach for %s" % self.uuid)
1509 if self.sr.journaler.hasJournals(self.uuid):
1510 raise xs_errors.XenError('VDIUnavailable',
1511 opterr='Interrupted operation detected on this VDI, '
1512 'scan SR first to trigger auto-repair')
1514 writable = ('args' not in self.sr.srcmd.params) or \
1515 (self.sr.srcmd.params['args'][0] == "true")
1516 needInflate = True
1517 if not VdiType.isCowImage(self.vdi_type) or not writable:
1518 needInflate = False
1519 else:
1520 self._loadThis()
1521 if self.utilisation >= self.lvmcowutil.calcVolumeSize(self.size):
1522 needInflate = False
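# In short: only writable attaches of COW volumes whose LV is still smaller
# than the fully provisioned size need inflating; read-only attaches and RAW
# volumes are used as-is.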
1524 if needInflate:
1525 try:
1526 self._prepareThin(True, self.vdi_type)
1527 except:
1528 util.logException("attach")
1529 raise xs_errors.XenError('LVMProvisionAttach')
1531 try:
1532 return self._attach()
1533 finally:
1534 if not self.sr.lvActivator.deactivateAll():
1535 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1537 @override
1538 def detach(self, sr_uuid, vdi_uuid) -> None:
1539 util.SMlog("LVMVDI.detach for %s" % self.uuid)
1540 self._loadThis()
1541 already_deflated = (self.utilisation < \
1542 self.lvmcowutil.calcVolumeSize(self.size))
1543 needDeflate = True
1544 if not VdiType.isCowImage(self.vdi_type) or already_deflated:
1545 needDeflate = False
1546 elif self.sr.provision == "thick":
1547 needDeflate = False
1548 # except for snapshots, which are always deflated
1549 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1550 vdi_ref = self.sr.srcmd.params['vdi_ref']
1551 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1552 if snap:
1553 needDeflate = True
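# The snapshot check above is skipped for vdi_detach_from_config because
# that path may not have a usable vdi_ref/XAPI session to query (an
# assumption based on the guard), leaving the provisioning-based default.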
1555 if needDeflate:
1556 try:
1557 self._prepareThin(False, self.vdi_type)
1558 except:
1559 util.logException("_prepareThin")
1560 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1562 try:
1563 self._detach()
1564 finally:
1565 if not self.sr.lvActivator.deactivateAll():
1566 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1568 # We only support offline resize
1569 @override
1570 def resize(self, sr_uuid, vdi_uuid, size) -> str:
1571 util.SMlog("LVMVDI.resize for %s" % self.uuid)
1572 if not self.sr.isMaster:
1573 raise xs_errors.XenError('LVMMaster')
1575 self._loadThis()
1576 if self.hidden:
1577 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1579 if size < self.size:
1580 util.SMlog('vdi_resize: shrinking not supported: ' + \
1581 '(current size: %d, new size: %d)' % (self.size, size))
1582 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1584 size = self.cowutil.validateAndRoundImageSize(int(size))
1586 if size == self.size:
1587 return VDI.VDI.get_params(self)
1589 if not VdiType.isCowImage(self.vdi_type):
1590 lvSizeOld = self.size
1591 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1592 else:
1593 lvSizeOld = self.utilisation
1594 lvSizeNew = self.lvmcowutil.calcVolumeSize(size)
1595 if self.sr.provision == "thin":
1596 # VDI is currently deflated, so keep it deflated
1597 lvSizeNew = lvSizeOld
1598 assert(lvSizeNew >= lvSizeOld)
1599 spaceNeeded = lvSizeNew - lvSizeOld
1600 self.sr._ensureSpaceAvailable(spaceNeeded)
1602 oldSize = self.size
1603 if not VdiType.isCowImage(self.vdi_type):
1604 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1605 self.size = self.sr.lvmCache.getSize(self.lvname)
1606 self.utilisation = self.size
1607 else:
1608 if lvSizeNew != lvSizeOld:
1609 self.lvmcowutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type, lvSizeNew)
1610 if self.vdi_type == VdiType.QCOW2:
1611 # We only do this for QCOW2 since qemu-img needs to read the chain to resize
1612 self._chainSetActive(True, True)
1613 self.cowutil.setSizeVirtFast(self.path, size)
1614 self.size = self.cowutil.getSizeVirt(self.path)
1615 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
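# setSizeVirtFast presumably only rewrites the image metadata (VHD footer
# or QCOW2 header) to advertise the new virtual size; any physical LV
# growth was already handled by the inflate() call above.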
1617 vdi_ref = self.sr.srcmd.params['vdi_ref']
1618 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1619 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1620 str(self.utilisation))
1621 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1622 super(LVMVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1623 if self.vdi_type == VdiType.QCOW2:
1624 self._chainSetActive(False, True)
1625 return VDI.VDI.get_params(self)
1627 @override
1628 def clone(self, sr_uuid, vdi_uuid) -> str:
1629 return self._do_snapshot(
1630 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1632 @override
1633 def compose(self, sr_uuid, vdi1, vdi2) -> None:
1634 util.SMlog("LVMSR.compose for %s -> %s" % (vdi2, vdi1))
1635 if not VdiType.isCowImage(self.vdi_type):
1636 raise xs_errors.XenError('Unimplemented')
1638 parent_uuid = vdi1
1639 parent_lvname = LV_PREFIX[self.vdi_type] + parent_uuid
1640 assert(self.sr.lvmCache.checkLV(parent_lvname))
1641 parent_path = os.path.join(self.sr.path, parent_lvname)
1643 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1644 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1646 self.cowutil.setParent(self.path, parent_path, False)
1647 self.cowutil.setHidden(parent_path)
1648 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1650 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1651 True):
1652 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1654 util.SMlog("Compose done")
1656 def reset_leaf(self, sr_uuid, vdi_uuid):
1657 util.SMlog("LVMSR.reset_leaf for %s" % vdi_uuid)
1658 if not VdiType.isCowImage(self.vdi_type):
1659 raise xs_errors.XenError('Unimplemented')
1661 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1663 # safety check
1664 if not self.cowutil.hasParent(self.path):
1665 raise util.SMException("ERROR: VDI %s has no parent, " + \
1666 "will not reset contents" % self.uuid)
1668 self.cowutil.killData(self.path)
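# killData() discards the leaf's allocated data so the VDI comes back
# empty while its parent chain stays intact; this appears to be what backs
# the VDI_RESET_ON_BOOT capability advertised by this driver.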
1670 def _attach(self):
1671 self._chainSetActive(True, True, True)
1672 if not util.pathexists(self.path):
1673 raise xs_errors.XenError('VDIUnavailable', \
1674 opterr='Could not find: %s' % self.path)
1676 if not hasattr(self, 'xenstore_data'):
1677 self.xenstore_data = {}
1679 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1680 scsiutil.gen_synthetic_page_data(self.uuid)))
1682 self.xenstore_data['storage-type'] = 'lvm'
1683 self.xenstore_data['vdi-type'] = self.vdi_type
1685 self.attached = True
1686 self.sr.lvActivator.persist()
1687 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1689 def _detach(self):
1690 self._chainSetActive(False, True)
1691 self.attached = False
1693 @override
1694 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1695 cloneOp=False, secondary=None, cbtlog=None, is_mirror_destination=False) -> str:
1696 # If cbt enabled, save file consistency state
1697 if cbtlog is not None:
1698 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1698 ↛ 1699
1699 consistency_state = False
1700 else:
1701 consistency_state = True
1702 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1703 (consistency_state, vdi_uuid))
1704 else:
1705 consistency_state = None
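# i.e. the CBT log is only recorded as consistent when no tapdisk is
# currently attached to this VDI; with an active datapath there could be
# in-flight writes the log does not yet reflect.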
1707 pause_time = time.time()
1708 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1708 ↛ 1709
1709 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1711 snapResult = None
1712 try:
1713 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state, is_mirror_destination)
1714 except Exception as e1:
1715 try:
1716 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1717 secondary=None)
1718 except Exception as e2:
1719 util.SMlog('WARNING: failed to clean up failed snapshot: '
1720 '%s (error ignored)' % e2)
1721 raise
1722 self.disable_leaf_on_secondary(vdi_uuid, secondary=secondary)
1723 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1724 unpause_time = time.time()
1725 if (unpause_time - pause_time) > LONG_SNAPTIME: 1725 ↛ 1726
1726 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1727 (unpause_time - pause_time))
1728 return snapResult
1730 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None, is_mirror_destination=False):
1731 util.SMlog("LVMVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1733 if not self.sr.isMaster: 1733 ↛ 1734
1734 raise xs_errors.XenError('LVMMaster')
1735 if self.sr.legacyMode: 1735 ↛ 1736
1736 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1738 self._loadThis()
1739 if self.hidden: 1739 ↛ 1740
1740 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1742 snapVdiType = self.sr._get_snap_vdi_type(self.vdi_type, self.size)
1744 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1745 self.sr.srcmd.params['vdi_ref'])
1746 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1746 ↛ 1747line 1746 didn't jump to line 1747, because the condition on line 1746 was never true
1747 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1748 raise xs_errors.XenError('Unimplemented', \
1749 opterr='Raw VDI, snapshot or clone not permitted')
1751 # we must activate the entire image chain because the real parent could
1752 # theoretically be anywhere in the chain if all images under it are empty
1753 self._chainSetActive(True, False)
1754 if not util.pathexists(self.path): 1754 ↛ 1755
1755 raise xs_errors.XenError('VDIUnavailable', \
1756 opterr='VDI unavailable: %s' % (self.path))
1758 if VdiType.isCowImage(self.vdi_type): 1758 ↛ 1766
1759 depth = self.cowutil.getDepth(self.path)
1760 if depth == -1: 1760 ↛ 1761
1761 raise xs_errors.XenError('VDIUnavailable', \
1762 opterr='failed to get COW depth')
1763 elif depth >= self.cowutil.getMaxChainLength(): 1763 ↛ 1764
1764 raise xs_errors.XenError('SnapshotChainTooLong')
1766 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1767 self.sr.srcmd.params['vdi_ref'])
1769 fullpr = self.lvmcowutil.calcVolumeSize(self.size)
1770 thinpr = util.roundup(
1771 lvutil.LVM_SIZE_INCREMENT,
1772 self.cowutil.calcOverheadEmpty(max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()))
1773 )
1774 lvSizeOrig = thinpr
1775 lvSizeClon = thinpr
1777 hostRefs = []
1778 if self.sr.cmd == "vdi_snapshot":
1779 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1780 if hostRefs: 1780 ↛ 1782
1781 lvSizeOrig = fullpr
1782 if self.sr.provision == "thick": 1782 ↛ 1788
1783 if not self.issnap: 1783 ↛ 1784
1784 lvSizeOrig = fullpr
1785 if self.sr.cmd != "vdi_snapshot":
1786 lvSizeClon = fullpr
1788 if (snapType == VDI.SNAPSHOT_SINGLE or 1788 ↛ 1790
1789 snapType == VDI.SNAPSHOT_INTERNAL):
1790 lvSizeClon = 0
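# Sizing summary for the two new LVs: the re-created leaf gets the fully
# provisioned size if the VDI is attached somewhere (vdi_snapshot case) or
# the SR is thick and the VDI is not itself a snapshot; the clone gets the
# fully provisioned size only on thick SRs outside vdi_snapshot; single or
# internal snapshots create no clone LV at all (lvSizeClon = 0).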
1792 # the space required must include 2 journal LVs: a clone journal and an
1793 # inflate journal (for failure handling)
1794 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1795 lvSizeBase = self.size
1796 if VdiType.isCowImage(self.vdi_type): 1796 ↛ 1799
1797 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT, self.cowutil.getSizePhys(self.path))
1798 size_req -= (self.utilisation - lvSizeBase)
1799 self.sr._ensureSpaceAvailable(size_req)
1801 if hostRefs:
1802 self.sr._deactivateOnSlave(hostRefs, self.lvname)
1804 baseUuid = util.gen_uuid()
1805 origUuid = self.uuid
1806 clonUuid = ""
1807 if snapType == VDI.SNAPSHOT_DOUBLE: 1807 ↛ 1809
1808 clonUuid = util.gen_uuid()
1809 jval = "%s_%s" % (baseUuid, clonUuid)
1810 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1811 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1813 try:
1814 # self becomes the "base vdi"
1815 origOldLV = self.lvname
1816 baseLV = LV_PREFIX[self.vdi_type] + baseUuid
1817 self.sr.lvmCache.rename(self.lvname, baseLV)
1818 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1819 RefCounter.set(baseUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1820 self.uuid = baseUuid
1821 self.lvname = baseLV
1822 self.path = os.path.join(self.sr.path, baseLV)
1823 self.label = "base copy"
1824 self.read_only = True
1825 self.location = self.uuid
1826 self.managed = False
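# From this point "self" denotes the base copy: the original LV has been
# renamed under a fresh UUID and demoted to a read-only, unmanaged internal
# node; the user-visible leaf (and the optional clone) are re-created as
# new LVs on top of it below.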
1828 # shrink the base copy to the minimum - we do it before creating
1829 # the snapshot volumes to avoid requiring double the space
1830 if VdiType.isCowImage(self.vdi_type): 1830 ↛ 1833
1831 self.lvmcowutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1832 self.utilisation = lvSizeBase
1833 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1835 snapVDI = self._createSnap(origUuid, snapVdiType, lvSizeOrig, False, is_mirror_destination)
1836 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1837 snapVDI2 = None
1838 if snapType == VDI.SNAPSHOT_DOUBLE: 1838 ↛ 1844
1839 snapVDI2 = self._createSnap(clonUuid, snapVdiType, lvSizeClon, True)
1840 # If we have CBT enabled on the VDI,
1841 # set CBT status for the new snapshot disk
1842 if cbtlog:
1843 snapVDI2.cbt_enabled = True
1844 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1846 # note: it is important to mark the parent hidden only AFTER the
1847 # new image children have been created, which are referencing it;
1848 # otherwise we would introduce a race with GC that could reclaim
1849 # the parent before we snapshot it
1850 if not VdiType.isCowImage(self.vdi_type): 1850 ↛ 1851
1851 self.sr.lvmCache.setHidden(self.lvname)
1852 else:
1853 self.cowutil.setHidden(self.path)
1854 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1856 # set the base copy to ReadOnly
1857 self.sr.lvmCache.setReadonly(self.lvname, True)
1858 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1860 if hostRefs:
1861 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1862 snapVDI.lvname, self.uuid, self.lvname)
1864 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1865 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1866 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1867 if hostRefs: 1867 ↛ 1881
1868 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1869 try:
1870 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1871 except:
1872 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1873 alert_str = ("Creating CBT snapshot for {} failed"
1874 .format(snapVDI.uuid))
1875 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1876 pass
1878 except (util.SMException, XenAPI.Failure) as e:
1879 util.logException("LVMVDI._snapshot")
1880 self._failClone(origUuid, jval, str(e))
1881 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1883 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1885 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1887 def _createSnap(self, snapUuid, snapVdiType, snapSizeLV, isNew, is_mirror_destination=False):
1888 """Snapshot self and return the snapshot VDI object"""
1890 snapLV = LV_PREFIX[snapVdiType] + snapUuid
1891 snapPath = os.path.join(self.sr.path, snapLV)
1892 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1893 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1894 if isNew:
1895 RefCounter.set(snapUuid, 1, 0, NS_PREFIX_LVM + self.sr.uuid)
1896 self.sr.lvActivator.add(snapUuid, snapLV, False)
1897 parentRaw = (self.vdi_type == VdiType.RAW)
1898 self.cowutil.snapshot(
1899 snapPath, self.path, parentRaw, max(self.size, self.cowutil.getDefaultPreallocationSizeVirt()), is_mirror_image=is_mirror_destination
1900 )
1901 snapParent = self.cowutil.getParent(snapPath, LvmCowUtil.extractUuid)
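# With parentRaw set the new image is created with the raw LV as its
# parent; snapParent is then read back from the snapshot so the caller can
# tell whether the base copy is actually referenced (see _finishSnapshot).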
1903 snapVDI = LVMVDI(self.sr, snapUuid)
1904 snapVDI.read_only = False
1905 snapVDI.location = snapUuid
1906 snapVDI.size = self.size
1907 snapVDI.utilisation = snapSizeLV
1908 snapVDI.sm_config = dict()
1909 for key, val in self.sm_config.items(): 1909 ↛ 1910
1910 if key not in [
1911 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1912 not key.startswith("host_"):
1913 snapVDI.sm_config[key] = val
1914 snapVDI.sm_config["vdi_type"] = snapVdiType
1915 snapVDI.sm_config["vhd-parent"] = snapParent
1916 # TODO: fix the raw snapshot case
1917 snapVDI.sm_config["image-format"] = getImageStringFromVdiType(self.vdi_type)
1918 snapVDI.lvname = snapLV
1919 return snapVDI
1921 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
1922 if snapType is not VDI.SNAPSHOT_INTERNAL: 1922 ↛ 1924
1923 self.sr._updateStats(self.sr.uuid, self.size)
1924 basePresent = True
1926 # Verify parent locator field of both children and delete basePath if
1927 # unused
1928 snapParent = snapVDI.sm_config["vhd-parent"]
1929 snap2Parent = ""
1930 if snapVDI2: 1930 ↛ 1932line 1930 didn't jump to line 1932, because the condition on line 1930 was never false
1931 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1932 if snapParent != self.uuid and \ 1932 ↛ 1966
1933 (not snapVDI2 or snap2Parent != self.uuid):
1934 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1935 (snapParent, self.uuid, snap2Parent, self.lvname))
1936 RefCounter.put(self.uuid, False, NS_PREFIX_LVM + self.sr.uuid)
1938 # The removed LV could still be activated on a slave host if it's
1939 # part of a VM currently running there, we need to deactivate it
1940 # before it gets removed to avoid a LV leak.
1941 if hostRefs:
1942 self.sr._deactivateOnSlave(hostRefs, self.lvname)
1944 self.sr.lvmCache.remove(self.lvname)
1945 self.sr.lvActivator.remove(self.uuid, False)
1946 if hostRefs:
1947 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1948 basePresent = False
1949 else:
1950 # assign the _binary_ refcount of the original VDI to the new base
1951 # VDI (but as the normal refcount, since binary refcounts are only
1952 # for leaf nodes). The normal refcount of the child is not
1953 # transferred to the base VDI because normal refcounts are
1954 # incremented and decremented individually, and not based on the
1955 # image chain (i.e., the child's normal refcount will be decremented
1956 # independently of its parent situation). Add 1 for this clone op.
1957 # Note that we do not need to protect the refcount operations
1958 # below with per-VDI locking like we do in lvutil because at this
1959 # point we have exclusive access to the VDIs involved. Other SM
1960 # operations are serialized by the Agent or with the SR lock, and
1961 # any coalesce activations are serialized with the SR lock. (The
1962 # coalesce activates the coalesced VDI pair in the beginning, which
1963 # cannot affect the VDIs here because they cannot possibly be
1964 # involved in coalescing at this point, and at the relinkSkip step
1965 # that activates the children, which takes the SR lock.)
1966 ns = NS_PREFIX_LVM + self.sr.uuid
1967 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1968 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
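# Rough example of the transfer: if the original leaf was attached on this
# host (binary count 1), the base copy starts with a normal refcount of 2,
# i.e. the inherited attachment plus one for this clone operation, and a
# binary count of 0.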
1970 # the "paused" and "host_*" sm-config keys are special and must stay on
1971 # the leaf without being inherited by anyone else
1972 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1972 ↛ 1973
1973 snapVDI.sm_config[key] = self.sm_config[key]
1974 del self.sm_config[key]
1976 # Introduce any new VDI records & update the existing one
1977 type = self.session.xenapi.VDI.get_type( \
1978 self.sr.srcmd.params['vdi_ref'])
1979 if snapVDI2: 1979 ↛ 2021
1980 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1981 vdiRef = snapVDI2._db_introduce()
1982 if cloneOp:
1983 vdi_info = {UUID_TAG: snapVDI2.uuid,
1984 NAME_LABEL_TAG: util.to_plain_string( \
1985 self.session.xenapi.VDI.get_name_label( \
1986 self.sr.srcmd.params['vdi_ref'])),
1987 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1988 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1989 IS_A_SNAPSHOT_TAG: 0,
1990 SNAPSHOT_OF_TAG: '',
1991 SNAPSHOT_TIME_TAG: '',
1992 TYPE_TAG: type,
1993 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1994 READ_ONLY_TAG: 0,
1995 MANAGED_TAG: int(snapVDI2.managed),
1996 METADATA_OF_POOL_TAG: ''
1997 }
1998 else:
1999 util.SMlog("snapshot VDI params: %s" % \
2000 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
2001 vdi_info = {UUID_TAG: snapVDI2.uuid,
2002 NAME_LABEL_TAG: util.to_plain_string( \
2003 self.session.xenapi.VDI.get_name_label( \
2004 self.sr.srcmd.params['vdi_ref'])),
2005 NAME_DESCRIPTION_TAG: util.to_plain_string( \
2006 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
2007 IS_A_SNAPSHOT_TAG: 1,
2008 SNAPSHOT_OF_TAG: snapVDI.uuid,
2009 SNAPSHOT_TIME_TAG: '',
2010 TYPE_TAG: type,
2011 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
2012 READ_ONLY_TAG: 0,
2013 MANAGED_TAG: int(snapVDI2.managed),
2014 METADATA_OF_POOL_TAG: ''
2015 }
2017 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
2018 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
2019 (vdiRef, snapVDI2.uuid))
2021 if basePresent: 2021 ↛ 2022
2022 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
2023 vdiRef = self._db_introduce()
2024 vdi_info = {UUID_TAG: self.uuid,
2025 NAME_LABEL_TAG: self.label,
2026 NAME_DESCRIPTION_TAG: self.description,
2027 IS_A_SNAPSHOT_TAG: 0,
2028 SNAPSHOT_OF_TAG: '',
2029 SNAPSHOT_TIME_TAG: '',
2030 TYPE_TAG: type,
2031 VDI_TYPE_TAG: self.sm_config['vdi_type'],
2032 READ_ONLY_TAG: 1,
2033 MANAGED_TAG: 0,
2034 METADATA_OF_POOL_TAG: ''
2035 }
2037 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
2038 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
2039 (vdiRef, self.uuid))
2041 # Update the original record
2042 vdi_ref = self.sr.srcmd.params['vdi_ref']
2043 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
2044 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
2045 str(snapVDI.utilisation))
2047 # Return the info on the new snap VDI
2048 snap = snapVDI2
2049 if not snap: 2049 ↛ 2050
2050 snap = self
2051 if not basePresent:
2052 # a single-snapshot of an empty VDI will be a noop, resulting
2053 # in no new VDIs, so return the existing one. The GC wouldn't
2054 # normally try to single-snapshot an empty image of course, but
2055 # if an external snapshot operation manages to sneak in right
2056 # before a snapshot-coalesce phase, we would get here
2057 snap = snapVDI
2058 return snap.get_params()
2060 def _setType(self, vdiType: str) -> None:
2061 self.vdi_type = vdiType
2062 self.cowutil = getCowUtil(self.vdi_type)
2063 self.lvmcowutil = LvmCowUtil(self.cowutil)
2065 def _initFromVDIInfo(self, vdiInfo):
2066 self._setType(vdiInfo.vdiType)
2067 self.lvname = vdiInfo.lvName
2068 self.size = vdiInfo.sizeVirt
2069 self.utilisation = vdiInfo.sizeLV
2070 self.hidden = vdiInfo.hidden
2071 if self.hidden: 2071 ↛ 2072
2072 self.managed = False
2073 self.active = vdiInfo.lvActive
2074 self.readonly = vdiInfo.lvReadonly
2075 self.parent = vdiInfo.parentUuid
2076 self.path = os.path.join(self.sr.path, self.lvname)
2077 if hasattr(self, "sm_config_override"): 2077 ↛ 2080line 2077 didn't jump to line 2080, because the condition on line 2077 was never false
2078 self.sm_config_override["vdi_type"] = self.vdi_type
2079 else:
2080 self.sm_config_override = {'vdi_type': self.vdi_type}
2081 self.loaded = True
2083 def _initFromLVInfo(self, lvInfo):
2084 self._setType(lvInfo.vdiType)
2085 self.lvname = lvInfo.name
2086 self.size = lvInfo.size
2087 self.utilisation = lvInfo.size
2088 self.hidden = lvInfo.hidden
2089 self.active = lvInfo.active
2090 self.readonly = lvInfo.readonly
2091 self.parent = ''
2092 self.path = os.path.join(self.sr.path, self.lvname)
2093 if hasattr(self, "sm_config_override"): 2093 ↛ 2096line 2093 didn't jump to line 2096, because the condition on line 2093 was never false
2094 self.sm_config_override["vdi_type"] = self.vdi_type
2095 else:
2096 self.sm_config_override = {'vdi_type': self.vdi_type}
2097 if 'vhd-parent' in self.sm_config_override: 2097 ↛ 2098
2098 self.parent = self.sm_config_override['vhd-parent']
2099 if not VdiType.isCowImage(self.vdi_type): 2099 ↛ 2100
2100 self.loaded = True
2102 def _initFromImageInfo(self, imageInfo):
2103 self.size = imageInfo.sizeVirt
2104 if self.parent == '' or (imageInfo.parentUuid != '' and imageInfo.parentUuid != self.parent): 2104 ↛ 2106
2105 self.parent = imageInfo.parentUuid
2106 self.hidden = imageInfo.hidden
2107 self.loaded = True
2109 def _determineType(self):
2110 """
2111 Determine whether this is a RAW or a COW VDI.
2112 """
2113 if "vdi_ref" in self.sr.srcmd.params:
2114 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2115 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2116 if sm_config.get("vdi_type"): 2116 ↛ 2117line 2116 didn't jump to line 2117, because the condition on line 2116 was never true
2117 self._setType(sm_config["vdi_type"])
2118 prefix = LV_PREFIX[self.vdi_type]
2119 self.lvname = "%s%s" % (prefix, self.uuid)
2120 self.path = os.path.join(self.sr.path, self.lvname)
2121 self.sm_config_override = sm_config
2122 return True
2124 # LVM commands can be costly, so check the file directly first in case
2125 # the LV is active
2126 found = False
2127 for vdi_type, prefix in LV_PREFIX.items():
2128 lvname = "%s%s" % (prefix, self.uuid)
2129 path = os.path.join(self.sr.path, lvname)
2130 if util.pathexists(path):
2131 if found: 2131 ↛ 2132
2132 raise xs_errors.XenError('VDILoad',
2133 opterr="multiple VDI's: uuid %s" % self.uuid)
2134 found = True
2135 self._setType(vdi_type)
2136 self.lvname = lvname
2137 self.path = path
2138 if found:
2139 return True
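# Device nodes only exist while an LV is active, so a miss above is not
# conclusive; fall through to a full LVM listing (slower, but authoritative).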
2141 # now list all LVs
2142 if not lvutil._checkVG(self.sr.vgname): 2142 ↛ 2144
2143 # when doing attach_from_config, the VG won't be there yet
2144 return False
2146 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache)
2147 if lvs.get(self.uuid): 2147 ↛ 2150
2148 self._initFromLVInfo(lvs[self.uuid])
2149 return True
2150 return False
2152 def _loadThis(self):
2153 """
2154 Load VDI info for this VDI and activate the LV if it's COW. We
2155 don't do it in VDI.load() because not all VDI operations need it.
2156 """
2157 if self.loaded: 2157 ↛ 2158
2158 if VdiType.isCowImage(self.vdi_type):
2159 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2160 return
2161 try:
2162 lvs = LvmCowUtil.getVolumeInfo(self.sr.lvmCache, self.lvname)
2163 except util.CommandException as e:
2164 raise xs_errors.XenError('VDIUnavailable',
2165 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2166 if not lvs.get(self.uuid): 2166 ↛ 2167
2167 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2168 self._initFromLVInfo(lvs[self.uuid])
2169 if VdiType.isCowImage(self.vdi_type): 2169 ↛ 2175
2170 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2171 imageInfo = self.cowutil.getInfo(self.path, LvmCowUtil.extractUuid, False)
2172 if not imageInfo: 2172 ↛ 2173
2173 raise xs_errors.XenError('VDIUnavailable', opterr='getInfo failed')
2174 self._initFromImageInfo(imageInfo)
2175 self.loaded = True
2177 def _chainSetActive(self, active, binary, persistent=False):
2178 if binary: 2178 ↛ 2179
2179 (count, bcount) = RefCounter.checkLocked(self.uuid,
2180 NS_PREFIX_LVM + self.sr.uuid)
2181 if (active and bcount > 0) or (not active and bcount == 0):
2182 return # this is a redundant activation/deactivation call
2184 vdiList = {self.uuid: self.lvname}
2185 if VdiType.isCowImage(self.vdi_type): 2185 ↛ 2187
2186 vdiList = self.cowutil.getParentChain(self.lvname, LvmCowUtil.extractUuid, self.sr.vgname)
2187 for uuid, lvName in vdiList.items(): 2187 ↛ 2188
2188 binaryParam = binary
2189 if uuid != self.uuid:
2190 binaryParam = False # binary param only applies to leaf nodes
2191 if active:
2192 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2193 persistent)
2194 else:
2195 # just add the LVs for deactivation in the final (cleanup)
2196 # step. The LVs must not have been activated during the current
2197 # operation
2198 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2200 def _failClone(self, uuid, jval, msg):
2201 try:
2202 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2203 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2204 except Exception as e:
2205 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2206 ' %s (error ignored)' % e)
2207 raise xs_errors.XenError('VDIClone', opterr=msg)
2209 def _markHidden(self):
2210 if not VdiType.isCowImage(self.vdi_type):
2211 self.sr.lvmCache.setHidden(self.lvname)
2212 else:
2213 self.cowutil.setHidden(self.path)
2214 self.hidden = 1
2216 def _prepareThin(self, attach, vdiType):
2217 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2218 if self.sr.isMaster:
2219 # the master can prepare the VDI locally
2220 if attach:
2221 self.lvmcowutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid, self.vdi_type)
2222 else:
2223 self.lvmcowutil.detachThin(self.session, self.sr.lvmCache, self.sr.uuid, self.uuid, self.vdi_type)
2224 else:
2225 fn = "attach"
2226 if not attach:
2227 fn = "detach"
2228 pools = self.session.xenapi.pool.get_all()
2229 master = self.session.xenapi.pool.get_master(pools[0])
2230 rv = self.session.xenapi.host.call_plugin(
2231 master,
2232 self.sr.THIN_PLUGIN,
2233 fn,
2234 {
2235 "srUuid": self.sr.uuid,
2236 "vdiUuid": self.uuid,
2237 "vdiType": vdiType
2238 }
2239 )
2240 util.SMlog("call-plugin returned: %s" % rv)
2241 if not rv:
2242 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2243 # refresh to pick up the size change on this slave
2244 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
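# (The inflate/deflate ran on the master, so this host's device-mapper
# view of the LV is stale until the refresh above.)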
2246 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2247 if origUtilisation != self.utilisation:
2248 vdi_ref = self.sr.srcmd.params['vdi_ref']
2249 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2250 str(self.utilisation))
2251 stats = lvutil._getVGstats(self.sr.vgname)
2252 sr_utilisation = stats['physical_utilisation']
2253 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2254 str(sr_utilisation))
2256 @override
2257 def update(self, sr_uuid, vdi_uuid) -> None:
2258 if self.sr.legacyMode:
2259 return
2261 # Sync the name_label of this VDI on storage with the name_label in XAPI
2262 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2263 update_map = {}
2264 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2265 METADATA_OBJECT_TYPE_VDI
2266 update_map[UUID_TAG] = self.uuid
2267 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2268 self.session.xenapi.VDI.get_name_label(vdi_ref))
2269 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2270 self.session.xenapi.VDI.get_name_description(vdi_ref))
2271 update_map[SNAPSHOT_TIME_TAG] = \
2272 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2273 update_map[METADATA_OF_POOL_TAG] = \
2274 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2275 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2277 @override
2278 def _ensure_cbt_space(self) -> None:
2279 # The required CBT log size is derived from the virtual size, so a bigger VDI may need more space
2280 self.sr.ensureCBTSpace(self.size)
2282 @override
2283 def _create_cbt_log(self) -> str:
2284 logname = self._get_cbt_logname(self.uuid)
2285 logsize = max(util.roundup(CBT_BLOCK_SIZE, self.size//CBT_BLOCK_SIZE), self.sr.journaler.LV_SIZE)
2286 # We choose 4MiB as the minimum for the log size to maintain the old behavior and compute the correct amount
2287 # if we need a bigger LV for the CBT (can happen with big QCOW2)
2288 self.sr.lvmCache.create(logname, logsize, CBTLOG_TAG)
2289 logpath = super(LVMVDI, self)._create_cbt_log()
2290 self.sr.lvmCache.deactivateNoRefcount(logname)
2291 return logpath
2293 @override
2294 def _delete_cbt_log(self) -> None:
2295 logpath = self._get_cbt_logpath(self.uuid)
2296 if self._cbt_log_exists(logpath):
2297 logname = self._get_cbt_logname(self.uuid)
2298 self.sr.lvmCache.remove(logname)
2300 @override
2301 def _rename(self, oldpath, newpath) -> None:
2302 oldname = os.path.basename(oldpath)
2303 newname = os.path.basename(newpath)
2304 self.sr.lvmCache.rename(oldname, newname)
2306 @override
2307 def update_slaves_on_cbt_disable(self, cbtlog) -> None:
2308 args = {
2309 "vgName": self.sr.vgname,
2310 "action1": "deactivateNoRefcount",
2311 "lvName1": cbtlog
2312 }
2314 host_refs = util.get_hosts_attached_on(self.session, [self.uuid])
2316 message = f"Deactivating {cbtlog}"
2317 self.sr.call_on_slave(args, host_refs, message)
2319 @override
2320 def _activate_cbt_log(self, lv_name) -> bool:
2321 self.sr.lvmCache.refresh()
2322 if not self.sr.lvmCache.is_active(lv_name): 2322 ↛ 2323
2323 try:
2324 self.sr.lvmCache.activateNoRefcount(lv_name)
2325 return True
2326 except Exception as e:
2327 util.SMlog("Exception in _activate_cbt_log, "
2328 "Error: %s." % str(e))
2329 raise
2330 else:
2331 return False
2333 @override
2334 def _deactivate_cbt_log(self, lv_name) -> None:
2335 try:
2336 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2337 except Exception as e:
2338 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2339 raise
2341 @override
2342 def _cbt_log_exists(self, logpath) -> bool:
2343 return lvutil.exists(logpath)
2345if __name__ == '__main__': 2345 ↛ 2346
2346 SRCommand.run(LVMSR, DRIVER_INFO)
2347else:
2348 SR.registerSR(LVMSR)