ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19755:08de8ec655c2

xend: pass-through: fix typo: spx -> sxp

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 11:35:16 2009 +0100 (2009-06-16)
parents 94f6bf69e8e8
children 9e36ef77f658
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import traceback
35 from types import StringTypes
37 import xen.lowlevel.xc
38 from xen.util import asserts, auxbin
39 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
40 import xen.util.xsm.xsm as security
41 from xen.util import xsconstants
42 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp
44 from xen.xend import balloon, sxp, uuid, image, arch
45 from xen.xend import XendOptions, XendNode, XendConfig
47 from xen.xend.XendConfig import scrub_password
48 from xen.xend.XendBootloader import bootloader, bootloader_tidy
49 from xen.xend.XendError import XendError, VmError
50 from xen.xend.XendDevices import XendDevices
51 from xen.xend.XendTask import XendTask
52 from xen.xend.xenstore.xstransact import xstransact, complete
53 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
54 from xen.xend.xenstore.xswatch import xswatch
55 from xen.xend.XendConstants import *
56 from xen.xend.XendAPIConstants import *
57 from xen.xend.server.DevConstants import xenbusState
59 from xen.xend.XendVMMetrics import XendVMMetrics
61 from xen.xend import XendAPIStore
62 from xen.xend.XendPPCI import XendPPCI
63 from xen.xend.XendDPCI import XendDPCI
64 from xen.xend.XendPSCSI import XendPSCSI
65 from xen.xend.XendDSCSI import XendDSCSI
# Timeout used by the migration code; presumably seconds — TODO confirm at
# the call sites (not visible in this chunk).
MIGRATE_TIMEOUT = 30.0
# Loopback block device node used when running the bootloader against a
# guest image (see BOOTLOADER usage elsewhere in this module).
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

# Module-wide handle on the low-level hypervisor control interface.
xc = xen.lowlevel.xc.xc()
# Xend daemon configuration options singleton.
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from an SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    cfg = XendConfig.XendConfig(sxp_obj = config)
    xd = XendDomain.instance()

    # Refuse to create a second *running* domain with the same name or UUID.
    existing = xd.domain_lookup_nr(cfg["name_label"])
    if existing is None or existing.domid is None:
        existing = xd.domain_lookup_nr(cfg["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (cfg["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    dominfo = XendDomainInfo(cfg)
    try:
        dominfo.start()
    except:
        # Tear down the half-built domain before propagating the failure.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    dominfo = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        dominfo.start()
    except:
        # Clean up the partially constructed domain, then re-raise.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration
    @type  info: dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    # Rebuild a XendConfig from the live hypervisor-provided dominfo.
    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        # The UUID recorded under /vm must agree with the one the
        # hypervisor reports for this domid; otherwise rebuild the store.
        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Recreate the /local/domain and /vm subtrees from scratch.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    cfg = XendConfig.XendConfig(sxp_obj = config)
    dominfo = XendDomainInfo(cfg, resume = True)
    try:
        dominfo.resume()
    except:
        # Resume failed: destroy the half-restored domain and re-raise.
        dominfo.destroy()
        raise
    return dominfo
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating the VM.
    This is for instances of persistent domains that are not yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object
    @rtype:  XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # A domid only makes sense for a running domain, so drop any stale one.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by its name.

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Translate a numeric shutdown code into a human-readable reason.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason
    @rtype: string
    """
    try:
        return DOMAIN_SHUTDOWN_REASONS[code]
    except KeyError:
        # Unknown code: same placeholder the rest of xend expects.
        return "?"
279 def dom_get(dom):
280 """Get info from xen for an existing domain.
282 @param dom: domain id
283 @type dom: int
284 @return: info or None
285 @rtype: dictionary
286 """
287 try:
288 domlist = xc.domain_getinfo(dom, 1)
289 if domlist and dom == domlist[0]['domid']:
290 return domlist[0]
291 except Exception, err:
292 # ignore missing domain
293 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
294 return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings assigned to a domain.

    Reads the domain's PCI backend subtree in xenstore: 'num_devs' gives
    the count and 'dev-N' the per-device strings.

    @param domid: domain id
    @type  domid: int
    @return: list of device strings (empty if no PCI backend exists)
    @rtype: list
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        # No PCI backend node for this domain -> nothing assigned.
        return []
    # Idiom fix: build the list with a comprehension instead of the old
    # manual append loop (also drops the stray trailing semicolons).
    return [xstransact.Read(path + 'dev-%i' % i)
            for i in range(int(num_devs))]
def do_FLR(domid):
    """Perform a Function Level Reset on every PCI device assigned to
    the given domain.

    @param domid: domain id
    @type  domid: int
    @raise VmError: if an assigned device cannot be located/parsed.
    """
    from xen.xend.server.pciif import parse_pci_name, PciDevice
    dev_str_list = get_assigned_pci_devices(domid)

    for dev_str in dev_str_list:
        (dom, b, d, f) = parse_pci_name(dev_str)
        try:
            dev = PciDevice(dom, b, d, f)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        # Reset the function so it is clean for (re)assignment.
        dev.do_FLR()
321 class XendDomainInfo:
322 """An object that represents a domain.
324 @TODO: try to unify dom and domid, they mean the same thing, but
325 xc refers to it as dom, and everywhere else, including
326 xenstore it is domid. The best way is to change xc's
327 python interface.
329 @ivar info: Parsed configuration
330 @type info: dictionary
331 @ivar domid: Domain ID (if VM has started)
332 @type domid: int or None
333 @ivar vmpath: XenStore path to this VM.
334 @type vmpath: string
335 @ivar dompath: XenStore path to this Domain.
336 @type dompath: string
337 @ivar image: Reference to the VM Image.
338 @type image: xen.xend.image.ImageHandler
339 @ivar store_port: event channel to xenstored
340 @type store_port: int
341 @ivar console_port: event channel to xenconsoled
342 @type console_port: int
343 @ivar store_mfn: xenstored mfn
344 @type store_mfn: int
345 @ivar console_mfn: xenconsoled mfn
346 @type console_mfn: int
347 @ivar notes: OS image notes
348 @type notes: dictionary
349 @ivar vmWatch: reference to a watch on the xenstored vmpath
350 @type vmWatch: xen.xend.xenstore.xswatch
351 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
352 @type shutdownWatch: xen.xend.xenstore.xswatch
353 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
354 @type shutdownStartTime: float or None
355 @ivar restart_in_progress: Is a domain restart thread running?
356 @type restart_in_progress: bool
357 # @ivar state: Domain state
358 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
359 @ivar state_updated: lock for self.state
360 @type state_updated: threading.Condition
361 @ivar refresh_shutdown_lock: lock for polling shutdown state
362 @type refresh_shutdown_lock: threading.Condition
363 @ivar _deviceControllers: device controller cache for this domain
364 @type _deviceControllers: dict 'string' to DevControllers
365 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type  info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type    domid: int
        @keyword dompath: Set initial dompath (if any)
        @type    dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type    augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type    priv: bool
        @keyword resume: Is this domain being resumed?
        @type    resume: bool
        @keyword vmpath: Pre-chosen /vm xenstore path (if any)
        @type    vmpath: string
        """

        self.info = info
        # Prefer an explicit domid; otherwise fall back to the config's.
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                # Candidate taken: append '-<i>' and probe again.
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                if self._readVm("uuid"):
                    # Path already populated in xenstore; try the next suffix.
                    self.vmpath = None
                    i = i + 1
            except:
                # Read failure means the path is unused; keep the candidate.
                pass

        self.dompath = dompath

        # Runtime handles, filled in when the domain is built.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # xenstore watches and shutdown bookkeeping.
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume
        self.restart_in_progress = False

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        # Cache of per-class device controllers.
        self._deviceControllers = {}

        # Legacy state flags are all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            # Merge in VM details already present in xenstore.
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
444 #
445 # Public functions available through XMLRPC
446 #
    def start(self, is_managed = False):
        """Attempt to start the VM, doing the appropriate initialisation
        if it is not already started.

        @keyword is_managed: save the running config via XendDomain if the
                             domain is persistent
        @type    is_managed: bool
        @raise XendError: if the VM is already running.
        """
        from xen.xend import XendDomain

        # Only a halted, suspended or crashed VM may be (re)started.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each step reports task progress in the given percent range.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply scheduler weight/cap when the credit scheduler runs.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                       self.getWeight(),
                                                       self.getCap())
            except:
                # Any failure aborts the start: destroy and propagate.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: if the domain is neither suspended nor halted.
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Rebuild xenstore details, channels and devices, in order.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                # Resume failed: tear the domain down and propagate.
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of DOMAIN_SHUTDOWN_REASONS' values
        @type  reason: string
        @raise XendError: already shut down / halted, Domain 0, or an
                          invalid reason string.
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        # Signal the guest through the xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            # No PV drivers (no callback IRQ) or not in ACPI S0: have the
            # hypervisor force the shutdown instead.
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
    def pause(self):
        """Pause domain

        Before pausing the vCPUs, best-effort pause any blktap2 disks the
        domain uses, via their sysfs control nodes.

        @raise XendError: Failed pausing a domain
        """
        try:
            bepath="/local/domain/0/backend/"
            if(self.domid):

                # Walk this domain's vbd backends looking for blktap2 disks.
                dev = xstransact.List(bepath + 'vbd' + "/%d" % (self.domid,))
                for x in dev:
                    path = self.getDeviceController('vbd').readBackend(x, 'params')
                    if path and path.startswith('/dev/xen/blktap-2'):
                        #Figure out the sysfs path.
                        pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
                        ctrlid = pattern.search(path)
                        ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
                        #pause the disk
                        f = open(ctrl + '/pause', 'w')
                        f.write('pause');
                        f.close()
        except Exception, ex:
            # Best-effort only: a disk that cannot be paused is not fatal.
            log.warn('Could not pause blktap disk.');

        try:
            xc.domain_pause(self.domid)
            self._stateSet(DOM_STATE_PAUSED)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be paused: %s" % str(ex))
    def unpause(self):
        """Unpause domain

        Mirrors pause(): best-effort resume any blktap2 disks via sysfs
        before unpausing the vCPUs.

        @raise XendError: Failed unpausing a domain
        """
        try:
            bepath="/local/domain/0/backend/"
            if(self.domid):
                # Walk this domain's vbd backends looking for blktap2 disks.
                dev = xstransact.List(bepath + "vbd" + "/%d" % (self.domid,))
                for x in dev:
                    path = self.getDeviceController('vbd').readBackend(x, 'params')
                    if path and path.startswith('/dev/xen/blktap-2'):
                        #Figure out the sysfs path.
                        pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
                        ctrlid = pattern.search(path)
                        ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
                        #unpause the disk
                        if(os.path.exists(ctrl + '/resume')):
                            f = open(ctrl + '/resume', 'w');
                            f.write('resume');
                            f.close();

        except Exception, ex:
            # Best-effort only: log and carry on to unpause the vCPUs.
            log.warn('Could not unpause blktap disk: %s' % str(ex));

        try:
            xc.domain_unpause(self.domid)
            self._stateSet(DOM_STATE_RUNNING)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be unpaused: %s" % str(ex))
596 def send_sysrq(self, key):
597 """ Send a Sysrq equivalent key via xenstored."""
598 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
599 raise XendError("Domain '%s' is not started" % self.info['name_label'])
601 asserts.isCharConvertible(key)
602 self.storeDom("control/sysrq", '%c' % key)
    def pci_device_configure_boot(self):
        """Issue PCI attach requests for all configured PCI devices at
        boot time (HVM guests only); each device is pushed through
        pci_device_configure() in the Initialising/Booting state.
        """

        if not self.info.is_hvm():
            return

        # All PCI devices share a single configuration node, devid '0'.
        devid = '0'
        dev_info = self._getDeviceInfo_pci(devid)
        if dev_info is None:
            return

        # get the virtual slot info from xenstore
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']
        # Convert each device dict to an SXP attach request.
        request = map(lambda x:
                      self.info.pci_convert_dict_to_sxp(x, 'Initialising',
                                                        'Booting'), pci_devs)

        for i in request:
            self.pci_device_configure(i)
    def hvm_pci_device_create(self, dev_config):
        """Validate and hot-plug a PCI device into this HVM guest.

        Performs all safety checks (slot conflicts, VT-d assignability,
        pciback ownership, BAR alignment, co-assignment) before delegating
        the actual insertion to hvm_pci_device_insert_dev().

        @param dev_config: parsed PCI configuration; 'devs' holds the
                           device dictionaries (first entry is used)
        @type  dev_config: dictionary
        @return: virtual slot of the inserted device
        @raise VmError: on any failed check or non-HVM guest.
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Reject a vslot already occupied (AUTO_PHP_SLOT excepted).
                if (int(x['vslot'], 16) == int(new_dev['vslot'], 16) and
                    int(x['vslot'], 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already have a device." % (new_dev['vslot']))

                # Reject the exact same physical function twice.
                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(0, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Decode the failing BDF returned by the hypervisor.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        domain = int(new_dev['domain'],16)
        bus = int(new_dev['bus'],16)
        dev = int(new_dev['slot'],16)
        func = int(new_dev['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                           "%s\n"+ \
                           "See the pciback.hide kernel "+ \
                           "command-line parameter or\n"+ \
                           "bind your slot/device to the PCI backend using sysfs" \
                           )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        pci_device.devs_check_driver(coassignment_list)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            (domain, bus, dev, func) = parse_pci_name(pci_str)
            dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
            if xc.test_assign_device(0, dev_str) == 0:
                # Still assignable, i.e. not assigned anywhere: fine.
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                               " because one of its co-assignment device %s has been" + \
                               " assigned to other domain." \
                               )% (pci_device.name, self.info['name_label'], pci_str))

        return self.hvm_pci_device_insert_dev(new_dev)
725 def hvm_pci_device_insert(self, dev_config):
726 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
727 % scrub_password(dev_config))
729 if not self.info.is_hvm():
730 raise VmError("hvm_pci_device_create called on non-HVM guest")
732 new_dev = dev_config['devs'][0]
734 return self.hvm_pci_device_insert_dev(new_dev)
    def hvm_pci_device_insert_dev(self, new_dev):
        """Send the 'pci-ins' command for one PCI device to the device
        model and return the virtual slot it reports.

        @param new_dev: device dictionary (domain/bus/slot/func/vslot...)
        @type  new_dev: dictionary
        @return: virtual slot (hex string)
        @raise VmError: if the device model rejects the pass-through.
        """
        log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
                  % scrub_password(new_dev))

        if self.domid is not None:
            opts = ''
            if new_dev.has_key('opts'):
                opts = ',' + serialise_pci_opts(new_dev['opts'])

            # dddd:bb:ss.f@vslot[,opts] as understood by ioemu's pci-ins.
            bdf_str = "%s:%s:%s.%s@%s%s" % (new_dev['domain'],
                                            new_dev['bus'],
                                            new_dev['slot'],
                                            new_dev['func'],
                                            new_dev['vslot'],
                                            opts)
            self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

            # The device model writes the result (vslot, or an error text)
            # back into its xenstore 'parameter' node.
            vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                    % self.getDomid())
            try:
                # Parsed only to validate that the reply is a hex vslot.
                # NOTE(review): if the read returned None this raises
                # TypeError, not ValueError — presumably never happens in
                # practice; confirm against the device model.
                vslot_int = int(vslot, 16)
            except ValueError:
                raise VmError(("Cannot pass-through PCI function '%s'. " +
                               "Device model reported an error: %s") %
                              (bdf_str, vslot))
        else:
            # Domain not running: just echo back the requested vslot.
            vslot = new_dev['vslot']

        return vslot
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        @raise VmError: invalid MAC address or device creation failure.
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Register the device in this domain's config first.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            # Validate any user-supplied MAC address before going further.
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            # Domain is running: create the device now and wait for it.
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Roll back the config entry registered above.
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked under the vbd refs list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        Handles both attachment ('Initialising' state, with 'Booting'
        sub-state for boot-time inserts) and detachment of PCI devices,
        for HVM and PV guests, updating both the live device and the
        stored XendConfig.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise XendError: detaching when no pci platform exists.
        @raise VmError: detaching a device that is not connected.
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                if pci_sub_state == 'Booting':
                    vslot = self.hvm_pci_device_insert(dev_config)
                else:
                    vslot = self.hvm_pci_device_create(dev_config)
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot
            else:
                # HVM PCI device detachment
                # Look up the vslot of the BDF being detached.
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslot = ""
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslot = x['vslot']
                        break
                if vslot == "":
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslot, 16))
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
            if not self.info.is_hvm():
                # in PV case, wait until backend state becomes connected.
                dev_control.waitForDevice_reconfigure(devid)
            num_devs = dev_control.cleanupDevice(devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy pci and remove config.
            if num_devs == 0:
                if self.info.is_hvm():
                    self.destroyDevice('pci', devid, True)
                else:
                    self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        else:
            # Domain not running: rewrite the stored SXP only.
            new_dev_sxp = ['pci']
            for cur_dev in sxp.children(existing_dev_info, 'dev'):
                if pci_state == 'Closing':
                    # Drop the matching BDF from the retained device list.
                    if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                       int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                       int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                       int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                        continue
                new_dev_sxp.append(cur_dev)

            if pci_state == 'Initialising' and pci_sub_state != 'Booting':
                # Plain attach on a dormant domain: append the new devices.
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, remove config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
929 def vscsi_device_configure(self, dev_sxp):
930 """Configure an existing vscsi device.
931 quoted pci funciton
932 """
933 def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
934 if not dev_info:
935 return False
936 for dev in sxp.children(dev_info, 'dev'):
937 if p_devs is not None:
938 if sxp.child_value(dev, 'p-dev') in p_devs:
939 return True
940 if v_devs is not None:
941 if sxp.child_value(dev, 'v-dev') in v_devs:
942 return True
943 return False
945 def _vscsi_be(be):
946 be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
947 if be_xdi is not None:
948 be_domid = be_xdi.getDomid()
949 if be_domid is not None:
950 return str(be_domid)
951 return str(be)
953 dev_class = sxp.name(dev_sxp)
954 if dev_class != 'vscsi':
955 return False
957 dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
958 devs = dev_config['devs']
959 v_devs = [d['v-dev'] for d in devs]
960 state = devs[0]['state']
961 req_devid = int(devs[0]['devid'])
962 cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)
964 if state == xenbusState['Initialising']:
965 # new create
966 # If request devid does not exist, create and exit.
967 p_devs = [d['p-dev'] for d in devs]
968 for dev_type, dev_info in self.info.all_devices_sxpr():
969 if dev_type != 'vscsi':
970 continue
971 if _is_vscsi_defined(dev_info, p_devs = p_devs):
972 raise XendError('The physical device "%s" is already defined' % \
973 p_devs[0])
974 if cur_dev_sxp is None:
975 self.device_create(dev_sxp)
976 return True
978 if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
979 raise XendError('The virtual device "%s" is already defined' % \
980 v_devs[0])
982 if int(dev_config['feature-host']) != \
983 int(sxp.child_value(cur_dev_sxp, 'feature-host')):
984 raise XendError('The physical device "%s" cannot define '
985 'because mode is different' % devs[0]['p-dev'])
987 new_be = dev_config.get('backend', None)
988 if new_be is not None:
989 cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
990 if cur_be is None:
991 cur_be = xen.xend.XendDomain.DOM0_ID
992 new_be_dom = _vscsi_be(new_be)
993 cur_be_dom = _vscsi_be(cur_be)
994 if new_be_dom != cur_be_dom:
995 raise XendError('The physical device "%s" cannot define '
996 'because backend is different' % devs[0]['p-dev'])
998 elif state == xenbusState['Closing']:
999 if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
1000 raise XendError("Cannot detach vscsi device does not exist")
1002 if self.domid is not None:
1003 # use DevController.reconfigureDevice to change device config
1004 dev_control = self.getDeviceController(dev_class)
1005 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
1006 dev_control.waitForDevice_reconfigure(req_devid)
1007 num_devs = dev_control.cleanupDevice(req_devid)
1009 # update XendConfig with new device info
1010 if dev_uuid:
1011 new_dev_sxp = dev_control.configuration(req_devid)
1012 self.info.device_update(dev_uuid, new_dev_sxp)
1014 # If there is no device left, destroy vscsi and remove config.
1015 if num_devs == 0:
1016 self.destroyDevice('vscsi', req_devid)
1017 del self.info['devices'][dev_uuid]
1019 else:
1020 new_dev_sxp = ['vscsi']
1021 cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
1022 new_dev_sxp.append(cur_mode)
1023 try:
1024 cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
1025 new_dev_sxp.append(cur_be)
1026 except IndexError:
1027 pass
1029 for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
1030 if state == xenbusState['Closing']:
1031 if int(cur_mode[1]) == 1:
1032 continue
1033 if sxp.child_value(cur_dev, 'v-dev') in v_devs:
1034 continue
1035 new_dev_sxp.append(cur_dev)
1037 if state == xenbusState['Initialising']:
1038 for new_dev in sxp.children(dev_sxp, 'dev'):
1039 new_dev_sxp.append(new_dev)
1041 dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
1042 self.info.device_update(dev_uuid, new_dev_sxp)
1044 # If there is only 'vscsi' in new_dev_sxp, remove the config.
1045 if len(sxp.children(new_dev_sxp, 'dev')) == 0:
1046 del self.info['devices'][dev_uuid]
1048 xen.xend.XendDomain.instance().managed_config_save(self)
1050 return True
1052 def device_configure(self, dev_sxp, devid = None):
1053 """Configure an existing device.
1055 @param dev_config: device configuration
1056 @type dev_config: SXP object (parsed config)
1057 @param devid: device id
1058 @type devid: int
1059 @return: Returns True if successfully updated device
1060 @rtype: boolean
1061 """
1063 # convert device sxp to a dict
1064 dev_class = sxp.name(dev_sxp)
1065 dev_config = {}
1067 if dev_class == 'pci':
1068 return self.pci_device_configure(dev_sxp)
1070 if dev_class == 'vscsi':
1071 return self.vscsi_device_configure(dev_sxp)
1073 for opt_val in dev_sxp[1:]:
1074 try:
1075 dev_config[opt_val[0]] = opt_val[1]
1076 except IndexError:
1077 pass
1079 dev_control = self.getDeviceController(dev_class)
1080 if devid is None:
1081 dev = dev_config.get('dev', '')
1082 if not dev:
1083 raise VmError('Block device must have virtual details specified')
1084 if 'ioemu:' in dev:
1085 (_, dev) = dev.split(':', 1)
1086 try:
1087 (dev, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1088 except ValueError:
1089 pass
1090 devid = dev_control.convertToDeviceNumber(dev)
1091 dev_info = self._getDeviceInfo_vbd(devid)
1092 if dev_info is None:
1093 raise VmError("Device %s not connected" % devid)
1094 dev_uuid = sxp.child_value(dev_info, 'uuid')
1096 if self.domid is not None:
1097 # use DevController.reconfigureDevice to change device config
1098 dev_control.reconfigureDevice(devid, dev_config)
1099 else:
1100 (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
1101 if (new_f['device-type'] == 'cdrom' and
1102 sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
1103 new_b['mode'] == 'r' and
1104 sxp.child_value(dev_info, 'mode') == 'r'):
1105 pass
1106 else:
1107 raise VmError('Refusing to reconfigure device %s:%d to %s' %
1108 (dev_class, devid, dev_config))
1110 # update XendConfig with new device info
1111 self.info.device_update(dev_uuid, dev_sxp)
1112 xen.xend.XendDomain.instance().managed_config_save(self)
1114 return True
1116 def waitForDevices(self):
1117 """Wait for this domain's configured devices to connect.
1119 @raise VmError: if any device fails to initialise.
1120 """
1121 for devclass in XendDevices.valid_devices():
1122 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Detach the PCI pass-through device at virtual slot vslot from
        this HVM guest.

        @param vslot: virtual slot of the device to detach
        @raise VmError: non-HVM guest, unknown vslot, unresolvable PCI
            device, or a co-assigned device still assigned to this domain
        @return: 0 on success
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        # NOTE(review): vslot is parsed as decimal here, while the stored
        # 'vslot' fields below are parsed base-16 -- confirm callers pass
        # a decimal string.
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            if int(x['vslot'], 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        from xen.xend.server.pciif import PciDevice
        # x is the matching device dict left bound by the loop above.
        domain = int(x['domain'],16)
        bus = int(x['bus'],16)
        dev = int(x['slot'],16)
        func = int(x['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assignment device %s is still " + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        if self.domid is not None:
            # Ask the device model to hot-unplug the device from the guest.
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing its stored configuration.

        @param deviceClass: device class name, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id or virtual device name
        @param force: passed through to the controller's destroyDevice;
            also skips waiting for the backend teardown here
        @param rm_cfg: also remove the device from this VM's config
        @return: result of the controller's destroyDevice, or None when
            the domain is not running
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:

            #new blktap implementation may need a sysfs write after everything is torn down.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
            path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
            if path and path.startswith('/dev/xen/blktap-2'):
                frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
                backpath = xstransact.Read(frontpath, "backend")
                # Cleanup runs asynchronously so we do not block xend.
                thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))

            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                # NOTE(review): 'backend' stays unbound if no sxpr matches
                # dev -- confirm sxprs always contains the device here.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            # Locate the stored device info so its UUID can be dropped.
            if deviceClass == 'vif':
                if self.domid is not None:
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
    def getDeviceSxprs(self, deviceClass):
        """Return [devnum, sxpr] pairs for all devices of deviceClass.

        For 'pci' the shared config node's raw dev list is returned.
        For running/paused/crashed domains the live controller sxprs are
        used; otherwise the pairs are rebuilt from the stored config.
        """
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                # 'vbd' requests also match 'tap' devices.
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    # Rebuild a (devs ...) node, tagging each dev with a
                    # null frontstate; dev_num comes from the devid field.
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    # Derive the device number from the virtual name.
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    # Other classes are simply numbered in listing order.
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs
1295 def getBlockDeviceClass(self, devid):
1296 # To get a device number from the devid,
1297 # we temporarily use the device controller of VBD.
1298 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1299 dev_info = self._getDeviceInfo_vbd(dev)
1300 if dev_info:
1301 return dev_info[0]
1303 def _getDeviceInfo_vif(self, mac):
1304 for dev_type, dev_info in self.info.all_devices_sxpr():
1305 if dev_type != 'vif':
1306 continue
1307 if mac == sxp.child_value(dev_info, 'mac'):
1308 return dev_info
1310 def _getDeviceInfo_vbd(self, devid):
1311 for dev_type, dev_info in self.info.all_devices_sxpr():
1312 if dev_type != 'vbd' and dev_type != 'tap':
1313 continue
1314 dev = sxp.child_value(dev_info, 'dev')
1315 dev = dev.split(':')[0]
1316 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1317 if devid == dev:
1318 return dev_info
1320 def _getDeviceInfo_pci(self, devid):
1321 for dev_type, dev_info in self.info.all_devices_sxpr():
1322 if dev_type != 'pci':
1323 continue
1324 return dev_info
1325 return None
1327 def _getDeviceInfo_vscsi(self, devid):
1328 devid = int(devid)
1329 for dev_type, dev_info in self.info.all_devices_sxpr():
1330 if dev_type != 'vscsi':
1331 continue
1332 devs = sxp.children(dev_info, 'dev')
1333 if devid == int(sxp.child_value(devs[0], 'devid')):
1334 return dev_info
1335 return None
1337 def _get_assigned_pci_devices(self, devid = 0):
1338 if self.domid is not None:
1339 return get_assigned_pci_devices(self.domid)
1341 dev_str_list = []
1342 dev_info = self._getDeviceInfo_pci(devid)
1343 if dev_info is None:
1344 return dev_str_list
1345 dev_uuid = sxp.child_value(dev_info, 'uuid')
1346 pci_conf = self.info['devices'][dev_uuid][1]
1347 pci_devs = pci_conf['devs']
1348 for pci_dev in pci_devs:
1349 domain = int(pci_dev['domain'], 16)
1350 bus = int(pci_dev['bus'], 16)
1351 slot = int(pci_dev['slot'], 16)
1352 func = int(pci_dev['func'], 16)
1353 dev_str = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
1354 dev_str_list = dev_str_list + [dev_str]
1355 return dev_str_list
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Refuse to shrink dom0 below the configured minimum.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        # self.info stores memory in bytes.
        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            # Running domain: free host memory when growing, then publish
            # the new target through xenstore and the hypervisor.
            if target > memory_cur:
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new limit; the
            previous memory_static_max is restored first.
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        # Saved in bytes so it can be restored verbatim on failure.
        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            maxmem = int(limit) * 1024
            try:
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the recorded limit before propagating.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        # NOTE(review): because of the return above, the managed config is
        # only saved for inactive domains -- confirm this is intentional.
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxp describing this domain and each of its VCPUs.

        For a running domain the per-VCPU data comes from the hypervisor;
        otherwise placeholder values are filled in, with cpumap taken from
        the stored 'cpus' affinity (or 0-63 when unset).
        @raise XendError: wrapping any RuntimeError from the hypercall.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    # Live VCPU state from the hypervisor.
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Domain not running: report offline placeholders.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        """Return the hypervisor's domain info record via dom_get."""
        return dom_get(self.domid)
1446 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store.  This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for the privileged domain (dom0), which skips
            the memory/vcpu entries and reads live values from Xen.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0's memory/vcpu settings come from Xen, not the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Translate legacy keys to their Xen-API names where a
                # mapping exists; memory values map to static min/max.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only seed devices when none are recorded yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
1512 def _update_consoles(self, transaction = None):
1513 if self.domid == None or self.domid == 0:
1514 return
1516 # Update VT100 port if it exists
1517 if transaction is None:
1518 self.console_port = self.readDom('console/port')
1519 else:
1520 self.console_port = self.readDomTxn(transaction, 'console/port')
1521 if self.console_port is not None:
1522 serial_consoles = self.info.console_get_all('vt100')
1523 if not serial_consoles:
1524 cfg = self.info.console_add('vt100', self.console_port)
1525 self._createDevice('console', cfg)
1526 else:
1527 console_uuid = serial_consoles[0].get('uuid')
1528 self.info.console_update(console_uuid, 'location',
1529 self.console_port)
1532 # Update VNC port if it exists and write to xenstore
1533 if transaction is None:
1534 vnc_port = self.readDom('console/vnc-port')
1535 else:
1536 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1537 if vnc_port is not None:
1538 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1539 if dev_type == 'vfb':
1540 old_location = dev_info.get('location')
1541 listen_host = dev_info.get('vnclisten', \
1542 XendOptions.instance().get_vnclisten_address())
1543 new_location = '%s:%s' % (listen_host, str(vnc_port))
1544 if old_location == new_location:
1545 break
1547 dev_info['location'] = new_location
1548 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1549 vfb_ctrl = self.getDeviceController('vfb')
1550 vfb_ctrl.reconfigureDevice(0, dev_info)
1551 break
    # Function to update xenstore /vm/*
    def _readVm(self, *args):
        """Read value(s) from this VM's xenstore tree (self.vmpath)."""
        return xstransact.Read(self.vmpath, *args)

    def _writeVm(self, *args):
        """Write value(s) into this VM's xenstore tree (self.vmpath)."""
        return xstransact.Write(self.vmpath, *args)

    def _removeVm(self, *args):
        """Remove entries from this VM's xenstore tree (self.vmpath)."""
        return xstransact.Remove(self.vmpath, *args)

    def _gatherVm(self, *args):
        """Gather typed values from this VM's xenstore tree (self.vmpath)."""
        return xstransact.Gather(self.vmpath, *args)

    def _listRecursiveVm(self, *args):
        """Recursively list this VM's xenstore tree (self.vmpath)."""
        return xstransact.ListRecursive(self.vmpath, *args)

    def storeVm(self, *args):
        """Store value(s) into this VM's xenstore tree (self.vmpath)."""
        return xstransact.Store(self.vmpath, *args)

    def permissionsVm(self, *args):
        """Set permissions on entries in this VM's xenstore tree."""
        return xstransact.SetPermissions(self.vmpath, *args)
    # Function to update xenstore /dom/*
    def readDom(self, *args):
        """Read value(s) from this domain's xenstore tree (self.dompath)."""
        return xstransact.Read(self.dompath, *args)

    def gatherDom(self, *args):
        """Gather typed values from this domain's xenstore tree."""
        return xstransact.Gather(self.dompath, *args)

    def _writeDom(self, *args):
        """Write value(s) into this domain's xenstore tree (self.dompath)."""
        return xstransact.Write(self.dompath, *args)

    def _removeDom(self, *args):
        """Remove entries from this domain's xenstore tree (self.dompath)."""
        return xstransact.Remove(self.dompath, *args)

    def storeDom(self, *args):
        """Store value(s) into this domain's xenstore tree (self.dompath)."""
        return xstransact.Store(self.dompath, *args)
    def readDomTxn(self, transaction, *args):
        """Like readDom, but within the given xenstore transaction."""
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.read(*paths)

    def gatherDomTxn(self, transaction, *args):
        """Like gatherDom, but within the given xenstore transaction."""
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.gather(*paths)

    def _writeDomTxn(self, transaction, *args):
        """Like _writeDom, but within the given xenstore transaction."""
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.write(*paths)

    def _removeDomTxn(self, transaction, *args):
        """Like _removeDom, but within the given xenstore transaction."""
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.remove(*paths)

    def storeDomTxn(self, transaction, *args):
        """Like storeDom, but within the given xenstore transaction."""
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.store(*paths)
1619 def _recreateDom(self):
1620 complete(self.dompath, lambda t: self._recreateDomFunc(t))
    def _recreateDomFunc(self, t):
        """Wipe and rebuild the domain's xenstore tree inside transaction t.

        Recreates the root with domain-readable permissions, links back to
        the /vm path, and creates the standard per-domain subdirectories.
        """
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
        for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
1632 def _storeDomDetails(self):
1633 to_store = {
1634 'domid': str(self.domid),
1635 'vm': self.vmpath,
1636 'name': self.info['name_label'],
1637 'console/limit': str(xoptions.get_console_limit() * 1024),
1638 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1641 def f(n, v):
1642 if v is not None:
1643 if type(v) == bool:
1644 to_store[n] = v and "1" or "0"
1645 else:
1646 to_store[n] = str(v)
1648 # Figure out if we need to tell xenconsoled to ignore this guest's
1649 # console - device model will handle console if it is running
1650 constype = "ioemu"
1651 if 'device_model' not in self.info['platform']:
1652 constype = "xenconsoled"
1654 f('console/port', self.console_port)
1655 f('console/ring-ref', self.console_mfn)
1656 f('console/type', constype)
1657 f('store/port', self.store_port)
1658 f('store/ring-ref', self.store_mfn)
1660 if arch.type == "x86":
1661 f('control/platform-feature-multiprocessor-suspend', True)
1663 # elfnotes
1664 for n, v in self.info.get_notes().iteritems():
1665 n = n.lower().replace('_', '-')
1666 if n == 'features':
1667 for v in v.split('|'):
1668 v = v.replace('_', '-')
1669 if v.startswith('!'):
1670 f('image/%s/%s' % (n, v[1:]), False)
1671 else:
1672 f('image/%s/%s' % (n, v), True)
1673 else:
1674 f('image/%s' % n, v)
1676 if self.info.has_key('security_label'):
1677 f('security_label', self.info['security_label'])
1679 to_store.update(self._vcpuDomDetails())
1681 log.debug("Storing domain details: %s", scrub_password(to_store))
1683 self._writeDom(to_store)
1685 def _vcpuDomDetails(self):
1686 def availability(n):
1687 if self.info['vcpu_avail'] & (1 << n):
1688 return 'online'
1689 else:
1690 return 'offline'
1692 result = {}
1693 for v in range(0, self.info['VCPUs_max']):
1694 result["cpu/%d/availability" % v] = availability(v)
1695 return result
1698 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date.  This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        # _storeChanged re-reads the VM config from the store;
        # _handleShutdownWatch schedules shutdown handling.
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback for the /vm path: re-read the legacy config
        values from the store and fold any changes into self.info,
        re-writing the domain details if something changed.

        @return: 1 (keeps the watch registered)
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback for control/shutdown: schedule a
        refreshShutdown check when a non-suspend shutdown is requested.

        Records (or re-reads) xend/shutdown_start_time so the timeout is
        measured from the first request.
        @return: True (keeps the watch registered)
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # Shutdown already in progress; reschedule for the remainder.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
    # Public Attributes for the VM

    def getDomid(self):
        """Return the domain id (may be None when not running)."""
        return self.domid

    def setName(self, name, to_store = True):
        """Set the domain name after validation, optionally persisting
        it to the VM's xenstore tree."""
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)

    def getName(self):
        """Return the domain's name_label."""
        return self.info['name_label']

    def getDomainPath(self):
        """Return this domain's xenstore path."""
        return self.dompath

    def getShutdownReason(self):
        """Return the current value of the control/shutdown node."""
        return self.readDom('control/shutdown')

    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        return self.store_port

    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        return self.console_port

    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']

    def getVCpuCount(self):
        """Return the configured maximum number of VCPUs."""
        return self.info['VCPUs_max']
    def setVCpuCount(self, vcpus):
        """Set the number of VCPUs for this domain.

        For a running domain this updates vcpu_avail and the xenstore
        availability entries; for an inactive one it resizes the stored
        affinity lists and saves the managed config.
        @raise XendError: vcpus <= 0, or above VCPUs_max when running.
        """
        # NOTE(review): the parameter n is unused -- the closure checks
        # the outer 'vcpus' directly.
        def vcpus_valid(n):
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        # Bitmask with the low 'vcpus' bits set.
        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
            xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        return self.info['memory_dynamic_max'] / 1024

    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024

    def getResume(self):
        """Return the resume flag as a string."""
        return str(self._resume)

    def setResume(self, isresume):
        """Set the resume flag."""
        self._resume = isresume

    def getCpus(self):
        """Return the per-VCPU CPU affinity lists."""
        return self.info['cpus']

    def setCpus(self, cpumap):
        """Set the per-VCPU CPU affinity lists."""
        self.info['cpus'] = cpumap

    def getCap(self):
        """Return the 'cap' entry of vcpus_params."""
        return self.info['vcpus_params']['cap']

    def setCap(self, cpu_cap):
        """Set the 'cap' entry of vcpus_params."""
        self.info['vcpus_params']['cap'] = cpu_cap

    def getWeight(self):
        """Return the 'weight' entry of vcpus_params."""
        return self.info['vcpus_params']['weight']

    def setWeight(self, cpu_weight):
        """Set the 'weight' entry of vcpus_params."""
        self.info['vcpus_params']['weight'] = cpu_weight

    def getRestartCount(self):
        """Return the persisted xend/restart_count value from xenstore."""
        return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: pre-fetched hypervisor domain info; fetched via
            dom_get(self.domid) when None.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason.  This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists.  This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded.  It will also occur if someone
                    # destroys a domain beneath us.  We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen.  This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped.  We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain.  We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain.  XendCheckpoint will do
                        # this once it has finished.  However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain.  This can happen if a restore is in progress, or has
                # failed.  Ignore this domain.
                pass
            else:
                # Domain is alive.  If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # The restart runs on its own thread, outside refresh_shutdown_lock
        # (see the comment on restart_reason above).
        if restart_reason and not self.restart_in_progress:
            self.restart_in_progress = True
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
2002 # Restart functions - handling whether we come back up on shutdown.
2005 def _clearRestart(self):
2006 self._removeDom("xend/shutdown_start_time")
2008 def _maybeDumpCore(self, reason):
2009 if reason == 'crash':
2010 if xoptions.get_enable_dump() or self.get_on_crash() \
2011 in ['coredump_and_destroy', 'coredump_and_restart']:
2012 try:
2013 self.dumpCore()
2014 except XendError:
2015 # This error has been logged -- there's nothing more
2016 # we can do in this context.
2017 pass
2019 def _maybeRestart(self, reason):
2020 # Before taking configured action, dump core if configured to do so.
2022 self._maybeDumpCore(reason)
2024 # Dispatch to the correct method based upon the configured on_{reason}
2025 # behaviour.
2026 actions = {"destroy" : self.destroy,
2027 "restart" : self._restart,
2028 "preserve" : self._preserve,
2029 "rename-restart" : self._renameRestart,
2030 "coredump-destroy" : self.destroy,
2031 "coredump-restart" : self._restart}
2033 action_conf = {
2034 'poweroff': 'actions_after_shutdown',
2035 'reboot': 'actions_after_reboot',
2036 'crash': 'actions_after_crash',
2039 action_target = self.info.get(action_conf.get(reason))
2040 func = actions.get(action_target, None)
2041 if func and callable(func):
2042 func()
2043 else:
2044 self.destroy() # default to destroy
2046 def _renameRestart(self):
2047 self._restart(True)
2049 def _restart(self, rename = False):
2050 """Restart the domain after it has exited.
2052 @param rename True if the old domain is to be renamed and preserved,
2053 False if it is to be destroyed.
2054 """
2055 from xen.xend import XendDomain
2057 if self._readVm(RESTART_IN_PROGRESS):
2058 log.error('Xend failed during restart of domain %s. '
2059 'Refusing to restart to avoid loops.',
2060 str(self.domid))
2061 self.destroy()
2062 return
2064 old_domid = self.domid
2065 self._writeVm(RESTART_IN_PROGRESS, 'True')
2067 elapse = time.time() - self.info['start_time']
2068 if elapse < MINIMUM_RESTART_TIME:
2069 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2070 'Refusing to restart to avoid loops.',
2071 self.info['name_label'], elapse)
2072 self.destroy()
2073 return
2075 prev_vm_xend = self._listRecursiveVm('xend')
2076 new_dom_info = self.info
2077 try:
2078 if rename:
2079 new_dom_info = self._preserveForRestart()
2080 else:
2081 self._unwatchVm()
2082 self.destroy()
2084 # new_dom's VM will be the same as this domain's VM, except where
2085 # the rename flag has instructed us to call preserveForRestart.
2086 # In that case, it is important that we remove the
2087 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2088 # once the new one is available.
2090 new_dom = None
2091 try:
2092 new_dom = XendDomain.instance().domain_create_from_dict(
2093 new_dom_info)
2094 for x in prev_vm_xend[0][1]:
2095 new_dom._writeVm('xend/%s' % x[0], x[1])
2096 new_dom.waitForDevices()
2097 new_dom.unpause()
2098 rst_cnt = new_dom._readVm('xend/restart_count')
2099 rst_cnt = int(rst_cnt) + 1
2100 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2101 new_dom._removeVm(RESTART_IN_PROGRESS)
2102 except:
2103 if new_dom:
2104 new_dom._removeVm(RESTART_IN_PROGRESS)
2105 new_dom.destroy()
2106 else:
2107 self._removeVm(RESTART_IN_PROGRESS)
2108 raise
2109 except:
2110 log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name.  This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: a copy of self.info carrying the ORIGINAL name and UUID,
                 suitable for creating the replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the original identity; this (dead) domain takes
        # the freshly generated name/UUID so the two cannot collide.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
2140 def _preserve(self):
2141 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2142 self.domid)
2143 self._unwatchVm()
2144 self.storeDom('xend/shutdown_completed', 'True')
2145 self._stateSet(DOM_STATE_HALTED)
2148 # Debugging ..
    def dumpCore(self, corefile = None):
        """Create a core dump for this domain.

        @param corefile: destination path; defaults to a timestamped file
                         under /var/xen/dump.
        @raise: XendError if core dumping failed.
        """

        if not corefile:
            # NOTE(review): format string is "%Y-%m%d-%H%M.%S" as written
            # (no dash between month and day) -- kept as-is; existing
            # tooling may rely on these names.
            this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
            corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
                                                        self.info['name_label'], self.domid)

        if os.path.isdir(corefile):
            raise XendError("Cannot dump core in a directory: %s" %
                            corefile)

        try:
            try:
                # Flag the dump in the VM's xenstore tree while it runs.
                self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
                xc.domain_dumpcore(self.domid, corefile)
            except RuntimeError, ex:
                # Keep the partial file but mark it so nobody mistakes it
                # for a complete dump; best-effort rename.
                corefile_incomp = corefile+'-incomplete'
                try:
                    os.rename(corefile, corefile_incomp)
                except:
                    pass

                log.error("core dump failed: id = %s name = %s: %s",
                          self.domid, self.info['name_label'], str(ex))
                raise XendError("Failed to dump core: %s" % str(ex))
        finally:
            # Always clear the in-progress flag, success or failure.
            self._removeVm(DUMPCORE_IN_PROGRESS)
2184 # Device creation/deletion functions
2187 def _createDevice(self, deviceClass, devConfig):
2188 return self.getDeviceController(deviceClass).createDevice(devConfig)
2190 def _waitForDevice(self, deviceClass, devid):
2191 return self.getDeviceController(deviceClass).waitForDevice(devid)
2193 def _waitForDeviceUUID(self, dev_uuid):
2194 deviceClass, config = self.info['devices'].get(dev_uuid)
2195 self._waitForDevice(deviceClass, config['devid'])
2197 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2198 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2199 devid, backpath)
2201 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2202 return self.getDeviceController(deviceClass).reconfigureDevice(
2203 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        Non-vscsi devices are created in ordered_device_refs() order;
        vscsi devices are deferred and created in devid-sorted order.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        # vscsi devices are collected here and created after the first
        # pass, sorted by devid (see comment below).
        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        # It is necessary to sort by devid so the guest sees stable
        # /dev/sdxx device naming.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        # if have pass-through devs, need the virtual pci slots info from qemu
        self.pci_device_configure_boot()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices.  Nothrow guarantee."""
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                # Nothrow: log and continue releasing the other devices.
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        # Transaction is only used to enumerate devices; it is aborted
        # below, never committed.
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            t.abort()
2289 def getDeviceController(self, name):
2290 """Get the device controller for this domain, and if it
2291 doesn't exist, create it.
2293 @param name: device class name
2294 @type name: string
2295 @rtype: subclass of DevController
2296 """
2297 if name not in self._deviceControllers:
2298 devController = XendDevices.make_controller(name, self)
2299 if not devController:
2300 raise XendError("Unknown device type: %s" % name)
2301 self._deviceControllers[name] = devController
2303 return self._deviceControllers[name]
2306 # Migration functions (public)
2309 def testMigrateDevices(self, network, dst):
2310 """ Notify all device about intention of migration
2311 @raise: XendError for a device that cannot be migrated
2312 """
2313 for (n, c) in self.info.all_devices_sxpr():
2314 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2315 if rc != 0:
2316 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, every device is rolled back: devices that completed the
        current step (counted in ctr) are recovered at this step; once ctr
        reaches zero, step is decremented so the remaining devices are
        recovered at the previous step they had reached.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            # Re-raise so the caller knows the migration step failed.
            raise
2336 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2337 step, domName=''):
2338 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2339 network, dst, step, domName)
2341 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2342 dst, step, domName=''):
2343 return self.getDeviceController(deviceClass).recover_migrate(
2344 deviceConfig, network, dst, step, domName)
2347 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain via xc.domain_create, then applies the HVM
        platform parameters, vcpu limits, scheduler sanity checks and
        VT-d assignability tests, and finally registers the domain with
        XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None
        self.restart_in_progress = False

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 4MB here to be on the safe side.
        # 2MB memory allocation was not enough in some cases, so it's 4MB now
        balloon.free(4*1024, self) # 4MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Pack the creation flags: bit0 = HVM, bit1 = HAP, bit2 = S3 integrity.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            # NOTE(review): for non-ACM failures the exception is swallowed
            # here and the generic VmError below is raised instead (relying
            # on self.domid still comparing < 0 in Python 2).
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')

        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Check for cpu_{cap|weight} validity for credit scheduler
        if XendNode.instance().xenschedinfo() == 'credit':
            cap = self.getCap()
            weight = self.getWeight()

            assert type(weight) == int
            assert type(cap) == int

            if weight < 1 or weight > 65535:
                raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

            if cap < 0 or cap > self.getVCpuCount() * 100:
                raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                              (self.getVCpuCount() * 100))

        # Test whether the devices can be assigned with VT-d
        self.info.update_platform_pci()
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci) # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(0, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/device/function from the
                # packed BDF value returned by test_assign_device.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
2476 def _introduceDomain(self):
2477 assert self.domid is not None
2478 assert self.store_mfn is not None
2479 assert self.store_port is not None
2481 try:
2482 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2483 except RuntimeError, exn:
2484 raise XendError(str(exn))
2486 def _setTarget(self, target):
2487 assert self.domid is not None
2489 try:
2490 SetTarget(self.domid, target)
2491 self.storeDom('target', target)
2492 except RuntimeError, exn:
2493 raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        If no explicit cpus list is configured, on multi-node hosts pick
        the least-loaded NUMA node with enough free memory and pin all
        vcpus to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty affinity list configured.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Score each candidate node by how many online vcpus of
                # other domains can run on its cpus, normalised by the
                # node's cpu count; return the least-loaded node index.
                # NOTE: 'info' is the physinfo dict bound below, before
                # this closure is called.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        # Nodes with no cpus, or outside the candidate
                        # list, must never be selected.
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # needmem is in KiB here; memory_dynamic_max is in bytes.
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the constructed domain: run the bootloader, pin vcpus,
        size and reserve memory, build the image, and create channels and
        devices.  On failure, bootloader state is cleaned up and the error
        is re-raised as VmError."""
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee."""

        # Serialise against refreshShutdown(), which takes the same lock.
        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                # Nothrow: log and carry on with state cleanup.
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
2669 def unwatchShutdown(self):
2670 """Remove the watch on the domain's control/shutdown node, if any.
2671 Idempotent. Nothrow guarantee. Expects to be protected by the
2672 refresh_shutdown_lock."""
2674 try:
2675 try:
2676 if self.shutdownWatch:
2677 self.shutdownWatch.unwatch()
2678 finally:
2679 self.shutdownWatch = None
2680 except:
2681 log.exception("Unwatching control/shutdown failed.")
2683 def waitForShutdown(self):
2684 self.state_updated.acquire()
2685 try:
2686 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2687 self.state_updated.wait(timeout=1.0)
2688 finally:
2689 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down. If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up. HVM guests with no PV drivers
        should already be shutdown.
        """
        state = "suspend"
        nr_tries = 60

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                # While control/shutdown still reads "suspend" the guest
                # has not yet acknowledged the request; once the guest
                # clears/rewrites the node, state changes and we just
                # keep waiting for the state transition above.
                if state == "suspend":
                    if nr_tries == 0:
                        msg = ('Timeout waiting for domain %s to suspend'
                               % self.domid)
                        # Withdraw the request before giving up.
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2717 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish bringing up a restored domain: record the channel MFNs,
        reintroduce the domain to xenstore, recreate the device model and
        re-register watches.  Called from XendCheckpoint."""

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # NOTE(review): the True argument presumably marks "restoring";
            # confirm against image.createDeviceModel's signature.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
2738 def _endRestore(self):
2739 self.setResume(False)
2742 # VM Destroy
    def _prepare_phantom_paths(self):
        """Collect the xenstore paths of phantom vbd devices so they can be
        removed after the normal devices (see _cleanup_phantom_devs).

        @return: list of backend/frontend phantom-vbd paths (possibly empty).
        """
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        plist = []
        if self.domid is not None:
            # Transaction is used only to enumerate devices; it is
            # aborted below, never committed.
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd =  xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                t.abort()
        return plist
2764 def _cleanup_phantom_devs(self, plist):
2765 # remove phantom devices
2766 if not plist == []:
2767 time.sleep(2)
2768 for paths in plist:
2769 if paths.find('backend') != -1:
2770 # Modify online status /before/ updating state (latter is watched by
2771 # drivers, so this ordering avoids a race).
2772 xstransact.Write(paths, 'online', "0")
2773 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2774 # force
2775 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee."""

        # Idempotent: a domain already cleaned up has domid None.
        if self.domid is None:
            return
        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Phantom vbd paths must be collected before the normal devices
        # are torn down, and removed afterwards.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause first so the domain stops running while FLR and
                # destruction proceed.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                # Nothrow: log and continue with bookkeeping cleanup.
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and recreate a new one from its own
        configuration, preserving the VM's xend/* xenstore entries.
        Nothrow: failures are logged."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot the xend subtree before destroy() removes it.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # Clear the stale domid so the create path allocates a
                # fresh one.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Don't leave a half-created replacement behind.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain (e.g. after live checkpoint, or after
        a later error during save or migrate).  Checks that the domain is
        currently suspended first, so it is safe to call from anywhere."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migate); checks that the domain
        # is currently suspended first so safe to call from anywhere

        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            # "fast" resume skips the device teardown/rebuild cycle when
            # the kernel advertises SUSPEND_CANCEL support.
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            # Nothrow here; the device model is still resumed below.
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2885 # Channels for xenstore and console
2888 def _createChannels(self):
2889 """Create the channels to the domain.
2890 """
2891 self.store_port = self._createChannel()
2892 self.console_port = self._createChannel()
2895 def _createChannel(self):
2896 """Create an event channel to the domain.
2897 """
2898 try:
2899 if self.domid != None:
2900 return xc.evtchn_alloc_unbound(domid = self.domid,
2901 remote_dom = 0)
2902 except:
2903 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2904 raise
2906 def _resetChannels(self):
2907 """Reset all event channels in the domain.
2908 """
2909 try:
2910 if self.domid != None:
2911 return xc.evtchn_reset(dom = self.domid)
2912 except:
2913 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2914 raise
2918 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        For blktap-backed disk images that are not raw, the image is first
        mounted on BOOTLOADER_LOOPBACK_DEVICE in dom0 so pygrub can read it,
        and unmounted again afterwards.
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = auxbin.pathTo('pygrub')

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            # The bootloader reads only the first bootable disk.
            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # st_rdev == 0 means fn is a regular file, not a device node.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device.  pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always unmount the loopback device, even if the
                # bootloader failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2996 # VM Functions
def _readVMDetails(self, params):
    """Read the specified parameters from the store.

    @param params: parameter descriptors understood by _gatherVm
        (presumably name/type pairs -- confirm against _gatherVm).
    @return: the gathered values, or [] when the store data is invalid.
    """
    try:
        return self._gatherVm(*params)
    except ValueError:
        # One of the int/float entries in params has a corresponding store
        # entry that is invalid. We recover, because older versions of
        # Xend may have put the entry there (memory/target, for example),
        # but this is in general a bad situation to have reached.
        log.exception(
            "Store corrupted at %s! Domain %d's configuration may be "
            "affected.", self.vmpath, self.domid)
        return []
def _cleanupVm(self):
    """Cleanup VM resources.  Idempotent.  Nothrow guarantee."""

    # Drop the xenstore watch first so no callbacks fire while the
    # VM path is being removed.
    self._unwatchVm()

    try:
        self._removeVm()
    except:
        # Nothrow guarantee: removal failure is logged, never raised.
        log.exception("Removing VM path failed.")
def checkLiveMigrateMemory(self):
    """ Make sure there's enough memory to migrate this domain """
    required_kb = 0
    if arch.type == "x86":
        # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
        # the minimum that Xen would allocate if no value were given.
        required_kb = self.info['VCPUs_max'] * 1024 + \
                      (self.info['memory_static_max'] / 1024 / 1024) * 4
        # Round up to a whole number of MiB (expressed in KiB).
        required_kb = ((required_kb + 1023) / 1024) * 1024
        # The domain might already have some shadow memory
        required_kb -= xc.shadow_mem_control(self.domid) * 1024
    if required_kb > 0:
        balloon.free(required_kb, self)
def _unwatchVm(self):
    """Remove the watch on the VM path, if any. Idempotent. Nothrow
    guarantee."""
    try:
        try:
            watch = self.vmWatch
            if watch:
                watch.unwatch()
        finally:
            # Clear the reference whether or not unwatch succeeded.
            self.vmWatch = None
    except:
        log.exception("Unwatching VM path failed.")
def testDeviceComplete(self):
    """ For Block IO migration safety we must ensure that
    the device has shutdown correctly, i.e. all blocks are
    flushed to disk
    """
    start = time.time()
    while True:
        elapsed = time.time() - start
        # Poll both plain block devices and blktap devices.
        active = (self.getDeviceController('vbd').deviceIDs() +
                  self.getDeviceController('tap').deviceIDs())
        for dev in active:
            log.info("Dev %s still active, looping...", dev)
            time.sleep(0.1)

        if not active:
            break
        if elapsed >= MIGRATE_TIMEOUT:
            log.info("Dev still active but hit max loop timeout")
            break
def testvifsComplete(self):
    """ In case vifs are released and then created for the same
    domain, we need to wait the device shut down.
    """
    start = time.time()
    while True:
        elapsed = time.time() - start
        active = self.getDeviceController('vif').deviceIDs()
        for dev in active:
            log.info("Dev %s still active, looping...", dev)
            time.sleep(0.1)

        if not active:
            break
        if elapsed >= MIGRATE_TIMEOUT:
            log.info("Dev still active but hit max loop timeout")
            break
def _storeVmDetails(self):
    """Write the legacy VM parameters out to the xenstore VM path and
    set the guest's read permissions on them."""
    details = {}

    # Legacy xenstore keys map (mostly) onto Xen-API config names.
    for legacy_key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
        xapi_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(legacy_key,
                                                           legacy_key)
        if self._infoIsSet(xapi_key):
            details[legacy_key] = str(self.info[xapi_key])

    if self._infoIsSet("static_memory_min"):
        details["memory"] = str(self.info["static_memory_min"])
    if self._infoIsSet("static_memory_max"):
        details["maxmem"] = str(self.info["static_memory_max"])

    img = self.info.image_sxpr()
    if img:
        details['image'] = sxp.to_string(img)

    # Initialise the restart counter on first store.
    if not self._readVm('xend/restart_count'):
        details['xend/restart_count'] = str(0)

    log.debug("Storing VM details: %s", scrub_password(details))

    self._writeVm(details)
    self._setVmPermissions()
def _setVmPermissions(self):
    """Allow the guest domain to read its UUID. We don't allow it to
    access any other entry, for security."""
    perms = { 'dom' : self.domid,
              'read' : True,
              'write' : False }
    xstransact.SetPermissions('%s/uuid' % self.vmpath, perms)
3126 # Utility functions
def __getattr__(self, name):
    """Trap reads of the legacy 'state' attribute and route them
    through _stateGet(), warning loudly so callers get fixed."""
    if name == "state":
        # Fixed typo in the warning: "should us" -> "should use".
        log.warn("Somebody tried to read XendDomainInfo.state... should use _stateGet()!!!")
        log.warn("".join(traceback.format_stack()))
        return self._stateGet()
    else:
        raise AttributeError(name)
def __setattr__(self, name, value):
    """Trap writes of the legacy 'state' attribute and route them
    through _stateSet(), warning loudly so callers get fixed."""
    if name == "state":
        # Fixed the warning: "should us _stateGet()" was both a typo
        # and the wrong method for a setter -- it is _stateSet().
        log.warn("Somebody tried to set XendDomainInfo.state... should use _stateSet()!!!")
        log.warn("".join(traceback.format_stack()))
        self._stateSet(value)
    else:
        self.__dict__[name] = value
def _stateSet(self, state):
    """Notify state watchers and fire a Xen-API event if the live
    power state differs from *state*."""
    # Serialise against waiters on the state_updated condition.
    self.state_updated.acquire()
    try:
        # TODO Not sure this is correct...
        # _stateGet is live now. Why not fire event
        # even when it hasn't changed?
        if self._stateGet() != state:
            self.state_updated.notifyAll()
            import XendAPI
            XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                   'power_state')
    finally:
        self.state_updated.release()
def _stateGet(self):
    """Reconstitute the Xen-API power state from live xc domain info."""
    # Ask xc for the domain; absence means the domain no longer runs.
    info = dom_get(self.getDomid())

    if info is None or info['shutdown']:
        # Either HALTED or SUSPENDED: a saved checkpoint image on
        # disk means the domain was suspended rather than halted.
        from xen.xend import XendDomain
        checkpoint = XendDomain.instance()._managed_check_point_path(
            self.get_uuid())
        if os.path.exists(checkpoint):
            return XEN_API_VM_POWER_STATE_SUSPENDED
        return XEN_API_VM_POWER_STATE_HALTED

    if info['crashed']:
        return XEN_API_VM_POWER_STATE_CRASHED

    # Domain exists and is neither shut down nor crashed.
    if info['paused']:
        return XEN_API_VM_POWER_STATE_PAUSED
    return XEN_API_VM_POWER_STATE_RUNNING
3186 def _infoIsSet(self, name):
3187 return name in self.info and self.info[name] is not None
def _checkName(self, name):
    """Check if a vm name is valid. Valid names contain alphabetic
    characters, digits, or characters in '_-.:/+'.
    The same name cannot be used for more than one vm at the same time.

    @param name: name
    @raise: VmError if invalid
    """
    from xen.xend import XendDomain

    if name is None or name == '':
        raise VmError('Missing VM Name')

    if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
        raise VmError('Invalid VM Name')

    # Reject the name only when some *other* VM already owns it.
    existing = XendDomain.instance().domain_lookup_nr(name)
    if existing and existing.info['uuid'] != self.info['uuid']:
        if existing.domid is not None:
            where = " as domain %s" % str(existing.domid)
        else:
            where = ""
        raise VmError("VM name '%s' already exists%s" % (name, where))
def update(self, info = None, refresh = True, transaction = None):
    """Update with info from xc.domain_getinfo().

    @param info: pre-fetched xc domain info dict; fetched here if None.
    @param refresh: also re-evaluate shutdown state when True.
    @param transaction: xenstore transaction passed to console updates.
    """
    log.trace("XendDomainInfo.update(%s) on domain %s", info,
              str(self.domid))

    if not info:
        info = dom_get(self.domid)
        if not info:
            # Domain has vanished from xc; nothing to update.
            return

    if info["maxmem_kb"] < 0:
        # A negative maxmem means 'unlimited': clamp to total host
        # memory (physinfo reports MiB, hence the * 1024).
        info["maxmem_kb"] = XendNode.instance() \
                            .physinfo_dict()['total_memory'] * 1024

    # make sure state is reset for info
    # TODO: we should eventually get rid of old_dom_states

    self.info.update_config(info)
    self._update_consoles(transaction)

    if refresh:
        self.refreshShutdown(info)

    log.trace("XendDomainInfo.update done on domain %s: %s",
              str(self.domid), self.info)
def sxpr(self, ignore_store = False, legacy_only = True):
    """Return this domain's configuration as an sxp expression."""
    return self.info.to_sxp(domain = self,
                            ignore_devices = ignore_store,
                            legacy_only = legacy_only)
3247 # Xen API
3248 # ----------------------------------------------------------------
def get_uuid(self):
    """Return the VM's UUID, generating and storing one if absent."""
    dom_uuid = self.info.get('uuid')
    if dom_uuid:
        return dom_uuid
    # No UUID recorded yet -- make one up and remember it.
    dom_uuid = uuid.createString()
    self.info['uuid'] = dom_uuid
    return dom_uuid
def get_memory_static_max(self):
    """Return 'memory_static_max' from self.info, or 0 if unset."""
    return self.info.get('memory_static_max', 0)
def get_memory_static_min(self):
    """Return 'memory_static_min' from self.info, or 0 if unset."""
    return self.info.get('memory_static_min', 0)
def get_memory_dynamic_max(self):
    """Return 'memory_dynamic_max' from self.info, or 0 if unset."""
    return self.info.get('memory_dynamic_max', 0)
def get_memory_dynamic_min(self):
    """Return 'memory_dynamic_min' from self.info, or 0 if unset."""
    return self.info.get('memory_dynamic_min', 0)
3266 # only update memory-related config values if they maintain sanity
3267 def _safe_set_memory(self, key, newval):
3268 oldval = self.info.get(key, 0)
3269 try:
3270 self.info[key] = newval
3271 self.info._memory_sanity_check()
3272 except Exception, ex:
3273 self.info[key] = oldval
3274 raise
def set_memory_static_max(self, val):
    """Set 'memory_static_max'; rolls back if sanity checks fail."""
    self._safe_set_memory('memory_static_max', val)
def set_memory_static_min(self, val):
    """Set 'memory_static_min'; rolls back if sanity checks fail."""
    self._safe_set_memory('memory_static_min', val)
def set_memory_dynamic_max(self, val):
    """Set 'memory_dynamic_max'; rolls back if sanity checks fail."""
    self._safe_set_memory('memory_dynamic_max', val)
def set_memory_dynamic_min(self, val):
    """Set 'memory_dynamic_min'; rolls back if sanity checks fail."""
    self._safe_set_memory('memory_dynamic_min', val)
def get_vcpus_params(self):
    """Return scheduler parameters: the stored config when the domain
    is not instantiated, otherwise the live credit-scheduler values."""
    domid = self.getDomid()
    if domid is None:
        return self.info['vcpus_params']
    return xc.sched_credit_domain_get(domid)
def get_power_state(self):
    """Return the Xen-API name for the current live power state."""
    return XEN_API_VM_POWER_STATE[self._stateGet()]
def get_platform(self):
    """Return the platform config dict, or {} if unset."""
    return self.info.get('platform', {})
def get_pci_bus(self):
    """Return the PCI bus identifier, or '' if unset."""
    return self.info.get('pci_bus', '')
def get_tools_version(self):
    """Return the guest tools version dict, or {} if unset."""
    return self.info.get('tools_version', {})
def get_metrics(self):
    """Return the UUID of this VM's VMMetrics instance."""
    return self.metrics.get_uuid();
def get_security_label(self, xspol=None):
    """Return this domain's XSM security label.

    @param xspol: optional policy to evaluate the label under;
        the active policy is used when not given.
    """
    import xen.util.xsm.xsm as security
    return security.get_security_label(self, xspol)
def set_security_label(self, seclab, old_seclab, xspol=None,
                       xspol_old=None):
    """
    Set the security label of a domain from its old to
    a new value.
    @param seclab  New security label formatted in the form
                   <policy type>:<policy name>:<vm label>
    @param old_seclab  The current security label that the
                       VM must have.
    @param xspol   An optional policy under which this
                   update should be done. If not given,
                   then the current active policy is used.
    @param xspol_old The old policy; only to be passed during
                     the updating of a policy
    @return Returns return code, a string with errors from
            the hypervisor's operation, old label of the
            domain
    """
    rc = 0
    errors = ""
    old_label = ""
    new_ssidref = 0
    domid = self.getDomid()
    res_labels = None
    is_policy_update = (xspol_old != None)

    from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

    state = self._stateGet()
    # Relabel only HALTED or RUNNING or PAUSED domains
    if domid != 0 and \
       state not in \
       [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
         DOM_STATE_SUSPENDED ]:
        log.warn("Relabeling domain not possible in state '%s'" %
                 DOM_STATES[state])
        return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

    # Remove security label. Works only for halted or suspended domains
    if not seclab or seclab == "":
        if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        if self.info.has_key('security_label'):
            old_label = self.info['security_label']
            # Check label against expected one.
            if old_label != old_seclab:
                return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
            del self.info['security_label']
            xen.xend.XendDomain.instance().managed_config_save(self)
        return (xsconstants.XSERR_SUCCESS, "", "", 0)

    # Labels are "<type>:<policy>:<vmlabel>".
    tmp = seclab.split(":")
    if len(tmp) != 3:
        return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
    typ, policy, label = tmp

    poladmin = XSPolicyAdminInstance()
    if not xspol:
        xspol = poladmin.get_policy_by_name(policy)

    try:
        # Hold the policy writer lock for the whole relabel so the
        # policy cannot change underneath us.
        xen.xend.XendDomain.instance().policy_lock.acquire_writer()

        if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
            #if domain is running or paused try to relabel in hypervisor
            if not xspol:
                return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

            if typ != xspol.get_type_name() or \
               policy != xspol.get_name():
                return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

            if typ == xsconstants.ACM_POLICY_ID:
                new_ssidref = xspol.vmlabel_to_ssidref(label)
                if new_ssidref == xsconstants.INVALID_SSIDREF:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                # Check that all used resources are accessible under the
                # new label
                if not is_policy_update and \
                   not security.resources_compatible_with_vmlabel(xspol,
                          self, label):
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                #Check label against expected one. Can only do this
                # if the policy hasn't changed underneath in the meantime
                if xspol_old == None:
                    old_label = self.get_security_label()
                    if old_label != old_seclab:
                        log.info("old_label != old_seclab: %s != %s" %
                                 (old_label, old_seclab))
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                # relabel domain in the hypervisor
                rc, errors = security.relabel_domains([[domid, new_ssidref]])
                log.info("rc from relabeling in HV: %d" % rc)
            else:
                return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

        if rc == 0:
            # HALTED, RUNNING or PAUSED
            if domid == 0:
                if xspol:
                    self.info['security_label'] = seclab
                    ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                else:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
            else:
                if self.info.has_key('security_label'):
                    old_label = self.info['security_label']
                    # Check label against expected one, unless wildcard
                    if old_label != old_seclab:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                self.info['security_label'] = seclab

                try:
                    xen.xend.XendDomain.instance().managed_config_save(self)
                except:
                    # Best-effort persistence; failure to save the
                    # managed config does not fail the relabel.
                    pass
        return (rc, errors, old_label, new_ssidref)
    finally:
        xen.xend.XendDomain.instance().policy_lock.release()
def get_on_shutdown(self):
    """Return the configured shutdown action; falls back to the last
    entry of XEN_API_ON_NORMAL_EXIT when unset or unrecognised."""
    action = self.info.get('actions_after_shutdown')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_reboot(self):
    """Return the configured reboot action, with the same fallback
    as get_on_shutdown."""
    action = self.info.get('actions_after_reboot')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_suspend(self):
    """Return the configured suspend action, with the normal-exit
    fallback."""
    # TODO: not supported
    action = self.info.get('actions_after_suspend')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_crash(self):
    """Return the Xen-API crash behaviour, mapping legacy restart
    modes through XEN_API_ON_CRASH_BEHAVIOUR_FILTER."""
    action = self.info.get('actions_after_crash')
    if not action or action not in \
       XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
        return XEN_API_ON_CRASH_BEHAVIOUR[0]
    return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[action]
def get_dev_config_by_uuid(self, dev_class, dev_uuid):
    """ Get's a device configuration either from XendConfig or
    from the DevController.

    @param dev_class: device class, either, 'vbd' or 'vif'
    @param dev_uuid: device UUID

    @rtype: dictionary
    """
    dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

    # shortcut if the domain isn't started because
    # the devcontrollers will have no better information
    # than XendConfig.
    if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                            XEN_API_VM_POWER_STATE_SUSPENDED):
        if dev_config:
            return copy.deepcopy(dev_config)
        return None

    # instead of using dev_class, we use the dev_type
    # that is from XendConfig.
    controller = self.getDeviceController(dev_type)
    if not controller:
        return None

    all_configs = controller.getAllDeviceConfigurations()
    if not all_configs:
        return None

    # Overlay the live controller view onto the stored config.
    merged = copy.deepcopy(dev_config)
    for devid, devcfg in all_configs.items():
        if devcfg.get('uuid') == dev_uuid:
            merged.update(devcfg)
            merged['id'] = devid
            break
    return merged
def get_dev_xenapi_config(self, dev_class, dev_uuid):
    """Return the device's configuration translated into the field
    names the Xen API expects, filling defaults and live I/O stats.

    @param dev_class: 'vif', 'vbd' or 'vtpm'
    @param dev_uuid: device UUID
    @return: dict in Xen-API shape, {} if the device is unknown.
    """
    config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
    if not config:
        return {}

    config['VM'] = self.get_uuid()

    if dev_class == 'vif':
        # Fill Xen-API keys from their legacy equivalents if absent.
        if not config.has_key('name'):
            config['name'] = config.get('vifname', '')
        if not config.has_key('MAC'):
            config['MAC'] = config.get('mac', '')
        if not config.has_key('type'):
            config['type'] = 'paravirtualised'
        if not config.has_key('device'):
            devid = config.get('id')
            if devid != None:
                config['device'] = 'eth%s' % devid
            else:
                config['device'] = ''

        if not config.has_key('network'):
            try:
                bridge = config.get('bridge', None)
                if bridge is None:
                    # Derive the bridge from the vif name via brctl.
                    from xen.util import Brctl
                    if_to_br = dict([(i,b)
                                     for (b,ifs) in Brctl.get_state().items()
                                     for i in ifs])
                    vifname = "vif%s.%s" % (self.getDomid(),
                                            config.get('id'))
                    bridge = if_to_br.get(vifname, None)
                config['network'] = \
                    XendNode.instance().bridge_to_network(
                    config.get('bridge')).get_uuid()
            except Exception:
                log.exception('bridge_to_network')
                # Ignore this for now -- it may happen if the device
                # has been specified using the legacy methods, but at
                # some point we're going to have to figure out how to
                # handle that properly.

        config['MTU'] = 1500 # TODO

        if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
            # NOTE(review): 'devid' is only bound above when the
            # config lacked a 'device' key -- it looks like this path
            # can hit an unbound local otherwise; confirm.
            xennode = XendNode.instance()
            rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
            config['io_read_kbs'] = rx_bps/1024
            config['io_write_kbs'] = tx_bps/1024
            rx, tx = xennode.get_vif_stat(self.domid, devid)
            config['io_total_read_kbs'] = rx/1024
            config['io_total_write_kbs'] = tx/1024
        else:
            config['io_read_kbs'] = 0.0
            config['io_write_kbs'] = 0.0
            config['io_total_read_kbs'] = 0.0
            config['io_total_write_kbs'] = 0.0

        config['security_label'] = config.get('security_label', '')

    if dev_class == 'vbd':

        if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
            controller = self.getDeviceController(dev_class)
            devid, _1, _2 = controller.getDeviceDetails(config)
            xennode = XendNode.instance()
            rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
            config['io_read_kbs'] = rd_blkps
            config['io_write_kbs'] = wr_blkps
        else:
            config['io_read_kbs'] = 0.0
            config['io_write_kbs'] = 0.0

        config['VDI'] = config.get('VDI', '')
        config['device'] = config.get('dev', '')
        # "xvda:cdrom" style names carry the device type after ':'.
        if ':' in config['device']:
            vbd_name, vbd_type = config['device'].split(':', 1)
            config['device'] = vbd_name
            if vbd_type == 'cdrom':
                config['type'] = XEN_API_VBD_TYPE[0]
            else:
                config['type'] = XEN_API_VBD_TYPE[1]

        config['driver'] = 'paravirtualised' # TODO
        config['image'] = config.get('uname', '')

        if config.get('mode', 'r') == 'r':
            config['mode'] = 'RO'
        else:
            config['mode'] = 'RW'

    if dev_class == 'vtpm':
        if not config.has_key('type'):
            config['type'] = 'paravirtualised' # TODO
        if not config.has_key('backend'):
            config['backend'] = "00000000-0000-0000-0000-000000000000"

    return config
def get_dev_property(self, dev_class, dev_uuid, field):
    """Return one field of a device's Xen-API config, raising
    XendError when the field does not exist."""
    config = self.get_dev_xenapi_config(dev_class, dev_uuid)
    if field in config:
        return config[field]
    raise XendError('Invalid property for device: %s' % field)
def set_dev_property(self, dev_class, dev_uuid, field, value):
    """Set one field on the stored device config for dev_uuid."""
    _, dev_config = self.info['devices'][dev_uuid]
    dev_config[field] = value
def get_vcpus_util(self):
    """Return {vcpu-number-as-string: utilisation} for a live domain,
    or {} when the domain is not instantiated."""
    xennode = XendNode.instance()
    vcpu_util = {}
    if 'VCPUs_max' in self.info and self.domid != None:
        for vcpu in range(self.info['VCPUs_max']):
            vcpu_util[str(vcpu)] = xennode.get_vcpu_util(self.domid, vcpu)
    return vcpu_util
def get_consoles(self):
    """Return the list of console device UUIDs."""
    return self.info.get('console_refs', [])

def get_vifs(self):
    """Return the list of VIF device UUIDs."""
    return self.info.get('vif_refs', [])

def get_vbds(self):
    """Return the list of VBD device UUIDs."""
    return self.info.get('vbd_refs', [])

def get_vtpms(self):
    """Return the list of vTPM device UUIDs."""
    return self.info.get('vtpm_refs', [])

def get_dpcis(self):
    """Return the DPCI UUIDs registered for this VM in XendAPIStore."""
    return XendDPCI.get_by_VM(self.info.get('uuid'))

def get_dscsis(self):
    """Return the DSCSI UUIDs registered for this VM in XendAPIStore."""
    return XendDSCSI.get_by_VM(self.info.get('uuid'))
def create_vbd(self, xenapi_vbd, vdi_image_path):
    """Create a VBD using a VDI from XendStorageRepository.

    @param xenapi_vbd: vbd struct from the Xen API
    @param vdi_image_path: VDI UUID
    @rtype: string
    @return: uuid of the device
    """
    xenapi_vbd['image'] = vdi_image_path
    # 'tap:'-prefixed images go through the blktap controller.
    if vdi_image_path.startswith('tap'):
        dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
    else:
        dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                            XEN_API_VM_POWER_STATE_PAUSED):
        # Domain is live: hot-plug the device now.
        _, config = self.info['devices'][dev_uuid]

        if vdi_image_path.startswith('tap'):
            dev_control = self.getDeviceController('tap')
        else:
            dev_control = self.getDeviceController('vbd')

        try:
            devid = dev_control.createDevice(config)
            dev_control.waitForDevice(devid)
            self.info.device_update(dev_uuid,
                                    cfg_xenapi = {'devid': devid})
        except Exception, exn:
            # Roll back the config entry added above, then re-raise.
            log.exception(exn)
            del self.info['devices'][dev_uuid]
            self.info['vbd_refs'].remove(dev_uuid)
            raise

    return dev_uuid
def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
    """Create a phantom VBD (blktap device) using a VDI from
    XendStorageRepository.

    @param xenapi_vbd: vbd struct from the Xen API
    @param vdi_image_path: VDI UUID
    @return: devid of the created device (the original docstring said
        'uuid of the device', but the code returns config['devid']).
    """
    xenapi_vbd['image'] = vdi_image_path
    dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
        _, config = self.info['devices'][dev_uuid]
        config['devid'] = self.getDeviceController('tap').createDevice(config)

    # NOTE(review): when the domain is NOT running, 'config' is never
    # bound and this return looks like it would raise NameError --
    # presumably callers only use this on running domains; confirm.
    return config['devid']
def create_vif(self, xenapi_vif):
    """Create VIF device from the passed struct in Xen API format.

    @param xenapi_vif: Xen API VIF Struct.
    @rtype: string
    @return: UUID
    """
    dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                            XEN_API_VM_POWER_STATE_PAUSED):
        # Domain is live: hot-plug the vif now.
        _, config = self.info['devices'][dev_uuid]
        dev_control = self.getDeviceController('vif')

        try:
            devid = dev_control.createDevice(config)
            dev_control.waitForDevice(devid)
            self.info.device_update(dev_uuid,
                                    cfg_xenapi = {'devid': devid})
        except Exception, exn:
            # Roll back the config entry added above, then re-raise.
            log.exception(exn)
            del self.info['devices'][dev_uuid]
            self.info['vif_refs'].remove(dev_uuid)
            raise

    return dev_uuid
def create_vtpm(self, xenapi_vtpm):
    """Create a VTPM device from the passed struct in Xen API format.

    @return: uuid of the device
    @rtype: string
    @raise VmError: domain not halted, or a vTPM already exists
    @raise XendError: the device could not be added
    """
    if self._stateGet() not in (DOM_STATE_HALTED,):
        raise VmError("Can only add vTPM to a halted domain.")
    if self.get_vtpms() != []:
        raise VmError('Domain already has a vTPM.')

    vtpm_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
    if not vtpm_uuid:
        raise XendError('Failed to create device')
    return vtpm_uuid
def create_console(self, xenapi_console):
    """ Create a console device from a Xen API struct.

    @return: uuid of device
    @rtype: string
    @raise VmError: if the domain is not halted
    @raise XendError: if the device could not be added
    """
    if self._stateGet() not in (DOM_STATE_HALTED,):
        raise VmError("Can only add console to a halted domain.")

    console_uuid = self.info.device_add('console',
                                        cfg_xenapi = xenapi_console)
    if not console_uuid:
        raise XendError('Failed to create device')
    return console_uuid
def set_console_other_config(self, console_uuid, other_config):
    """Replace the 'other_config' map of the given console device."""
    self.info.console_update(console_uuid, 'other_config', other_config)
def create_dpci(self, xenapi_pci):
    """Create pci device from the passed struct in Xen API format.

    @param xenapi_pci: DPCI struct from Xen API
    @rtype: bool
    #@rtype: string
    @return: True if successfully created device
    #@return: UUID
    """

    dpci_uuid = uuid.createString()

    # Flatten the options dict into [[key, value], ...] pairs.
    dpci_opts = []
    opts_dict = xenapi_pci.get('options')
    for k in opts_dict.keys():
        dpci_opts.append([k, opts_dict[k]])
    opts_sxp = pci_opts_list_to_sxp(dpci_opts)

    # Convert xenapi to sxp
    ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

    dev_sxp = ['dev',
               ['domain', '0x%02x' % ppci.get_domain()],
               ['bus', '0x%02x' % ppci.get_bus()],
               ['slot', '0x%02x' % ppci.get_slot()],
               ['func', '0x%1x' % ppci.get_func()],
               ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
               ['uuid', dpci_uuid]]
    dev_sxp = sxp.merge(dev_sxp, opts_sxp)

    target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
        # Not running: edit the stored config and persist it.

        old_pci_sxp = self._getDeviceInfo_pci(0)

        if old_pci_sxp is None:
            # First PCI device for this domain.
            dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
            if not dev_uuid:
                raise XendError('Failed to create device')

        else:
            # Append the new dev to the existing pci device record.
            new_pci_sxp = ['pci']
            for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                new_pci_sxp.append(existing_dev)
            new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

            dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_pci_sxp)

        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        # Running: hot-plug through device_configure.
        try:
            self.device_configure(target_pci_sxp)

        except Exception, exn:
            raise XendError('Failed to create device')

    return dpci_uuid
def create_dscsi(self, xenapi_dscsi):
    """Create scsi device from the passed struct in Xen API format.

    @param xenapi_dscsi: DSCSI struct from Xen API
    @rtype: string
    @return: UUID
    """

    dscsi_uuid = uuid.createString()

    # Convert xenapi to sxp
    pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
    # The virtual host number is the leading field of "h:c:t:l".
    devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
    target_vscsi_sxp = \
        ['vscsi',
            ['dev',
                ['devid', devid],
                ['p-devname', pscsi.get_dev_name()],
                ['p-dev', pscsi.get_physical_HCTL()],
                ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                ['state', xenbusState['Initialising']],
                ['uuid', dscsi_uuid]
            ],
            ['feature-host', 0]
        ]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
        # Not running: edit the stored config and persist it.

        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

        if cur_vscsi_sxp is None:
            # First vscsi device on this virtual host.
            dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
            if not dev_uuid:
                raise XendError('Failed to create device')

        else:
            # Append the new dev to the existing vscsi record.
            new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
            for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                new_vscsi_sxp.append(existing_dev)
            new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

            dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_vscsi_sxp)

        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        # Running: hot-plug through device_configure.
        try:
            self.device_configure(target_vscsi_sxp)

        except Exception, exn:
            raise XendError('Failed to create device')

    return dscsi_uuid
def destroy_device_by_uuid(self, dev_type, dev_uuid):
    """Destroy the device identified by dev_uuid.

    The device is always removed from self.info -- the finally block
    deletes the record even when the controller-level destroy fails.
    """
    if dev_uuid not in self.info['devices']:
        raise XendError('Device does not exist')

    try:
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            # Live domain: tear down the device in the backend too.
            _, config = self.info['devices'][dev_uuid]
            devid = config.get('devid')
            if devid != None:
                self.getDeviceController(dev_type).destroyDevice(devid, force = False)
            else:
                raise XendError('Unable to get devid for device: %s:%s' %
                                (dev_type, dev_uuid))
    finally:
        # Drop the config record and the class-specific reference
        # regardless of whether the backend teardown succeeded.
        del self.info['devices'][dev_uuid]
        self.info['%s_refs' % dev_type].remove(dev_uuid)
def destroy_vbd(self, dev_uuid):
    """Destroy the VBD with the given device UUID."""
    self.destroy_device_by_uuid('vbd', dev_uuid)

def destroy_vif(self, dev_uuid):
    """Destroy the VIF with the given device UUID."""
    self.destroy_device_by_uuid('vif', dev_uuid)

def destroy_vtpm(self, dev_uuid):
    """Destroy the vTPM with the given device UUID."""
    self.destroy_device_by_uuid('vtpm', dev_uuid)
def destroy_dpci(self, dev_uuid):
    """Remove the passed-through PCI function identified by dev_uuid
    (a DPCI UUID) from this domain's pci device record."""

    dpci = XendAPIStore.get(dev_uuid, 'DPCI')
    ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

    # Note: dev_uuid is rebound below to the pci *record* uuid.
    old_pci_sxp = self._getDeviceInfo_pci(0)
    dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
    target_dev = None
    new_pci_sxp = ['pci']
    for dev in sxp.children(old_pci_sxp, 'dev'):
        # Match the dev entry by its canonical BDF name.
        domain = int(sxp.child_value(dev, 'domain'), 16)
        bus = int(sxp.child_value(dev, 'bus'), 16)
        slot = int(sxp.child_value(dev, 'slot'), 16)
        func = int(sxp.child_value(dev, 'func'), 16)
        name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
        if ppci.get_name() == name:
            target_dev = dev
        else:
            new_pci_sxp.append(dev)

    if target_dev is None:
        raise XendError('Failed to destroy device')

    target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
        # Not running: rewrite the stored record without the device.

        self.info.device_update(dev_uuid, new_pci_sxp)
        if len(sxp.children(new_pci_sxp, 'dev')) == 0:
            # No devs left: drop the whole pci record.
            del self.info['devices'][dev_uuid]
        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        # Running: hot-unplug through device_configure.
        try:
            self.device_configure(target_pci_sxp)

        except Exception, exn:
            raise XendError('Failed to destroy device')
def destroy_dscsi(self, dev_uuid):
    """Remove the virtual SCSI device identified by dev_uuid (a DSCSI
    UUID) from this domain's vscsi device record."""
    dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
    devid = dscsi.get_virtual_host()
    vHCTL = dscsi.get_virtual_HCTL()
    cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
    # Note: dev_uuid is rebound to the vscsi *record* uuid.
    dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

    target_dev = None
    new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
    for dev in sxp.children(cur_vscsi_sxp, 'dev'):
        # Match the dev entry by its virtual h:c:t:l address.
        if vHCTL == sxp.child_value(dev, 'v-dev'):
            target_dev = dev
        else:
            new_vscsi_sxp.append(dev)

    if target_dev is None:
        raise XendError('Failed to destroy device')

    target_dev.append(['state', xenbusState['Closing']])
    target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
        # Not running: rewrite the stored record without the device.

        self.info.device_update(dev_uuid, new_vscsi_sxp)
        if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
            # No devs left: drop the whole vscsi record.
            del self.info['devices'][dev_uuid]
        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        # Running: hot-unplug through device_configure.
        try:
            self.device_configure(target_vscsi_sxp)

        except Exception, exn:
            raise XendError('Failed to destroy device')
def destroy_xapi_instances(self):
    """Destroy Xen-API instances stored in XendAPIStore.

    No-op while the parent domain still exists as a valid VM.
    """
    # Xen-API classes based on XendBase have their instances stored
    # in XendAPIStore. Cleanup these instances here, if they are supposed
    # to be destroyed when the parent domain is dead.

    # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
    # XendBase and there's no need to remove them from XendAPIStore.

    from xen.xend import XendDomain
    if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
        # domain still exists.
        return

    # Destroy the VMMetrics instance.
    if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
            is not None:
        self.metrics.destroy()

    # Destroy DPCI instances.
    for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
        XendAPIStore.deregister(dpci_uuid, "DPCI")

    # Destroy DSCSI instances.
    for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
        XendAPIStore.deregister(dscsi_uuid, "DSCSI")
def has_device(self, dev_class, dev_uuid):
    """True iff dev_uuid is listed under the class's '<class>_refs'."""
    refs = self.info['%s_refs' % dev_class.lower()]
    return dev_uuid in refs
def __str__(self):
    """Short human-readable summary: id, name, memory and state."""
    return '<domain id=%s name=%s memory=%s state=%s>' % \
           (str(self.domid), self.info['name_label'],
            str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])

# repr() shows the same summary as str().
__repr__ = __str__