ia64/xen-unstable

tools/python/xen/xend/XendDomainInfo.py @ 17993:bd97e45e073a

pvSCSI: fix xend

Previous "xend" assumed initial Xenbus state would be "Connected" when
LUN hot-plug starts. However it was not guaranteed in general, and it
may cause some problems.
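
The gist of the fix, illustrated by the rough sketch below (not the patch
itself): before driving a LUN hot-plug, xend can read the backend's Xenbus
state from xenstore and wait for "Connected" instead of assuming it. The
vscsi backend path layout, the polling interval and the helper name are
assumptions for illustration only.

    # Hedged sketch -- not the actual change in this changeset.
    # Xenbus states: 1=Initialising, ..., 4=Connected, 5=Closing, 6=Closed.
    import time
    from xen.xend.xenstore.xstransact import xstransact

    def wait_for_vscsi_backend_connected(backend_domid, domid, devid,
                                         timeout = 10.0):
        state_path = ("/local/domain/%d/backend/vscsi/%d/%d/state"
                      % (backend_domid, domid, devid))  # assumed layout
        deadline = time.time() + timeout
        while time.time() < deadline:
            if xstransact.Read(state_path) == "4":      # XenbusStateConnected
                return True
            time.sleep(0.1)
        return False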

Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jul 08 09:28:50 2008 +0100 (2008-07-08)
parents 6ae87b27ccea
children 823caffa7ddf
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
56 from xen.xend.XendVMMetrics import XendVMMetrics
58 from xen.xend.XendPPCI import XendPPCI
59 from xen.xend.XendDPCI import XendDPCI
60 from xen.xend import XendAPIStore
62 MIGRATE_TIMEOUT = 30.0
63 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
65 xc = xen.lowlevel.xc.xc()
66 xoptions = XendOptions.instance()
68 log = logging.getLogger("xend.XendDomainInfo")
69 #log.setLevel(logging.TRACE)
72 def create(config):
73 """Creates and start a VM using the supplied configuration.
75 @param config: A configuration object involving lists of tuples.
76 @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
78 @rtype: XendDomainInfo
79 @return: An up and running XendDomainInfo instance
80 @raise VmError: Invalid configuration or failure to start.
81 """
82 from xen.xend import XendDomain
83 domconfig = XendConfig.XendConfig(sxp_obj = config)
84 othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
85 if othervm is None or othervm.domid is None:
86 othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
87 if othervm is not None and othervm.domid is not None:
88 raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
89 log.debug("XendDomainInfo.create(%s)", scrub_password(config))
90 vm = XendDomainInfo(domconfig)
91 try:
92 vm.start()
93 except:
94 log.exception('Domain construction failed')
95 vm.destroy()
96 raise
98 return vm
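# --- Illustrative sketch (not part of the original file) -------------------
# How create() above might be driven: build a minimal SXP-style 'vm'
# configuration and start it.  The keys shown (name/memory/image) are
# assumptions based on the docstring's list-of-lists example; a real
# configuration carries many more entries.
def _example_create(name):
    cfg = ['vm',
           ['name', name],
           ['memory', 256],
           ['image', ['linux', ['kernel', '/boot/vmlinuz-xen']]]]
    dominfo = create(cfg)        # constructs and starts the domain
    return dominfo.getDomid()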
100 def create_from_dict(config_dict):
101 """Creates and start a VM using the supplied configuration.
103 @param config_dict: An configuration dictionary.
105 @rtype: XendDomainInfo
106 @return: An up and running XendDomainInfo instance
107 @raise VmError: Invalid configuration or failure to start.
108 """
110 log.debug("XendDomainInfo.create_from_dict(%s)",
111 scrub_password(config_dict))
112 vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
113 try:
114 vm.start()
115 except:
116 log.exception('Domain construction failed')
117 vm.destroy()
118 raise
119 return vm
121 def recreate(info, priv):
122 """Create the VM object for an existing domain. The domain must not
123 be dying, as the paths in the store should already have been removed,
124 and asking us to recreate them causes problems.
126 @param info: Parsed configuration
127 @type info: Dictionary
128 @param priv: Is a privileged domain (Dom 0)
129 @type priv: bool
131 @rtype: XendDomainInfo
132 @return: An up and running XendDomainInfo instance
133 @raise VmError: Invalid configuration.
134 @raise XendError: Errors with configuration.
135 """
137 log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
139 assert not info['dying']
141 xeninfo = XendConfig.XendConfig(dominfo = info)
142 xeninfo['is_control_domain'] = priv
143 xeninfo['is_a_template'] = False
144 domid = xeninfo['domid']
145 uuid1 = uuid.fromString(xeninfo['uuid'])
146 needs_reinitialising = False
148 dompath = GetDomainPath(domid)
149 if not dompath:
150 raise XendError('No domain path in store for existing '
151 'domain %d' % domid)
153 log.info("Recreating domain %d, UUID %s. at %s" %
154 (domid, xeninfo['uuid'], dompath))
156 # need to verify the path and uuid if not Domain-0
157 # if the required uuid and vm aren't set, then that means
158 # we need to recreate the dom with our own values
159 #
160 # NOTE: this is probably not desirable, really we should just
161 # abort or ignore, but there may be cases where xenstore's
162 # entry disappears (eg. xenstore-rm /)
163 #
164 try:
165 vmpath = xstransact.Read(dompath, "vm")
166 if not vmpath:
167 if not priv:
168 log.warn('/local/domain/%d/vm is missing. recreate is '
169 'confused, trying our best to recover' % domid)
170 needs_reinitialising = True
171 raise XendError('reinit')
173 uuid2_str = xstransact.Read(vmpath, "uuid")
174 if not uuid2_str:
175 log.warn('%s/uuid/ is missing. recreate is confused, '
176 'trying our best to recover' % vmpath)
177 needs_reinitialising = True
178 raise XendError('reinit')
180 uuid2 = uuid.fromString(uuid2_str)
181 if uuid1 != uuid2:
182 log.warn('UUID in /vm does not match the UUID in /dom/%d.'
183 ' Trying our best to recover' % domid)
184 needs_reinitialising = True
185 except XendError:
186 pass # our best shot at 'goto' in python :)
188 vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
189 vmpath = vmpath)
191 if needs_reinitialising:
192 vm._recreateDom()
193 vm._removeVm()
194 vm._storeVmDetails()
195 vm._storeDomDetails()
197 vm.image = image.create(vm, vm.info)
198 vm.image.recreate()
200 vm._registerWatches()
201 vm.refreshShutdown(xeninfo)
203 # register the domain in the list
204 from xen.xend import XendDomain
205 XendDomain.instance().add_domain(vm)
207 return vm
210 def restore(config):
211 """Create a domain and a VM object to do a restore.
213 @param config: Domain SXP configuration
214 @type config: list of lists. (see C{create})
216 @rtype: XendDomainInfo
217 @return: An up and running XendDomainInfo instance
218 @raise VmError: Invalid configuration or failure to start.
219 @raise XendError: Errors with configuration.
220 """
222 log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
223 vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
224 resume = True)
225 try:
226 vm.resume()
227 return vm
228 except:
229 vm.destroy()
230 raise
232 def createDormant(domconfig):
233 """Create a dormant/inactive XenDomainInfo without creating VM.
234 This is for creating instances of persistent domains that are not
235 yet start.
237 @param domconfig: Parsed configuration
238 @type domconfig: XendConfig object
240 @rtype: XendDomainInfo
241 @return: A dormant XendDomainInfo instance
242 @raise XendError: Errors with configuration.
243 """
245 log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
247 # domid does not make sense for non-running domains.
248 domconfig.pop('domid', None)
249 vm = XendDomainInfo(domconfig)
250 return vm
252 def domain_by_name(name):
253 """Get domain by name
255 @param name: Name of the domain
256 @type name: string
257 @return: XendDomainInfo or None
258 """
259 from xen.xend import XendDomain
260 return XendDomain.instance().domain_lookup_by_name_nr(name)
263 def shutdown_reason(code):
264 """Get a shutdown reason from a code.
266 @param code: shutdown code
267 @type code: int
268 @return: shutdown reason
269 @rtype: string
270 """
271 return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
273 def dom_get(dom):
274 """Get info from xen for an existing domain.
276 @param dom: domain id
277 @type dom: int
278 @return: info or None
279 @rtype: dictionary
280 """
281 try:
282 domlist = xc.domain_getinfo(dom, 1)
283 if domlist and dom == domlist[0]['domid']:
284 return domlist[0]
285 except Exception, err:
286 # ignore missing domain
287 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
288 return None
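# --- Illustrative sketch (not part of the original file) -------------------
# Combining dom_get() and shutdown_reason() above to report why a domain
# stopped.  The 'shutdown'/'shutdown_reason' keys are the xc.domain_getinfo()
# fields already relied upon later in this file.
def _example_report_shutdown(domid):
    info = dom_get(domid)
    if info and info['shutdown']:
        log.info("domain %d shut down: %s",
                 domid, shutdown_reason(info['shutdown_reason']))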
291 class XendDomainInfo:
292 """An object represents a domain.
294 @TODO: try to unify dom and domid, they mean the same thing, but
295 xc refers to it as dom, and everywhere else, including
296 xenstore it is domid. The best way is to change xc's
297 python interface.
299 @ivar info: Parsed configuration
300 @type info: dictionary
301 @ivar domid: Domain ID (if VM has started)
302 @type domid: int or None
303 @ivar vmpath: XenStore path to this VM.
304 @type vmpath: string
305 @ivar dompath: XenStore path to this Domain.
306 @type dompath: string
307 @ivar image: Reference to the VM Image.
308 @type image: xen.xend.image.ImageHandler
309 @ivar store_port: event channel to xenstored
310 @type store_port: int
311 @ivar console_port: event channel to xenconsoled
312 @type console_port: int
313 @ivar store_mfn: xenstored mfn
314 @type store_mfn: int
315 @ivar console_mfn: xenconsoled mfn
316 @type console_mfn: int
317 @ivar notes: OS image notes
318 @type notes: dictionary
319 @ivar vmWatch: reference to a watch on the xenstored vmpath
320 @type vmWatch: xen.xend.xenstore.xswatch
321 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
322 @type shutdownWatch: xen.xend.xenstore.xswatch
323 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
324 @type shutdownStartTime: float or None
325 # @ivar state: Domain state
326 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
327 @ivar state_updated: lock for self.state
328 @type state_updated: threading.Condition
329 @ivar refresh_shutdown_lock: lock for polling shutdown state
330 @type refresh_shutdown_lock: threading.Condition
331 @ivar _deviceControllers: device controller cache for this domain
332 @type _deviceControllers: dict 'string' to DevControllers
333 """
335 def __init__(self, info, domid = None, dompath = None, augment = False,
336 priv = False, resume = False, vmpath = None):
337 """Constructor for a domain
339 @param info: parsed configuration
340 @type info: dictionary
341 @keyword domid: Set initial domain id (if any)
342 @type domid: int
343 @keyword dompath: Set initial dompath (if any)
344 @type dompath: string
345 @keyword augment: Augment given info with xenstored VM info
346 @type augment: bool
347 @keyword priv: Is a privileged domain (Dom 0)
348 @type priv: bool
349 @keyword resume: Is this domain being resumed?
350 @type resume: bool
351 """
353 self.info = info
354 if domid == None:
355 self.domid = self.info.get('domid')
356 else:
357 self.domid = domid
359 #REMOVE: uuid is now generated in XendConfig
360 #if not self._infoIsSet('uuid'):
361 # self.info['uuid'] = uuid.toString(uuid.create())
363 # Find a unique /vm/<uuid>/<integer> path if not specified.
364 # This avoids conflict between pre-/post-migrate domains when doing
365 # localhost relocation.
366 self.vmpath = vmpath
367 i = 0
368 while self.vmpath == None:
369 self.vmpath = XS_VMROOT + self.info['uuid']
370 if i != 0:
371 self.vmpath = self.vmpath + '-' + str(i)
372 try:
373 if self._readVm("uuid"):
374 self.vmpath = None
375 i = i + 1
376 except:
377 pass
379 self.dompath = dompath
381 self.image = None
382 self.store_port = None
383 self.store_mfn = None
384 self.console_port = None
385 self.console_mfn = None
387 self.native_protocol = None
389 self.vmWatch = None
390 self.shutdownWatch = None
391 self.shutdownStartTime = None
392 self._resume = resume
394 self.state_updated = threading.Condition()
395 self.refresh_shutdown_lock = threading.Condition()
396 self._stateSet(DOM_STATE_HALTED)
398 self._deviceControllers = {}
400 for state in DOM_STATES_OLD:
401 self.info[state] = 0
403 if augment:
404 self._augmentInfo(priv)
406 self._checkName(self.info['name_label'])
408 self.metrics = XendVMMetrics(uuid.createString(), self)
411 #
412 # Public functions available through XMLRPC
413 #
416 def start(self, is_managed = False):
417 """Attempts to start the VM by do the appropriate
418 initialisation if it not started.
419 """
420 from xen.xend import XendDomain
422 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
423 try:
424 XendTask.log_progress(0, 30, self._constructDomain)
425 XendTask.log_progress(31, 60, self._initDomain)
427 XendTask.log_progress(61, 70, self._storeVmDetails)
428 XendTask.log_progress(71, 80, self._storeDomDetails)
429 XendTask.log_progress(81, 90, self._registerWatches)
430 XendTask.log_progress(91, 100, self.refreshShutdown)
432 xendomains = XendDomain.instance()
433 xennode = XendNode.instance()
435 # save running configuration if XendDomains believe domain is
436 # persistent
437 if is_managed:
438 xendomains.managed_config_save(self)
440 if xennode.xenschedinfo() == 'credit':
441 xendomains.domain_sched_credit_set(self.getDomid(),
442 self.getWeight(),
443 self.getCap())
444 except:
445 log.exception('VM start failed')
446 self.destroy()
447 raise
448 else:
449 raise XendError('VM already running')
451 def resume(self):
452 """Resumes a domain that has come back from suspension."""
453 state = self._stateGet()
454 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
455 try:
456 self._constructDomain()
457 self._storeVmDetails()
458 self._createDevices()
459 self._createChannels()
460 self._storeDomDetails()
461 self._endRestore()
462 except:
463 log.exception('VM resume failed')
464 self.destroy()
465 raise
466 else:
467 raise XendError('VM is not suspended; it is %s'
468 % XEN_API_VM_POWER_STATE[state])
470 def shutdown(self, reason):
471 """Shutdown a domain by signalling this via xenstored."""
472 log.debug('XendDomainInfo.shutdown(%s)', reason)
473 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
474 raise XendError('Domain cannot be shutdown')
476 if self.domid == 0:
477 raise XendError('Domain 0 cannot be shutdown')
479 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
480 raise XendError('Invalid reason: %s' % reason)
481 self._removeVm('xend/previous_restart_time')
482 self.storeDom("control/shutdown", reason)
484 # HVM domain shuts itself down only if it has PV drivers
485 if self.info.is_hvm():
486 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
487 if not hvm_pvdrv:
488 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
489 log.info("HVM save:remote shutdown dom %d!", self.domid)
490 xc.domain_shutdown(self.domid, code)
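# --- Illustrative sketch (standalone helper, not a method of this class) ---
# Requesting a clean reboot through the public shutdown() API above;
# 'reboot' is one of the DOMAIN_SHUTDOWN_REASONS values it accepts.
def _example_request_reboot(dominfo):
    try:
        dominfo.shutdown('reboot')
    except XendError, exn:
        log.warn("reboot request refused: %s", str(exn))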
492 def pause(self):
493 """Pause domain
495 @raise XendError: Failed pausing a domain
496 """
497 try:
498 xc.domain_pause(self.domid)
499 self._stateSet(DOM_STATE_PAUSED)
500 except Exception, ex:
501 log.exception(ex)
502 raise XendError("Domain unable to be paused: %s" % str(ex))
504 def unpause(self):
505 """Unpause domain
507 @raise XendError: Failed unpausing a domain
508 """
509 try:
510 xc.domain_unpause(self.domid)
511 self._stateSet(DOM_STATE_RUNNING)
512 except Exception, ex:
513 log.exception(ex)
514 raise XendError("Domain unable to be unpaused: %s" % str(ex))
516 def send_sysrq(self, key):
517 """ Send a Sysrq equivalent key via xenstored."""
518 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
519 raise XendError("Domain '%s' is not started" % self.info['name_label'])
521 asserts.isCharConvertible(key)
522 self.storeDom("control/sysrq", '%c' % key)
524 def sync_pcidev_info(self):
526 if not self.info.is_hvm():
527 return
529 devid = '0'
530 dev_info = self._getDeviceInfo_pci(devid)
531 if dev_info is None:
532 return
534 # get the virtual slot info from xenstore
535 dev_uuid = sxp.child_value(dev_info, 'uuid')
536 pci_conf = self.info['devices'][dev_uuid][1]
537 pci_devs = pci_conf['devs']
539 count = 0
540 vslots = None
541 while vslots is None and count < 20:
542 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
543 % (self.getDomid(), devid))
544 time.sleep(0.1)
545 count += 1
546 if vslots is None:
547 log.error("Device model didn't tell the vslots for PCI device")
548 return
550 # delete the trailing delimiter
551 if vslots[-1] == ";":
552 vslots = vslots[:-1]
554 slot_list = vslots.split(';')
555 if len(slot_list) != len(pci_devs):
556 log.error("Device model's pci dev num dismatch")
557 return
559 # update the vslot info
560 count = 0
561 for x in pci_devs:
562 x['vslt'] = slot_list[count]
563 count += 1
566 def hvm_pci_device_create(self, dev_config):
567 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
568 % scrub_password(dev_config))
570 if not self.info.is_hvm():
571 raise VmError("hvm_pci_device_create called on non-HVM guest")
573 #all the PCI devs share one conf node
574 devid = '0'
576 new_dev = dev_config['devs'][0]
577 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
579 # check for conflicts before triggering the hotplug event
580 if dev_info is not None:
581 dev_uuid = sxp.child_value(dev_info, 'uuid')
582 pci_conf = self.info['devices'][dev_uuid][1]
583 pci_devs = pci_conf['devs']
584 for x in pci_devs:
585 if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
586 int(x['vslt'], 16) != 0 ):
587 raise VmError("vslot %s already have a device." % (new_dev['vslt']))
589 if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
590 int(x['bus'], 16) == int(new_dev['bus'], 16) and
591 int(x['slot'], 16) == int(new_dev['slot'], 16) and
592 int(x['func'], 16) == int(new_dev['func'], 16) ):
593 raise VmError("device is already inserted")
595 # Test whether the devices can be assigned with VT-d
596 pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
597 new_dev['bus'],
598 new_dev['slot'],
599 new_dev['func'])
600 bdf = xc.test_assign_device(self.domid, pci_str)
601 if bdf != 0:
602 bus = (bdf >> 16) & 0xff
603 devfn = (bdf >> 8) & 0xff
604 dev = (devfn >> 3) & 0x1f
605 func = devfn & 0x7
606 raise VmError("Fail to hot insert device(%x:%x.%x): maybe VT-d is "
607 "not enabled, or the device is not exist, or it "
608 "has already been assigned to other domain"
609 % (bus, dev, func))
611 bdf_str = "%s:%s:%s.%s@%s" % (new_dev['domain'],
612 new_dev['bus'],
613 new_dev['slot'],
614 new_dev['func'],
615 new_dev['vslt'])
616 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
619 def device_create(self, dev_config):
620 """Create a new device.
622 @param dev_config: device configuration
623 @type dev_config: SXP object (parsed config)
624 """
625 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
626 dev_type = sxp.name(dev_config)
627 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
628 dev_config_dict = self.info['devices'][dev_uuid][1]
629 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
631 if self.domid is not None:
632 try:
633 dev_config_dict['devid'] = devid = \
634 self._createDevice(dev_type, dev_config_dict)
635 self._waitForDevice(dev_type, devid)
636 except VmError, ex:
637 del self.info['devices'][dev_uuid]
638 if dev_type == 'tap':
639 self.info['vbd_refs'].remove(dev_uuid)
640 else:
641 self.info['%s_refs' % dev_type].remove(dev_uuid)
642 raise ex
643 else:
644 devid = None
646 xen.xend.XendDomain.instance().managed_config_save(self)
647 return self.getDeviceController(dev_type).sxpr(devid)
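# --- Illustrative sketch (standalone helper, not a method of this class) ---
# Hot-plugging a block device through device_create() above.  The vbd SXP
# fields (uname/dev/mode) follow xend's legacy block-device configuration;
# the backend device path shown is hypothetical.
def _example_attach_disk(dominfo):
    vbd_sxp = ['vbd',
               ['uname', 'phy:/dev/vg0/guest-data'],
               ['dev', 'xvdb'],
               ['mode', 'w']]
    return dominfo.device_create(vbd_sxp)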
650 def pci_device_configure(self, dev_sxp, devid = 0):
651 """Configure an existing pci device.
653 @param dev_sxp: device configuration
654 @type dev_sxp: SXP object (parsed config)
655 @param devid: device id
656 @type devid: int
657 @return: Returns True if successfully updated device
658 @rtype: boolean
659 """
660 log.debug("XendDomainInfo.pci_device_configure: %s"
661 % scrub_password(dev_sxp))
663 dev_class = sxp.name(dev_sxp)
665 if dev_class != 'pci':
666 return False
668 pci_state = sxp.child_value(dev_sxp, 'state')
669 existing_dev_info = self._getDeviceInfo_pci(devid)
671 if existing_dev_info is None and pci_state != 'Initialising':
672 raise XendError("Cannot detach when pci platform does not exist")
674 pci_dev = sxp.children(dev_sxp, 'dev')[0]
675 dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
676 dev = dev_config['devs'][0]
678 # Do HVM specific processing
679 if self.info.is_hvm():
680 if pci_state == 'Initialising':
681 # HVM PCI device attachment
682 self.hvm_pci_device_create(dev_config)
683 # Update vslt
684 vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
685 % self.getDomid())
686 dev['vslt'] = vslt
687 for n in sxp.children(pci_dev):
688 if(n[0] == 'vslt'):
689 n[1] = vslt
690 else:
691 # HVM PCI device detachment
692 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
693 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
694 existing_pci_devs = existing_pci_conf['devs']
695 vslt = '0x0'
696 for x in existing_pci_devs:
697 if ( int(x['domain'], 16) == int(dev['domain'], 16) and
698 int(x['bus'], 16) == int(dev['bus'], 16) and
699 int(x['slot'], 16) == int(dev['slot'], 16) and
700 int(x['func'], 16) == int(dev['func'], 16) ):
701 vslt = x['vslt']
702 break
703 if vslt == '0x0':
704 raise VmError("Device %04x:%02x:%02x.%02x is not connected"
705 % (int(dev['domain'],16), int(dev['bus'],16),
706 int(dev['slot'],16), int(dev['func'],16)))
707 self.hvm_destroyPCIDevice(int(vslt, 16))
708 # Update vslt
709 dev['vslt'] = vslt
710 for n in sxp.children(pci_dev):
711 if(n[0] == 'vslt'):
712 n[1] = vslt
714 # If pci platform does not exist, create and exit.
715 if existing_dev_info is None:
716 self.device_create(dev_sxp)
717 return True
719 # use DevController.reconfigureDevice to change device config
720 dev_control = self.getDeviceController(dev_class)
721 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
722 if not self.info.is_hvm():
723 # in PV case, wait until backend state becomes connected.
724 dev_control.waitForDevice_reconfigure(devid)
725 num_devs = dev_control.cleanupDevice(devid)
727 # update XendConfig with new device info
728 if dev_uuid:
729 new_dev_sxp = dev_control.configuration(devid)
730 self.info.device_update(dev_uuid, new_dev_sxp)
732 # If there is no device left, destroy pci and remove config.
733 if num_devs == 0:
734 if self.info.is_hvm():
735 self.destroyDevice('pci', devid, True)
736 del self.info['devices'][dev_uuid]
737 platform = self.info['platform']
738 orig_dev_num = len(platform['pci'])
739 # TODO: can use this to keep some info to ask high level
740 # management tools to hot insert a new passthrough dev
741 # after migration
742 if orig_dev_num != 0:
743 #platform['pci'] = ["%dDEVs" % orig_dev_num]
744 platform['pci'] = []
745 else:
746 self.destroyDevice('pci', devid)
747 del self.info['devices'][dev_uuid]
749 xen.xend.XendDomain.instance().managed_config_save(self)
751 return True
753 def vscsi_device_configure(self, dev_sxp):
754 """Configure an existing vscsi device.
755 Modelled on the pci function above.
756 """
757 dev_class = sxp.name(dev_sxp)
758 if dev_class != 'vscsi':
759 return False
761 dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
762 dev = dev_config['devs'][0]
763 req_devid = sxp.child_value(dev_sxp, 'devid')
764 req_devid = int(req_devid)
765 existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
766 state = sxp.child_value(dev_sxp, 'state')
768 if state == 'Initialising':
769 # new create
770 # If request devid does not exist, create and exit.
771 if existing_dev_info is None:
772 self.device_create(dev_sxp)
773 return True
774 elif existing_dev_info == "exists":
775 raise XendError("The virtual device %s is already defined" % dev['v-dev'])
777 elif state == 'Closing':
778 if existing_dev_info is None:
779 raise XendError("Cannot detach vscsi device does not exist")
781 # use DevController.reconfigureDevice to change device config
782 dev_control = self.getDeviceController(dev_class)
783 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
784 dev_control.waitForDevice_reconfigure(req_devid)
785 num_devs = dev_control.cleanupDevice(req_devid)
787 # update XendConfig with new device info
788 if dev_uuid:
789 new_dev_sxp = dev_control.configuration(req_devid)
790 self.info.device_update(dev_uuid, new_dev_sxp)
792 # If there is no device left, destroy vscsi and remove config.
793 if num_devs == 0:
794 self.destroyDevice('vscsi', req_devid)
795 del self.info['devices'][dev_uuid]
797 return True
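# --- Illustrative sketch (standalone helper, not a method of this class) ---
# The state-driven flow implemented by vscsi_device_configure() above:
# 'Initialising' attaches a LUN, 'Closing' detaches one, anything else is
# treated here as unexpected before delegating to the real method.
def _example_vscsi_request(dominfo, dev_sxp):
    state = sxp.child_value(dev_sxp, 'state')
    if state not in ('Initialising', 'Closing'):
        log.warn("unexpected vscsi request state: %s", state)
    return dominfo.vscsi_device_configure(dev_sxp)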
799 def device_configure(self, dev_sxp, devid = None):
800 """Configure an existing device.
802 @param dev_sxp: device configuration
803 @type dev_config: SXP object (parsed config)
804 @param devid: device id
805 @type devid: int
806 @return: Returns True if successfully updated device
807 @rtype: boolean
808 """
810 # convert device sxp to a dict
811 dev_class = sxp.name(dev_sxp)
812 dev_config = {}
814 if dev_class == 'pci':
815 return self.pci_device_configure(dev_sxp)
817 if dev_class == 'vscsi':
818 return self.vscsi_device_configure(dev_sxp)
820 for opt_val in dev_sxp[1:]:
821 try:
822 dev_config[opt_val[0]] = opt_val[1]
823 except IndexError:
824 pass
826 # use DevController.reconfigureDevice to change device config
827 dev_control = self.getDeviceController(dev_class)
828 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
830 # update XendConfig with new device info
831 if dev_uuid:
832 self.info.device_update(dev_uuid, dev_sxp)
834 return True
836 def waitForDevices(self):
837 """Wait for this domain's configured devices to connect.
839 @raise VmError: if any device fails to initialise.
840 """
841 for devclass in XendDevices.valid_devices():
842 self.getDeviceController(devclass).waitForDevices()
844 def hvm_destroyPCIDevice(self, vslot):
845 log.debug("hvm_destroyPCIDevice called %s", vslot)
847 if not self.info.is_hvm():
848 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
850 #all the PCI devs share one conf node
851 devid = '0'
852 vslot = int(vslot)
853 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
854 dev_uuid = sxp.child_value(dev_info, 'uuid')
856 #delete the pci bdf config under the pci device
857 pci_conf = self.info['devices'][dev_uuid][1]
858 pci_len = len(pci_conf['devs'])
860 #find the pass-through device with the virtual slot
861 devnum = 0
862 for x in pci_conf['devs']:
863 if int(x['vslt'], 16) == vslot:
864 break
865 devnum += 1
867 if devnum >= pci_len:
868 raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))
870 if vslot == 0:
871 raise VmError("Device @ vslot 0x%x do not support hotplug." % (vslot))
873 bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
874 log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
876 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
878 return 0
880 def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
881 log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
882 deviceClass, devid)
884 if rm_cfg:
885 # Convert devid to device number. A device number is
886 # needed to remove its configuration.
887 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
889 # Save current sxprs. A device number and a backend
890 # path are needed to remove its configuration but sxprs
891 # do not have those after calling destroyDevice.
892 sxprs = self.getDeviceSxprs(deviceClass)
894 rc = None
895 if self.domid is not None:
896 rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
897 if not force and rm_cfg:
898 # The backend path, in addition to the device itself,
899 # has to be passed because the accompanying frontend
900 # path may already be void by the time its removal is
901 # actually issued, since destroyDevice is issued
902 # first.
903 for dev_num, dev_info in sxprs:
904 dev_num = int(dev_num)
905 if dev_num == dev:
906 for x in dev_info:
907 if x[0] == 'backend':
908 backend = x[1]
909 break
910 break
911 self._waitForDevice_destroy(deviceClass, devid, backend)
913 if rm_cfg:
914 if deviceClass == 'vif':
915 if self.domid is not None:
916 for dev_num, dev_info in sxprs:
917 dev_num = int(dev_num)
918 if dev_num == dev:
919 for x in dev_info:
920 if x[0] == 'mac':
921 mac = x[1]
922 break
923 break
924 dev_info = self._getDeviceInfo_vif(mac)
925 else:
926 _, dev_info = sxprs[dev]
927 else: # 'vbd' or 'tap'
928 dev_info = self._getDeviceInfo_vbd(dev)
929 # To remove the UUID of the device from refs,
930 # deviceClass must always be 'vbd'.
931 deviceClass = 'vbd'
932 if dev_info is None:
933 raise XendError("Device %s is not defined" % devid)
935 dev_uuid = sxp.child_value(dev_info, 'uuid')
936 del self.info['devices'][dev_uuid]
937 self.info['%s_refs' % deviceClass].remove(dev_uuid)
938 xen.xend.XendDomain.instance().managed_config_save(self)
940 return rc
942 def getDeviceSxprs(self, deviceClass):
943 if deviceClass == 'pci':
944 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
945 if dev_info is None:
946 return []
947 dev_uuid = sxp.child_value(dev_info, 'uuid')
948 pci_devs = self.info['devices'][dev_uuid][1]['devs']
949 pci_len = len(pci_devs)
950 return pci_devs
951 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
952 return self.getDeviceController(deviceClass).sxprs()
953 else:
954 sxprs = []
955 dev_num = 0
956 for dev_type, dev_info in self.info.all_devices_sxpr():
957 if dev_type == deviceClass:
958 sxprs.append([dev_num, dev_info])
959 dev_num += 1
960 return sxprs
962 def getBlockDeviceClass(self, devid):
963 # To get a device number from the devid,
964 # we temporarily use the device controller of VBD.
965 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
966 dev_info = self._getDeviceInfo_vbd(dev)
967 if dev_info:
968 return dev_info[0]
970 def _getDeviceInfo_vif(self, mac):
971 for dev_type, dev_info in self.info.all_devices_sxpr():
972 if dev_type != 'vif':
973 continue
974 if mac == sxp.child_value(dev_info, 'mac'):
975 return dev_info
977 def _getDeviceInfo_vbd(self, devid):
978 for dev_type, dev_info in self.info.all_devices_sxpr():
979 if dev_type != 'vbd' and dev_type != 'tap':
980 continue
981 dev = sxp.child_value(dev_info, 'dev')
982 dev = dev.split(':')[0]
983 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
984 if devid == dev:
985 return dev_info
987 def _getDeviceInfo_pci(self, devid):
988 for dev_type, dev_info in self.info.all_devices_sxpr():
989 if dev_type != 'pci':
990 continue
991 return dev_info
992 return None
994 def _getDeviceInfo_vscsi(self, devid, vdev):
995 devid = int(devid)
996 for dev_type, dev_info in self.info.all_devices_sxpr():
997 if dev_type != 'vscsi':
998 continue
999 existing_dev_uuid = sxp.child_value(dev_info, 'uuid')
1000 existing_conf = self.info['devices'][existing_dev_uuid][1]
1001 existing_dev = existing_conf['devs'][0]
1002 existing_devid = int(existing_dev['devid'])
1003 existing_vdev = existing_dev['v-dev']
1005 if vdev == existing_vdev:
1006 return "exists"
1008 if devid == existing_devid:
1009 return dev_info
1011 return None
1013 def setMemoryTarget(self, target):
1014 """Set the memory target of this domain.
1015 @param target: In MiB.
1016 """
1017 log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1018 self.info['name_label'], str(self.domid), target)
1020 MiB = 1024 * 1024
1022 if self.domid == 0:
1023 dom0_min_mem = xoptions.get_dom0_min_mem()
1024 memory_cur = self.get_memory_dynamic_max() / MiB
1025 if target < memory_cur and dom0_min_mem > target:
1026 raise XendError("memory_dynamic_max too small")
1028 self._safe_set_memory('memory_dynamic_min', target * MiB)
1029 self._safe_set_memory('memory_dynamic_max', target * MiB)
1031 if self.domid >= 0:
1032 self.storeVm("memory", target)
1033 self.storeDom("memory/target", target << 10)
1034 xen.xend.XendDomain.instance().managed_config_save(self)
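# --- Illustrative sketch (standalone helper, not a method of this class) ---
# The unit conventions used by setMemoryTarget() above: callers pass MiB,
# xenstore's memory/target is written in KiB (target << 10), and the
# memory_dynamic_* fields are kept internally in bytes.
def _example_balloon_to(dominfo, mib):
    dominfo.setMemoryTarget(mib)
    return {'xenstore_kib': mib * 1024,
            'internal_bytes': mib * 1024 * 1024}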
1036 def setMemoryMaximum(self, limit):
1037 """Set the maximum memory limit of this domain
1038 @param limit: In MiB.
1039 """
1040 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1041 self.info['name_label'], str(self.domid), limit)
1043 maxmem_cur = self.get_memory_static_max()
1044 MiB = 1024 * 1024
1045 self._safe_set_memory('memory_static_max', limit * MiB)
1047 if self.domid >= 0:
1048 maxmem = int(limit) * 1024
1049 try:
1050 return xc.domain_setmaxmem(self.domid, maxmem)
1051 except Exception, ex:
1052 self._safe_set_memory('memory_static_max', maxmem_cur)
1053 raise XendError(str(ex))
1054 xen.xend.XendDomain.instance().managed_config_save(self)
1057 def getVCPUInfo(self):
1058 try:
1059 # We include the domain name and ID, to help xm.
1060 sxpr = ['domain',
1061 ['domid', self.domid],
1062 ['name', self.info['name_label']],
1063 ['vcpu_count', self.info['VCPUs_max']]]
1065 for i in range(0, self.info['VCPUs_max']):
1066 if self.domid is not None:
1067 info = xc.vcpu_getinfo(self.domid, i)
1069 sxpr.append(['vcpu',
1070 ['number', i],
1071 ['online', info['online']],
1072 ['blocked', info['blocked']],
1073 ['running', info['running']],
1074 ['cpu_time', info['cpu_time'] / 1e9],
1075 ['cpu', info['cpu']],
1076 ['cpumap', info['cpumap']]])
1077 else:
1078 sxpr.append(['vcpu',
1079 ['number', i],
1080 ['online', 0],
1081 ['blocked', 0],
1082 ['running', 0],
1083 ['cpu_time', 0.0],
1084 ['cpu', -1],
1085 ['cpumap', self.info['cpus'][i] and \
1086 self.info['cpus'][i] or range(64)]])
1088 return sxpr
1090 except RuntimeError, exn:
1091 raise XendError(str(exn))
1094 def getDomInfo(self):
1095 return dom_get(self.domid)
1098 # internal functions ... TODO: re-categorised
1101 def _augmentInfo(self, priv):
1102 """Augment self.info, as given to us through L{recreate}, with
1103 values taken from the store. This recovers those values known
1104 to xend but not to the hypervisor.
1105 """
1106 augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1107 if priv:
1108 augment_entries.remove('memory')
1109 augment_entries.remove('maxmem')
1110 augment_entries.remove('vcpus')
1111 augment_entries.remove('vcpu_avail')
1113 vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1114 for k in augment_entries])
1116 # make returned lists into a dictionary
1117 vm_config = dict(zip(augment_entries, vm_config))
1119 for arg in augment_entries:
1120 val = vm_config[arg]
1121 if val != None:
1122 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1123 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1124 self.info[xapiarg] = val
1125 elif arg == "memory":
1126 self.info["static_memory_min"] = val
1127 elif arg == "maxmem":
1128 self.info["static_memory_max"] = val
1129 else:
1130 self.info[arg] = val
1132 # For dom0, we ignore any stored value for the vcpus fields, and
1133 # read the current value from Xen instead. This allows boot-time
1134 # settings to take precedence over any entries in the store.
1135 if priv:
1136 xeninfo = dom_get(self.domid)
1137 self.info['VCPUs_max'] = xeninfo['online_vcpus']
1138 self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1140 # read image value
1141 image_sxp = self._readVm('image')
1142 if image_sxp:
1143 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1145 # read devices
1146 devices = []
1147 for devclass in XendDevices.valid_devices():
1148 devconfig = self.getDeviceController(devclass).configurations()
1149 if devconfig:
1150 devices.extend(devconfig)
1152 if not self.info['devices'] and devices is not None:
1153 for device in devices:
1154 self.info.device_add(device[0], cfg_sxp = device)
1156 self._update_consoles()
1158 def _update_consoles(self, transaction = None):
1159 if self.domid == None or self.domid == 0:
1160 return
1162 # Update VT100 port if it exists
1163 if transaction is None:
1164 self.console_port = self.readDom('console/port')
1165 else:
1166 self.console_port = self.readDomTxn(transaction, 'console/port')
1167 if self.console_port is not None:
1168 serial_consoles = self.info.console_get_all('vt100')
1169 if not serial_consoles:
1170 cfg = self.info.console_add('vt100', self.console_port)
1171 self._createDevice('console', cfg)
1172 else:
1173 console_uuid = serial_consoles[0].get('uuid')
1174 self.info.console_update(console_uuid, 'location',
1175 self.console_port)
1178 # Update VNC port if it exists and write to xenstore
1179 if transaction is None:
1180 vnc_port = self.readDom('console/vnc-port')
1181 else:
1182 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1183 if vnc_port is not None:
1184 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1185 if dev_type == 'vfb':
1186 old_location = dev_info.get('location')
1187 listen_host = dev_info.get('vnclisten', 'localhost')
1188 new_location = '%s:%s' % (listen_host, str(vnc_port))
1189 if old_location == new_location:
1190 break
1192 dev_info['location'] = new_location
1193 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1194 vfb_ctrl = self.getDeviceController('vfb')
1195 vfb_ctrl.reconfigureDevice(0, dev_info)
1196 break
1199 # Function to update xenstore /vm/*
1202 def _readVm(self, *args):
1203 return xstransact.Read(self.vmpath, *args)
1205 def _writeVm(self, *args):
1206 return xstransact.Write(self.vmpath, *args)
1208 def _removeVm(self, *args):
1209 return xstransact.Remove(self.vmpath, *args)
1211 def _gatherVm(self, *args):
1212 return xstransact.Gather(self.vmpath, *args)
1214 def _listRecursiveVm(self, *args):
1215 return xstransact.ListRecursive(self.vmpath, *args)
1217 def storeVm(self, *args):
1218 return xstransact.Store(self.vmpath, *args)
1220 def permissionsVm(self, *args):
1221 return xstransact.SetPermissions(self.vmpath, *args)
1224 def _readVmTxn(self, transaction, *args):
1225 paths = map(lambda x: self.vmpath + "/" + x, args)
1226 return transaction.read(*paths)
1228 def _writeVmTxn(self, transaction, *args):
1229 paths = map(lambda x: self.vmpath + "/" + x, args)
1230 return transaction.write(*paths)
1232 def _removeVmTxn(self, transaction, *args):
1233 paths = map(lambda x: self.vmpath + "/" + x, args)
1234 return transaction.remove(*paths)
1236 def _gatherVmTxn(self, transaction, *args):
1237 paths = map(lambda x: self.vmpath + "/" + x, args)
1238 return transaction.gather(paths)
1240 def storeVmTxn(self, transaction, *args):
1241 paths = map(lambda x: self.vmpath + "/" + x, args)
1242 return transaction.store(*paths)
1244 def permissionsVmTxn(self, transaction, *args):
1245 paths = map(lambda x: self.vmpath + "/" + x, args)
1246 return transaction.set_permissions(*paths)
1249 # Function to update xenstore /dom/*
1252 def readDom(self, *args):
1253 return xstransact.Read(self.dompath, *args)
1255 def gatherDom(self, *args):
1256 return xstransact.Gather(self.dompath, *args)
1258 def _writeDom(self, *args):
1259 return xstransact.Write(self.dompath, *args)
1261 def _removeDom(self, *args):
1262 return xstransact.Remove(self.dompath, *args)
1264 def storeDom(self, *args):
1265 return xstransact.Store(self.dompath, *args)
1268 def readDomTxn(self, transaction, *args):
1269 paths = map(lambda x: self.dompath + "/" + x, args)
1270 return transaction.read(*paths)
1272 def gatherDomTxn(self, transaction, *args):
1273 paths = map(lambda x: self.dompath + "/" + x, args)
1274 return transaction.gather(*paths)
1276 def _writeDomTxn(self, transaction, *args):
1277 paths = map(lambda x: self.dompath + "/" + x, args)
1278 return transaction.write(*paths)
1280 def _removeDomTxn(self, transaction, *args):
1281 paths = map(lambda x: self.dompath + "/" + x, args)
1282 return transaction.remove(*paths)
1284 def storeDomTxn(self, transaction, *args):
1285 paths = map(lambda x: self.dompath + "/" + x, args)
1286 return transaction.store(*paths)
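# --- Illustrative sketch (standalone helper, not a method of this class) ---
# The helpers above split xenstore access between the per-VM tree
# (/vm/<uuid>..., via storeVm and friends) and the per-domain tree
# (/local/domain/<domid>, via storeDom/readDom).  The key written below is
# hypothetical.
def _example_store_note(dominfo):
    dominfo.storeVm('xend/example-note', 'set-by-tool')
    return dominfo.readDom('console/port')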
1289 def _recreateDom(self):
1290 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1292 def _recreateDomFunc(self, t):
1293 t.remove()
1294 t.mkdir()
1295 t.set_permissions({'dom' : self.domid})
1296 t.write('vm', self.vmpath)
1298 def _storeDomDetails(self):
1299 to_store = {
1300 'domid': str(self.domid),
1301 'vm': self.vmpath,
1302 'name': self.info['name_label'],
1303 'console/limit': str(xoptions.get_console_limit() * 1024),
1304 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1307 def f(n, v):
1308 if v is not None:
1309 if type(v) == bool:
1310 to_store[n] = v and "1" or "0"
1311 else:
1312 to_store[n] = str(v)
1314 # Figure out if we need to tell xenconsoled to ignore this guest's
1315 # console - device model will handle console if it is running
1316 constype = "ioemu"
1317 if 'device_model' not in self.info['platform']:
1318 constype = "xenconsoled"
1320 f('console/port', self.console_port)
1321 f('console/ring-ref', self.console_mfn)
1322 f('console/type', constype)
1323 f('store/port', self.store_port)
1324 f('store/ring-ref', self.store_mfn)
1326 if arch.type == "x86":
1327 f('control/platform-feature-multiprocessor-suspend', True)
1329 # elfnotes
1330 for n, v in self.info.get_notes().iteritems():
1331 n = n.lower().replace('_', '-')
1332 if n == 'features':
1333 for v in v.split('|'):
1334 v = v.replace('_', '-')
1335 if v.startswith('!'):
1336 f('image/%s/%s' % (n, v[1:]), False)
1337 else:
1338 f('image/%s/%s' % (n, v), True)
1339 else:
1340 f('image/%s' % n, v)
1342 if self.info.has_key('security_label'):
1343 f('security_label', self.info['security_label'])
1345 to_store.update(self._vcpuDomDetails())
1347 log.debug("Storing domain details: %s", scrub_password(to_store))
1349 self._writeDom(to_store)
1351 def _vcpuDomDetails(self):
1352 def availability(n):
1353 if self.info['vcpu_avail'] & (1 << n):
1354 return 'online'
1355 else:
1356 return 'offline'
1358 result = {}
1359 for v in range(0, self.info['VCPUs_max']):
1360 result["cpu/%d/availability" % v] = availability(v)
1361 return result
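# --- Illustrative sketch (standalone helper, not a method of this class) ---
# How the vcpu_avail bitmask drives availability() above: with
# vcpu_avail == 5 (binary 0101) and VCPUs_max == 4, vcpus 0 and 2 are
# 'online' and vcpus 1 and 3 are 'offline'.
def _example_vcpu_availability(vcpu_avail, vcpus_max):
    result = []
    for n in range(vcpus_max):
        if vcpu_avail & (1 << n):
            result.append('online')
        else:
            result.append('offline')
    return result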
1364 # xenstore watches
1367 def _registerWatches(self):
1368 """Register a watch on this VM's entries in the store, and the
1369 domain's control/shutdown node, so that when they are changed
1370 externally, we keep up to date. This should only be called by {@link
1371 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1372 details have been written, but before the new instance is returned."""
1373 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1374 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1375 self._handleShutdownWatch)
1377 def _storeChanged(self, _):
1378 log.trace("XendDomainInfo.storeChanged");
1380 changed = False
1382 # Check whether values in the configuration have
1383 # changed in Xenstore.
1385 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1386 'rtc/timeoffset']
1388 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1389 for k in cfg_vm])
1391 # convert two lists into a python dictionary
1392 vm_details = dict(zip(cfg_vm, vm_details))
1394 if vm_details['rtc/timeoffset'] == None:
1395 vm_details['rtc/timeoffset'] = "0"
1397 for arg, val in vm_details.items():
1398 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1399 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1400 if val != None and val != self.info[xapiarg]:
1401 self.info[xapiarg] = val
1402 changed = True
1403 elif arg == "memory":
1404 if val != None and val != self.info["static_memory_min"]:
1405 self.info["static_memory_min"] = val
1406 changed = True
1407 elif arg == "maxmem":
1408 if val != None and val != self.info["static_memory_max"]:
1409 self.info["static_memory_max"] = val
1410 changed = True
1412 # Check whether image definition has been updated
1413 image_sxp = self._readVm('image')
1414 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1415 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1416 changed = True
1418 # Check if the rtc offset has changed
1419 if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
1420 self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
1421 changed = True
1423 if changed:
1424 # Update the domain section of the store, as this contains some
1425 # parameters derived from the VM configuration.
1426 self._storeDomDetails()
1428 return 1
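# --- Illustrative sketch (standalone helper, not a method of this class) ---
# xswatch callbacks such as _storeChanged() above take one argument (ignored
# here) and return a true value so the watch stays registered.  Watching an
# extra, hypothetical key could look like:
def _example_watch_name(dominfo):
    def _changed(_):
        log.debug("name under %s changed to %s",
                  dominfo.vmpath, dominfo.getName())
        return 1
    return xswatch(dominfo.vmpath + '/name', _changed)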
1430 def _handleShutdownWatch(self, _):
1431 log.debug('XendDomainInfo.handleShutdownWatch')
1433 reason = self.readDom('control/shutdown')
1435 if reason and reason != 'suspend':
1436 sst = self.readDom('xend/shutdown_start_time')
1437 now = time.time()
1438 if sst:
1439 self.shutdownStartTime = float(sst)
1440 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1441 else:
1442 self.shutdownStartTime = now
1443 self.storeDom('xend/shutdown_start_time', now)
1444 timeout = SHUTDOWN_TIMEOUT
1446 log.trace(
1447 "Scheduling refreshShutdown on domain %d in %ds.",
1448 self.domid, timeout)
1449 threading.Timer(timeout, self.refreshShutdown).start()
1451 return True
1455 # Public Attributes for the VM
1459 def getDomid(self):
1460 return self.domid
1462 def setName(self, name, to_store = True):
1463 self._checkName(name)
1464 self.info['name_label'] = name
1465 if to_store:
1466 self.storeVm("name", name)
1468 def getName(self):
1469 return self.info['name_label']
1471 def getDomainPath(self):
1472 return self.dompath
1474 def getShutdownReason(self):
1475 return self.readDom('control/shutdown')
1477 def getStorePort(self):
1478 """For use only by image.py and XendCheckpoint.py."""
1479 return self.store_port
1481 def getConsolePort(self):
1482 """For use only by image.py and XendCheckpoint.py"""
1483 return self.console_port
1485 def getFeatures(self):
1486 """For use only by image.py."""
1487 return self.info['features']
1489 def getVCpuCount(self):
1490 return self.info['VCPUs_max']
1492 def setVCpuCount(self, vcpus):
1493 if vcpus <= 0:
1494 raise XendError('Invalid VCPUs')
1496 self.info['vcpu_avail'] = (1 << vcpus) - 1
1497 if self.domid >= 0:
1498 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1499 # update dom differently depending on whether we are adjusting
1500 # vcpu number up or down, otherwise _vcpuDomDetails does not
1501 # disable the vcpus
1502 if self.info['VCPUs_max'] > vcpus:
1503 # decreasing
1504 self._writeDom(self._vcpuDomDetails())
1505 self.info['VCPUs_live'] = vcpus
1506 else:
1507 # same or increasing
1508 self.info['VCPUs_live'] = vcpus
1509 self._writeDom(self._vcpuDomDetails())
1510 else:
1511 if self.info['VCPUs_max'] > vcpus:
1512 # decreasing
1513 del self.info['cpus'][vcpus:]
1514 elif self.info['VCPUs_max'] < vcpus:
1515 # increasing
1516 for c in range(self.info['VCPUs_max'], vcpus):
1517 self.info['cpus'].append(list())
1518 self.info['VCPUs_max'] = vcpus
1519 xen.xend.XendDomain.instance().managed_config_save(self)
1520 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1521 vcpus)
1523 def getMemoryTarget(self):
1524 """Get this domain's target memory size, in KB."""
1525 return self.info['memory_dynamic_max'] / 1024
1527 def getMemoryMaximum(self):
1528 """Get this domain's maximum memory size, in KB."""
1529 # remember, info now stores memory in bytes
1530 return self.info['memory_static_max'] / 1024
1532 def getResume(self):
1533 return str(self._resume)
1535 def setResume(self, isresume):
1536 self._resume = isresume
1538 def getCpus(self):
1539 return self.info['cpus']
1541 def setCpus(self, cpumap):
1542 self.info['cpus'] = cpumap
1544 def getCap(self):
1545 return self.info['vcpus_params']['cap']
1547 def setCap(self, cpu_cap):
1548 self.info['vcpus_params']['cap'] = cpu_cap
1550 def getWeight(self):
1551 return self.info['vcpus_params']['weight']
1553 def setWeight(self, cpu_weight):
1554 self.info['vcpus_params']['weight'] = cpu_weight
1556 def getRestartCount(self):
1557 return self._readVm('xend/restart_count')
1559 def refreshShutdown(self, xeninfo = None):
1560 """ Checks the domain for whether a shutdown is required.
1562 Called from XendDomainInfo and also image.py for HVM images.
1563 """
1565 # If set at the end of this method, a restart is required, with the
1566 # given reason. This restart has to be done out of the scope of
1567 # refresh_shutdown_lock.
1568 restart_reason = None
1570 self.refresh_shutdown_lock.acquire()
1571 try:
1572 if xeninfo is None:
1573 xeninfo = dom_get(self.domid)
1574 if xeninfo is None:
1575 # The domain no longer exists. This will occur if we have
1576 # scheduled a timer to check for shutdown timeouts and the
1577 # shutdown succeeded. It will also occur if someone
1578 # destroys a domain beneath us. We clean up the domain,
1579 # just in case, but we can't clean up the VM, because that
1580 # VM may have migrated to a different domain on this
1581 # machine.
1582 self.cleanupDomain()
1583 self._stateSet(DOM_STATE_HALTED)
1584 return
1586 if xeninfo['dying']:
1587 # Dying means that a domain has been destroyed, but has not
1588 # yet been cleaned up by Xen. This state could persist
1589 # indefinitely if, for example, another domain has some of its
1590 # pages mapped. We might like to diagnose this problem in the
1591 # future, but for now all we do is make sure that it's not us
1592 # holding the pages, by calling cleanupDomain. We can't
1593 # clean up the VM, as above.
1594 self.cleanupDomain()
1595 self._stateSet(DOM_STATE_SHUTDOWN)
1596 return
1598 elif xeninfo['crashed']:
1599 if self.readDom('xend/shutdown_completed'):
1600 # We've seen this shutdown already, but we are preserving
1601 # the domain for debugging. Leave it alone.
1602 return
1604 log.warn('Domain has crashed: name=%s id=%d.',
1605 self.info['name_label'], self.domid)
1606 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
1608 restart_reason = 'crash'
1609 self._stateSet(DOM_STATE_HALTED)
1611 elif xeninfo['shutdown']:
1612 self._stateSet(DOM_STATE_SHUTDOWN)
1613 if self.readDom('xend/shutdown_completed'):
1614 # We've seen this shutdown already, but we are preserving
1615 # the domain for debugging. Leave it alone.
1616 return
1618 else:
1619 reason = shutdown_reason(xeninfo['shutdown_reason'])
1621 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
1622 self.info['name_label'], self.domid, reason)
1623 self._writeVm(LAST_SHUTDOWN_REASON, reason)
1625 self._clearRestart()
1627 if reason == 'suspend':
1628 self._stateSet(DOM_STATE_SUSPENDED)
1629 # Don't destroy the domain. XendCheckpoint will do
1630 # this once it has finished. However, stop watching
1631 # the VM path now, otherwise we will end up with one
1632 # watch for the old domain, and one for the new.
1633 self._unwatchVm()
1634 elif reason in ('poweroff', 'reboot'):
1635 restart_reason = reason
1636 else:
1637 self.destroy()
1639 elif self.dompath is None:
1640 # We have yet to manage to call introduceDomain on this
1641 # domain. This can happen if a restore is in progress, or has
1642 # failed. Ignore this domain.
1643 pass
1644 else:
1645 # Domain is alive. If we are shutting it down, log a message
1646 # if it seems unresponsive.
1647 if xeninfo['paused']:
1648 self._stateSet(DOM_STATE_PAUSED)
1649 else:
1650 self._stateSet(DOM_STATE_RUNNING)
1652 if self.shutdownStartTime:
1653 timeout = (SHUTDOWN_TIMEOUT - time.time() +
1654 self.shutdownStartTime)
1655 if (timeout < 0 and not self.readDom('xend/unresponsive')):
1656 log.info(
1657 "Domain shutdown timeout expired: name=%s id=%s",
1658 self.info['name_label'], self.domid)
1659 self.storeDom('xend/unresponsive', 'True')
1660 finally:
1661 self.refresh_shutdown_lock.release()
1663 if restart_reason:
1664 threading.Thread(target = self._maybeRestart,
1665 args = (restart_reason,)).start()
1669 # Restart functions - handling whether we come back up on shutdown.
1672 def _clearRestart(self):
1673 self._removeDom("xend/shutdown_start_time")
1675 def _maybeDumpCore(self, reason):
1676 if reason == 'crash':
1677 if xoptions.get_enable_dump() or self.get_on_crash() \
1678 in ['coredump_and_destroy', 'coredump_and_restart']:
1679 try:
1680 self.dumpCore()
1681 except XendError:
1682 # This error has been logged -- there's nothing more
1683 # we can do in this context.
1684 pass
1686 def _maybeRestart(self, reason):
1687 # Before taking configured action, dump core if configured to do so.
1689 self._maybeDumpCore(reason)
1691 # Dispatch to the correct method based upon the configured on_{reason}
1692 # behaviour.
1693 actions = {"destroy" : self.destroy,
1694 "restart" : self._restart,
1695 "preserve" : self._preserve,
1696 "rename-restart" : self._renameRestart,
1697 "coredump-destroy" : self.destroy,
1698 "coredump-restart" : self._restart}
1700 action_conf = {
1701 'poweroff': 'actions_after_shutdown',
1702 'reboot': 'actions_after_reboot',
1703 'crash': 'actions_after_crash',
1706 action_target = self.info.get(action_conf.get(reason))
1707 func = actions.get(action_target, None)
1708 if func and callable(func):
1709 func()
1710 else:
1711 self.destroy() # default to destroy
1713 def _renameRestart(self):
1714 self._restart(True)
1716 def _restart(self, rename = False):
1717 """Restart the domain after it has exited.
1719 @param rename True if the old domain is to be renamed and preserved,
1720 False if it is to be destroyed.
1721 """
1722 from xen.xend import XendDomain
1724 if self._readVm(RESTART_IN_PROGRESS):
1725 log.error('Xend failed during restart of domain %s. '
1726 'Refusing to restart to avoid loops.',
1727 str(self.domid))
1728 self.destroy()
1729 return
1731 old_domid = self.domid
1732 self._writeVm(RESTART_IN_PROGRESS, 'True')
1734 now = time.time()
1735 rst = self._readVm('xend/previous_restart_time')
1736 if rst:
1737 rst = float(rst)
1738 timeout = now - rst
1739 if timeout < MINIMUM_RESTART_TIME:
1740 log.error(
1741 'VM %s restarting too fast (%f seconds since the last '
1742 'restart). Refusing to restart to avoid loops.',
1743 self.info['name_label'], timeout)
1744 self.destroy()
1745 return
1747 self._writeVm('xend/previous_restart_time', str(now))
1749 prev_vm_xend = self._listRecursiveVm('xend')
1750 new_dom_info = self.info
1751 try:
1752 if rename:
1753 new_dom_info = self._preserveForRestart()
1754 else:
1755 self._unwatchVm()
1756 self.destroy()
1758 # new_dom's VM will be the same as this domain's VM, except where
1759 # the rename flag has instructed us to call preserveForRestart.
1760 # In that case, it is important that we remove the
1761 # RESTART_IN_PROGRESS node from the new domain, not the old one,
1762 # once the new one is available.
1764 new_dom = None
1765 try:
1766 new_dom = XendDomain.instance().domain_create_from_dict(
1767 new_dom_info)
1768 for x in prev_vm_xend[0][1]:
1769 new_dom._writeVm('xend/%s' % x[0], x[1])
1770 new_dom.waitForDevices()
1771 new_dom.unpause()
1772 rst_cnt = new_dom._readVm('xend/restart_count')
1773 rst_cnt = int(rst_cnt) + 1
1774 new_dom._writeVm('xend/restart_count', str(rst_cnt))
1775 new_dom._removeVm(RESTART_IN_PROGRESS)
1776 except:
1777 if new_dom:
1778 new_dom._removeVm(RESTART_IN_PROGRESS)
1779 new_dom.destroy()
1780 else:
1781 self._removeVm(RESTART_IN_PROGRESS)
1782 raise
1783 except:
1784 log.exception('Failed to restart domain %s.', str(old_domid))
1786 def _preserveForRestart(self):
1787 """Preserve a domain that has been shut down, by giving it a new UUID,
1788 cloning the VM details, and giving it a new name. This allows us to
1789 keep this domain for debugging, but restart a new one in its place
1790 preserving the restart semantics (name and UUID preserved).
1791 """
1793 new_uuid = uuid.createString()
1794 new_name = 'Domain-%s' % new_uuid
1795 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
1796 self.info['name_label'], self.domid, self.info['uuid'],
1797 new_name, new_uuid)
1798 self._unwatchVm()
1799 self._releaseDevices()
1800 # Remove existing vm node in xenstore
1801 self._removeVm()
1802 new_dom_info = self.info.copy()
1803 new_dom_info['name_label'] = self.info['name_label']
1804 new_dom_info['uuid'] = self.info['uuid']
1805 self.info['name_label'] = new_name
1806 self.info['uuid'] = new_uuid
1807 self.vmpath = XS_VMROOT + new_uuid
1808 # Write out new vm node to xenstore
1809 self._storeVmDetails()
1810 self._preserve()
1811 return new_dom_info
1814 def _preserve(self):
1815 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
1816 self.domid)
1817 self._unwatchVm()
1818 self.storeDom('xend/shutdown_completed', 'True')
1819 self._stateSet(DOM_STATE_HALTED)
1822 # Debugging ..
1825 def dumpCore(self, corefile = None):
1826 """Create a core dump for this domain.
1828 @raise: XendError if core dumping failed.
1829 """
1831 try:
1832 if not corefile:
1833 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
1834 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
1835 self.info['name_label'], self.domid)
1837 if os.path.isdir(corefile):
1838 raise XendError("Cannot dump core in a directory: %s" %
1839 corefile)
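# Flag the dump in the VM's xenstore node so other code paths can see
# that a core dump is in flight; the flag is cleared again on both
# success and failure.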
1841 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
1842 xc.domain_dumpcore(self.domid, corefile)
1843 self._removeVm(DUMPCORE_IN_PROGRESS)
1844 except RuntimeError, ex:
1845 corefile_incomp = corefile+'-incomplete'
1846 os.rename(corefile, corefile_incomp)
1847 self._removeVm(DUMPCORE_IN_PROGRESS)
1848 log.exception("XendDomainInfo.dumpCore failed: id = %s name = %s",
1849 self.domid, self.info['name_label'])
1850 raise XendError("Failed to dump core: %s" % str(ex))
1853 # Device creation/deletion functions
1856 def _createDevice(self, deviceClass, devConfig):
1857 return self.getDeviceController(deviceClass).createDevice(devConfig)
1859 def _waitForDevice(self, deviceClass, devid):
1860 return self.getDeviceController(deviceClass).waitForDevice(devid)
1862 def _waitForDeviceUUID(self, dev_uuid):
1863 deviceClass, config = self.info['devices'].get(dev_uuid)
1864 self._waitForDevice(deviceClass, config['devid'])
1866 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
1867 return self.getDeviceController(deviceClass).waitForDevice_destroy(
1868 devid, backpath)
1870 def _reconfigureDevice(self, deviceClass, devid, devconfig):
1871 return self.getDeviceController(deviceClass).reconfigureDevice(
1872 devid, devconfig)
1874 def _createDevices(self):
1875 """Create the devices for a vm.
1877 @raise: VmError for invalid devices
1878 """
1879 if self.image:
1880 self.image.prepareEnvironment()
1882 vscsi_uuidlist = {}
1883 vscsi_devidlist = []
1884 ordered_refs = self.info.ordered_device_refs()
1885 for dev_uuid in ordered_refs:
1886 devclass, config = self.info['devices'][dev_uuid]
1887 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
1888 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
1889 dev_uuid = config.get('uuid')
1890 devid = self._createDevice(devclass, config)
1892 # store devid in XendConfig for caching reasons
1893 if dev_uuid in self.info['devices']:
1894 self.info['devices'][dev_uuid][1]['devid'] = devid
1896 elif devclass == 'vscsi':
1897 vscsi_config = config.get('devs', [])[0]
1898 devid = vscsi_config.get('devid', '')
1899 dev_uuid = config.get('uuid')
1900 vscsi_uuidlist[devid] = dev_uuid
1901 vscsi_devidlist.append(devid)
1903 # The devids must be sorted so that the devices appear as /dev/sdXX in the guest in a stable order.
1904 if len(vscsi_uuidlist) > 0:
1905 vscsi_devidlist.sort()
1906 for vscsiid in vscsi_devidlist:
1907 dev_uuid = vscsi_uuidlist[vscsiid]
1908 devclass, config = self.info['devices'][dev_uuid]
1909 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
1910 dev_uuid = config.get('uuid')
1911 devid = self._createDevice(devclass, config)
1912 # store devid in XendConfig for caching reasons
1913 if dev_uuid in self.info['devices']:
1914 self.info['devices'][dev_uuid][1]['devid'] = devid
1917 if self.image:
1918 self.image.createDeviceModel()
1920 # If there are PCI pass-through devices, fetch their virtual PCI slot info from qemu.
1921 self.sync_pcidev_info()
1923 def _releaseDevices(self, suspend = False):
1924 """Release all domain's devices. Nothrow guarantee."""
1925 if self.image:
1926 try:
1927 log.debug("Destroying device model")
1928 self.image.destroyDeviceModel()
1929 except Exception, e:
1930 log.exception("Device model destroy failed %s" % str(e))
1931 else:
1932 log.debug("No device model")
1934 log.debug("Releasing devices")
1935 t = xstransact("%s/device" % self.dompath)
1936 try:
1937 for devclass in XendDevices.valid_devices():
1938 for dev in t.list(devclass):
1939 try:
1940 log.debug("Removing %s", dev);
1941 self.destroyDevice(devclass, dev, False);
1942 except:
1943 # Log and swallow any exceptions in removal --
1944 # there's nothing more we can do.
1945 log.exception("Device release failed: %s; %s; %s",
1946 self.info['name_label'], devclass, dev)
1947 finally:
1948 t.abort()
1950 def getDeviceController(self, name):
1951 """Get the device controller for this domain, and if it
1952 doesn't exist, create it.
1954 @param name: device class name
1955 @type name: string
1956 @rtype: subclass of DevController
1957 """
1958 if name not in self._deviceControllers:
1959 devController = XendDevices.make_controller(name, self)
1960 if not devController:
1961 raise XendError("Unknown device type: %s" % name)
1962 self._deviceControllers[name] = devController
1964 return self._deviceControllers[name]
1967 # Migration functions (public)
1970 def testMigrateDevices(self, network, dst):
1971 """ Notify all device about intention of migration
1972 @raise: XendError for a device that cannot be migrated
1973 """
1974 for (n, c) in self.info.all_devices_sxpr():
1975 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
1976 if rc != 0:
1977 raise XendError("Device of type '%s' refuses migration." % n)
1979 def migrateDevices(self, network, dst, step, domName=''):
1980 """Notify the devices about migration
1981 """
1982 ctr = 0
1983 try:
1984 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
1985 self.migrateDevice(dev_type, dev_conf, network, dst,
1986 step, domName)
1987 ctr = ctr + 1
1988 except:
1989 for dev_type, dev_conf in self.info.all_devices_sxpr():
1990 if ctr == 0:
1991 step = step - 1
1992 ctr = ctr - 1
1993 self._recoverMigrateDevice(dev_type, dev_conf, network,
1994 dst, step, domName)
1995 raise
1997 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
1998 step, domName=''):
1999 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2000 network, dst, step, domName)
2002 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2003 dst, step, domName=''):
2004 return self.getDeviceController(deviceClass).recover_migrate(
2005 deviceConfig, network, dst, step, domName)
2008 ## private:
2010 def _constructDomain(self):
2011 """Construct the domain.
2013 @raise: VmError on error
2014 """
2016 log.debug('XendDomainInfo.constructDomain')
2018 self.shutdownStartTime = None
2020 hap = 0
2021 hvm = self.info.is_hvm()
2022 if hvm:
2023 hap = self.info.is_hap()
2024 info = xc.xeninfo()
2025 if 'hvm' not in info['xen_caps']:
2026 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2027 "supported by your CPU and enabled in your "
2028 "BIOS?")
2030 # Hack to pre-reserve some memory for initial domain creation.
2031 # There is an implicit memory overhead for any domain creation. This
2032 # overhead is greater for some types of domain than others. For
2033 # example, an x86 HVM domain will have a default shadow-pagetable
2034 # allocation of 1MB. We free up 2MB here to be on the safe side.
2035 balloon.free(2*1024) # 2MB should be plenty
2037 ssidref = 0
2038 if security.on() == xsconstants.XS_POLICY_ACM:
2039 ssidref = security.calc_dom_ssidref_from_info(self.info)
2040 if security.has_authorization(ssidref) == False:
2041 raise VmError("VM is not authorized to run.")
2043 try:
2044 self.domid = xc.domain_create(
2045 domid = 0,
2046 ssidref = ssidref,
2047 handle = uuid.fromString(self.info['uuid']),
2048 flags = (int(hvm) << 0) | (int(hap) << 1),
2049 target = self.info.target())
2050 except Exception, e:
2051 # may get here if due to ACM the operation is not permitted
2052 if security.on() == xsconstants.XS_POLICY_ACM:
2053 raise VmError('Domain in conflict set with running domain?')
2055 if self.domid < 0:
2056 raise VmError('Creating domain failed: name=%s' %
2057 self.info['name_label'])
2059 self.dompath = GetDomainPath(self.domid)
2061 self._recreateDom()
2063 # Set timer configuration of domain
2064 timer_mode = self.info["platform"].get("timer_mode")
2065 if hvm and timer_mode is not None:
2066 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2067 long(timer_mode))
2069 # Optionally enable virtual HPET
2070 hpet = self.info["platform"].get("hpet")
2071 if hvm and hpet is not None:
2072 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2073 long(hpet))
2075 # Set maximum number of vcpus in domain
2076 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2078 # Test whether the devices can be assigned with VT-d
2079 pci = self.info["platform"].get("pci")
2080 if hvm and pci:
2081 bdf = xc.test_assign_device(self.domid, str(pci))
2082 if bdf != 0:
2083 bus = (bdf >> 16) & 0xff
2084 devfn = (bdf >> 8) & 0xff
2085 dev = (devfn >> 3) & 0x1f
2086 func = devfn & 0x7
2087 raise VmError("Fail to assign device(%x:%x.%x): maybe VT-d is "
2088 "not enabled, or the device is not exist, or it "
2089 "has already been assigned to other domain"
2090 % (bus, dev, func))
2092 # register the domain in the list
2093 from xen.xend import XendDomain
2094 XendDomain.instance().add_domain(self)
2096 def _introduceDomain(self):
2097 assert self.domid is not None
2098 assert self.store_mfn is not None
2099 assert self.store_port is not None
2101 try:
2102 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2103 except RuntimeError, exn:
2104 raise XendError(str(exn))
2106 def _setTarget(self, target):
2107 assert self.domid is not None
2109 try:
2110 SetTarget(self.domid, target)
2111 self.storeDom('target', target)
2112 except RuntimeError, exn:
2113 raise XendError(str(exn))
2116 def _initDomain(self):
2117 log.debug('XendDomainInfo.initDomain: %s %s',
2118 self.domid,
2119 self.info['vcpus_params']['weight'])
2121 self._configureBootloader()
2123 try:
2124 if self.info['platform'].get('localtime', 0):
2125 if time.localtime(time.time())[8]:
2126 self.info['platform']['rtc_timeoffset'] = -time.altzone
2127 else:
2128 self.info['platform']['rtc_timeoffset'] = -time.timezone
2130 self.image = image.create(self, self.info)
2132 # repin domain vcpus if a restricted cpus list is provided
2133 # this is done prior to memory allocation to aid in memory
2134 # distribution for NUMA systems.
2135 def has_cpus():
2136 if self.info['cpus'] is not None:
2137 for c in self.info['cpus']:
2138 if c:
2139 return True
2140 return False
2142 if has_cpus():
2143 for v in range(0, self.info['VCPUs_max']):
2144 if self.info['cpus'][v]:
2145 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2146 else:
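# No explicit CPU list was given: estimate the load on each NUMA node
# by counting how many vcpus of other domains may run on that node's
# CPUs (normalised by the node's CPU count), then pin all vcpus of
# this domain to the least-loaded node that has enough free memory.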
2147 def find_relaxed_node(node_list):
2148 import sys
2149 nr_nodes = info['nr_nodes']
2150 if node_list is None:
2151 node_list = range(0, nr_nodes)
2152 nodeload = [0]
2153 nodeload = nodeload * nr_nodes
2154 from xen.xend import XendDomain
2155 doms = XendDomain.instance().list('all')
2156 for dom in filter (lambda d: d.domid != self.domid, doms):
2157 cpuinfo = dom.getVCPUInfo()
2158 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2159 if sxp.child_value(vcpu, 'online') == 0: continue
2160 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2161 for i in range(0, nr_nodes):
2162 node_cpumask = info['node_to_cpu'][i]
2163 for j in node_cpumask:
2164 if j in cpumap:
2165 nodeload[i] += 1
2166 break
2167 for i in range(0, nr_nodes):
2168 if len(info['node_to_cpu'][i]) > 0 and i in node_list:
2169 nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2170 else:
2171 nodeload[i] = sys.maxint
2172 index = nodeload.index( min(nodeload) )
2173 return index
2175 info = xc.physinfo()
2176 if info['nr_nodes'] > 1:
2177 node_memory_list = info['node_to_memory']
2178 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2179 candidate_node_list = []
2180 for i in range(0, info['nr_nodes']):
2181 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2182 candidate_node_list.append(i)
2183 index = find_relaxed_node(candidate_node_list)
2184 cpumask = info['node_to_cpu'][index]
2185 for v in range(0, self.info['VCPUs_max']):
2186 xc.vcpu_setaffinity(self.domid, v, cpumask)
2188 # Use architecture- and image-specific calculations to determine
2189 # the various headrooms necessary, given the raw configured
2190 # values. maxmem, memory, and shadow are all in KiB.
2191 # but memory_static_max etc are all stored in bytes now.
2192 memory = self.image.getRequiredAvailableMemory(
2193 self.info['memory_dynamic_max'] / 1024)
2194 maxmem = self.image.getRequiredAvailableMemory(
2195 self.info['memory_static_max'] / 1024)
2196 shadow = self.image.getRequiredShadowMemory(
2197 self.info['shadow_memory'] * 1024,
2198 self.info['memory_static_max'] / 1024)
2200 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2201 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2202 # takes MiB and we must not round down and end up under-providing.
2203 shadow = ((shadow + 1023) / 1024) * 1024
2205 # set memory limit
2206 xc.domain_setmaxmem(self.domid, maxmem)
2208 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2209 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2210 # Round vtd_mem up to a multiple of a MiB.
2211 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
2213 # Make sure there's enough RAM available for the domain
2214 balloon.free(memory + shadow + vtd_mem)
2216 # Set up the shadow memory
2217 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2218 self.info['shadow_memory'] = shadow_cur
2220 self._createChannels()
2222 channel_details = self.image.createImage()
2224 self.store_mfn = channel_details['store_mfn']
2225 if 'console_mfn' in channel_details:
2226 self.console_mfn = channel_details['console_mfn']
2227 if 'notes' in channel_details:
2228 self.info.set_notes(channel_details['notes'])
2229 if 'native_protocol' in channel_details:
2230 self.native_protocol = channel_details['native_protocol'];
2232 self._introduceDomain()
2233 if self.info.target():
2234 self._setTarget(self.info.target())
2236 self._createDevices()
2238 self.image.cleanupBootloading()
2240 self.info['start_time'] = time.time()
2242 self._stateSet(DOM_STATE_RUNNING)
2243 except VmError, exn:
2244 log.exception("XendDomainInfo.initDomain: exception occurred")
2245 if self.image:
2246 self.image.cleanupBootloading()
2247 raise exn
2248 except RuntimeError, exn:
2249 log.exception("XendDomainInfo.initDomain: exception occurred")
2250 if self.image:
2251 self.image.cleanupBootloading()
2252 raise VmError(str(exn))
2255 def cleanupDomain(self):
2256 """Cleanup domain resources; release devices. Idempotent. Nothrow
2257 guarantee."""
2259 self.refresh_shutdown_lock.acquire()
2260 try:
2261 self.unwatchShutdown()
2262 self._releaseDevices()
2263 bootloader_tidy(self)
2265 if self.image:
2266 self.image = None
2268 try:
2269 self._removeDom()
2270 except:
2271 log.exception("Removing domain path failed.")
2273 self._stateSet(DOM_STATE_HALTED)
2274 self.domid = None # Do not push into _stateSet()!
2275 finally:
2276 self.refresh_shutdown_lock.release()
2279 def unwatchShutdown(self):
2280 """Remove the watch on the domain's control/shutdown node, if any.
2281 Idempotent. Nothrow guarantee. Expects to be protected by the
2282 refresh_shutdown_lock."""
2284 try:
2285 try:
2286 if self.shutdownWatch:
2287 self.shutdownWatch.unwatch()
2288 finally:
2289 self.shutdownWatch = None
2290 except:
2291 log.exception("Unwatching control/shutdown failed.")
2293 def waitForShutdown(self):
2294 self.state_updated.acquire()
2295 try:
2296 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2297 self.state_updated.wait(timeout=1.0)
2298 finally:
2299 self.state_updated.release()
2302 # TODO: recategorise - called from XendCheckpoint
2305 def completeRestore(self, store_mfn, console_mfn):
2307 log.debug("XendDomainInfo.completeRestore")
2309 self.store_mfn = store_mfn
2310 self.console_mfn = console_mfn
2312 self._introduceDomain()
2313 self.image = image.create(self, self.info)
2314 if self.image:
2315 self.image.createDeviceModel(True)
2316 self._storeDomDetails()
2317 self._registerWatches()
2318 self.refreshShutdown()
2320 log.debug("XendDomainInfo.completeRestore done")
2323 def _endRestore(self):
2324 self.setResume(False)
2327 # VM Destroy
2330 def _prepare_phantom_paths(self):
2331 # get associated devices to destroy
2332 # build list of phantom devices to be removed after normal devices
2333 plist = []
2334 if self.domid is not None:
2335 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2336 try:
2337 for dev in t.list():
2338 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2339 % (self.dompath, dev))
2340 if backend_phantom_vbd is not None:
2341 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2342 % backend_phantom_vbd)
2343 plist.append(backend_phantom_vbd)
2344 plist.append(frontend_phantom_vbd)
2345 finally:
2346 t.abort()
2347 return plist
2349 def _cleanup_phantom_devs(self, plist):
2350 # remove phantom devices
2351 if not plist == []:
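# Give normal device teardown a moment to settle before the phantom
# nodes are marked as closing and forcibly removed.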
2352 time.sleep(2)
2353 for paths in plist:
2354 if paths.find('backend') != -1:
2355 from xen.xend.server import DevController
2356 # Modify online status /before/ updating state (latter is watched by
2357 # drivers, so this ordering avoids a race).
2358 xstransact.Write(paths, 'online', "0")
2359 xstransact.Write(paths, 'state', str(DevController.xenbusState['Closing']))
2360 # Forcibly remove the phantom device node.
2361 xstransact.Remove(paths)
2363 def destroy(self):
2364 """Cleanup VM and destroy domain. Nothrow guarantee."""
2366 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
2368 paths = self._prepare_phantom_paths()
2370 self._cleanupVm()
2371 if self.dompath is not None:
2372 self.destroyDomain()
2374 self._cleanup_phantom_devs(paths)
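# Domains marked as transient in other_config are removed from xend's
# domain list entirely once they have been destroyed.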
2376 if "transient" in self.info["other_config"] \
2377 and bool(self.info["other_config"]["transient"]):
2378 from xen.xend import XendDomain
2379 XendDomain.instance().domain_delete_by_dominfo(self)
2382 def destroyDomain(self):
2383 log.debug("XendDomainInfo.destroyDomain(%s)", str(self.domid))
2385 paths = self._prepare_phantom_paths()
2387 try:
2388 if self.domid is not None:
2389 xc.domain_destroy_hook(self.domid)
2390 xc.domain_destroy(self.domid)
2391 for state in DOM_STATES_OLD:
2392 self.info[state] = 0
2393 self._stateSet(DOM_STATE_HALTED)
2394 except:
2395 log.exception("XendDomainInfo.destroy: xc.domain_destroy failed.")
2397 from xen.xend import XendDomain
2398 XendDomain.instance().remove_domain(self)
2400 self.cleanupDomain()
2401 self._cleanup_phantom_devs(paths)
2404 def resetDomain(self):
2405 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
2407 old_domid = self.domid
2408 prev_vm_xend = self._listRecursiveVm('xend')
2409 new_dom_info = self.info
2410 try:
2411 self._unwatchVm()
2412 self.destroy()
2414 new_dom = None
2415 try:
2416 from xen.xend import XendDomain
2417 new_dom_info['domid'] = None
2418 new_dom = XendDomain.instance().domain_create_from_dict(
2419 new_dom_info)
2420 for x in prev_vm_xend[0][1]:
2421 new_dom._writeVm('xend/%s' % x[0], x[1])
2422 new_dom.waitForDevices()
2423 new_dom.unpause()
2424 except:
2425 if new_dom:
2426 new_dom.destroy()
2427 raise
2428 except:
2429 log.exception('Failed to reset domain %s.', str(old_domid))
2432 def resumeDomain(self):
2433 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
2435 # resume a suspended domain (e.g. after live checkpoint, or after
2436 # a later error during save or migrate); checks that the domain
2437 # is currently suspended first so safe to call from anywhere
2439 xeninfo = dom_get(self.domid)
2440 if xeninfo is None:
2441 return
2442 if not xeninfo['shutdown']:
2443 return
2444 reason = shutdown_reason(xeninfo['shutdown_reason'])
2445 if reason != 'suspend':
2446 return
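# A guest that advertised SUSPEND_CANCEL in its ELF notes can resume
# in place without its devices being torn down ('fast' resume);
# otherwise devices are released and recreated around the resume.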
2448 try:
2449 # could also fetch a parsed note from xenstore
2450 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
2451 if not fast:
2452 self._releaseDevices()
2453 self.testDeviceComplete()
2454 self.testvifsComplete()
2455 log.debug("XendDomainInfo.resumeDomain: devices released")
2457 self._resetChannels()
2459 self._removeDom('control/shutdown')
2460 self._removeDom('device-misc/vif/nextDeviceID')
2462 self._createChannels()
2463 self._introduceDomain()
2464 self._storeDomDetails()
2466 self._createDevices()
2467 log.debug("XendDomainInfo.resumeDomain: devices created")
2469 xc.domain_resume(self.domid, fast)
2470 ResumeDomain(self.domid)
2471 except:
2472 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
2473 self.image.resumeDeviceModel()
2474 log.debug("XendDomainInfo.resumeDomain: completed")
2478 # Channels for xenstore and console
2481 def _createChannels(self):
2482 """Create the channels to the domain.
2483 """
2484 self.store_port = self._createChannel()
2485 self.console_port = self._createChannel()
2488 def _createChannel(self):
2489 """Create an event channel to the domain.
2490 """
2491 try:
2492 if self.domid != None:
2493 return xc.evtchn_alloc_unbound(domid = self.domid,
2494 remote_dom = 0)
2495 except:
2496 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2497 raise
2499 def _resetChannels(self):
2500 """Reset all event channels in the domain.
2501 """
2502 try:
2503 if self.domid != None:
2504 return xc.evtchn_reset(dom = self.domid)
2505 except:
2506 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2507 raise
2511 # Bootloader configuration
2514 def _configureBootloader(self):
2515 """Run the bootloader if we're configured to do so."""
2517 blexec = self.info['PV_bootloader']
2518 bootloader_args = self.info['PV_bootloader_args']
2519 kernel = self.info['PV_kernel']
2520 ramdisk = self.info['PV_ramdisk']
2521 args = self.info['PV_args']
2522 boot = self.info['HVM_boot_policy']
2524 if boot:
2525 # HVM booting.
2526 pass
2527 elif not blexec and kernel:
2528 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
2529 # will be picked up by image.py.
2530 pass
2531 else:
2532 # Boot using bootloader
2533 if not blexec or blexec == 'pygrub':
2534 blexec = osdep.pygrub_path
2536 blcfg = None
2537 disks = [x for x in self.info['vbd_refs']
2538 if self.info['devices'][x][1]['bootable']]
2540 if not disks:
2541 msg = "Had a bootloader specified, but no disks are bootable"
2542 log.error(msg)
2543 raise VmError(msg)
2545 devinfo = self.info['devices'][disks[0]]
2546 devtype = devinfo[0]
2547 disk = devinfo[1]['uname']
2549 fn = blkdev_uname_to_file(disk)
2550 taptype = blkdev_uname_to_taptype(disk)
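# 'mounted' is true only for blktap disks backed by a plain file (no
# device node) and not using the raw aio/sync drivers, i.e. image
# formats that pygrub cannot read directly.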
2551 mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
2552 if mounted:
2553 # This is a file, not a device. pygrub can cope with a
2554 # file if it's raw, but if it's QCOW or other such formats
2555 # used through blktap, then we need to mount it first.
2557 log.info("Mounting %s on %s." %
2558 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2560 vbd = {
2561 'mode': 'RO',
2562 'device': BOOTLOADER_LOOPBACK_DEVICE,
2563 }
2565 from xen.xend import XendDomain
2566 dom0 = XendDomain.instance().privilegedDomain()
2567 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
2568 fn = BOOTLOADER_LOOPBACK_DEVICE
2570 try:
2571 blcfg = bootloader(blexec, fn, self, False,
2572 bootloader_args, kernel, ramdisk, args)
2573 finally:
2574 if mounted:
2575 log.info("Unmounting %s from %s." %
2576 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2578 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
2580 if blcfg is None:
2581 msg = "Had a bootloader specified, but can't find disk"
2582 log.error(msg)
2583 raise VmError(msg)
2585 self.info.update_with_image_sxp(blcfg, True)
2589 # VM Functions
2592 def _readVMDetails(self, params):
2593 """Read the specified parameters from the store.
2594 """
2595 try:
2596 return self._gatherVm(*params)
2597 except ValueError:
2598 # One of the int/float entries in params has a corresponding store
2599 # entry that is invalid. We recover, because older versions of
2600 # Xend may have put the entry there (memory/target, for example),
2601 # but this is in general a bad situation to have reached.
2602 log.exception(
2603 "Store corrupted at %s! Domain %d's configuration may be "
2604 "affected.", self.vmpath, self.domid)
2605 return []
2607 def _cleanupVm(self):
2608 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2610 self._unwatchVm()
2612 try:
2613 self._removeVm()
2614 except:
2615 log.exception("Removing VM path failed.")
2618 def checkLiveMigrateMemory(self):
2619 """ Make sure there's enough memory to migrate this domain """
2620 overhead_kb = 0
2621 if arch.type == "x86":
2622 # 1MiB per vcpu plus 4KiB per MiB of RAM. This is higher than
2623 # the minimum that Xen would allocate if no value were given.
2624 overhead_kb = self.info['VCPUs_max'] * 1024 + \
2625 (self.info['memory_static_max'] / 1024 / 1024) * 4
2626 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
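# Example: 2 vcpus and 4 GiB of static-max RAM give
# 2*1024 + 4096*4 = 18432 KiB, already a whole number of MiB, so the
# round-up above leaves it unchanged.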
2627 # The domain might already have some shadow memory
2628 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
2629 if overhead_kb > 0:
2630 balloon.free(overhead_kb)
2632 def _unwatchVm(self):
2633 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2634 guarantee."""
2635 try:
2636 try:
2637 if self.vmWatch:
2638 self.vmWatch.unwatch()
2639 finally:
2640 self.vmWatch = None
2641 except:
2642 log.exception("Unwatching VM path failed.")
2644 def testDeviceComplete(self):
2645 """ For Block IO migration safety we must ensure that
2646 the device has shut down correctly, i.e. all blocks are
2647 flushed to disk
2648 """
2649 start = time.time()
2650 while True:
2651 test = 0
2652 diff = time.time() - start
2653 for i in self.getDeviceController('vbd').deviceIDs():
2654 test = 1
2655 log.info("Dev %s still active, looping...", i)
2656 time.sleep(0.1)
2658 if test == 0:
2659 break
2660 if diff >= MIGRATE_TIMEOUT:
2661 log.info("Dev still active but hit max loop timeout")
2662 break
2664 def testvifsComplete(self):
2665 """ In case vifs are released and then created for the same
2666 domain, we need to wait for the devices to shut down.
2667 """
2668 start = time.time()
2669 while True:
2670 test = 0
2671 diff = time.time() - start
2672 for i in self.getDeviceController('vif').deviceIDs():
2673 test = 1
2674 log.info("Dev %s still active, looping...", i)
2675 time.sleep(0.1)
2677 if test == 0:
2678 break
2679 if diff >= MIGRATE_TIMEOUT:
2680 log.info("Dev still active but hit max loop timeout")
2681 break
2683 def _storeVmDetails(self):
2684 to_store = {}
2686 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
2687 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
2688 if self._infoIsSet(info_key):
2689 to_store[key] = str(self.info[info_key])
2691 if self._infoIsSet("static_memory_min"):
2692 to_store["memory"] = str(self.info["static_memory_min"])
2693 if self._infoIsSet("static_memory_max"):
2694 to_store["maxmem"] = str(self.info["static_memory_max"])
2696 image_sxpr = self.info.image_sxpr()
2697 if image_sxpr:
2698 to_store['image'] = sxp.to_string(image_sxpr)
2700 if not self._readVm('xend/restart_count'):
2701 to_store['xend/restart_count'] = str(0)
2703 log.debug("Storing VM details: %s", scrub_password(to_store))
2705 self._writeVm(to_store)
2706 self._setVmPermissions()
2709 def _setVmPermissions(self):
2710 """Allow the guest domain to read its UUID. We don't allow it to
2711 access any other entry, for security."""
2712 xstransact.SetPermissions('%s/uuid' % self.vmpath,
2713 { 'dom' : self.domid,
2714 'read' : True,
2715 'write' : False })
2718 # Utility functions
2721 def __getattr__(self, name):
2722 if name == "state":
2723 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
2724 log.warn("".join(traceback.format_stack()))
2725 return self._stateGet()
2726 else:
2727 raise AttributeError()
2729 def __setattr__(self, name, value):
2730 if name == "state":
2731 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
2732 log.warn("".join(traceback.format_stack()))
2733 self._stateSet(value)
2734 else:
2735 self.__dict__[name] = value
2737 def _stateSet(self, state):
2738 self.state_updated.acquire()
2739 try:
2740 # TODO Not sure this is correct...
2741 # _stateGet is live now. Why not fire event
2742 # even when it hasn't changed?
2743 if self._stateGet() != state:
2744 self.state_updated.notifyAll()
2745 import XendAPI
2746 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
2747 'power_state')
2748 finally:
2749 self.state_updated.release()
2751 def _stateGet(self):
2752 # Let's try to reconstitute the state from xc
2753 # first lets try and get the domain info
2754 # from xc - this will tell us if the domain
2755 # exists
2756 info = dom_get(self.getDomid())
2757 if info is None or info['shutdown']:
2758 # We are either HALTED or SUSPENDED
2759 # check saved image exists
2760 from xen.xend import XendDomain
2761 managed_config_path = \
2762 XendDomain.instance()._managed_check_point_path( \
2763 self.get_uuid())
2764 if os.path.exists(managed_config_path):
2765 return XEN_API_VM_POWER_STATE_SUSPENDED
2766 else:
2767 return XEN_API_VM_POWER_STATE_HALTED
2768 elif info['crashed']:
2769 # Crashed
2770 return XEN_API_VM_POWER_STATE_CRASHED
2771 else:
2772 # We are either RUNNING or PAUSED
2773 if info['paused']:
2774 return XEN_API_VM_POWER_STATE_PAUSED
2775 else:
2776 return XEN_API_VM_POWER_STATE_RUNNING
2778 def _infoIsSet(self, name):
2779 return name in self.info and self.info[name] is not None
2781 def _checkName(self, name):
2782 """Check if a vm name is valid. Valid names contain alphabetic
2783 characters, digits, or characters in '_-.:/+'.
2784 The same name cannot be used for more than one vm at the same time.
2786 @param name: name
2787 @raise: VmError if invalid
2788 """
2789 from xen.xend import XendDomain
2791 if name is None or name == '':
2792 raise VmError('Missing VM Name')
2794 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
2795 raise VmError('Invalid VM Name')
2797 dom = XendDomain.instance().domain_lookup_nr(name)
2798 if dom and dom.info['uuid'] != self.info['uuid']:
2799 raise VmError("VM name '%s' already exists%s" %
2800 (name,
2801 dom.domid is not None and
2802 (" as domain %s" % str(dom.domid)) or ""))
2805 def update(self, info = None, refresh = True, transaction = None):
2806 """Update with info from xc.domain_getinfo().
2807 """
2808 log.trace("XendDomainInfo.update(%s) on domain %s", info,
2809 str(self.domid))
2811 if not info:
2812 info = dom_get(self.domid)
2813 if not info:
2814 return
2816 if info["maxmem_kb"] < 0:
2817 info["maxmem_kb"] = XendNode.instance() \
2818 .physinfo_dict()['total_memory'] * 1024
2820 #ssidref field not used any longer
2821 if 'ssidref' in info:
2822 info.pop('ssidref')
2824 # make sure state is reset for info
2825 # TODO: we should eventually get rid of old_dom_states
2827 self.info.update_config(info)
2828 self._update_consoles(transaction)
2830 if refresh:
2831 self.refreshShutdown(info)
2833 log.trace("XendDomainInfo.update done on domain %s: %s",
2834 str(self.domid), self.info)
2836 def sxpr(self, ignore_store = False, legacy_only = True):
2837 result = self.info.to_sxp(domain = self,
2838 ignore_devices = ignore_store,
2839 legacy_only = legacy_only)
2841 #if not ignore_store and self.dompath:
2842 # vnc_port = self.readDom('console/vnc-port')
2843 # if vnc_port is not None:
2844 # result.append(['device',
2845 # ['console', ['vnc-port', str(vnc_port)]]])
2847 return result
2849 # Xen API
2850 # ----------------------------------------------------------------
2852 def get_uuid(self):
2853 dom_uuid = self.info.get('uuid')
2854 if not dom_uuid: # if it doesn't exist, make one up
2855 dom_uuid = uuid.createString()
2856 self.info['uuid'] = dom_uuid
2857 return dom_uuid
2859 def get_memory_static_max(self):
2860 return self.info.get('memory_static_max', 0)
2861 def get_memory_static_min(self):
2862 return self.info.get('memory_static_min', 0)
2863 def get_memory_dynamic_max(self):
2864 return self.info.get('memory_dynamic_max', 0)
2865 def get_memory_dynamic_min(self):
2866 return self.info.get('memory_dynamic_min', 0)
2868 # only update memory-related config values if they maintain sanity
2869 def _safe_set_memory(self, key, newval):
2870 oldval = self.info.get(key, 0)
2871 try:
2872 self.info[key] = newval
2873 self.info._memory_sanity_check()
2874 except Exception, ex:
2875 self.info[key] = oldval
2876 raise
2878 def set_memory_static_max(self, val):
2879 self._safe_set_memory('memory_static_max', val)
2880 def set_memory_static_min(self, val):
2881 self._safe_set_memory('memory_static_min', val)
2882 def set_memory_dynamic_max(self, val):
2883 self._safe_set_memory('memory_dynamic_max', val)
2884 def set_memory_dynamic_min(self, val):
2885 self._safe_set_memory('memory_dynamic_min', val)
2887 def get_vcpus_params(self):
2888 if self.getDomid() is None:
2889 return self.info['vcpus_params']
2891 retval = xc.sched_credit_domain_get(self.getDomid())
2892 return retval
2893 def get_power_state(self):
2894 return XEN_API_VM_POWER_STATE[self._stateGet()]
2895 def get_platform(self):
2896 return self.info.get('platform', {})
2897 def get_pci_bus(self):
2898 return self.info.get('pci_bus', '')
2899 def get_tools_version(self):
2900 return self.info.get('tools_version', {})
2901 def get_metrics(self):
2902 return self.metrics.get_uuid();
2905 def get_security_label(self, xspol=None):
2906 import xen.util.xsm.xsm as security
2907 label = security.get_security_label(self, xspol)
2908 return label
2910 def set_security_label(self, seclab, old_seclab, xspol=None,
2911 xspol_old=None):
2912 """
2913 Set the security label of a domain from its old to
2914 a new value.
2915 @param seclab New security label formatted in the form
2916 <policy type>:<policy name>:<vm label>
2917 @param old_seclab The current security label that the
2918 VM must have.
2919 @param xspol An optional policy under which this
2920 update should be done. If not given,
2921 then the current active policy is used.
2922 @param xspol_old The old policy; only to be passed during
2923 the updating of a policy
2924 @return Returns return code, a string with errors from
2925 the hypervisor's operation, old label of the
2926 domain
2927 """
2928 rc = 0
2929 errors = ""
2930 old_label = ""
2931 new_ssidref = 0
2932 domid = self.getDomid()
2933 res_labels = None
2934 is_policy_update = (xspol_old != None)
2936 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
2938 state = self._stateGet()
2939 # Relabel only HALTED or RUNNING or PAUSED domains
2940 if domid != 0 and \
2941 state not in \
2942 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
2943 DOM_STATE_SUSPENDED ]:
2944 log.warn("Relabeling domain not possible in state '%s'" %
2945 DOM_STATES[state])
2946 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
2948 # Remove security label. Works only for halted or suspended domains
2949 if not seclab or seclab == "":
2950 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
2951 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
2953 if self.info.has_key('security_label'):
2954 old_label = self.info['security_label']
2955 # Check label against expected one.
2956 if old_label != old_seclab:
2957 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2958 del self.info['security_label']
2959 xen.xend.XendDomain.instance().managed_config_save(self)
2960 return (xsconstants.XSERR_SUCCESS, "", "", 0)
2962 tmp = seclab.split(":")
2963 if len(tmp) != 3:
2964 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
2965 typ, policy, label = tmp
2967 poladmin = XSPolicyAdminInstance()
2968 if not xspol:
2969 xspol = poladmin.get_policy_by_name(policy)
2971 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
2972 # If the domain is running or paused, try to relabel it in the hypervisor
2973 if not xspol:
2974 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
2976 if typ != xspol.get_type_name() or \
2977 policy != xspol.get_name():
2978 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2980 if typ == xsconstants.ACM_POLICY_ID:
2981 new_ssidref = xspol.vmlabel_to_ssidref(label)
2982 if new_ssidref == xsconstants.INVALID_SSIDREF:
2983 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2985 # Check that all used resources are accessible under the
2986 # new label
2987 if not is_policy_update and \
2988 not security.resources_compatible_with_vmlabel(xspol,
2989 self, label):
2990 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2992 #Check label against expected one. Can only do this
2993 # if the policy hasn't changed underneath in the meantime
2994 if xspol_old == None:
2995 old_label = self.get_security_label()
2996 if old_label != old_seclab:
2997 log.info("old_label != old_seclab: %s != %s" %
2998 (old_label, old_seclab))
2999 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3001 # relabel domain in the hypervisor
3002 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3003 log.info("rc from relabeling in HV: %d" % rc)
3004 else:
3005 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3007 if rc == 0:
3008 # HALTED, RUNNING or PAUSED
3009 if domid == 0:
3010 if xspol:
3011 self.info['security_label'] = seclab
3012 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3013 else:
3014 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3015 else:
3016 if self.info.has_key('security_label'):
3017 old_label = self.info['security_label']
3018 # Check label against expected one, unless wildcard
3019 if old_label != old_seclab:
3020 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3022 self.info['security_label'] = seclab
3024 try:
3025 xen.xend.XendDomain.instance().managed_config_save(self)
3026 except:
3027 pass
3028 return (rc, errors, old_label, new_ssidref)
3030 def get_on_shutdown(self):
3031 after_shutdown = self.info.get('actions_after_shutdown')
3032 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3033 return XEN_API_ON_NORMAL_EXIT[-1]
3034 return after_shutdown
3036 def get_on_reboot(self):
3037 after_reboot = self.info.get('actions_after_reboot')
3038 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3039 return XEN_API_ON_NORMAL_EXIT[-1]
3040 return after_reboot
3042 def get_on_suspend(self):
3043 # TODO: not supported
3044 after_suspend = self.info.get('actions_after_suspend')
3045 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3046 return XEN_API_ON_NORMAL_EXIT[-1]
3047 return after_suspend
3049 def get_on_crash(self):
3050 after_crash = self.info.get('actions_after_crash')
3051 if not after_crash or after_crash not in \
3052 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3053 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3054 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3056 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3057 """ Get's a device configuration either from XendConfig or
3058 from the DevController.
3060 @param dev_class: device class, either, 'vbd' or 'vif'
3061 @param dev_uuid: device UUID
3063 @rtype: dictionary
3064 """
3065 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3067 # shortcut if the domain isn't started because
3068 # the devcontrollers will have no better information
3069 # than XendConfig.
3070 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3071 XEN_API_VM_POWER_STATE_SUSPENDED):
3072 if dev_config:
3073 return copy.deepcopy(dev_config)
3074 return None
3076 # instead of using dev_class, we use the dev_type
3077 # that is from XendConfig.
3078 controller = self.getDeviceController(dev_type)
3079 if not controller:
3080 return None
3082 all_configs = controller.getAllDeviceConfigurations()
3083 if not all_configs:
3084 return None
3086 updated_dev_config = copy.deepcopy(dev_config)
3087 for _devid, _devcfg in all_configs.items():
3088 if _devcfg.get('uuid') == dev_uuid:
3089 updated_dev_config.update(_devcfg)
3090 updated_dev_config['id'] = _devid
3091 return updated_dev_config
3093 return updated_dev_config
3095 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3096 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3097 if not config:
3098 return {}
3100 config['VM'] = self.get_uuid()
3102 if dev_class == 'vif':
3103 if not config.has_key('name'):
3104 config['name'] = config.get('vifname', '')
3105 if not config.has_key('MAC'):
3106 config['MAC'] = config.get('mac', '')
3107 if not config.has_key('type'):
3108 config['type'] = 'paravirtualised'
3109 if not config.has_key('device'):
3110 devid = config.get('id')
3111 if devid != None:
3112 config['device'] = 'eth%d' % devid
3113 else:
3114 config['device'] = ''
3116 if not config.has_key('network'):
3117 try:
3118 bridge = config.get('bridge', None)
3119 if bridge is None:
3120 from xen.util import Brctl
3121 if_to_br = dict([(i,b)
3122 for (b,ifs) in Brctl.get_state().items()
3123 for i in ifs])
3124 vifname = "vif%s.%s" % (self.getDomid(),
3125 config.get('id'))
3126 bridge = if_to_br.get(vifname, None)
3127 config['network'] = \
3128 XendNode.instance().bridge_to_network(
3129 config.get('bridge')).get_uuid()
3130 except Exception:
3131 log.exception('bridge_to_network')
3132 # Ignore this for now -- it may happen if the device
3133 # has been specified using the legacy methods, but at
3134 # some point we're going to have to figure out how to
3135 # handle that properly.
3137 config['MTU'] = 1500 # TODO
3139 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3140 xennode = XendNode.instance()
3141 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3142 config['io_read_kbs'] = rx_bps/1024
3143 config['io_write_kbs'] = tx_bps/1024
3144 rx, tx = xennode.get_vif_stat(self.domid, devid)
3145 config['io_total_read_kbs'] = rx/1024
3146 config['io_total_write_kbs'] = tx/1024
3147 else:
3148 config['io_read_kbs'] = 0.0
3149 config['io_write_kbs'] = 0.0
3150 config['io_total_read_kbs'] = 0.0
3151 config['io_total_write_kbs'] = 0.0
3153 config['security_label'] = config.get('security_label', '')
3155 if dev_class == 'vbd':
3157 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3158 controller = self.getDeviceController(dev_class)
3159 devid, _1, _2 = controller.getDeviceDetails(config)
3160 xennode = XendNode.instance()
3161 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3162 config['io_read_kbs'] = rd_blkps
3163 config['io_write_kbs'] = wr_blkps
3164 else:
3165 config['io_read_kbs'] = 0.0
3166 config['io_write_kbs'] = 0.0
3168 config['VDI'] = config.get('VDI', '')
3169 config['device'] = config.get('dev', '')
3170 if ':' in config['device']:
3171 vbd_name, vbd_type = config['device'].split(':', 1)
3172 config['device'] = vbd_name
3173 if vbd_type == 'cdrom':
3174 config['type'] = XEN_API_VBD_TYPE[0]
3175 else:
3176 config['type'] = XEN_API_VBD_TYPE[1]
3178 config['driver'] = 'paravirtualised' # TODO
3179 config['image'] = config.get('uname', '')
3181 if config.get('mode', 'r') == 'r':
3182 config['mode'] = 'RO'
3183 else:
3184 config['mode'] = 'RW'
3186 if dev_class == 'vtpm':
3187 if not config.has_key('type'):
3188 config['type'] = 'paravirtualised' # TODO
3189 if not config.has_key('backend'):
3190 config['backend'] = "00000000-0000-0000-0000-000000000000"
3192 return config
3194 def get_dev_property(self, dev_class, dev_uuid, field):
3195 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3196 try:
3197 return config[field]
3198 except KeyError:
3199 raise XendError('Invalid property for device: %s' % field)
3201 def set_dev_property(self, dev_class, dev_uuid, field, value):
3202 self.info['devices'][dev_uuid][1][field] = value
3204 def get_vcpus_util(self):
3205 vcpu_util = {}
3206 xennode = XendNode.instance()
3207 if 'VCPUs_max' in self.info and self.domid != None:
3208 for i in range(0, self.info['VCPUs_max']):
3209 util = xennode.get_vcpu_util(self.domid, i)
3210 vcpu_util[str(i)] = util
3212 return vcpu_util
3214 def get_consoles(self):
3215 return self.info.get('console_refs', [])
3217 def get_vifs(self):
3218 return self.info.get('vif_refs', [])
3220 def get_vbds(self):
3221 return self.info.get('vbd_refs', [])
3223 def get_vtpms(self):
3224 return self.info.get('vtpm_refs', [])
3226 def get_dpcis(self):
3227 return XendDPCI.get_by_VM(self.info.get('uuid'))
3229 def create_vbd(self, xenapi_vbd, vdi_image_path):
3230 """Create a VBD using a VDI from XendStorageRepository.
3232 @param xenapi_vbd: vbd struct from the Xen API
3233 @param vdi_image_path: VDI UUID
3234 @rtype: string
3235 @return: uuid of the device
3236 """
3237 xenapi_vbd['image'] = vdi_image_path
3238 if vdi_image_path.startswith('tap'):
3239 dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
3240 else:
3241 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3243 if not dev_uuid:
3244 raise XendError('Failed to create device')
3246 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3247 XEN_API_VM_POWER_STATE_PAUSED):
3248 _, config = self.info['devices'][dev_uuid]
3250 if vdi_image_path.startswith('tap'):
3251 dev_control = self.getDeviceController('tap')
3252 else:
3253 dev_control = self.getDeviceController('vbd')
3255 try:
3256 devid = dev_control.createDevice(config)
3257 dev_control.waitForDevice(devid)
3258 self.info.device_update(dev_uuid,
3259 cfg_xenapi = {'devid': devid})
3260 except Exception, exn:
3261 log.exception(exn)
3262 del self.info['devices'][dev_uuid]
3263 self.info['vbd_refs'].remove(dev_uuid)
3264 raise
3266 return dev_uuid
3268 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3269 """Create a VBD using a VDI from XendStorageRepository.
3271 @param xenapi_vbd: vbd struct from the Xen API
3272 @param vdi_image_path: VDI UUID
3273 @rtype: string
3274 @return: uuid of the device
3275 """
3276 xenapi_vbd['image'] = vdi_image_path
3277 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3278 if not dev_uuid:
3279 raise XendError('Failed to create device')
3281 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3282 _, config = self.info['devices'][dev_uuid]
3283 config['devid'] = self.getDeviceController('tap').createDevice(config)
3285 return config['devid']
3287 def create_vif(self, xenapi_vif):
3288 """Create VIF device from the passed struct in Xen API format.
3290 @param xenapi_vif: Xen API VIF Struct.
3291 @rtype: string
3292 @return: UUID
3293 """
3294 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3295 if not dev_uuid:
3296 raise XendError('Failed to create device')
3298 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3299 XEN_API_VM_POWER_STATE_PAUSED):
3301 _, config = self.info['devices'][dev_uuid]
3302 dev_control = self.getDeviceController('vif')
3304 try:
3305 devid = dev_control.createDevice(config)
3306 dev_control.waitForDevice(devid)
3307 self.info.device_update(dev_uuid,
3308 cfg_xenapi = {'devid': devid})
3309 except Exception, exn:
3310 log.exception(exn)
3311 del self.info['devices'][dev_uuid]
3312 self.info['vif_refs'].remove(dev_uuid)
3313 raise
3315 return dev_uuid
3317 def create_vtpm(self, xenapi_vtpm):
3318 """Create a VTPM device from the passed struct in Xen API format.
3320 @return: uuid of the device
3321 @rtype: string
3322 """
3324 if self._stateGet() not in (DOM_STATE_HALTED,):
3325 raise VmError("Can only add vTPM to a halted domain.")
3326 if self.get_vtpms() != []:
3327 raise VmError('Domain already has a vTPM.')
3328 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3329 if not dev_uuid:
3330 raise XendError('Failed to create device')
3332 return dev_uuid
3334 def create_console(self, xenapi_console):
3335 """ Create a console device from a Xen API struct.
3337 @return: uuid of device
3338 @rtype: string
3339 """
3340 if self._stateGet() not in (DOM_STATE_HALTED,):
3341 raise VmError("Can only add console to a halted domain.")
3343 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3344 if not dev_uuid:
3345 raise XendError('Failed to create device')
3347 return dev_uuid
3349 def set_console_other_config(self, console_uuid, other_config):
3350 self.info.console_update(console_uuid, 'other_config', other_config)
3352 def create_dpci(self, xenapi_pci):
3353 """Create pci device from the passed struct in Xen API format.
3355 @param xenapi_pci: DPCI struct from Xen API
3356 @rtype: bool
3357 #@rtype: string
3358 @return: True if successfully created device
3359 #@return: UUID
3360 """
3362 dpci_uuid = uuid.createString()
3364 # Convert xenapi to sxp
3365 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3367 target_pci_sxp = \
3368 ['pci',
3369 ['dev',
3370 ['domain', '0x%02x' % ppci.get_domain()],
3371 ['bus', '0x%02x' % ppci.get_bus()],
3372 ['slot', '0x%02x' % ppci.get_slot()],
3373 ['func', '0x%1x' % ppci.get_func()],
3374 ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3375 ['uuid', dpci_uuid]
3376 ],
3377 ['state', 'Initialising']
3378 ]
3380 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3382 old_pci_sxp = self._getDeviceInfo_pci(0)
3384 if old_pci_sxp is None:
3385 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3386 if not dev_uuid:
3387 raise XendError('Failed to create device')
3389 else:
3390 new_pci_sxp = ['pci']
3391 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3392 new_pci_sxp.append(existing_dev)
3393 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3395 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3396 self.info.device_update(dev_uuid, new_pci_sxp)
3398 xen.xend.XendDomain.instance().managed_config_save(self)
3400 else:
3401 try:
3402 self.device_configure(target_pci_sxp)
3404 except Exception, exn:
3405 raise XendError('Failed to create device')
3407 return dpci_uuid
3410 def destroy_device_by_uuid(self, dev_type, dev_uuid):
3411 if dev_uuid not in self.info['devices']:
3412 raise XendError('Device does not exist')
3414 try:
3415 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3416 XEN_API_VM_POWER_STATE_PAUSED):
3417 _, config = self.info['devices'][dev_uuid]
3418 devid = config.get('devid')
3419 if devid != None:
3420 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
3421 else:
3422 raise XendError('Unable to get devid for device: %s:%s' %
3423 (dev_type, dev_uuid))
3424 finally:
3425 del self.info['devices'][dev_uuid]
3426 self.info['%s_refs' % dev_type].remove(dev_uuid)
3428 def destroy_vbd(self, dev_uuid):
3429 self.destroy_device_by_uuid('vbd', dev_uuid)
3431 def destroy_vif(self, dev_uuid):
3432 self.destroy_device_by_uuid('vif', dev_uuid)
3434 def destroy_vtpm(self, dev_uuid):
3435 self.destroy_device_by_uuid('vtpm', dev_uuid)
3437 def destroy_dpci(self, dev_uuid):
3439 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
3440 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
3442 old_pci_sxp = self._getDeviceInfo_pci(0)
3443 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3444 target_dev = None
3445 new_pci_sxp = ['pci']
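# Rebuild the pci sxp keeping every passed-through function except the
# one matching the PPCI being detached; that one becomes the target of
# the Closing request below.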
3446 for dev in sxp.children(old_pci_sxp, 'dev'):
3447 domain = int(sxp.child_value(dev, 'domain'), 16)
3448 bus = int(sxp.child_value(dev, 'bus'), 16)
3449 slot = int(sxp.child_value(dev, 'slot'), 16)
3450 func = int(sxp.child_value(dev, 'func'), 16)
3451 name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
3452 if ppci.get_name() == name:
3453 target_dev = dev
3454 else:
3455 new_pci_sxp.append(dev)
3457 if target_dev is None:
3458 raise XendError('Failed to destroy device')
3460 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
3462 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3464 self.info.device_update(dev_uuid, new_pci_sxp)
3465 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
3466 del self.info['devices'][dev_uuid]
3467 xen.xend.XendDomain.instance().managed_config_save(self)
3469 else:
3470 try:
3471 self.device_configure(target_pci_sxp)
3473 except Exception, exn:
3474 raise XendError('Failed to destroy device')
3476 def destroy_xapi_device_instances(self):
3477 """Destroy Xen-API device instances stored in XendAPIStore.
3478 """
3479 # Xen-API classes based on XendBase have their instances stored
3480 # in XendAPIStore. Cleanup these virtual device instances here
3481 # if they are supposed to be destroyed when the parent domain is dead.
3483 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3484 # XendBase and there's no need to remove them from XendAPIStore.
3486 from xen.xend import XendDomain
3487 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3488 # domain still exists.
3489 return
3491 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3492 XendAPIStore.deregister(dpci_uuid, "DPCI")
3494 def has_device(self, dev_class, dev_uuid):
3495 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3497 def __str__(self):
3498 return '<domain id=%s name=%s memory=%s state=%s>' % \
3499 (str(self.domid), self.info['name_label'],
3500 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3502 __repr__ = __str__