ia64/xen-unstable: tools/python/xen/xend/XendDomainInfo.py @ 18033:1e91c9a4a3f3

changeset:  Fix memory leak in xend
            Signed-off-by: Jim Fehlig <jfehlig@novell.com>
author:     Keir Fraser <keir.fraser@citrix.com>
date:       Fri Jul 11 12:44:15 2008 +0100 (2008-07-11)
parents:    823caffa7ddf
children:   f40c310dca31
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
56 from xen.xend.XendVMMetrics import XendVMMetrics
58 from xen.xend.XendPPCI import XendPPCI
59 from xen.xend.XendDPCI import XendDPCI
60 from xen.xend import XendAPIStore
62 MIGRATE_TIMEOUT = 30.0
63 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
65 xc = xen.lowlevel.xc.xc()
66 xoptions = XendOptions.instance()
68 log = logging.getLogger("xend.XendDomainInfo")
69 #log.setLevel(logging.TRACE)
72 def create(config):
73 """Creates and starts a VM using the supplied configuration.
75 @param config: An SXP configuration object (nested lists).
76 @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
78 @rtype: XendDomainInfo
79 @return: An up and running XendDomainInfo instance
80 @raise VmError: Invalid configuration or failure to start.
81 """
82 from xen.xend import XendDomain
83 domconfig = XendConfig.XendConfig(sxp_obj = config)
84 othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
85 if othervm is None or othervm.domid is None:
86 othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
87 if othervm is not None and othervm.domid is not None:
88 raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
89 log.debug("XendDomainInfo.create(%s)", scrub_password(config))
90 vm = XendDomainInfo(domconfig)
91 try:
92 vm.start()
93 except:
94 log.exception('Domain construction failed')
95 vm.destroy()
96 raise
98 return vm
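# Usage sketch (illustrative only; the config below is a hypothetical minimal
# SXP, real ones are produced by xm/XendConfig):
#
#     from xen.xend import XendDomainInfo
#
#     sxp_cfg = ['vm',
#                ['name', 'example-guest'],
#                ['memory', 256],
#                ['image', ['linux', ['kernel', '/boot/vmlinuz-2.6-xen']]]]
#     vm = XendDomainInfo.create(sxp_cfg)   # constructs the domain and calls start()
#     print vm.getDomid(), vm.getName()     # domid is assigned once the domain runs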
100 def create_from_dict(config_dict):
101 """Creates and starts a VM using the supplied configuration.
103 @param config_dict: A configuration dictionary.
105 @rtype: XendDomainInfo
106 @return: An up and running XendDomainInfo instance
107 @raise VmError: Invalid configuration or failure to start.
108 """
110 log.debug("XendDomainInfo.create_from_dict(%s)",
111 scrub_password(config_dict))
112 vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
113 try:
114 vm.start()
115 except:
116 log.exception('Domain construction failed')
117 vm.destroy()
118 raise
119 return vm
121 def recreate(info, priv):
122 """Create the VM object for an existing domain. The domain must not
123 be dying, as the paths in the store should already have been removed,
124 and asking us to recreate them causes problems.
126 @param info: Parsed configuration
127 @type info: dictionary
128 @param priv: Is a privileged domain (Dom 0)
129 @type priv: bool
131 @rtype: XendDomainInfo
132 @return: An up and running XendDomainInfo instance
133 @raise VmError: Invalid configuration.
134 @raise XendError: Errors with configuration.
135 """
137 log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))
139 assert not info['dying']
141 xeninfo = XendConfig.XendConfig(dominfo = info)
142 xeninfo['is_control_domain'] = priv
143 xeninfo['is_a_template'] = False
144 domid = xeninfo['domid']
145 uuid1 = uuid.fromString(xeninfo['uuid'])
146 needs_reinitialising = False
148 dompath = GetDomainPath(domid)
149 if not dompath:
150 raise XendError('No domain path in store for existing '
151 'domain %d' % domid)
153 log.info("Recreating domain %d, UUID %s, at %s" %
154 (domid, xeninfo['uuid'], dompath))
156 # need to verify the path and uuid if not Domain-0
157 # if the required uuid and vm aren't set, then that means
158 # we need to recreate the dom with our own values
159 #
160 # NOTE: this is probably not desirable, really we should just
161 # abort or ignore, but there may be cases where xenstore's
162 # entry disappears (eg. xenstore-rm /)
163 #
164 try:
165 vmpath = xstransact.Read(dompath, "vm")
166 if not vmpath:
167 if not priv:
168 log.warn('/local/domain/%d/vm is missing. recreate is '
169 'confused, trying our best to recover' % domid)
170 needs_reinitialising = True
171 raise XendError('reinit')
173 uuid2_str = xstransact.Read(vmpath, "uuid")
174 if not uuid2_str:
175 log.warn('%s/uuid/ is missing. recreate is confused, '
176 'trying our best to recover' % vmpath)
177 needs_reinitialising = True
178 raise XendError('reinit')
180 uuid2 = uuid.fromString(uuid2_str)
181 if uuid1 != uuid2:
182 log.warn('UUID in /vm does not match the UUID in /dom/%d. '
183 'Trying our best to recover' % domid)
184 needs_reinitialising = True
185 except XendError:
186 pass # our best shot at 'goto' in python :)
188 vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
189 vmpath = vmpath)
191 if needs_reinitialising:
192 vm._recreateDom()
193 vm._removeVm()
194 vm._storeVmDetails()
195 vm._storeDomDetails()
197 vm.image = image.create(vm, vm.info)
198 vm.image.recreate()
200 vm._registerWatches()
201 vm.refreshShutdown(xeninfo)
203 # register the domain in the list
204 from xen.xend import XendDomain
205 XendDomain.instance().add_domain(vm)
207 return vm
210 def restore(config):
211 """Create a domain and a VM object to do a restore.
213 @param config: Domain SXP configuration
214 @type config: list of lists. (see C{create})
216 @rtype: XendDomainInfo
217 @return: An up and running XendDomainInfo instance
218 @raise VmError: Invalid configuration or failure to start.
219 @raise XendError: Errors with configuration.
220 """
222 log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
223 vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
224 resume = True)
225 try:
226 vm.resume()
227 return vm
228 except:
229 vm.destroy()
230 raise
232 def createDormant(domconfig):
233 """Create a dormant/inactive XendDomainInfo without creating the VM.
234 This is for creating instances of persistent domains that are not
235 yet started.
237 @param domconfig: Parsed configuration
238 @type domconfig: XendConfig object
240 @rtype: XendDomainInfo
241 @return: A dormant XendDomainInfo instance
242 @raise XendError: Errors with configuration.
243 """
245 log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))
247 # domid does not make sense for non-running domains.
248 domconfig.pop('domid', None)
249 vm = XendDomainInfo(domconfig)
250 return vm
252 def domain_by_name(name):
253 """Get domain by name
255 @param name: Name of the domain
256 @type name: string
257 @return: XendDomainInfo or None
258 """
259 from xen.xend import XendDomain
260 return XendDomain.instance().domain_lookup_by_name_nr(name)
263 def shutdown_reason(code):
264 """Get a shutdown reason from a code.
266 @param code: shutdown code
267 @type code: int
268 @return: shutdown reason
269 @rtype: string
270 """
271 return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
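# Example (values shown are illustrative; the real mapping is
# DOMAIN_SHUTDOWN_REASONS from XendConstants):
#
#     shutdown_reason(0)     # -> e.g. 'poweroff'
#     shutdown_reason(255)   # -> '?' for any code not in the mapping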
273 def dom_get(dom):
274 """Get info from xen for an existing domain.
276 @param dom: domain id
277 @type dom: int
278 @return: info or None
279 @rtype: dictionary
280 """
281 try:
282 domlist = xc.domain_getinfo(dom, 1)
283 if domlist and dom == domlist[0]['domid']:
284 return domlist[0]
285 except Exception, err:
286 # ignore missing domain
287 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
288 return None
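# Sketch: probing whether a domain id is still known to the hypervisor
# (domid 5 is a hypothetical example):
#
#     info = dom_get(5)
#     if info is None:
#         log.debug("domain 5 no longer exists")
#     else:
#         log.debug("domain 5 dying=%s shutdown=%s",
#                   info['dying'], info['shutdown'])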
291 class XendDomainInfo:
292 """An object representing a domain.
294 @TODO: try to unify dom and domid, they mean the same thing, but
295 xc refers to it as dom, and everywhere else, including
296 xenstore it is domid. The best way is to change xc's
297 python interface.
299 @ivar info: Parsed configuration
300 @type info: dictionary
301 @ivar domid: Domain ID (if VM has started)
302 @type domid: int or None
303 @ivar vmpath: XenStore path to this VM.
304 @type vmpath: string
305 @ivar dompath: XenStore path to this Domain.
306 @type dompath: string
307 @ivar image: Reference to the VM Image.
308 @type image: xen.xend.image.ImageHandler
309 @ivar store_port: event channel to xenstored
310 @type store_port: int
311 @ivar console_port: event channel to xenconsoled
312 @type console_port: int
313 @ivar store_mfn: xenstored mfn
314 @type store_mfn: int
315 @ivar console_mfn: xenconsoled mfn
316 @type console_mfn: int
317 @ivar notes: OS image notes
318 @type notes: dictionary
319 @ivar vmWatch: reference to a watch on the xenstored vmpath
320 @type vmWatch: xen.xend.xenstore.xswatch
321 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
322 @type shutdownWatch: xen.xend.xenstore.xswatch
323 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
324 @type shutdownStartTime: float or None
325 # @ivar state: Domain state
326 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
327 @ivar state_updated: lock for self.state
328 @type state_updated: threading.Condition
329 @ivar refresh_shutdown_lock: lock for polling shutdown state
330 @type refresh_shutdown_lock: threading.Condition
331 @ivar _deviceControllers: device controller cache for this domain
332 @type _deviceControllers: dict 'string' to DevControllers
333 """
335 def __init__(self, info, domid = None, dompath = None, augment = False,
336 priv = False, resume = False, vmpath = None):
337 """Constructor for a domain
339 @param info: parsed configuration
340 @type info: dictionary
341 @keyword domid: Set initial domain id (if any)
342 @type domid: int
343 @keyword dompath: Set initial dompath (if any)
344 @type dompath: string
345 @keyword augment: Augment given info with xenstored VM info
346 @type augment: bool
347 @keyword priv: Is a privileged domain (Dom 0)
348 @type priv: bool
349 @keyword resume: Is this domain being resumed?
350 @type resume: bool
351 """
353 self.info = info
354 if domid == None:
355 self.domid = self.info.get('domid')
356 else:
357 self.domid = domid
359 #REMOVE: uuid is now generated in XendConfig
360 #if not self._infoIsSet('uuid'):
361 # self.info['uuid'] = uuid.toString(uuid.create())
363 # Find a unique /vm/<uuid>/<integer> path if not specified.
364 # This avoids conflict between pre-/post-migrate domains when doing
365 # localhost relocation.
366 self.vmpath = vmpath
367 i = 0
368 while self.vmpath == None:
369 self.vmpath = XS_VMROOT + self.info['uuid']
370 if i != 0:
371 self.vmpath = self.vmpath + '-' + str(i)
372 try:
373 if self._readVm("uuid"):
374 self.vmpath = None
375 i = i + 1
376 except:
377 pass
379 self.dompath = dompath
381 self.image = None
382 self.store_port = None
383 self.store_mfn = None
384 self.console_port = None
385 self.console_mfn = None
387 self.native_protocol = None
389 self.vmWatch = None
390 self.shutdownWatch = None
391 self.shutdownStartTime = None
392 self._resume = resume
394 self.state_updated = threading.Condition()
395 self.refresh_shutdown_lock = threading.Condition()
396 self._stateSet(DOM_STATE_HALTED)
398 self._deviceControllers = {}
400 for state in DOM_STATES_OLD:
401 self.info[state] = 0
403 if augment:
404 self._augmentInfo(priv)
406 self._checkName(self.info['name_label'])
408 self.metrics = XendVMMetrics(uuid.createString(), self)
411 #
412 # Public functions available through XMLRPC
413 #
416 def start(self, is_managed = False):
417 """Attempts to start the VM by doing the appropriate
418 initialisation if it is not already started.
419 """
420 from xen.xend import XendDomain
422 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
423 try:
424 XendTask.log_progress(0, 30, self._constructDomain)
425 XendTask.log_progress(31, 60, self._initDomain)
427 XendTask.log_progress(61, 70, self._storeVmDetails)
428 XendTask.log_progress(71, 80, self._storeDomDetails)
429 XendTask.log_progress(81, 90, self._registerWatches)
430 XendTask.log_progress(91, 100, self.refreshShutdown)
432 xendomains = XendDomain.instance()
433 xennode = XendNode.instance()
435 # save the running configuration if XendDomain believes the
436 # domain is persistent
437 if is_managed:
438 xendomains.managed_config_save(self)
440 if xennode.xenschedinfo() == 'credit':
441 xendomains.domain_sched_credit_set(self.getDomid(),
442 self.getWeight(),
443 self.getCap())
444 except:
445 log.exception('VM start failed')
446 self.destroy()
447 raise
448 else:
449 raise XendError('VM already running')
451 def resume(self):
452 """Resumes a domain that has come back from suspension."""
453 state = self._stateGet()
454 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
455 try:
456 self._constructDomain()
457 self._storeVmDetails()
458 self._createDevices()
459 self._createChannels()
460 self._storeDomDetails()
461 self._endRestore()
462 except:
463 log.exception('VM resume failed')
464 self.destroy()
465 raise
466 else:
467 raise XendError('VM is not suspended; it is %s'
468 % XEN_API_VM_POWER_STATE[state])
470 def shutdown(self, reason):
471 """Shut down the domain by signalling it via xenstored."""
472 log.debug('XendDomainInfo.shutdown(%s)', reason)
473 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
474 raise XendError('Domain cannot be shutdown')
476 if self.domid == 0:
477 raise XendError('Domain 0 cannot be shutdown')
479 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
480 raise XendError('Invalid reason: %s' % reason)
481 self._removeVm('xend/previous_restart_time')
482 self.storeDom("control/shutdown", reason)
484 # HVM domain shuts itself down only if it has PV drivers
485 if self.info.is_hvm():
486 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
487 if not hvm_pvdrv:
488 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
489 log.info("HVM save:remote shutdown dom %d!", self.domid)
490 xc.domain_shutdown(self.domid, code)
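# Usage sketch (assumes a running, non-privileged guest named 'example-guest');
# the reason string must be one of DOMAIN_SHUTDOWN_REASONS.values():
#
#     vm = domain_by_name('example-guest')
#     if vm is not None:
#         vm.shutdown('reboot')    # writes control/shutdown = 'reboot' in xenstore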
492 def pause(self):
493 """Pause the domain.
495 @raise XendError: if pausing the domain fails
496 """
497 try:
498 xc.domain_pause(self.domid)
499 self._stateSet(DOM_STATE_PAUSED)
500 except Exception, ex:
501 log.exception(ex)
502 raise XendError("Domain unable to be paused: %s" % str(ex))
504 def unpause(self):
505 """Unpause the domain.
507 @raise XendError: if unpausing the domain fails
508 """
509 try:
510 xc.domain_unpause(self.domid)
511 self._stateSet(DOM_STATE_RUNNING)
512 except Exception, ex:
513 log.exception(ex)
514 raise XendError("Domain unable to be unpaused: %s" % str(ex))
516 def send_sysrq(self, key):
517 """ Send a Sysrq equivalent key via xenstored."""
518 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
519 raise XendError("Domain '%s' is not started" % self.info['name_label'])
521 asserts.isCharConvertible(key)
522 self.storeDom("control/sysrq", '%c' % key)
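# Sketch: sending sysrq 's' (emergency sync) to a running guest; the guest
# kernel must have sysrq support for this to have any effect.
#
#     vm.send_sysrq(ord('s'))      # stored as control/sysrq = 's'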
524 def sync_pcidev_info(self):
526 if not self.info.is_hvm():
527 return
529 devid = '0'
530 dev_info = self._getDeviceInfo_pci(devid)
531 if dev_info is None:
532 return
534 # get the virtual slot info from xenstore
535 dev_uuid = sxp.child_value(dev_info, 'uuid')
536 pci_conf = self.info['devices'][dev_uuid][1]
537 pci_devs = pci_conf['devs']
539 count = 0
540 vslots = None
541 while vslots is None and count < 20:
542 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
543 % (self.getDomid(), devid))
544 time.sleep(0.1)
545 count += 1
546 if vslots is None:
547 log.error("Device model did not report vslots for the PCI device")
548 return
550 #delete last delim
551 if vslots[-1] == ";":
552 vslots = vslots[:-1]
554 slot_list = vslots.split(';')
555 if len(slot_list) != len(pci_devs):
556 log.error("Device model's PCI device count mismatch")
557 return
559 # update the vslot info
560 count = 0
561 for x in pci_devs:
562 x['vslt'] = slot_list[count]
563 count += 1
566 def hvm_pci_device_create(self, dev_config):
567 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
568 % scrub_password(dev_config))
570 if not self.info.is_hvm():
571 raise VmError("hvm_pci_device_create called on non-HVM guest")
573 #all the PCI devs share one conf node
574 devid = '0'
576 new_dev = dev_config['devs'][0]
577 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
579 # check for conflicts before triggering the hotplug event
580 if dev_info is not None:
581 dev_uuid = sxp.child_value(dev_info, 'uuid')
582 pci_conf = self.info['devices'][dev_uuid][1]
583 pci_devs = pci_conf['devs']
584 for x in pci_devs:
585 if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
586 int(x['vslt'], 16) != 0 ):
587 raise VmError("vslot %s already has a device." % (new_dev['vslt']))
589 if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
590 int(x['bus'], 16) == int(new_dev['bus'], 16) and
591 int(x['slot'], 16) == int(new_dev['slot'], 16) and
592 int(x['func'], 16) == int(new_dev['func'], 16) ):
593 raise VmError("device is already inserted")
595 # Test whether the devices can be assigned with VT-d
596 pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
597 new_dev['bus'],
598 new_dev['slot'],
599 new_dev['func'])
600 bdf = xc.test_assign_device(self.domid, pci_str)
601 if bdf != 0:
602 bus = (bdf >> 16) & 0xff
603 devfn = (bdf >> 8) & 0xff
604 dev = (devfn >> 3) & 0x1f
605 func = devfn & 0x7
606 raise VmError("Failed to hot insert device (%x:%x.%x): maybe VT-d is "
607 "not enabled, or the device does not exist, or it "
608 "has already been assigned to another domain"
609 % (bus, dev, func))
611 bdf_str = "%s:%s:%s.%s@%s" % (new_dev['domain'],
612 new_dev['bus'],
613 new_dev['slot'],
614 new_dev['func'],
615 new_dev['vslt'])
616 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
619 def device_create(self, dev_config):
620 """Create a new device.
622 @param dev_config: device configuration
623 @type dev_config: SXP object (parsed config)
624 """
625 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
626 dev_type = sxp.name(dev_config)
627 dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
628 dev_config_dict = self.info['devices'][dev_uuid][1]
629 log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))
631 if self.domid is not None:
632 try:
633 dev_config_dict['devid'] = devid = \
634 self._createDevice(dev_type, dev_config_dict)
635 self._waitForDevice(dev_type, devid)
636 except VmError, ex:
637 del self.info['devices'][dev_uuid]
638 if dev_type == 'tap':
639 self.info['vbd_refs'].remove(dev_uuid)
640 else:
641 self.info['%s_refs' % dev_type].remove(dev_uuid)
642 raise ex
643 else:
644 devid = None
646 xen.xend.XendDomain.instance().managed_config_save(self)
647 return self.getDeviceController(dev_type).sxpr(devid)
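# Sketch of attaching a network interface; the SXP shape is an assumption
# modelled on xm's vif syntax, not taken from this file:
#
#     vif_sxp = ['vif', ['mac', '00:16:3e:00:00:01'], ['bridge', 'xenbr0']]
#     vm.device_create(vif_sxp)    # hot-plugs if the domain is running,
#                                  # otherwise only records it in the config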
650 def pci_device_configure(self, dev_sxp, devid = 0):
651 """Configure an existing pci device.
653 @param dev_sxp: device configuration
654 @type dev_sxp: SXP object (parsed config)
655 @param devid: device id
656 @type devid: int
657 @return: Returns True if successfully updated device
658 @rtype: boolean
659 """
660 log.debug("XendDomainInfo.pci_device_configure: %s"
661 % scrub_password(dev_sxp))
663 dev_class = sxp.name(dev_sxp)
665 if dev_class != 'pci':
666 return False
668 pci_state = sxp.child_value(dev_sxp, 'state')
669 existing_dev_info = self._getDeviceInfo_pci(devid)
671 if existing_dev_info is None and pci_state != 'Initialising':
672 raise XendError("Cannot detach when pci platform does not exist")
674 pci_dev = sxp.children(dev_sxp, 'dev')[0]
675 dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
676 dev = dev_config['devs'][0]
678 # Do HVM specific processing
679 if self.info.is_hvm():
680 if pci_state == 'Initialising':
681 # HVM PCI device attachment
682 self.hvm_pci_device_create(dev_config)
683 # Update vslt
684 vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
685 % self.getDomid())
686 dev['vslt'] = vslt
687 for n in sxp.children(pci_dev):
688 if(n[0] == 'vslt'):
689 n[1] = vslt
690 else:
691 # HVM PCI device detachment
692 existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
693 existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
694 existing_pci_devs = existing_pci_conf['devs']
695 vslt = '0x0'
696 for x in existing_pci_devs:
697 if ( int(x['domain'], 16) == int(dev['domain'], 16) and
698 int(x['bus'], 16) == int(dev['bus'], 16) and
699 int(x['slot'], 16) == int(dev['slot'], 16) and
700 int(x['func'], 16) == int(dev['func'], 16) ):
701 vslt = x['vslt']
702 break
703 if vslt == '0x0':
704 raise VmError("Device %04x:%02x:%02x.%02x is not connected"
705 % (int(dev['domain'],16), int(dev['bus'],16),
706 int(dev['slot'],16), int(dev['func'],16)))
707 self.hvm_destroyPCIDevice(int(vslt, 16))
708 # Update vslt
709 dev['vslt'] = vslt
710 for n in sxp.children(pci_dev):
711 if(n[0] == 'vslt'):
712 n[1] = vslt
714 # If pci platform does not exist, create and exit.
715 if existing_dev_info is None:
716 self.device_create(dev_sxp)
717 return True
719 # use DevController.reconfigureDevice to change device config
720 dev_control = self.getDeviceController(dev_class)
721 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
722 if not self.info.is_hvm():
723 # in PV case, wait until backend state becomes connected.
724 dev_control.waitForDevice_reconfigure(devid)
725 num_devs = dev_control.cleanupDevice(devid)
727 # update XendConfig with new device info
728 if dev_uuid:
729 new_dev_sxp = dev_control.configuration(devid)
730 self.info.device_update(dev_uuid, new_dev_sxp)
732 # If there is no device left, destroy pci and remove config.
733 if num_devs == 0:
734 if self.info.is_hvm():
735 self.destroyDevice('pci', devid, True)
736 del self.info['devices'][dev_uuid]
737 platform = self.info['platform']
738 orig_dev_num = len(platform['pci'])
739 # TODO: this could retain some information so that higher-level
740 # management tools can be asked to hot-insert a new passthrough
741 # device after migration
742 if orig_dev_num != 0:
743 #platform['pci'] = ["%dDEVs" % orig_dev_num]
744 platform['pci'] = []
745 else:
746 self.destroyDevice('pci', devid)
747 del self.info['devices'][dev_uuid]
749 xen.xend.XendDomain.instance().managed_config_save(self)
751 return True
753 def vscsi_device_configure(self, dev_sxp):
754 """Configure an existing vscsi device.
755 (Modelled on the pci_device_configure function above.)
756 """
757 dev_class = sxp.name(dev_sxp)
758 if dev_class != 'vscsi':
759 return False
761 dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
762 dev = dev_config['devs'][0]
763 req_devid = sxp.child_value(dev_sxp, 'devid')
764 req_devid = int(req_devid)
765 existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
766 state = sxp.child_value(dev_sxp, 'state')
768 if state == 'Initialising':
769 # new create
770 # If the requested devid does not exist, create and exit.
771 if existing_dev_info is None:
772 self.device_create(dev_sxp)
773 return True
774 elif existing_dev_info == "exists":
775 raise XendError("The virtual device %s is already defined" % dev['v-dev'])
777 elif state == 'Closing':
778 if existing_dev_info is None:
779 raise XendError("Cannot detach a vscsi device that does not exist")
781 # use DevController.reconfigureDevice to change device config
782 dev_control = self.getDeviceController(dev_class)
783 dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
784 dev_control.waitForDevice_reconfigure(req_devid)
785 num_devs = dev_control.cleanupDevice(req_devid)
787 # update XendConfig with new device info
788 if dev_uuid:
789 new_dev_sxp = dev_control.configuration(req_devid)
790 self.info.device_update(dev_uuid, new_dev_sxp)
792 # If there is no device left, destroy vscsi and remove config.
793 if num_devs == 0:
794 self.destroyDevice('vscsi', req_devid)
795 del self.info['devices'][dev_uuid]
797 return True
799 def device_configure(self, dev_sxp, devid = None):
800 """Configure an existing device.
802 @param dev_sxp: device configuration
803 @type dev_sxp: SXP object (parsed config)
804 @param devid: device id
805 @type devid: int
806 @return: Returns True if successfully updated device
807 @rtype: boolean
808 """
810 # convert device sxp to a dict
811 dev_class = sxp.name(dev_sxp)
812 dev_config = {}
814 if dev_class == 'pci':
815 return self.pci_device_configure(dev_sxp)
817 if dev_class == 'vscsi':
818 return self.vscsi_device_configure(dev_sxp)
820 for opt_val in dev_sxp[1:]:
821 try:
822 dev_config[opt_val[0]] = opt_val[1]
823 except IndexError:
824 pass
826 # use DevController.reconfigureDevice to change device config
827 dev_control = self.getDeviceController(dev_class)
828 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
830 # update XendConfig with new device info
831 if dev_uuid:
832 self.info.device_update(dev_uuid, dev_sxp)
834 return True
836 def waitForDevices(self):
837 """Wait for this domain's configured devices to connect.
839 @raise VmError: if any device fails to initialise.
840 """
841 for devclass in XendDevices.valid_devices():
842 self.getDeviceController(devclass).waitForDevices()
844 def hvm_destroyPCIDevice(self, vslot):
845 log.debug("hvm_destroyPCIDevice called %s", vslot)
847 if not self.info.is_hvm():
848 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
850 #all the PCI devs share one conf node
851 devid = '0'
852 vslot = int(vslot)
853 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
854 dev_uuid = sxp.child_value(dev_info, 'uuid')
856 #delete the pci bdf config under the pci device
857 pci_conf = self.info['devices'][dev_uuid][1]
858 pci_len = len(pci_conf['devs'])
860 #find the pass-through device with the virtual slot
861 devnum = 0
862 for x in pci_conf['devs']:
863 if int(x['vslt'], 16) == vslot:
864 break
865 devnum += 1
867 if devnum >= pci_len:
868 raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))
870 if vslot == 0:
871 raise VmError("Device @ vslot 0x%x does not support hotplug." % (vslot))
873 bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
874 log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
876 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
878 return 0
880 def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
881 log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
882 deviceClass, devid)
884 if rm_cfg:
885 # Convert devid to device number. A device number is
886 # needed to remove its configuration.
887 dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
889 # Save current sxprs. A device number and a backend
890 # path are needed to remove its configuration but sxprs
891 # do not have those after calling destroyDevice.
892 sxprs = self.getDeviceSxprs(deviceClass)
894 rc = None
895 if self.domid is not None:
896 rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
897 if not force and rm_cfg:
898 # The backend path, other than the device itself,
899 # has to be passed because its accompanied frontend
900 # path may be void until its removal is actually
901 # issued. It is probable because destroyDevice is
902 # issued first.
903 for dev_num, dev_info in sxprs:
904 dev_num = int(dev_num)
905 if dev_num == dev:
906 for x in dev_info:
907 if x[0] == 'backend':
908 backend = x[1]
909 break
910 break
911 self._waitForDevice_destroy(deviceClass, devid, backend)
913 if rm_cfg:
914 if deviceClass == 'vif':
915 if self.domid is not None:
916 for dev_num, dev_info in sxprs:
917 dev_num = int(dev_num)
918 if dev_num == dev:
919 for x in dev_info:
920 if x[0] == 'mac':
921 mac = x[1]
922 break
923 break
924 dev_info = self._getDeviceInfo_vif(mac)
925 else:
926 _, dev_info = sxprs[dev]
927 else: # 'vbd' or 'tap'
928 dev_info = self._getDeviceInfo_vbd(dev)
929 # To remove the UUID of the device from refs,
930 # deviceClass must always be 'vbd'.
931 deviceClass = 'vbd'
932 if dev_info is None:
933 raise XendError("Device %s is not defined" % devid)
935 dev_uuid = sxp.child_value(dev_info, 'uuid')
936 del self.info['devices'][dev_uuid]
937 self.info['%s_refs' % deviceClass].remove(dev_uuid)
938 xen.xend.XendDomain.instance().managed_config_save(self)
940 return rc
942 def getDeviceSxprs(self, deviceClass):
943 if deviceClass == 'pci':
944 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
945 if dev_info is None:
946 return []
947 dev_uuid = sxp.child_value(dev_info, 'uuid')
948 pci_devs = self.info['devices'][dev_uuid][1]['devs']
949 pci_len = len(pci_devs)
950 return pci_devs
951 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
952 return self.getDeviceController(deviceClass).sxprs()
953 else:
954 sxprs = []
955 dev_num = 0
956 for dev_type, dev_info in self.info.all_devices_sxpr():
957 if dev_type == deviceClass:
958 sxprs.append([dev_num, dev_info])
959 dev_num += 1
960 return sxprs
962 def getBlockDeviceClass(self, devid):
963 # To get a device number from the devid,
964 # we temporarily use the device controller of VBD.
965 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
966 dev_info = self._getDeviceInfo_vbd(dev)
967 if dev_info:
968 return dev_info[0]
970 def _getDeviceInfo_vif(self, mac):
971 for dev_type, dev_info in self.info.all_devices_sxpr():
972 if dev_type != 'vif':
973 continue
974 if mac == sxp.child_value(dev_info, 'mac'):
975 return dev_info
977 def _getDeviceInfo_vbd(self, devid):
978 for dev_type, dev_info in self.info.all_devices_sxpr():
979 if dev_type != 'vbd' and dev_type != 'tap':
980 continue
981 dev = sxp.child_value(dev_info, 'dev')
982 dev = dev.split(':')[0]
983 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
984 if devid == dev:
985 return dev_info
987 def _getDeviceInfo_pci(self, devid):
988 for dev_type, dev_info in self.info.all_devices_sxpr():
989 if dev_type != 'pci':
990 continue
991 return dev_info
992 return None
994 def _getDeviceInfo_vscsi(self, devid, vdev):
995 devid = int(devid)
996 for dev_type, dev_info in self.info.all_devices_sxpr():
997 if dev_type != 'vscsi':
998 continue
999 existing_dev_uuid = sxp.child_value(dev_info, 'uuid')
1000 existing_conf = self.info['devices'][existing_dev_uuid][1]
1001 existing_dev = existing_conf['devs'][0]
1002 existing_devid = int(existing_dev['devid'])
1003 existing_vdev = existing_dev['v-dev']
1005 if vdev == existing_vdev:
1006 return "exists"
1008 if devid == existing_devid:
1009 return dev_info
1011 return None
1013 def setMemoryTarget(self, target):
1014 """Set the memory target of this domain.
1015 @param target: In MiB.
1016 """
1017 log.debug("Setting memory target of domain %s (%s) to %d MiB.",
1018 self.info['name_label'], str(self.domid), target)
1020 MiB = 1024 * 1024
1022 if self.domid == 0:
1023 dom0_min_mem = xoptions.get_dom0_min_mem()
1024 memory_cur = self.get_memory_dynamic_max() / MiB
1025 if target < memory_cur and dom0_min_mem > target:
1026 raise XendError("memory_dynamic_max too small")
1028 self._safe_set_memory('memory_dynamic_min', target * MiB)
1029 self._safe_set_memory('memory_dynamic_max', target * MiB)
1031 if self.domid >= 0:
1032 self.storeVm("memory", target)
1033 self.storeDom("memory/target", target << 10)
1034 xen.xend.XendDomain.instance().managed_config_save(self)
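# Worked example of the unit handling above, for a target of 512 MiB:
#
#     target * MiB    # 536870912 bytes, stored in memory_dynamic_{min,max}
#     target          # 512, written to the /vm 'memory' node (MiB)
#     target << 10    # 524288, written to memory/target (KiB, which is what
#                     # the guest balloon driver reads)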
1036 def setMemoryMaximum(self, limit):
1037 """Set the maximum memory limit of this domain
1038 @param limit: In MiB.
1039 """
1040 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1041 self.info['name_label'], str(self.domid), limit)
1043 maxmem_cur = self.get_memory_static_max()
1044 MiB = 1024 * 1024
1045 self._safe_set_memory('memory_static_max', limit * MiB)
1047 if self.domid >= 0:
1048 maxmem = int(limit) * 1024
1049 try:
1050 return xc.domain_setmaxmem(self.domid, maxmem)
1051 except Exception, ex:
1052 self._safe_set_memory('memory_static_max', maxmem_cur)
1053 raise XendError(str(ex))
1054 xen.xend.XendDomain.instance().managed_config_save(self)
1057 def getVCPUInfo(self):
1058 try:
1059 # We include the domain name and ID, to help xm.
1060 sxpr = ['domain',
1061 ['domid', self.domid],
1062 ['name', self.info['name_label']],
1063 ['vcpu_count', self.info['VCPUs_max']]]
1065 for i in range(0, self.info['VCPUs_max']):
1066 if self.domid is not None:
1067 info = xc.vcpu_getinfo(self.domid, i)
1069 sxpr.append(['vcpu',
1070 ['number', i],
1071 ['online', info['online']],
1072 ['blocked', info['blocked']],
1073 ['running', info['running']],
1074 ['cpu_time', info['cpu_time'] / 1e9],
1075 ['cpu', info['cpu']],
1076 ['cpumap', info['cpumap']]])
1077 else:
1078 sxpr.append(['vcpu',
1079 ['number', i],
1080 ['online', 0],
1081 ['blocked', 0],
1082 ['running', 0],
1083 ['cpu_time', 0.0],
1084 ['cpu', -1],
1085 ['cpumap', self.info['cpus'][i] and \
1086 self.info['cpus'][i] or range(64)]])
1088 return sxpr
1090 except RuntimeError, exn:
1091 raise XendError(str(exn))
1094 def getDomInfo(self):
1095 return dom_get(self.domid)
1098 # internal functions ... TODO: re-categorise
1101 def _augmentInfo(self, priv):
1102 """Augment self.info, as given to us through L{recreate}, with
1103 values taken from the store. This recovers those values known
1104 to xend but not to the hypervisor.
1105 """
1106 augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
1107 if priv:
1108 augment_entries.remove('memory')
1109 augment_entries.remove('maxmem')
1110 augment_entries.remove('vcpus')
1111 augment_entries.remove('vcpu_avail')
1113 vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
1114 for k in augment_entries])
1116 # make returned lists into a dictionary
1117 vm_config = dict(zip(augment_entries, vm_config))
1119 for arg in augment_entries:
1120 val = vm_config[arg]
1121 if val != None:
1122 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1123 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1124 self.info[xapiarg] = val
1125 elif arg == "memory":
1126 self.info["static_memory_min"] = val
1127 elif arg == "maxmem":
1128 self.info["static_memory_max"] = val
1129 else:
1130 self.info[arg] = val
1132 # read CPU Affinity
1133 self.info['cpus'] = []
1134 vcpus_info = self.getVCPUInfo()
1135 for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
1136 self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))
1138 # For dom0, we ignore any stored value for the vcpus fields, and
1139 # read the current value from Xen instead. This allows boot-time
1140 # settings to take precedence over any entries in the store.
1141 if priv:
1142 xeninfo = dom_get(self.domid)
1143 self.info['VCPUs_max'] = xeninfo['online_vcpus']
1144 self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1
1146 # read image value
1147 image_sxp = self._readVm('image')
1148 if image_sxp:
1149 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1151 # read devices
1152 devices = []
1153 for devclass in XendDevices.valid_devices():
1154 devconfig = self.getDeviceController(devclass).configurations()
1155 if devconfig:
1156 devices.extend(devconfig)
1158 if not self.info['devices'] and devices is not None:
1159 for device in devices:
1160 self.info.device_add(device[0], cfg_sxp = device)
1162 self._update_consoles()
1164 def _update_consoles(self, transaction = None):
1165 if self.domid == None or self.domid == 0:
1166 return
1168 # Update VT100 port if it exists
1169 if transaction is None:
1170 self.console_port = self.readDom('console/port')
1171 else:
1172 self.console_port = self.readDomTxn(transaction, 'console/port')
1173 if self.console_port is not None:
1174 serial_consoles = self.info.console_get_all('vt100')
1175 if not serial_consoles:
1176 cfg = self.info.console_add('vt100', self.console_port)
1177 self._createDevice('console', cfg)
1178 else:
1179 console_uuid = serial_consoles[0].get('uuid')
1180 self.info.console_update(console_uuid, 'location',
1181 self.console_port)
1184 # Update VNC port if it exists and write to xenstore
1185 if transaction is None:
1186 vnc_port = self.readDom('console/vnc-port')
1187 else:
1188 vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
1189 if vnc_port is not None:
1190 for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
1191 if dev_type == 'vfb':
1192 old_location = dev_info.get('location')
1193 listen_host = dev_info.get('vnclisten', 'localhost')
1194 new_location = '%s:%s' % (listen_host, str(vnc_port))
1195 if old_location == new_location:
1196 break
1198 dev_info['location'] = new_location
1199 self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
1200 vfb_ctrl = self.getDeviceController('vfb')
1201 vfb_ctrl.reconfigureDevice(0, dev_info)
1202 break
1205 # Functions to update xenstore /vm/*
1208 def _readVm(self, *args):
1209 return xstransact.Read(self.vmpath, *args)
1211 def _writeVm(self, *args):
1212 return xstransact.Write(self.vmpath, *args)
1214 def _removeVm(self, *args):
1215 return xstransact.Remove(self.vmpath, *args)
1217 def _gatherVm(self, *args):
1218 return xstransact.Gather(self.vmpath, *args)
1220 def _listRecursiveVm(self, *args):
1221 return xstransact.ListRecursive(self.vmpath, *args)
1223 def storeVm(self, *args):
1224 return xstransact.Store(self.vmpath, *args)
1226 def permissionsVm(self, *args):
1227 return xstransact.SetPermissions(self.vmpath, *args)
1230 def _readVmTxn(self, transaction, *args):
1231 paths = map(lambda x: self.vmpath + "/" + x, args)
1232 return transaction.read(*paths)
1234 def _writeVmTxn(self, transaction, *args):
1235 paths = map(lambda x: self.vmpath + "/" + x, args)
1236 return transaction.write(*paths)
1238 def _removeVmTxn(self, transaction, *args):
1239 paths = map(lambda x: self.vmpath + "/" + x, args)
1240 return transaction.remove(*paths)
1242 def _gatherVmTxn(self, transaction, *args):
1243 paths = map(lambda x: self.vmpath + "/" + x, args)
1244 return transaction.gather(paths)
1246 def storeVmTxn(self, transaction, *args):
1247 paths = map(lambda x: self.vmpath + "/" + x, args)
1248 return transaction.store(*paths)
1250 def permissionsVmTxn(self, transaction, *args):
1251 paths = map(lambda x: self.vmpath + "/" + x, args)
1252 return transaction.set_permissions(*paths)
1255 # Functions to update xenstore /dom/*
1258 def readDom(self, *args):
1259 return xstransact.Read(self.dompath, *args)
1261 def gatherDom(self, *args):
1262 return xstransact.Gather(self.dompath, *args)
1264 def _writeDom(self, *args):
1265 return xstransact.Write(self.dompath, *args)
1267 def _removeDom(self, *args):
1268 return xstransact.Remove(self.dompath, *args)
1270 def storeDom(self, *args):
1271 return xstransact.Store(self.dompath, *args)
1274 def readDomTxn(self, transaction, *args):
1275 paths = map(lambda x: self.dompath + "/" + x, args)
1276 return transaction.read(*paths)
1278 def gatherDomTxn(self, transaction, *args):
1279 paths = map(lambda x: self.dompath + "/" + x, args)
1280 return transaction.gather(*paths)
1282 def _writeDomTxn(self, transaction, *args):
1283 paths = map(lambda x: self.dompath + "/" + x, args)
1284 return transaction.write(*paths)
1286 def _removeDomTxn(self, transaction, *args):
1287 paths = map(lambda x: self.dompath + "/" + x, args)
1288 return transaction.remove(*paths)
1290 def storeDomTxn(self, transaction, *args):
1291 paths = map(lambda x: self.dompath + "/" + x, args)
1292 return transaction.store(*paths)
1295 def _recreateDom(self):
1296 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1298 def _recreateDomFunc(self, t):
1299 t.remove()
1300 t.mkdir()
1301 t.set_permissions({'dom' : self.domid})
1302 t.write('vm', self.vmpath)
1304 def _storeDomDetails(self):
1305 to_store = {
1306 'domid': str(self.domid),
1307 'vm': self.vmpath,
1308 'name': self.info['name_label'],
1309 'console/limit': str(xoptions.get_console_limit() * 1024),
1310 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1313 def f(n, v):
1314 if v is not None:
1315 if type(v) == bool:
1316 to_store[n] = v and "1" or "0"
1317 else:
1318 to_store[n] = str(v)
1320 # Figure out if we need to tell xenconsoled to ignore this guest's
1321 # console - device model will handle console if it is running
1322 constype = "ioemu"
1323 if 'device_model' not in self.info['platform']:
1324 constype = "xenconsoled"
1326 f('console/port', self.console_port)
1327 f('console/ring-ref', self.console_mfn)
1328 f('console/type', constype)
1329 f('store/port', self.store_port)
1330 f('store/ring-ref', self.store_mfn)
1332 if arch.type == "x86":
1333 f('control/platform-feature-multiprocessor-suspend', True)
1335 # elfnotes
1336 for n, v in self.info.get_notes().iteritems():
1337 n = n.lower().replace('_', '-')
1338 if n == 'features':
1339 for v in v.split('|'):
1340 v = v.replace('_', '-')
1341 if v.startswith('!'):
1342 f('image/%s/%s' % (n, v[1:]), False)
1343 else:
1344 f('image/%s/%s' % (n, v), True)
1345 else:
1346 f('image/%s' % n, v)
1348 if self.info.has_key('security_label'):
1349 f('security_label', self.info['security_label'])
1351 to_store.update(self._vcpuDomDetails())
1353 log.debug("Storing domain details: %s", scrub_password(to_store))
1355 self._writeDom(to_store)
1357 def _vcpuDomDetails(self):
1358 def availability(n):
1359 if self.info['vcpu_avail'] & (1 << n):
1360 return 'online'
1361 else:
1362 return 'offline'
1364 result = {}
1365 for v in range(0, self.info['VCPUs_max']):
1366 result["cpu/%d/availability" % v] = availability(v)
1367 return result
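# Worked example: with VCPUs_max = 4 and vcpu_avail = 0b0101 (i.e. 5),
# vcpus 0 and 2 are reported 'online', vcpus 1 and 3 'offline':
#
#     5 & (1 << 0)    # non-zero -> cpu/0/availability = 'online'
#     5 & (1 << 1)    # zero     -> cpu/1/availability = 'offline'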
1370 # xenstore watches
1373 def _registerWatches(self):
1374 """Register a watch on this VM's entries in the store, and the
1375 domain's control/shutdown node, so that when they are changed
1376 externally, we keep up to date. This should only be called by {@link
1377 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1378 details have been written, but before the new instance is returned."""
1379 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1380 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1381 self._handleShutdownWatch)
1383 def _storeChanged(self, _):
1384 log.trace("XendDomainInfo.storeChanged")
1386 changed = False
1388 # Check whether values in the configuration have
1389 # changed in Xenstore.
1391 cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
1392 'rtc/timeoffset']
1394 vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
1395 for k in cfg_vm])
1397 # convert two lists into a python dictionary
1398 vm_details = dict(zip(cfg_vm, vm_details))
1400 if vm_details['rtc/timeoffset'] == None:
1401 vm_details['rtc/timeoffset'] = "0"
1403 for arg, val in vm_details.items():
1404 if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
1405 xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
1406 if val != None and val != self.info[xapiarg]:
1407 self.info[xapiarg] = val
1408 changed = True
1409 elif arg == "memory":
1410 if val != None and val != self.info["static_memory_min"]:
1411 self.info["static_memory_min"] = val
1412 changed = True
1413 elif arg == "maxmem":
1414 if val != None and val != self.info["static_memory_max"]:
1415 self.info["static_memory_max"] = val
1416 changed = True
1418 # Check whether image definition has been updated
1419 image_sxp = self._readVm('image')
1420 if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
1421 self.info.update_with_image_sxp(sxp.from_string(image_sxp))
1422 changed = True
1424 # Check if the rtc offset has changed
1425 if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
1426 self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
1427 changed = True
1429 if changed:
1430 # Update the domain section of the store, as this contains some
1431 # parameters derived from the VM configuration.
1432 self._storeDomDetails()
1434 return 1
1436 def _handleShutdownWatch(self, _):
1437 log.debug('XendDomainInfo.handleShutdownWatch')
1439 reason = self.readDom('control/shutdown')
1441 if reason and reason != 'suspend':
1442 sst = self.readDom('xend/shutdown_start_time')
1443 now = time.time()
1444 if sst:
1445 self.shutdownStartTime = float(sst)
1446 timeout = float(sst) + SHUTDOWN_TIMEOUT - now
1447 else:
1448 self.shutdownStartTime = now
1449 self.storeDom('xend/shutdown_start_time', now)
1450 timeout = SHUTDOWN_TIMEOUT
1452 log.trace(
1453 "Scheduling refreshShutdown on domain %d in %ds.",
1454 self.domid, timeout)
1455 threading.Timer(timeout, self.refreshShutdown).start()
1457 return True
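# Worked example of the timeout computed above (SHUTDOWN_TIMEOUT comes from
# XendConstants; 30 is an illustrative value): if the shutdown started 10
# seconds ago, the watch schedules refreshShutdown in 30 - 10 = 20 seconds.
#
#     timeout = float(sst) + 30.0 - now    # sst = now - 10  ->  timeout = 20.0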
1461 # Public Attributes for the VM
1465 def getDomid(self):
1466 return self.domid
1468 def setName(self, name, to_store = True):
1469 self._checkName(name)
1470 self.info['name_label'] = name
1471 if to_store:
1472 self.storeVm("name", name)
1474 def getName(self):
1475 return self.info['name_label']
1477 def getDomainPath(self):
1478 return self.dompath
1480 def getShutdownReason(self):
1481 return self.readDom('control/shutdown')
1483 def getStorePort(self):
1484 """For use only by image.py and XendCheckpoint.py."""
1485 return self.store_port
1487 def getConsolePort(self):
1488 """For use only by image.py and XendCheckpoint.py"""
1489 return self.console_port
1491 def getFeatures(self):
1492 """For use only by image.py."""
1493 return self.info['features']
1495 def getVCpuCount(self):
1496 return self.info['VCPUs_max']
1498 def setVCpuCount(self, vcpus):
1499 if vcpus <= 0:
1500 raise XendError('Invalid VCPUs')
1502 self.info['vcpu_avail'] = (1 << vcpus) - 1
1503 if self.domid >= 0:
1504 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1505 # update dom differently depending on whether we are adjusting
1506 # vcpu number up or down, otherwise _vcpuDomDetails does not
1507 # disable the vcpus
1508 if self.info['VCPUs_max'] > vcpus:
1509 # decreasing
1510 self._writeDom(self._vcpuDomDetails())
1511 self.info['VCPUs_live'] = vcpus
1512 else:
1513 # same or increasing
1514 self.info['VCPUs_live'] = vcpus
1515 self._writeDom(self._vcpuDomDetails())
1516 else:
1517 if self.info['VCPUs_max'] > vcpus:
1518 # decreasing
1519 del self.info['cpus'][vcpus:]
1520 elif self.info['VCPUs_max'] < vcpus:
1521 # increasing
1522 for c in range(self.info['VCPUs_max'], vcpus):
1523 self.info['cpus'].append(list())
1524 self.info['VCPUs_max'] = vcpus
1525 xen.xend.XendDomain.instance().managed_config_save(self)
1526 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1527 vcpus)
1529 def getMemoryTarget(self):
1530 """Get this domain's target memory size, in KB."""
1531 return self.info['memory_dynamic_max'] / 1024
1533 def getMemoryMaximum(self):
1534 """Get this domain's maximum memory size, in KB."""
1535 # remember, info now stores memory in bytes
1536 return self.info['memory_static_max'] / 1024
1538 def getResume(self):
1539 return str(self._resume)
1541 def setResume(self, isresume):
1542 self._resume = isresume
1544 def getCpus(self):
1545 return self.info['cpus']
1547 def setCpus(self, cpumap):
1548 self.info['cpus'] = cpumap
1550 def getCap(self):
1551 return self.info['vcpus_params']['cap']
1553 def setCap(self, cpu_cap):
1554 self.info['vcpus_params']['cap'] = cpu_cap
1556 def getWeight(self):
1557 return self.info['vcpus_params']['weight']
1559 def setWeight(self, cpu_weight):
1560 self.info['vcpus_params']['weight'] = cpu_weight
1562 def getRestartCount(self):
1563 return self._readVm('xend/restart_count')
1565 def refreshShutdown(self, xeninfo = None):
1566 """Check the domain to see whether a shutdown action is required.
1568 Called from XendDomainInfo and also image.py for HVM images.
1569 """
1571 # If set at the end of this method, a restart is required, with the
1572 # given reason. This restart has to be done out of the scope of
1573 # refresh_shutdown_lock.
1574 restart_reason = None
1576 self.refresh_shutdown_lock.acquire()
1577 try:
1578 if xeninfo is None:
1579 xeninfo = dom_get(self.domid)
1580 if xeninfo is None:
1581 # The domain no longer exists. This will occur if we have
1582 # scheduled a timer to check for shutdown timeouts and the
1583 # shutdown succeeded. It will also occur if someone
1584 # destroys a domain beneath us. We clean up the domain,
1585 # just in case, but we can't clean up the VM, because that
1586 # VM may have migrated to a different domain on this
1587 # machine.
1588 self.cleanupDomain()
1589 self._stateSet(DOM_STATE_HALTED)
1590 return
1592 if xeninfo['dying']:
1593 # Dying means that a domain has been destroyed, but has not
1594 # yet been cleaned up by Xen. This state could persist
1595 # indefinitely if, for example, another domain has some of its
1596 # pages mapped. We might like to diagnose this problem in the
1597 # future, but for now all we do is make sure that it's not us
1598 # holding the pages, by calling cleanupDomain. We can't
1599 # clean up the VM, as above.
1600 self.cleanupDomain()
1601 self._stateSet(DOM_STATE_SHUTDOWN)
1602 return
1604 elif xeninfo['crashed']:
1605 if self.readDom('xend/shutdown_completed'):
1606 # We've seen this shutdown already, but we are preserving
1607 # the domain for debugging. Leave it alone.
1608 return
1610 log.warn('Domain has crashed: name=%s id=%d.',
1611 self.info['name_label'], self.domid)
1612 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
1614 restart_reason = 'crash'
1615 self._stateSet(DOM_STATE_HALTED)
1617 elif xeninfo['shutdown']:
1618 self._stateSet(DOM_STATE_SHUTDOWN)
1619 if self.readDom('xend/shutdown_completed'):
1620 # We've seen this shutdown already, but we are preserving
1621 # the domain for debugging. Leave it alone.
1622 return
1624 else:
1625 reason = shutdown_reason(xeninfo['shutdown_reason'])
1627 log.info('Domain has shutdown: name=%s id=%d reason=%s.',
1628 self.info['name_label'], self.domid, reason)
1629 self._writeVm(LAST_SHUTDOWN_REASON, reason)
1631 self._clearRestart()
1633 if reason == 'suspend':
1634 self._stateSet(DOM_STATE_SUSPENDED)
1635 # Don't destroy the domain. XendCheckpoint will do
1636 # this once it has finished. However, stop watching
1637 # the VM path now, otherwise we will end up with one
1638 # watch for the old domain, and one for the new.
1639 self._unwatchVm()
1640 elif reason in ('poweroff', 'reboot'):
1641 restart_reason = reason
1642 else:
1643 self.destroy()
1645 elif self.dompath is None:
1646 # We have yet to manage to call introduceDomain on this
1647 # domain. This can happen if a restore is in progress, or has
1648 # failed. Ignore this domain.
1649 pass
1650 else:
1651 # Domain is alive. If we are shutting it down, log a message
1652 # if it seems unresponsive.
1653 if xeninfo['paused']:
1654 self._stateSet(DOM_STATE_PAUSED)
1655 else:
1656 self._stateSet(DOM_STATE_RUNNING)
1658 if self.shutdownStartTime:
1659 timeout = (SHUTDOWN_TIMEOUT - time.time() +
1660 self.shutdownStartTime)
1661 if (timeout < 0 and not self.readDom('xend/unresponsive')):
1662 log.info(
1663 "Domain shutdown timeout expired: name=%s id=%s",
1664 self.info['name_label'], self.domid)
1665 self.storeDom('xend/unresponsive', 'True')
1666 finally:
1667 self.refresh_shutdown_lock.release()
1669 if restart_reason:
1670 threading.Thread(target = self._maybeRestart,
1671 args = (restart_reason,)).start()
1675 # Restart functions - handling whether we come back up on shutdown.
1678 def _clearRestart(self):
1679 self._removeDom("xend/shutdown_start_time")
1681 def _maybeDumpCore(self, reason):
1682 if reason == 'crash':
1683 if xoptions.get_enable_dump() or self.get_on_crash() \
1684 in ['coredump_and_destroy', 'coredump_and_restart']:
1685 try:
1686 self.dumpCore()
1687 except XendError:
1688 # This error has been logged -- there's nothing more
1689 # we can do in this context.
1690 pass
1692 def _maybeRestart(self, reason):
1693 # Before taking configured action, dump core if configured to do so.
1695 self._maybeDumpCore(reason)
1697 # Dispatch to the correct method based upon the configured on_{reason}
1698 # behaviour.
1699 actions = {"destroy" : self.destroy,
1700 "restart" : self._restart,
1701 "preserve" : self._preserve,
1702 "rename-restart" : self._renameRestart,
1703 "coredump-destroy" : self.destroy,
1704 "coredump-restart" : self._restart}
1706 action_conf = {
1707 'poweroff': 'actions_after_shutdown',
1708 'reboot': 'actions_after_reboot',
1709 'crash': 'actions_after_crash',
1712 action_target = self.info.get(action_conf.get(reason))
1713 func = actions.get(action_target, None)
1714 if func and callable(func):
1715 func()
1716 else:
1717 self.destroy() # default to destroy
1719 def _renameRestart(self):
1720 self._restart(True)
1722 def _restart(self, rename = False):
1723 """Restart the domain after it has exited.
1725 @param rename True if the old domain is to be renamed and preserved,
1726 False if it is to be destroyed.
1727 """
1728 from xen.xend import XendDomain
1730 if self._readVm(RESTART_IN_PROGRESS):
1731 log.error('Xend failed during restart of domain %s. '
1732 'Refusing to restart to avoid loops.',
1733 str(self.domid))
1734 self.destroy()
1735 return
1737 old_domid = self.domid
1738 self._writeVm(RESTART_IN_PROGRESS, 'True')
1740 now = time.time()
1741 rst = self._readVm('xend/previous_restart_time')
1742 if rst:
1743 rst = float(rst)
1744 timeout = now - rst
1745 if timeout < MINIMUM_RESTART_TIME:
1746 log.error(
1747 'VM %s restarting too fast (%f seconds since the last '
1748 'restart). Refusing to restart to avoid loops.',
1749 self.info['name_label'], timeout)
1750 self.destroy()
1751 return
1753 self._writeVm('xend/previous_restart_time', str(now))
1755 prev_vm_xend = self._listRecursiveVm('xend')
1756 new_dom_info = self.info
1757 try:
1758 if rename:
1759 new_dom_info = self._preserveForRestart()
1760 else:
1761 self._unwatchVm()
1762 self.destroy()
1764 # new_dom's VM will be the same as this domain's VM, except where
1765 # the rename flag has instructed us to call preserveForRestart.
1766 # In that case, it is important that we remove the
1767 # RESTART_IN_PROGRESS node from the new domain, not the old one,
1768 # once the new one is available.
1770 new_dom = None
1771 try:
1772 new_dom = XendDomain.instance().domain_create_from_dict(
1773 new_dom_info)
1774 for x in prev_vm_xend[0][1]:
1775 new_dom._writeVm('xend/%s' % x[0], x[1])
1776 new_dom.waitForDevices()
1777 new_dom.unpause()
1778 rst_cnt = new_dom._readVm('xend/restart_count')
1779 rst_cnt = int(rst_cnt) + 1
1780 new_dom._writeVm('xend/restart_count', str(rst_cnt))
1781 new_dom._removeVm(RESTART_IN_PROGRESS)
1782 except:
1783 if new_dom:
1784 new_dom._removeVm(RESTART_IN_PROGRESS)
1785 new_dom.destroy()
1786 else:
1787 self._removeVm(RESTART_IN_PROGRESS)
1788 raise
1789 except:
1790 log.exception('Failed to restart domain %s.', str(old_domid))
1792 def _preserveForRestart(self):
1793 """Preserve a domain that has been shut down, by giving it a new UUID,
1794 cloning the VM details, and giving it a new name. This allows us to
1795 keep this domain for debugging, but restart a new one in its place
1796 preserving the restart semantics (name and UUID preserved).
1797 """
1799 new_uuid = uuid.createString()
1800 new_name = 'Domain-%s' % new_uuid
1801 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
1802 self.info['name_label'], self.domid, self.info['uuid'],
1803 new_name, new_uuid)
1804 self._unwatchVm()
1805 self._releaseDevices()
1806 # Remove existing vm node in xenstore
1807 self._removeVm()
1808 new_dom_info = self.info.copy()
1809 new_dom_info['name_label'] = self.info['name_label']
1810 new_dom_info['uuid'] = self.info['uuid']
1811 self.info['name_label'] = new_name
1812 self.info['uuid'] = new_uuid
1813 self.vmpath = XS_VMROOT + new_uuid
1814 # Write out new vm node to xenstore
1815 self._storeVmDetails()
1816 self._preserve()
1817 return new_dom_info
1820 def _preserve(self):
1821 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
1822 self.domid)
1823 self._unwatchVm()
1824 self.storeDom('xend/shutdown_completed', 'True')
1825 self._stateSet(DOM_STATE_HALTED)
1828 # Debugging ..
1831 def dumpCore(self, corefile = None):
1832 """Create a core dump for this domain.
1834 @raise: XendError if core dumping failed.
1835 """
1837 try:
1838 if not corefile:
1839 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
1840 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
1841 self.info['name_label'], self.domid)
1843 if os.path.isdir(corefile):
1844 raise XendError("Cannot dump core in a directory: %s" %
1845 corefile)
1847 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
1848 xc.domain_dumpcore(self.domid, corefile)
1849 self._removeVm(DUMPCORE_IN_PROGRESS)
1850 except RuntimeError, ex:
1851 corefile_incomp = corefile+'-incomplete'
1852 os.rename(corefile, corefile_incomp)
1853 self._removeVm(DUMPCORE_IN_PROGRESS)
1854 log.exception("XendDomainInfo.dumpCore failed: id = %s name = %s",
1855 self.domid, self.info['name_label'])
1856 raise XendError("Failed to dump core: %s" % str(ex))
1859 # Device creation/deletion functions
1862 def _createDevice(self, deviceClass, devConfig):
1863 return self.getDeviceController(deviceClass).createDevice(devConfig)
1865 def _waitForDevice(self, deviceClass, devid):
1866 return self.getDeviceController(deviceClass).waitForDevice(devid)
1868 def _waitForDeviceUUID(self, dev_uuid):
1869 deviceClass, config = self.info['devices'].get(dev_uuid)
1870 self._waitForDevice(deviceClass, config['devid'])
1872 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
1873 return self.getDeviceController(deviceClass).waitForDevice_destroy(
1874 devid, backpath)
1876 def _reconfigureDevice(self, deviceClass, devid, devconfig):
1877 return self.getDeviceController(deviceClass).reconfigureDevice(
1878 devid, devconfig)
1880 def _createDevices(self):
1881 """Create the devices for a vm.
1883 @raise: VmError for invalid devices
1884 """
1885 if self.image:
1886 self.image.prepareEnvironment()
1888 vscsi_uuidlist = {}
1889 vscsi_devidlist = []
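# Devices are created in two passes: everything except vscsi is created
# here in ordered_device_refs() order, while vscsi devices are collected
# and created afterwards, sorted by devid (see below).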
1890 ordered_refs = self.info.ordered_device_refs()
1891 for dev_uuid in ordered_refs:
1892 devclass, config = self.info['devices'][dev_uuid]
1893 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
1894 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
1895 dev_uuid = config.get('uuid')
1896 devid = self._createDevice(devclass, config)
1898 # store devid in XendConfig for caching reasons
1899 if dev_uuid in self.info['devices']:
1900 self.info['devices'][dev_uuid][1]['devid'] = devid
1902 elif devclass == 'vscsi':
1903 vscsi_config = config.get('devs', [])[0]
1904 devid = vscsi_config.get('devid', '')
1905 dev_uuid = config.get('uuid')
1906 vscsi_uuidlist[devid] = dev_uuid
1907 vscsi_devidlist.append(devid)
1909 # The vscsi devids must be sorted so /dev/sdXX devices are created in a stable order in the guest.
1910 if len(vscsi_uuidlist) > 0:
1911 vscsi_devidlist.sort()
1912 for vscsiid in vscsi_devidlist:
1913 dev_uuid = vscsi_uuidlist[vscsiid]
1914 devclass, config = self.info['devices'][dev_uuid]
1915 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
1916 dev_uuid = config.get('uuid')
1917 devid = self._createDevice(devclass, config)
1918 # store devid in XendConfig for caching reasons
1919 if dev_uuid in self.info['devices']:
1920 self.info['devices'][dev_uuid][1]['devid'] = devid
1923 if self.image:
1924 self.image.createDeviceModel()
1926 # If there are PCI pass-through devices, we need the virtual PCI slot info from qemu.
1927 self.sync_pcidev_info()
1929 def _releaseDevices(self, suspend = False):
1930 """Release all domain's devices. Nothrow guarantee."""
1931 if self.image:
1932 try:
1933 log.debug("Destroying device model")
1934 self.image.destroyDeviceModel()
1935 except Exception, e:
1936 log.exception("Device model destroy failed %s" % str(e))
1937 else:
1938 log.debug("No device model")
1940 log.debug("Releasing devices")
1941 t = xstransact("%s/device" % self.dompath)
1942 try:
1943 for devclass in XendDevices.valid_devices():
1944 for dev in t.list(devclass):
1945 try:
1946 log.debug("Removing %s", dev);
1947 self.destroyDevice(devclass, dev, False);
1948 except:
1949 # Log and swallow any exceptions in removal --
1950 # there's nothing more we can do.
1951 log.exception("Device release failed: %s; %s; %s",
1952 self.info['name_label'], devclass, dev)
1953 finally:
1954 t.abort()
1956 def getDeviceController(self, name):
1957 """Get the device controller for this domain, and if it
1958 doesn't exist, create it.
1960 @param name: device class name
1961 @type name: string
1962 @rtype: subclass of DevController
1963 """
1964 if name not in self._deviceControllers:
1965 devController = XendDevices.make_controller(name, self)
1966 if not devController:
1967 raise XendError("Unknown device type: %s" % name)
1968 self._deviceControllers[name] = devController
1970 return self._deviceControllers[name]
1973 # Migration functions (public)
1976 def testMigrateDevices(self, network, dst):
1977 """ Notify all device about intention of migration
1978 @raise: XendError for a device that cannot be migrated
1979 """
1980 for (n, c) in self.info.all_devices_sxpr():
1981 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
1982 if rc != 0:
1983 raise XendError("Device of type '%s' refuses migration." % n)
1985 def migrateDevices(self, network, dst, step, domName=''):
1986 """Notify the devices about migration
1987 """
1988 ctr = 0
1989 try:
1990 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
1991 self.migrateDevice(dev_type, dev_conf, network, dst,
1992 step, domName)
1993 ctr = ctr + 1
1994 except:
1995 for dev_type, dev_conf in self.info.all_devices_sxpr():
1996 if ctr == 0:
1997 step = step - 1
1998 ctr = ctr - 1
1999 self._recoverMigrateDevice(dev_type, dev_conf, network,
2000 dst, step, domName)
2001 raise
2003 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2004 step, domName=''):
2005 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2006 network, dst, step, domName)
2008 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2009 dst, step, domName=''):
2010 return self.getDeviceController(deviceClass).recover_migrate(
2011 deviceConfig, network, dst, step, domName)
2014 ## private:
2016 def _constructDomain(self):
2017 """Construct the domain.
2019 @raise: VmError on error
2020 """
2022 log.debug('XendDomainInfo.constructDomain')
2024 self.shutdownStartTime = None
2026 hap = 0
2027 hvm = self.info.is_hvm()
2028 if hvm:
2029 hap = self.info.is_hap()
2030 info = xc.xeninfo()
2031 if 'hvm' not in info['xen_caps']:
2032 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2033 "supported by your CPU and enabled in your "
2034 "BIOS?")
2036 # Hack to pre-reserve some memory for initial domain creation.
2037 # There is an implicit memory overhead for any domain creation. This
2038 # overhead is greater for some types of domain than others. For
2039 # example, an x86 HVM domain will have a default shadow-pagetable
2040 # allocation of 1MB. We free up 2MB here to be on the safe side.
2041 balloon.free(2*1024) # 2MB should be plenty
2043 ssidref = 0
2044 if security.on() == xsconstants.XS_POLICY_ACM:
2045 ssidref = security.calc_dom_ssidref_from_info(self.info)
2046 if security.has_authorization(ssidref) == False:
2047 raise VmError("VM is not authorized to run.")
2049 try:
2050 self.domid = xc.domain_create(
2051 domid = 0,
2052 ssidref = ssidref,
2053 handle = uuid.fromString(self.info['uuid']),
2054 flags = (int(hvm) << 0) | (int(hap) << 1),
2055 target = self.info.target())
2056 except Exception, e:
2057 # We may get here if the operation is not permitted due to ACM.
2058 if security.on() == xsconstants.XS_POLICY_ACM:
2059 raise VmError('Domain in conflict set with running domain?')
2061 if self.domid < 0:
2062 raise VmError('Creating domain failed: name=%s' %
2063 self.info['name_label'])
2065 self.dompath = GetDomainPath(self.domid)
2067 self._recreateDom()
2069 # Set timer configuration of domain
2070 timer_mode = self.info["platform"].get("timer_mode")
2071 if hvm and timer_mode is not None:
2072 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2073 long(timer_mode))
2075 # Optionally enable virtual HPET
2076 hpet = self.info["platform"].get("hpet")
2077 if hvm and hpet is not None:
2078 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2079 long(hpet))
2081 # Set maximum number of vcpus in domain
2082 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2084 # Test whether the devices can be assigned with VT-d
2085 pci_str = str(self.info["platform"].get("pci"))
2086 if hvm and pci_str:
2087 bdf = xc.test_assign_device(self.domid, pci_str)
2088 if bdf != 0:
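# A non-zero return identifies the device that failed the test, packed as
# a BDF: bus in bits 16-23 and devfn in bits 8-15, which is split below
# into the 5-bit device and 3-bit function numbers.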
2089 bus = (bdf >> 16) & 0xff
2090 devfn = (bdf >> 8) & 0xff
2091 dev = (devfn >> 3) & 0x1f
2092 func = devfn & 0x7
2093 raise VmError("Fail to assign device(%x:%x.%x): maybe VT-d is "
2094 "not enabled, or the device is not exist, or it "
2095 "has already been assigned to other domain"
2096 % (bus, dev, func))
2098 # register the domain in the list
2099 from xen.xend import XendDomain
2100 XendDomain.instance().add_domain(self)
2102 def _introduceDomain(self):
2103 assert self.domid is not None
2104 assert self.store_mfn is not None
2105 assert self.store_port is not None
2107 try:
2108 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2109 except RuntimeError, exn:
2110 raise XendError(str(exn))
2112 def _setTarget(self, target):
2113 assert self.domid is not None
2115 try:
2116 SetTarget(self.domid, target)
2117 self.storeDom('target', target)
2118 except RuntimeError, exn:
2119 raise XendError(str(exn))
2122 def _initDomain(self):
2123 log.debug('XendDomainInfo.initDomain: %s %s',
2124 self.domid,
2125 self.info['vcpus_params']['weight'])
2127 self._configureBootloader()
2129 try:
2130 if self.info['platform'].get('localtime', 0):
2131 if time.localtime(time.time())[8]:
2132 self.info['platform']['rtc_timeoffset'] = -time.altzone
2133 else:
2134 self.info['platform']['rtc_timeoffset'] = -time.timezone
2136 self.image = image.create(self, self.info)
2138 # repin domain vcpus if a restricted cpus list is provided
2139 # this is done prior to memory allocation to aid in memory
2140 # distribution for NUMA systems.
2141 def has_cpus():
2142 if self.info['cpus'] is not None:
2143 for c in self.info['cpus']:
2144 if c:
2145 return True
2146 return False
2148 if has_cpus():
2149 for v in range(0, self.info['VCPUs_max']):
2150 if self.info['cpus'][v]:
2151 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2152 else:
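# No explicit cpu list was given, so pick a NUMA node heuristically: for
# each node, count the vcpus of other domains whose cpumaps touch that
# node's cpus, normalise by the node's cpu count, and pin all vcpus of
# this domain to the least loaded candidate node with enough free memory.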
2153 def find_relaxed_node(node_list):
2154 import sys
2155 nr_nodes = info['nr_nodes']
2156 if node_list is None:
2157 node_list = range(0, nr_nodes)
2158 nodeload = [0]
2159 nodeload = nodeload * nr_nodes
2160 from xen.xend import XendDomain
2161 doms = XendDomain.instance().list('all')
2162 for dom in filter (lambda d: d.domid != self.domid, doms):
2163 cpuinfo = dom.getVCPUInfo()
2164 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2165 if sxp.child_value(vcpu, 'online') == 0: continue
2166 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2167 for i in range(0, nr_nodes):
2168 node_cpumask = info['node_to_cpu'][i]
2169 for j in node_cpumask:
2170 if j in cpumap:
2171 nodeload[i] += 1
2172 break
2173 for i in range(0, nr_nodes):
2174 if len(info['node_to_cpu'][i]) > 0 and i in node_list:
2175 nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2176 else:
2177 nodeload[i] = sys.maxint
2178 index = nodeload.index( min(nodeload) )
2179 return index
2181 info = xc.physinfo()
2182 if info['nr_nodes'] > 1:
2183 node_memory_list = info['node_to_memory']
2184 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2185 candidate_node_list = []
2186 for i in range(0, info['nr_nodes']):
2187 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2188 candidate_node_list.append(i)
2189 index = find_relaxed_node(candidate_node_list)
2190 cpumask = info['node_to_cpu'][index]
2191 for v in range(0, self.info['VCPUs_max']):
2192 xc.vcpu_setaffinity(self.domid, v, cpumask)
2194 # Use architecture- and image-specific calculations to determine
2195 # the various headrooms necessary, given the raw configured
2196 # values. maxmem, memory, and shadow are all in KiB,
2197 # but memory_static_max etc. are all stored in bytes now.
2198 memory = self.image.getRequiredAvailableMemory(
2199 self.info['memory_dynamic_max'] / 1024)
2200 maxmem = self.image.getRequiredAvailableMemory(
2201 self.info['memory_static_max'] / 1024)
2202 shadow = self.image.getRequiredShadowMemory(
2203 self.info['shadow_memory'] * 1024,
2204 self.info['memory_static_max'] / 1024)
2206 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2207 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2208 # takes MiB and we must not round down and end up under-providing.
2209 shadow = ((shadow + 1023) / 1024) * 1024
2211 # set memory limit
2212 xc.domain_setmaxmem(self.domid, maxmem)
2214 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2215 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2216 # Round vtd_mem up to a multiple of a MiB.
2217 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
2219 # Make sure there's enough RAM available for the domain
2220 balloon.free(memory + shadow + vtd_mem)
2222 # Set up the shadow memory
2223 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2224 self.info['shadow_memory'] = shadow_cur
2226 self._createChannels()
2228 channel_details = self.image.createImage()
2230 self.store_mfn = channel_details['store_mfn']
2231 if 'console_mfn' in channel_details:
2232 self.console_mfn = channel_details['console_mfn']
2233 if 'notes' in channel_details:
2234 self.info.set_notes(channel_details['notes'])
2235 if 'native_protocol' in channel_details:
2236 self.native_protocol = channel_details['native_protocol'];
2238 self._introduceDomain()
2239 if self.info.target():
2240 self._setTarget(self.info.target())
2242 self._createDevices()
2244 self.image.cleanupBootloading()
2246 self.info['start_time'] = time.time()
2248 self._stateSet(DOM_STATE_RUNNING)
2249 except VmError, exn:
2250 log.exception("XendDomainInfo.initDomain: exception occurred")
2251 if self.image:
2252 self.image.cleanupBootloading()
2253 raise exn
2254 except RuntimeError, exn:
2255 log.exception("XendDomainInfo.initDomain: exception occurred")
2256 if self.image:
2257 self.image.cleanupBootloading()
2258 raise VmError(str(exn))
2261 def cleanupDomain(self):
2262 """Cleanup domain resources; release devices. Idempotent. Nothrow
2263 guarantee."""
2265 self.refresh_shutdown_lock.acquire()
2266 try:
2267 self.unwatchShutdown()
2268 self._releaseDevices()
2269 bootloader_tidy(self)
2271 if self.image:
2272 self.image = None
2274 try:
2275 self._removeDom()
2276 except:
2277 log.exception("Removing domain path failed.")
2279 self._stateSet(DOM_STATE_HALTED)
2280 self.domid = None # Do not push into _stateSet()!
2281 finally:
2282 self.refresh_shutdown_lock.release()
2285 def unwatchShutdown(self):
2286 """Remove the watch on the domain's control/shutdown node, if any.
2287 Idempotent. Nothrow guarantee. Expects to be protected by the
2288 refresh_shutdown_lock."""
2290 try:
2291 try:
2292 if self.shutdownWatch:
2293 self.shutdownWatch.unwatch()
2294 finally:
2295 self.shutdownWatch = None
2296 except:
2297 log.exception("Unwatching control/shutdown failed.")
2299 def waitForShutdown(self):
2300 self.state_updated.acquire()
2301 try:
2302 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2303 self.state_updated.wait(timeout=1.0)
2304 finally:
2305 self.state_updated.release()
2308 # TODO: recategorise - called from XendCheckpoint
2311 def completeRestore(self, store_mfn, console_mfn):
2313 log.debug("XendDomainInfo.completeRestore")
2315 self.store_mfn = store_mfn
2316 self.console_mfn = console_mfn
2318 self._introduceDomain()
2319 self.image = image.create(self, self.info)
2320 if self.image:
2321 self.image.createDeviceModel(True)
2322 self._storeDomDetails()
2323 self._registerWatches()
2324 self.refreshShutdown()
2326 log.debug("XendDomainInfo.completeRestore done")
2329 def _endRestore(self):
2330 self.setResume(False)
2333 # VM Destroy
2336 def _prepare_phantom_paths(self):
2337 # get associated devices to destroy
2338 # build list of phantom devices to be removed after normal devices
2339 plist = []
2340 if self.domid is not None:
2341 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2342 try:
2343 for dev in t.list():
2344 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2345 % (self.dompath, dev))
2346 if backend_phantom_vbd is not None:
2347 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2348 % backend_phantom_vbd)
2349 plist.append(backend_phantom_vbd)
2350 plist.append(frontend_phantom_vbd)
2351 finally:
2352 t.abort()
2353 return plist
2355 def _cleanup_phantom_devs(self, plist):
2356 # remove phantom devices
2357 if not plist == []:
2358 time.sleep(2)
2359 for paths in plist:
2360 if paths.find('backend') != -1:
2361 from xen.xend.server import DevController
2362 # Modify online status /before/ updating state (latter is watched by
2363 # drivers, so this ordering avoids a race).
2364 xstransact.Write(paths, 'online', "0")
2365 xstransact.Write(paths, 'state', str(DevController.xenbusState['Closing']))
2366 # force
2367 xstransact.Remove(paths)
2369 def destroy(self):
2370 """Cleanup VM and destroy domain. Nothrow guarantee."""
2372 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
2374 paths = self._prepare_phantom_paths()
2376 self._cleanupVm()
2377 if self.dompath is not None:
2378 self.destroyDomain()
2380 self._cleanup_phantom_devs(paths)
2382 if "transient" in self.info["other_config"] \
2383 and bool(self.info["other_config"]["transient"]):
2384 from xen.xend import XendDomain
2385 XendDomain.instance().domain_delete_by_dominfo(self)
2388 def destroyDomain(self):
2389 log.debug("XendDomainInfo.destroyDomain(%s)", str(self.domid))
2391 paths = self._prepare_phantom_paths()
2393 try:
2394 if self.domid is not None:
2395 xc.domain_destroy_hook(self.domid)
2396 xc.domain_destroy(self.domid)
2397 for state in DOM_STATES_OLD:
2398 self.info[state] = 0
2399 self._stateSet(DOM_STATE_HALTED)
2400 except:
2401 log.exception("XendDomainInfo.destroy: xc.domain_destroy failed.")
2403 from xen.xend import XendDomain
2404 XendDomain.instance().remove_domain(self)
2406 self.cleanupDomain()
2407 self._cleanup_phantom_devs(paths)
2410 def resetDomain(self):
2411 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
2413 old_domid = self.domid
2414 prev_vm_xend = self._listRecursiveVm('xend')
2415 new_dom_info = self.info
2416 try:
2417 self._unwatchVm()
2418 self.destroy()
2420 new_dom = None
2421 try:
2422 from xen.xend import XendDomain
2423 new_dom_info['domid'] = None
2424 new_dom = XendDomain.instance().domain_create_from_dict(
2425 new_dom_info)
2426 for x in prev_vm_xend[0][1]:
2427 new_dom._writeVm('xend/%s' % x[0], x[1])
2428 new_dom.waitForDevices()
2429 new_dom.unpause()
2430 except:
2431 if new_dom:
2432 new_dom.destroy()
2433 raise
2434 except:
2435 log.exception('Failed to reset domain %s.', str(old_domid))
2438 def resumeDomain(self):
2439 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
2441 # resume a suspended domain (e.g. after live checkpoint, or after
2442 # a later error during save or migrate); checks that the domain
2443 # is currently suspended first, so it is safe to call from anywhere
2445 xeninfo = dom_get(self.domid)
2446 if xeninfo is None:
2447 return
2448 if not xeninfo['shutdown']:
2449 return
2450 reason = shutdown_reason(xeninfo['shutdown_reason'])
2451 if reason != 'suspend':
2452 return
2454 try:
2455 # could also fetch a parsed note from xenstore
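# 'X and 1 or 0' is the old-style conditional expression: fast is 1 when
# the guest image advertises SUSPEND_CANCEL, in which case devices are
# left in place; otherwise they are released and recreated around the
# resume.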
2456 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
2457 if not fast:
2458 self._releaseDevices()
2459 self.testDeviceComplete()
2460 self.testvifsComplete()
2461 log.debug("XendDomainInfo.resumeDomain: devices released")
2463 self._resetChannels()
2465 self._removeDom('control/shutdown')
2466 self._removeDom('device-misc/vif/nextDeviceID')
2468 self._createChannels()
2469 self._introduceDomain()
2470 self._storeDomDetails()
2472 self._createDevices()
2473 log.debug("XendDomainInfo.resumeDomain: devices created")
2475 xc.domain_resume(self.domid, fast)
2476 ResumeDomain(self.domid)
2477 except:
2478 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
2479 self.image.resumeDeviceModel()
2480 log.debug("XendDomainInfo.resumeDomain: completed")
2484 # Channels for xenstore and console
2487 def _createChannels(self):
2488 """Create the channels to the domain.
2489 """
2490 self.store_port = self._createChannel()
2491 self.console_port = self._createChannel()
2494 def _createChannel(self):
2495 """Create an event channel to the domain.
2496 """
2497 try:
2498 if self.domid != None:
2499 return xc.evtchn_alloc_unbound(domid = self.domid,
2500 remote_dom = 0)
2501 except:
2502 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2503 raise
2505 def _resetChannels(self):
2506 """Reset all event channels in the domain.
2507 """
2508 try:
2509 if self.domid != None:
2510 return xc.evtchn_reset(dom = self.domid)
2511 except:
2512 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2513 raise
2517 # Bootloader configuration
2520 def _configureBootloader(self):
2521 """Run the bootloader if we're configured to do so."""
2523 blexec = self.info['PV_bootloader']
2524 bootloader_args = self.info['PV_bootloader_args']
2525 kernel = self.info['PV_kernel']
2526 ramdisk = self.info['PV_ramdisk']
2527 args = self.info['PV_args']
2528 boot = self.info['HVM_boot_policy']
2530 if boot:
2531 # HVM booting.
2532 pass
2533 elif not blexec and kernel:
2534 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
2535 # will be picked up by image.py.
2536 pass
2537 else:
2538 # Boot using bootloader
2539 if not blexec or blexec == 'pygrub':
2540 blexec = osdep.pygrub_path
2542 blcfg = None
2543 disks = [x for x in self.info['vbd_refs']
2544 if self.info['devices'][x][1]['bootable']]
2546 if not disks:
2547 msg = "Had a bootloader specified, but no disks are bootable"
2548 log.error(msg)
2549 raise VmError(msg)
2551 devinfo = self.info['devices'][disks[0]]
2552 devtype = devinfo[0]
2553 disk = devinfo[1]['uname']
2555 fn = blkdev_uname_to_file(disk)
2556 taptype = blkdev_uname_to_taptype(disk)
2557 mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
2558 if mounted:
2559 # This is a file, not a device. pygrub can cope with a
2560 # file if it's raw, but if it's QCOW or other such formats
2561 # used through blktap, then we need to mount it first.
2563 log.info("Mounting %s on %s." %
2564 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2566 vbd = {
2567 'mode': 'RO',
2568 'device': BOOTLOADER_LOOPBACK_DEVICE,
}
2571 from xen.xend import XendDomain
2572 dom0 = XendDomain.instance().privilegedDomain()
2573 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
2574 fn = BOOTLOADER_LOOPBACK_DEVICE
2576 try:
2577 blcfg = bootloader(blexec, fn, self, False,
2578 bootloader_args, kernel, ramdisk, args)
2579 finally:
2580 if mounted:
2581 log.info("Unmounting %s from %s." %
2582 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2584 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
2586 if blcfg is None:
2587 msg = "Had a bootloader specified, but can't find disk"
2588 log.error(msg)
2589 raise VmError(msg)
2591 self.info.update_with_image_sxp(blcfg, True)
2595 # VM Functions
2598 def _readVMDetails(self, params):
2599 """Read the specified parameters from the store.
2600 """
2601 try:
2602 return self._gatherVm(*params)
2603 except ValueError:
2604 # One of the int/float entries in params has a corresponding store
2605 # entry that is invalid. We recover, because older versions of
2606 # Xend may have put the entry there (memory/target, for example),
2607 # but this is in general a bad situation to have reached.
2608 log.exception(
2609 "Store corrupted at %s! Domain %d's configuration may be "
2610 "affected.", self.vmpath, self.domid)
2611 return []
2613 def _cleanupVm(self):
2614 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2616 self.metrics.destroy()
2617 self._unwatchVm()
2619 try:
2620 self._removeVm()
2621 except:
2622 log.exception("Removing VM path failed.")
2625 def checkLiveMigrateMemory(self):
2626 """ Make sure there's enough memory to migrate this domain """
2627 overhead_kb = 0
2628 if arch.type == "x86":
2629 # 1MB per vcpu plus 4KiB per MiB of RAM. This is higher than
2630 # the minimum that Xen would allocate if no value were given.
2631 overhead_kb = self.info['VCPUs_max'] * 1024 + \
2632 (self.info['memory_static_max'] / 1024 / 1024) * 4
2633 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
2634 # The domain might already have some shadow memory
2635 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
2636 if overhead_kb > 0:
2637 balloon.free(overhead_kb)
2639 def _unwatchVm(self):
2640 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2641 guarantee."""
2642 try:
2643 try:
2644 if self.vmWatch:
2645 self.vmWatch.unwatch()
2646 finally:
2647 self.vmWatch = None
2648 except:
2649 log.exception("Unwatching VM path failed.")
2651 def testDeviceComplete(self):
2652 """ For Block IO migration safety we must ensure that
2653 the device has shut down correctly, i.e. all blocks are
2654 flushed to disk
2655 """
2656 start = time.time()
2657 while True:
2658 test = 0
2659 diff = time.time() - start
2660 for i in self.getDeviceController('vbd').deviceIDs():
2661 test = 1
2662 log.info("Dev %s still active, looping...", i)
2663 time.sleep(0.1)
2665 if test == 0:
2666 break
2667 if diff >= MIGRATE_TIMEOUT:
2668 log.info("Dev still active but hit max loop timeout")
2669 break
2671 def testvifsComplete(self):
2672 """ In case vifs are released and then created for the same
2673 domain, we need to wait for the devices to shut down.
2674 """
2675 start = time.time()
2676 while True:
2677 test = 0
2678 diff = time.time() - start
2679 for i in self.getDeviceController('vif').deviceIDs():
2680 test = 1
2681 log.info("Dev %s still active, looping...", i)
2682 time.sleep(0.1)
2684 if test == 0:
2685 break
2686 if diff >= MIGRATE_TIMEOUT:
2687 log.info("Dev still active but hit max loop timeout")
2688 break
2690 def _storeVmDetails(self):
2691 to_store = {}
2693 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
2694 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
2695 if self._infoIsSet(info_key):
2696 to_store[key] = str(self.info[info_key])
2698 if self._infoIsSet("static_memory_min"):
2699 to_store["memory"] = str(self.info["static_memory_min"])
2700 if self._infoIsSet("static_memory_max"):
2701 to_store["maxmem"] = str(self.info["static_memory_max"])
2703 image_sxpr = self.info.image_sxpr()
2704 if image_sxpr:
2705 to_store['image'] = sxp.to_string(image_sxpr)
2707 if not self._readVm('xend/restart_count'):
2708 to_store['xend/restart_count'] = str(0)
2710 log.debug("Storing VM details: %s", scrub_password(to_store))
2712 self._writeVm(to_store)
2713 self._setVmPermissions()
2716 def _setVmPermissions(self):
2717 """Allow the guest domain to read its UUID. We don't allow it to
2718 access any other entry, for security."""
2719 xstransact.SetPermissions('%s/uuid' % self.vmpath,
2720 { 'dom' : self.domid,
2721 'read' : True,
2722 'write' : False })
2725 # Utility functions
2728 def __getattr__(self, name):
2729 if name == "state":
2730 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
2731 log.warn("".join(traceback.format_stack()))
2732 return self._stateGet()
2733 else:
2734 raise AttributeError()
2736 def __setattr__(self, name, value):
2737 if name == "state":
2738 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
2739 log.warn("".join(traceback.format_stack()))
2740 self._stateSet(value)
2741 else:
2742 self.__dict__[name] = value
2744 def _stateSet(self, state):
2745 self.state_updated.acquire()
2746 try:
2747 # TODO Not sure this is correct...
2748 # _stateGet is live now. Why not fire event
2749 # even when it hasn't changed?
2750 if self._stateGet() != state:
2751 self.state_updated.notifyAll()
2752 import XendAPI
2753 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
2754 'power_state')
2755 finally:
2756 self.state_updated.release()
2758 def _stateGet(self):
2759 # Let's try to reconstitute the state from xc.
2760 # First, get the domain info
2761 # from xc - this will tell us whether the domain
2762 # exists.
2763 info = dom_get(self.getDomid())
2764 if info is None or info['shutdown']:
2765 # We are either HALTED or SUSPENDED
2766 # check saved image exists
2767 from xen.xend import XendDomain
2768 managed_config_path = \
2769 XendDomain.instance()._managed_check_point_path( \
2770 self.get_uuid())
2771 if os.path.exists(managed_config_path):
2772 return XEN_API_VM_POWER_STATE_SUSPENDED
2773 else:
2774 return XEN_API_VM_POWER_STATE_HALTED
2775 elif info['crashed']:
2776 # Crashed
2777 return XEN_API_VM_POWER_STATE_CRASHED
2778 else:
2779 # We are either RUNNING or PAUSED
2780 if info['paused']:
2781 return XEN_API_VM_POWER_STATE_PAUSED
2782 else:
2783 return XEN_API_VM_POWER_STATE_RUNNING
2785 def _infoIsSet(self, name):
2786 return name in self.info and self.info[name] is not None
2788 def _checkName(self, name):
2789 """Check if a vm name is valid. Valid names contain alphabetic
2790 characters, digits, or characters in '_-.:/+'.
2791 The same name cannot be used for more than one vm at the same time.
2793 @param name: name
2794 @raise: VmError if invalid
2795 """
2796 from xen.xend import XendDomain
2798 if name is None or name == '':
2799 raise VmError('Missing VM Name')
2801 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
2802 raise VmError('Invalid VM Name')
2804 dom = XendDomain.instance().domain_lookup_nr(name)
2805 if dom and dom.info['uuid'] != self.info['uuid']:
2806 raise VmError("VM name '%s' already exists%s" %
2807 (name,
2808 dom.domid is not None and
2809 (" as domain %s" % str(dom.domid)) or ""))
2812 def update(self, info = None, refresh = True, transaction = None):
2813 """Update with info from xc.domain_getinfo().
2814 """
2815 log.trace("XendDomainInfo.update(%s) on domain %s", info,
2816 str(self.domid))
2818 if not info:
2819 info = dom_get(self.domid)
2820 if not info:
2821 return
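# A negative maxmem_kb from xc presumably means "no limit"; substitute the
# host's total memory so later arithmetic works on a real number.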
2823 if info["maxmem_kb"] < 0:
2824 info["maxmem_kb"] = XendNode.instance() \
2825 .physinfo_dict()['total_memory'] * 1024
2827 # The ssidref field is no longer used.
2828 if 'ssidref' in info:
2829 info.pop('ssidref')
2831 # make sure state is reset for info
2832 # TODO: we should eventually get rid of old_dom_states
2834 self.info.update_config(info)
2835 self._update_consoles(transaction)
2837 if refresh:
2838 self.refreshShutdown(info)
2840 log.trace("XendDomainInfo.update done on domain %s: %s",
2841 str(self.domid), self.info)
2843 def sxpr(self, ignore_store = False, legacy_only = True):
2844 result = self.info.to_sxp(domain = self,
2845 ignore_devices = ignore_store,
2846 legacy_only = legacy_only)
2848 #if not ignore_store and self.dompath:
2849 # vnc_port = self.readDom('console/vnc-port')
2850 # if vnc_port is not None:
2851 # result.append(['device',
2852 # ['console', ['vnc-port', str(vnc_port)]]])
2854 return result
2856 # Xen API
2857 # ----------------------------------------------------------------
2859 def get_uuid(self):
2860 dom_uuid = self.info.get('uuid')
2861 if not dom_uuid: # if it doesn't exist, make one up
2862 dom_uuid = uuid.createString()
2863 self.info['uuid'] = dom_uuid
2864 return dom_uuid
2866 def get_memory_static_max(self):
2867 return self.info.get('memory_static_max', 0)
2868 def get_memory_static_min(self):
2869 return self.info.get('memory_static_min', 0)
2870 def get_memory_dynamic_max(self):
2871 return self.info.get('memory_dynamic_max', 0)
2872 def get_memory_dynamic_min(self):
2873 return self.info.get('memory_dynamic_min', 0)
2875 # only update memory-related config values if they maintain sanity
2876 def _safe_set_memory(self, key, newval):
2877 oldval = self.info.get(key, 0)
2878 try:
2879 self.info[key] = newval
2880 self.info._memory_sanity_check()
2881 except Exception, ex:
2882 self.info[key] = oldval
2883 raise
2885 def set_memory_static_max(self, val):
2886 self._safe_set_memory('memory_static_max', val)
2887 def set_memory_static_min(self, val):
2888 self._safe_set_memory('memory_static_min', val)
2889 def set_memory_dynamic_max(self, val):
2890 self._safe_set_memory('memory_dynamic_max', val)
2891 def set_memory_dynamic_min(self, val):
2892 self._safe_set_memory('memory_dynamic_min', val)
2894 def get_vcpus_params(self):
2895 if self.getDomid() is None:
2896 return self.info['vcpus_params']
2898 retval = xc.sched_credit_domain_get(self.getDomid())
2899 return retval
2900 def get_power_state(self):
2901 return XEN_API_VM_POWER_STATE[self._stateGet()]
2902 def get_platform(self):
2903 return self.info.get('platform', {})
2904 def get_pci_bus(self):
2905 return self.info.get('pci_bus', '')
2906 def get_tools_version(self):
2907 return self.info.get('tools_version', {})
2908 def get_metrics(self):
2909 return self.metrics.get_uuid();
2912 def get_security_label(self, xspol=None):
2913 import xen.util.xsm.xsm as security
2914 label = security.get_security_label(self, xspol)
2915 return label
2917 def set_security_label(self, seclab, old_seclab, xspol=None,
2918 xspol_old=None):
2919 """
2920 Set the security label of a domain from its old to
2921 a new value.
2922 @param seclab New security label formatted in the form
2923 <policy type>:<policy name>:<vm label>
2924 @param old_seclab The current security label that the
2925 VM must have.
2926 @param xspol An optional policy under which this
2927 update should be done. If not given,
2928 then the current active policy is used.
2929 @param xspol_old The old policy; only to be passed during
2930 the updating of a policy
2931 @return Returns return code, a string with errors from
2932 the hypervisor's operation, old label of the
2933 domain
2934 """
2935 rc = 0
2936 errors = ""
2937 old_label = ""
2938 new_ssidref = 0
2939 domid = self.getDomid()
2940 res_labels = None
2941 is_policy_update = (xspol_old != None)
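# Overall flow: check the domain is in a state that allows relabeling,
# handle removal of the label if an empty one was passed, parse the new
# label as <policy type>:<policy name>:<vm label>, relabel in the
# hypervisor for running/paused domains, then record and save the result.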
2943 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
2945 state = self._stateGet()
2946 # Relabel only HALTED or RUNNING or PAUSED domains
2947 if domid != 0 and \
2948 state not in \
2949 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
2950 DOM_STATE_SUSPENDED ]:
2951 log.warn("Relabeling domain not possible in state '%s'" %
2952 DOM_STATES[state])
2953 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
2955 # Remove security label. Works only for halted or suspended domains
2956 if not seclab or seclab == "":
2957 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
2958 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
2960 if self.info.has_key('security_label'):
2961 old_label = self.info['security_label']
2962 # Check label against expected one.
2963 if old_label != old_seclab:
2964 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2965 del self.info['security_label']
2966 xen.xend.XendDomain.instance().managed_config_save(self)
2967 return (xsconstants.XSERR_SUCCESS, "", "", 0)
2969 tmp = seclab.split(":")
2970 if len(tmp) != 3:
2971 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
2972 typ, policy, label = tmp
2974 poladmin = XSPolicyAdminInstance()
2975 if not xspol:
2976 xspol = poladmin.get_policy_by_name(policy)
2978 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
2979 # If the domain is running or paused, try to relabel it in the hypervisor
2980 if not xspol:
2981 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
2983 if typ != xspol.get_type_name() or \
2984 policy != xspol.get_name():
2985 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2987 if typ == xsconstants.ACM_POLICY_ID:
2988 new_ssidref = xspol.vmlabel_to_ssidref(label)
2989 if new_ssidref == xsconstants.INVALID_SSIDREF:
2990 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2992 # Check that all used resources are accessible under the
2993 # new label
2994 if not is_policy_update and \
2995 not security.resources_compatible_with_vmlabel(xspol,
2996 self, label):
2997 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
2999 #Check label against expected one. Can only do this
3000 # if the policy hasn't changed underneath in the meantime
3001 if xspol_old == None:
3002 old_label = self.get_security_label()
3003 if old_label != old_seclab:
3004 log.info("old_label != old_seclab: %s != %s" %
3005 (old_label, old_seclab))
3006 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3008 # relabel domain in the hypervisor
3009 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3010 log.info("rc from relabeling in HV: %d" % rc)
3011 else:
3012 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3014 if rc == 0:
3015 # HALTED, RUNNING or PAUSED
3016 if domid == 0:
3017 if xspol:
3018 self.info['security_label'] = seclab
3019 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3020 else:
3021 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3022 else:
3023 if self.info.has_key('security_label'):
3024 old_label = self.info['security_label']
3025 # Check label against expected one, unless wildcard
3026 if old_label != old_seclab:
3027 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3029 self.info['security_label'] = seclab
3031 try:
3032 xen.xend.XendDomain.instance().managed_config_save(self)
3033 except:
3034 pass
3035 return (rc, errors, old_label, new_ssidref)
3037 def get_on_shutdown(self):
3038 after_shutdown = self.info.get('actions_after_shutdown')
3039 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3040 return XEN_API_ON_NORMAL_EXIT[-1]
3041 return after_shutdown
3043 def get_on_reboot(self):
3044 after_reboot = self.info.get('actions_after_reboot')
3045 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3046 return XEN_API_ON_NORMAL_EXIT[-1]
3047 return after_reboot
3049 def get_on_suspend(self):
3050 # TODO: not supported
3051 after_suspend = self.info.get('actions_after_suspend')
3052 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3053 return XEN_API_ON_NORMAL_EXIT[-1]
3054 return after_suspend
3056 def get_on_crash(self):
3057 after_crash = self.info.get('actions_after_crash')
3058 if not after_crash or after_crash not in \
3059 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3060 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3061 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3063 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3064 """ Get's a device configuration either from XendConfig or
3065 from the DevController.
3067 @param dev_class: device class, either, 'vbd' or 'vif'
3068 @param dev_uuid: device UUID
3070 @rtype: dictionary
3071 """
3072 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3074 # shortcut if the domain isn't started because
3075 # the devcontrollers will have no better information
3076 # than XendConfig.
3077 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3078 XEN_API_VM_POWER_STATE_SUSPENDED):
3079 if dev_config:
3080 return copy.deepcopy(dev_config)
3081 return None
3083 # instead of using dev_class, we use the dev_type
3084 # that is from XendConfig.
3085 controller = self.getDeviceController(dev_type)
3086 if not controller:
3087 return None
3089 all_configs = controller.getAllDeviceConfigurations()
3090 if not all_configs:
3091 return None
3093 updated_dev_config = copy.deepcopy(dev_config)
3094 for _devid, _devcfg in all_configs.items():
3095 if _devcfg.get('uuid') == dev_uuid:
3096 updated_dev_config.update(_devcfg)
3097 updated_dev_config['id'] = _devid
3098 return updated_dev_config
3100 return updated_dev_config
3102 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3103 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3104 if not config:
3105 return {}
3107 config['VM'] = self.get_uuid()
3109 if dev_class == 'vif':
3110 if not config.has_key('name'):
3111 config['name'] = config.get('vifname', '')
3112 if not config.has_key('MAC'):
3113 config['MAC'] = config.get('mac', '')
3114 if not config.has_key('type'):
3115 config['type'] = 'paravirtualised'
3116 if not config.has_key('device'):
3117 devid = config.get('id')
3118 if devid != None:
3119 config['device'] = 'eth%d' % devid
3120 else:
3121 config['device'] = ''
3123 if not config.has_key('network'):
3124 try:
3125 bridge = config.get('bridge', None)
3126 if bridge is None:
3127 from xen.util import Brctl
3128 if_to_br = dict([(i,b)
3129 for (b,ifs) in Brctl.get_state().items()
3130 for i in ifs])
3131 vifname = "vif%s.%s" % (self.getDomid(),
3132 config.get('id'))
3133 bridge = if_to_br.get(vifname, None)
3134 config['network'] = \
3135 XendNode.instance().bridge_to_network(
3136 config.get('bridge')).get_uuid()
3137 except Exception:
3138 log.exception('bridge_to_network')
3139 # Ignore this for now -- it may happen if the device
3140 # has been specified using the legacy methods, but at
3141 # some point we're going to have to figure out how to
3142 # handle that properly.
3144 config['MTU'] = 1500 # TODO
3146 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3147 xennode = XendNode.instance()
3148 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3149 config['io_read_kbs'] = rx_bps/1024
3150 config['io_write_kbs'] = tx_bps/1024
3151 rx, tx = xennode.get_vif_stat(self.domid, devid)
3152 config['io_total_read_kbs'] = rx/1024
3153 config['io_total_write_kbs'] = tx/1024
3154 else:
3155 config['io_read_kbs'] = 0.0
3156 config['io_write_kbs'] = 0.0
3157 config['io_total_read_kbs'] = 0.0
3158 config['io_total_write_kbs'] = 0.0
3160 config['security_label'] = config.get('security_label', '')
3162 if dev_class == 'vbd':
3164 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3165 controller = self.getDeviceController(dev_class)
3166 devid, _1, _2 = controller.getDeviceDetails(config)
3167 xennode = XendNode.instance()
3168 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3169 config['io_read_kbs'] = rd_blkps
3170 config['io_write_kbs'] = wr_blkps
3171 else:
3172 config['io_read_kbs'] = 0.0
3173 config['io_write_kbs'] = 0.0
3175 config['VDI'] = config.get('VDI', '')
3176 config['device'] = config.get('dev', '')
3177 if ':' in config['device']:
3178 vbd_name, vbd_type = config['device'].split(':', 1)
3179 config['device'] = vbd_name
3180 if vbd_type == 'cdrom':
3181 config['type'] = XEN_API_VBD_TYPE[0]
3182 else:
3183 config['type'] = XEN_API_VBD_TYPE[1]
3185 config['driver'] = 'paravirtualised' # TODO
3186 config['image'] = config.get('uname', '')
3188 if config.get('mode', 'r') == 'r':
3189 config['mode'] = 'RO'
3190 else:
3191 config['mode'] = 'RW'
3193 if dev_class == 'vtpm':
3194 if not config.has_key('type'):
3195 config['type'] = 'paravirtualised' # TODO
3196 if not config.has_key('backend'):
3197 config['backend'] = "00000000-0000-0000-0000-000000000000"
3199 return config
3201 def get_dev_property(self, dev_class, dev_uuid, field):
3202 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3203 try:
3204 return config[field]
3205 except KeyError:
3206 raise XendError('Invalid property for device: %s' % field)
3208 def set_dev_property(self, dev_class, dev_uuid, field, value):
3209 self.info['devices'][dev_uuid][1][field] = value
3211 def get_vcpus_util(self):
3212 vcpu_util = {}
3213 xennode = XendNode.instance()
3214 if 'VCPUs_max' in self.info and self.domid != None:
3215 for i in range(0, self.info['VCPUs_max']):
3216 util = xennode.get_vcpu_util(self.domid, i)
3217 vcpu_util[str(i)] = util
3219 return vcpu_util
3221 def get_consoles(self):
3222 return self.info.get('console_refs', [])
3224 def get_vifs(self):
3225 return self.info.get('vif_refs', [])
3227 def get_vbds(self):
3228 return self.info.get('vbd_refs', [])
3230 def get_vtpms(self):
3231 return self.info.get('vtpm_refs', [])
3233 def get_dpcis(self):
3234 return XendDPCI.get_by_VM(self.info.get('uuid'))
3236 def create_vbd(self, xenapi_vbd, vdi_image_path):
3237 """Create a VBD using a VDI from XendStorageRepository.
3239 @param xenapi_vbd: vbd struct from the Xen API
3240 @param vdi_image_path: VDI UUID
3241 @rtype: string
3242 @return: uuid of the device
3243 """
3244 xenapi_vbd['image'] = vdi_image_path
3245 if vdi_image_path.startswith('tap'):
3246 dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
3247 else:
3248 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3250 if not dev_uuid:
3251 raise XendError('Failed to create device')
3253 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3254 XEN_API_VM_POWER_STATE_PAUSED):
3255 _, config = self.info['devices'][dev_uuid]
3257 if vdi_image_path.startswith('tap'):
3258 dev_control = self.getDeviceController('tap')
3259 else:
3260 dev_control = self.getDeviceController('vbd')
3262 try:
3263 devid = dev_control.createDevice(config)
3264 dev_control.waitForDevice(devid)
3265 self.info.device_update(dev_uuid,
3266 cfg_xenapi = {'devid': devid})
3267 except Exception, exn:
3268 log.exception(exn)
3269 del self.info['devices'][dev_uuid]
3270 self.info['vbd_refs'].remove(dev_uuid)
3271 raise
3273 return dev_uuid
3275 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3276 """Create a VBD using a VDI from XendStorageRepository.
3278 @param xenapi_vbd: vbd struct from the Xen API
3279 @param vdi_image_path: VDI UUID
3280 @rtype: string
3281 @return: uuid of the device
3282 """
3283 xenapi_vbd['image'] = vdi_image_path
3284 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3285 if not dev_uuid:
3286 raise XendError('Failed to create device')
3288 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3289 _, config = self.info['devices'][dev_uuid]
3290 config['devid'] = self.getDeviceController('tap').createDevice(config)
3292 return config['devid']
3294 def create_vif(self, xenapi_vif):
3295 """Create VIF device from the passed struct in Xen API format.
3297 @param xenapi_vif: Xen API VIF Struct.
3298 @rtype: string
3299 @return: UUID
3300 """
3301 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3302 if not dev_uuid:
3303 raise XendError('Failed to create device')
3305 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3306 XEN_API_VM_POWER_STATE_PAUSED):
3308 _, config = self.info['devices'][dev_uuid]
3309 dev_control = self.getDeviceController('vif')
3311 try:
3312 devid = dev_control.createDevice(config)
3313 dev_control.waitForDevice(devid)
3314 self.info.device_update(dev_uuid,
3315 cfg_xenapi = {'devid': devid})
3316 except Exception, exn:
3317 log.exception(exn)
3318 del self.info['devices'][dev_uuid]
3319 self.info['vif_refs'].remove(dev_uuid)
3320 raise
3322 return dev_uuid
3324 def create_vtpm(self, xenapi_vtpm):
3325 """Create a VTPM device from the passed struct in Xen API format.
3327 @return: uuid of the device
3328 @rtype: string
3329 """
3331 if self._stateGet() not in (DOM_STATE_HALTED,):
3332 raise VmError("Can only add vTPM to a halted domain.")
3333 if self.get_vtpms() != []:
3334 raise VmError('Domain already has a vTPM.')
3335 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3336 if not dev_uuid:
3337 raise XendError('Failed to create device')
3339 return dev_uuid
3341 def create_console(self, xenapi_console):
3342 """ Create a console device from a Xen API struct.
3344 @return: uuid of device
3345 @rtype: string
3346 """
3347 if self._stateGet() not in (DOM_STATE_HALTED,):
3348 raise VmError("Can only add console to a halted domain.")
3350 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3351 if not dev_uuid:
3352 raise XendError('Failed to create device')
3354 return dev_uuid
3356 def set_console_other_config(self, console_uuid, other_config):
3357 self.info.console_update(console_uuid, 'other_config', other_config)
3359 def create_dpci(self, xenapi_pci):
3360 """Create pci device from the passed struct in Xen API format.
3362 @param xenapi_pci: DPCI struct from Xen API
3363 @rtype: bool
3364 #@rtype: string
3365 @return: True if successfully created device
3366 #@return: UUID
3367 """
3369 dpci_uuid = uuid.createString()
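# Two paths below: if the domain is not running, merge the new device into
# the domain's 'pci' configuration (creating it if necessary); if it is
# running, hot-plug it through device_configure().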
3371 # Convert xenapi to sxp
3372 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3374 target_pci_sxp = \
3375 ['pci',
3376 ['dev',
3377 ['domain', '0x%02x' % ppci.get_domain()],
3378 ['bus', '0x%02x' % ppci.get_bus()],
3379 ['slot', '0x%02x' % ppci.get_slot()],
3380 ['func', '0x%1x' % ppci.get_func()],
3381 ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3382 ['uuid', dpci_uuid]
3383 ],
3384 ['state', 'Initialising']
]
3387 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3389 old_pci_sxp = self._getDeviceInfo_pci(0)
3391 if old_pci_sxp is None:
3392 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3393 if not dev_uuid:
3394 raise XendError('Failed to create device')
3396 else:
3397 new_pci_sxp = ['pci']
3398 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3399 new_pci_sxp.append(existing_dev)
3400 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3402 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3403 self.info.device_update(dev_uuid, new_pci_sxp)
3405 xen.xend.XendDomain.instance().managed_config_save(self)
3407 else:
3408 try:
3409 self.device_configure(target_pci_sxp)
3411 except Exception, exn:
3412 raise XendError('Failed to create device')
3414 return dpci_uuid
3417 def destroy_device_by_uuid(self, dev_type, dev_uuid):
3418 if dev_uuid not in self.info['devices']:
3419 raise XendError('Device does not exist')
3421 try:
3422 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3423 XEN_API_VM_POWER_STATE_PAUSED):
3424 _, config = self.info['devices'][dev_uuid]
3425 devid = config.get('devid')
3426 if devid != None:
3427 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
3428 else:
3429 raise XendError('Unable to get devid for device: %s:%s' %
3430 (dev_type, dev_uuid))
3431 finally:
3432 del self.info['devices'][dev_uuid]
3433 self.info['%s_refs' % dev_type].remove(dev_uuid)
3435 def destroy_vbd(self, dev_uuid):
3436 self.destroy_device_by_uuid('vbd', dev_uuid)
3438 def destroy_vif(self, dev_uuid):
3439 self.destroy_device_by_uuid('vif', dev_uuid)
3441 def destroy_vtpm(self, dev_uuid):
3442 self.destroy_device_by_uuid('vtpm', dev_uuid)
3444 def destroy_dpci(self, dev_uuid):
3446 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
3447 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
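# Rebuild the domain's 'pci' configuration without the entry whose
# domain:bus:slot.func name matches the PPCI being removed. If the domain
# is not running the trimmed configuration is saved; otherwise the matching
# entry is pushed to the 'Closing' state via device_configure().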
3449 old_pci_sxp = self._getDeviceInfo_pci(0)
3450 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3451 target_dev = None
3452 new_pci_sxp = ['pci']
3453 for dev in sxp.children(old_pci_sxp, 'dev'):
3454 domain = int(sxp.child_value(dev, 'domain'), 16)
3455 bus = int(sxp.child_value(dev, 'bus'), 16)
3456 slot = int(sxp.child_value(dev, 'slot'), 16)
3457 func = int(sxp.child_value(dev, 'func'), 16)
3458 name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
3459 if ppci.get_name() == name:
3460 target_dev = dev
3461 else:
3462 new_pci_sxp.append(dev)
3464 if target_dev is None:
3465 raise XendError('Failed to destroy device')
3467 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
3469 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3471 self.info.device_update(dev_uuid, new_pci_sxp)
3472 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
3473 del self.info['devices'][dev_uuid]
3474 xen.xend.XendDomain.instance().managed_config_save(self)
3476 else:
3477 try:
3478 self.device_configure(target_pci_sxp)
3480 except Exception, exn:
3481 raise XendError('Failed to destroy device')
3483 def destroy_xapi_device_instances(self):
3484 """Destroy Xen-API device instances stored in XendAPIStore.
3485 """
3486 # Xen-API classes based on XendBase have their instances stored
3487 # in XendAPIStore. Cleanup these virtual device instances here
3488 # if they are supposed to be destroyed when the parent domain is dead.
3490 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3491 # XendBase and there's no need to remove them from XendAPIStore.
3493 from xen.xend import XendDomain
3494 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3495 # domain still exists.
3496 return
3498 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3499 XendAPIStore.deregister(dpci_uuid, "DPCI")
3501 def has_device(self, dev_class, dev_uuid):
3502 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3504 def __str__(self):
3505 return '<domain id=%s name=%s memory=%s state=%s>' % \
3506 (str(self.domid), self.info['name_label'],
3507 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3509 __repr__ = __str__