ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19647:1c627434605e

blktap2: a completely rewritten blktap implementation

Benefits to blktap2 over the old version of blktap:

* Isolation from xenstore - Blktap devices are now created directly on
the linux dom0 command line, rather than being spawned in response
to XenStore events. This is handy for debugging, makes blktap
generally easier to work with, and is a step toward a generic
user-level block device implementation that is not Xen-specific.

* Improved tapdisk infrastructure: simpler request forwarding, new
request scheduler, request merging, more efficient use of AIO.

* Improved tapdisk error handling and memory management. No
allocations on the block data path, IO retry logic to protect
guests against transient block device failures. This has been
tested and is known
to work on weird environments such as NFS soft mounts.

* Pause and snapshot of live virtual disks (see xmsnap script).

* VHD support. The VHD code in this release has been rigorously
tested, and represents a very mature implementation of the VHD
image
format.

* No more duplication of mechanism with blkback. The blktap kernel
module has changed dramatically from the original blktap. Blkback
is now always used to talk to Xen guests, blktap just presents a
Linux gendisk that blkback can export. This is done while
preserving the zero-copy data path from domU to physical device.

These patches deprecate the old blktap code, which can hopefully be
removed from the tree completely at some point in the future.

Signed-off-by: Jake Wires <jake.wires@citrix.com>
Signed-off-by: Dutch Meyer <dmeyer@cs.ubc.ca>
author Keir Fraser <keir.fraser@citrix.com>
date Tue May 26 11:52:31 2009 +0100 (2009-05-26)
parents dc7de36c94e3
children e95c4611a0ae
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import traceback
35 from types import StringTypes
37 import xen.lowlevel.xc
38 from xen.util import asserts, auxbin
39 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
40 import xen.util.xsm.xsm as security
41 from xen.util import xsconstants
42 from xen.util.pci import assigned_or_requested_vslot
44 from xen.xend import balloon, sxp, uuid, image, arch
45 from xen.xend import XendOptions, XendNode, XendConfig
47 from xen.xend.XendConfig import scrub_password
48 from xen.xend.XendBootloader import bootloader, bootloader_tidy
49 from xen.xend.XendError import XendError, VmError
50 from xen.xend.XendDevices import XendDevices
51 from xen.xend.XendTask import XendTask
52 from xen.xend.xenstore.xstransact import xstransact, complete
53 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
54 from xen.xend.xenstore.xswatch import xswatch
55 from xen.xend.XendConstants import *
56 from xen.xend.XendAPIConstants import *
57 from xen.xend.server.DevConstants import xenbusState
59 from xen.xend.XendVMMetrics import XendVMMetrics
61 from xen.xend import XendAPIStore
62 from xen.xend.XendPPCI import XendPPCI
63 from xen.xend.XendDPCI import XendDPCI
64 from xen.xend.XendPSCSI import XendPSCSI
65 from xen.xend.XendDSCSI import XendDSCSI
# Seconds to wait for a domain migration to make progress before giving up.
MIGRATE_TIMEOUT = 30.0
# Loopback block device lent to the bootloader when it needs to read a
# guest disk image (e.g. pygrub).
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

# Module-level singletons: the low-level hypervisor control interface and
# the xend configuration options.
xc = xen.lowlevel.xc.xc()
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]
    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    xd = XendDomain.instance()

    # Refuse to create a second live domain with the same name or UUID.
    othervm = xd.domain_lookup_nr(domconfig["name_label"])
    if othervm is None or othervm.domid is None:
        othervm = xd.domain_lookup_nr(domconfig["uuid"])
    if othervm is not None and othervm.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
        return vm
    except:
        # Tear down whatever was partially built, then re-raise to caller.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.
    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    domconfig = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
        return vm
    except:
        # Clean up the half-constructed domain before propagating.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    Used at xend restart to re-adopt domains that are already running.

    @param info: Parsed configuration (raw dominfo from the hypervisor)
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            # Dom0 legitimately lacks /vm early in boot; only warn for guests.
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            # Stale /vm entry from a previous incarnation of this domid.
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the xenstore entries from our own configuration.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})
    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    vm = XendDomainInfo(domconfig, resume = True)
    try:
        vm.resume()
    except:
        # Destroy the partially restored domain and propagate the error.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XenDomainInfo without creating a VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object
    @rtype: XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by its name.

    @params name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Translate a shutdown code into a human-readable reason.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason ("?" for an unknown code)
    @rtype: string
    """
    try:
        return DOMAIN_SHUTDOWN_REASONS[code]
    except KeyError:
        return "?"
def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type dom: int
    @return: info or None
    @rtype: dictionary
    """
    try:
        # Ask the hypervisor for (at most) one domain, starting at 'dom'.
        domlist = xc.domain_getinfo(dom, 1)
        # domain_getinfo returns the next existing domain >= dom, so check
        # that the one we got back really is the one we asked about.
        if domlist and dom == domlist[0]['domid']:
            return domlist[0]
    except Exception, err:
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings of all devices assigned to a domain.

    Reads the pci backend directory for the domain in xenstore
    ('num_devs' plus one 'dev-N' entry per device).

    @param domid: domain id
    @type domid: int
    @return: list of PCI device strings (empty if none are assigned)
    @rtype: list of strings
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    # No pci backend node (or an empty one) means no devices are assigned.
    if num_devs is None or num_devs == "":
        return []
    # Build the list in one pass instead of repeated list concatenation.
    return [xstransact.Read(path + 'dev-%i' % i)
            for i in range(int(num_devs))]
308 def do_FLR(domid):
309 from xen.xend.server.pciif import parse_pci_name, PciDevice
310 dev_str_list = get_assigned_pci_devices(domid)
312 for dev_str in dev_str_list:
313 (dom, b, d, f) = parse_pci_name(dev_str)
314 try:
315 dev = PciDevice(dom, b, d, f)
316 except Exception, e:
317 raise VmError("pci: failed to locate device and "+
318 "parse it's resources - "+str(e))
319 dev.do_FLR()
321 class XendDomainInfo:
322 """An object represents a domain.
324 @TODO: try to unify dom and domid, they mean the same thing, but
325 xc refers to it as dom, and everywhere else, including
326 xenstore it is domid. The best way is to change xc's
327 python interface.
329 @ivar info: Parsed configuration
330 @type info: dictionary
331 @ivar domid: Domain ID (if VM has started)
332 @type domid: int or None
333 @ivar vmpath: XenStore path to this VM.
334 @type vmpath: string
335 @ivar dompath: XenStore path to this Domain.
336 @type dompath: string
337 @ivar image: Reference to the VM Image.
338 @type image: xen.xend.image.ImageHandler
339 @ivar store_port: event channel to xenstored
340 @type store_port: int
341 @ivar console_port: event channel to xenconsoled
342 @type console_port: int
343 @ivar store_mfn: xenstored mfn
344 @type store_mfn: int
345 @ivar console_mfn: xenconsoled mfn
346 @type console_mfn: int
347 @ivar notes: OS image notes
348 @type notes: dictionary
349 @ivar vmWatch: reference to a watch on the xenstored vmpath
350 @type vmWatch: xen.xend.xenstore.xswatch
351 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
352 @type shutdownWatch: xen.xend.xenstore.xswatch
353 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
354 @type shutdownStartTime: float or None
355 @ivar restart_in_progress: Is a domain restart thread running?
356 @type restart_in_progress: bool
357 # @ivar state: Domain state
358 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
359 @ivar state_updated: lock for self.state
360 @type state_updated: threading.Condition
361 @ivar refresh_shutdown_lock: lock for polling shutdown state
362 @type refresh_shutdown_lock: threading.Condition
363 @ivar _deviceControllers: device controller cache for this domain
364 @type _deviceControllers: dict 'string' to DevControllers
365 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type domid: int
        @keyword dompath: Set initial dompath (if any)
        @type dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type priv: bool
        @keyword resume: Is this domain being resumed?
        @type resume: bool
        @keyword vmpath: Set initial /vm path (if any); otherwise a unique
            one is derived from the uuid below
        @type vmpath: string
        """

        self.info = info
        if domid == None:
            # Fall back to the domid recorded in the configuration, if any.
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                # Append '-<i>' until we find a path not already in use.
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                if self._readVm("uuid"):
                    # Path already occupied by another VM entry; try again.
                    self.vmpath = None
                    i = i + 1
            except:
                # Read failure means the path is unused; keep it.
                pass

        self.dompath = dompath

        # Runtime state filled in once the domain is built/started.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # xenstore watches and shutdown tracking.
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume
        self.restart_in_progress = False

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        # Cache of device controllers, keyed by device class name.
        self._deviceControllers = {}

        # Legacy state flags, all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
444 #
445 # Public functions available through XMLRPC
446 #
    def start(self, is_managed = False):
        """Attempt to start the VM by doing the appropriate
        initialisation if it is not already started.

        @keyword is_managed: save the running configuration if the domain
            is managed/persistent
        @type is_managed: bool
        @raise XendError: the VM is already running
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Build the domain step by step, reporting task progress.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply scheduler weight/cap when the credit scheduler is used.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                       self.getWeight(),
                                                       self.getCap())
            except:
                # Any failure leaves a partial domain: destroy and re-raise.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: the VM is not in a resumable state
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Recreate the xenstore entries, channels and devices for the
                # resumed domain, then finish the restore.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of DOMAIN_SHUTDOWN_REASONS values
            (e.g. 'poweroff', 'reboot')
        @type reason: string
        @raise XendError: domain is already down, is Domain-0, or the
            reason is invalid
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        # Signal the guest through its xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            if not hvm_pvdrv or hvm_s_state != 0:
                # No PV drivers (or guest is in an ACPI sleep state): force
                # the shutdown from the hypervisor side.
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
533 def pause(self):
534 """Pause domain
536 @raise XendError: Failed pausing a domain
537 """
538 try:
539 bepath="/local/domain/0/backend/"
540 if(self.domid):
542 dev = xstransact.List(bepath + 'vbd' + "/%d" % (self.domid,))
543 for x in dev:
544 path = self.getDeviceController('vbd').readBackend(x, 'params')
545 if path and path.startswith('/dev/xen/blktap-2'):
546 #Figure out the sysfs path.
547 pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
548 ctrlid = pattern.search(path)
549 ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
550 #pause the disk
551 f = open(ctrl + '/pause', 'w')
552 f.write('pause');
553 f.close()
554 except Exception, ex:
555 log.warn('Could not pause blktap disk.');
557 try:
558 xc.domain_pause(self.domid)
559 self._stateSet(DOM_STATE_PAUSED)
560 except Exception, ex:
561 log.exception(ex)
562 raise XendError("Domain unable to be paused: %s" % str(ex))
564 def unpause(self):
565 """Unpause domain
567 @raise XendError: Failed unpausing a domain
568 """
569 try:
570 bepath="/local/domain/0/backend/"
571 if(self.domid):
572 dev = xstransact.List(bepath + "vbd" + "/%d" % (self.domid,))
573 for x in dev:
574 path = self.getDeviceController('vbd').readBackend(x, 'params')
575 if path and path.startswith('/dev/xen/blktap-2'):
576 #Figure out the sysfs path.
577 pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
578 ctrlid = pattern.search(path)
579 ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
580 #unpause the disk
581 if(os.path.exists(ctrl + '/resume')):
582 f = open(ctrl + '/resume', 'w');
583 f.write('resume');
584 f.close();
586 except Exception, ex:
587 log.warn('Could not unpause blktap disk: %s' % str(ex));
589 try:
590 xc.domain_unpause(self.domid)
591 self._stateSet(DOM_STATE_RUNNING)
592 except Exception, ex:
593 log.exception(ex)
594 raise XendError("Domain unable to be unpaused: %s" % str(ex))
596 def send_sysrq(self, key):
597 """ Send a Sysrq equivalent key via xenstored."""
598 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
599 raise XendError("Domain '%s' is not started" % self.info['name_label'])
601 asserts.isCharConvertible(key)
602 self.storeDom("control/sysrq", '%c' % key)
    def sync_pcidev_info(self):
        """Synchronise virtual-slot assignments for passthrough PCI devices
        from the device model (via xenstore) into self.info.

        HVM-only; polls the backend's 'vslots' node for up to ~2 seconds.
        """

        if not self.info.is_hvm():
            return

        # All PCI devices share a single configuration node, devid '0'.
        devid = '0'
        dev_info = self._getDeviceInfo_pci(devid)

        if dev_info is None:
            return

        # get the virtual slot info from xenstore
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']

        # Poll for the device model to publish the vslots list
        # (20 tries x 0.1s).
        count = 0
        vslots = None
        while vslots is None and count < 20:
            vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
                                     % (self.getDomid(), devid))
            time.sleep(0.1)
            count += 1
        if vslots is None:
            log.error("Device model didn't tell the vslots for PCI device")
            return

        #delete last delim
        if vslots[-1] == ";":
            vslots = vslots[:-1]

        slot_list = vslots.split(';')
        if len(slot_list) != len(pci_devs):
            # The device model reported a different number of slots than we
            # have configured devices; give up rather than mis-assign.
            log.error("Device model's pci dev num dismatch")
            return

        #update the vslot info
        count = 0;
        for x in pci_devs:
            x['vslot'] = slot_list[count]
            count += 1
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug a PCI device into an HVM guest.

        Validates the request (slot conflicts, VT-d assignability, pciback
        ownership, BAR alignment, co-assignment constraints) before asking
        the device model (ioemu) to insert the device.

        @param dev_config: pci device configuration dict (with 'devs' list)
        @return: the virtual slot the device was placed in
        @raise VmError: validation failed or the device cannot be assigned
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                x_vslot = assigned_or_requested_vslot(x)
                # Reject if the requested virtual slot is already occupied
                # (AUTO_PHP_SLOT means "pick one for me", so it never clashes).
                if (int(x_vslot, 16) == int(new_dev['requested_vslot'], 16) and
                    int(x_vslot, 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already have a device." % (new_dev['requested_vslot']))

                # Reject if this exact physical device is already attached.
                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(0, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Decode the failing BDF returned by the hypervisor for the
            # error message.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        domain = int(new_dev['domain'],16)
        bus = int(new_dev['bus'],16)
        dev = int(new_dev['slot'],16)
        func = int(new_dev['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                           "%s\n"+ \
                           "See the pciback.hide kernel "+ \
                           "command-line parameter or\n"+ \
                           "bind your slot/device to the PCI backend using sysfs" \
                           )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            (domain, bus, dev, func) = parse_pci_name(pci_str)
            dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
            if xc.test_assign_device(0, dev_str) == 0:
                # Not yet assigned anywhere; no conflict from this one.
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                    " because one of its co-assignment device %s has been" + \
                    " assigned to other domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        if self.domid is not None:
            # Running domain: ask ioemu to insert the device, then read the
            # vslot it actually chose from the device-model node.
            opts = ''
            if 'opts' in new_dev and len(new_dev['opts']) > 0:
                config_opts = new_dev['opts']
                config_opts = map(lambda (x, y): x+'='+y, config_opts)
                opts = ',' + reduce(lambda x, y: x+','+y, config_opts)

            bdf_str = "%s:%s:%s.%s@%s%s" % (new_dev['domain'],
                                            new_dev['bus'],
                                            new_dev['slot'],
                                            new_dev['func'],
                                            new_dev['requested_vslot'],
                                            opts)
            self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

            vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                    % self.getDomid())
        else:
            # Domain not running: the requested slot is taken at face value.
            vslot = new_dev['requested_vslot']

        return vslot
    def device_create(self, dev_config):
        """Create a new device.

        Registers the device in self.info, and if the domain is running
        also creates the backend/frontend entries and waits for the device
        to connect, rolling back the config registration on failure.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        @raise VmError: invalid configuration or device creation failed
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            # Validate any MAC address supplied in the vif configuration.
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            # Domain is running: actually instantiate the device and wait
            # for it to come up.
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Roll back the registration done by device_add above.
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked in the vbd reference list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        Handles both attachment ('Initialising') and detachment ('Closing')
        of individual PCI functions on the shared pci platform device,
        for both HVM and PV guests.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise XendError: detach requested but no pci platform exists
        @raise VmError: HVM attach/detach validation failed
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                vslot = self.hvm_pci_device_create(dev_config)
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot
            else:
                # HVM PCI device detachment: find the vslot holding this
                # physical BDF among the currently attached devices.
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslot = AUTO_PHP_SLOT_STR
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslot = assigned_or_requested_vslot(x)
                        break
                if vslot == AUTO_PHP_SLOT_STR:
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslot, 16))
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        if self.domid is not None:
            # Running domain:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
            if not self.info.is_hvm():
                # in PV case, wait until backend state becomes connected.
                dev_control.waitForDevice_reconfigure(devid)
            num_devs = dev_control.cleanupDevice(devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy pci and remove config.
            if num_devs == 0:
                if self.info.is_hvm():
                    self.destroyDevice('pci', devid, True)
                    del self.info['devices'][dev_uuid]
                    platform = self.info['platform']
                    orig_dev_num = len(platform['pci'])
                    # TODO: can use this to keep some info to ask high level
                    # management tools to hot insert a new passthrough dev
                    # after migration
                    if orig_dev_num != 0:
                        #platform['pci'] = ["%dDEVs" % orig_dev_num]
                        platform['pci'] = []
                else:
                    self.destroyDevice('pci', devid)
                    del self.info['devices'][dev_uuid]
        else:
            # Dormant domain: just rewrite the stored configuration,
            # dropping the function being detached and/or appending the
            # ones being attached.
            new_dev_sxp = ['pci']
            for cur_dev in sxp.children(existing_dev_info, 'dev'):
                if pci_state == 'Closing':
                    if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                       int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                       int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                       int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                        continue
                new_dev_sxp.append(cur_dev)

            if pci_state == 'Initialising':
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
                if self.info.is_hvm():
                    platform = self.info['platform']
                    orig_dev_num = len(platform['pci'])
                    # TODO: can use this to keep some info to ask high level
                    # management tools to hot insert a new passthrough dev
                    # after migration
                    if orig_dev_num != 0:
                        #platform['pci'] = ["%dDEVs" % orig_dev_num]
                        platform['pci'] = []

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Attach or detach devices on an existing vscsi controller.

        @param dev_sxp: SXP configuration ('vscsi' ...) describing the
            devices to add (state Initialising) or remove (state Closing)
        @return: True on success, False if dev_sxp is not a vscsi config
        @raise XendError: on conflicting or missing device definitions
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            # True if any sub-device of dev_info matches one of the given
            # physical (p-dev) or virtual (v-dev) device names.
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            # Normalise a backend reference (name or domid) to a domid
            # string; fall back to the raw value if no such domain.
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        # Existing controller config for this devid, if any.
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # The new devices must use the same feature-host mode as the
            # controller they join.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            new_be = dev_config.get('backend', None)
            if new_be is not None:
                # The backend domain must also match the existing one
                # (default backend is dom0).
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Inactive domain: rebuild the controller sxpr by hand.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In feature-host mode the whole controller goes away.
                    if int(cur_mode[1]) == 1:
                        continue
                    # Drop the devices being detached.
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        PCI and vscsi configs are delegated to their specialised
        handlers; everything else is treated as a block device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise VmError: if the device is unknown or the reconfiguration
            is not a permitted one (only r/o cdrom media change offline)
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            # Strip qemu prefix and any ":disk"/":cdrom" suffix before
            # converting the name to a device number.
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            # Inactive domain: only allow swapping the media of a
            # read-only cdrom device.
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
1130 def waitForDevices(self):
1131 """Wait for this domain's configured devices to connect.
1133 @raise VmError: if any device fails to initialise.
1134 """
1135 for devclass in XendDevices.valid_devices():
1136 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Detach the PCI passthrough device occupying the given virtual
        slot from an HVM guest, after verifying PCI co-assignment
        constraints.

        @param vslot: virtual slot number (hex string/value)
        @return: 0 on success
        @raise VmError: if the guest is not HVM, the slot is empty, the
            physical device cannot be parsed, or a co-assigned device is
            still attached to this domain.
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            x_vslot = assigned_or_requested_vslot(x)
            if int(x_vslot, 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        # NOTE: 'x' below is the matched entry left over from the loop above.
        from xen.xend.server.pciif import PciDevice
        domain = int(x['domain'],16)
        bus = int(x['bus'],16)
        dev = int(x['slot'],16)
        func = int(x['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assignment device %s is still " + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        if self.domid is not None:
            # Ask the device model (qemu) to unplug the device from the
            # running guest.
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Tear down a device of this domain, optionally removing its
        stored configuration as well.

        @param deviceClass: device class name ('vbd', 'vif', 'tap', ...)
        @param devid: device id or device name
        @param force: skip the graceful backend-close handshake
        @param rm_cfg: also delete the device from self.info
        @return: result of the controller's destroyDevice, or None for an
            inactive domain
        @raise XendError: if rm_cfg is set and the device is not defined
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:

            #new blktap implementation may need a sysfs write after everything is torn down.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
            path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
            if path and path.startswith('/dev/xen/blktap-2'):
                frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
                backpath = xstransact.Read(frontpath, "backend")
                # Cleanup is asynchronous: run it on a separate thread.
                thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))

            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                # NOTE(review): if no matching entry is found, 'backend'
                # stays unbound and the call below raises NameError —
                # confirm sxprs always contains the device here.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            # Locate the device's stored config so it can be deleted.
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running: find the vif by its MAC address.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
    def getDeviceSxprs(self, deviceClass):
        """Return the sxpr descriptions of this domain's devices of the
        given class.

        For 'pci' the single shared config node's 'devs' list is
        returned.  For an active domain the live controller state is
        used; otherwise the list is built from the stored configuration
        as [device_number, sxpr] pairs.
        """
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                # 'vbd' covers both 'vbd' and 'tap' entries.
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    # Rebuild the controller sxpr, tagging each device
                    # with an (unknown) frontstate.
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    # Derive the device number from the device name,
                    # stripping qemu prefix and media-type suffix.
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs
1310 def getBlockDeviceClass(self, devid):
1311 # To get a device number from the devid,
1312 # we temporarily use the device controller of VBD.
1313 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1314 dev_info = self._getDeviceInfo_vbd(dev)
1315 if dev_info:
1316 return dev_info[0]
1318 def _getDeviceInfo_vif(self, mac):
1319 for dev_type, dev_info in self.info.all_devices_sxpr():
1320 if dev_type != 'vif':
1321 continue
1322 if mac == sxp.child_value(dev_info, 'mac'):
1323 return dev_info
1325 def _getDeviceInfo_vbd(self, devid):
1326 for dev_type, dev_info in self.info.all_devices_sxpr():
1327 if dev_type != 'vbd' and dev_type != 'tap':
1328 continue
1329 dev = sxp.child_value(dev_info, 'dev')
1330 dev = dev.split(':')[0]
1331 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1332 if devid == dev:
1333 return dev_info
1335 def _getDeviceInfo_pci(self, devid):
1336 for dev_type, dev_info in self.info.all_devices_sxpr():
1337 if dev_type != 'pci':
1338 continue
1339 return dev_info
1340 return None
1342 def _getDeviceInfo_vscsi(self, devid):
1343 devid = int(devid)
1344 for dev_type, dev_info in self.info.all_devices_sxpr():
1345 if dev_type != 'vscsi':
1346 continue
1347 devs = sxp.children(dev_info, 'dev')
1348 if devid == int(sxp.child_value(devs[0], 'devid')):
1349 return dev_info
1350 return None
1352 def _get_assigned_pci_devices(self, devid = 0):
1353 if self.domid is not None:
1354 return get_assigned_pci_devices(self.domid)
1356 dev_str_list = []
1357 dev_info = self._getDeviceInfo_pci(devid)
1358 if dev_info is None:
1359 return dev_str_list
1360 dev_uuid = sxp.child_value(dev_info, 'uuid')
1361 pci_conf = self.info['devices'][dev_uuid][1]
1362 pci_devs = pci_conf['devs']
1363 for pci_dev in pci_devs:
1364 domain = int(pci_dev['domain'], 16)
1365 bus = int(pci_dev['bus'], 16)
1366 slot = int(pci_dev['slot'], 16)
1367 func = int(pci_dev['func'], 16)
1368 dev_str = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
1369 dev_str_list = dev_str_list + [dev_str]
1370 return dev_str_list
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never balloon dom0 below its configured minimum.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                # Growing: make sure enough free memory is available
                # (balloon.free takes KiB).
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # '<< 10' converts MiB to KiB for the xenstore target node.
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypercall fails (previous maximum is
            restored first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        # Previous maximum, in bytes, kept for rollback on failure.
        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            maxmem = int(limit) * 1024  # hypercall takes KiB
            try:
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        # NOTE(review): because of the return above, this save only runs
        # for inactive domains — confirm that is intended.
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing each VCPU of this domain.

        For a running domain the data comes from the hypervisor; for an
        inactive one placeholder values are filled in.
        @raise XendError: if querying the hypervisor fails.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    # Live VCPU state from the hypervisor.
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Inactive domain: placeholder values; cpumap falls
                    # back to all 64 CPUs when no affinity is stored.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        """Return the hypervisor's info record for this domain."""
        return dom_get(self.domid)
1461 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store.  This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True when this is dom0, whose memory/vcpu settings
            come from boot time rather than the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0: these fields are owned by boot-time settings.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Map legacy config keys onto their XenAPI names where
                # a mapping exists.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead.  This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only adopt store-discovered devices if none are configured yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Sync the VT100 serial console and VNC vfb locations in
        self.info with the ports published in xenstore.

        @param transaction: optional open xenstore transaction to read
            through; otherwise direct reads are used.
        """
        # dom0 and not-yet-built domains have no console to track.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # No serial console configured yet: create one.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Nothing to do if the location is unchanged.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1569 # Function to update xenstore /vm/*
    def _readVm(self, *args):
        # Read path(s) under this VM's /vm/<uuid> xenstore tree.
        return xstransact.Read(self.vmpath, *args)
    def _writeVm(self, *args):
        # Write path/value pair(s) under this VM's /vm/<uuid> tree.
        return xstransact.Write(self.vmpath, *args)
    def _removeVm(self, *args):
        # Remove path(s) under this VM's /vm/<uuid> tree.
        return xstransact.Remove(self.vmpath, *args)
    def _gatherVm(self, *args):
        # Gather typed values from this VM's /vm/<uuid> tree.
        return xstransact.Gather(self.vmpath, *args)
    def _listRecursiveVm(self, *args):
        # Recursively list entries under this VM's /vm/<uuid> tree.
        return xstransact.ListRecursive(self.vmpath, *args)
    def storeVm(self, *args):
        # Store (write, creating as needed) values under /vm/<uuid>.
        return xstransact.Store(self.vmpath, *args)
    def permissionsVm(self, *args):
        # Set xenstore permissions on path(s) under /vm/<uuid>.
        return xstransact.SetPermissions(self.vmpath, *args)
1594 # Function to update xenstore /dom/*
    def readDom(self, *args):
        # Read path(s) under this domain's /local/domain/<id> tree.
        return xstransact.Read(self.dompath, *args)
    def gatherDom(self, *args):
        # Gather typed values from this domain's xenstore tree.
        return xstransact.Gather(self.dompath, *args)
    def _writeDom(self, *args):
        # Write path/value pair(s) under this domain's xenstore tree.
        return xstransact.Write(self.dompath, *args)
    def _removeDom(self, *args):
        # Remove path(s) under this domain's xenstore tree.
        return xstransact.Remove(self.dompath, *args)
    def storeDom(self, *args):
        # Store (write, creating as needed) values under the domain tree.
        return xstransact.Store(self.dompath, *args)
    def readDomTxn(self, transaction, *args):
        # Read dompath-relative paths inside an existing transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.read(*paths)
    def gatherDomTxn(self, transaction, *args):
        # Gather dompath-relative values inside an existing transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.gather(*paths)
    def _writeDomTxn(self, transaction, *args):
        # Write dompath-relative paths inside an existing transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.write(*paths)
    def _removeDomTxn(self, transaction, *args):
        # Remove dompath-relative paths inside an existing transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.remove(*paths)
    def storeDomTxn(self, transaction, *args):
        # Store dompath-relative values inside an existing transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.store(*paths)
    def _recreateDom(self):
        # Rebuild this domain's xenstore subtree inside one transaction.
        complete(self.dompath, lambda t: self._recreateDomFunc(t))
    def _recreateDomFunc(self, t):
        # Wipe and recreate the domain's xenstore directory within
        # transaction t, granting the guest read access and creating the
        # standard writable subdirectories.
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
        for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details (console/store channels, elfnotes,
        VCPU availability, ...) into its xenstore directory."""
        to_store = {
            'domid':          str(self.domid),
            'vm':             self.vmpath,
            'name':           self.info['name_label'],
            'console/limit':  str(xoptions.get_console_limit() * 1024),
            'memory/target':  str(self.info['memory_dynamic_max'] / 1024),
            }

        def f(n, v):
            # Add n -> v to to_store, stringifying; bools become "1"/"0".
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port', self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type', constype)
        f('store/port', self.store_port)
        f('store/ring-ref', self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # The FEATURES note is a '|'-separated list; '!' negates.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1700 def _vcpuDomDetails(self):
1701 def availability(n):
1702 if self.info['vcpu_avail'] & (1 << n):
1703 return 'online'
1704 else:
1705 return 'offline'
1707 result = {}
1708 for v in range(0, self.info['VCPUs_max']):
1709 result["cpu/%d/availability" % v] = availability(v)
1710 return result
1713 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date.  This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: refresh self.info from the /vm store entries
        and, if anything changed, rewrite the domain's xenstore details.

        @return: 1 so the watch stays registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback for control/shutdown: start (or continue) the
        shutdown timeout clock and schedule a refreshShutdown check.

        @return: True so the watch stays registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # 'suspend' is handled elsewhere; only real shutdown requests
        # start the timeout.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # Shutdown already in progress: keep the original clock.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1807 # Public Attributes for the VM
    def getDomid(self):
        """Return the current domain id (None when not running)."""
        return self.domid
    def setName(self, name, to_store = True):
        """Rename the domain after validating the new name.

        @param to_store: when True, also persist the name to xenstore.
        """
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)
    def getName(self):
        """Return the domain's name_label."""
        return self.info['name_label']
    def getDomainPath(self):
        """Return this domain's xenstore path (/local/domain/<id>)."""
        return self.dompath
    def getShutdownReason(self):
        """Return the pending shutdown reason from control/shutdown."""
        return self.readDom('control/shutdown')
    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        # Event-channel port of the xenstore ring.
        return self.store_port
    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        # Event-channel port of the console ring.
        return self.console_port
    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']
    def getVCpuCount(self):
        """Return the maximum number of VCPUs for this domain."""
        return self.info['VCPUs_max']
1844 def setVCpuCount(self, vcpus):
1845 def vcpus_valid(n):
1846 if vcpus <= 0:
1847 raise XendError('Zero or less VCPUs is invalid')
1848 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1849 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1850 vcpus_valid(vcpus)
1852 self.info['vcpu_avail'] = (1 << vcpus) - 1
1853 if self.domid >= 0:
1854 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1855 self._writeDom(self._vcpuDomDetails())
1856 self.info['VCPUs_live'] = vcpus
1857 else:
1858 if self.info['VCPUs_max'] > vcpus:
1859 # decreasing
1860 del self.info['cpus'][vcpus:]
1861 elif self.info['VCPUs_max'] < vcpus:
1862 # increasing
1863 for c in range(self.info['VCPUs_max'], vcpus):
1864 self.info['cpus'].append(list())
1865 self.info['VCPUs_max'] = vcpus
1866 xen.xend.XendDomain.instance().managed_config_save(self)
1867 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1868 vcpus)
    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        # info stores memory in bytes.
        return self.info['memory_dynamic_max'] / 1024
    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024
    def getResume(self):
        """Return the resume flag as a string."""
        return str(self._resume)
    def setResume(self, isresume):
        """Record whether this domain is being resumed."""
        self._resume = isresume
    def getCpus(self):
        """Return the per-VCPU CPU affinity lists."""
        return self.info['cpus']
    def setCpus(self, cpumap):
        """Replace the per-VCPU CPU affinity lists."""
        self.info['cpus'] = cpumap
    def getCap(self):
        """Return the scheduler cap for this domain's VCPUs."""
        return self.info['vcpus_params']['cap']
    def setCap(self, cpu_cap):
        """Set the scheduler cap for this domain's VCPUs."""
        self.info['vcpus_params']['cap'] = cpu_cap
    def getWeight(self):
        """Return the scheduler weight for this domain's VCPUs."""
        return self.info['vcpus_params']['weight']
    def setWeight(self, cpu_weight):
        """Set the scheduler weight for this domain's VCPUs."""
        self.info['vcpus_params']['weight'] = cpu_weight
    def getRestartCount(self):
        """Return the stored restart count from /vm."""
        return self._readVm('xend/restart_count')
def refreshShutdown(self, xeninfo = None):
    """ Checks the domain for whether a shutdown is required.

    Called from XendDomainInfo and also image.py for HVM images.

    @param xeninfo: optional pre-fetched hypervisor domain record; if
        None it is looked up via dom_get(self.domid).

    Examines the hypervisor's dying/crashed/shutdown/paused flags and
    updates the xend state machine accordingly.  Any required restart
    is deferred to a background thread started after the lock is
    dropped (see comment below).
    """

    # If set at the end of this method, a restart is required, with the
    # given reason. This restart has to be done out of the scope of
    # refresh_shutdown_lock.
    restart_reason = None

    self.refresh_shutdown_lock.acquire()
    try:
        if xeninfo is None:
            xeninfo = dom_get(self.domid)
            if xeninfo is None:
                # The domain no longer exists. This will occur if we have
                # scheduled a timer to check for shutdown timeouts and the
                # shutdown succeeded. It will also occur if someone
                # destroys a domain beneath us. We clean up the domain,
                # just in case, but we can't clean up the VM, because that
                # VM may have migrated to a different domain on this
                # machine.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_HALTED)
                return

        if xeninfo['dying']:
            # Dying means that a domain has been destroyed, but has not
            # yet been cleaned up by Xen. This state could persist
            # indefinitely if, for example, another domain has some of its
            # pages mapped. We might like to diagnose this problem in the
            # future, but for now all we do is make sure that it's not us
            # holding the pages, by calling cleanupDomain. We can't
            # clean up the VM, as above.
            self.cleanupDomain()
            self._stateSet(DOM_STATE_SHUTDOWN)
            return

        elif xeninfo['crashed']:
            if self.readDom('xend/shutdown_completed'):
                # We've seen this shutdown already, but we are preserving
                # the domain for debugging. Leave it alone.
                return

            log.warn('Domain has crashed: name=%s id=%d.',
                     self.info['name_label'], self.domid)
            self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

            # Restart handling (dump-core/destroy/restart per config)
            # happens outside the lock, below.
            restart_reason = 'crash'
            self._stateSet(DOM_STATE_HALTED)

        elif xeninfo['shutdown']:
            self._stateSet(DOM_STATE_SHUTDOWN)
            if self.readDom('xend/shutdown_completed'):
                # We've seen this shutdown already, but we are preserving
                # the domain for debugging. Leave it alone.
                return

            else:
                reason = shutdown_reason(xeninfo['shutdown_reason'])

                log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                         self.info['name_label'], self.domid, reason)
                self._writeVm(LAST_SHUTDOWN_REASON, reason)

                self._clearRestart()

                if reason == 'suspend':
                    self._stateSet(DOM_STATE_SUSPENDED)
                    # Don't destroy the domain. XendCheckpoint will do
                    # this once it has finished. However, stop watching
                    # the VM path now, otherwise we will end up with one
                    # watch for the old domain, and one for the new.
                    self._unwatchVm()
                elif reason in ('poweroff', 'reboot'):
                    restart_reason = reason
                else:
                    # Any other reason (e.g. halt): tear down now.
                    self.destroy()

        elif self.dompath is None:
            # We have yet to manage to call introduceDomain on this
            # domain. This can happen if a restore is in progress, or has
            # failed. Ignore this domain.
            pass
        else:
            # Domain is alive. If we are shutting it down, log a message
            # if it seems unresponsive.
            if xeninfo['paused']:
                self._stateSet(DOM_STATE_PAUSED)
            else:
                self._stateSet(DOM_STATE_RUNNING)

            if self.shutdownStartTime:
                timeout = (SHUTDOWN_TIMEOUT - time.time() +
                           self.shutdownStartTime)
                if (timeout < 0 and not self.readDom('xend/unresponsive')):
                    log.info(
                        "Domain shutdown timeout expired: name=%s id=%s",
                        self.info['name_label'], self.domid)
                    # Mark once so we don't log repeatedly on later polls.
                    self.storeDom('xend/unresponsive', 'True')
    finally:
        self.refresh_shutdown_lock.release()

    # restart_in_progress guards against starting two restart threads
    # for the same event; _restart/_maybeRestart run on their own thread
    # because they may block (device teardown, domain re-creation).
    if restart_reason and not self.restart_in_progress:
        self.restart_in_progress = True
        threading.Thread(target = self._maybeRestart,
                         args = (restart_reason,)).start()
2017 # Restart functions - handling whether we come back up on shutdown.
2020 def _clearRestart(self):
2021 self._removeDom("xend/shutdown_start_time")
def _maybeDumpCore(self, reason):
    """Dump core for a crash if globally enabled or configured per-VM.

    Only acts when the shutdown reason is 'crash'; any dump failure has
    already been logged by dumpCore and is deliberately swallowed here.
    """
    if reason != 'crash':
        return
    wants_dump = (xoptions.get_enable_dump() or
                  self.get_on_crash() in ['coredump_and_destroy',
                                          'coredump_and_restart'])
    if wants_dump:
        try:
            self.dumpCore()
        except XendError:
            # This error has been logged -- there's nothing more
            # we can do in this context.
            pass
2034 def _maybeRestart(self, reason):
2035 # Before taking configured action, dump core if configured to do so.
2037 self._maybeDumpCore(reason)
2039 # Dispatch to the correct method based upon the configured on_{reason}
2040 # behaviour.
2041 actions = {"destroy" : self.destroy,
2042 "restart" : self._restart,
2043 "preserve" : self._preserve,
2044 "rename-restart" : self._renameRestart,
2045 "coredump-destroy" : self.destroy,
2046 "coredump-restart" : self._restart}
2048 action_conf = {
2049 'poweroff': 'actions_after_shutdown',
2050 'reboot': 'actions_after_reboot',
2051 'crash': 'actions_after_crash',
2054 action_target = self.info.get(action_conf.get(reason))
2055 func = actions.get(action_target, None)
2056 if func and callable(func):
2057 func()
2058 else:
2059 self.destroy() # default to destroy
2061 def _renameRestart(self):
2062 self._restart(True)
def _restart(self, rename = False):
    """Restart the domain after it has exited.

    @param rename True if the old domain is to be renamed and preserved,
    False if it is to be destroyed.

    Guards against restart loops twice: via the RESTART_IN_PROGRESS
    xenstore flag and via a minimum-uptime check.  Never raises -- the
    outer except logs any failure.
    """
    from xen.xend import XendDomain

    if self._readVm(RESTART_IN_PROGRESS):
        # A previous restart attempt died mid-way; don't loop.
        log.error('Xend failed during restart of domain %s.  '
                  'Refusing to restart to avoid loops.',
                  str(self.domid))
        self.destroy()
        return

    old_domid = self.domid
    self._writeVm(RESTART_IN_PROGRESS, 'True')

    elapse = time.time() - self.info['start_time']
    if elapse < MINIMUM_RESTART_TIME:
        # Crash/reboot-looping guest: refuse rather than spin.
        log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
                  'Refusing to restart to avoid loops.',
                  self.info['name_label'], elapse)
        self.destroy()
        return

    # Snapshot the xend/* VM subtree so counters etc. survive into the
    # new domain's VM record.
    prev_vm_xend = self._listRecursiveVm('xend')
    new_dom_info = self.info
    try:
        if rename:
            new_dom_info = self._preserveForRestart()
        else:
            self._unwatchVm()
            self.destroy()

        # new_dom's VM will be the same as this domain's VM, except where
        # the rename flag has instructed us to call preserveForRestart.
        # In that case, it is important that we remove the
        # RESTART_IN_PROGRESS node from the new domain, not the old one,
        # once the new one is available.

        new_dom = None
        try:
            new_dom = XendDomain.instance().domain_create_from_dict(
                new_dom_info)
            for x in prev_vm_xend[0][1]:
                new_dom._writeVm('xend/%s' % x[0], x[1])
            new_dom.waitForDevices()
            new_dom.unpause()
            rst_cnt = new_dom._readVm('xend/restart_count')
            # NOTE(review): if xend/restart_count was never written,
            # _readVm presumably returns None and int(None) raises here,
            # which falls into the except below and destroys new_dom --
            # confirm restart_count is always seeded before first restart.
            rst_cnt = int(rst_cnt) + 1
            new_dom._writeVm('xend/restart_count', str(rst_cnt))
            new_dom._removeVm(RESTART_IN_PROGRESS)
        except:
            if new_dom:
                new_dom._removeVm(RESTART_IN_PROGRESS)
                new_dom.destroy()
            else:
                self._removeVm(RESTART_IN_PROGRESS)
            raise
    except:
        log.exception('Failed to restart domain %s.', str(old_domid))
def _preserveForRestart(self):
    """Preserve a domain that has been shut down, by giving it a new UUID,
    cloning the VM details, and giving it a new name.  This allows us to
    keep this domain for debugging, but restart a new one in its place
    preserving the restart semantics (name and UUID preserved).

    @return: a copy of self.info carrying the ORIGINAL name and UUID,
        suitable for creating the replacement domain; self is mutated to
        take the new "Domain-<uuid>" identity and is preserved.
    """

    new_uuid = uuid.createString()
    new_name = 'Domain-%s' % new_uuid
    log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
             self.info['name_label'], self.domid, self.info['uuid'],
             new_name, new_uuid)
    self._unwatchVm()
    self._releaseDevices()
    # Remove existing vm node in xenstore
    self._removeVm()
    # The copy keeps the original identity for the replacement domain...
    new_dom_info = self.info.copy()
    new_dom_info['name_label'] = self.info['name_label']
    new_dom_info['uuid'] = self.info['uuid']
    # ...while this (dead, preserved) domain takes the fresh identity.
    self.info['name_label'] = new_name
    self.info['uuid'] = new_uuid
    self.vmpath = XS_VMROOT + new_uuid
    # Write out new vm node to xenstore
    self._storeVmDetails()
    self._preserve()
    return new_dom_info
def _preserve(self):
    """Keep this dead domain around (not cleaned up) for debugging."""
    name = self.info['name_label']
    log.info("Preserving dead domain %s (%d).", name,
             self.domid)
    # Stop watching the VM path, flag the shutdown as handled, and
    # record the final state.
    self._unwatchVm()
    self.storeDom('xend/shutdown_completed', 'True')
    self._stateSet(DOM_STATE_HALTED)
2163 # Debugging ..
2166 def dumpCore(self, corefile = None):
2167 """Create a core dump for this domain.
2169 @raise: XendError if core dumping failed.
2170 """
2172 if not corefile:
2173 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2174 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
2175 self.info['name_label'], self.domid)
2177 if os.path.isdir(corefile):
2178 raise XendError("Cannot dump core in a directory: %s" %
2179 corefile)
2181 try:
2182 try:
2183 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2184 xc.domain_dumpcore(self.domid, corefile)
2185 except RuntimeError, ex:
2186 corefile_incomp = corefile+'-incomplete'
2187 try:
2188 os.rename(corefile, corefile_incomp)
2189 except:
2190 pass
2192 log.error("core dump failed: id = %s name = %s: %s",
2193 self.domid, self.info['name_label'], str(ex))
2194 raise XendError("Failed to dump core: %s" % str(ex))
2195 finally:
2196 self._removeVm(DUMPCORE_IN_PROGRESS)
2199 # Device creation/deletion functions
2202 def _createDevice(self, deviceClass, devConfig):
2203 return self.getDeviceController(deviceClass).createDevice(devConfig)
2205 def _waitForDevice(self, deviceClass, devid):
2206 return self.getDeviceController(deviceClass).waitForDevice(devid)
2208 def _waitForDeviceUUID(self, dev_uuid):
2209 deviceClass, config = self.info['devices'].get(dev_uuid)
2210 self._waitForDevice(deviceClass, config['devid'])
2212 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2213 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2214 devid, backpath)
2216 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2217 return self.getDeviceController(deviceClass).reconfigureDevice(
2218 devid, devconfig)
def _createDevices(self):
    """Create the devices for a vm.

    Non-vscsi devices are created in the config's ordered sequence;
    vscsi devices are collected first and created afterwards in devid
    order (see comment below).  Created devids are cached back into
    self.info['devices'].

    @raise: VmError for invalid devices
    """
    if self.image:
        self.image.prepareEnvironment()

    # vscsi devices are deferred: devid -> config uuid, plus the devid
    # list we will sort before creating them.
    vscsi_uuidlist = {}
    vscsi_devidlist = []
    ordered_refs = self.info.ordered_device_refs()
    for dev_uuid in ordered_refs:
        devclass, config = self.info['devices'][dev_uuid]
        if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
            log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
            dev_uuid = config.get('uuid')
            devid = self._createDevice(devclass, config)

            # store devid in XendConfig for caching reasons
            if dev_uuid in self.info['devices']:
                self.info['devices'][dev_uuid][1]['devid'] = devid

        elif devclass == 'vscsi':
            vscsi_config = config.get('devs', [])[0]
            devid = vscsi_config.get('devid', '')
            dev_uuid = config.get('uuid')
            vscsi_uuidlist[devid] = dev_uuid
            vscsi_devidlist.append(devid)

    #It is necessary to sorted it for /dev/sdxx in guest.
    if len(vscsi_uuidlist) > 0:
        vscsi_devidlist.sort()
        for vscsiid in vscsi_devidlist:
            dev_uuid = vscsi_uuidlist[vscsiid]
            devclass, config = self.info['devices'][dev_uuid]
            log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
            dev_uuid = config.get('uuid')
            devid = self._createDevice(devclass, config)
            # store devid in XendConfig for caching reasons
            if dev_uuid in self.info['devices']:
                self.info['devices'][dev_uuid][1]['devid'] = devid

    if self.image:
        self.image.createDeviceModel()

    #if have pass-through devs, need the virtual pci slots info from qemu
    self.sync_pcidev_info()
def _releaseDevices(self, suspend = False):
    """Release all domain's devices.  Nothrow guarantee.

    @param suspend: unused in this body -- presumably a hint for
        suspend-time teardown; confirm against callers before relying
        on it.

    Tears down the device model (if any), then walks every device
    class under this domain's xenstore device path and destroys each
    device, swallowing and logging all errors.
    """
    if self.image:
        try:
            log.debug("Destroying device model")
            self.image.destroyDeviceModel()
        except Exception, e:
            log.exception("Device model destroy failed %s" % str(e))
    else:
        log.debug("No device model")

    log.debug("Releasing devices")
    # The transaction is only used for listing; it is aborted (not
    # committed) in the finally below.
    t = xstransact("%s/device" % self.dompath)
    try:
        for devclass in XendDevices.valid_devices():
            for dev in t.list(devclass):
                try:
                    true_devclass = devclass
                    if devclass == 'vbd':
                        # In the case of "vbd", the true device class
                        # may possibly be "tap". Just in case, verify
                        # device class.
                        devid = dev.split('/')[-1]
                        true_devclass = self.getBlockDeviceClass(devid)
                    log.debug("Removing %s", dev);
                    self.destroyDevice(true_devclass, dev, False);
                except:
                    # Log and swallow any exceptions in removal --
                    # there's nothing more we can do.
                    log.exception("Device release failed: %s; %s; %s",
                                  self.info['name_label'],
                                  true_devclass, dev)
    finally:
        t.abort()
def getDeviceController(self, name):
    """Get the device controller for this domain, and if it
    doesn't exist, create it.

    @param name: device class name
    @type name: string
    @rtype: subclass of DevController
    """
    controller = self._deviceControllers.get(name)
    if controller is None:
        # Lazily build and cache the controller for this class.
        controller = XendDevices.make_controller(name, self)
        if not controller:
            raise XendError("Unknown device type: %s" % name)
        self._deviceControllers[name] = controller

    return self._deviceControllers[name]
2321 # Migration functions (public)
def testMigrateDevices(self, network, dst):
    """ Notify all device about intention of migration
    @raise: XendError for a device that cannot be migrated
    """
    for (devtype, devconf) in self.info.all_devices_sxpr():
        status = self.migrateDevice(devtype, devconf, network, dst,
                                    DEV_MIGRATE_TEST, self.getName())
        if status != 0:
            raise XendError("Device of type '%s' refuses migration." % devtype)
def migrateDevices(self, network, dst, step, domName=''):
    """Notify the devices about migration

    On failure, re-notifies every device in order so each one can
    recover: ctr counts how many devices completed the current step;
    devices at index < ctr recover the current step, the rest (which
    never saw this step) recover the previous one.  The original
    exception is re-raised.
    """
    ctr = 0
    try:
        for (dev_type, dev_conf) in self.info.all_devices_sxpr():
            self.migrateDevice(dev_type, dev_conf, network, dst,
                               step, domName)
            ctr = ctr + 1
    except:
        for dev_type, dev_conf in self.info.all_devices_sxpr():
            if ctr == 0:
                # Past the devices that reached `step`; the remainder
                # roll back the previous step instead.
                step = step - 1
            ctr = ctr - 1
            self._recoverMigrateDevice(dev_type, dev_conf, network,
                                       dst, step, domName)
        raise
def migrateDevice(self, deviceClass, deviceConfig, network, dst,
                  step, domName=''):
    """Forward one migration step to the device's controller."""
    controller = self.getDeviceController(deviceClass)
    return controller.migrate(deviceConfig, network, dst, step, domName)
2356 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2357 dst, step, domName=''):
2358 return self.getDeviceController(deviceClass).recover_migrate(
2359 deviceConfig, network, dst, step, domName)
2362 ## private:
def _constructDomain(self):
    """Construct the domain.

    Creates the hypervisor domain via xc.domain_create, records its
    domid and xenstore path, applies HVM platform parameters, checks
    credit-scheduler cap/weight ranges, pre-validates VT-d device
    assignment, and registers the domain with XendDomain.

    @raise: VmError on error
    """

    log.debug('XendDomainInfo.constructDomain')

    self.shutdownStartTime = None
    self.restart_in_progress = False

    hap = 0
    hvm = self.info.is_hvm()
    if hvm:
        hap = self.info.is_hap()
        info = xc.xeninfo()
        if 'hvm' not in info['xen_caps']:
            raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                          "supported by your CPU and enabled in your "
                          "BIOS?")

    # Hack to pre-reserve some memory for initial domain creation.
    # There is an implicit memory overhead for any domain creation. This
    # overhead is greater for some types of domain than others. For
    # example, an x86 HVM domain will have a default shadow-pagetable
    # allocation of 1MB. We free up 4MB here to be on the safe side.
    # 2MB memory allocation was not enough in some cases, so it's 4MB now
    balloon.free(4*1024, self) # 4MB should be plenty

    ssidref = 0
    if security.on() == xsconstants.XS_POLICY_USE:
        ssidref = security.calc_dom_ssidref_from_info(self.info)
        if security.has_authorization(ssidref) == False:
            raise VmError("VM is not authorized to run.")

    s3_integrity = 0
    if self.info.has_key('s3_integrity'):
        s3_integrity = self.info['s3_integrity']
    # Creation flags: bit0 = HVM, bit1 = HAP, bit2 = S3 integrity.
    flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

    try:
        self.domid = xc.domain_create(
            domid = 0,
            ssidref = ssidref,
            handle = uuid.fromString(self.info['uuid']),
            flags = flags,
            target = self.info.target())
    except Exception, e:
        # may get here if due to ACM the operation is not permitted
        if security.on() == xsconstants.XS_POLICY_ACM:
            raise VmError('Domain in conflict set with running domain?')

    # NOTE(review): if domain_create raised and ACM is not in use, the
    # exception is swallowed and self.domid is left unset/old; the
    # check below then reports the generic creation failure.
    if self.domid < 0:
        raise VmError('Creating domain failed: name=%s' %
                      self.info['name_label'])

    self.dompath = GetDomainPath(self.domid)

    self._recreateDom()

    # Set timer configration of domain
    timer_mode = self.info["platform"].get("timer_mode")
    if hvm and timer_mode is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                         long(timer_mode))

    # Set Viridian interface configuration of domain
    viridian = self.info["platform"].get("viridian")
    if arch.type == "x86" and hvm and viridian is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

    # Optionally enable virtual HPET
    hpet = self.info["platform"].get("hpet")
    if hvm and hpet is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                         long(hpet))

    # Optionally enable periodic vpt aligning
    vpt_align = self.info["platform"].get("vpt_align")
    if hvm and vpt_align is not None:
        xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                         long(vpt_align))

    # Set maximum number of vcpus in domain
    xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

    # Check for cpu_{cap|weight} validity for credit scheduler
    if XendNode.instance().xenschedinfo() == 'credit':
        cap = self.getCap()
        weight = self.getWeight()

        assert type(weight) == int
        assert type(cap) == int

        if weight < 1 or weight > 65535:
            raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

        if cap < 0 or cap > self.getVCpuCount() * 100:
            raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                          (self.getVCpuCount() * 100))

    # Test whether the devices can be assigned with VT-d
    pci = self.info["platform"].get("pci")
    pci_str = ''
    if pci and len(pci) > 0:
        pci = map(lambda x: x[0:4], pci)  # strip options
        pci_str = str(pci)
    if hvm and pci_str:
        bdf = xc.test_assign_device(0, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Decode the failing device's bus/dev/func from the BDF word.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(self)
2490 def _introduceDomain(self):
2491 assert self.domid is not None
2492 assert self.store_mfn is not None
2493 assert self.store_port is not None
2495 try:
2496 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2497 except RuntimeError, exn:
2498 raise XendError(str(exn))
2500 def _setTarget(self, target):
2501 assert self.domid is not None
2503 try:
2504 SetTarget(self.domid, target)
2505 self.storeDom('target', target)
2506 except RuntimeError, exn:
2507 raise XendError(str(exn))
def _setCPUAffinity(self):
    """ Repin domain vcpus if a restricted cpus list is provided

    If no explicit cpus list is configured, on multi-node machines a
    least-loaded NUMA node with enough free memory is picked and all
    vcpus are pinned to its CPUs.
    """

    def has_cpus():
        # True iff any vcpu has a non-empty affinity list configured.
        if self.info['cpus'] is not None:
            for c in self.info['cpus']:
                if c:
                    return True
        return False

    if has_cpus():
        for v in range(0, self.info['VCPUs_max']):
            if self.info['cpus'][v]:
                xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
    else:
        def find_relaxed_node(node_list):
            # Pick the candidate node with the lowest vcpu load,
            # normalised by the node's CPU count; `info` is the
            # enclosing scope's xc.physinfo() result.
            import sys
            nr_nodes = info['nr_nodes']
            if node_list is None:
                node_list = range(0, nr_nodes)
            nodeload = [0]
            nodeload = nodeload * nr_nodes
            from xen.xend import XendDomain
            doms = XendDomain.instance().list('all')
            for dom in filter (lambda d: d.domid != self.domid, doms):
                cpuinfo = dom.getVCPUInfo()
                for vcpu in sxp.children(cpuinfo, 'vcpu'):
                    if sxp.child_value(vcpu, 'online') == 0: continue
                    cpumap = list(sxp.child_value(vcpu,'cpumap'))
                    for i in range(0, nr_nodes):
                        node_cpumask = info['node_to_cpu'][i]
                        for j in node_cpumask:
                            if j in cpumap:
                                nodeload[i] += 1
                                break
            for i in range(0, nr_nodes):
                # Nodes outside the candidate list (or with no CPUs)
                # are disqualified with an effectively infinite load.
                if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                    nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                else:
                    nodeload[i] = sys.maxint
            index = nodeload.index( min(nodeload) )
            return index

        info = xc.physinfo()
        if info['nr_nodes'] > 1:
            node_memory_list = info['node_to_memory']
            needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
            candidate_node_list = []
            for i in range(0, info['nr_nodes']):
                if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                    candidate_node_list.append(i)
            index = find_relaxed_node(candidate_node_list)
            cpumask = info['node_to_cpu'][index]
            for v in range(0, self.info['VCPUs_max']):
                xc.vcpu_setaffinity(self.domid, v, cpumask)
def _initDomain(self):
    """Initialise the constructed domain: bootloader, image, memory
    limits, shadow/VT-d headroom, channels, and devices.

    @raise: VmError on any failure (RuntimeErrors are wrapped); the
        bootloader temp files are cleaned up on both paths.
    """
    log.debug('XendDomainInfo.initDomain: %s %s',
              self.domid,
              self.info['vcpus_params']['weight'])

    self._configureBootloader()

    try:
        self.image = image.create(self, self.info)

        # repin domain vcpus if a restricted cpus list is provided
        # this is done prior to memory allocation to aide in memory
        # distribution for NUMA systems.
        self._setCPUAffinity()

        # Use architecture- and image-specific calculations to determine
        # the various headrooms necessary, given the raw configured
        # values. maxmem, memory, and shadow are all in KiB.
        # but memory_static_max etc are all stored in bytes now.
        memory = self.image.getRequiredAvailableMemory(
            self.info['memory_dynamic_max'] / 1024)
        maxmem = self.image.getRequiredAvailableMemory(
            self.info['memory_static_max'] / 1024)
        shadow = self.image.getRequiredShadowMemory(
            self.info['shadow_memory'] * 1024,
            self.info['memory_static_max'] / 1024)

        log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
        # Round shadow up to a multiple of a MiB, as shadow_mem_control
        # takes MiB and we must not round down and end up under-providing.
        shadow = ((shadow + 1023) / 1024) * 1024

        # set memory limit
        xc.domain_setmaxmem(self.domid, maxmem)

        # Reserve 1 page per MiB of RAM for separate VT-d page table.
        vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
        # Round vtd_mem up to a multiple of a MiB.
        vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

        # Make sure there's enough RAM available for the domain
        balloon.free(memory + shadow + vtd_mem, self)

        # Set up the shadow memory
        shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
        self.info['shadow_memory'] = shadow_cur

        # machine address size
        if self.info.has_key('machine_address_size'):
            log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
            xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

        if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
            log.debug("_initDomain: suppressing spurious page faults")
            xc.domain_suppress_spurious_page_faults(self.domid)

        self._createChannels()

        # Build the image; returns the store ring MFN plus optional
        # console MFN, ELF notes and native protocol.
        channel_details = self.image.createImage()

        self.store_mfn = channel_details['store_mfn']
        if 'console_mfn' in channel_details:
            self.console_mfn = channel_details['console_mfn']
        if 'notes' in channel_details:
            self.info.set_notes(channel_details['notes'])
        if 'native_protocol' in channel_details:
            self.native_protocol = channel_details['native_protocol'];

        self._introduceDomain()
        if self.info.target():
            self._setTarget(self.info.target())

        self._createDevices()

        self.image.cleanupBootloading()

        self.info['start_time'] = time.time()

        self._stateSet(DOM_STATE_RUNNING)
    except VmError, exn:
        log.exception("XendDomainInfo.initDomain: exception occurred")
        if self.image:
            self.image.cleanupBootloading()
        raise exn
    except RuntimeError, exn:
        log.exception("XendDomainInfo.initDomain: exception occurred")
        if self.image:
            self.image.cleanupBootloading()
        raise VmError(str(exn))
def cleanupDomain(self):
    """Cleanup domain resources; release devices.  Idempotent.  Nothrow
    guarantee.

    Runs under refresh_shutdown_lock so it cannot race with
    refreshShutdown; releases devices, drops the image reference,
    removes the domain's xenstore subtree, and marks the domain halted.
    """

    self.refresh_shutdown_lock.acquire()
    try:
        self.unwatchShutdown()
        self._releaseDevices()
        bootloader_tidy(self)

        if self.image:
            self.image = None

        try:
            self._removeDom()
        except:
            # Best-effort: the dom path may already be gone.
            log.exception("Removing domain path failed.")

        self._stateSet(DOM_STATE_HALTED)
        self.domid = None # Do not push into _stateSet()!
    finally:
        self.refresh_shutdown_lock.release()
def unwatchShutdown(self):
    """Remove the watch on the domain's control/shutdown node, if any.
    Idempotent.  Nothrow guarantee.  Expects to be protected by the
    refresh_shutdown_lock."""

    try:
        watch = self.shutdownWatch
        try:
            if watch:
                watch.unwatch()
        finally:
            # Drop the reference even if unwatch() blew up.
            self.shutdownWatch = None
    except:
        log.exception("Unwatching control/shutdown failed.")
def waitForShutdown(self):
    """Block until this domain leaves the RUNNING/PAUSED states.

    Polls the state under the state_updated condition with a 1s wait
    so missed notifications cannot hang us forever.
    """
    self.state_updated.acquire()
    try:
        live_states = (DOM_STATE_RUNNING, DOM_STATE_PAUSED)
        while self._stateGet() in live_states:
            self.state_updated.wait(timeout=1.0)
    finally:
        self.state_updated.release()
def waitForSuspend(self):
    """Wait for the guest to respond to a suspend request by
    shutting down. If the guest hasn't re-written control/shutdown
    after a certain amount of time, it's obviously not listening and
    won't suspend, so we give up. HVM guests with no PV drivers
    should already be shutdown.

    @raise: XendError if ~60 polls pass with control/shutdown still
        reading "suspend"; the node is cleared before raising so the
        stale request is withdrawn.
    """
    state = "suspend"
    nr_tries = 60

    self.state_updated.acquire()
    try:
        while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
            self.state_updated.wait(1.0)
            # Once the guest clears/rewrites control/shutdown, `state`
            # stops reading "suspend" and we just wait for the state
            # machine to leave RUNNING/PAUSED.
            if state == "suspend":
                if nr_tries == 0:
                    msg = ('Timeout waiting for domain %s to suspend'
                           % self.domid)
                    self._writeDom('control/shutdown', '')
                    raise XendError(msg)
                state = self.readDom('control/shutdown')
                nr_tries -= 1
    finally:
        self.state_updated.release()
2731 # TODO: recategorise - called from XendCheckpoint
def completeRestore(self, store_mfn, console_mfn):
    """Finish bringing up a restored domain (called from XendCheckpoint).

    @param store_mfn: xenstore ring machine frame of the restored image
    @param console_mfn: console ring machine frame of the restored image

    Re-introduces the domain to xenstore, recreates the image/device
    model, stores domain details, re-registers watches and kicks the
    shutdown refresher.
    """

    log.debug("XendDomainInfo.completeRestore")

    self.store_mfn = store_mfn
    self.console_mfn = console_mfn

    self._introduceDomain()
    self.image = image.create(self, self.info)
    if self.image:
        # True: the device model is being restored, not freshly created.
        self.image.createDeviceModel(True)
    self._storeDomDetails()
    self._registerWatches()
    self.refreshShutdown()

    log.debug("XendDomainInfo.completeRestore done")
2752 def _endRestore(self):
2753 self.setResume(False)
2756 # VM Destroy
def _prepare_phantom_paths(self):
    """Collect phantom-vbd xenstore paths to remove after real devices.

    @return: list of backend/frontend phantom_vbd paths (possibly empty).
    """
    # get associated devices to destroy
    # build list of phantom devices to be removed after normal devices
    plist = []
    if self.domid is not None:
        # NOTE(review): the transaction is opened on
        # GetDomainPath(self.domid) but the reads below use
        # self.dompath -- presumably these are the same path; confirm.
        t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
        try:
            for dev in t.list():
                backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                      % (self.dompath, dev))
                if backend_phantom_vbd is not None:
                    frontend_phantom_vbd = xstransact.Read("%s/frontend" \
                                      % backend_phantom_vbd)
                    plist.append(backend_phantom_vbd)
                    plist.append(frontend_phantom_vbd)
        finally:
            # Read-only use: abort rather than commit.
            t.abort()
    return plist
def _cleanup_phantom_devs(self, plist):
    """Remove the phantom vbd xenstore nodes collected earlier.

    @param plist: paths from _prepare_phantom_paths (may be empty).
    """
    if plist:
        # Give drivers a moment to finish with the real devices first.
        time.sleep(2)
    for entry in plist:
        if entry.find('backend') != -1:
            # Modify online status /before/ updating state (latter is watched by
            # drivers, so this ordering avoids a race).
            xstransact.Write(entry, 'online', "0")
            xstransact.Write(entry, 'state', str(xenbusState['Closing']))
        # force
        xstransact.Remove(entry)
def destroy(self):
    """Cleanup VM and destroy domain.  Nothrow guarantee.

    Pauses and destroys the hypervisor domain (with FLR of assigned
    PCI devices), unregisters it from XendDomain, cleans up xenstore
    and phantom devices, and deletes the VM record entirely if it is
    marked transient.
    """

    if self.domid is None:
        # Already destroyed / never constructed: nothing to do.
        return
    from xen.xend import XendDomain
    log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

    paths = self._prepare_phantom_paths()

    if self.dompath is not None:
        try:
            xc.domain_destroy_hook(self.domid)
            xc.domain_pause(self.domid)
            do_FLR(self.domid)
            xc.domain_destroy(self.domid)
            for state in DOM_STATES_OLD:
                self.info[state] = 0
            self._stateSet(DOM_STATE_HALTED)
        except:
            # Nothrow guarantee: log and carry on with bookkeeping.
            log.exception("XendDomainInfo.destroy: domain destruction failed.")

        XendDomain.instance().remove_domain(self)
        self.cleanupDomain()

    self._cleanup_phantom_devs(paths)
    self._cleanupVm()

    if "transient" in self.info["other_config"] \
       and bool(self.info["other_config"]["transient"]):
        XendDomain.instance().domain_delete_by_dominfo(self)
def resetDomain(self):
    """Destroy this domain and recreate it from its own config.

    The xend/* VM subtree is snapshotted first and replayed into the
    new domain so restart counters etc. survive.  Never raises -- the
    outer except logs any failure.
    """
    log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

    old_domid = self.domid
    prev_vm_xend = self._listRecursiveVm('xend')
    new_dom_info = self.info
    try:
        self._unwatchVm()
        self.destroy()

        new_dom = None
        try:
            from xen.xend import XendDomain
            # Force allocation of a fresh domid on re-creation.
            new_dom_info['domid'] = None
            new_dom = XendDomain.instance().domain_create_from_dict(
                new_dom_info)
            for x in prev_vm_xend[0][1]:
                new_dom._writeVm('xend/%s' % x[0], x[1])
            new_dom.waitForDevices()
            new_dom.unpause()
        except:
            if new_dom:
                new_dom.destroy()
            raise
    except:
        log.exception('Failed to reset domain %s.', str(old_domid))
def resumeDomain(self):
    """Resume a domain that suspended (e.g. after live checkpoint or a
    failed save/migrate).

    Verifies the domain really is in the suspended-shutdown state
    before doing anything, so it is safe to call from anywhere.
    """
    log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

    # resume a suspended domain (e.g. after live checkpoint, or after
    # a later error during save or migate); checks that the domain
    # is currently suspended first so safe to call from anywhere

    xeninfo = dom_get(self.domid)
    if xeninfo is None:
        return
    if not xeninfo['shutdown']:
        return
    reason = shutdown_reason(xeninfo['shutdown_reason'])
    if reason != 'suspend':
        return

    try:
        # could also fetch a parsed note from xenstore
        # fast resume (SUSPEND_CANCEL) skips the device teardown/rebuild.
        fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
        if not fast:
            self._releaseDevices()
            self.testDeviceComplete()
            self.testvifsComplete()
            log.debug("XendDomainInfo.resumeDomain: devices released")

            self._resetChannels()

            self._removeDom('control/shutdown')
            self._removeDom('device-misc/vif/nextDeviceID')

            self._createChannels()
            self._introduceDomain()
            self._storeDomDetails()

            self._createDevices()
            log.debug("XendDomainInfo.resumeDomain: devices created")

        xc.domain_resume(self.domid, fast)
        ResumeDomain(self.domid)
    except:
        log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
    # NOTE(review): this runs even after a logged failure, and would
    # raise AttributeError if self.image is None -- confirm image is
    # always set on this path.
    self.image.resumeDeviceModel()
    log.debug("XendDomainInfo.resumeDomain: completed")
2899 # Channels for xenstore and console
2902 def _createChannels(self):
2903 """Create the channels to the domain.
2904 """
2905 self.store_port = self._createChannel()
2906 self.console_port = self._createChannel()
def _createChannel(self):
    """Create an event channel to the domain.

    Returns the allocated unbound port, or None when there is no domid
    yet; allocation failures are logged and re-raised.
    """
    try:
        if self.domid == None:
            return None
        return xc.evtchn_alloc_unbound(domid = self.domid,
                                       remote_dom = 0)
    except:
        log.exception("Exception in alloc_unbound(%s)", str(self.domid))
        raise
2920 def _resetChannels(self):
2921 """Reset all event channels in the domain.
2922 """
2923 try:
2924 if self.domid != None:
2925 return xc.evtchn_reset(dom = self.domid)
2926 except:
2927 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2928 raise
2932 # Bootloader configuration
def _configureBootloader(self):
    """Run the bootloader if we're configured to do so."""

    blexec = self.info['PV_bootloader']
    bootloader_args = self.info['PV_bootloader_args']
    kernel = self.info['PV_kernel']
    ramdisk = self.info['PV_ramdisk']
    args = self.info['PV_args']
    boot = self.info['HVM_boot_policy']

    if boot:
        # HVM booting.
        pass
    elif not blexec and kernel:
        # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
        # will be picked up by image.py.
        pass
    else:
        # Boot using bootloader
        if not blexec or blexec == 'pygrub':
            blexec = auxbin.pathTo('pygrub')

        blcfg = None
        # Only disks flagged bootable are candidates for the bootloader.
        disks = [x for x in self.info['vbd_refs']
                 if self.info['devices'][x][1]['bootable']]

        if not disks:
            msg = "Had a bootloader specified, but no disks are bootable"
            log.error(msg)
            raise VmError(msg)

        devinfo = self.info['devices'][disks[0]]
        devtype = devinfo[0]
        disk = devinfo[1]['uname']

        fn = blkdev_uname_to_file(disk)
        taptype = blkdev_uname_to_taptype(disk)
        # 'mounted' is true for blktap images that are plain files
        # (st_rdev == 0) in a format other than raw aio/sync.
        mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
        if mounted:
            # This is a file, not a device.  pygrub can cope with a
            # file if it's raw, but if it's QCOW or other such formats
            # used through blktap, then we need to mount it first.

            log.info("Mounting %s on %s." %
                     (fn, BOOTLOADER_LOOPBACK_DEVICE))

            vbd = {
                'mode': 'RO',
                'device': BOOTLOADER_LOOPBACK_DEVICE,
            # NOTE(review): the remaining entries and closing brace of
            # this dict are missing from this copy of the source.

            from xen.xend import XendDomain
            dom0 = XendDomain.instance().privilegedDomain()
            dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
            fn = BOOTLOADER_LOOPBACK_DEVICE

        try:
            blcfg = bootloader(blexec, fn, self, False,
                               bootloader_args, kernel, ramdisk, args)
        finally:
            if mounted:
                log.info("Unmounting %s from %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

        if blcfg is None:
            msg = "Had a bootloader specified, but can't find disk"
            log.error(msg)
            raise VmError(msg)

        self.info.update_with_image_sxp(blcfg, True)
3010 # VM Functions
def _readVMDetails(self, params):
    """Read the specified parameters from the store.

    @param params: parameter specs passed through to _gatherVm
    @return: the gathered values, or [] when the store data is corrupt
    """
    try:
        return self._gatherVm(*params)
    except ValueError:
        # One of the int/float entries in params has a corresponding store
        # entry that is invalid. We recover, because older versions of
        # Xend may have put the entry there (memory/target, for example),
        # but this is in general a bad situation to have reached.
        log.exception(
            "Store corrupted at %s!  Domain %d's configuration may be "
            "affected.", self.vmpath, self.domid)
        return []
def _cleanupVm(self):
    """Cleanup VM resources.  Idempotent.  Nothrow guarantee."""

    self._unwatchVm()

    try:
        self._removeVm()
    except:
        # Nothrow guarantee: swallow and log any removal failure.
        log.exception("Removing VM path failed.")
def checkLiveMigrateMemory(self):
    """ Make sure there's enough memory to migrate this domain """
    overhead_kb = 0
    if arch.type == "x86":
        # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
        # the minimum that Xen would allocate if no value were given.
        overhead_kb = self.info['VCPUs_max'] * 1024 + \
                      (self.info['memory_static_max'] / 1024 / 1024) * 4
        # Round up to a whole number of MiB, expressed in KiB
        # (Python 2 integer division).
        overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
        # The domain might already have some shadow memory
        overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            balloon.free(overhead_kb, self)
def _unwatchVm(self):
    """Remove the watch on the VM path, if any. Idempotent. Nothrow
    guarantee."""
    try:
        try:
            if self.vmWatch:
                self.vmWatch.unwatch()
        finally:
            # Drop the reference even if unwatch() raised.
            self.vmWatch = None
    except:
        # Nothrow guarantee: log and continue.
        log.exception("Unwatching VM path failed.")
3065 def testDeviceComplete(self):
3066 """ For Block IO migration safety we must ensure that
3067 the device has shutdown correctly, i.e. all blocks are
3068 flushed to disk
3069 """
3070 start = time.time()
3071 while True:
3072 test = 0
3073 diff = time.time() - start
3074 vbds = self.getDeviceController('vbd').deviceIDs()
3075 taps = self.getDeviceController('tap').deviceIDs()
3076 for i in vbds + taps:
3077 test = 1
3078 log.info("Dev %s still active, looping...", i)
3079 time.sleep(0.1)
3081 if test == 0:
3082 break
3083 if diff >= MIGRATE_TIMEOUT:
3084 log.info("Dev still active but hit max loop timeout")
3085 break
3087 def testvifsComplete(self):
3088 """ In case vifs are released and then created for the same
3089 domain, we need to wait the device shut down.
3090 """
3091 start = time.time()
3092 while True:
3093 test = 0
3094 diff = time.time() - start
3095 for i in self.getDeviceController('vif').deviceIDs():
3096 test = 1
3097 log.info("Dev %s still active, looping...", i)
3098 time.sleep(0.1)
3100 if test == 0:
3101 break
3102 if diff >= MIGRATE_TIMEOUT:
3103 log.info("Dev still active but hit max loop timeout")
3104 break
def _storeVmDetails(self):
    """Write the legacy VM parameters for this domain into xenstore."""
    to_store = {}

    for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
        info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
        if self._infoIsSet(info_key):
            to_store[key] = str(self.info[info_key])

    # Legacy names for the static memory bounds.
    for info_key, store_key in [("static_memory_min", "memory"),
                                ("static_memory_max", "maxmem")]:
        if self._infoIsSet(info_key):
            to_store[store_key] = str(self.info[info_key])

    image_sxpr = self.info.image_sxpr()
    if image_sxpr:
        to_store['image'] = sxp.to_string(image_sxpr)

    if not self._readVm('xend/restart_count'):
        to_store['xend/restart_count'] = str(0)

    log.debug("Storing VM details: %s", scrub_password(to_store))

    self._writeVm(to_store)
    self._setVmPermissions()
def _setVmPermissions(self):
    """Allow the guest domain to read its UUID.  We don't allow it to
    access any other entry, for security."""
    perms = { 'dom'   : self.domid,
              'read'  : True,
              'write' : False }
    xstransact.SetPermissions('%s/uuid' % self.vmpath, perms)
3140 # Utility functions
3143 def __getattr__(self, name):
3144 if name == "state":
3145 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3146 log.warn("".join(traceback.format_stack()))
3147 return self._stateGet()
3148 else:
3149 raise AttributeError(name)
3151 def __setattr__(self, name, value):
3152 if name == "state":
3153 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3154 log.warn("".join(traceback.format_stack()))
3155 self._stateSet(value)
3156 else:
3157 self.__dict__[name] = value
def _stateSet(self, state):
    """Record a new power state, notifying waiters and dispatching a
    Xen-API 'power_state' event when the state actually changed."""
    self.state_updated.acquire()
    try:
        # TODO Not sure this is correct...
        # _stateGet is live now. Why not fire event
        # even when it hasn't changed?
        if self._stateGet() != state:
            self.state_updated.notifyAll()
            # Imported here, presumably to avoid an import cycle at
            # module load time -- confirm before hoisting.
            import XendAPI
            XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                   'power_state')
    finally:
        self.state_updated.release()
def _stateGet(self):
    """Reconstitute this domain's Xen-API power state from xc.

    dom_get tells us whether the domain still exists in the hypervisor;
    a saved checkpoint image distinguishes SUSPENDED from HALTED.
    """
    info = dom_get(self.getDomid())
    if info is None or info['shutdown']:
        # We are either HALTED or SUSPENDED -- check for a saved image.
        from xen.xend import XendDomain
        managed_config_path = \
            XendDomain.instance()._managed_check_point_path( \
                self.get_uuid())
        if os.path.exists(managed_config_path):
            return XEN_API_VM_POWER_STATE_SUSPENDED
        return XEN_API_VM_POWER_STATE_HALTED
    if info['crashed']:
        return XEN_API_VM_POWER_STATE_CRASHED
    # We are either RUNNING or PAUSED.
    if info['paused']:
        return XEN_API_VM_POWER_STATE_PAUSED
    return XEN_API_VM_POWER_STATE_RUNNING
3200 def _infoIsSet(self, name):
3201 return name in self.info and self.info[name] is not None
def _checkName(self, name):
    """Check if a vm name is valid. Valid names contain alphabetic
    characters, digits, or characters in '_-.:/+'.
    The same name cannot be used for more than one vm at the same time.

    @param name: name
    @raise: VmError if invalid
    """
    from xen.xend import XendDomain

    if name is None or name == '':
        raise VmError('Missing VM Name')

    if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
        raise VmError('Invalid VM Name')

    dom = XendDomain.instance().domain_lookup_nr(name)
    if dom and dom.info['uuid'] != self.info['uuid']:
        suffix = ""
        if dom.domid is not None:
            suffix = " as domain %s" % str(dom.domid)
        raise VmError("VM name '%s' already exists%s" % (name, suffix))
def update(self, info = None, refresh = True, transaction = None):
    """Update with info from xc.domain_getinfo().

    @param info: pre-fetched domain info dict; fetched via dom_get if
                 falsy (silently returns if the domain is gone)
    @param refresh: when True, re-evaluate shutdown state afterwards
    @param transaction: optional xenstore transaction for console updates
    """
    log.trace("XendDomainInfo.update(%s) on domain %s", info,
              str(self.domid))

    if not info:
        info = dom_get(self.domid)
        if not info:
            return

    if info["maxmem_kb"] < 0:
        # Negative maxmem_kb: substitute the host's total memory.
        # (Presumably xc reports < 0 for "no limit" -- confirm.)
        info["maxmem_kb"] = XendNode.instance() \
                            .physinfo_dict()['total_memory'] * 1024

    # make sure state is reset for info
    # TODO: we should eventually get rid of old_dom_states

    self.info.update_config(info)
    self._update_consoles(transaction)

    if refresh:
        self.refreshShutdown(info)

    log.trace("XendDomainInfo.update done on domain %s: %s",
              str(self.domid), self.info)
def sxpr(self, ignore_store = False, legacy_only = True):
    """Return this domain's configuration as an SXP expression."""
    return self.info.to_sxp(domain = self,
                            ignore_devices = ignore_store,
                            legacy_only = legacy_only)
3261 # Xen API
3262 # ----------------------------------------------------------------
def get_uuid(self):
    """Return this domain's UUID, minting and storing one if absent."""
    dom_uuid = self.info.get('uuid')
    if dom_uuid:
        return dom_uuid
    # if it doesn't exist, make one up and remember it
    dom_uuid = uuid.createString()
    self.info['uuid'] = dom_uuid
    return dom_uuid
def get_memory_static_max(self):
    """Return memory_static_max from the config (0 if unset)."""
    return self.info.get('memory_static_max', 0)
def get_memory_static_min(self):
    """Return memory_static_min from the config (0 if unset)."""
    return self.info.get('memory_static_min', 0)
def get_memory_dynamic_max(self):
    """Return memory_dynamic_max from the config (0 if unset)."""
    return self.info.get('memory_dynamic_max', 0)
def get_memory_dynamic_min(self):
    """Return memory_dynamic_min from the config (0 if unset)."""
    return self.info.get('memory_dynamic_min', 0)
3280 # only update memory-related config values if they maintain sanity
3281 def _safe_set_memory(self, key, newval):
3282 oldval = self.info.get(key, 0)
3283 try:
3284 self.info[key] = newval
3285 self.info._memory_sanity_check()
3286 except Exception, ex:
3287 self.info[key] = oldval
3288 raise
def set_memory_static_max(self, val):
    """Set memory_static_max, subject to the memory sanity check."""
    self._safe_set_memory('memory_static_max', val)
def set_memory_static_min(self, val):
    """Set memory_static_min, subject to the memory sanity check."""
    self._safe_set_memory('memory_static_min', val)
def set_memory_dynamic_max(self, val):
    """Set memory_dynamic_max, subject to the memory sanity check."""
    self._safe_set_memory('memory_dynamic_max', val)
def set_memory_dynamic_min(self, val):
    """Set memory_dynamic_min, subject to the memory sanity check."""
    self._safe_set_memory('memory_dynamic_min', val)
def get_vcpus_params(self):
    """Return scheduler parameters: the stored vcpus_params when the
    domain is not instantiated, else the live credit-scheduler values."""
    if self.getDomid() is None:
        return self.info['vcpus_params']
    return xc.sched_credit_domain_get(self.getDomid())
def get_power_state(self):
    """Return the Xen-API name of this domain's power state."""
    return XEN_API_VM_POWER_STATE[self._stateGet()]

def get_platform(self):
    """Return the platform config dict ({} if unset)."""
    return self.info.get('platform', {})

def get_pci_bus(self):
    """Return the PCI bus identifier ('' if unset)."""
    return self.info.get('pci_bus', '')

def get_tools_version(self):
    """Return the tools version dict ({} if unset)."""
    return self.info.get('tools_version', {})

def get_metrics(self):
    """Return the UUID of this domain's metrics object."""
    return self.metrics.get_uuid()
def get_security_label(self, xspol=None):
    """Return this domain's XSM security label, evaluated under policy
    *xspol* (or the active policy when xspol is None)."""
    import xen.util.xsm.xsm as security
    return security.get_security_label(self, xspol)
def set_security_label(self, seclab, old_seclab, xspol=None,
                       xspol_old=None):
    """
    Set the security label of a domain from its old to
    a new value.

    @param seclab: New security label formatted in the form
        <policy type>:<policy name>:<vm label>
    @param old_seclab: The current security label that the
        VM must have.
    @param xspol: An optional policy under which this
        update should be done. If not given,
        then the current active policy is used.
    @param xspol_old: The old policy; only to be passed during
        the updating of a policy
    @return: Returns return code, a string with errors from
        the hypervisor's operation, old label of the
        domain, and the new ssidref
    """
    rc = 0
    errors = ""
    old_label = ""
    new_ssidref = 0
    domid = self.getDomid()
    res_labels = None
    is_policy_update = (xspol_old != None)

    from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

    state = self._stateGet()
    # Relabel only HALTED or RUNNING or PAUSED domains
    if domid != 0 and \
       state not in \
        [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
          DOM_STATE_SUSPENDED ]:
        log.warn("Relabeling domain not possible in state '%s'" %
                 DOM_STATES[state])
        return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

    # Remove security label. Works only for halted or suspended domains
    if not seclab or seclab == "":
        if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        if self.info.has_key('security_label'):
            old_label = self.info['security_label']
            # Check label against expected one.
            if old_label != old_seclab:
                return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
            del self.info['security_label']
            xen.xend.XendDomain.instance().managed_config_save(self)
        return (xsconstants.XSERR_SUCCESS, "", "", 0)

    # New label must be <type>:<policy>:<label>.
    tmp = seclab.split(":")
    if len(tmp) != 3:
        return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
    typ, policy, label = tmp

    poladmin = XSPolicyAdminInstance()
    if not xspol:
        xspol = poladmin.get_policy_by_name(policy)

    try:
        # Serialise against policy changes for the whole relabel.
        xen.xend.XendDomain.instance().policy_lock.acquire_writer()

        if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
            #if domain is running or paused try to relabel in hypervisor
            if not xspol:
                return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

            if typ != xspol.get_type_name() or \
               policy != xspol.get_name():
                return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

            if typ == xsconstants.ACM_POLICY_ID:
                new_ssidref = xspol.vmlabel_to_ssidref(label)
                if new_ssidref == xsconstants.INVALID_SSIDREF:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                # Check that all used resources are accessible under the
                # new label
                if not is_policy_update and \
                   not security.resources_compatible_with_vmlabel(xspol,
                          self, label):
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                #Check label against expected one. Can only do this
                # if the policy hasn't changed underneath in the meantime
                if xspol_old == None:
                    old_label = self.get_security_label()
                    if old_label != old_seclab:
                        log.info("old_label != old_seclab: %s != %s" %
                                 (old_label, old_seclab))
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                # relabel domain in the hypervisor
                rc, errors = security.relabel_domains([[domid, new_ssidref]])
                log.info("rc from relabeling in HV: %d" % rc)
            else:
                return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

        if rc == 0:
            # HALTED, RUNNING or PAUSED
            if domid == 0:
                if xspol:
                    self.info['security_label'] = seclab
                    ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                else:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
            else:
                if self.info.has_key('security_label'):
                    old_label = self.info['security_label']
                    # Check label against expected one, unless wildcard
                    if old_label != old_seclab:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                self.info['security_label'] = seclab

                try:
                    xen.xend.XendDomain.instance().managed_config_save(self)
                except:
                    # Best-effort persistence; failure is ignored.
                    pass
        return (rc, errors, old_label, new_ssidref)
    finally:
        xen.xend.XendDomain.instance().policy_lock.release()
def get_on_shutdown(self):
    """Return the normal-shutdown action, defaulting to the last entry
    of XEN_API_ON_NORMAL_EXIT when unset or unrecognised."""
    action = self.info.get('actions_after_shutdown')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_reboot(self):
    """Return the reboot action, defaulting to the last entry of
    XEN_API_ON_NORMAL_EXIT when unset or unrecognised."""
    action = self.info.get('actions_after_reboot')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_suspend(self):
    """Return the suspend action, defaulting to the last entry of
    XEN_API_ON_NORMAL_EXIT when unset or unrecognised."""
    # TODO: not supported
    action = self.info.get('actions_after_suspend')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_crash(self):
    """Return the crash action mapped through
    XEN_API_ON_CRASH_BEHAVIOUR_FILTER, defaulting to the first entry of
    XEN_API_ON_CRASH_BEHAVIOUR when unset or unrecognised."""
    action = self.info.get('actions_after_crash')
    if action and action in XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
        return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[action]
    return XEN_API_ON_CRASH_BEHAVIOUR[0]
def get_dev_config_by_uuid(self, dev_class, dev_uuid):
    """ Get's a device configuration either from XendConfig or
    from the DevController.

    @param dev_class: device class, either, 'vbd' or 'vif'
    @param dev_uuid: device UUID

    @rtype: dictionary (or None when the device is unknown)
    """
    dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

    # shortcut if the domain isn't started because
    # the devcontrollers will have no better information
    # than XendConfig.
    if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                            XEN_API_VM_POWER_STATE_SUSPENDED):
        if dev_config:
            return copy.deepcopy(dev_config)
        return None

    # instead of using dev_class, we use the dev_type
    # that is from XendConfig.
    controller = self.getDeviceController(dev_type)
    if not controller:
        return None

    all_configs = controller.getAllDeviceConfigurations()
    if not all_configs:
        return None

    updated_dev_config = copy.deepcopy(dev_config)
    for _devid, _devcfg in all_configs.items():
        if _devcfg.get('uuid') == dev_uuid:
            # Overlay the live controller config on the stored one.
            updated_dev_config.update(_devcfg)
            updated_dev_config['id'] = _devid
            return updated_dev_config

    # No live device matched: return the plain stored copy.
    return updated_dev_config
def get_dev_xenapi_config(self, dev_class, dev_uuid):
    """Return the Xen-API view of device *dev_uuid*'s configuration,
    augmented with live I/O statistics when the domain is running.

    @param dev_class: 'vif', 'vbd' or 'vtpm'
    @param dev_uuid: device UUID
    @rtype: dictionary ({} when the device is unknown)
    """
    config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
    if not config:
        return {}

    config['VM'] = self.get_uuid()

    if dev_class == 'vif':
        # Fill in Xen-API field names from the legacy config keys.
        if not config.has_key('name'):
            config['name'] = config.get('vifname', '')
        if not config.has_key('MAC'):
            config['MAC'] = config.get('mac', '')
        if not config.has_key('type'):
            config['type'] = 'paravirtualised'
        if not config.has_key('device'):
            devid = config.get('id')
            if devid != None:
                config['device'] = 'eth%s' % devid
            else:
                config['device'] = ''

        if not config.has_key('network'):
            try:
                bridge = config.get('bridge', None)
                if bridge is None:
                    from xen.util import Brctl
                    if_to_br = dict([(i,b)
                                     for (b,ifs) in Brctl.get_state().items()
                                     for i in ifs])
                    vifname = "vif%s.%s" % (self.getDomid(),
                                            config.get('id'))
                    bridge = if_to_br.get(vifname, None)
                # NOTE(review): the 'bridge' computed above is unused;
                # bridge_to_network is called with config.get('bridge'),
                # which is None on the lookup path -- confirm whether
                # 'bridge' was intended here.
                config['network'] = \
                    XendNode.instance().bridge_to_network(
                    config.get('bridge')).get_uuid()
            except Exception:
                log.exception('bridge_to_network')
                # Ignore this for now -- it may happen if the device
                # has been specified using the legacy methods, but at
                # some point we're going to have to figure out how to
                # handle that properly.

        config['MTU'] = 1500 # TODO

        if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
            # Live statistics from the node.
            xennode = XendNode.instance()
            rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
            config['io_read_kbs'] = rx_bps/1024
            config['io_write_kbs'] = tx_bps/1024
            rx, tx = xennode.get_vif_stat(self.domid, devid)
            config['io_total_read_kbs'] = rx/1024
            config['io_total_write_kbs'] = tx/1024
        else:
            config['io_read_kbs'] = 0.0
            config['io_write_kbs'] = 0.0
            config['io_total_read_kbs'] = 0.0
            config['io_total_write_kbs'] = 0.0

    config['security_label'] = config.get('security_label', '')

    if dev_class == 'vbd':

        if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
            controller = self.getDeviceController(dev_class)
            devid, _1, _2 = controller.getDeviceDetails(config)
            xennode = XendNode.instance()
            rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
            config['io_read_kbs'] = rd_blkps
            config['io_write_kbs'] = wr_blkps
        else:
            config['io_read_kbs'] = 0.0
            config['io_write_kbs'] = 0.0

        config['VDI'] = config.get('VDI', '')
        config['device'] = config.get('dev', '')
        # A 'name:cdrom'-style device spec carries the VBD type.
        if ':' in config['device']:
            vbd_name, vbd_type = config['device'].split(':', 1)
            config['device'] = vbd_name
            if vbd_type == 'cdrom':
                config['type'] = XEN_API_VBD_TYPE[0]
            else:
                config['type'] = XEN_API_VBD_TYPE[1]

        config['driver'] = 'paravirtualised' # TODO
        config['image'] = config.get('uname', '')

        if config.get('mode', 'r') == 'r':
            config['mode'] = 'RO'
        else:
            config['mode'] = 'RW'

    if dev_class == 'vtpm':
        if not config.has_key('type'):
            config['type'] = 'paravirtualised' # TODO
        if not config.has_key('backend'):
            config['backend'] = "00000000-0000-0000-0000-000000000000"

    return config
def get_dev_property(self, dev_class, dev_uuid, field):
    """Return a single field of a device's Xen-API configuration.

    @raise XendError: when the device has no such field.
    """
    config = self.get_dev_xenapi_config(dev_class, dev_uuid)
    if field not in config:
        raise XendError('Invalid property for device: %s' % field)
    return config[field]
def set_dev_property(self, dev_class, dev_uuid, field, value):
    """Set one field in the stored config of device *dev_uuid*."""
    dev_entry = self.info['devices'][dev_uuid]
    dev_entry[1][field] = value
def get_vcpus_util(self):
    """Return a map of vcpu number (as a string) to its utilisation,
    empty when the domain is not instantiated."""
    xennode = XendNode.instance()
    util_by_vcpu = {}
    if 'VCPUs_max' in self.info and self.domid is not None:
        for vcpu in range(0, self.info['VCPUs_max']):
            util_by_vcpu[str(vcpu)] = xennode.get_vcpu_util(self.domid, vcpu)
    return util_by_vcpu
def get_consoles(self):
    """Return the list of console device UUIDs."""
    return self.info.get('console_refs', [])

def get_vifs(self):
    """Return the list of VIF device UUIDs."""
    return self.info.get('vif_refs', [])

def get_vbds(self):
    """Return the list of VBD device UUIDs."""
    return self.info.get('vbd_refs', [])

def get_vtpms(self):
    """Return the list of vTPM device UUIDs."""
    return self.info.get('vtpm_refs', [])

def get_dpcis(self):
    """Return the DPCI instances bound to this VM."""
    return XendDPCI.get_by_VM(self.info.get('uuid'))

def get_dscsis(self):
    """Return the DSCSI instances bound to this VM."""
    return XendDSCSI.get_by_VM(self.info.get('uuid'))
def create_vbd(self, xenapi_vbd, vdi_image_path):
    """Create a VBD using a VDI from XendStorageRepository.

    @param xenapi_vbd: vbd struct from the Xen API
    @param vdi_image_path: VDI UUID
    @rtype: string
    @return: uuid of the device
    """
    xenapi_vbd['image'] = vdi_image_path
    # 'tap'-prefixed images go through the blktap controller,
    # everything else through plain 'vbd'.
    if vdi_image_path.startswith('tap'):
        dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
    else:
        dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                            XEN_API_VM_POWER_STATE_PAUSED):
        _, config = self.info['devices'][dev_uuid]

        if vdi_image_path.startswith('tap'):
            dev_control = self.getDeviceController('tap')
        else:
            dev_control = self.getDeviceController('vbd')

        try:
            devid = dev_control.createDevice(config)
            dev_control.waitForDevice(devid)
            self.info.device_update(dev_uuid,
                                    cfg_xenapi = {'devid': devid})
        except Exception, exn:
            log.exception(exn)
            # Roll back the config entry added above, then re-raise.
            del self.info['devices'][dev_uuid]
            self.info['vbd_refs'].remove(dev_uuid)
            raise

    return dev_uuid
def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
    """Create a VBD using a VDI from XendStorageRepository.

    @param xenapi_vbd: vbd struct from the Xen API
    @param vdi_image_path: VDI UUID
    @rtype: string
    @return: uuid of the device
    """
    # NOTE(review): despite the docstring, the final statement returns
    # config['devid'] rather than the device uuid, and 'config' is only
    # bound when the domain is RUNNING -- otherwise this raises
    # NameError.  Confirm intended behaviour with callers.
    xenapi_vbd['image'] = vdi_image_path
    dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
        _, config = self.info['devices'][dev_uuid]
        config['devid'] = self.getDeviceController('tap').createDevice(config)

    return config['devid']
def create_vif(self, xenapi_vif):
    """Create VIF device from the passed struct in Xen API format.

    @param xenapi_vif: Xen API VIF Struct.
    @rtype: string
    @return: UUID
    """
    dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                            XEN_API_VM_POWER_STATE_PAUSED):

        _, config = self.info['devices'][dev_uuid]
        dev_control = self.getDeviceController('vif')

        try:
            devid = dev_control.createDevice(config)
            dev_control.waitForDevice(devid)
            self.info.device_update(dev_uuid,
                                    cfg_xenapi = {'devid': devid})
        except Exception, exn:
            log.exception(exn)
            # Roll back the config entry added above, then re-raise.
            del self.info['devices'][dev_uuid]
            self.info['vif_refs'].remove(dev_uuid)
            raise

    return dev_uuid
def create_vtpm(self, xenapi_vtpm):
    """Create a VTPM device from the passed struct in Xen API format.

    @return: uuid of the device
    @rtype: string
    @raise VmError: domain not halted, or it already has a vTPM
    @raise XendError: device creation failed
    """
    if self._stateGet() != DOM_STATE_HALTED:
        raise VmError("Can only add vTPM to a halted domain.")
    if self.get_vtpms():
        raise VmError('Domain already has a vTPM.')

    dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
    if not dev_uuid:
        raise XendError('Failed to create device')
    return dev_uuid
def create_console(self, xenapi_console):
    """ Create a console device from a Xen API struct.

    @return: uuid of device
    @rtype: string
    @raise VmError: domain is not halted
    @raise XendError: device creation failed
    """
    if self._stateGet() != DOM_STATE_HALTED:
        raise VmError("Can only add console to a halted domain.")

    dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
    if not dev_uuid:
        raise XendError('Failed to create device')
    return dev_uuid
def set_console_other_config(self, console_uuid, other_config):
    """Update the other_config field of the given console device."""
    self.info.console_update(console_uuid, 'other_config', other_config)
def create_dpci(self, xenapi_pci):
    """Create pci device from the passed struct in Xen API format.

    @param xenapi_pci: DPCI struct from Xen API
    @rtype: bool
    #@rtype: string
    @return: True if successfully created device
    #@return: UUID
    """

    dpci_uuid = uuid.createString()

    # Flatten the options dict into [key, value] pairs for sxp.
    dpci_opts = []
    opts_dict = xenapi_pci.get('options')
    for k in opts_dict.keys():
        dpci_opts.append([k, opts_dict[k]])

    # Convert xenapi to sxp
    ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

    target_pci_sxp = \
        ['pci',
            ['dev',
                ['domain', '0x%02x' % ppci.get_domain()],
                ['bus', '0x%02x' % ppci.get_bus()],
                ['slot', '0x%02x' % ppci.get_slot()],
                ['func', '0x%1x' % ppci.get_func()],
                ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                ['opts', dpci_opts],
                ['uuid', dpci_uuid]
            ],
            ['state', 'Initialising']
    # NOTE(review): the closing bracket of target_pci_sxp appears to be
    # missing from this copy of the source.

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

        old_pci_sxp = self._getDeviceInfo_pci(0)

        if old_pci_sxp is None:
            dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
            if not dev_uuid:
                raise XendError('Failed to create device')

        else:
            # Append the new dev to the existing pci device record.
            new_pci_sxp = ['pci']
            for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                new_pci_sxp.append(existing_dev)
            new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

            dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_pci_sxp)

        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        try:
            self.device_configure(target_pci_sxp)

        except Exception, exn:
            # NOTE(review): the original exception is discarded here;
            # consider chaining/logging it.
            raise XendError('Failed to create device')

    return dpci_uuid
def create_dscsi(self, xenapi_dscsi):
    """Create scsi device from the passed struct in Xen API format.

    @param xenapi_dscsi: DSCSI struct from Xen API
    @rtype: string
    @return: UUID
    """

    dscsi_uuid = uuid.createString()

    # Convert xenapi to sxp
    pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
    # devid is the host part of the virtual H:C:T:L.
    devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
    target_vscsi_sxp = \
        ['vscsi',
            ['dev',
                ['devid', devid],
                ['p-devname', pscsi.get_dev_name()],
                ['p-dev', pscsi.get_physical_HCTL()],
                ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                ['state', xenbusState['Initialising']],
                ['uuid', dscsi_uuid]
            ],
            ['feature-host', 0]
    # NOTE(review): the closing bracket of target_vscsi_sxp appears to
    # be missing from this copy of the source.

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

        if cur_vscsi_sxp is None:
            dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
            if not dev_uuid:
                raise XendError('Failed to create device')

        else:
            # Append the new dev to the existing vscsi device record.
            new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
            for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                new_vscsi_sxp.append(existing_dev)
            new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

            dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_vscsi_sxp)

        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        try:
            self.device_configure(target_vscsi_sxp)

        except Exception, exn:
            # NOTE(review): the original exception is discarded here;
            # consider chaining/logging it.
            raise XendError('Failed to create device')

    return dscsi_uuid
def destroy_device_by_uuid(self, dev_type, dev_uuid):
    """Remove device *dev_uuid* of class *dev_type* from this domain.

    For a RUNNING or PAUSED domain the backend device is destroyed as
    well; the stored config entry is removed in any case.

    @raise XendError: device unknown, or no devid available.
    """
    if dev_uuid not in self.info['devices']:
        raise XendError('Device does not exist')

    try:
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]
            devid = config.get('devid')
            if devid != None:
                self.getDeviceController(dev_type).destroyDevice(devid, force = False)
            else:
                raise XendError('Unable to get devid for device: %s:%s' %
                                (dev_type, dev_uuid))
    finally:
        # The config entry is dropped even when destroyDevice raised.
        del self.info['devices'][dev_uuid]
        self.info['%s_refs' % dev_type].remove(dev_uuid)
def destroy_vbd(self, dev_uuid):
    """Destroy the VBD with the given device uuid."""
    self.destroy_device_by_uuid('vbd', dev_uuid)

def destroy_vif(self, dev_uuid):
    """Destroy the VIF with the given device uuid."""
    self.destroy_device_by_uuid('vif', dev_uuid)

def destroy_vtpm(self, dev_uuid):
    """Destroy the vTPM with the given device uuid."""
    self.destroy_device_by_uuid('vtpm', dev_uuid)
def destroy_dpci(self, dev_uuid):
    """Detach the passed-through PCI device identified by DPCI *dev_uuid*."""

    dpci = XendAPIStore.get(dev_uuid, 'DPCI')
    ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

    old_pci_sxp = self._getDeviceInfo_pci(0)
    dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
    target_dev = None
    new_pci_sxp = ['pci']
    for dev in sxp.children(old_pci_sxp, 'dev'):
        # Match on the canonical dddd:bb:ss.f PCI name.
        domain = int(sxp.child_value(dev, 'domain'), 16)
        bus = int(sxp.child_value(dev, 'bus'), 16)
        slot = int(sxp.child_value(dev, 'slot'), 16)
        func = int(sxp.child_value(dev, 'func'), 16)
        name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
        if ppci.get_name() == name:
            target_dev = dev
        else:
            new_pci_sxp.append(dev)

    if target_dev is None:
        raise XendError('Failed to destroy device')

    target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

        self.info.device_update(dev_uuid, new_pci_sxp)
        # Drop the whole pci record when no devs remain.
        if len(sxp.children(new_pci_sxp, 'dev')) == 0:
            del self.info['devices'][dev_uuid]
        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        try:
            self.device_configure(target_pci_sxp)

        except Exception, exn:
            # NOTE(review): the original exception is discarded here.
            raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach the SCSI device identified by a DSCSI UUID from this domain.

        Looks up the DSCSI instance, rebuilds the domain's 'vscsi' SXP
        description without the device matching its virtual HCTL address,
        then either rewrites the stored configuration (domain not running)
        or requests a live detach via device_configure (domain running).

        @param dev_uuid: UUID of the DSCSI instance to destroy.
        @raise XendError: if no device with the virtual HCTL is present,
            or if live reconfiguration fails.
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        # From here on, dev_uuid names the 'vscsi' device node, not the DSCSI.
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            # Devices are matched by their virtual host:channel:target:lun.
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                # Keep every other device in the rebuilt description.
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        # Mark the removed device as Closing and build a single-device SXP
        # used to request the live detach below.
        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not running: just rewrite the saved configuration.
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                # Last SCSI device gone - drop the whole 'vscsi' entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)
        else:
            try:
                # Domain running: perform a live detach of the device.
                self.device_configure(target_vscsi_sxp)
            except Exception, exn:
                # Original exception is discarded; callers see a XendError.
                raise XendError('Failed to destroy device')
3992 def destroy_xapi_instances(self):
3993 """Destroy Xen-API instances stored in XendAPIStore.
3994 """
3995 # Xen-API classes based on XendBase have their instances stored
3996 # in XendAPIStore. Cleanup these instances here, if they are supposed
3997 # to be destroyed when the parent domain is dead.
3999 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
4000 # XendBase and there's no need to remove them from XendAPIStore.
4002 from xen.xend import XendDomain
4003 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
4004 # domain still exists.
4005 return
4007 # Destroy the VMMetrics instance.
4008 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
4009 is not None:
4010 self.metrics.destroy()
4012 # Destroy DPCI instances.
4013 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
4014 XendAPIStore.deregister(dpci_uuid, "DPCI")
4016 # Destroy DSCSI instances.
4017 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
4018 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
4020 def has_device(self, dev_class, dev_uuid):
4021 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
4023 def __str__(self):
4024 return '<domain id=%s name=%s memory=%s state=%s>' % \
4025 (str(self.domid), self.info['name_label'],
4026 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
4028 __repr__ = __str__