ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19627:070f456143d3

xend: remove spurious pci_len from getDeviceSxprs

Cc: Zhai Edwin <edwin.zhai@intel.com>
Cc: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
Cc: Dexuan Cui <dexuan.cui@intel.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Wed May 20 15:13:36 2009 +0100 (2009-05-20)
parents 780041c4a96d
children 23f9857f642f
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
65 MIGRATE_TIMEOUT = 30.0
66 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
68 xc = xen.lowlevel.xc.xc()
69 xoptions = XendOptions.instance()
71 log = logging.getLogger("xend.XendDomainInfo")
72 #log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a second *running* domain with the same name or
    # uuid; a managed-but-inactive (domid is None) entry is acceptable.
    othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if othervm is None or othervm.domid is None:
        othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if othervm is not None and othervm.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
        return vm
    except:
        # Start failed part-way: tear down whatever was built, then re-raise.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def create_from_dict(config_dict):
    """Create and start a VM from a XenAPI-style configuration dictionary.

    @param config_dict: A configuration dictionary.

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """

    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    domconfig = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
        return vm
    except:
        # Clean up the partially constructed domain before propagating.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration for the existing domain
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    # Dying domains must not be recreated (their store paths are gone).
    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    # The XendError('reinit') raises below are deliberate early exits
    # from this consistency check; they are swallowed by the matching
    # `except XendError` so that recreation continues with
    # needs_reinitialising set.
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the xenstore entries from the parsed configuration.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    vm = XendDomainInfo(domconfig, resume = True)
    try:
        vm.resume()
    except:
        # A failed resume leaves a half-built domain; destroy it first.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating the VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object

    @rtype:  XendDomainInfo
    @return: A dormant (not running) XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by name.

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Get a shutdown reason from a code.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason ("?" if the code is unknown)
    @rtype:  string
    """
    reason = DOMAIN_SHUTDOWN_REASONS.get(code, "?")
    return reason
277 def dom_get(dom):
278 """Get info from xen for an existing domain.
280 @param dom: domain id
281 @type dom: int
282 @return: info or None
283 @rtype: dictionary
284 """
285 try:
286 domlist = xc.domain_getinfo(dom, 1)
287 if domlist and dom == domlist[0]['domid']:
288 return domlist[0]
289 except Exception, err:
290 # ignore missing domain
291 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
292 return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings assigned to a domain.

    Reads the pci backend directory for the domain in xenstore and
    collects the 'dev-N' entries.

    @param domid: domain id
    @type  domid: int
    @return: list of device strings as stored by the pci backend
             (empty if no pci backend node exists for the domain)
    @rtype:  list of strings
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    # No pci backend node (or an empty value) means no assigned devices.
    if num_devs is None or num_devs == "":
        return []
    # Collect dev-0 .. dev-(num_devs-1) in order.
    return [xstransact.Read(path + 'dev-%i' % i)
            for i in range(int(num_devs))]
306 def do_FLR(domid):
307 from xen.xend.server.pciif import parse_pci_name, PciDevice
308 dev_str_list = get_assigned_pci_devices(domid)
310 for dev_str in dev_str_list:
311 (dom, b, d, f) = parse_pci_name(dev_str)
312 try:
313 dev = PciDevice(dom, b, d, f)
314 except Exception, e:
315 raise VmError("pci: failed to locate device and "+
316 "parse it's resources - "+str(e))
317 dev.do_FLR()
319 class XendDomainInfo:
320 """An object represents a domain.
322 @TODO: try to unify dom and domid, they mean the same thing, but
323 xc refers to it as dom, and everywhere else, including
324 xenstore it is domid. The best way is to change xc's
325 python interface.
327 @ivar info: Parsed configuration
328 @type info: dictionary
329 @ivar domid: Domain ID (if VM has started)
330 @type domid: int or None
331 @ivar vmpath: XenStore path to this VM.
332 @type vmpath: string
333 @ivar dompath: XenStore path to this Domain.
334 @type dompath: string
335 @ivar image: Reference to the VM Image.
336 @type image: xen.xend.image.ImageHandler
337 @ivar store_port: event channel to xenstored
338 @type store_port: int
339 @ivar console_port: event channel to xenconsoled
340 @type console_port: int
341 @ivar store_mfn: xenstored mfn
342 @type store_mfn: int
343 @ivar console_mfn: xenconsoled mfn
344 @type console_mfn: int
345 @ivar notes: OS image notes
346 @type notes: dictionary
347 @ivar vmWatch: reference to a watch on the xenstored vmpath
348 @type vmWatch: xen.xend.xenstore.xswatch
349 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
350 @type shutdownWatch: xen.xend.xenstore.xswatch
351 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
352 @type shutdownStartTime: float or None
353 @ivar restart_in_progress: Is a domain restart thread running?
354 @type restart_in_progress: bool
355 # @ivar state: Domain state
356 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
357 @ivar state_updated: lock for self.state
358 @type state_updated: threading.Condition
359 @ivar refresh_shutdown_lock: lock for polling shutdown state
360 @type refresh_shutdown_lock: threading.Condition
361 @ivar _deviceControllers: device controller cache for this domain
362 @type _deviceControllers: dict 'string' to DevControllers
363 """
365 def __init__(self, info, domid = None, dompath = None, augment = False,
366 priv = False, resume = False, vmpath = None):
367 """Constructor for a domain
369 @param info: parsed configuration
370 @type info: dictionary
371 @keyword domid: Set initial domain id (if any)
372 @type domid: int
373 @keyword dompath: Set initial dompath (if any)
374 @type dompath: string
375 @keyword augment: Augment given info with xenstored VM info
376 @type augment: bool
377 @keyword priv: Is a privileged domain (Dom 0)
378 @type priv: bool
379 @keyword resume: Is this domain being resumed?
380 @type resume: bool
381 """
383 self.info = info
384 if domid == None:
385 self.domid = self.info.get('domid')
386 else:
387 self.domid = domid
389 #REMOVE: uuid is now generated in XendConfig
390 #if not self._infoIsSet('uuid'):
391 # self.info['uuid'] = uuid.toString(uuid.create())
393 # Find a unique /vm/<uuid>/<integer> path if not specified.
394 # This avoids conflict between pre-/post-migrate domains when doing
395 # localhost relocation.
396 self.vmpath = vmpath
397 i = 0
398 while self.vmpath == None:
399 self.vmpath = XS_VMROOT + self.info['uuid']
400 if i != 0:
401 self.vmpath = self.vmpath + '-' + str(i)
402 try:
403 if self._readVm("uuid"):
404 self.vmpath = None
405 i = i + 1
406 except:
407 pass
409 self.dompath = dompath
411 self.image = None
412 self.store_port = None
413 self.store_mfn = None
414 self.console_port = None
415 self.console_mfn = None
417 self.native_protocol = None
419 self.vmWatch = None
420 self.shutdownWatch = None
421 self.shutdownStartTime = None
422 self._resume = resume
423 self.restart_in_progress = False
425 self.state_updated = threading.Condition()
426 self.refresh_shutdown_lock = threading.Condition()
427 self._stateSet(DOM_STATE_HALTED)
429 self._deviceControllers = {}
431 for state in DOM_STATES_OLD:
432 self.info[state] = 0
434 if augment:
435 self._augmentInfo(priv)
437 self._checkName(self.info['name_label'])
439 self.metrics = XendVMMetrics(uuid.createString(), self)
442 #
443 # Public functions available through XMLRPC
444 #
    def start(self, is_managed = False):
        """Attempts to start the VM by do the appropriate
        initialisation if it not started.

        @keyword is_managed: save the running configuration if the domain
                             is persistent (managed by XendDomain)
        @type is_managed: bool
        @raise XendError: the VM is already running.
        """
        from xen.xend import XendDomain

        # Only a domain that is not currently running may be started.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each construction step reports progress via XendTask.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply weight/cap when the credit scheduler is active.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                       self.getWeight(),
                                                       self.getCap())
            except:
                # Any failure tears the domain down before re-raising.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: the domain is not in a suspended/halted state.
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Rebuild xenstore details, channels and devices for the
                # resumed domain.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                # Failure: destroy the half-resumed domain, then re-raise.
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of the values in DOMAIN_SHUTDOWN_REASONS
        @type reason: string
        @raise XendError: domain already down, is Domain-0, or reason invalid.
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        # Ask the guest to shut itself down via its xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            if not hvm_pvdrv or hvm_s_state != 0:
                # Guest cannot react to the xenstore request (no PV drivers,
                # or it is in an ACPI sleep state), so have the hypervisor
                # shut it down directly.
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
531 def pause(self):
532 """Pause domain
534 @raise XendError: Failed pausing a domain
535 """
536 try:
537 xc.domain_pause(self.domid)
538 self._stateSet(DOM_STATE_PAUSED)
539 except Exception, ex:
540 log.exception(ex)
541 raise XendError("Domain unable to be paused: %s" % str(ex))
543 def unpause(self):
544 """Unpause domain
546 @raise XendError: Failed unpausing a domain
547 """
548 try:
549 xc.domain_unpause(self.domid)
550 self._stateSet(DOM_STATE_RUNNING)
551 except Exception, ex:
552 log.exception(ex)
553 raise XendError("Domain unable to be unpaused: %s" % str(ex))
555 def send_sysrq(self, key):
556 """ Send a Sysrq equivalent key via xenstored."""
557 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
558 raise XendError("Domain '%s' is not started" % self.info['name_label'])
560 asserts.isCharConvertible(key)
561 self.storeDom("control/sysrq", '%c' % key)
563 def sync_pcidev_info(self):
565 if not self.info.is_hvm():
566 return
568 devid = '0'
569 dev_info = self._getDeviceInfo_pci(devid)
570 if dev_info is None:
571 return
573 # get the virtual slot info from xenstore
574 dev_uuid = sxp.child_value(dev_info, 'uuid')
575 pci_conf = self.info['devices'][dev_uuid][1]
576 pci_devs = pci_conf['devs']
578 count = 0
579 vslots = None
580 while vslots is None and count < 20:
581 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
582 % (self.getDomid(), devid))
583 time.sleep(0.1)
584 count += 1
585 if vslots is None:
586 log.error("Device model didn't tell the vslots for PCI device")
587 return
589 #delete last delim
590 if vslots[-1] == ";":
591 vslots = vslots[:-1]
593 slot_list = vslots.split(';')
594 if len(slot_list) != len(pci_devs):
595 log.error("Device model's pci dev num dismatch")
596 return
598 #update the vslot info
599 count = 0;
600 for x in pci_devs:
601 x['vslot'] = slot_list[count]
602 count += 1
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug a pass-through PCI device into an HVM guest.

        Validates the request (slot conflicts, VT-d assignability,
        pciback ownership, MMIO BAR alignment, co-assignment rules) and
        then signals the device model to insert the device.

        @param dev_config: dict with a 'devs' list; the first entry holds
                           'domain'/'bus'/'slot'/'func'/'requested_vslot'
                           (hex strings) and optional 'opts'
        @return: the virtual slot the device ended up in
        @raise VmError: any validation step fails.
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Older configs store 'vslot'; newer ones 'requested_vslot'.
                if x.has_key('vslot'):
                    x_vslot = x['vslot']
                else:
                    x_vslot = x['requested_vslot']
                if (int(x_vslot, 16) == int(new_dev['requested_vslot'], 16) and
                    int(x_vslot, 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already have a device." % (new_dev['requested_vslot']))

                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(0, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Nonzero, non -1: hypervisor returned the failing device's BDF.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        domain = int(new_dev['domain'],16)
        bus = int(new_dev['bus'],16)
        dev = int(new_dev['slot'],16)
        func = int(new_dev['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                    "%s\n"+ \
                    "See the pciback.hide kernel "+ \
                    "command-line parameter or\n"+ \
                    "bind your slot/device to the PCI backend using sysfs" \
                    )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            (domain, bus, dev, func) = parse_pci_name(pci_str)
            dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
            if xc.test_assign_device(0, dev_str) == 0:
                # Still assignable, i.e. not yet given to any domain: fine.
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                    " because one of its co-assignment device %s has been" + \
                    " assigned to other domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        if self.domid is not None:
            # Build the "opt1=val1,opt2=val2" suffix from the 'opts' pairs.
            opts = ''
            if 'opts' in new_dev and len(new_dev['opts']) > 0:
                config_opts = new_dev['opts']
                config_opts = map(lambda (x, y): x+'='+y, config_opts)
                opts = ',' + reduce(lambda x, y: x+','+y, config_opts)

            bdf_str = "%s:%s:%s.%s@%s%s" % (new_dev['domain'],
                                            new_dev['bus'],
                                            new_dev['slot'],
                                            new_dev['func'],
                                            new_dev['requested_vslot'],
                                            opts)
            self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)

            # The device model reports the actual slot back via xenstore.
            vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                    % self.getDomid())
        else:
            # Domain not running yet: the requested slot is the answer.
            vslot = new_dev['requested_vslot']

        return vslot
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        @raise VmError: invalid MAC address, or device creation failed.
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        # Sanity-check the MAC address on virtual network interfaces.
        if dev_type == 'vif':
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Roll back the bookkeeping done by device_add above before
                # re-raising; cleanup differs per device type.
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Domain not running: record the config only, no live device.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        Handles both attach ('Initialising' state) and detach ('Closing')
        of a single PCI device, for HVM and PV guests, updating the
        domain's stored configuration accordingly.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise XendError: detaching when no pci platform exists.
        @raise VmError: HVM detach of a device that is not connected.
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                vslot = self.hvm_pci_device_create(dev_config)
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot
            else:
                # HVM PCI device detachment
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                # Find the vslot of the matching BDF; AUTO_PHP_SLOT_STR
                # acts as the "not found" sentinel.
                vslot = AUTO_PHP_SLOT_STR
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        if x.has_key('vslot'):
                            vslot = x['vslot']
                        else:
                            vslot = x['requested_vslot']
                        break
                if vslot == AUTO_PHP_SLOT_STR:
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslot, 16))
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
            if not self.info.is_hvm():
                # in PV case, wait until backend state becomes connected.
                dev_control.waitForDevice_reconfigure(devid)
            num_devs = dev_control.cleanupDevice(devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy pci and remove config.
            if num_devs == 0:
                if self.info.is_hvm():
                    self.destroyDevice('pci', devid, True)
                    del self.info['devices'][dev_uuid]
                    platform = self.info['platform']
                    orig_dev_num = len(platform['pci'])
                    # TODO: can use this to keep some info to ask high level
                    # management tools to hot insert a new passthrough dev
                    # after migration
                    if orig_dev_num != 0:
                        #platform['pci'] = ["%dDEVs" % orig_dev_num]
                        platform['pci'] = []
                else:
                    self.destroyDevice('pci', devid)
                    del self.info['devices'][dev_uuid]
        else:
            # Domain not running: edit the stored SXP config directly.
            new_dev_sxp = ['pci']
            for cur_dev in sxp.children(existing_dev_info, 'dev'):
                if pci_state == 'Closing':
                    # Drop the entry matching the device being detached.
                    if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                       int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                       int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                       int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                        continue
                new_dev_sxp.append(cur_dev)

            if pci_state == 'Initialising':
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no 'dev' left in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
                if self.info.is_hvm():
                    platform = self.info['platform']
                    orig_dev_num = len(platform['pci'])
                    # TODO: can use this to keep some info to ask high level
                    # management tools to hot insert a new passthrough dev
                    # after migration
                    if orig_dev_num != 0:
                        #platform['pci'] = ["%dDEVs" % orig_dev_num]
                        platform['pci'] = []

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device, handling attach
        (Initialising) and detach (Closing) requests.

        @param dev_sxp: vscsi device configuration
        @type dev_sxp: SXP object (parsed config)
        @return: True on success; False if dev_sxp is not a vscsi node
        @raise XendError: on conflicting/undefined device or mode/backend
            mismatch with the existing controller.
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            # True if any 'dev' child of dev_info matches one of the given
            # physical (p-dev) or virtual (v-dev) device names.
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            # Normalise a backend reference (name or domid) to a domid string.
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # The new device must use the same feature-host mode as the
            # existing controller it is joining.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            # Likewise the backend domain must match (default: dom0).
            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Inactive (managed) domain: rebuild the controller sxpr by hand,
            # starting from the current mode and optional backend.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In feature-host mode (1) the whole controller goes away,
                    # so keep no devices; otherwise drop only the v_devs
                    # being detached.
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        PCI and vscsi devices are delegated to their specialised handlers;
        everything else is treated as a block device reconfiguration.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise VmError: if the device cannot be identified or the requested
            reconfiguration is refused.
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            # Derive the device number from the virtual device name,
            # stripping any "ioemu:" prefix and ":disk"/":cdrom" suffix.
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            # Inactive domain: only allow swapping media in a read-only
            # cdrom device; refuse any other reconfiguration.
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
1095 def waitForDevices(self):
1096 """Wait for this domain's configured devices to connect.
1098 @raise VmError: if any device fails to initialise.
1099 """
1100 for devclass in XendDevices.valid_devices():
1101 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Detach the passed-through PCI device at the given virtual slot
        from an HVM guest, after checking co-assignment constraints.

        @param vslot: virtual slot number (int or string convertible to int)
        @return: 0 on success
        @raise VmError: on non-HVM guest, unknown vslot, non-hotpluggable
            slot, or a still-assigned co-assigned device.
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            # Older configs store 'vslot'; newer ones 'requested_vslot'.
            if x.has_key('vslot'):
                x_vslot = x['vslot']
            else:
                x_vslot = x['requested_vslot']
            if int(x_vslot, 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        if vslot == AUTO_PHP_SLOT:
            raise VmError("Device @ vslot 0x%x doesn't support hotplug." % (vslot))

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        # NOTE(review): 'x' below is the loop variable left over from the
        # search loop above -- it is the matched device entry here because
        # the devnum >= pci_len check guarantees the loop hit 'break'.
        from xen.xend.server.pciif import PciDevice
        domain = int(x['domain'],16)
        bus = int(x['bus'],16)
        dev = int(x['slot'],16)
        func = int(x['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse it's resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assignment device %s is still " + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        if self.domid is not None:
            # Running domain: ask the device model to eject the device.
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing its saved configuration.

        @param deviceClass: device class name, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id or virtual device name
        @param force: skip waiting for backend teardown
        @param rm_cfg: also remove the device from the managed config
        @return: result of the controller's destroyDevice, or None if the
            domain is not running
        @raise XendError: if rm_cfg is set and the device is not defined.
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                # NOTE(review): if no entry in sxprs matches 'dev',
                # 'backend' is never bound and the call below would raise
                # NameError -- presumably the device always exists here;
                # verify against callers.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running domain: find the config entry via the MAC
                    # address recorded in the saved sxprs.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
    def getDeviceSxprs(self, deviceClass):
        """Return [devnum, sxpr] pairs for all devices of deviceClass.

        PCI is special-cased: the raw 'devs' list of the single shared pci
        config node is returned. For running/paused/crashed domains the
        live controller is queried; otherwise the managed config is used.
        """
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                # 'vbd' also covers 'tap' devices.
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    # Rebuild the controller sxpr, tagging each dev with an
                    # (unknown) frontstate and taking its devid as dev_num.
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    # Derive the device number from the virtual device name.
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs
1272 def getBlockDeviceClass(self, devid):
1273 # To get a device number from the devid,
1274 # we temporarily use the device controller of VBD.
1275 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1276 dev_info = self._getDeviceInfo_vbd(dev)
1277 if dev_info:
1278 return dev_info[0]
1280 def _getDeviceInfo_vif(self, mac):
1281 for dev_type, dev_info in self.info.all_devices_sxpr():
1282 if dev_type != 'vif':
1283 continue
1284 if mac == sxp.child_value(dev_info, 'mac'):
1285 return dev_info
1287 def _getDeviceInfo_vbd(self, devid):
1288 for dev_type, dev_info in self.info.all_devices_sxpr():
1289 if dev_type != 'vbd' and dev_type != 'tap':
1290 continue
1291 dev = sxp.child_value(dev_info, 'dev')
1292 dev = dev.split(':')[0]
1293 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1294 if devid == dev:
1295 return dev_info
    def _getDeviceInfo_pci(self, devid):
        """Return the single shared 'pci' config sxpr, or None.

        NOTE(review): devid is ignored -- all passed-through PCI devices
        share one conf node (callers pass '0'), so the first pci entry
        found is returned regardless of devid.
        """
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'pci':
                continue
            return dev_info
        return None
1304 def _getDeviceInfo_vscsi(self, devid):
1305 devid = int(devid)
1306 for dev_type, dev_info in self.info.all_devices_sxpr():
1307 if dev_type != 'vscsi':
1308 continue
1309 devs = sxp.children(dev_info, 'dev')
1310 if devid == int(sxp.child_value(devs[0], 'devid')):
1311 return dev_info
1312 return None
1314 def _get_assigned_pci_devices(self, devid = 0):
1315 if self.domid is not None:
1316 return get_assigned_pci_devices(self.domid)
1318 dev_str_list = []
1319 dev_info = self._getDeviceInfo_pci(devid)
1320 if dev_info is None:
1321 return dev_str_list
1322 dev_uuid = sxp.child_value(dev_info, 'uuid')
1323 pci_conf = self.info['devices'][dev_uuid][1]
1324 pci_devs = pci_conf['devs']
1325 for pci_dev in pci_devs:
1326 domain = int(pci_dev['domain'], 16)
1327 bus = int(pci_dev['bus'], 16)
1328 slot = int(pci_dev['slot'], 16)
1329 func = int(pci_dev['func'], 16)
1330 dev_str = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
1331 dev_str_list = dev_str_list + [dev_str]
1332 return dev_str_list
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never let dom0 be ballooned below its configured floor.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            # Growing the target may require freeing memory first (in KiB).
            if target > memory_cur:
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # xenstore target is in KiB.
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new maximum (the
            stored value is rolled back first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # Hypervisor expects KiB.
            maxmem = int(limit) * 1024
            try:
                # NOTE(review): returning here means managed_config_save
                # below never runs for a running domain -- confirm this is
                # intended before changing it.
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the stored value on hypervisor failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return a 'domain' sxpr describing every VCPU of this domain.

        For a running domain the values come from the hypervisor; for an
        inactive one, placeholder values are filled in.

        @raise XendError: wrapping any RuntimeError from the hypercall.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 # cpu_time is reported in seconds (ns / 1e9)
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Inactive domain: report offline VCPUs with the stored
                    # affinity, defaulting to all 64 possible CPUs.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
1419 def getDomInfo(self):
1420 return dom_get(self.domid)
1423 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for the privileged domain (dom0), whose
            memory/vcpu settings come from Xen itself rather than the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Map legacy names onto the XenAPI-style keys where known.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only populate devices when none are recorded yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Sync serial (vt100) and VNC console information from xenstore
        into self.info, reconfiguring the vfb device when the VNC
        location changed.

        @param transaction: optional open xenstore transaction to read
            through instead of one-shot reads.
        """
        # Not applicable before the domain exists, nor for dom0.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # First time: create the console device entry.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                    XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    if old_location == new_location:
                        # Nothing changed; only one vfb is handled.
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1531 # Function to update xenstore /vm/*
1534 def _readVm(self, *args):
1535 return xstransact.Read(self.vmpath, *args)
1537 def _writeVm(self, *args):
1538 return xstransact.Write(self.vmpath, *args)
1540 def _removeVm(self, *args):
1541 return xstransact.Remove(self.vmpath, *args)
1543 def _gatherVm(self, *args):
1544 return xstransact.Gather(self.vmpath, *args)
1546 def _listRecursiveVm(self, *args):
1547 return xstransact.ListRecursive(self.vmpath, *args)
1549 def storeVm(self, *args):
1550 return xstransact.Store(self.vmpath, *args)
1552 def permissionsVm(self, *args):
1553 return xstransact.SetPermissions(self.vmpath, *args)
1556 # Function to update xenstore /dom/*
1559 def readDom(self, *args):
1560 return xstransact.Read(self.dompath, *args)
1562 def gatherDom(self, *args):
1563 return xstransact.Gather(self.dompath, *args)
1565 def _writeDom(self, *args):
1566 return xstransact.Write(self.dompath, *args)
1568 def _removeDom(self, *args):
1569 return xstransact.Remove(self.dompath, *args)
1571 def storeDom(self, *args):
1572 return xstransact.Store(self.dompath, *args)
1575 def readDomTxn(self, transaction, *args):
1576 paths = map(lambda x: self.dompath + "/" + x, args)
1577 return transaction.read(*paths)
1579 def gatherDomTxn(self, transaction, *args):
1580 paths = map(lambda x: self.dompath + "/" + x, args)
1581 return transaction.gather(*paths)
1583 def _writeDomTxn(self, transaction, *args):
1584 paths = map(lambda x: self.dompath + "/" + x, args)
1585 return transaction.write(*paths)
1587 def _removeDomTxn(self, transaction, *args):
1588 paths = map(lambda x: self.dompath + "/" + x, args)
1589 return transaction.remove(*paths)
1591 def storeDomTxn(self, transaction, *args):
1592 paths = map(lambda x: self.dompath + "/" + x, args)
1593 return transaction.store(*paths)
1596 def _recreateDom(self):
1597 complete(self.dompath, lambda t: self._recreateDomFunc(t))
    def _recreateDomFunc(self, t):
        """Transaction body for _recreateDom: wipe and rebuild the domain's
        xenstore subtree with the proper permissions.

        @param t: open xenstore transaction rooted at dompath.
        """
        t.remove()
        t.mkdir()
        # Domain may read (but not write) its own top-level directory.
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
        for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write the domain's details (console, store, image notes, vcpu
        availability, ...) into xenstore under dompath."""
        to_store = {
            'domid':          str(self.domid),
            'vm':             self.vmpath,
            'name':           self.info['name_label'],
            'console/limit':  str(xoptions.get_console_limit() * 1024),
            'memory/target':  str(self.info['memory_dynamic_max'] / 1024),
            }

        def f(n, v):
            # Record only defined values; booleans are stored as "1"/"0".
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port',     self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type',     constype)
        f('store/port',       self.store_port)
        f('store/ring-ref',   self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # A '!'-prefixed feature means explicitly disabled.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1662 def _vcpuDomDetails(self):
1663 def availability(n):
1664 if self.info['vcpu_avail'] & (1 << n):
1665 return 'online'
1666 else:
1667 return 'offline'
1669 result = {}
1670 for v in range(0, self.info['VCPUs_max']):
1671 result["cpu/%d/availability" % v] = availability(v)
1672 return result
1675 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date. This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        # Watch the whole /vm subtree for configuration changes ...
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        # ... and the guest-writable shutdown-request node.
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read this VM's /vm entries and propagate
        any changes into self.info, re-writing the domain section of the
        store when something changed.

        @return: 1 to keep the watch registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            # Map legacy names onto XenAPI-style keys where known.
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: when a non-suspend
        shutdown is requested, record/restore its start time and schedule
        a refreshShutdown check at the timeout.

        @return: True to keep the watch registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in progress; keep its deadline.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1769 # Public Attributes for the VM
1773 def getDomid(self):
1774 return self.domid
1776 def setName(self, name, to_store = True):
1777 self._checkName(name)
1778 self.info['name_label'] = name
1779 if to_store:
1780 self.storeVm("name", name)
1782 def getName(self):
1783 return self.info['name_label']
1785 def getDomainPath(self):
1786 return self.dompath
1788 def getShutdownReason(self):
1789 return self.readDom('control/shutdown')
1791 def getStorePort(self):
1792 """For use only by image.py and XendCheckpoint.py."""
1793 return self.store_port
1795 def getConsolePort(self):
1796 """For use only by image.py and XendCheckpoint.py"""
1797 return self.console_port
1799 def getFeatures(self):
1800 """For use only by image.py."""
1801 return self.info['features']
1803 def getVCpuCount(self):
1804 return self.info['VCPUs_max']
1806 def setVCpuCount(self, vcpus):
1807 def vcpus_valid(n):
1808 if vcpus <= 0:
1809 raise XendError('Zero or less VCPUs is invalid')
1810 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1811 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1812 vcpus_valid(vcpus)
1814 self.info['vcpu_avail'] = (1 << vcpus) - 1
1815 if self.domid >= 0:
1816 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1817 self._writeDom(self._vcpuDomDetails())
1818 self.info['VCPUs_live'] = vcpus
1819 else:
1820 if self.info['VCPUs_max'] > vcpus:
1821 # decreasing
1822 del self.info['cpus'][vcpus:]
1823 elif self.info['VCPUs_max'] < vcpus:
1824 # increasing
1825 for c in range(self.info['VCPUs_max'], vcpus):
1826 self.info['cpus'].append(list())
1827 self.info['VCPUs_max'] = vcpus
1828 xen.xend.XendDomain.instance().managed_config_save(self)
1829 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1830 vcpus)
1832 def getMemoryTarget(self):
1833 """Get this domain's target memory size, in KB."""
1834 return self.info['memory_dynamic_max'] / 1024
1836 def getMemoryMaximum(self):
1837 """Get this domain's maximum memory size, in KB."""
1838 # remember, info now stores memory in bytes
1839 return self.info['memory_static_max'] / 1024
1841 def getResume(self):
1842 return str(self._resume)
1844 def setResume(self, isresume):
1845 self._resume = isresume
1847 def getCpus(self):
1848 return self.info['cpus']
1850 def setCpus(self, cpumap):
1851 self.info['cpus'] = cpumap
1853 def getCap(self):
1854 return self.info['vcpus_params']['cap']
1856 def setCap(self, cpu_cap):
1857 self.info['vcpus_params']['cap'] = cpu_cap
1859 def getWeight(self):
1860 return self.info['vcpus_params']['weight']
1862 def setWeight(self, cpu_weight):
1863 self.info['vcpus_params']['weight'] = cpu_weight
1865 def getRestartCount(self):
1866 return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor info record; when
            None it is looked up (and a vanished domain is cleaned up).
        """

        # If set at the end of this method, a restart is required, with the
        # given reason. This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists. This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded. It will also occur if someone
                    # destroys a domain beneath us. We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen. This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped. We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain. We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain. XendCheckpoint will do
                        # this once it has finished. However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain. This can happen if a restore is in progress, or has
                # failed. Ignore this domain.
                pass
            else:
                # Domain is alive. If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock; guard against concurrent restarts.
        if restart_reason and not self.restart_in_progress:
            self.restart_in_progress = True
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1979 # Restart functions - handling whether we come back up on shutdown.
    def _clearRestart(self):
        """Forget the recorded shutdown start time, ending the current
        restart cycle's bookkeeping in xenstore."""
        self._removeDom("xend/shutdown_start_time")
1985 def _maybeDumpCore(self, reason):
1986 if reason == 'crash':
1987 if xoptions.get_enable_dump() or self.get_on_crash() \
1988 in ['coredump_and_destroy', 'coredump_and_restart']:
1989 try:
1990 self.dumpCore()
1991 except XendError:
1992 # This error has been logged -- there's nothing more
1993 # we can do in this context.
1994 pass
1996 def _maybeRestart(self, reason):
1997 # Before taking configured action, dump core if configured to do so.
1999 self._maybeDumpCore(reason)
2001 # Dispatch to the correct method based upon the configured on_{reason}
2002 # behaviour.
2003 actions = {"destroy" : self.destroy,
2004 "restart" : self._restart,
2005 "preserve" : self._preserve,
2006 "rename-restart" : self._renameRestart,
2007 "coredump-destroy" : self.destroy,
2008 "coredump-restart" : self._restart}
2010 action_conf = {
2011 'poweroff': 'actions_after_shutdown',
2012 'reboot': 'actions_after_reboot',
2013 'crash': 'actions_after_crash',
2016 action_target = self.info.get(action_conf.get(reason))
2017 func = actions.get(action_target, None)
2018 if func and callable(func):
2019 func()
2020 else:
2021 self.destroy() # default to destroy
    def _renameRestart(self):
        """Restart the domain while renaming and preserving the old
        instance (the 'rename-restart' shutdown action)."""
        self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # A still-present RESTART_IN_PROGRESS marker means a previous
        # restart attempt died mid-way; bail out rather than loop.
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Throttle crash/reboot loops: refuse to restart a domain that
        # only just started.
        elapse = time.time() - self.info['start_time']
        if elapse < MINIMUM_RESTART_TIME:
            log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
                      'Refusing to restart to avoid loops.',
                      self.info['name_label'], elapse)
            self.destroy()
            return

        # Snapshot the xend/* subtree so it can be copied into the new
        # domain's VM record below.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                # Bump the persisted restart counter on the new instance.
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                # Clear the marker on whichever domain record survives,
                # then destroy the half-created new domain.
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name.  This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: a copy of self.info carrying the ORIGINAL name/uuid, to be
            used for the replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the old identity; this (dead) domain takes the
        # freshly generated name/uuid so the two records cannot collide.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
    def _preserve(self):
        """Keep this dead domain around (for debugging): stop watching its
        VM path, mark the shutdown as handled, and report it halted."""
        log.info("Preserving dead domain %s (%d).", self.info['name_label'],
                 self.domid)
        self._unwatchVm()
        # refreshShutdown checks this node to avoid re-processing the
        # shutdown of a preserved domain.
        self.storeDom('xend/shutdown_completed', 'True')
        self._stateSet(DOM_STATE_HALTED)
2125 # Debugging ..
2128 def dumpCore(self, corefile = None):
2129 """Create a core dump for this domain.
2131 @raise: XendError if core dumping failed.
2132 """
2134 if not corefile:
2135 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2136 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
2137 self.info['name_label'], self.domid)
2139 if os.path.isdir(corefile):
2140 raise XendError("Cannot dump core in a directory: %s" %
2141 corefile)
2143 try:
2144 try:
2145 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2146 xc.domain_dumpcore(self.domid, corefile)
2147 except RuntimeError, ex:
2148 corefile_incomp = corefile+'-incomplete'
2149 try:
2150 os.rename(corefile, corefile_incomp)
2151 except:
2152 pass
2154 log.error("core dump failed: id = %s name = %s: %s",
2155 self.domid, self.info['name_label'], str(ex))
2156 raise XendError("Failed to dump core: %s" % str(ex))
2157 finally:
2158 self._removeVm(DUMPCORE_IN_PROGRESS)
2161 # Device creation/deletion functions
    def _createDevice(self, deviceClass, devConfig):
        """Create one device via its class's controller; returns the devid
        allocated by the controller."""
        return self.getDeviceController(deviceClass).createDevice(devConfig)
    def _waitForDevice(self, deviceClass, devid):
        """Block until the given device reports ready via its controller."""
        return self.getDeviceController(deviceClass).waitForDevice(devid)
    def _waitForDeviceUUID(self, dev_uuid):
        """Wait for the device identified by dev_uuid to become ready.
        Note: raises TypeError if dev_uuid is unknown (the .get() returns
        None, which cannot be unpacked) -- callers pass known uuids."""
        deviceClass, config = self.info['devices'].get(dev_uuid)
        self._waitForDevice(deviceClass, config['devid'])
    def _waitForDevice_destroy(self, deviceClass, devid, backpath):
        """Block until the given device has been torn down (backend at
        backpath gone)."""
        return self.getDeviceController(deviceClass).waitForDevice_destroy(
            devid, backpath)
    def _reconfigureDevice(self, deviceClass, devid, devconfig):
        """Apply a new configuration to an existing device."""
        return self.getDeviceController(deviceClass).reconfigureDevice(
            devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        vscsi devices are collected first and created in sorted devid
        order (see below); all other device classes are created in the
        order given by ordered_device_refs().

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Defer vscsi creation: record devid -> uuid for sorting.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        # vscsi devices are created in sorted devid order so they appear
        # as the expected /dev/sdxx in the guest.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices.  Nothrow guarantee.

        NOTE(review): the 'suspend' parameter is unused in this body --
        confirm whether callers rely on it elsewhere before removing.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            # Read-only transaction: always abort, never commit.
            t.abort()
2266 def getDeviceController(self, name):
2267 """Get the device controller for this domain, and if it
2268 doesn't exist, create it.
2270 @param name: device class name
2271 @type name: string
2272 @rtype: subclass of DevController
2273 """
2274 if name not in self._deviceControllers:
2275 devController = XendDevices.make_controller(name, self)
2276 if not devController:
2277 raise XendError("Unknown device type: %s" % name)
2278 self._deviceControllers[name] = devController
2280 return self._deviceControllers[name]
2283 # Migration functions (public)
2286 def testMigrateDevices(self, network, dst):
2287 """ Notify all device about intention of migration
2288 @raise: XendError for a device that cannot be migrated
2289 """
2290 for (n, c) in self.info.all_devices_sxpr():
2291 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2292 if rc != 0:
2293 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration; on failure, roll back by
        notifying every device of the recovery, in order.
        """
        # ctr counts devices successfully notified in this step.
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            # Recover all devices.  Devices that had not yet been
            # notified in this step (ctr has reached 0) are recovered at
            # the previous step instead.
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
    def migrateDevice(self, deviceClass, deviceConfig, network, dst,
                      step, domName=''):
        """Forward a migration-step notification to the device's
        controller; returns the controller's status code."""
        return self.getDeviceController(deviceClass).migrate(deviceConfig,
                        network, dst, step, domName)
    def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
                              dst, step, domName=''):
        """Tell the device's controller to undo a migration step after a
        failure in migrateDevices."""
        return self.getDeviceController(deviceClass).recover_migrate(
                        deviceConfig, network, dst, step, domName)
2324 ## private:
2326 def _constructDomain(self):
2327 """Construct the domain.
2329 @raise: VmError on error
2330 """
2332 log.debug('XendDomainInfo.constructDomain')
2334 self.shutdownStartTime = None
2335 self.restart_in_progress = False
2337 hap = 0
2338 hvm = self.info.is_hvm()
2339 if hvm:
2340 hap = self.info.is_hap()
2341 info = xc.xeninfo()
2342 if 'hvm' not in info['xen_caps']:
2343 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2344 "supported by your CPU and enabled in your "
2345 "BIOS?")
2347 # Hack to pre-reserve some memory for initial domain creation.
2348 # There is an implicit memory overhead for any domain creation. This
2349 # overhead is greater for some types of domain than others. For
2350 # example, an x86 HVM domain will have a default shadow-pagetable
2351 # allocation of 1MB. We free up 4MB here to be on the safe side.
2352 # 2MB memory allocation was not enough in some cases, so it's 4MB now
2353 balloon.free(4*1024, self) # 4MB should be plenty
2355 ssidref = 0
2356 if security.on() == xsconstants.XS_POLICY_USE:
2357 ssidref = security.calc_dom_ssidref_from_info(self.info)
2358 if security.has_authorization(ssidref) == False:
2359 raise VmError("VM is not authorized to run.")
2361 s3_integrity = 0
2362 if self.info.has_key('s3_integrity'):
2363 s3_integrity = self.info['s3_integrity']
2364 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)
2366 try:
2367 self.domid = xc.domain_create(
2368 domid = 0,
2369 ssidref = ssidref,
2370 handle = uuid.fromString(self.info['uuid']),
2371 flags = flags,
2372 target = self.info.target())
2373 except Exception, e:
2374 # may get here if due to ACM the operation is not permitted
2375 if security.on() == xsconstants.XS_POLICY_ACM:
2376 raise VmError('Domain in conflict set with running domain?')
2378 if self.domid < 0:
2379 raise VmError('Creating domain failed: name=%s' %
2380 self.info['name_label'])
2382 self.dompath = GetDomainPath(self.domid)
2384 self._recreateDom()
2386 # Set timer configration of domain
2387 timer_mode = self.info["platform"].get("timer_mode")
2388 if hvm and timer_mode is not None:
2389 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2390 long(timer_mode))
2392 # Set Viridian interface configuration of domain
2393 viridian = self.info["platform"].get("viridian")
2394 if arch.type == "x86" and hvm and viridian is not None:
2395 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2397 # Optionally enable virtual HPET
2398 hpet = self.info["platform"].get("hpet")
2399 if hvm and hpet is not None:
2400 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2401 long(hpet))
2403 # Optionally enable periodic vpt aligning
2404 vpt_align = self.info["platform"].get("vpt_align")
2405 if hvm and vpt_align is not None:
2406 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2407 long(vpt_align))
2409 # Set maximum number of vcpus in domain
2410 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2412 # Check for cpu_{cap|weight} validity for credit scheduler
2413 if XendNode.instance().xenschedinfo() == 'credit':
2414 cap = self.getCap()
2415 weight = self.getWeight()
2417 assert type(weight) == int
2418 assert type(cap) == int
2420 if weight < 1 or weight > 65535:
2421 raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2423 if cap < 0 or cap > self.getVCpuCount() * 100:
2424 raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2425 (self.getVCpuCount() * 100))
2427 # Test whether the devices can be assigned with VT-d
2428 pci = self.info["platform"].get("pci")
2429 pci_str = ''
2430 if pci and len(pci) > 0:
2431 pci = map(lambda x: x[0:4], pci) # strip options
2432 pci_str = str(pci)
2433 if hvm and pci_str:
2434 bdf = xc.test_assign_device(0, pci_str)
2435 if bdf != 0:
2436 if bdf == -1:
2437 raise VmError("failed to assign device: maybe the platform"
2438 " doesn't support VT-d, or VT-d isn't enabled"
2439 " properly?")
2440 bus = (bdf >> 16) & 0xff
2441 devfn = (bdf >> 8) & 0xff
2442 dev = (devfn >> 3) & 0x1f
2443 func = devfn & 0x7
2444 raise VmError("fail to assign device(%x:%x.%x): maybe it has"
2445 " already been assigned to other domain, or maybe"
2446 " it doesn't exist." % (bus, dev, func))
2448 # register the domain in the list
2449 from xen.xend import XendDomain
2450 XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce the domain to xenstored, wiring up its store ring.
        Requires domid, store_mfn and store_port to have been set.

        @raise: XendError if the introduce call fails."""
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Set this domain's target domain and record it in xenstore.

        @raise: XendError if the hypervisor call fails."""
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.
        Otherwise, on multi-node machines, pick the least-loaded NUMA
        node with enough memory and pin all vcpus to its cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty restriction entry.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            # Explicit per-vcpu pinning was configured: honour it.
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Return the index of the least-loaded node among
                # node_list (all nodes if None), where load counts other
                # domains' online vcpus overlapping each node's cpus,
                # normalised by the node's cpu count.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        # Scale by 16/cpu-count so bigger nodes look
                        # proportionally less loaded.
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        # Nodes without cpus or outside the candidate
                        # list must never be chosen.
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Candidate nodes must have cpus and enough free memory
                # for this domain's dynamic maximum.
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the freshly constructed domain: run the bootloader,
        create the image, size and reserve memory, create channels and
        devices, and mark the domain running.

        @raise: VmError on any failure (RuntimeErrors are wrapped)."""
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee."""

        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            # Cleared directly here because _stateSet must see the old id.
            self.domid = None  # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
    def unwatchShutdown(self):
        """Remove the watch on the domain's control/shutdown node, if any.
        Idempotent.  Nothrow guarantee.  Expects to be protected by the
        refresh_shutdown_lock."""

        try:
            try:
                if self.shutdownWatch:
                    self.shutdownWatch.unwatch()
            finally:
                # Always drop the reference, even if unwatch() raised.
                self.shutdownWatch = None
        except:
            log.exception("Unwatching control/shutdown failed.")
2659 def waitForShutdown(self):
2660 self.state_updated.acquire()
2661 try:
2662 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2663 self.state_updated.wait(timeout=1.0)
2664 finally:
2665 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down.  If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up.  HVM guests with no PV drivers
        should already be shutdown.
        """
        state = "suspend"
        nr_tries = 60

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                if state == "suspend":
                    # Guest clears control/shutdown when it acks the
                    # request; while it still reads "suspend" we keep
                    # counting down.
                    if nr_tries == 0:
                        msg = ('Timeout waiting for domain %s to suspend'
                            % self.domid)
                        # Withdraw the request before giving up.
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2693 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish restoring this domain (called from XendCheckpoint):
        record the ring frames, re-introduce the domain to xenstored,
        recreate the image/device model and resume watches."""

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True: device model is being restored, not freshly created.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        """Mark the restore as finished by clearing the resume flag."""
        self.setResume(False)
2718 # VM Destroy
    def _prepare_phantom_paths(self):
        """Collect the xenstore paths of phantom vbd devices (backend and
        frontend) so they can be removed after the normal devices.

        @return: list of xenstore paths (possibly empty)."""
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        plist = []
        if self.domid is not None:
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd =  xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                # Read-only transaction: abort, never commit.
                t.abort()
        return plist
2740 def _cleanup_phantom_devs(self, plist):
2741 # remove phantom devices
2742 if not plist == []:
2743 time.sleep(2)
2744 for paths in plist:
2745 if paths.find('backend') != -1:
2746 # Modify online status /before/ updating state (latter is watched by
2747 # drivers, so this ordering avoids a race).
2748 xstransact.Write(paths, 'online', "0")
2749 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2750 # force
2751 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee."""

        # Idempotent: already-destroyed domains have domid == None.
        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause first so the guest cannot run during teardown.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted from the managed-domain list too.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and create a fresh one from the same
        configuration, preserving the xend/* VM records."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot the xend/* subtree before destroying, so it can be
        # copied into the new domain's VM record.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # Force allocation of a fresh domid on re-create.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain (e.g. after live checkpoint, or after
        a later error during save or migrate); checks that the domain
        is currently suspended first so safe to call from anywhere."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            # fast path: guest advertised SUSPEND_CANCEL, so devices can
            # be kept rather than torn down and recreated.
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # NOTE(review): self.image is dereferenced unconditionally here --
        # confirm it cannot be None on this path.
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2861 # Channels for xenstore and console
    def _createChannels(self):
        """Create the channels to the domain.

        Allocates one event channel for the xenstore ring and one for
        the console.
        """
        self.store_port = self._createChannel()
        self.console_port = self._createChannel()
2871 def _createChannel(self):
2872 """Create an event channel to the domain.
2873 """
2874 try:
2875 if self.domid != None:
2876 return xc.evtchn_alloc_unbound(domid = self.domid,
2877 remote_dom = 0)
2878 except:
2879 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2880 raise
2882 def _resetChannels(self):
2883 """Reset all event channels in the domain.
2884 """
2885 try:
2886 if self.domid != None:
2887 return xc.evtchn_reset(dom = self.domid)
2888 except:
2889 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2890 raise
2894 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        Updates self.info with the image configuration produced by the
        bootloader; no-op for HVM boot or a direct dom0 kernel boot.

        @raise: VmError if no bootable disk exists or the bootloader
            produces no configuration."""

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = osdep.pygrub_path

            blcfg = None
            # Run the bootloader against the first bootable disk.
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # A tap disk backed by a plain file (st_rdev == 0) must be
            # mounted before pygrub can read it, unless it is raw.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device.  pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2972 # VM Functions
2975 def _readVMDetails(self, params):
2976 """Read the specified parameters from the store.
2977 """
2978 try:
2979 return self._gatherVm(*params)
2980 except ValueError:
2981 # One of the int/float entries in params has a corresponding store
2982 # entry that is invalid. We recover, because older versions of
2983 # Xend may have put the entry there (memory/target, for example),
2984 # but this is in general a bad situation to have reached.
2985 log.exception(
2986 "Store corrupted at %s! Domain %d's configuration may be "
2987 "affected.", self.vmpath, self.domid)
2988 return []
2990 def _cleanupVm(self):
2991 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2993 self._unwatchVm()
2995 try:
2996 self._removeVm()
2997 except:
2998 log.exception("Removing VM path failed.")
3001 def checkLiveMigrateMemory(self):
3002 """ Make sure there's enough memory to migrate this domain """
3003 overhead_kb = 0
3004 if arch.type == "x86":
3005 # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
3006 # the minimum that Xen would allocate if no value were given.
3007 overhead_kb = self.info['VCPUs_max'] * 1024 + \
3008 (self.info['memory_static_max'] / 1024 / 1024) * 4
3009 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
3010 # The domain might already have some shadow memory
3011 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
3012 if overhead_kb > 0:
3013 balloon.free(overhead_kb, self)
3015 def _unwatchVm(self):
3016 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3017 guarantee."""
3018 try:
3019 try:
3020 if self.vmWatch:
3021 self.vmWatch.unwatch()
3022 finally:
3023 self.vmWatch = None
3024 except:
3025 log.exception("Unwatching VM path failed.")
3027 def testDeviceComplete(self):
3028 """ For Block IO migration safety we must ensure that
3029 the device has shutdown correctly, i.e. all blocks are
3030 flushed to disk
3031 """
3032 start = time.time()
3033 while True:
3034 test = 0
3035 diff = time.time() - start
3036 vbds = self.getDeviceController('vbd').deviceIDs()
3037 taps = self.getDeviceController('tap').deviceIDs()
3038 for i in vbds + taps:
3039 test = 1
3040 log.info("Dev %s still active, looping...", i)
3041 time.sleep(0.1)
3043 if test == 0:
3044 break
3045 if diff >= MIGRATE_TIMEOUT:
3046 log.info("Dev still active but hit max loop timeout")
3047 break
3049 def testvifsComplete(self):
3050 """ In case vifs are released and then created for the same
3051 domain, we need to wait the device shut down.
3052 """
3053 start = time.time()
3054 while True:
3055 test = 0
3056 diff = time.time() - start
3057 for i in self.getDeviceController('vif').deviceIDs():
3058 test = 1
3059 log.info("Dev %s still active, looping...", i)
3060 time.sleep(0.1)
3062 if test == 0:
3063 break
3064 if diff >= MIGRATE_TIMEOUT:
3065 log.info("Dev still active but hit max loop timeout")
3066 break
3068 def _storeVmDetails(self):
3069 to_store = {}
3071 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
3072 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
3073 if self._infoIsSet(info_key):
3074 to_store[key] = str(self.info[info_key])
3076 if self._infoIsSet("static_memory_min"):
3077 to_store["memory"] = str(self.info["static_memory_min"])
3078 if self._infoIsSet("static_memory_max"):
3079 to_store["maxmem"] = str(self.info["static_memory_max"])
3081 image_sxpr = self.info.image_sxpr()
3082 if image_sxpr:
3083 to_store['image'] = sxp.to_string(image_sxpr)
3085 if not self._readVm('xend/restart_count'):
3086 to_store['xend/restart_count'] = str(0)
3088 log.debug("Storing VM details: %s", scrub_password(to_store))
3090 self._writeVm(to_store)
3091 self._setVmPermissions()
3093 def _setVmPermissions(self):
3094 """Allow the guest domain to read its UUID. We don't allow it to
3095 access any other entry, for security."""
3096 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3097 { 'dom' : self.domid,
3098 'read' : True,
3099 'write' : False })
3102 # Utility functions
3105 def __getattr__(self, name):
3106 if name == "state":
3107 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3108 log.warn("".join(traceback.format_stack()))
3109 return self._stateGet()
3110 else:
3111 raise AttributeError(name)
3113 def __setattr__(self, name, value):
3114 if name == "state":
3115 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3116 log.warn("".join(traceback.format_stack()))
3117 self._stateSet(value)
3118 else:
3119 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a power-state change: wake any waiters on state_updated
        and emit a Xen-API 'power_state' event, but only when the live
        state (from _stateGet()) actually differs from the requested one."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                # Imported lazily here — presumably to avoid an import
                # cycle at module load time; confirm before moving it.
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
3135 def _stateGet(self):
3136 # Lets try and reconsitute the state from xc
3137 # first lets try and get the domain info
3138 # from xc - this will tell us if the domain
3139 # exists
3140 info = dom_get(self.getDomid())
3141 if info is None or info['shutdown']:
3142 # We are either HALTED or SUSPENDED
3143 # check saved image exists
3144 from xen.xend import XendDomain
3145 managed_config_path = \
3146 XendDomain.instance()._managed_check_point_path( \
3147 self.get_uuid())
3148 if os.path.exists(managed_config_path):
3149 return XEN_API_VM_POWER_STATE_SUSPENDED
3150 else:
3151 return XEN_API_VM_POWER_STATE_HALTED
3152 elif info['crashed']:
3153 # Crashed
3154 return XEN_API_VM_POWER_STATE_CRASHED
3155 else:
3156 # We are either RUNNING or PAUSED
3157 if info['paused']:
3158 return XEN_API_VM_POWER_STATE_PAUSED
3159 else:
3160 return XEN_API_VM_POWER_STATE_RUNNING
3162 def _infoIsSet(self, name):
3163 return name in self.info and self.info[name] is not None
3165 def _checkName(self, name):
3166 """Check if a vm name is valid. Valid names contain alphabetic
3167 characters, digits, or characters in '_-.:/+'.
3168 The same name cannot be used for more than one vm at the same time.
3170 @param name: name
3171 @raise: VmError if invalid
3172 """
3173 from xen.xend import XendDomain
3175 if name is None or name == '':
3176 raise VmError('Missing VM Name')
3178 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3179 raise VmError('Invalid VM Name')
3181 dom = XendDomain.instance().domain_lookup_nr(name)
3182 if dom and dom.info['uuid'] != self.info['uuid']:
3183 raise VmError("VM name '%s' already exists%s" %
3184 (name,
3185 dom.domid is not None and
3186 (" as domain %s" % str(dom.domid)) or ""))
3189 def update(self, info = None, refresh = True, transaction = None):
3190 """Update with info from xc.domain_getinfo().
3191 """
3192 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3193 str(self.domid))
3195 if not info:
3196 info = dom_get(self.domid)
3197 if not info:
3198 return
3200 if info["maxmem_kb"] < 0:
3201 info["maxmem_kb"] = XendNode.instance() \
3202 .physinfo_dict()['total_memory'] * 1024
3204 # make sure state is reset for info
3205 # TODO: we should eventually get rid of old_dom_states
3207 self.info.update_config(info)
3208 self._update_consoles(transaction)
3210 if refresh:
3211 self.refreshShutdown(info)
3213 log.trace("XendDomainInfo.update done on domain %s: %s",
3214 str(self.domid), self.info)
3216 def sxpr(self, ignore_store = False, legacy_only = True):
3217 result = self.info.to_sxp(domain = self,
3218 ignore_devices = ignore_store,
3219 legacy_only = legacy_only)
3221 return result
3223 # Xen API
3224 # ----------------------------------------------------------------
3226 def get_uuid(self):
3227 dom_uuid = self.info.get('uuid')
3228 if not dom_uuid: # if it doesn't exist, make one up
3229 dom_uuid = uuid.createString()
3230 self.info['uuid'] = dom_uuid
3231 return dom_uuid
3233 def get_memory_static_max(self):
3234 return self.info.get('memory_static_max', 0)
3235 def get_memory_static_min(self):
3236 return self.info.get('memory_static_min', 0)
3237 def get_memory_dynamic_max(self):
3238 return self.info.get('memory_dynamic_max', 0)
3239 def get_memory_dynamic_min(self):
3240 return self.info.get('memory_dynamic_min', 0)
3242 # only update memory-related config values if they maintain sanity
3243 def _safe_set_memory(self, key, newval):
3244 oldval = self.info.get(key, 0)
3245 try:
3246 self.info[key] = newval
3247 self.info._memory_sanity_check()
3248 except Exception, ex:
3249 self.info[key] = oldval
3250 raise
3252 def set_memory_static_max(self, val):
3253 self._safe_set_memory('memory_static_max', val)
3254 def set_memory_static_min(self, val):
3255 self._safe_set_memory('memory_static_min', val)
3256 def set_memory_dynamic_max(self, val):
3257 self._safe_set_memory('memory_dynamic_max', val)
3258 def set_memory_dynamic_min(self, val):
3259 self._safe_set_memory('memory_dynamic_min', val)
3261 def get_vcpus_params(self):
3262 if self.getDomid() is None:
3263 return self.info['vcpus_params']
3265 retval = xc.sched_credit_domain_get(self.getDomid())
3266 return retval
3267 def get_power_state(self):
3268 return XEN_API_VM_POWER_STATE[self._stateGet()]
3269 def get_platform(self):
3270 return self.info.get('platform', {})
3271 def get_pci_bus(self):
3272 return self.info.get('pci_bus', '')
3273 def get_tools_version(self):
3274 return self.info.get('tools_version', {})
3275 def get_metrics(self):
3276 return self.metrics.get_uuid();
3279 def get_security_label(self, xspol=None):
3280 import xen.util.xsm.xsm as security
3281 label = security.get_security_label(self, xspol)
3282 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """Set the security label of a domain from its old to a new value.

        @param seclab New security label formatted in the form
                      <policy type>:<policy name>:<vm label>; an empty
                      value removes the label (halted/suspended only).
        @param old_seclab The current security label that the
                          VM must have.
        @param xspol An optional policy under which this
                     update should be done. If not given,
                     then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                         the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        # (SUSPENDED is also allowed; domid 0 is exempt from this check).
        if domid != 0 and \
           state not in \
           [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
             DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # A label must have exactly three colon-separated components.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # Everything below runs under the writer side of the policy
            # lock; all early returns release it via the finally clause.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best effort: failing to save the managed config
                        # does not undo the relabel.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3409 def get_on_shutdown(self):
3410 after_shutdown = self.info.get('actions_after_shutdown')
3411 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3412 return XEN_API_ON_NORMAL_EXIT[-1]
3413 return after_shutdown
3415 def get_on_reboot(self):
3416 after_reboot = self.info.get('actions_after_reboot')
3417 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3418 return XEN_API_ON_NORMAL_EXIT[-1]
3419 return after_reboot
3421 def get_on_suspend(self):
3422 # TODO: not supported
3423 after_suspend = self.info.get('actions_after_suspend')
3424 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3425 return XEN_API_ON_NORMAL_EXIT[-1]
3426 return after_suspend
3428 def get_on_crash(self):
3429 after_crash = self.info.get('actions_after_crash')
3430 if not after_crash or after_crash not in \
3431 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3432 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3433 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3435 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3436 """ Get's a device configuration either from XendConfig or
3437 from the DevController.
3439 @param dev_class: device class, either, 'vbd' or 'vif'
3440 @param dev_uuid: device UUID
3442 @rtype: dictionary
3443 """
3444 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3446 # shortcut if the domain isn't started because
3447 # the devcontrollers will have no better information
3448 # than XendConfig.
3449 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3450 XEN_API_VM_POWER_STATE_SUSPENDED):
3451 if dev_config:
3452 return copy.deepcopy(dev_config)
3453 return None
3455 # instead of using dev_class, we use the dev_type
3456 # that is from XendConfig.
3457 controller = self.getDeviceController(dev_type)
3458 if not controller:
3459 return None
3461 all_configs = controller.getAllDeviceConfigurations()
3462 if not all_configs:
3463 return None
3465 updated_dev_config = copy.deepcopy(dev_config)
3466 for _devid, _devcfg in all_configs.items():
3467 if _devcfg.get('uuid') == dev_uuid:
3468 updated_dev_config.update(_devcfg)
3469 updated_dev_config['id'] = _devid
3470 return updated_dev_config
3472 return updated_dev_config
    def get_dev_xenapi_config(self, dev_class, dev_uuid):
        """Return the device's configuration massaged into Xen-API shape.

        Starts from get_dev_config_by_uuid and then fills in the
        class-specific Xen-API fields (vif, vbd, vtpm); returns {} when
        the device is unknown.
        """
        config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
        if not config:
            return {}

        config['VM'] = self.get_uuid()

        if dev_class == 'vif':
            # Backfill Xen-API field names from their legacy equivalents.
            if not config.has_key('name'):
                config['name'] = config.get('vifname', '')
            if not config.has_key('MAC'):
                config['MAC'] = config.get('mac', '')
            if not config.has_key('type'):
                config['type'] = 'paravirtualised'
            if not config.has_key('device'):
                devid = config.get('id')
                if devid != None:
                    config['device'] = 'eth%s' % devid
                else:
                    config['device'] = ''

            if not config.has_key('network'):
                try:
                    # Resolve the bridge (by config, or by looking up which
                    # bridge our vif interface is enslaved to) and map it
                    # to a network UUID.
                    bridge = config.get('bridge', None)
                    if bridge is None:
                        from xen.util import Brctl
                        if_to_br = dict([(i,b)
                                         for (b,ifs) in Brctl.get_state().items()
                                         for i in ifs])
                        vifname = "vif%s.%s" % (self.getDomid(),
                                                config.get('id'))
                        bridge = if_to_br.get(vifname, None)
                    config['network'] = \
                        XendNode.instance().bridge_to_network(
                        config.get('bridge')).get_uuid()
                except Exception:
                    log.exception('bridge_to_network')
                    # Ignore this for now -- it may happen if the device
                    # has been specified using the legacy methods, but at
                    # some point we're going to have to figure out how to
                    # handle that properly.

            config['MTU'] = 1500 # TODO

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                # NOTE(review): 'devid' is only bound above when the config
                # lacked a 'device' key; if 'device' was present this reads
                # an unbound local — confirm and fix upstream.
                xennode = XendNode.instance()
                rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
                config['io_read_kbs'] = rx_bps/1024
                config['io_write_kbs'] = tx_bps/1024
                rx, tx = xennode.get_vif_stat(self.domid, devid)
                config['io_total_read_kbs'] = rx/1024
                config['io_total_write_kbs'] = tx/1024
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0
                config['io_total_read_kbs'] = 0.0
                config['io_total_write_kbs'] = 0.0

        config['security_label'] = config.get('security_label', '')

        if dev_class == 'vbd':

            # Live domains report block-device utilisation; halted ones 0.
            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                controller = self.getDeviceController(dev_class)
                devid, _1, _2 = controller.getDeviceDetails(config)
                xennode = XendNode.instance()
                rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
                config['io_read_kbs'] = rd_blkps
                config['io_write_kbs'] = wr_blkps
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0

            config['VDI'] = config.get('VDI', '')
            config['device'] = config.get('dev', '')
            # A "name:cdrom"-style device string carries the VBD type.
            if ':' in config['device']:
                vbd_name, vbd_type = config['device'].split(':', 1)
                config['device'] = vbd_name
                if vbd_type == 'cdrom':
                    config['type'] = XEN_API_VBD_TYPE[0]
                else:
                    config['type'] = XEN_API_VBD_TYPE[1]

            config['driver'] = 'paravirtualised' # TODO
            config['image'] = config.get('uname', '')

            # Mode 'r' maps to RO; anything else is reported RW.
            if config.get('mode', 'r') == 'r':
                config['mode'] = 'RO'
            else:
                config['mode'] = 'RW'

        if dev_class == 'vtpm':
            if not config.has_key('type'):
                config['type'] = 'paravirtualised' # TODO
            if not config.has_key('backend'):
                config['backend'] = "00000000-0000-0000-0000-000000000000"

        return config
3573 def get_dev_property(self, dev_class, dev_uuid, field):
3574 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3575 try:
3576 return config[field]
3577 except KeyError:
3578 raise XendError('Invalid property for device: %s' % field)
3580 def set_dev_property(self, dev_class, dev_uuid, field, value):
3581 self.info['devices'][dev_uuid][1][field] = value
3583 def get_vcpus_util(self):
3584 vcpu_util = {}
3585 xennode = XendNode.instance()
3586 if 'VCPUs_max' in self.info and self.domid != None:
3587 for i in range(0, self.info['VCPUs_max']):
3588 util = xennode.get_vcpu_util(self.domid, i)
3589 vcpu_util[str(i)] = util
3591 return vcpu_util
3593 def get_consoles(self):
3594 return self.info.get('console_refs', [])
3596 def get_vifs(self):
3597 return self.info.get('vif_refs', [])
3599 def get_vbds(self):
3600 return self.info.get('vbd_refs', [])
3602 def get_vtpms(self):
3603 return self.info.get('vtpm_refs', [])
3605 def get_dpcis(self):
3606 return XendDPCI.get_by_VM(self.info.get('uuid'))
3608 def get_dscsis(self):
3609 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3611 def create_vbd(self, xenapi_vbd, vdi_image_path):
3612 """Create a VBD using a VDI from XendStorageRepository.
3614 @param xenapi_vbd: vbd struct from the Xen API
3615 @param vdi_image_path: VDI UUID
3616 @rtype: string
3617 @return: uuid of the device
3618 """
3619 xenapi_vbd['image'] = vdi_image_path
3620 if vdi_image_path.startswith('tap'):
3621 dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
3622 else:
3623 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3625 if not dev_uuid:
3626 raise XendError('Failed to create device')
3628 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3629 XEN_API_VM_POWER_STATE_PAUSED):
3630 _, config = self.info['devices'][dev_uuid]
3632 if vdi_image_path.startswith('tap'):
3633 dev_control = self.getDeviceController('tap')
3634 else:
3635 dev_control = self.getDeviceController('vbd')
3637 try:
3638 devid = dev_control.createDevice(config)
3639 dev_control.waitForDevice(devid)
3640 self.info.device_update(dev_uuid,
3641 cfg_xenapi = {'devid': devid})
3642 except Exception, exn:
3643 log.exception(exn)
3644 del self.info['devices'][dev_uuid]
3645 self.info['vbd_refs'].remove(dev_uuid)
3646 raise
3648 return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a phantom (tap) VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @return: devid of the created device (despite the historical
                 claim of a UUID return — see NOTE below)
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        # NOTE(review): 'config' is only bound in the RUNNING branch above;
        # for any other state this raises UnboundLocalError.  Also the
        # value returned is the devid, not the device UUID — confirm
        # callers' expectations before changing either.
        return config['devid']
3669 def create_vif(self, xenapi_vif):
3670 """Create VIF device from the passed struct in Xen API format.
3672 @param xenapi_vif: Xen API VIF Struct.
3673 @rtype: string
3674 @return: UUID
3675 """
3676 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3677 if not dev_uuid:
3678 raise XendError('Failed to create device')
3680 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3681 XEN_API_VM_POWER_STATE_PAUSED):
3683 _, config = self.info['devices'][dev_uuid]
3684 dev_control = self.getDeviceController('vif')
3686 try:
3687 devid = dev_control.createDevice(config)
3688 dev_control.waitForDevice(devid)
3689 self.info.device_update(dev_uuid,
3690 cfg_xenapi = {'devid': devid})
3691 except Exception, exn:
3692 log.exception(exn)
3693 del self.info['devices'][dev_uuid]
3694 self.info['vif_refs'].remove(dev_uuid)
3695 raise
3697 return dev_uuid
3699 def create_vtpm(self, xenapi_vtpm):
3700 """Create a VTPM device from the passed struct in Xen API format.
3702 @return: uuid of the device
3703 @rtype: string
3704 """
3706 if self._stateGet() not in (DOM_STATE_HALTED,):
3707 raise VmError("Can only add vTPM to a halted domain.")
3708 if self.get_vtpms() != []:
3709 raise VmError('Domain already has a vTPM.')
3710 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3711 if not dev_uuid:
3712 raise XendError('Failed to create device')
3714 return dev_uuid
3716 def create_console(self, xenapi_console):
3717 """ Create a console device from a Xen API struct.
3719 @return: uuid of device
3720 @rtype: string
3721 """
3722 if self._stateGet() not in (DOM_STATE_HALTED,):
3723 raise VmError("Can only add console to a halted domain.")
3725 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3726 if not dev_uuid:
3727 raise XendError('Failed to create device')
3729 return dev_uuid
3731 def set_console_other_config(self, console_uuid, other_config):
3732 self.info.console_update(console_uuid, 'other_config', other_config)
3734 def create_dpci(self, xenapi_pci):
3735 """Create pci device from the passed struct in Xen API format.
3737 @param xenapi_pci: DPCI struct from Xen API
3738 @rtype: bool
3739 #@rtype: string
3740 @return: True if successfully created device
3741 #@return: UUID
3742 """
3744 dpci_uuid = uuid.createString()
3746 dpci_opts = []
3747 opts_dict = xenapi_pci.get('options')
3748 for k in opts_dict.keys():
3749 dpci_opts.append([k, opts_dict[k]])
3751 # Convert xenapi to sxp
3752 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3754 target_pci_sxp = \
3755 ['pci',
3756 ['dev',
3757 ['domain', '0x%02x' % ppci.get_domain()],
3758 ['bus', '0x%02x' % ppci.get_bus()],
3759 ['slot', '0x%02x' % ppci.get_slot()],
3760 ['func', '0x%1x' % ppci.get_func()],
3761 ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3762 ['opts', dpci_opts],
3763 ['uuid', dpci_uuid]
3764 ],
3765 ['state', 'Initialising']
3768 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3770 old_pci_sxp = self._getDeviceInfo_pci(0)
3772 if old_pci_sxp is None:
3773 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3774 if not dev_uuid:
3775 raise XendError('Failed to create device')
3777 else:
3778 new_pci_sxp = ['pci']
3779 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3780 new_pci_sxp.append(existing_dev)
3781 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3783 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3784 self.info.device_update(dev_uuid, new_pci_sxp)
3786 xen.xend.XendDomain.instance().managed_config_save(self)
3788 else:
3789 try:
3790 self.device_configure(target_pci_sxp)
3792 except Exception, exn:
3793 raise XendError('Failed to create device')
3795 return dpci_uuid
3797 def create_dscsi(self, xenapi_dscsi):
3798 """Create scsi device from the passed struct in Xen API format.
3800 @param xenapi_dscsi: DSCSI struct from Xen API
3801 @rtype: string
3802 @return: UUID
3803 """
3805 dscsi_uuid = uuid.createString()
3807 # Convert xenapi to sxp
3808 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
3809 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
3810 target_vscsi_sxp = \
3811 ['vscsi',
3812 ['dev',
3813 ['devid', devid],
3814 ['p-devname', pscsi.get_dev_name()],
3815 ['p-dev', pscsi.get_physical_HCTL()],
3816 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
3817 ['state', xenbusState['Initialising']],
3818 ['uuid', dscsi_uuid]
3819 ],
3820 ['feature-host', 0]
3823 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3825 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
3827 if cur_vscsi_sxp is None:
3828 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
3829 if not dev_uuid:
3830 raise XendError('Failed to create device')
3832 else:
3833 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
3834 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
3835 new_vscsi_sxp.append(existing_dev)
3836 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
3838 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3839 self.info.device_update(dev_uuid, new_vscsi_sxp)
3841 xen.xend.XendDomain.instance().managed_config_save(self)
3843 else:
3844 try:
3845 self.device_configure(target_vscsi_sxp)
3847 except Exception, exn:
3848 raise XendError('Failed to create device')
3850 return dscsi_uuid
3853 def destroy_device_by_uuid(self, dev_type, dev_uuid):
3854 if dev_uuid not in self.info['devices']:
3855 raise XendError('Device does not exist')
3857 try:
3858 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3859 XEN_API_VM_POWER_STATE_PAUSED):
3860 _, config = self.info['devices'][dev_uuid]
3861 devid = config.get('devid')
3862 if devid != None:
3863 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
3864 else:
3865 raise XendError('Unable to get devid for device: %s:%s' %
3866 (dev_type, dev_uuid))
3867 finally:
3868 del self.info['devices'][dev_uuid]
3869 self.info['%s_refs' % dev_type].remove(dev_uuid)
3871 def destroy_vbd(self, dev_uuid):
3872 self.destroy_device_by_uuid('vbd', dev_uuid)
3874 def destroy_vif(self, dev_uuid):
3875 self.destroy_device_by_uuid('vif', dev_uuid)
3877 def destroy_vtpm(self, dev_uuid):
3878 self.destroy_device_by_uuid('vtpm', dev_uuid)
3880 def destroy_dpci(self, dev_uuid):
3882 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
3883 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
3885 old_pci_sxp = self._getDeviceInfo_pci(0)
3886 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3887 target_dev = None
3888 new_pci_sxp = ['pci']
3889 for dev in sxp.children(old_pci_sxp, 'dev'):
3890 domain = int(sxp.child_value(dev, 'domain'), 16)
3891 bus = int(sxp.child_value(dev, 'bus'), 16)
3892 slot = int(sxp.child_value(dev, 'slot'), 16)
3893 func = int(sxp.child_value(dev, 'func'), 16)
3894 name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
3895 if ppci.get_name() == name:
3896 target_dev = dev
3897 else:
3898 new_pci_sxp.append(dev)
3900 if target_dev is None:
3901 raise XendError('Failed to destroy device')
3903 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
3905 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3907 self.info.device_update(dev_uuid, new_pci_sxp)
3908 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
3909 del self.info['devices'][dev_uuid]
3910 xen.xend.XendDomain.instance().managed_config_save(self)
3912 else:
3913 try:
3914 self.device_configure(target_pci_sxp)
3916 except Exception, exn:
3917 raise XendError('Failed to destroy device')
3919 def destroy_dscsi(self, dev_uuid):
3920 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
3921 devid = dscsi.get_virtual_host()
3922 vHCTL = dscsi.get_virtual_HCTL()
3923 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
3924 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3926 target_dev = None
3927 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
3928 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
3929 if vHCTL == sxp.child_value(dev, 'v-dev'):
3930 target_dev = dev
3931 else:
3932 new_vscsi_sxp.append(dev)
3934 if target_dev is None:
3935 raise XendError('Failed to destroy device')
3937 target_dev.append(['state', xenbusState['Closing']])
3938 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
3940 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3942 self.info.device_update(dev_uuid, new_vscsi_sxp)
3943 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
3944 del self.info['devices'][dev_uuid]
3945 xen.xend.XendDomain.instance().managed_config_save(self)
3947 else:
3948 try:
3949 self.device_configure(target_vscsi_sxp)
3951 except Exception, exn:
3952 raise XendError('Failed to destroy device')
3954 def destroy_xapi_instances(self):
3955 """Destroy Xen-API instances stored in XendAPIStore.
3956 """
3957 # Xen-API classes based on XendBase have their instances stored
3958 # in XendAPIStore. Cleanup these instances here, if they are supposed
3959 # to be destroyed when the parent domain is dead.
3961 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3962 # XendBase and there's no need to remove them from XendAPIStore.
3964 from xen.xend import XendDomain
3965 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3966 # domain still exists.
3967 return
3969 # Destroy the VMMetrics instance.
3970 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3971 is not None:
3972 self.metrics.destroy()
3974 # Destroy DPCI instances.
3975 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3976 XendAPIStore.deregister(dpci_uuid, "DPCI")
3978 # Destroy DSCSI instances.
3979 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3980 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3982 def has_device(self, dev_class, dev_uuid):
3983 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3985 def __str__(self):
3986 return '<domain id=%s name=%s memory=%s state=%s>' % \
3987 (str(self.domid), self.info['name_label'],
3988 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3990 __repr__ = __str__