ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19776:9e36ef77f658

xend: pass-through: Common parse_pci_name()

Share some parsing code between different parts of xm.

This has the side-effect that the device specification for
hot-plug may now include the VSLOT and OPTS as per device
specifications in the domain configuration file.

SEQ:BUS:DEV.FUNC[,OPT...]

e.g. 0000:00:01.00@6

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jun 17 07:34:59 2009 +0100 (2009-06-17)
parents 08de8ec655c2
children 60588f1f055f
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import thread
31 import re
32 import copy
33 import os
34 import traceback
35 from types import StringTypes
37 import xen.lowlevel.xc
38 from xen.util import asserts, auxbin
39 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
40 import xen.util.xsm.xsm as security
41 from xen.util import xsconstants
42 from xen.util.pci import serialise_pci_opts, pci_opts_list_to_sxp, \
43 pci_dict_to_bdf_str, pci_dict_to_xc_str
45 from xen.xend import balloon, sxp, uuid, image, arch
46 from xen.xend import XendOptions, XendNode, XendConfig
48 from xen.xend.XendConfig import scrub_password
49 from xen.xend.XendBootloader import bootloader, bootloader_tidy
50 from xen.xend.XendError import XendError, VmError
51 from xen.xend.XendDevices import XendDevices
52 from xen.xend.XendTask import XendTask
53 from xen.xend.xenstore.xstransact import xstransact, complete
54 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
55 from xen.xend.xenstore.xswatch import xswatch
56 from xen.xend.XendConstants import *
57 from xen.xend.XendAPIConstants import *
58 from xen.xend.server.DevConstants import xenbusState
60 from xen.xend.XendVMMetrics import XendVMMetrics
62 from xen.xend import XendAPIStore
63 from xen.xend.XendPPCI import XendPPCI
64 from xen.xend.XendDPCI import XendDPCI
65 from xen.xend.XendPSCSI import XendPSCSI
66 from xen.xend.XendDSCSI import XendDSCSI
# Timeout, in seconds, for migration-related waits (consumed by callers
# outside this view — TODO confirm exact use).
MIGRATE_TIMEOUT = 30.0
# Loopback block device under which a guest disk is exposed while a
# bootloader (e.g. pygrub) inspects it.
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

# Shared handle on the low-level hypervisor control interface.
xc = xen.lowlevel.xc.xc()
# Global xend configuration options singleton.
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    xd = XendDomain.instance()
    # Refuse duplicates: look up by name first, falling back to UUID when
    # no running domain owns the name.
    existing = xd.domain_lookup_nr(domconfig["name_label"])
    if existing is None or existing.domid is None:
        existing = xd.domain_lookup_nr(domconfig["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Tear down whatever was built before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.
    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    domconfig = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Tear down the partially-built domain before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (from xc.domain_getinfo)
    @type  info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool
    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    # Set when the xenstore entries for this domain are missing or
    # inconsistent and must be rebuilt below.
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    #       abort or ignore, but there may be cases where xenstore's
    #       entry disappears (eg. xenstore-rm /)
    #
    try:
        # /local/domain/<domid>/vm should point at /vm/<uuid>.
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        # The UUID recorded in /vm must match the one the hypervisor knows.
        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the /local/domain and /vm entries from scratch.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Build a XendDomainInfo ready to receive a restored domain.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})
    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    dominfo = XendDomainInfo(domconfig, resume = True)
    try:
        dominfo.resume()
    except:
        # Clean up the partially-restored domain before propagating.
        dominfo.destroy()
        raise
    return dominfo
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object
    @rtype:  XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by its name.

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Translate a hypervisor shutdown code into a human-readable reason.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason
    @rtype:  string
    """
    # Unknown codes map to "?" rather than raising.
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
280 def dom_get(dom):
281 """Get info from xen for an existing domain.
283 @param dom: domain id
284 @type dom: int
285 @return: info or None
286 @rtype: dictionary
287 """
288 try:
289 domlist = xc.domain_getinfo(dom, 1)
290 if domlist and dom == domlist[0]['domid']:
291 return domlist[0]
292 except Exception, err:
293 # ignore missing domain
294 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
295 return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings currently assigned to a domain,
    as recorded under the pciback backend's xenstore node.

    @param domid: domain id
    @type  domid: int
    @return: list of device strings (empty if no PCI backend node)
    @rtype:  list
    """
    dev_str_list = []
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        # No PCI backend node: nothing has been assigned.
        return dev_str_list
    # Entries are stored as dev-0, dev-1, ...; append in order instead of
    # rebuilding the list each iteration (the original was quadratic).
    for i in range(int(num_devs)):
        dev_str_list.append(xstransact.Read(path + 'dev-%i' % i))
    return dev_str_list
309 def do_FLR(domid):
310 from xen.xend.server.pciif import parse_pci_name, PciDevice
311 dev_str_list = get_assigned_pci_devices(domid)
313 for dev_str in dev_str_list:
314 try:
315 dev = PciDevice(parse_pci_name(dev_str))
316 except Exception, e:
317 raise VmError("pci: failed to locate device and "+
318 "parse it's resources - "+str(e))
319 dev.do_FLR()
class XendDomainInfo:
    """An object that represents a single domain.

    @TODO: try to unify dom and domid, they mean the same thing, but
           xc refers to it as dom, and everywhere else, including
           xenstore it is domid. The best way is to change xc's
           python interface.

    @ivar info: Parsed configuration
    @type info: dictionary
    @ivar domid: Domain ID (if VM has started)
    @type domid: int or None
    @ivar vmpath: XenStore path to this VM.
    @type vmpath: string
    @ivar dompath: XenStore path to this Domain.
    @type dompath: string
    @ivar image:  Reference to the VM Image.
    @type image: xen.xend.image.ImageHandler
    @ivar store_port: event channel to xenstored
    @type store_port: int
    @ivar console_port: event channel to xenconsoled
    @type console_port: int
    @ivar store_mfn: xenstored mfn
    @type store_mfn: int
    @ivar console_mfn: xenconsoled mfn
    @type console_mfn: int
    @ivar notes: OS image notes
    @type notes: dictionary
    @ivar vmWatch: reference to a watch on the xenstored vmpath
    @type vmWatch: xen.xend.xenstore.xswatch
    @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
    @type shutdownWatch: xen.xend.xenstore.xswatch
    @ivar shutdownStartTime: UNIX Time when domain started shutting down.
    @type shutdownStartTime: float or None
    @ivar restart_in_progress: Is a domain restart thread running?
    @type restart_in_progress: bool
    # @ivar state: Domain state
    # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
    @ivar state_updated: lock for self.state
    @type state_updated: threading.Condition
    @ivar refresh_shutdown_lock: lock for polling shutdown state
    @type refresh_shutdown_lock: threading.Condition
    @ivar _deviceControllers: device controller cache for this domain
    @type _deviceControllers: dict 'string' to DevControllers
    """
367 def __init__(self, info, domid = None, dompath = None, augment = False,
368 priv = False, resume = False, vmpath = None):
369 """Constructor for a domain
371 @param info: parsed configuration
372 @type info: dictionary
373 @keyword domid: Set initial domain id (if any)
374 @type domid: int
375 @keyword dompath: Set initial dompath (if any)
376 @type dompath: string
377 @keyword augment: Augment given info with xenstored VM info
378 @type augment: bool
379 @keyword priv: Is a privileged domain (Dom 0)
380 @type priv: bool
381 @keyword resume: Is this domain being resumed?
382 @type resume: bool
383 """
385 self.info = info
386 if domid == None:
387 self.domid = self.info.get('domid')
388 else:
389 self.domid = domid
391 #REMOVE: uuid is now generated in XendConfig
392 #if not self._infoIsSet('uuid'):
393 # self.info['uuid'] = uuid.toString(uuid.create())
395 # Find a unique /vm/<uuid>/<integer> path if not specified.
396 # This avoids conflict between pre-/post-migrate domains when doing
397 # localhost relocation.
398 self.vmpath = vmpath
399 i = 0
400 while self.vmpath == None:
401 self.vmpath = XS_VMROOT + self.info['uuid']
402 if i != 0:
403 self.vmpath = self.vmpath + '-' + str(i)
404 try:
405 if self._readVm("uuid"):
406 self.vmpath = None
407 i = i + 1
408 except:
409 pass
411 self.dompath = dompath
413 self.image = None
414 self.store_port = None
415 self.store_mfn = None
416 self.console_port = None
417 self.console_mfn = None
419 self.native_protocol = None
421 self.vmWatch = None
422 self.shutdownWatch = None
423 self.shutdownStartTime = None
424 self._resume = resume
425 self.restart_in_progress = False
427 self.state_updated = threading.Condition()
428 self.refresh_shutdown_lock = threading.Condition()
429 self._stateSet(DOM_STATE_HALTED)
431 self._deviceControllers = {}
433 for state in DOM_STATES_OLD:
434 self.info[state] = 0
436 if augment:
437 self._augmentInfo(priv)
439 self._checkName(self.info['name_label'])
441 self.metrics = XendVMMetrics(uuid.createString(), self)
444 #
445 # Public functions available through XMLRPC
446 #
    def start(self, is_managed = False):
        """Attempt to start the VM, performing the appropriate
        initialisation if it is not already started.

        @keyword is_managed: save the running configuration if xend
                             considers this a persistent (managed) domain
        @type    is_managed: bool
        @raise XendError: the VM is already running
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each step reports its progress range to the task log.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply scheduler parameters when the credit scheduler is
                # in use.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                      self.getWeight(),
                                                      self.getCap())
            except:
                # Any failure aborts the start and destroys the domain.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: the domain is neither suspended nor halted
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Rebuild xenstore state, channels and devices for the
                # resumed domain, then finish the restore.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of DOMAIN_SHUTDOWN_REASONS values
                       (e.g. 'poweroff', 'reboot')
        @raise XendError: domain is already down, is Domain-0, or the
                          reason is invalid
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        # Ask the guest to shut itself down via the xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            # Without PV drivers (or while in an ACPI sleep state) the
            # guest cannot react, so force the shutdown from outside.
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
533 def pause(self):
534 """Pause domain
536 @raise XendError: Failed pausing a domain
537 """
538 try:
539 bepath="/local/domain/0/backend/"
540 if(self.domid):
542 dev = xstransact.List(bepath + 'vbd' + "/%d" % (self.domid,))
543 for x in dev:
544 path = self.getDeviceController('vbd').readBackend(x, 'params')
545 if path and path.startswith('/dev/xen/blktap-2'):
546 #Figure out the sysfs path.
547 pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
548 ctrlid = pattern.search(path)
549 ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
550 #pause the disk
551 f = open(ctrl + '/pause', 'w')
552 f.write('pause');
553 f.close()
554 except Exception, ex:
555 log.warn('Could not pause blktap disk.');
557 try:
558 xc.domain_pause(self.domid)
559 self._stateSet(DOM_STATE_PAUSED)
560 except Exception, ex:
561 log.exception(ex)
562 raise XendError("Domain unable to be paused: %s" % str(ex))
564 def unpause(self):
565 """Unpause domain
567 @raise XendError: Failed unpausing a domain
568 """
569 try:
570 bepath="/local/domain/0/backend/"
571 if(self.domid):
572 dev = xstransact.List(bepath + "vbd" + "/%d" % (self.domid,))
573 for x in dev:
574 path = self.getDeviceController('vbd').readBackend(x, 'params')
575 if path and path.startswith('/dev/xen/blktap-2'):
576 #Figure out the sysfs path.
577 pattern = re.compile('/dev/xen/blktap-2/tapdev(\d+)$')
578 ctrlid = pattern.search(path)
579 ctrl = '/sys/class/blktap2/blktap' + ctrlid.group(1)
580 #unpause the disk
581 if(os.path.exists(ctrl + '/resume')):
582 f = open(ctrl + '/resume', 'w');
583 f.write('resume');
584 f.close();
586 except Exception, ex:
587 log.warn('Could not unpause blktap disk: %s' % str(ex));
589 try:
590 xc.domain_unpause(self.domid)
591 self._stateSet(DOM_STATE_RUNNING)
592 except Exception, ex:
593 log.exception(ex)
594 raise XendError("Domain unable to be unpaused: %s" % str(ex))
596 def send_sysrq(self, key):
597 """ Send a Sysrq equivalent key via xenstored."""
598 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
599 raise XendError("Domain '%s' is not started" % self.info['name_label'])
601 asserts.isCharConvertible(key)
602 self.storeDom("control/sysrq", '%c' % key)
604 def pci_device_configure_boot(self):
606 if not self.info.is_hvm():
607 return
609 devid = '0'
610 dev_info = self._getDeviceInfo_pci(devid)
611 if dev_info is None:
612 return
614 # get the virtual slot info from xenstore
615 dev_uuid = sxp.child_value(dev_info, 'uuid')
616 pci_conf = self.info['devices'][dev_uuid][1]
617 pci_devs = pci_conf['devs']
618 request = map(lambda x:
619 self.info.pci_convert_dict_to_sxp(x, 'Initialising',
620 'Booting'), pci_devs)
622 for i in request:
623 self.pci_device_configure(i)
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug a PCI device into a running HVM guest, performing all
        safety checks first: virtual-slot conflicts, VT-d assignability,
        pciback ownership, MMIO BAR alignment and device co-assignment.

        @param dev_config: parsed pci configuration with a 'devs' list;
                           only devs[0] is inserted
        @return: the virtual slot used (from hvm_pci_device_insert_dev)
        @raise VmError: any of the checks fails
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Reject a vslot that is already occupied (AUTO_PHP_SLOT
                # means "pick one automatically", so it never conflicts).
                if (int(x['vslot'], 16) == int(new_dev['vslot'], 16) and
                    int(x['vslot'], 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already have a device." % (new_dev['vslot']))

                # Reject re-insertion of the exact same physical function.
                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        bdf = xc.test_assign_device(0, pci_dict_to_xc_str(new_dev))
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            raise VmError("fail to assign device(%s): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % pci_dict_to_bdf_str(new_dev))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        try:
            pci_device = PciDevice(new_dev)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                    "%s\n"+ \
                    "See the pciback.hide kernel "+ \
                    "command-line parameter or\n"+ \
                    "bind your slot/device to the PCI backend using sysfs" \
                    )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        pci_device.devs_check_driver(coassignment_list)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            pci_dev = parse_pci_name(pci_str)
            # Still assignable means not yet given to any domain: fine.
            if xc.test_assign_device(0, pci_dict_to_xc_str(pci_dev)) == 0:
                continue
            # Otherwise it must already belong to *this* domain.
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to domain %s" + \
                    " because one of its co-assignment device %s has been" + \
                    " assigned to other domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        return self.hvm_pci_device_insert_dev(new_dev)
712 def hvm_pci_device_insert(self, dev_config):
713 log.debug("XendDomainInfo.hvm_pci_device_insert: %s"
714 % scrub_password(dev_config))
716 if not self.info.is_hvm():
717 raise VmError("hvm_pci_device_create called on non-HVM guest")
719 new_dev = dev_config['devs'][0]
721 return self.hvm_pci_device_insert_dev(new_dev)
723 def hvm_pci_device_insert_dev(self, new_dev):
724 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s"
725 % scrub_password(new_dev))
727 if self.domid is not None:
728 opts = ''
729 if new_dev.has_key('opts'):
730 opts = ',' + serialise_pci_opts(new_dev['opts'])
732 bdf_str = "%s@%02x%s" % (pci_dict_to_bdf_str(new_dev),
733 int(new_dev['vslot'], 16), opts)
734 log.debug("XendDomainInfo.hvm_pci_device_insert_dev: %s" % bdf_str)
735 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
737 vslot = xstransact.Read("/local/domain/0/device-model/%i/parameter"
738 % self.getDomid())
739 try:
740 vslot_int = int(vslot, 16)
741 except ValueError:
742 raise VmError(("Cannot pass-through PCI function '%s'. " +
743 "Device model reported an error: %s") %
744 (bdf_str, vslot))
745 else:
746 vslot = new_dev['vslot']
748 return vslot
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        @raise VmError: invalid configuration or device creation failure
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Register the device in this domain's config first; devid is
        # assigned when the device is actually created below.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            # Validate the MAC address format before touching the backend.
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            # Domain is running: create the device now and wait for it to
            # connect; on failure roll back all config registered above.
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked in the vbd reference list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Domain not running: the device exists only in configuration.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        pci_sub_state = sxp.child_value(dev_sxp, 'sub_state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        # Detach requires the shared pci platform node to exist already.
        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                if pci_sub_state == 'Booting':
                    # Boot-time replay: skip the full create-time checks.
                    vslot = self.hvm_pci_device_insert(dev_config)
                else:
                    vslot = self.hvm_pci_device_create(dev_config)
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot
            else:
                # HVM PCI device detachment
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                # Find the vslot this physical function currently occupies.
                vslot = ""
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslot = x['vslot']
                        break
                if vslot == "":
                    raise VmError("Device %s is not connected" %
                                  pci_dict_to_bdf_str(dev))
                self.hvm_destroyPCIDevice(int(vslot, 16))
                # Update vslot
                dev['vslot'] = vslot
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslot'):
                        n[1] = vslot

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        if self.domid is not None:
            # Running domain:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
            if not self.info.is_hvm():
                # in PV case, wait until backend state becomes connected.
                dev_control.waitForDevice_reconfigure(devid)
            num_devs = dev_control.cleanupDevice(devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy pci and remove config.
            if num_devs == 0:
                if self.info.is_hvm():
                    self.destroyDevice('pci', devid, True)
                else:
                    self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        else:
            # Dormant domain: edit the stored SXP configuration directly.
            new_dev_sxp = ['pci']
            for cur_dev in sxp.children(existing_dev_info, 'dev'):
                if pci_state == 'Closing':
                    # Drop the device being detached from the new config.
                    if int(dev['domain'], 16) == int(sxp.child_value(cur_dev, 'domain'), 16) and \
                       int(dev['bus'], 16) == int(sxp.child_value(cur_dev, 'bus'), 16) and \
                       int(dev['slot'], 16) == int(sxp.child_value(cur_dev, 'slot'), 16) and \
                       int(dev['func'], 16) == int(sxp.child_value(cur_dev, 'func'), 16):
                        continue
                new_dev_sxp.append(cur_dev)

            if pci_state == 'Initialising' and pci_sub_state != 'Booting':
                # Attach: append the newly-configured devices.
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, remove config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device, handling attach
        (Initialising) and detach (Closing) requests for individual LUNs
        on a shared vscsi controller node.

        @param dev_sxp: vscsi device configuration, e.g.
                        (vscsi (dev (p-dev ...) (v-dev ...) (state ...)))
        @return: True if the device was updated/created, False if dev_sxp
                 is not a vscsi node.
        @raise XendError: on duplicate definition, mode/backend mismatch,
                          or detach of a non-existent device.
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            # True if any (dev ...) child of dev_info matches one of the
            # given physical (p-dev) or virtual (v-dev) identifiers.
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            # Normalise a backend reference (name or id) to a domid string;
            # falls back to the raw value if the domain cannot be resolved.
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # feature-host mode must match the existing controller node.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            # The backend domain must also match the existing controller.
            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # Live domain: use DevController.reconfigureDevice to change
            # the device config through xenstore.
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Managed (inactive) domain: rebuild the stored sxpr by hand.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In host mode (feature-host == 1) the whole controller
                    # goes away; otherwise drop only the requested v-devs.
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        PCI and vscsi devices are delegated to their specialised handlers;
        everything else is treated as a block device reconfiguration.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        @raise VmError: if the device cannot be identified or the requested
                        reconfiguration is refused.
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        # Flatten (key value) pairs; single-element children are skipped.
        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                pass

        dev_control = self.getDeviceController(dev_class)
        if devid is None:
            # Derive the device number from the virtual device name.
            dev = dev_config.get('dev', '')
            if not dev:
                raise VmError('Block device must have virtual details specified')
            if 'ioemu:' in dev:
                (_, dev) = dev.split(':', 1)
            try:
                (dev, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
            except ValueError:
                pass
            devid = dev_control.convertToDeviceNumber(dev)
        dev_info = self._getDeviceInfo_vbd(devid)
        if dev_info is None:
            raise VmError("Device %s not connected" % devid)
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control.reconfigureDevice(devid, dev_config)
        else:
            # Inactive domain: only allow swapping the media of a
            # read-only cdrom; refuse anything else.
            (_, new_b, new_f) = dev_control.getDeviceDetails(dev_config)
            if (new_f['device-type'] == 'cdrom' and
                sxp.child_value(dev_info, 'dev').endswith(':cdrom') and
                new_b['mode'] == 'r' and
                sxp.child_value(dev_info, 'mode') == 'r'):
                pass
            else:
                raise VmError('Refusing to reconfigure device %s:%d to %s' %
                              (dev_class, devid, dev_config))

        # update XendConfig with new device info
        self.info.device_update(dev_uuid, dev_sxp)
        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
1099 def waitForDevices(self):
1100 """Wait for this domain's configured devices to connect.
1102 @raise VmError: if any device fails to initialise.
1103 """
1104 for devclass in XendDevices.valid_devices():
1105 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Request removal of the passed-through PCI device plugged into
        the given virtual slot of an HVM guest.

        @param vslot: virtual slot number (int or hex string)
        @return: 0 on success
        @raise VmError: if called on a PV guest, the slot is empty, or a
                        co-assigned device is still assigned to the domain.
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        # NOTE(review): 'x' is reused after this loop; if 'devs' is empty
        # the later PciDevice(x) would hit an undefined name — assumed not
        # to happen in practice.
        devnum = 0
        for x in pci_conf['devs']:
            if int(x['vslot'], 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        from xen.xend.server.pciif import PciDevice
        try:
            pci_device = PciDevice(x)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = self._get_assigned_pci_devices()
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from domain %s" + \
                    " because one of its co-assignment device %s is still " + \
                    " assigned to the domain." \
                    )% (pci_device.name, self.info['name_label'], pci_str))

        bdf_str = pci_dict_to_bdf_str(x)
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
        if self.domid is not None:
            # Tell the device model (qemu) to hot-unplug the function.
            self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing its stored configuration.

        @param deviceClass: device class, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id or virtual device name
        @param force: skip waiting for backend teardown
        @param rm_cfg: also delete the device from self.info['devices']
        @return: result of the controller's destroyDevice, or None for an
                 inactive domain.
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:

            #new blktap implementation may need a sysfs write after everything is torn down.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
            path = self.getDeviceController(deviceClass).readBackend(dev, 'params')
            if path and path.startswith('/dev/xen/blktap-2'):
                frontpath = self.getDeviceController(deviceClass).frontendPath(dev)
                backpath = xstransact.Read(frontpath, "backend")
                thread.start_new_thread(self.getDeviceController(deviceClass).finishDeviceCleanup, (backpath, path))

            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            # Look up the stored config entry so its uuid can be removed.
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running: find the vif via its MAC from the saved sxprs.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
    def getDeviceSxprs(self, deviceClass):
        """Return [devnum, sxpr] pairs for all devices of deviceClass.

        PCI devices come straight from the stored config (they share one
        node); for a live domain other classes are read from the device
        controller, otherwise they are synthesised from self.info.
        """
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                # 'vbd' matches both vbd and tap entries.
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs
1273 def getBlockDeviceClass(self, devid):
1274 # To get a device number from the devid,
1275 # we temporarily use the device controller of VBD.
1276 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1277 dev_info = self._getDeviceInfo_vbd(dev)
1278 if dev_info:
1279 return dev_info[0]
1281 def _getDeviceInfo_vif(self, mac):
1282 for dev_type, dev_info in self.info.all_devices_sxpr():
1283 if dev_type != 'vif':
1284 continue
1285 if mac == sxp.child_value(dev_info, 'mac'):
1286 return dev_info
1288 def _getDeviceInfo_vbd(self, devid):
1289 for dev_type, dev_info in self.info.all_devices_sxpr():
1290 if dev_type != 'vbd' and dev_type != 'tap':
1291 continue
1292 dev = sxp.child_value(dev_info, 'dev')
1293 dev = dev.split(':')[0]
1294 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1295 if devid == dev:
1296 return dev_info
1298 def _getDeviceInfo_pci(self, devid):
1299 for dev_type, dev_info in self.info.all_devices_sxpr():
1300 if dev_type != 'pci':
1301 continue
1302 return dev_info
1303 return None
1305 def _getDeviceInfo_vscsi(self, devid):
1306 devid = int(devid)
1307 for dev_type, dev_info in self.info.all_devices_sxpr():
1308 if dev_type != 'vscsi':
1309 continue
1310 devs = sxp.children(dev_info, 'dev')
1311 if devid == int(sxp.child_value(devs[0], 'devid')):
1312 return dev_info
1313 return None
1315 def _get_assigned_pci_devices(self, devid = 0):
1316 if self.domid is not None:
1317 return get_assigned_pci_devices(self.domid)
1319 dev_info = self._getDeviceInfo_pci(devid)
1320 if dev_info is None:
1321 return []
1322 dev_uuid = sxp.child_value(dev_info, 'uuid')
1323 pci_conf = self.info['devices'][dev_uuid][1]
1324 return map(pci_dict_to_bdf_str, pci_conf['devs'])
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        # self.info stores memory sizes in bytes.
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never balloon dom0 below the configured floor.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            # Running domain: free host memory if growing, then push the
            # new target to xenstore and the hypervisor.
            if target > memory_cur:
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new limit (the
                          previous value is restored first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # Running domain: apply via the hypervisor (KiB units) and
            # return early — the config save below is only for managed,
            # inactive domains.
            maxmem = int(limit) * 1024
            try:
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the in-memory value on failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain.

        For a running domain the data comes from the hypervisor; for an
        inactive one placeholder values are synthesised from self.info.
        @raise XendError: wrapping any RuntimeError from xc.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Inactive domain: report offline VCPUs; cpumap falls
                    # back to "any of 64 cpus" when no affinity is stored.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                  self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
1411 def getDomInfo(self):
1412 return dom_get(self.domid)
1415 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0, whose memory/vcpu settings come from
                     boot-time state rather than the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0's sizing is authoritative in Xen, not the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh the VT100 and VNC console entries in self.info from the
        ports this domain exposes in xenstore.

        @param transaction: optional xenstore transaction to read through;
                            reads go via readDom otherwise.
        """
        # dom0 and not-yet-built domains have no console state to sync.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Nothing to do if the location is already current.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1523 # Function to update xenstore /vm/*
1526 def _readVm(self, *args):
1527 return xstransact.Read(self.vmpath, *args)
1529 def _writeVm(self, *args):
1530 return xstransact.Write(self.vmpath, *args)
1532 def _removeVm(self, *args):
1533 return xstransact.Remove(self.vmpath, *args)
1535 def _gatherVm(self, *args):
1536 return xstransact.Gather(self.vmpath, *args)
1538 def _listRecursiveVm(self, *args):
1539 return xstransact.ListRecursive(self.vmpath, *args)
1541 def storeVm(self, *args):
1542 return xstransact.Store(self.vmpath, *args)
1544 def permissionsVm(self, *args):
1545 return xstransact.SetPermissions(self.vmpath, *args)
1548 # Function to update xenstore /dom/*
1551 def readDom(self, *args):
1552 return xstransact.Read(self.dompath, *args)
1554 def gatherDom(self, *args):
1555 return xstransact.Gather(self.dompath, *args)
1557 def _writeDom(self, *args):
1558 return xstransact.Write(self.dompath, *args)
1560 def _removeDom(self, *args):
1561 return xstransact.Remove(self.dompath, *args)
1563 def storeDom(self, *args):
1564 return xstransact.Store(self.dompath, *args)
1567 def readDomTxn(self, transaction, *args):
1568 paths = map(lambda x: self.dompath + "/" + x, args)
1569 return transaction.read(*paths)
1571 def gatherDomTxn(self, transaction, *args):
1572 paths = map(lambda x: self.dompath + "/" + x, args)
1573 return transaction.gather(*paths)
1575 def _writeDomTxn(self, transaction, *args):
1576 paths = map(lambda x: self.dompath + "/" + x, args)
1577 return transaction.write(*paths)
1579 def _removeDomTxn(self, transaction, *args):
1580 paths = map(lambda x: self.dompath + "/" + x, args)
1581 return transaction.remove(*paths)
1583 def storeDomTxn(self, transaction, *args):
1584 paths = map(lambda x: self.dompath + "/" + x, args)
1585 return transaction.store(*paths)
1588 def _recreateDom(self):
1589 complete(self.dompath, lambda t: self._recreateDomFunc(t))
    def _recreateDomFunc(self, t):
        """Transaction body for _recreateDom: wipe and repopulate this
        domain's xenstore directory with guest-writable subdirectories.

        @param t: the xenstore transaction to operate within.
        """
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
        for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details (console/store rings, elfnotes,
        vcpu availability, ...) into its /local/domain xenstore tree."""
        to_store = {
            'domid': str(self.domid),
            'vm': self.vmpath,
            'name': self.info['name_label'],
            'console/limit': str(xoptions.get_console_limit() * 1024),
            'memory/target': str(self.info['memory_dynamic_max'] / 1024),
            }

        def f(n, v):
            # Store v under key n, skipping None and rendering bools as 1/0.
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port', self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type', constype)
        f('store/port', self.store_port)
        f('store/ring-ref', self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # 'features' is a |-separated list; '!' prefix negates.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1654 def _vcpuDomDetails(self):
1655 def availability(n):
1656 if self.info['vcpu_avail'] & (1 << n):
1657 return 'online'
1658 else:
1659 return 'offline'
1661 result = {}
1662 for v in range(0, self.info['VCPUs_max']):
1663 result["cpu/%d/availability" % v] = availability(v)
1664 return result
1667 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date. This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        # Callbacks: _storeChanged syncs self.info; _handleShutdownWatch
        # schedules refreshShutdown on guest-initiated shutdown.
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read selected /vm values and fold any
        externally-made changes back into self.info.

        @return: 1 so the watch stays registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                          for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self.refresh_shutdown_lock.acquire()
            try:
                state = self._stateGet()
                if state not in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
                    self._storeDomDetails()
            finally:
                self.refresh_shutdown_lock.release()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: schedule refreshShutdown
        with a timeout so an unresponsive guest can be detected.

        @return: True so the watch stays registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # Suspend is handled by XendCheckpoint, not here.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in progress; keep its deadline.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1761 # Public Attributes for the VM
1765 def getDomid(self):
1766 return self.domid
1768 def setName(self, name, to_store = True):
1769 self._checkName(name)
1770 self.info['name_label'] = name
1771 if to_store:
1772 self.storeVm("name", name)
1774 def getName(self):
1775 return self.info['name_label']
1777 def getDomainPath(self):
1778 return self.dompath
1780 def getShutdownReason(self):
1781 return self.readDom('control/shutdown')
1783 def getStorePort(self):
1784 """For use only by image.py and XendCheckpoint.py."""
1785 return self.store_port
1787 def getConsolePort(self):
1788 """For use only by image.py and XendCheckpoint.py"""
1789 return self.console_port
1791 def getFeatures(self):
1792 """For use only by image.py."""
1793 return self.info['features']
1795 def getVCpuCount(self):
1796 return self.info['VCPUs_max']
    def setVCpuCount(self, vcpus):
        """Set the number of online VCPUs.

        For a running domain this updates vcpu_avail and the per-cpu
        availability nodes; for a managed one it resizes the stored
        affinity list and saves the config.
        @raise XendError: for vcpus <= 0, or above the running maximum.
        """
        def vcpus_valid(n):
            # NOTE(review): validates the enclosing 'vcpus', not 'n'.
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
            xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
1824 def getMemoryTarget(self):
1825 """Get this domain's target memory size, in KB."""
1826 return self.info['memory_dynamic_max'] / 1024
1828 def getMemoryMaximum(self):
1829 """Get this domain's maximum memory size, in KB."""
1830 # remember, info now stores memory in bytes
1831 return self.info['memory_static_max'] / 1024
1833 def getResume(self):
1834 return str(self._resume)
1836 def setResume(self, isresume):
1837 self._resume = isresume
1839 def getCpus(self):
1840 return self.info['cpus']
1842 def setCpus(self, cpumap):
1843 self.info['cpus'] = cpumap
1845 def getCap(self):
1846 return self.info['vcpus_params']['cap']
1848 def setCap(self, cpu_cap):
1849 self.info['vcpus_params']['cap'] = cpu_cap
1851 def getWeight(self):
1852 return self.info['vcpus_params']['weight']
1854 def setWeight(self, cpu_weight):
1855 self.info['vcpus_params']['weight'] = cpu_weight
1857 def getRestartCount(self):
1858 return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor domain info;
                        looked up via dom_get when None.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason. This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists. This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded. It will also occur if someone
                    # destroys a domain beneath us. We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen. This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped. We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain. We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain. XendCheckpoint will do
                        # this once it has finished. However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain. This can happen if a restore is in progress, or has
                # failed. Ignore this domain.
                pass
            else:
                # Domain is alive. If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock; guard against concurrent restarts.
        if restart_reason and not self.restart_in_progress:
            self.restart_in_progress = True
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1971 # Restart functions - handling whether we come back up on shutdown.
1974 def _clearRestart(self):
1975 self._removeDom("xend/shutdown_start_time")
1977 def _maybeDumpCore(self, reason):
1978 if reason == 'crash':
1979 if xoptions.get_enable_dump() or self.get_on_crash() \
1980 in ['coredump_and_destroy', 'coredump_and_restart']:
1981 try:
1982 self.dumpCore()
1983 except XendError:
1984 # This error has been logged -- there's nothing more
1985 # we can do in this context.
1986 pass
1988 def _maybeRestart(self, reason):
1989 # Before taking configured action, dump core if configured to do so.
1991 self._maybeDumpCore(reason)
1993 # Dispatch to the correct method based upon the configured on_{reason}
1994 # behaviour.
1995 actions = {"destroy" : self.destroy,
1996 "restart" : self._restart,
1997 "preserve" : self._preserve,
1998 "rename-restart" : self._renameRestart,
1999 "coredump-destroy" : self.destroy,
2000 "coredump-restart" : self._restart}
2002 action_conf = {
2003 'poweroff': 'actions_after_shutdown',
2004 'reboot': 'actions_after_reboot',
2005 'crash': 'actions_after_crash',
2008 action_target = self.info.get(action_conf.get(reason))
2009 func = actions.get(action_target, None)
2010 if func and callable(func):
2011 func()
2012 else:
2013 self.destroy() # default to destroy
2015 def _renameRestart(self):
2016 self._restart(True)
2018 def _restart(self, rename = False):
2019 """Restart the domain after it has exited.
2021 @param rename True if the old domain is to be renamed and preserved,
2022 False if it is to be destroyed.
2023 """
2024 from xen.xend import XendDomain
2026 if self._readVm(RESTART_IN_PROGRESS):
2027 log.error('Xend failed during restart of domain %s. '
2028 'Refusing to restart to avoid loops.',
2029 str(self.domid))
2030 self.destroy()
2031 return
2033 old_domid = self.domid
2034 self._writeVm(RESTART_IN_PROGRESS, 'True')
2036 elapse = time.time() - self.info['start_time']
2037 if elapse < MINIMUM_RESTART_TIME:
2038 log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
2039 'Refusing to restart to avoid loops.',
2040 self.info['name_label'], elapse)
2041 self.destroy()
2042 return
2044 prev_vm_xend = self._listRecursiveVm('xend')
2045 new_dom_info = self.info
2046 try:
2047 if rename:
2048 new_dom_info = self._preserveForRestart()
2049 else:
2050 self._unwatchVm()
2051 self.destroy()
2053 # new_dom's VM will be the same as this domain's VM, except where
2054 # the rename flag has instructed us to call preserveForRestart.
2055 # In that case, it is important that we remove the
2056 # RESTART_IN_PROGRESS node from the new domain, not the old one,
2057 # once the new one is available.
2059 new_dom = None
2060 try:
2061 new_dom = XendDomain.instance().domain_create_from_dict(
2062 new_dom_info)
2063 for x in prev_vm_xend[0][1]:
2064 new_dom._writeVm('xend/%s' % x[0], x[1])
2065 new_dom.waitForDevices()
2066 new_dom.unpause()
2067 rst_cnt = new_dom._readVm('xend/restart_count')
2068 rst_cnt = int(rst_cnt) + 1
2069 new_dom._writeVm('xend/restart_count', str(rst_cnt))
2070 new_dom._removeVm(RESTART_IN_PROGRESS)
2071 except:
2072 if new_dom:
2073 new_dom._removeVm(RESTART_IN_PROGRESS)
2074 new_dom.destroy()
2075 else:
2076 self._removeVm(RESTART_IN_PROGRESS)
2077 raise
2078 except:
2079 log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name.  This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: the cloned info dict, which keeps the ORIGINAL name and
        UUID and should be used to create the replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # Clone the configuration: the copy keeps the original identity
        # for the replacement domain ...
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        # ... while this (dead) domain takes on the fresh name/UUID.
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
2109 def _preserve(self):
2110 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2111 self.domid)
2112 self._unwatchVm()
2113 self.storeDom('xend/shutdown_completed', 'True')
2114 self._stateSet(DOM_STATE_HALTED)
2117 # Debugging ..
2120 def dumpCore(self, corefile = None):
2121 """Create a core dump for this domain.
2123 @raise: XendError if core dumping failed.
2124 """
2126 if not corefile:
2127 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2128 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
2129 self.info['name_label'], self.domid)
2131 if os.path.isdir(corefile):
2132 raise XendError("Cannot dump core in a directory: %s" %
2133 corefile)
2135 try:
2136 try:
2137 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2138 xc.domain_dumpcore(self.domid, corefile)
2139 except RuntimeError, ex:
2140 corefile_incomp = corefile+'-incomplete'
2141 try:
2142 os.rename(corefile, corefile_incomp)
2143 except:
2144 pass
2146 log.error("core dump failed: id = %s name = %s: %s",
2147 self.domid, self.info['name_label'], str(ex))
2148 raise XendError("Failed to dump core: %s" % str(ex))
2149 finally:
2150 self._removeVm(DUMPCORE_IN_PROGRESS)
2153 # Device creation/deletion functions
2156 def _createDevice(self, deviceClass, devConfig):
2157 return self.getDeviceController(deviceClass).createDevice(devConfig)
2159 def _waitForDevice(self, deviceClass, devid):
2160 return self.getDeviceController(deviceClass).waitForDevice(devid)
2162 def _waitForDeviceUUID(self, dev_uuid):
2163 deviceClass, config = self.info['devices'].get(dev_uuid)
2164 self._waitForDevice(deviceClass, config['devid'])
2166 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2167 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2168 devid, backpath)
2170 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2171 return self.getDeviceController(deviceClass).reconfigureDevice(
2172 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        Non-vscsi devices are created in the configured order; vscsi
        devices are deferred and created afterwards in devid order.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        # vscsi devices are collected here and created after the loop,
        # sorted by devid (see below).
        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        # vscsi devices must be created in sorted devid order so the guest
        # sees stable /dev/sdxx naming.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.pci_device_configure_boot()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices.  Nothrow guarantee.

        Destroys the device model (if any) first, then walks every valid
        device class under this domain's xenstore device path, destroying
        each device; per-device failures are logged and swallowed.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        # Transaction is only used to list devices; it is always aborted.
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            t.abort()
2258 def getDeviceController(self, name):
2259 """Get the device controller for this domain, and if it
2260 doesn't exist, create it.
2262 @param name: device class name
2263 @type name: string
2264 @rtype: subclass of DevController
2265 """
2266 if name not in self._deviceControllers:
2267 devController = XendDevices.make_controller(name, self)
2268 if not devController:
2269 raise XendError("Unknown device type: %s" % name)
2270 self._deviceControllers[name] = devController
2272 return self._deviceControllers[name]
2275 # Migration functions (public)
2278 def testMigrateDevices(self, network, dst):
2279 """ Notify all device about intention of migration
2280 @raise: XendError for a device that cannot be migrated
2281 """
2282 for (n, c) in self.info.all_devices_sxpr():
2283 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2284 if rc != 0:
2285 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, every device is asked to recover: those that had
        already completed this step recover at 'step', the rest at
        'step - 1'; the original exception is then re-raised.
        """
        # ctr counts devices that completed the current step.
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            # Roll back in the same device order; once ctr reaches zero
            # the remaining devices never saw this step, so recover them
            # at the previous one.
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2305 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2306 step, domName=''):
2307 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2308 network, dst, step, domName)
2310 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2311 dst, step, domName=''):
2312 return self.getDeviceController(deviceClass).recover_migrate(
2313 deviceConfig, network, dst, step, domName)
2316 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain in Xen (xc.domain_create), sets HVM platform
        parameters, validates scheduler and VT-d settings, and registers
        the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None
        self.restart_in_progress = False

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 4MB here to be on the safe side.
        # 2MB memory allocation was not enough in some cases, so it's 4MB now
        balloon.free(4*1024, self) # 4MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Creation flags: bit 0 = HVM, bit 1 = HAP, bit 2 = S3 integrity.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')

        # NOTE(review): if domain_create raised and the policy is not ACM,
        # the exception is swallowed and this check catches the failure
        # (self.domid is then None/negative).
        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Check for cpu_{cap|weight} validity for credit scheduler
        if XendNode.instance().xenschedinfo() == 'credit':
            cap = self.getCap()
            weight = self.getWeight()

            assert type(weight) == int
            assert type(cap) == int

            if weight < 1 or weight > 65535:
                raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")

            if cap < 0 or cap > self.getVCpuCount() * 100:
                raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
                              (self.getVCpuCount() * 100))

        # Test whether the devices can be assigned with VT-d
        self.info.update_platform_pci()
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci)  # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(0, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/dev/func from the BDF value.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce the domain to xenstored via its store ring details.

        @raise: XendError if the introduction fails.
        """
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            # Convert the low-level failure to xend's exception type.
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Set this domain's target domain in Xen and record it in xenstore.

        @param target: target domain id
        @raise: XendError if the hypercall or the store write fails.
        """
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        If no explicit cpus list is configured and the host has more than
        one NUMA node, pick the least-loaded node with enough free memory
        and pin all vcpus to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty restricted cpu list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            # Honour the explicit per-vcpu pinning from the config.
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                """Return the index of the least-loaded candidate node.

                Load is the count of other domains' online vcpus that can
                run on each node, scaled by the node's cpu count; nodes
                outside node_list (or with no cpus) are excluded via
                sys.maxint.
                """
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Memory the domain needs, in KiB -- candidate nodes must
                # have at least this much free.
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the constructed domain: run the bootloader, create the
        image, size and reserve memory, build channels/devices, and mark the
        domain running.

        @raise: VmError on failure (bootloading state is cleaned up first).
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            # Wrap low-level failures in xend's VmError for callers.
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee.

        Runs under refresh_shutdown_lock so it cannot race with
        refreshShutdown's processing of the same domain.
        """

        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            # Clear domid only after the state change below.
            self._stateSet(DOM_STATE_HALTED)
            self.domid = None  # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
    def unwatchShutdown(self):
        """Remove the watch on the domain's control/shutdown node, if any.
        Idempotent.  Nothrow guarantee.  Expects to be protected by the
        refresh_shutdown_lock."""

        try:
            try:
                if self.shutdownWatch:
                    self.shutdownWatch.unwatch()
            finally:
                # Always drop the reference, even if unwatch() raised.
                self.shutdownWatch = None
        except:
            log.exception("Unwatching control/shutdown failed.")
2652 def waitForShutdown(self):
2653 self.state_updated.acquire()
2654 try:
2655 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2656 self.state_updated.wait(timeout=1.0)
2657 finally:
2658 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down.  If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up.  HVM guests with no PV drivers
        should already be shutdown.
        """
        # The guest acknowledges the request by rewriting control/shutdown;
        # while it still reads "suspend" we keep counting down.
        state = "suspend"
        nr_tries = 60

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                if state == "suspend":
                    if nr_tries == 0:
                        # Give up: clear the request and report the timeout.
                        msg = ('Timeout waiting for domain %s to suspend'
                            % self.domid)
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2686 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish a restore: record the ring MFNs, re-introduce the domain
        to xenstore, recreate the image/device model, and restart watches.

        Called from XendCheckpoint once the domain image has been loaded.

        @param store_mfn: MFN of the restored xenstore ring
        @param console_mfn: MFN of the restored console ring
        """

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True => the device model is restoring, not booting fresh.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        """Mark the restore as finished by clearing the resume flag."""
        self.setResume(False)
2711 # VM Destroy
2714 def _prepare_phantom_paths(self):
2715 # get associated devices to destroy
2716 # build list of phantom devices to be removed after normal devices
2717 plist = []
2718 if self.domid is not None:
2719 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2720 try:
2721 for dev in t.list():
2722 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2723 % (self.dompath, dev))
2724 if backend_phantom_vbd is not None:
2725 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2726 % backend_phantom_vbd)
2727 plist.append(backend_phantom_vbd)
2728 plist.append(frontend_phantom_vbd)
2729 finally:
2730 t.abort()
2731 return plist
2733 def _cleanup_phantom_devs(self, plist):
2734 # remove phantom devices
2735 if not plist == []:
2736 time.sleep(2)
2737 for paths in plist:
2738 if paths.find('backend') != -1:
2739 # Modify online status /before/ updating state (latter is watched by
2740 # drivers, so this ordering avoids a race).
2741 xstransact.Write(paths, 'online', "0")
2742 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2743 # force
2744 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee.

        Pauses and destroys the domain in Xen (with FLR of any passed-
        through devices), removes it from XendDomain's list, cleans up its
        xenstore state, and finally deletes transient domains entirely.
        """

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Phantom vbd paths must be collected before the real devices go.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause first so the domain cannot touch devices during FLR.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Hard-reset the domain: destroy it and immediately recreate a new
        domain from the same configuration, restoring the saved xend/
        subtree.  Failures are logged; nothrow to the caller.
        """
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Preserve the xend/ VM subtree so it can be copied to the new VM.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # Force a fresh domid allocation for the replacement.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Creation failed part-way: tear down the half-built domain.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain (e.g. after live checkpoint, or after
        a later error during save or migrate).

        Checks that the domain is currently suspended first, so this is
        safe to call from anywhere.  The 'fast' path (guest advertised
        SUSPEND_CANCEL) skips device teardown/rebuild entirely.
        """
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migate); checks that the domain
        # is currently suspended first so safe to call from anywhere
        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2854 # Channels for xenstore and console
    def _createChannels(self):
        """Create the channels to the domain.

        Allocation order matters: the store channel is allocated first,
        then the console channel.
        """
        self.store_port = self._createChannel()
        self.console_port = self._createChannel()
2864 def _createChannel(self):
2865 """Create an event channel to the domain.
2866 """
2867 try:
2868 if self.domid != None:
2869 return xc.evtchn_alloc_unbound(domid = self.domid,
2870 remote_dom = 0)
2871 except:
2872 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2873 raise
2875 def _resetChannels(self):
2876 """Reset all event channels in the domain.
2877 """
2878 try:
2879 if self.domid != None:
2880 return xc.evtchn_reset(dom = self.domid)
2881 except:
2882 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2883 raise
2887 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        For HVM boot or an explicit dom0 kernel there is nothing to do.
        Otherwise the first bootable disk is fed to the bootloader
        (default pygrub), mounting file-backed blktap images through a
        loopback vbd first when needed; the resulting image sxp is merged
        into self.info.

        @raise: VmError if no disk is bootable or the bootloader finds none.
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = auxbin.pathTo('pygrub')

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # A tap disk that is a plain file (no device node) may need
            # mounting before the bootloader can read it.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device.  pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always undo the loopback mount, even if bootloading failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2965 # VM Functions
2968 def _readVMDetails(self, params):
2969 """Read the specified parameters from the store.
2970 """
2971 try:
2972 return self._gatherVm(*params)
2973 except ValueError:
2974 # One of the int/float entries in params has a corresponding store
2975 # entry that is invalid. We recover, because older versions of
2976 # Xend may have put the entry there (memory/target, for example),
2977 # but this is in general a bad situation to have reached.
2978 log.exception(
2979 "Store corrupted at %s! Domain %d's configuration may be "
2980 "affected.", self.vmpath, self.domid)
2981 return []
    def _cleanupVm(self):
        """Cleanup VM resources.  Idempotent.  Nothrow guarantee."""

        self._unwatchVm()

        try:
            self._removeVm()
        except:
            # Deliberate catch-all: callers depend on the nothrow
            # guarantee during domain teardown.
            log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        # Estimated extra memory (in KiB) Xen needs on the receiving side.
        # NOTE: the arithmetic below relies on Python 2 integer floor
        # division ('/' on ints truncates).
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to the next whole MiB (expressed in KiB).
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            balloon.free(overhead_kb, self)
3008 def _unwatchVm(self):
3009 """Remove the watch on the VM path, if any. Idempotent. Nothrow
3010 guarantee."""
3011 try:
3012 try:
3013 if self.vmWatch:
3014 self.vmWatch.unwatch()
3015 finally:
3016 self.vmWatch = None
3017 except:
3018 log.exception("Unwatching VM path failed.")
    def testDeviceComplete(self):
        """ For Block IO migration safety we must ensure that
        the device has shutdown correctly, i.e. all blocks are
        flushed to disk
        """
        start = time.time()
        while True:
            # test flags whether any vbd/tap device is still registered.
            test = 0
            diff = time.time() - start
            vbds = self.getDeviceController('vbd').deviceIDs()
            taps = self.getDeviceController('tap').deviceIDs()
            for i in vbds + taps:
                test = 1
                log.info("Dev %s still active, looping...", i)
                # 0.1s pause per still-active device per pass.
                time.sleep(0.1)

            if test == 0:
                break
            # MIGRATE_TIMEOUT is a module-level constant (seconds).
            if diff >= MIGRATE_TIMEOUT:
                log.info("Dev still active but hit max loop timeout")
                break
    def testvifsComplete(self):
        """ In case vifs are released and then created for the same
        domain, we need to wait the device shut down.
        """
        start = time.time()
        while True:
            # test flags whether any vif device is still registered.
            test = 0
            diff = time.time() - start
            for i in self.getDeviceController('vif').deviceIDs():
                test = 1
                log.info("Dev %s still active, looping...", i)
                time.sleep(0.1)

            if test == 0:
                break
            # Give up after MIGRATE_TIMEOUT seconds even if vifs remain.
            if diff >= MIGRATE_TIMEOUT:
                log.info("Dev still active but hit max loop timeout")
                break
    def _storeVmDetails(self):
        """Write this VM's legacy configuration entries into xenstore and
        set their access permissions."""
        to_store = {}

        # Map legacy xenstore parameter names to their Xen-API config keys
        # and copy across every value that is actually set.
        for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
            info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
            if self._infoIsSet(info_key):
                to_store[key] = str(self.info[info_key])

        if self._infoIsSet("static_memory_min"):
            to_store["memory"] = str(self.info["static_memory_min"])
        if self._infoIsSet("static_memory_max"):
            to_store["maxmem"] = str(self.info["static_memory_max"])

        image_sxpr = self.info.image_sxpr()
        if image_sxpr:
            to_store['image'] = sxp.to_string(image_sxpr)

        # Initialise the restart counter only on first store.
        if not self._readVm('xend/restart_count'):
            to_store['xend/restart_count'] = str(0)

        # scrub_password keeps secrets out of the debug log.
        log.debug("Storing VM details: %s", scrub_password(to_store))

        self._writeVm(to_store)
        self._setVmPermissions()
3086 def _setVmPermissions(self):
3087 """Allow the guest domain to read its UUID. We don't allow it to
3088 access any other entry, for security."""
3089 xstransact.SetPermissions('%s/uuid' % self.vmpath,
3090 { 'dom' : self.domid,
3091 'read' : True,
3092 'write' : False })
3095 # Utility functions
3098 def __getattr__(self, name):
3099 if name == "state":
3100 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3101 log.warn("".join(traceback.format_stack()))
3102 return self._stateGet()
3103 else:
3104 raise AttributeError(name)
3106 def __setattr__(self, name, value):
3107 if name == "state":
3108 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3109 log.warn("".join(traceback.format_stack()))
3110 self._stateSet(value)
3111 else:
3112 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a new power state and, if it differs from the live
        state, wake waiters on state_updated and fire a Xen-API
        'power_state' modification event."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                # Imported lazily -- presumably to avoid a circular import
                # at module load time; TODO confirm.
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        """Derive the Xen-API power state of this domain from live
        hypervisor information (via dom_get) plus the presence of a
        managed checkpoint file."""
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
3155 def _infoIsSet(self, name):
3156 return name in self.info and self.info[name] is not None
    def _checkName(self, name):
        """Check if a vm name is valid. Valid names contain alphabetic
        characters, digits, or characters in '_-.:/+'.
        The same name cannot be used for more than one vm at the same time.

        @param name: name
        @raise: VmError if invalid
        """
        from xen.xend import XendDomain

        if name is None or name == '':
            raise VmError('Missing VM Name')

        if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
            raise VmError('Invalid VM Name')

        # A clash is only an error if the existing domain is a different
        # VM (compare by UUID, not name).
        dom = XendDomain.instance().domain_lookup_nr(name)
        if dom and dom.info['uuid'] != self.info['uuid']:
            # Py2 and/or idiom: append " as domain N" only when the
            # clashing domain is actually instantiated (domid set).
            raise VmError("VM name '%s' already exists%s" %
                          (name,
                           dom.domid is not None and
                           (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                # Domain no longer exists in the hypervisor; nothing to do.
                return

        # NOTE(review): a negative maxmem_kb apparently means "unlimited";
        # it is replaced with the host's total memory -- confirm.
        if info["maxmem_kb"] < 0:
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
3209 def sxpr(self, ignore_store = False, legacy_only = True):
3210 result = self.info.to_sxp(domain = self,
3211 ignore_devices = ignore_store,
3212 legacy_only = legacy_only)
3214 return result
3216 # Xen API
3217 # ----------------------------------------------------------------
3219 def get_uuid(self):
3220 dom_uuid = self.info.get('uuid')
3221 if not dom_uuid: # if it doesn't exist, make one up
3222 dom_uuid = uuid.createString()
3223 self.info['uuid'] = dom_uuid
3224 return dom_uuid
3226 def get_memory_static_max(self):
3227 return self.info.get('memory_static_max', 0)
3228 def get_memory_static_min(self):
3229 return self.info.get('memory_static_min', 0)
3230 def get_memory_dynamic_max(self):
3231 return self.info.get('memory_dynamic_max', 0)
3232 def get_memory_dynamic_min(self):
3233 return self.info.get('memory_dynamic_min', 0)
3235 # only update memory-related config values if they maintain sanity
3236 def _safe_set_memory(self, key, newval):
3237 oldval = self.info.get(key, 0)
3238 try:
3239 self.info[key] = newval
3240 self.info._memory_sanity_check()
3241 except Exception, ex:
3242 self.info[key] = oldval
3243 raise
3245 def set_memory_static_max(self, val):
3246 self._safe_set_memory('memory_static_max', val)
3247 def set_memory_static_min(self, val):
3248 self._safe_set_memory('memory_static_min', val)
3249 def set_memory_dynamic_max(self, val):
3250 self._safe_set_memory('memory_dynamic_max', val)
3251 def set_memory_dynamic_min(self, val):
3252 self._safe_set_memory('memory_dynamic_min', val)
3254 def get_vcpus_params(self):
3255 if self.getDomid() is None:
3256 return self.info['vcpus_params']
3258 retval = xc.sched_credit_domain_get(self.getDomid())
3259 return retval
3260 def get_power_state(self):
3261 return XEN_API_VM_POWER_STATE[self._stateGet()]
3262 def get_platform(self):
3263 return self.info.get('platform', {})
3264 def get_pci_bus(self):
3265 return self.info.get('pci_bus', '')
3266 def get_tools_version(self):
3267 return self.info.get('tools_version', {})
3268 def get_metrics(self):
3269 return self.metrics.get_uuid();
3272 def get_security_label(self, xspol=None):
3273 import xen.util.xsm.xsm as security
3274 label = security.get_security_label(self, xspol)
3275 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                       VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                        the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
              [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
                DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # Labels have the form <policy type>:<policy name>:<vm label>.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # All relabeling below happens under the policy writer lock;
            # released in the finally clause.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best-effort save; failure to persist does not
                        # fail the relabel operation.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3402 def get_on_shutdown(self):
3403 after_shutdown = self.info.get('actions_after_shutdown')
3404 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3405 return XEN_API_ON_NORMAL_EXIT[-1]
3406 return after_shutdown
3408 def get_on_reboot(self):
3409 after_reboot = self.info.get('actions_after_reboot')
3410 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3411 return XEN_API_ON_NORMAL_EXIT[-1]
3412 return after_reboot
3414 def get_on_suspend(self):
3415 # TODO: not supported
3416 after_suspend = self.info.get('actions_after_suspend')
3417 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3418 return XEN_API_ON_NORMAL_EXIT[-1]
3419 return after_suspend
3421 def get_on_crash(self):
3422 after_crash = self.info.get('actions_after_crash')
3423 if not after_crash or after_crash not in \
3424 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3425 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3426 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
    def get_dev_config_by_uuid(self, dev_class, dev_uuid):
        """Gets a device configuration either from XendConfig or
        from the DevController.

        @param dev_class: device class, either, 'vbd' or 'vif'
        @param dev_uuid: device UUID

        @rtype: dictionary
        """
        dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

        # shortcut if the domain isn't started because
        # the devcontrollers will have no better information
        # than XendConfig.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                                XEN_API_VM_POWER_STATE_SUSPENDED):
            if dev_config:
                return copy.deepcopy(dev_config)
            return None

        # instead of using dev_class, we use the dev_type
        # that is from XendConfig.
        controller = self.getDeviceController(dev_type)
        if not controller:
            return None

        all_configs = controller.getAllDeviceConfigurations()
        if not all_configs:
            return None

        updated_dev_config = copy.deepcopy(dev_config)
        for _devid, _devcfg in all_configs.items():
            if _devcfg.get('uuid') == dev_uuid:
                # First matching live config wins: merge it over the
                # stored config and return immediately.
                updated_dev_config.update(_devcfg)
                updated_dev_config['id'] = _devid
                return updated_dev_config

        # No live config matched; return the (copied) stored config.
        return updated_dev_config
    def get_dev_xenapi_config(self, dev_class, dev_uuid):
        """Return the device's configuration augmented with the Xen-API
        fields expected for the given class ('vif', 'vbd' or 'vtpm').

        Returns {} when the device has no known configuration.
        """
        config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
        if not config:
            return {}

        config['VM'] = self.get_uuid()

        if dev_class == 'vif':
            if not config.has_key('name'):
                config['name'] = config.get('vifname', '')
            if not config.has_key('MAC'):
                config['MAC'] = config.get('mac', '')
            if not config.has_key('type'):
                config['type'] = 'paravirtualised'
            if not config.has_key('device'):
                devid = config.get('id')
                if devid != None:
                    config['device'] = 'eth%s' % devid
                else:
                    config['device'] = ''

            if not config.has_key('network'):
                try:
                    bridge = config.get('bridge', None)
                    if bridge is None:
                        from xen.util import Brctl
                        # Invert bridge->interfaces into interface->bridge.
                        if_to_br = dict([(i,b)
                                         for (b,ifs) in Brctl.get_state().items()
                                         for i in ifs])
                        vifname = "vif%s.%s" % (self.getDomid(),
                                                config.get('id'))
                        bridge = if_to_br.get(vifname, None)
                    config['network'] = \
                        XendNode.instance().bridge_to_network(
                        config.get('bridge')).get_uuid()
                except Exception:
                    log.exception('bridge_to_network')
                    # Ignore this for now -- it may happen if the device
                    # has been specified using the legacy methods, but at
                    # some point we're going to have to figure out how to
                    # handle that properly.

            config['MTU'] = 1500 # TODO

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                xennode = XendNode.instance()
                # NOTE(review): 'devid' is only assigned above when the
                # config lacked a 'device' key; otherwise it is unbound
                # here and this raises NameError -- confirm.
                rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
                config['io_read_kbs'] = rx_bps/1024
                config['io_write_kbs'] = tx_bps/1024
                rx, tx = xennode.get_vif_stat(self.domid, devid)
                config['io_total_read_kbs'] = rx/1024
                config['io_total_write_kbs'] = tx/1024
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0
                config['io_total_read_kbs'] = 0.0
                config['io_total_write_kbs'] = 0.0

        config['security_label'] = config.get('security_label', '')

        if dev_class == 'vbd':

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                controller = self.getDeviceController(dev_class)
                devid, _1, _2 = controller.getDeviceDetails(config)
                xennode = XendNode.instance()
                rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
                config['io_read_kbs'] = rd_blkps
                config['io_write_kbs'] = wr_blkps
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0

            config['VDI'] = config.get('VDI', '')
            config['device'] = config.get('dev', '')
            # A "name:cdrom"-style device spec carries the VBD type.
            if ':' in config['device']:
                vbd_name, vbd_type = config['device'].split(':', 1)
                config['device'] = vbd_name
                if vbd_type == 'cdrom':
                    config['type'] = XEN_API_VBD_TYPE[0]
                else:
                    config['type'] = XEN_API_VBD_TYPE[1]

            config['driver'] = 'paravirtualised' # TODO
            config['image'] = config.get('uname', '')

            if config.get('mode', 'r') == 'r':
                config['mode'] = 'RO'
            else:
                config['mode'] = 'RW'

        if dev_class == 'vtpm':
            if not config.has_key('type'):
                config['type'] = 'paravirtualised' # TODO
            if not config.has_key('backend'):
                config['backend'] = "00000000-0000-0000-0000-000000000000"

        return config
3566 def get_dev_property(self, dev_class, dev_uuid, field):
3567 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3568 try:
3569 return config[field]
3570 except KeyError:
3571 raise XendError('Invalid property for device: %s' % field)
3573 def set_dev_property(self, dev_class, dev_uuid, field, value):
3574 self.info['devices'][dev_uuid][1][field] = value
3576 def get_vcpus_util(self):
3577 vcpu_util = {}
3578 xennode = XendNode.instance()
3579 if 'VCPUs_max' in self.info and self.domid != None:
3580 for i in range(0, self.info['VCPUs_max']):
3581 util = xennode.get_vcpu_util(self.domid, i)
3582 vcpu_util[str(i)] = util
3584 return vcpu_util
3586 def get_consoles(self):
3587 return self.info.get('console_refs', [])
3589 def get_vifs(self):
3590 return self.info.get('vif_refs', [])
3592 def get_vbds(self):
3593 return self.info.get('vbd_refs', [])
3595 def get_vtpms(self):
3596 return self.info.get('vtpm_refs', [])
3598 def get_dpcis(self):
3599 return XendDPCI.get_by_VM(self.info.get('uuid'))
3601 def get_dscsis(self):
3602 return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        # blktap-backed images are handled by the 'tap' controller.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        # Hot-plug immediately when the domain is live.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll back the config entry added above, then re-raise.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        # NOTE(review): despite the docstring, this returns the devid, not
        # the device uuid; and 'config' is unbound (NameError) when the
        # domain is not RUNNING -- confirm intended behaviour.
        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        # Hot-plug immediately when the domain is live.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):

            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Roll back the config entry added above, then re-raise.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_vtpm(self, xenapi_vtpm):
        """Create a VTPM device from the passed struct in Xen API format.

        @return: uuid of the device
        @rtype: string
        @raise VmError: domain not halted, or already has a vTPM
        @raise XendError: device creation failed
        """

        if self._stateGet() not in (DOM_STATE_HALTED,):
            raise VmError("Can only add vTPM to a halted domain.")
        # At most one vTPM per domain.
        if self.get_vtpms() != []:
            raise VmError('Domain already has a vTPM.')
        dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
        if not dev_uuid:
            raise XendError('Failed to create device')

        return dev_uuid
    def create_console(self, xenapi_console):
        """ Create a console device from a Xen API struct.

        @return: uuid of device
        @rtype: string
        @raise VmError: domain is not halted
        @raise XendError: device creation failed
        """
        if self._stateGet() not in (DOM_STATE_HALTED,):
            raise VmError("Can only add console to a halted domain.")

        dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
        if not dev_uuid:
            raise XendError('Failed to create device')

        return dev_uuid
3724 def set_console_other_config(self, console_uuid, other_config):
3725 self.info.console_update(console_uuid, 'other_config', other_config)
    def create_dpci(self, xenapi_pci):
        """Create pci device from the passed struct in Xen API format.

        @param xenapi_pci: DPCI struct from Xen API
        @rtype: bool
        #@rtype: string
        @return: True if successfully created device
        #@return: UUID
        """

        dpci_uuid = uuid.createString()

        # Flatten the options dict into [[key, value], ...] pairs.
        dpci_opts = []
        opts_dict = xenapi_pci.get('options')
        for k in opts_dict.keys():
            dpci_opts.append([k, opts_dict[k]])
        opts_sxp = pci_opts_list_to_sxp(dpci_opts)

        # Convert xenapi to sxp
        ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

        dev_sxp = ['dev',
                   ['domain', '0x%02x' % ppci.get_domain()],
                   ['bus', '0x%02x' % ppci.get_bus()],
                   ['slot', '0x%02x' % ppci.get_slot()],
                   ['func', '0x%1x' % ppci.get_func()],
                   ['vslot', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                   ['uuid', dpci_uuid]]
        dev_sxp = sxp.merge(dev_sxp, opts_sxp)

        target_pci_sxp = ['pci', dev_sxp, ['state', 'Initialising'] ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Domain not running: merge into the stored 'pci' device record.
            old_pci_sxp = self._getDeviceInfo_pci(0)

            if old_pci_sxp is None:
                dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                # Append the new dev to the existing pci device list.
                new_pci_sxp = ['pci']
                for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                    new_pci_sxp.append(existing_dev)
                new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

                dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_pci_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Domain running: hot-plug through device_configure.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dpci_uuid
    def create_dscsi(self, xenapi_dscsi):
        """Create scsi device from the passed struct in Xen API format.

        @param xenapi_dscsi: DSCSI struct from Xen API
        @rtype: string
        @return: UUID
        """

        dscsi_uuid = uuid.createString()

        # Convert xenapi to sxp
        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
        # devid is the host number of the virtual H:C:T:L tuple.
        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
        target_vscsi_sxp = \
            ['vscsi',
                ['dev',
                    ['devid', devid],
                    ['p-devname', pscsi.get_dev_name()],
                    ['p-dev', pscsi.get_physical_HCTL()],
                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                    ['state', xenbusState['Initialising']],
                    ['uuid', dscsi_uuid]
                ],
                ['feature-host', 0]
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Domain not running: merge into the stored 'vscsi' record.
            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

            if cur_vscsi_sxp is None:
                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                # Append the new dev to the existing vscsi device list.
                new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                    new_vscsi_sxp.append(existing_dev)
                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_vscsi_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Domain running: hot-plug through device_configure.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Destroy the device dev_uuid of class dev_type.

        For a live (running/paused) domain the backend device is torn
        down first; the stored config entry and its reference are always
        removed, even if teardown raises (see the finally clause).

        @raise XendError: unknown device, or devid unavailable
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            # Always drop the config record and its reference.
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
3862 def destroy_vbd(self, dev_uuid):
3863 self.destroy_device_by_uuid('vbd', dev_uuid)
3865 def destroy_vif(self, dev_uuid):
3866 self.destroy_device_by_uuid('vif', dev_uuid)
3868 def destroy_vtpm(self, dev_uuid):
3869 self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Detach the DPCI device dev_uuid from this domain.

        The device is located in the stored 'pci' sxp by matching its
        BDF string against the physical PCI device's name.
        """

        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        # dev_uuid is rebound to the uuid of the aggregate pci record.
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        for dev in sxp.children(old_pci_sxp, 'dev'):
            pci_dev = {}
            pci_dev['domain'] = sxp.child_value(dev, 'domain')
            pci_dev['bus'] = sxp.child_value(dev, 'bus')
            pci_dev['slot'] = sxp.child_value(dev, 'slot')
            pci_dev['func'] = sxp.child_value(dev, 'func')
            if ppci.get_name() == pci_dict_to_bdf_str(pci_dev):
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Domain not running: update the stored config; drop the
            # record entirely when no devices remain.
            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach the DSCSI device dev_uuid from this domain.

        The device is located in the stored 'vscsi' sxp by matching its
        virtual HCTL against each entry's 'v-dev' value.
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        # dev_uuid is rebound to the uuid of the aggregate vscsi record.
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            # Domain not running: update the stored config; drop the
            # record entirely when no devices remain.
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_xapi_instances(self):
        """Destroy Xen-API instances stored in XendAPIStore.
        """
        # Xen-API classes based on XendBase have their instances stored
        # in XendAPIStore. Cleanup these instances here, if they are supposed
        # to be destroyed when the parent domain is dead.

        # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
        # XendBase and there's no need to remove them from XendAPIStore.

        from xen.xend import XendDomain
        if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
            # domain still exists.
            return

        # Destroy the VMMetrics instance.
        if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
                is not None:
            self.metrics.destroy()

        # Destroy DPCI instances.
        for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
            XendAPIStore.deregister(dpci_uuid, "DPCI")

        # Destroy DSCSI instances.
        for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
            XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3973 def has_device(self, dev_class, dev_uuid):
3974 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3976 def __str__(self):
3977 return '<domain id=%s name=%s memory=%s state=%s>' % \
3978 (str(self.domid), self.info['name_label'],
3979 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3981 __repr__ = __str__