ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19276:cff29d694a89

xend: Handle missing s3_integrity value, default to zero.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Mar 05 17:50:05 2009 +0000 (2009-03-05)
parents 43019597f85c
children 5997e86988f6
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
# Module-level tunables and singletons.

# Seconds to wait before giving up on a migration — TODO confirm against users.
MIGRATE_TIMEOUT = 30.0
# Loopback block device the bootloader uses to access guest images.
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

# Shared hypervisor control-interface handle (xen.lowlevel.xc).
xc = xen.lowlevel.xc.xc()
# Global xend options singleton.
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    dom_cfg = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a second live domain with the same name or UUID.
    existing = XendDomain.instance().domain_lookup_nr(dom_cfg["name_label"])
    if existing is None or existing.domid is None:
        existing = XendDomain.instance().domain_lookup_nr(dom_cfg["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (dom_cfg["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    vm = XendDomainInfo(dom_cfg)
    try:
        vm.start()
    except:
        # Construction failed part-way: tear down before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        vm.start()
        return vm
    except:
        # Clean up the partially-constructed domain before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (dominfo dictionary)
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        # NOTE(review): if this first Read itself raised XendError, vmpath
        # would be unbound at the XendDomainInfo(...) call below — presumably
        # Read only returns None/'' on missing paths; verify.
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    # Rebuild the xenstore entries when the checks above found them
    # missing or inconsistent.
    if needs_reinitialising:
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                        resume = True)
    try:
        vm.resume()
    except:
        # Tear the half-restored domain down before propagating the error.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating a VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object

    @rtype: XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look up a domain by its name label.

    @param name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    return XendDomain.instance().domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Map a hypervisor shutdown code to a human-readable reason.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason, "?" when the code is unknown
    @rtype: string
    """
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
276 def dom_get(dom):
277 """Get info from xen for an existing domain.
279 @param dom: domain id
280 @type dom: int
281 @return: info or None
282 @rtype: dictionary
283 """
284 try:
285 domlist = xc.domain_getinfo(dom, 1)
286 if domlist and dom == domlist[0]['domid']:
287 return domlist[0]
288 except Exception, err:
289 # ignore missing domain
290 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
291 return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings assigned to a domain via the pci backend.

    Reads /local/domain/0/backend/pci/<domid>/0/ from xenstore; an absent
    or empty 'num_devs' entry yields an empty list.
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        return []
    return [xstransact.Read(path + 'dev-%i' % i) for i in range(int(num_devs))]
305 def do_FLR(domid):
306 from xen.xend.server.pciif import parse_pci_name, PciDevice
307 dev_str_list = get_assigned_pci_devices(domid)
309 for dev_str in dev_str_list:
310 (dom, b, d, f) = parse_pci_name(dev_str)
311 try:
312 dev = PciDevice(dom, b, d, f)
313 except Exception, e:
314 raise VmError("pci: failed to locate device and "+
315 "parse it's resources - "+str(e))
316 dev.do_FLR()
318 class XendDomainInfo:
319 """An object represents a domain.
321 @TODO: try to unify dom and domid, they mean the same thing, but
322 xc refers to it as dom, and everywhere else, including
323 xenstore it is domid. The best way is to change xc's
324 python interface.
326 @ivar info: Parsed configuration
327 @type info: dictionary
328 @ivar domid: Domain ID (if VM has started)
329 @type domid: int or None
330 @ivar vmpath: XenStore path to this VM.
331 @type vmpath: string
332 @ivar dompath: XenStore path to this Domain.
333 @type dompath: string
334 @ivar image: Reference to the VM Image.
335 @type image: xen.xend.image.ImageHandler
336 @ivar store_port: event channel to xenstored
337 @type store_port: int
338 @ivar console_port: event channel to xenconsoled
339 @type console_port: int
340 @ivar store_mfn: xenstored mfn
341 @type store_mfn: int
342 @ivar console_mfn: xenconsoled mfn
343 @type console_mfn: int
344 @ivar notes: OS image notes
345 @type notes: dictionary
346 @ivar vmWatch: reference to a watch on the xenstored vmpath
347 @type vmWatch: xen.xend.xenstore.xswatch
348 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
349 @type shutdownWatch: xen.xend.xenstore.xswatch
350 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
351 @type shutdownStartTime: float or None
352 # @ivar state: Domain state
353 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
354 @ivar state_updated: lock for self.state
355 @type state_updated: threading.Condition
356 @ivar refresh_shutdown_lock: lock for polling shutdown state
357 @type refresh_shutdown_lock: threading.Condition
358 @ivar _deviceControllers: device controller cache for this domain
359 @type _deviceControllers: dict 'string' to DevControllers
360 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type domid: int
        @keyword dompath: Set initial dompath (if any)
        @type dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type priv: bool
        @keyword resume: Is this domain being resumed?
        @type resume: bool
        """

        self.info = info
        # Fall back to the domid recorded in the config when not given.
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                # Candidate path already occupied in xenstore: retry with
                # the next integer suffix.
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                # Read failure: keep the current candidate path.
                pass

        self.dompath = dompath

        # Runtime state, filled in as the domain is constructed.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # xenstore watches and shutdown tracking.
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        # Legacy state flags, all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
438 #
439 # Public functions available through XMLRPC
440 #
443 def start(self, is_managed = False):
444 """Attempts to start the VM by do the appropriate
445 initialisation if it not started.
446 """
447 from xen.xend import XendDomain
449 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
450 try:
451 XendTask.log_progress(0, 30, self._constructDomain)
452 XendTask.log_progress(31, 60, self._initDomain)
454 XendTask.log_progress(61, 70, self._storeVmDetails)
455 XendTask.log_progress(71, 80, self._storeDomDetails)
456 XendTask.log_progress(81, 90, self._registerWatches)
457 XendTask.log_progress(91, 100, self.refreshShutdown)
459 xendomains = XendDomain.instance()
460 xennode = XendNode.instance()
462 # save running configuration if XendDomains believe domain is
463 # persistent
464 if is_managed:
465 xendomains.managed_config_save(self)
467 if xennode.xenschedinfo() == 'credit':
468 xendomains.domain_sched_credit_set(self.getDomid(),
469 self.getWeight(),
470 self.getCap())
471 except:
472 log.exception('VM start failed')
473 self.destroy()
474 raise
475 else:
476 raise XendError('VM already running')
478 def resume(self):
479 """Resumes a domain that has come back from suspension."""
480 state = self._stateGet()
481 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
482 try:
483 self._constructDomain()
485 try:
486 self._setCPUAffinity()
487 except:
488 # usually a CPU we want to set affinity to does not exist
489 # we just ignore it so that the domain can still be restored
490 log.warn("Cannot restore CPU affinity")
492 self._storeVmDetails()
493 self._createChannels()
494 self._createDevices()
495 self._storeDomDetails()
496 self._endRestore()
497 except:
498 log.exception('VM resume failed')
499 self.destroy()
500 raise
501 else:
502 raise XendError('VM is not suspended; it is %s'
503 % XEN_API_VM_POWER_STATE[state])
505 def shutdown(self, reason):
506 """Shutdown a domain by signalling this via xenstored."""
507 log.debug('XendDomainInfo.shutdown(%s)', reason)
508 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
509 raise XendError('Domain cannot be shutdown')
511 if self.domid == 0:
512 raise XendError('Domain 0 cannot be shutdown')
514 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
515 raise XendError('Invalid reason: %s' % reason)
516 self._removeVm('xend/previous_restart_time')
517 self.storeDom("control/shutdown", reason)
519 # HVM domain shuts itself down only if it has PV drivers
520 if self.info.is_hvm():
521 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
522 hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
523 if not hvm_pvdrv or hvm_s_state != 0:
524 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
525 log.info("HVM save:remote shutdown dom %d!", self.domid)
526 xc.domain_shutdown(self.domid, code)
528 def pause(self):
529 """Pause domain
531 @raise XendError: Failed pausing a domain
532 """
533 try:
534 xc.domain_pause(self.domid)
535 self._stateSet(DOM_STATE_PAUSED)
536 except Exception, ex:
537 log.exception(ex)
538 raise XendError("Domain unable to be paused: %s" % str(ex))
540 def unpause(self):
541 """Unpause domain
543 @raise XendError: Failed unpausing a domain
544 """
545 try:
546 xc.domain_unpause(self.domid)
547 self._stateSet(DOM_STATE_RUNNING)
548 except Exception, ex:
549 log.exception(ex)
550 raise XendError("Domain unable to be unpaused: %s" % str(ex))
552 def send_sysrq(self, key):
553 """ Send a Sysrq equivalent key via xenstored."""
554 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
555 raise XendError("Domain '%s' is not started" % self.info['name_label'])
557 asserts.isCharConvertible(key)
558 self.storeDom("control/sysrq", '%c' % key)
560 def sync_pcidev_info(self):
562 if not self.info.is_hvm():
563 return
565 devid = '0'
566 dev_info = self._getDeviceInfo_pci(devid)
567 if dev_info is None:
568 return
570 # get the virtual slot info from xenstore
571 dev_uuid = sxp.child_value(dev_info, 'uuid')
572 pci_conf = self.info['devices'][dev_uuid][1]
573 pci_devs = pci_conf['devs']
575 count = 0
576 vslots = None
577 while vslots is None and count < 20:
578 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
579 % (self.getDomid(), devid))
580 time.sleep(0.1)
581 count += 1
582 if vslots is None:
583 log.error("Device model didn't tell the vslots for PCI device")
584 return
586 #delete last delim
587 if vslots[-1] == ";":
588 vslots = vslots[:-1]
590 slot_list = vslots.split(';')
591 if len(slot_list) != len(pci_devs):
592 log.error("Device model's pci dev num dismatch")
593 return
595 #update the vslot info
596 count = 0;
597 for x in pci_devs:
598 x['vslt'] = slot_list[count]
599 count += 1
602 def hvm_pci_device_create(self, dev_config):
603 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
604 % scrub_password(dev_config))
606 if not self.info.is_hvm():
607 raise VmError("hvm_pci_device_create called on non-HVM guest")
609 #all the PCI devs share one conf node
610 devid = '0'
612 new_dev = dev_config['devs'][0]
613 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
615 #check conflict before trigger hotplug event
616 if dev_info is not None:
617 dev_uuid = sxp.child_value(dev_info, 'uuid')
618 pci_conf = self.info['devices'][dev_uuid][1]
619 pci_devs = pci_conf['devs']
620 for x in pci_devs:
621 if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
622 int(x['vslt'], 16) != 0 ):
623 raise VmError("vslot %s already have a device." % (new_dev['vslt']))
625 if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
626 int(x['bus'], 16) == int(new_dev['bus'], 16) and
627 int(x['slot'], 16) == int(new_dev['slot'], 16) and
628 int(x['func'], 16) == int(new_dev['func'], 16) ):
629 raise VmError("device is already inserted")
631 # Test whether the devices can be assigned with VT-d
632 pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
633 new_dev['bus'],
634 new_dev['slot'],
635 new_dev['func'])
636 bdf = xc.test_assign_device(self.domid, pci_str)
637 if bdf != 0:
638 if bdf == -1:
639 raise VmError("failed to assign device: maybe the platform"
640 " doesn't support VT-d, or VT-d isn't enabled"
641 " properly?")
642 bus = (bdf >> 16) & 0xff
643 devfn = (bdf >> 8) & 0xff
644 dev = (devfn >> 3) & 0x1f
645 func = devfn & 0x7
646 raise VmError("fail to assign device(%x:%x.%x): maybe it has"
647 " already been assigned to other domain, or maybe"
648 " it doesn't exist." % (bus, dev, func))
650 # Here, we duplicate some checkings (in some cases, we mustn't allow
651 # a device to be hot-plugged into an HVM guest) that are also done in
652 # pci_device_configure()'s self.device_create(dev_sxp) or
653 # dev_control.reconfigureDevice(devid, dev_config).
654 # We must make the checkings before sending the command 'pci-ins' to
655 # ioemu.
657 # Test whether the device is owned by pciback. For instance, we can't
658 # hotplug a device being used by Dom0 itself to an HVM guest.
659 from xen.xend.server.pciif import PciDevice, parse_pci_name
660 domain = int(new_dev['domain'],16)
661 bus = int(new_dev['bus'],16)
662 dev = int(new_dev['slot'],16)
663 func = int(new_dev['func'],16)
664 try:
665 pci_device = PciDevice(domain, bus, dev, func)
666 except Exception, e:
667 raise VmError("pci: failed to locate device and "+
668 "parse it's resources - "+str(e))
669 if pci_device.driver!='pciback':
670 raise VmError(("pci: PCI Backend does not own device "+ \
671 "%s\n"+ \
672 "See the pciback.hide kernel "+ \
673 "command-line parameter or\n"+ \
674 "bind your slot/device to the PCI backend using sysfs" \
675 )%(pci_device.name))
677 # Check non-page-aligned MMIO BAR.
678 if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
679 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
680 pci_device.name)
682 # Check the co-assignment.
683 # To pci-attach a device D to domN, we should ensure each of D's
684 # co-assignment devices hasn't been assigned, or has been assigned to
685 # domN.
686 coassignment_list = pci_device.find_coassigned_devices()
687 assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
688 for pci_str in coassignment_list:
689 (domain, bus, dev, func) = parse_pci_name(pci_str)
690 dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
691 if xc.test_assign_device(self.domid, dev_str) == 0:
692 continue
693 if not pci_str in assigned_pci_device_str_list:
694 raise VmError(('pci: failed to pci-attach %s to dom%d" + \
695 " because one of its co-assignment device %s has been" + \
696 " assigned to other domain.' \
697 )% (pci_device.name, self.domid, pci_str))
699 opts = ''
700 if 'opts' in new_dev and len(new_dev['opts']) > 0:
701 config_opts = new_dev['opts']
702 config_opts = map(lambda (x, y): x+'='+y, config_opts)
703 opts = ',' + reduce(lambda x, y: x+','+y, config_opts)
705 bdf_str = "%s:%s:%s.%s%s@%s" % (new_dev['domain'],
706 new_dev['bus'],
707 new_dev['slot'],
708 new_dev['func'],
709 opts,
710 new_dev['vslt'])
711 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Register the device in self.info first; a devid is only allocated
        # when the domain is actually running.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Creation failed: roll back all the bookkeeping added above.
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked in the vbd reference list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Domain not running: record the config only.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        # Detaching requires the pci platform node to exist already.
        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                self.hvm_pci_device_create(dev_config)
                # Update vslt with the slot the device model reported.
                vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                       % self.getDomid())
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt
            else:
                # HVM PCI device detachment: find the virtual slot of the
                # requested BDF among the currently attached devices.
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslt = '0x0'
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslt = x['vslt']
                        break
                # '0x0' doubles as the "not found" marker here.
                if vslt == '0x0':
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslt, 16))
                # Update vslt
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device.

        Mirrors pci_device_configure: handles attach ('Initialising') and
        detach ('Closing') requests for virtual SCSI devices, both for a
        running domain and for a dormant config-only domain.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @return: True if the device was updated, False for non-vscsi input
        @rtype: boolean
        """
        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]
        req_devid = int(dev['devid'])
        existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
        state = dev['state']

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            if existing_dev_info is None:
                self.device_create(dev_sxp)
                return True
            elif existing_dev_info == "exists":
                raise XendError("The virtual device %s is already defined" % dev['v-dev'])

        elif state == xenbusState['Closing']:
            if existing_dev_info is None:
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # Running domain: reconfigure the live backend.
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Dormant domain: rebuild the stored SXP config by hand.
            cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid, None)
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            # Copy over existing devices, dropping the one being detached
            # (in feature-host mode the whole node goes away instead).
            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    if int(cur_mode[1]) == 1:
                        continue
                    cur_dev_vdev = sxp.child_value(cur_dev, 'v-dev')
                    if cur_dev_vdev == dev['v-dev']:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
933 def device_configure(self, dev_sxp, devid = None):
934 """Configure an existing device.
936 @param dev_config: device configuration
937 @type dev_config: SXP object (parsed config)
938 @param devid: device id
939 @type devid: int
940 @return: Returns True if successfully updated device
941 @rtype: boolean
942 """
944 # convert device sxp to a dict
945 dev_class = sxp.name(dev_sxp)
946 dev_config = {}
948 if dev_class == 'pci':
949 return self.pci_device_configure(dev_sxp)
951 if dev_class == 'vscsi':
952 return self.vscsi_device_configure(dev_sxp)
954 for opt_val in dev_sxp[1:]:
955 try:
956 dev_config[opt_val[0]] = opt_val[1]
957 except IndexError:
958 pass
960 # use DevController.reconfigureDevice to change device config
961 dev_control = self.getDeviceController(dev_class)
962 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
964 # update XendConfig with new device info
965 if dev_uuid:
966 self.info.device_update(dev_uuid, dev_sxp)
968 return True
970 def waitForDevices(self):
971 """Wait for this domain's configured devices to connect.
973 @raise VmError: if any device fails to initialise.
974 """
975 for devclass in XendDevices.valid_devices():
976 self.getDeviceController(devclass).waitForDevices()
978 def hvm_destroyPCIDevice(self, vslot):
979 log.debug("hvm_destroyPCIDevice called %s", vslot)
981 if not self.info.is_hvm():
982 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
984 #all the PCI devs share one conf node
985 devid = '0'
986 vslot = int(vslot)
987 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
988 dev_uuid = sxp.child_value(dev_info, 'uuid')
990 #delete the pci bdf config under the pci device
991 pci_conf = self.info['devices'][dev_uuid][1]
992 pci_len = len(pci_conf['devs'])
994 #find the pass-through device with the virtual slot
995 devnum = 0
996 for x in pci_conf['devs']:
997 if int(x['vslt'], 16) == vslot:
998 break
999 devnum += 1
1001 if devnum >= pci_len:
1002 raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))
1004 if vslot == 0:
1005 raise VmError("Device @ vslot 0x%x do not support hotplug." % (vslot))
1007 # Check the co-assignment.
1008 # To pci-detach a device D from domN, we should ensure: for each DD in the
1009 # list of D's co-assignment devices, DD is not assigned (to domN).
1011 from xen.xend.server.pciif import PciDevice
1012 domain = int(x['domain'],16)
1013 bus = int(x['bus'],16)
1014 dev = int(x['slot'],16)
1015 func = int(x['func'],16)
1016 try:
1017 pci_device = PciDevice(domain, bus, dev, func)
1018 except Exception, e:
1019 raise VmError("pci: failed to locate device and "+
1020 "parse it's resources - "+str(e))
1021 coassignment_list = pci_device.find_coassigned_devices()
1022 coassignment_list.remove(pci_device.name)
1023 assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
1024 for pci_str in coassignment_list:
1025 if pci_str in assigned_pci_device_str_list:
1026 raise VmError(('pci: failed to pci-detach %s from dom%d" + \
1027 " because one of its co-assignment device %s is still " + \
1028 " assigned to the domain.' \
1029 )% (pci_device.name, self.domid, pci_str))
1032 bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
1033 log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
1035 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1037 return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing it from the managed
        configuration as well.

        @param deviceClass: device class, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id or name understood by the controller
        @param force: skip waiting for the removal to complete
        @param rm_cfg: also delete the device from self.info and save
            the managed configuration
        @return: result of the controller's destroyDevice (None if the
            domain is not running)
        @raise XendError: if rm_cfg is set and the device is not defined
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number.  A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs.  A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued.  It is probable because destroyDevice is
                # issued first.
                # NOTE(review): if no sxpr entry matches 'dev', 'backend'
                # is referenced unassigned below (NameError) -- presumably
                # the device always appears in sxprs; confirm.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Find the vif by MAC, since its number may change.
                    # NOTE(review): 'mac' has the same unassigned-name
                    # hazard as 'backend' above.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            # Drop the device from the stored config and persist it.
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
1101 def getDeviceSxprs(self, deviceClass):
1102 if deviceClass == 'pci':
1103 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1104 if dev_info is None:
1105 return []
1106 dev_uuid = sxp.child_value(dev_info, 'uuid')
1107 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1108 pci_len = len(pci_devs)
1109 return pci_devs
1110 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1111 return self.getDeviceController(deviceClass).sxprs()
1112 else:
1113 sxprs = []
1114 dev_num = 0
1115 for dev_type, dev_info in self.info.all_devices_sxpr():
1116 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
1117 (deviceClass != 'vbd' and dev_type != deviceClass):
1118 continue
1120 if deviceClass == 'vscsi':
1121 vscsi_devs = ['devs', []]
1122 for vscsi_dev in sxp.children(dev_info, 'dev'):
1123 vscsi_dev.append(['frontstate', None])
1124 vscsi_devs[1].append(vscsi_dev)
1125 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1126 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1127 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1128 elif deviceClass == 'vbd':
1129 dev = sxp.child_value(dev_info, 'dev')
1130 if 'ioemu:' in dev:
1131 (_, dev) = dev.split(':', 1)
1132 try:
1133 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1134 except ValueError:
1135 dev_name = dev
1136 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1137 sxprs.append([dev_num, dev_info])
1138 else:
1139 sxprs.append([dev_num, dev_info])
1140 dev_num += 1
1141 return sxprs
1143 def getBlockDeviceClass(self, devid):
1144 # To get a device number from the devid,
1145 # we temporarily use the device controller of VBD.
1146 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1147 dev_info = self._getDeviceInfo_vbd(dev)
1148 if dev_info:
1149 return dev_info[0]
1151 def _getDeviceInfo_vif(self, mac):
1152 for dev_type, dev_info in self.info.all_devices_sxpr():
1153 if dev_type != 'vif':
1154 continue
1155 if mac == sxp.child_value(dev_info, 'mac'):
1156 return dev_info
1158 def _getDeviceInfo_vbd(self, devid):
1159 for dev_type, dev_info in self.info.all_devices_sxpr():
1160 if dev_type != 'vbd' and dev_type != 'tap':
1161 continue
1162 dev = sxp.child_value(dev_info, 'dev')
1163 dev = dev.split(':')[0]
1164 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1165 if devid == dev:
1166 return dev_info
1168 def _getDeviceInfo_pci(self, devid):
1169 for dev_type, dev_info in self.info.all_devices_sxpr():
1170 if dev_type != 'pci':
1171 continue
1172 return dev_info
1173 return None
1175 def _getDeviceInfo_vscsi(self, devid, vdev):
1176 devid = int(devid)
1177 for dev_type, dev_info in self.info.all_devices_sxpr():
1178 if dev_type != 'vscsi':
1179 continue
1180 existing_dev_uuid = sxp.child_value(dev_info, 'uuid')
1181 existing_conf = self.info['devices'][existing_dev_uuid][1]
1182 existing_dev = existing_conf['devs'][0]
1183 existing_devid = int(existing_dev['devid'])
1184 existing_vdev = existing_dev['v-dev']
1186 if vdev == existing_vdev:
1187 return "exists"
1189 if devid == existing_devid:
1190 return dev_info
1192 return None
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: for dom0, if the target would fall below the
            configured dom0 minimum memory.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        # Current dynamic-max, converted from bytes to MiB.
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never balloon dom0 below its configured minimum.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        # Record the new target (in bytes) in the managed config.
        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                # Growing: make sure enough free memory is available
                # (balloon.free takes KiB).
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # target << 10 converts MiB to KiB for the xenstore node.
            self.storeDom("memory/target", target << 10)
            # NOTE(review): xc.domain_set_target_mem presumably takes
            # KiB (target * 1024) -- confirm against libxc bindings.
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
1221 def setMemoryMaximum(self, limit):
1222 """Set the maximum memory limit of this domain
1223 @param limit: In MiB.
1224 """
1225 log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
1226 self.info['name_label'], str(self.domid), limit)
1228 maxmem_cur = self.get_memory_static_max()
1229 MiB = 1024 * 1024
1230 self._safe_set_memory('memory_static_max', limit * MiB)
1232 if self.domid >= 0:
1233 maxmem = int(limit) * 1024
1234 try:
1235 return xc.domain_setmaxmem(self.domid, maxmem)
1236 except Exception, ex:
1237 self._safe_set_memory('memory_static_max', maxmem_cur)
1238 raise XendError(str(ex))
1239 xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain.

        For a running domain the per-VCPU data is read from the
        hypervisor; for an inactive domain (domid is None) placeholder
        values are reported, with the stored CPU affinity if any.
        @raise XendError: wrapping any RuntimeError from the hypervisor.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    # Live data straight from the hypervisor.
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Domain is not running: report placeholders; use
                    # the stored affinity, or all CPUs when unset.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        # Fetch this domain's current record from the hypervisor
        # (None if no such domain currently exists).
        return dom_get(self.domid)
    # internal functions ... TODO: re-categorised

    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store.  This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0, whose memory/vcpu settings come
            from Xen itself rather than the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0's memory and vcpu values are authoritative in Xen.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Map legacy names onto their Xen-API equivalents where
                # one exists; 'memory'/'maxmem' have special targets.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead.  This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only populate devices from the store when none are known yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh the VT100 and VNC console records from xenstore.

        @param transaction: optional existing xenstore transaction to
            read through; reads go via readDom otherwise.
        """
        # dom0 and not-yet-created domains have no console records.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # No serial console known yet: create one.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                # Update the existing console's location in place.
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', 'localhost')
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Only the first vfb is considered; stop either way.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
    # Function to update xenstore /vm/*

    def _readVm(self, *args):
        # Read value(s) under this VM's /vm/<uuid> xenstore subtree.
        return xstransact.Read(self.vmpath, *args)

    def _writeVm(self, *args):
        # Write key/value pair(s) under /vm/<uuid>.
        return xstransact.Write(self.vmpath, *args)

    def _removeVm(self, *args):
        # Remove path(s) under /vm/<uuid>.
        return xstransact.Remove(self.vmpath, *args)

    def _gatherVm(self, *args):
        # Gather several values from /vm/<uuid> at once.
        return xstransact.Gather(self.vmpath, *args)

    def _listRecursiveVm(self, *args):
        # Recursively list entries under /vm/<uuid>.
        return xstransact.ListRecursive(self.vmpath, *args)

    def storeVm(self, *args):
        # Store key/value pair(s) under /vm/<uuid>.
        return xstransact.Store(self.vmpath, *args)

    def permissionsVm(self, *args):
        # Set xenstore permissions on path(s) under /vm/<uuid>.
        return xstransact.SetPermissions(self.vmpath, *args)
    # Function to update xenstore /dom/*

    def readDom(self, *args):
        # Read value(s) under this domain's /local/domain/<domid> tree.
        return xstransact.Read(self.dompath, *args)

    def gatherDom(self, *args):
        # Gather several values from the domain's tree at once.
        return xstransact.Gather(self.dompath, *args)

    def _writeDom(self, *args):
        # Write key/value pair(s) under the domain's tree.
        return xstransact.Write(self.dompath, *args)

    def _removeDom(self, *args):
        # Remove path(s) under the domain's tree.
        return xstransact.Remove(self.dompath, *args)

    def storeDom(self, *args):
        # Store key/value pair(s) under the domain's tree.
        return xstransact.Store(self.dompath, *args)
    def readDomTxn(self, transaction, *args):
        # Like readDom, but via a caller-supplied xenstore transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.read(*paths)

    def gatherDomTxn(self, transaction, *args):
        # Like gatherDom, but via a caller-supplied transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.gather(*paths)

    def _writeDomTxn(self, transaction, *args):
        # Like _writeDom, but via a caller-supplied transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.write(*paths)

    def _removeDomTxn(self, transaction, *args):
        # Like _removeDom, but via a caller-supplied transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.remove(*paths)

    def storeDomTxn(self, transaction, *args):
        # Like storeDom, but via a caller-supplied transaction.
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.store(*paths)
    def _recreateDom(self):
        # Rebuild /local/domain/<domid> inside a single transaction.
        complete(self.dompath, lambda t: self._recreateDomFunc(t))

    def _recreateDomFunc(self, t):
        """Wipe and recreate the domain's xenstore directory, giving the
        domain read access to it and ownership of selected subdirs."""
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # Subtrees the guest itself is allowed to write.
        for i in [ 'device', 'control', 'error', 'memory' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write the domain's details (console, store, image notes,
        VCPU availability, ...) into its /local/domain/<domid> tree."""
        to_store = {
            'domid': str(self.domid),
            'vm': self.vmpath,
            'name': self.info['name_label'],
            'console/limit': str(xoptions.get_console_limit() * 1024),
            'memory/target': str(self.info['memory_dynamic_max'] / 1024),
            }

        # Helper: store v under key n, mapping booleans to "1"/"0" and
        # skipping None values entirely.
        def f(n, v):
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port', self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type', constype)
        f('store/port', self.store_port)
        f('store/ring-ref', self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # A '!'-prefixed feature is explicitly disabled.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1520 def _vcpuDomDetails(self):
1521 def availability(n):
1522 if self.info['vcpu_avail'] & (1 << n):
1523 return 'online'
1524 else:
1525 return 'offline'
1527 result = {}
1528 for v in range(0, self.info['VCPUs_max']):
1529 result["cpu/%d/availability" % v] = availability(v)
1530 return result
    # xenstore watches

    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date.  This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        # Config changes under /vm/<uuid> ...
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        # ... and guest-initiated shutdown requests.
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read the VM's config values from the
        store and fold any changes back into self.info, rewriting the
        domain details if anything changed.  Always returns 1 so the
        watch stays registered."""
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        if vm_details['rtc/timeoffset'] == None:
            vm_details['rtc/timeoffset'] = "0"

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Check if the rtc offset has changes
        # NOTE(review): the fallback default here is the integer 0,
        # while the comparison above uses the string "0" -- confirm
        # whether the stored value is meant to be a string.
        if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
            self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
            changed = True

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self._storeDomDetails()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback for control/shutdown: record when the
        shutdown started and schedule a refreshShutdown check at the
        timeout.  Returns True so the watch stays registered."""
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # 'suspend' is handled by the checkpoint path, not here.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in progress: time out relative
                # to its recorded start.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                # First notification: record the start time.
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
    # Public Attributes for the VM

    def getDomid(self):
        # Current hypervisor domain id (None while not running).
        return self.domid

    def setName(self, name, to_store = True):
        """Rename the VM, optionally persisting the new name to the
        store.  The name is validated first (see _checkName)."""
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)

    def getName(self):
        return self.info['name_label']

    def getDomainPath(self):
        # The domain's /local/domain/<domid> xenstore path.
        return self.dompath

    def getShutdownReason(self):
        # Raw value of the guest's control/shutdown node, if any.
        return self.readDom('control/shutdown')

    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        return self.store_port

    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        return self.console_port

    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']

    def getVCpuCount(self):
        return self.info['VCPUs_max']
    def setVCpuCount(self, vcpus):
        """Set the number of VCPUs for this domain.

        For a running domain only the availability bitmask is adjusted
        (count may not exceed VCPUs_max); for a stopped domain the
        maximum itself and the affinity list are resized.
        @raise XendError: if vcpus <= 0, or exceeds the maximum on a
            running domain.
        """
        # NOTE: the parameter n is unused; the closure validates the
        # outer 'vcpus' directly.
        def vcpus_valid(n):
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        # Bitmask with the low 'vcpus' bits set: those VCPUs are online.
        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
        xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
1687 def getMemoryTarget(self):
1688 """Get this domain's target memory size, in KB."""
1689 return self.info['memory_dynamic_max'] / 1024
1691 def getMemoryMaximum(self):
1692 """Get this domain's maximum memory size, in KB."""
1693 # remember, info now stores memory in bytes
1694 return self.info['memory_static_max'] / 1024
1696 def getResume(self):
1697 return str(self._resume)
1699 def setResume(self, isresume):
1700 self._resume = isresume
1702 def getCpus(self):
1703 return self.info['cpus']
1705 def setCpus(self, cpumap):
1706 self.info['cpus'] = cpumap
1708 def getCap(self):
1709 return self.info['vcpus_params']['cap']
1711 def setCap(self, cpu_cap):
1712 self.info['vcpus_params']['cap'] = cpu_cap
1714 def getWeight(self):
1715 return self.info['vcpus_params']['weight']
1717 def setWeight(self, cpu_weight):
1718 self.info['vcpus_params']['weight'] = cpu_weight
1720 def getRestartCount(self):
1721 return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor record for this
            domain; fetched via dom_get if not supplied.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason.  This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists.  This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded.  It will also occur if someone
                    # destroys a domain beneath us.  We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen.  This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped.  We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain.  We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return
                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain.  XendCheckpoint will do
                        # this once it has finished.  However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain.  This can happen if a restore is in progress, or has
                # failed.  Ignore this domain.
                pass
            else:
                # Domain is alive.  If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock, on its own thread, to avoid
        # re-entering this method while still holding it.
        if restart_reason:
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
    # Restart functions - handling whether we come back up on shutdown.

    def _clearRestart(self):
        # Forget the recorded shutdown start, cancelling timeout tracking.
        self._removeDom("xend/shutdown_start_time")
1839 def _maybeDumpCore(self, reason):
1840 if reason == 'crash':
1841 if xoptions.get_enable_dump() or self.get_on_crash() \
1842 in ['coredump_and_destroy', 'coredump_and_restart']:
1843 try:
1844 self.dumpCore()
1845 except XendError:
1846 # This error has been logged -- there's nothing more
1847 # we can do in this context.
1848 pass
    def _maybeRestart(self, reason):
        """Carry out the configured on_{reason} action ('poweroff',
        'reboot' or 'crash'), defaulting to destroying the domain."""
        # Before taking configured action, dump core if configured to do so.
        self._maybeDumpCore(reason)

        # Dispatch to the correct method based upon the configured on_{reason}
        # behaviour.
        actions = {"destroy" : self.destroy,
                   "restart" : self._restart,
                   "preserve" : self._preserve,
                   "rename-restart" : self._renameRestart,
                   "coredump-destroy" : self.destroy,
                   "coredump-restart" : self._restart}

        # Map shutdown reason to the config key holding the action.
        action_conf = {
            'poweroff': 'actions_after_shutdown',
            'reboot': 'actions_after_reboot',
            'crash': 'actions_after_crash',
        }

        action_target = self.info.get(action_conf.get(reason))
        func = actions.get(action_target, None)
        if func and callable(func):
            func()
        else:
            self.destroy() # default to destroy
1877 def _renameRestart(self):
1878 self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # Refuse to restart if a previous restart never finished --
        # the flag is only cleared on success.
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Throttle: refuse restarts that come too soon after the last one.
        now = time.time()
        rst = self._readVm('xend/previous_restart_time')
        if rst:
            rst = float(rst)
            timeout = now - rst
            if timeout < MINIMUM_RESTART_TIME:
                log.error(
                    'VM %s restarting too fast (%f seconds since the last '
                    'restart). Refusing to restart to avoid loops.',
                    self.info['name_label'], timeout)
                self.destroy()
                return

        self._writeVm('xend/previous_restart_time', str(now))

        # Snapshot the xend/* subtree so it can be copied to the new VM.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry the old xend/* entries over to the new VM node.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                # NOTE(review): if xend/restart_count is missing this
                # int() raises on None -- presumably it is always written
                # at creation; confirm.
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                # Clean the in-progress flag on whichever VM node owns it.
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name.  This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: a copy of self.info carrying the ORIGINAL name and
            UUID, for the replacement domain to be created from.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The clone keeps the original identity; this (dead) domain
        # takes the fresh name/UUID so the two cannot collide.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
    def _preserve(self):
        """Keep the dead domain around for debugging: stop watching its
        VM path, mark the shutdown handled, and set state to halted."""
        log.info("Preserving dead domain %s (%d).", self.info['name_label'],
                 self.domid)
        self._unwatchVm()
        # Flag so refreshShutdown leaves this domain alone from now on.
        self.storeDom('xend/shutdown_completed', 'True')
        self._stateSet(DOM_STATE_HALTED)
1986 # Debugging ..
1989 def dumpCore(self, corefile = None):
1990 """Create a core dump for this domain.
1992 @raise: XendError if core dumping failed.
1993 """
1995 try:
1996 if not corefile:
1997 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
1998 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
1999 self.info['name_label'], self.domid)
2001 if os.path.isdir(corefile):
2002 raise XendError("Cannot dump core in a directory: %s" %
2003 corefile)
2005 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2006 xc.domain_dumpcore(self.domid, corefile)
2007 self._removeVm(DUMPCORE_IN_PROGRESS)
2008 except RuntimeError, ex:
2009 corefile_incomp = corefile+'-incomplete'
2010 os.rename(corefile, corefile_incomp)
2011 self._removeVm(DUMPCORE_IN_PROGRESS)
2012 log.exception("XendDomainInfo.dumpCore failed: id = %s name = %s",
2013 self.domid, self.info['name_label'])
2014 raise XendError("Failed to dump core: %s" % str(ex))
2017 # Device creation/deletion functions
2020 def _createDevice(self, deviceClass, devConfig):
2021 return self.getDeviceController(deviceClass).createDevice(devConfig)
2023 def _waitForDevice(self, deviceClass, devid):
2024 return self.getDeviceController(deviceClass).waitForDevice(devid)
2026 def _waitForDeviceUUID(self, dev_uuid):
2027 deviceClass, config = self.info['devices'].get(dev_uuid)
2028 self._waitForDevice(deviceClass, config['devid'])
2030 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2031 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2032 devid, backpath)
2034 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2035 return self.getDeviceController(deviceClass).reconfigureDevice(
2036 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        Non-vscsi devices are created in the order given by
        ordered_device_refs(); vscsi devices are collected first and then
        created in devid order so device names are stable in the guest.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Defer vscsi creation; remember devid -> uuid for sorting.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        # vscsi devices must be created in sorted devid order so that
        # /dev/sdXX naming in the guest is deterministic.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        # if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices.  Nothrow guarantee.

        Destroys the device model first (if any), then walks every device
        class under this domain's xenstore tree and destroys each device,
        swallowing and logging any per-device failure.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        # Transaction used only to list devices; always aborted below.
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            t.abort()
2122 def getDeviceController(self, name):
2123 """Get the device controller for this domain, and if it
2124 doesn't exist, create it.
2126 @param name: device class name
2127 @type name: string
2128 @rtype: subclass of DevController
2129 """
2130 if name not in self._deviceControllers:
2131 devController = XendDevices.make_controller(name, self)
2132 if not devController:
2133 raise XendError("Unknown device type: %s" % name)
2134 self._deviceControllers[name] = devController
2136 return self._deviceControllers[name]
2139 # Migration functions (public)
2142 def testMigrateDevices(self, network, dst):
2143 """ Notify all device about intention of migration
2144 @raise: XendError for a device that cannot be migrated
2145 """
2146 for (n, c) in self.info.all_devices_sxpr():
2147 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2148 if rc != 0:
2149 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        ctr counts devices that completed this step.  On failure, the
        recovery loop replays all devices: while ctr > 0 the device had
        completed the step, so it is recovered at the current step; when
        ctr reaches 0 (the failing device and those never reached), step
        is decremented once and recovery continues at the previous step.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2169 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2170 step, domName=''):
2171 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2172 network, dst, step, domName)
2174 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2175 dst, step, domName=''):
2176 return self.getDeviceController(deviceClass).recover_migrate(
2177 deviceConfig, network, dst, step, domName)
2180 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain via xc.domain_create, applies HVM platform
        parameters (timer mode, viridian, HPET, vpt alignment), sets the
        maximum vcpu count, verifies VT-d assignability of any passed-through
        PCI devices, and registers the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 2MB here to be on the safe side.
        balloon.free(2*1024, self) # 2MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        # s3_integrity may be absent from older configs; default to zero.
        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Flag layout: bit 0 = HVM, bit 1 = HAP, bit 2 = S3 integrity.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')
            # NOTE(review): non-ACM create failures are swallowed here and
            # only surface through the domid check below — confirm intended.

        # If creation failed, domid is unset/negative (None < 0 is true in
        # Python 2), so this raises for the swallowed-exception case too.
        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Test whether the devices can be assigned with VT-d
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci) # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(self.domid, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode bus/device/function from the packed BDF value.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce the domain to xenstored (store MFN + event channel).

        @raise: XendError if the introduction fails.
        """
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Set this domain's stub-domain target and record it in xenstore.

        @raise: XendError if the hypervisor call fails.
        """
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        If no explicit cpus list is set and the host has more than one
        NUMA node, pick the least-loaded node with enough free memory and
        pin all vcpus to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty restricted cpu list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Return the index of the least-loaded candidate node.
                # Load is the count of other domains' online vcpus whose
                # cpumap touches the node, scaled by the node's cpu count;
                # non-candidate nodes are weighted out with sys.maxint.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Memory needed by this domain, in the units reported by
                # physinfo (dynamic max scaled down by 1024).
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the constructed domain: run the bootloader, create the
        image, pin vcpus, size and reserve memory/shadow, build the image,
        introduce the domain to xenstore and create its devices.

        @raise: VmError or RuntimeError (converted to VmError) on failure;
            bootloader state is cleaned up either way.
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            # Apply the host's DST/timezone offset for localtime guests.
            if self.info['platform'].get('localtime', 0):
                if time.localtime(time.time())[8]:
                    self.info['platform']['rtc_timeoffset'] = -time.altzone
                else:
                    self.info['platform']['rtc_timeoffset'] = -time.timezone

            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee."""

        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            # Clear the domid only after the state change has fired.
            self.domid = None  # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
2490 def unwatchShutdown(self):
2491 """Remove the watch on the domain's control/shutdown node, if any.
2492 Idempotent. Nothrow guarantee. Expects to be protected by the
2493 refresh_shutdown_lock."""
2495 try:
2496 try:
2497 if self.shutdownWatch:
2498 self.shutdownWatch.unwatch()
2499 finally:
2500 self.shutdownWatch = None
2501 except:
2502 log.exception("Unwatching control/shutdown failed.")
2504 def waitForShutdown(self):
2505 self.state_updated.acquire()
2506 try:
2507 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2508 self.state_updated.wait(timeout=1.0)
2509 finally:
2510 self.state_updated.release()
2513 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish a restore (called from XendCheckpoint): record the store
        and console MFNs, reintroduce the domain to xenstore, recreate the
        image/device model, and restart watches."""

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        """Clear the resume flag once a restore has completed."""
        self.setResume(False)
2538 # VM Destroy
    def _prepare_phantom_paths(self):
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        # Returns a flat list of backend/frontend xenstore paths; consumed
        # by _cleanup_phantom_devs() after the real devices are gone.
        plist = []
        if self.domid is not None:
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd = xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                # Read-only scan; nothing to commit.
                t.abort()
        return plist
2560 def _cleanup_phantom_devs(self, plist):
2561 # remove phantom devices
2562 if not plist == []:
2563 time.sleep(2)
2564 for paths in plist:
2565 if paths.find('backend') != -1:
2566 # Modify online status /before/ updating state (latter is watched by
2567 # drivers, so this ordering avoids a race).
2568 xstransact.Write(paths, 'online', "0")
2569 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2570 # force
2571 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee."""

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Collect phantom vbd paths before the device tree is torn down.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                # Pause, perform PCI function-level reset, then destroy.
                xc.domain_destroy_hook(self.domid)
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and recreate it from its current config,
        restoring the xend/ subtree of its VM node.  Nothrow (failures are
        logged)."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot xend/ VM-node entries before destroy wipes them.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Roll back the half-created replacement before reporting.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migrate); checks that the domain
        # is currently suspended first so safe to call from anywhere

        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            # fast path: guest advertised SUSPEND_CANCEL, so devices were
            # never torn down and need not be recreated.
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # NOTE(review): self.image is used unguarded here, unlike the
        # `if self.image:` checks elsewhere — confirm it is always set on
        # this path (would raise AttributeError for image-less domains).
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2681 # Channels for xenstore and console
    def _createChannels(self):
        """Create the channels to the domain.
        """
        # One unbound event channel each for the xenstore connection and
        # the console.
        self.store_port = self._createChannel()
        self.console_port = self._createChannel()
2691 def _createChannel(self):
2692 """Create an event channel to the domain.
2693 """
2694 try:
2695 if self.domid != None:
2696 return xc.evtchn_alloc_unbound(domid = self.domid,
2697 remote_dom = 0)
2698 except:
2699 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2700 raise
2702 def _resetChannels(self):
2703 """Reset all event channels in the domain.
2704 """
2705 try:
2706 if self.domid != None:
2707 return xc.evtchn_reset(dom = self.domid)
2708 except:
2709 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2710 raise
2714 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        HVM and dom0-kernel boots are untouched; otherwise the first
        bootable disk is handed to the bootloader (default pygrub), mounted
        on a loopback vbd first when it is a non-raw blktap file, and the
        resulting config is merged into self.info.

        @raise: VmError if no disk is bootable or the bootloader produced
            no configuration.
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0. Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = osdep.pygrub_path

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # st_rdev == 0 means a plain file rather than a device node.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device. pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always undo the loopback mount, even if the bootloader
                # failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2792 # VM Functions
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        @param params: parameter specs passed through to _gatherVm
        @return: the gathered values, or [] if the store data is corrupt.
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid. We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s! Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
    def _cleanupVm(self):
        """Cleanup VM resources.  Idempotent.  Nothrow guarantee."""

        self._unwatchVm()

        try:
            self._removeVm()
        except:
            log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole MiB.
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            balloon.free(overhead_kb, self)
2835 def _unwatchVm(self):
2836 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2837 guarantee."""
2838 try:
2839 try:
2840 if self.vmWatch:
2841 self.vmWatch.unwatch()
2842 finally:
2843 self.vmWatch = None
2844 except:
2845 log.exception("Unwatching VM path failed.")
2847 def testDeviceComplete(self):
2848 """ For Block IO migration safety we must ensure that
2849 the device has shutdown correctly, i.e. all blocks are
2850 flushed to disk
2851 """
2852 start = time.time()
2853 while True:
2854 test = 0
2855 diff = time.time() - start
2856 for i in self.getDeviceController('vbd').deviceIDs():
2857 test = 1
2858 log.info("Dev %s still active, looping...", i)
2859 time.sleep(0.1)
2861 if test == 0:
2862 break
2863 if diff >= MIGRATE_TIMEOUT:
2864 log.info("Dev still active but hit max loop timeout")
2865 break
2867 def testvifsComplete(self):
2868 """ In case vifs are released and then created for the same
2869 domain, we need to wait the device shut down.
2870 """
2871 start = time.time()
2872 while True:
2873 test = 0
2874 diff = time.time() - start
2875 for i in self.getDeviceController('vif').deviceIDs():
2876 test = 1
2877 log.info("Dev %s still active, looping...", i)
2878 time.sleep(0.1)
2880 if test == 0:
2881 break
2882 if diff >= MIGRATE_TIMEOUT:
2883 log.info("Dev still active but hit max loop timeout")
2884 break
    def _storeVmDetails(self):
        """Write this VM's legacy configuration parameters, image sxp and
        restart count to its xenstore VM node, then restrict permissions."""
        to_store = {}

        for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
            info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
            if self._infoIsSet(info_key):
                to_store[key] = str(self.info[info_key])

        if self._infoIsSet("static_memory_min"):
            to_store["memory"] = str(self.info["static_memory_min"])
        if self._infoIsSet("static_memory_max"):
            to_store["maxmem"] = str(self.info["static_memory_max"])

        image_sxpr = self.info.image_sxpr()
        if image_sxpr:
            to_store['image'] = sxp.to_string(image_sxpr)

        # Initialise the restart counter only on first store.
        if not self._readVm('xend/restart_count'):
            to_store['xend/restart_count'] = str(0)

        log.debug("Storing VM details: %s", scrub_password(to_store))

        self._writeVm(to_store)
        self._setVmPermissions()
    def _setVmPermissions(self):
        """Allow the guest domain to read its UUID.  We don't allow it to
        access any other entry, for security."""
        # Grant read-only access on the uuid node to this domain only.
        xstransact.SetPermissions('%s/uuid' % self.vmpath,
                                  { 'dom' : self.domid,
                                    'read' : True,
                                    'write' : False })
2920 # Utility functions
2923 def __getattr__(self, name):
2924 if name == "state":
2925 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
2926 log.warn("".join(traceback.format_stack()))
2927 return self._stateGet()
2928 else:
2929 raise AttributeError(name)
2931 def __setattr__(self, name, value):
2932 if name == "state":
2933 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
2934 log.warn("".join(traceback.format_stack()))
2935 self._stateSet(value)
2936 else:
2937 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a power-state change: wake waiters on state_updated and
        fire a Xen API 'power_state' modification event when the live state
        actually differs."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            # A managed checkpoint on disk distinguishes SUSPENDED from
            # HALTED.
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
2980 def _infoIsSet(self, name):
2981 return name in self.info and self.info[name] is not None
2983 def _checkName(self, name):
2984 """Check if a vm name is valid. Valid names contain alphabetic
2985 characters, digits, or characters in '_-.:/+'.
2986 The same name cannot be used for more than one vm at the same time.
2988 @param name: name
2989 @raise: VmError if invalid
2990 """
2991 from xen.xend import XendDomain
2993 if name is None or name == '':
2994 raise VmError('Missing VM Name')
2996 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
2997 raise VmError('Invalid VM Name')
2999 dom = XendDomain.instance().domain_lookup_nr(name)
3000 if dom and dom.info['uuid'] != self.info['uuid']:
3001 raise VmError("VM name '%s' already exists%s" %
3002 (name,
3003 dom.domid is not None and
3004 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: pre-fetched domain info dict; fetched via dom_get when
            omitted (silently returns if the domain no longer exists)
        @param refresh: also re-evaluate shutdown state when True
        @param transaction: xenstore transaction passed to console update
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        # A negative maxmem_kb means "unlimited"; substitute the host total.
        if info["maxmem_kb"] < 0:
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
def sxpr(self, ignore_store = False, legacy_only = True):
    """Render this domain's configuration as an SXP expression."""
    return self.info.to_sxp(domain = self,
                            ignore_devices = ignore_store,
                            legacy_only = legacy_only)
3041 # Xen API
3042 # ----------------------------------------------------------------
def get_uuid(self):
    """Return this VM's UUID, minting and storing one if absent."""
    dom_uuid = self.info.get('uuid')
    if dom_uuid:
        return dom_uuid
    # No UUID recorded yet -- generate one and remember it.
    dom_uuid = uuid.createString()
    self.info['uuid'] = dom_uuid
    return dom_uuid
def get_memory_static_max(self):
    """Return the 'memory_static_max' setting, or 0 when unset."""
    return self.info.get('memory_static_max', 0)

def get_memory_static_min(self):
    """Return the 'memory_static_min' setting, or 0 when unset."""
    return self.info.get('memory_static_min', 0)

def get_memory_dynamic_max(self):
    """Return the 'memory_dynamic_max' setting, or 0 when unset."""
    return self.info.get('memory_dynamic_max', 0)

def get_memory_dynamic_min(self):
    """Return the 'memory_dynamic_min' setting, or 0 when unset."""
    return self.info.get('memory_dynamic_min', 0)
3060 # only update memory-related config values if they maintain sanity
3061 def _safe_set_memory(self, key, newval):
3062 oldval = self.info.get(key, 0)
3063 try:
3064 self.info[key] = newval
3065 self.info._memory_sanity_check()
3066 except Exception, ex:
3067 self.info[key] = oldval
3068 raise
def set_memory_static_max(self, val):
    """Set 'memory_static_max', validated via _safe_set_memory()."""
    self._safe_set_memory('memory_static_max', val)

def set_memory_static_min(self, val):
    """Set 'memory_static_min', validated via _safe_set_memory()."""
    self._safe_set_memory('memory_static_min', val)

def set_memory_dynamic_max(self, val):
    """Set 'memory_dynamic_max', validated via _safe_set_memory()."""
    self._safe_set_memory('memory_dynamic_max', val)

def set_memory_dynamic_min(self, val):
    """Set 'memory_dynamic_min', validated via _safe_set_memory()."""
    self._safe_set_memory('memory_dynamic_min', val)
def get_vcpus_params(self):
    """Return scheduler parameters for this VM.

    For an inactive VM (no domid) the configured 'vcpus_params' are
    returned; otherwise the live credit-scheduler settings are fetched
    from the hypervisor.
    """
    domid = self.getDomid()
    if domid is None:
        return self.info['vcpus_params']
    return xc.sched_credit_domain_get(domid)
def get_power_state(self):
    """Map the internal state index to its Xen API power-state name."""
    state_index = self._stateGet()
    return XEN_API_VM_POWER_STATE[state_index]
def get_platform(self):
    """Return the 'platform' config dict ({} when unset)."""
    return self.info.get('platform', {})

def get_pci_bus(self):
    """Return the 'pci_bus' setting ('' when unset)."""
    return self.info.get('pci_bus', '')

def get_tools_version(self):
    """Return the 'tools_version' config dict ({} when unset)."""
    return self.info.get('tools_version', {})

def get_metrics(self):
    """Return the UUID of this VM's metrics object."""
    return self.metrics.get_uuid()
def get_security_label(self, xspol=None):
    """Return this domain's XSM security label.

    @param xspol: optional policy to evaluate the label under
    """
    import xen.util.xsm.xsm as security
    return security.get_security_label(self, xspol)
def set_security_label(self, seclab, old_seclab, xspol=None,
                       xspol_old=None):
    """
    Set the security label of a domain from its old to
    a new value.
    @param seclab New security label formatted in the form
                  <policy type>:<policy name>:<vm label>
    @param old_seclab The current security label that the
                  VM must have.
    @param xspol An optional policy under which this
                 update should be done. If not given,
                 then the current active policy is used.
    @param xspol_old The old policy; only to be passed during
                     the updating of a policy
    @return Returns return code, a string with errors from
            the hypervisor's operation, old label of the
            domain
    """
    rc = 0
    errors = ""
    old_label = ""
    new_ssidref = 0
    domid = self.getDomid()
    res_labels = None
    # xspol_old is only supplied while a policy update is in progress;
    # in that case the old-label consistency check is skipped below.
    is_policy_update = (xspol_old != None)

    from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

    state = self._stateGet()
    # Relabel only HALTED or RUNNING or PAUSED domains
    if domid != 0 and \
       state not in \
       [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
         DOM_STATE_SUSPENDED ]:
        log.warn("Relabeling domain not possible in state '%s'" %
                 DOM_STATES[state])
        return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

    # Remove security label. Works only for halted or suspended domains
    if not seclab or seclab == "":
        if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        if self.info.has_key('security_label'):
            old_label = self.info['security_label']
            # Check label against expected one.
            if old_label != old_seclab:
                return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
            del self.info['security_label']
            xen.xend.XendDomain.instance().managed_config_save(self)
        return (xsconstants.XSERR_SUCCESS, "", "", 0)

    # New label must have the form <policy type>:<policy name>:<vm label>.
    tmp = seclab.split(":")
    if len(tmp) != 3:
        return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
    typ, policy, label = tmp

    poladmin = XSPolicyAdminInstance()
    if not xspol:
        xspol = poladmin.get_policy_by_name(policy)

    try:
        # Serialize against policy changes for the rest of the operation.
        xen.xend.XendDomain.instance().policy_lock.acquire_writer()

        if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
            #if domain is running or paused try to relabel in hypervisor
            if not xspol:
                return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

            if typ != xspol.get_type_name() or \
               policy != xspol.get_name():
                return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

            if typ == xsconstants.ACM_POLICY_ID:
                new_ssidref = xspol.vmlabel_to_ssidref(label)
                if new_ssidref == xsconstants.INVALID_SSIDREF:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                # Check that all used resources are accessible under the
                # new label
                if not is_policy_update and \
                   not security.resources_compatible_with_vmlabel(xspol,
                          self, label):
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                #Check label against expected one. Can only do this
                # if the policy hasn't changed underneath in the meantime
                if xspol_old == None:
                    old_label = self.get_security_label()
                    if old_label != old_seclab:
                        log.info("old_label != old_seclab: %s != %s" %
                                 (old_label, old_seclab))
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                # relabel domain in the hypervisor
                rc, errors = security.relabel_domains([[domid, new_ssidref]])
                log.info("rc from relabeling in HV: %d" % rc)
            else:
                return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

        if rc == 0:
            # HALTED, RUNNING or PAUSED
            if domid == 0:
                if xspol:
                    self.info['security_label'] = seclab
                    ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                else:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
            else:
                if self.info.has_key('security_label'):
                    old_label = self.info['security_label']
                    # Check label against expected one, unless wildcard
                    if old_label != old_seclab:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                self.info['security_label'] = seclab

                try:
                    # Persisting the config is best-effort here.
                    xen.xend.XendDomain.instance().managed_config_save(self)
                except:
                    pass
        return (rc, errors, old_label, new_ssidref)
    finally:
        xen.xend.XendDomain.instance().policy_lock.release()
def get_on_shutdown(self):
    """Action taken after a clean shutdown, as a Xen API name."""
    action = self.info.get('actions_after_shutdown')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_reboot(self):
    """Action taken after a reboot request, as a Xen API name."""
    action = self.info.get('actions_after_reboot')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_suspend(self):
    """Action taken after a suspend, as a Xen API name.

    TODO: not supported
    """
    action = self.info.get('actions_after_suspend')
    if action and action in XEN_API_ON_NORMAL_EXIT:
        return action
    return XEN_API_ON_NORMAL_EXIT[-1]

def get_on_crash(self):
    """Action taken after a crash, mapped through
    XEN_API_ON_CRASH_BEHAVIOUR_FILTER to its Xen API name."""
    action = self.info.get('actions_after_crash')
    if not action or action not in \
           XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
        return XEN_API_ON_CRASH_BEHAVIOUR[0]
    return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[action]
def get_dev_config_by_uuid(self, dev_class, dev_uuid):
    """Get a device configuration from XendConfig or the DevController.

    @param dev_class: device class, either 'vbd' or 'vif' (the type
                      recorded in XendConfig is what is actually used
                      to pick the controller)
    @param dev_uuid: device UUID
    @rtype: dictionary or None
    """
    dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

    # For a halted/suspended domain the device controllers have no
    # better information than XendConfig, so hand back the stored copy.
    if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                            XEN_API_VM_POWER_STATE_SUSPENDED):
        if not dev_config:
            return None
        return copy.deepcopy(dev_config)

    # Use the dev_type recorded in XendConfig rather than dev_class.
    controller = self.getDeviceController(dev_type)
    if not controller:
        return None

    all_configs = controller.getAllDeviceConfigurations()
    if not all_configs:
        return None

    # Overlay the live controller view on the stored configuration.
    merged_config = copy.deepcopy(dev_config)
    for live_devid, live_cfg in all_configs.items():
        if live_cfg.get('uuid') == dev_uuid:
            merged_config.update(live_cfg)
            merged_config['id'] = live_devid
            break

    return merged_config
def get_dev_xenapi_config(self, dev_class, dev_uuid):
    """Return a device configuration in Xen API format.

    Starts from get_dev_config_by_uuid() and fills in the Xen API
    fields expected for the given device class ('vif', 'vbd' or
    'vtpm'), including utilisation statistics when the domain is
    not halted.

    @param dev_class: 'vif', 'vbd' or 'vtpm'
    @param dev_uuid: device UUID
    @rtype: dictionary ({} when the device is unknown)
    """
    config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
    if not config:
        return {}

    config['VM'] = self.get_uuid()

    if dev_class == 'vif':
        if 'name' not in config:
            config['name'] = config.get('vifname', '')
        if 'MAC' not in config:
            config['MAC'] = config.get('mac', '')
        if 'type' not in config:
            config['type'] = 'paravirtualised'
        # Bind devid unconditionally: it is needed both for the
        # synthetic 'device' name and for the statistics lookups
        # below.  (Previously it was only bound when 'device' was
        # missing, which could raise NameError in the stats path.)
        devid = config.get('id')
        if 'device' not in config:
            if devid != None:
                config['device'] = 'eth%s' % devid
            else:
                config['device'] = ''

        if 'network' not in config:
            try:
                bridge = config.get('bridge', None)
                if bridge is None:
                    # Derive the bridge from the live brctl state.
                    from xen.util import Brctl
                    if_to_br = dict([(i, b)
                                     for (b, ifs) in Brctl.get_state().items()
                                     for i in ifs])
                    vifname = "vif%s.%s" % (self.getDomid(),
                                            config.get('id'))
                    bridge = if_to_br.get(vifname, None)
                # Use the bridge determined above; re-reading
                # config['bridge'] here would yield None exactly when
                # the Brctl fallback was needed.
                config['network'] = \
                    XendNode.instance().bridge_to_network(
                        bridge).get_uuid()
            except Exception:
                log.exception('bridge_to_network')
                # Ignore this for now -- it may happen if the device
                # has been specified using the legacy methods, but at
                # some point we're going to have to figure out how to
                # handle that properly.

        config['MTU'] = 1500 # TODO

        if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
            xennode = XendNode.instance()
            rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
            config['io_read_kbs'] = rx_bps/1024
            config['io_write_kbs'] = tx_bps/1024
            rx, tx = xennode.get_vif_stat(self.domid, devid)
            config['io_total_read_kbs'] = rx/1024
            config['io_total_write_kbs'] = tx/1024
        else:
            config['io_read_kbs'] = 0.0
            config['io_write_kbs'] = 0.0
            config['io_total_read_kbs'] = 0.0
            config['io_total_write_kbs'] = 0.0

        config['security_label'] = config.get('security_label', '')

    if dev_class == 'vbd':

        if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
            controller = self.getDeviceController(dev_class)
            devid, _1, _2 = controller.getDeviceDetails(config)
            xennode = XendNode.instance()
            rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
            config['io_read_kbs'] = rd_blkps
            config['io_write_kbs'] = wr_blkps
        else:
            config['io_read_kbs'] = 0.0
            config['io_write_kbs'] = 0.0

        config['VDI'] = config.get('VDI', '')
        # 'dev' may carry a ':cdrom'/':disk' suffix; split it off.
        config['device'] = config.get('dev', '')
        if ':' in config['device']:
            vbd_name, vbd_type = config['device'].split(':', 1)
            config['device'] = vbd_name
            if vbd_type == 'cdrom':
                config['type'] = XEN_API_VBD_TYPE[0]
            else:
                config['type'] = XEN_API_VBD_TYPE[1]

        config['driver'] = 'paravirtualised' # TODO
        config['image'] = config.get('uname', '')

        if config.get('mode', 'r') == 'r':
            config['mode'] = 'RO'
        else:
            config['mode'] = 'RW'

    if dev_class == 'vtpm':
        if 'type' not in config:
            config['type'] = 'paravirtualised' # TODO
        if 'backend' not in config:
            config['backend'] = "00000000-0000-0000-0000-000000000000"

    return config
def get_dev_property(self, dev_class, dev_uuid, field):
    """Return one field of a device's Xen API configuration.

    @raise XendError: if the device config has no such field
    """
    config = self.get_dev_xenapi_config(dev_class, dev_uuid)
    if field not in config:
        raise XendError('Invalid property for device: %s' % field)
    return config[field]
def set_dev_property(self, dev_class, dev_uuid, field, value):
    """Set *field* on the stored config of the device with *dev_uuid*.

    Note: dev_class is unused; devices are keyed by UUID alone.
    """
    _, dev_config = self.info['devices'][dev_uuid]
    dev_config[field] = value
def get_vcpus_util(self):
    """Return per-VCPU utilisation, keyed by VCPU number as a string.

    Empty when the domain is inactive or 'VCPUs_max' is unknown.
    """
    vcpu_util = {}
    xennode = XendNode.instance()
    if self.domid is not None and 'VCPUs_max' in self.info:
        for cpu in range(0, self.info['VCPUs_max']):
            vcpu_util[str(cpu)] = xennode.get_vcpu_util(self.domid, cpu)
    return vcpu_util
def get_consoles(self):
    """List of console device uuids ([] when none)."""
    return self.info.get('console_refs', [])

def get_vifs(self):
    """List of VIF device uuids ([] when none)."""
    return self.info.get('vif_refs', [])

def get_vbds(self):
    """List of VBD device uuids ([] when none)."""
    return self.info.get('vbd_refs', [])

def get_vtpms(self):
    """List of vTPM device uuids ([] when none)."""
    return self.info.get('vtpm_refs', [])
def get_dpcis(self):
    """UUIDs of DPCI (pass-through PCI) instances bound to this VM."""
    return XendDPCI.get_by_VM(self.info.get('uuid'))

def get_dscsis(self):
    """UUIDs of DSCSI (pass-through SCSI) instances bound to this VM."""
    return XendDSCSI.get_by_VM(self.info.get('uuid'))
def create_vbd(self, xenapi_vbd, vdi_image_path):
    """Create a VBD using a VDI from XendStorageRepository.

    @param xenapi_vbd: vbd struct from the Xen API
    @param vdi_image_path: VDI UUID
    @rtype: string
    @return: uuid of the device
    @raise XendError: if the device could not be created or hot-plugged
    """
    xenapi_vbd['image'] = vdi_image_path
    # A 'tap...'-prefixed image path selects the blktap backend;
    # anything else goes through the plain vbd backend.
    if vdi_image_path.startswith('tap'):
        dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
    else:
        dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                            XEN_API_VM_POWER_STATE_PAUSED):
        # Domain is live: hot-plug the new device now.
        _, config = self.info['devices'][dev_uuid]

        if vdi_image_path.startswith('tap'):
            dev_control = self.getDeviceController('tap')
        else:
            dev_control = self.getDeviceController('vbd')

        try:
            devid = dev_control.createDevice(config)
            dev_control.waitForDevice(devid)
            self.info.device_update(dev_uuid,
                                    cfg_xenapi = {'devid': devid})
        except Exception, exn:
            # Hot-plug failed: roll back the config entry added above.
            log.exception(exn)
            del self.info['devices'][dev_uuid]
            self.info['vbd_refs'].remove(dev_uuid)
            raise

    return dev_uuid
def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
    """Create a phantom (blktap) VBD using a VDI from XendStorageRepository.

    @param xenapi_vbd: vbd struct from the Xen API
    @param vdi_image_path: VDI UUID
    @return: devid of the new device (note: despite the historical
             docstring this is the device id stored in the config,
             not a uuid)
    @raise XendError: if the device could not be created

    NOTE(review): when the domain is not RUNNING, 'config' is never
    bound and the final return raises NameError -- presumably callers
    only invoke this on running domains; confirm.
    """
    xenapi_vbd['image'] = vdi_image_path
    dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
        # Hot-plug immediately and record the resulting devid.
        _, config = self.info['devices'][dev_uuid]
        config['devid'] = self.getDeviceController('tap').createDevice(config)

    return config['devid']
def create_vif(self, xenapi_vif):
    """Create VIF device from the passed struct in Xen API format.

    @param xenapi_vif: Xen API VIF Struct.
    @rtype: string
    @return: UUID
    @raise XendError: if the device could not be created or hot-plugged
    """
    dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
    if not dev_uuid:
        raise XendError('Failed to create device')

    if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                            XEN_API_VM_POWER_STATE_PAUSED):

        # Domain is live: hot-plug the new VIF now.
        _, config = self.info['devices'][dev_uuid]
        dev_control = self.getDeviceController('vif')

        try:
            devid = dev_control.createDevice(config)
            dev_control.waitForDevice(devid)
            self.info.device_update(dev_uuid,
                                    cfg_xenapi = {'devid': devid})
        except Exception, exn:
            # Hot-plug failed: roll back the config entry added above.
            log.exception(exn)
            del self.info['devices'][dev_uuid]
            self.info['vif_refs'].remove(dev_uuid)
            raise

    return dev_uuid
def create_vtpm(self, xenapi_vtpm):
    """Create a vTPM device from a Xen API struct.

    Only permitted on a halted domain, and at most one vTPM per domain.

    @return: uuid of the device
    @rtype: string
    @raise VmError: if the domain is not halted or already has a vTPM
    @raise XendError: if device creation fails
    """
    if self._stateGet() not in (DOM_STATE_HALTED,):
        raise VmError("Can only add vTPM to a halted domain.")
    if self.get_vtpms() != []:
        raise VmError('Domain already has a vTPM.')

    dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
    if not dev_uuid:
        raise XendError('Failed to create device')
    return dev_uuid
def create_console(self, xenapi_console):
    """Create a console device from a Xen API struct.

    Only permitted on a halted domain.

    @return: uuid of device
    @rtype: string
    @raise VmError: if the domain is not halted
    @raise XendError: if device creation fails
    """
    if self._stateGet() not in (DOM_STATE_HALTED,):
        raise VmError("Can only add console to a halted domain.")

    dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
    if not dev_uuid:
        raise XendError('Failed to create device')
    return dev_uuid
def set_console_other_config(self, console_uuid, other_config):
    """Replace the 'other_config' field of the given console device."""
    self.info.console_update(console_uuid, 'other_config', other_config)
def create_dpci(self, xenapi_pci):
    """Create pci device from the passed struct in Xen API format.

    For an inactive domain the device is merged into (or creates) the
    domain's single consolidated 'pci' config entry and the managed
    config is saved; for a running domain the attach is requested via
    device_configure().

    @param xenapi_pci: DPCI struct from Xen API
    @rtype: string
    @return: UUID of the new DPCI instance
    @raise XendError: if the device could not be created
    """
    dpci_uuid = uuid.createString()

    # Flatten the options dict into [key, value] pairs for sxp.
    dpci_opts = []
    opts_dict = xenapi_pci.get('options')
    for k in opts_dict.keys():
        dpci_opts.append([k, opts_dict[k]])

    # Convert xenapi to sxp
    ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

    # Note: the list terminators were missing in the reviewed copy;
    # restored so the sxp literal is well-formed.
    target_pci_sxp = \
        ['pci',
            ['dev',
                ['domain', '0x%02x' % ppci.get_domain()],
                ['bus', '0x%02x' % ppci.get_bus()],
                ['slot', '0x%02x' % ppci.get_slot()],
                ['func', '0x%1x' % ppci.get_func()],
                ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                ['opts', dpci_opts],
                ['uuid', dpci_uuid]
            ],
            ['state', 'Initialising']
        ]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
        # Inactive domain: merge into the stored configuration.
        old_pci_sxp = self._getDeviceInfo_pci(0)

        if old_pci_sxp is None:
            dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
            if not dev_uuid:
                raise XendError('Failed to create device')

        else:
            # Append the new dev to the existing consolidated entry.
            new_pci_sxp = ['pci']
            for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                new_pci_sxp.append(existing_dev)
            new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

            dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_pci_sxp)

        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        # Live domain: hot-plug via device_configure().
        try:
            self.device_configure(target_pci_sxp)

        except Exception:
            raise XendError('Failed to create device')

    return dpci_uuid
def create_dscsi(self, xenapi_dscsi):
    """Create scsi device from the passed struct in Xen API format.

    For an inactive domain the device is merged into (or creates) the
    domain's 'vscsi' config entry for the host adapter and the managed
    config is saved; for a running domain the attach is requested via
    device_configure().

    @param xenapi_dscsi: DSCSI struct from Xen API
    @rtype: string
    @return: UUID of the new DSCSI instance
    @raise XendError: if the device could not be created
    """
    dscsi_uuid = uuid.createString()

    # Convert xenapi to sxp.  The devid is the host part of the
    # virtual HCTL ("host:channel:target:lun").
    pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
    devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
    # Note: the list terminators were missing in the reviewed copy;
    # restored so the sxp literal is well-formed.
    target_vscsi_sxp = \
        ['vscsi',
            ['dev',
                ['devid', devid],
                ['p-devname', pscsi.get_dev_name()],
                ['p-dev', pscsi.get_physical_HCTL()],
                ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                ['state', xenbusState['Initialising']],
                ['uuid', dscsi_uuid]
            ]
        ]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
        # Inactive domain: merge into the stored configuration.
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)

        if cur_vscsi_sxp is None:
            dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
            if not dev_uuid:
                raise XendError('Failed to create device')

        else:
            # Append the new dev to the existing consolidated entry.
            new_vscsi_sxp = ['vscsi']
            for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                new_vscsi_sxp.append(existing_dev)
            new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

            dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_vscsi_sxp)

        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        # Live domain: hot-plug via device_configure().
        try:
            self.device_configure(target_vscsi_sxp)

        except Exception:
            raise XendError('Failed to create device')

    return dscsi_uuid
def destroy_device_by_uuid(self, dev_type, dev_uuid):
    """Destroy the device identified by dev_uuid.

    For a running or paused domain the device is torn down via its
    controller first; in every case the device is removed from the
    stored configuration (even when teardown fails).

    @raise XendError: if the device is unknown or has no devid
    """
    if dev_uuid not in self.info['devices']:
        raise XendError('Device does not exist')

    try:
        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]
            devid = config.get('devid')
            if devid is None:
                raise XendError('Unable to get devid for device: %s:%s' %
                                (dev_type, dev_uuid))
            self.getDeviceController(dev_type).destroyDevice(devid,
                                                             force = False)
    finally:
        # Always drop the device from the stored configuration.
        del self.info['devices'][dev_uuid]
        self.info['%s_refs' % dev_type].remove(dev_uuid)
def destroy_vbd(self, dev_uuid):
    """Destroy the VBD with the given uuid."""
    self.destroy_device_by_uuid('vbd', dev_uuid)

def destroy_vif(self, dev_uuid):
    """Destroy the VIF with the given uuid."""
    self.destroy_device_by_uuid('vif', dev_uuid)

def destroy_vtpm(self, dev_uuid):
    """Destroy the vTPM with the given uuid."""
    self.destroy_device_by_uuid('vtpm', dev_uuid)
def destroy_dpci(self, dev_uuid):
    """Detach the pass-through PCI device identified by a DPCI uuid.

    The matching 'dev' entry is removed from the domain's consolidated
    'pci' sxp; for a running domain the detach is instead requested
    via device_configure() with state 'Closing'.

    @param dev_uuid: DPCI instance uuid
    @raise XendError: if the device is not found or cannot be detached
    """

    dpci = XendAPIStore.get(dev_uuid, 'DPCI')
    ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

    # All PCI devices live in one 'pci' entry; find its uuid and
    # rebuild the dev list without the target device.
    old_pci_sxp = self._getDeviceInfo_pci(0)
    dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
    target_dev = None
    new_pci_sxp = ['pci']
    for dev in sxp.children(old_pci_sxp, 'dev'):
        # Match on the canonical BDF name of the physical device.
        domain = int(sxp.child_value(dev, 'domain'), 16)
        bus = int(sxp.child_value(dev, 'bus'), 16)
        slot = int(sxp.child_value(dev, 'slot'), 16)
        func = int(sxp.child_value(dev, 'func'), 16)
        name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
        if ppci.get_name() == name:
            target_dev = dev
        else:
            new_pci_sxp.append(dev)

    if target_dev is None:
        raise XendError('Failed to destroy device')

    target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

        # Inactive: just rewrite the stored configuration.
        self.info.device_update(dev_uuid, new_pci_sxp)
        if len(sxp.children(new_pci_sxp, 'dev')) == 0:
            # Last PCI device gone -- drop the whole 'pci' entry.
            del self.info['devices'][dev_uuid]
        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        try:
            self.device_configure(target_pci_sxp)

        except Exception, exn:
            raise XendError('Failed to destroy device')
def destroy_dscsi(self, dev_uuid):
    """Detach the pass-through SCSI device identified by a DSCSI uuid.

    The matching 'dev' entry (selected by virtual HCTL) is removed
    from the domain's 'vscsi' sxp; for a running domain the detach is
    instead requested via device_configure() with xenbus state
    'Closing'.

    @param dev_uuid: DSCSI instance uuid
    @raise XendError: if the device is not found or cannot be detached
    """
    dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
    devid = dscsi.get_virtual_host()
    vHCTL = dscsi.get_virtual_HCTL()
    cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
    dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

    # Rebuild the dev list without the target device.
    target_dev = None
    new_vscsi_sxp = ['vscsi']
    for dev in sxp.children(cur_vscsi_sxp, 'dev'):
        if vHCTL == sxp.child_value(dev, 'v-dev'):
            target_dev = dev
        else:
            new_vscsi_sxp.append(dev)

    if target_dev is None:
        raise XendError('Failed to destroy device')

    target_dev.append(['state', xenbusState['Closing']])
    target_vscsi_sxp = ['vscsi', target_dev]

    if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

        # Inactive: just rewrite the stored configuration.
        self.info.device_update(dev_uuid, new_vscsi_sxp)
        if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
            # Last SCSI device gone -- drop the whole 'vscsi' entry.
            del self.info['devices'][dev_uuid]
        xen.xend.XendDomain.instance().managed_config_save(self)

    else:
        try:
            self.device_configure(target_vscsi_sxp)

        except Exception, exn:
            raise XendError('Failed to destroy device')
def destroy_xapi_instances(self):
    """Destroy Xen-API instances stored in XendAPIStore.

    Xen-API classes based on XendBase (VMMetrics, DPCI, DSCSI) keep
    their instances in XendAPIStore; once the parent domain is gone
    they are cleaned up here.  Plain virtual devices (vif, vbd, vfb,
    etc.) are not XendBase-based and need no deregistration.
    """
    from xen.xend import XendDomain
    if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
        # The domain still exists -- keep its instances alive.
        return

    # Drop the VMMetrics instance, if still registered.
    metrics_uuid = self.metrics.get_uuid()
    if XendAPIStore.get(metrics_uuid, self.metrics.getClass()) is not None:
        self.metrics.destroy()

    # Deregister DPCI and DSCSI instances belonging to this VM.
    vm_uuid = self.info.get('uuid')
    for dpci_uuid in XendDPCI.get_by_VM(vm_uuid):
        XendAPIStore.deregister(dpci_uuid, "DPCI")
    for dscsi_uuid in XendDSCSI.get_by_VM(vm_uuid):
        XendAPIStore.deregister(dscsi_uuid, "DSCSI")
def has_device(self, dev_class, dev_uuid):
    """True if dev_uuid appears in this VM's refs for dev_class."""
    refs = self.info['%s_refs' % dev_class.lower()]
    return dev_uuid in refs
def __str__(self):
    """Short human-readable summary: id, name, memory and state."""
    return '<domain id=%s name=%s memory=%s state=%s>' % \
           (str(self.domid),
            self.info['name_label'],
            str(self.info['memory_dynamic_max']),
            DOM_STATES[self._stateGet()])

__repr__ = __str__