ia64/xen-unstable

tools/python/xen/xend/XendDomainInfo.py @ 19505:bdbe5232b068

xend: Use AUTO_PHP_SLOT where it ought to be

Signed-off-by: Simon Horman <horms@verge.net.au>

author   Keir Fraser <keir.fraser@citrix.com>
date     Mon Apr 06 13:49:59 2009 +0100 (2009-04-06)
parents  5d701be7c37b
children 71077a0fd289

#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """

import logging
import time
import threading
import re
import copy
import os
import traceback
from types import StringTypes

import xen.lowlevel.xc
from xen.util import asserts
from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
import xen.util.xsm.xsm as security
from xen.util import xsconstants

from xen.xend import balloon, sxp, uuid, image, arch, osdep
from xen.xend import XendOptions, XendNode, XendConfig

from xen.xend.XendConfig import scrub_password
from xen.xend.XendBootloader import bootloader, bootloader_tidy
from xen.xend.XendError import XendError, VmError
from xen.xend.XendDevices import XendDevices
from xen.xend.XendTask import XendTask
from xen.xend.xenstore.xstransact import xstransact, complete
from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
from xen.xend.xenstore.xswatch import xswatch
from xen.xend.XendConstants import *
from xen.xend.XendAPIConstants import *
from xen.xend.server.DevConstants import xenbusState

from xen.xend.XendVMMetrics import XendVMMetrics

from xen.xend import XendAPIStore
from xen.xend.XendPPCI import XendPPCI
from xen.xend.XendDPCI import XendDPCI
from xen.xend.XendPSCSI import XendPSCSI
from xen.xend.XendDSCSI import XendDSCSI

MIGRATE_TIMEOUT = 30.0
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

xc = xen.lowlevel.xc.xc()
xoptions = XendOptions.instance()

log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)


def create(config):
    """Creates and starts a VM using the supplied configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain
    domconfig = XendConfig.XendConfig(sxp_obj = config)
    othervm = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if othervm is None or othervm.domid is None:
        othervm = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if othervm is not None and othervm.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid))
    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        log.exception('Domain construction failed')
        vm.destroy()
        raise

    return vm
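
# Example (illustrative, not part of the original source): create() takes
# the same kind of SXP list that the xm tools build; the exact fields
# accepted depend on XendConfig, so the keys below are an assumption for
# a minimal PV guest:
#
#     cfg = ['vm',
#            ['name', 'example-vm'],
#            ['memory', 256],
#            ['image', ['linux', ['kernel', '/boot/vmlinuz-xen']]]]
#     dominfo = create(cfg)    # raises VmError on bad config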

def create_from_dict(config_dict):
    """Creates and starts a VM using the supplied configuration.

    @param config_dict: A configuration dictionary.

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """

    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))
    vm = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        vm.start()
    except:
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm

def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration
    @type  info: dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    #       abort or ignore, but there may be cases where xenstore's
    #       entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d. '
                     'Trying our best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm


def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))
    vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                        resume = True)
    try:
        vm.resume()
        return vm
    except:
        vm.destroy()
        raise

def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating a VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object

    @rtype:  XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    vm = XendDomainInfo(domconfig)
    return vm

def domain_by_name(name):
    """Get domain by name

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    return XendDomain.instance().domain_lookup_by_name_nr(name)


def shutdown_reason(code):
    """Get a shutdown reason from a code.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason
    @rtype:  string
    """
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")

def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type  dom: int
    @return: info or None
    @rtype:  dictionary
    """
    try:
        domlist = xc.domain_getinfo(dom, 1)
        if domlist and dom == domlist[0]['domid']:
            return domlist[0]
    except Exception, err:
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None

def get_assigned_pci_devices(domid):
    dev_str_list = []
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        return dev_str_list
    num_devs = int(num_devs)
    for i in range(num_devs):
        dev_str = xstransact.Read(path + 'dev-%i' % i)
        dev_str_list = dev_str_list + [dev_str]
    return dev_str_list
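
# For reference, get_assigned_pci_devices() walks a xenstore layout like
# the following under the dom0 PCI backend (values illustrative):
#
#     /local/domain/0/backend/pci/<domid>/0/num_devs = "2"
#     /local/domain/0/backend/pci/<domid>/0/dev-0    = "0000:00:19.0"
#     /local/domain/0/backend/pci/<domid>/0/dev-1    = "0000:03:00.0"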

def do_FLR(domid):
    from xen.xend.server.pciif import parse_pci_name, PciDevice
    dev_str_list = get_assigned_pci_devices(domid)

    for dev_str in dev_str_list:
        (dom, b, d, f) = parse_pci_name(dev_str)
        try:
            dev = PciDevice(dom, b, d, f)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse its resources - "+str(e))
        dev.do_FLR()

class XendDomainInfo:
    """An object that represents a domain.

    @TODO: try to unify dom and domid, they mean the same thing, but
           xc refers to it as dom, and everywhere else, including
           xenstore it is domid. The best way is to change xc's
           python interface.

    @ivar info: Parsed configuration
    @type info: dictionary
    @ivar domid: Domain ID (if VM has started)
    @type domid: int or None
    @ivar vmpath: XenStore path to this VM.
    @type vmpath: string
    @ivar dompath: XenStore path to this Domain.
    @type dompath: string
    @ivar image: Reference to the VM Image.
    @type image: xen.xend.image.ImageHandler
    @ivar store_port: event channel to xenstored
    @type store_port: int
    @ivar console_port: event channel to xenconsoled
    @type console_port: int
    @ivar store_mfn: xenstored mfn
    @type store_mfn: int
    @ivar console_mfn: xenconsoled mfn
    @type console_mfn: int
    @ivar notes: OS image notes
    @type notes: dictionary
    @ivar vmWatch: reference to a watch on the xenstored vmpath
    @type vmWatch: xen.xend.xenstore.xswatch
    @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
    @type shutdownWatch: xen.xend.xenstore.xswatch
    @ivar shutdownStartTime: UNIX Time when domain started shutting down.
    @type shutdownStartTime: float or None
    # @ivar state: Domain state
    # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
    @ivar state_updated: lock for self.state
    @type state_updated: threading.Condition
    @ivar refresh_shutdown_lock: lock for polling shutdown state
    @type refresh_shutdown_lock: threading.Condition
    @ivar _deviceControllers: device controller cache for this domain
    @type _deviceControllers: dict 'string' to DevControllers
    """

    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type  info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type    domid: int
        @keyword dompath: Set initial dompath (if any)
        @type    dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type    augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type    priv: bool
        @keyword resume: Is this domain being resumed?
        @type    resume: bool
        """

        self.info = info
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                pass

        self.dompath = dompath

        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)


    #
    # Public functions available through XMLRPC
    #


    def start(self, is_managed = False):
        """Attempts to start the VM by doing the appropriate
        initialisation if it is not already started.
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                       self.getWeight(),
                                                       self.getCap())
            except:
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')

    def resume(self):
        """Resumes a domain that has come back from suspension."""
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])

    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored."""
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        self._removeVm('xend/previous_restart_time')
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
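
    # Example (illustrative): a clean guest poweroff by writing to the
    # guest's control/shutdown xenstore node; the reason string must be
    # one of DOMAIN_SHUTDOWN_REASONS.values():
    #
    #     dominfo.shutdown('poweroff')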

    def pause(self):
        """Pause domain

        @raise XendError: Failed pausing a domain
        """
        try:
            xc.domain_pause(self.domid)
            self._stateSet(DOM_STATE_PAUSED)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be paused: %s" % str(ex))

    def unpause(self):
        """Unpause domain

        @raise XendError: Failed unpausing a domain
        """
        try:
            xc.domain_unpause(self.domid)
            self._stateSet(DOM_STATE_RUNNING)
        except Exception, ex:
            log.exception(ex)
            raise XendError("Domain unable to be unpaused: %s" % str(ex))

    def send_sysrq(self, key):
        """ Send a Sysrq equivalent key via xenstored."""
        if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
            raise XendError("Domain '%s' is not started" % self.info['name_label'])

        asserts.isCharConvertible(key)
        self.storeDom("control/sysrq", '%c' % key)
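
    # Example (illustrative): request an emergency filesystem sync in the
    # guest via the magic sysrq 's' key; anything '%c'-convertible works:
    #
    #     dominfo.send_sysrq(ord('s'))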

    def sync_pcidev_info(self):

        if not self.info.is_hvm():
            return

        devid = '0'
        dev_info = self._getDeviceInfo_pci(devid)
        if dev_info is None:
            return

        # get the virtual slot info from xenstore
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']

        count = 0
        vslots = None
        while vslots is None and count < 20:
            vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
                                     % (self.getDomid(), devid))
            time.sleep(0.1)
            count += 1
        if vslots is None:
            log.error("Device model didn't report the vslots for PCI device")
            return

        #delete last delim
        if vslots[-1] == ";":
            vslots = vslots[:-1]

        slot_list = vslots.split(';')
        if len(slot_list) != len(pci_devs):
            log.error("Device model's pci dev num mismatch")
            return

        #update the vslot info
        count = 0
        for x in pci_devs:
            x['vslt'] = slot_list[count]
            count += 1


    def hvm_pci_device_create(self, dev_config):
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)  #from self.info['devices']

        #check conflict before triggering hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
                    int(x['vslt'], 16) != AUTO_PHP_SLOT):
                    raise VmError("vslot %s already has a device." % (new_dev['vslt']))

                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(self.domid, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("failed to assign device (%x:%x.%x): maybe it has"
                          " already been assigned to another domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Here, we duplicate some checks (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checks before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        domain = int(new_dev['domain'],16)
        bus = int(new_dev['bus'],16)
        dev = int(new_dev['slot'],16)
        func = int(new_dev['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse its resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                           "%s\n"+ \
                           "See the pciback.hide kernel "+ \
                           "command-line parameter or\n"+ \
                           "bind your slot/device to the PCI backend using sysfs" \
                           )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
        for pci_str in coassignment_list:
            (domain, bus, dev, func) = parse_pci_name(pci_str)
            dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
            if xc.test_assign_device(self.domid, dev_str) == 0:
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-attach %s to dom%d"
                               " because one of its co-assignment devices %s has been"
                               " assigned to another domain."
                               ) % (pci_device.name, self.domid, pci_str))

        opts = ''
        if 'opts' in new_dev and len(new_dev['opts']) > 0:
            config_opts = new_dev['opts']
            config_opts = map(lambda (x, y): x+'='+y, config_opts)
            opts = ',' + reduce(lambda x, y: x+','+y, config_opts)

        bdf_str = "%s:%s:%s.%s%s@%s" % (new_dev['domain'],
                                        new_dev['bus'],
                                        new_dev['slot'],
                                        new_dev['func'],
                                        opts,
                                        new_dev['vslt'])
        self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
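
    # The string handed to ioemu above follows the format string
    # "%s:%s:%s.%s%s@%s", i.e. <domain>:<bus>:<slot>.<func>[,opt=val,...]@<vslot>.
    # For example (values illustrative):
    #
    #     0000:00:19.0@6              - no options
    #     0000:00:19.0,opt=val@6      - one entry from new_dev['opts']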


    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!")

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
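
    # Example (illustrative): SXP accepted by device_create() for a new
    # vif; the MAC must match the regex checked above, and the 'bridge'
    # field is an assumption about the vif controller's config keys:
    #
    #     dominfo.device_create(['vif',
    #                            ['mac', '00:16:3e:00:00:01'],
    #                            ['bridge', 'xenbr0']])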


    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                self.hvm_pci_device_create(dev_config)
                # Update vslt
                vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                       % self.getDomid())
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt
            else:
                # HVM PCI device detachment
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslt = AUTO_PHP_SLOT_STR
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslt = x['vslt']
                        break
                if vslt == AUTO_PHP_SLOT_STR:
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslt, 16))
                # Update vslt
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True

    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device.
        quoted pci function
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot be defined '
                                'because the mode is different' % devs[0]['p-dev'])

            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot be defined '
                                    'because the backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach a vscsi device that does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True

    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                pass

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)

        # update XendConfig with new device info
        if dev_uuid:
            self.info.device_update(dev_uuid, dev_sxp)

        return True

    def waitForDevices(self):
        """Wait for this domain's configured devices to connect.

        @raise VmError: if any device fails to initialise.
        """
        for devclass in XendDevices.valid_devices():
            self.getDeviceController(devclass).waitForDevices()

    def hvm_destroyPCIDevice(self, vslot):
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')  #from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            if int(x['vslt'], 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        if vslot == AUTO_PHP_SLOT:
            raise VmError("Device @ vslot 0x%x does not support hotplug." % (vslot))

        # Check the co-assignment.
        # To pci-detach a device D from domN, we should ensure: for each DD in the
        # list of D's co-assignment devices, DD is not assigned (to domN).
        from xen.xend.server.pciif import PciDevice
        domain = int(x['domain'],16)
        bus = int(x['bus'],16)
        dev = int(x['slot'],16)
        func = int(x['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                          "parse its resources - "+str(e))
        coassignment_list = pci_device.find_coassigned_devices()
        coassignment_list.remove(pci_device.name)
        assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
        for pci_str in coassignment_list:
            if pci_str in assigned_pci_device_str_list:
                raise VmError(("pci: failed to pci-detach %s from dom%d"
                               " because one of its co-assignment devices %s is still"
                               " assigned to the domain."
                               ) % (pci_device.name, self.domid, pci_str))

        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0

    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc

    def getDeviceSxprs(self, deviceClass):
        if deviceClass == 'pci':
            dev_info = self._getDeviceInfo_pci('0')  #from self.info['devices']
            if dev_info is None:
                return []
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_devs = self.info['devices'][dev_uuid][1]['devs']
            pci_len = len(pci_devs)
            return pci_devs
        if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
            return self.getDeviceController(deviceClass).sxprs()
        else:
            sxprs = []
            dev_num = 0
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
                   (deviceClass != 'vbd' and dev_type != deviceClass):
                    continue

                if deviceClass == 'vscsi':
                    vscsi_devs = ['devs', []]
                    for vscsi_dev in sxp.children(dev_info, 'dev'):
                        vscsi_dev.append(['frontstate', None])
                        vscsi_devs[1].append(vscsi_dev)
                        dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
                    vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
                    sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
                elif deviceClass == 'vbd':
                    dev = sxp.child_value(dev_info, 'dev')
                    if 'ioemu:' in dev:
                        (_, dev) = dev.split(':', 1)
                    try:
                        (dev_name, _) = dev.split(':', 1)  # Remove ":disk" or ":cdrom"
                    except ValueError:
                        dev_name = dev
                    dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
                    sxprs.append([dev_num, dev_info])
                else:
                    sxprs.append([dev_num, dev_info])
                    dev_num += 1
            return sxprs

    def getBlockDeviceClass(self, devid):
        # To get a device number from the devid,
        # we temporarily use the device controller of VBD.
        dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
        dev_info = self._getDeviceInfo_vbd(dev)
        if dev_info:
            return dev_info[0]

    def _getDeviceInfo_vif(self, mac):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vif':
                continue
            if mac == sxp.child_value(dev_info, 'mac'):
                return dev_info

    def _getDeviceInfo_vbd(self, devid):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vbd' and dev_type != 'tap':
                continue
            dev = sxp.child_value(dev_info, 'dev')
            dev = dev.split(':')[0]
            dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
            if devid == dev:
                return dev_info

    def _getDeviceInfo_pci(self, devid):
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'pci':
                continue
            return dev_info
        return None

    def _getDeviceInfo_vscsi(self, devid):
        devid = int(devid)
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vscsi':
                continue
            devs = sxp.children(dev_info, 'dev')
            if devid == int(sxp.child_value(devs[0], 'devid')):
                return dev_info
        return None

    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
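
    # Unit bookkeeping in setMemoryTarget(), worked through for
    # target = 256 MiB (all values derived from the code above):
    #
    #     self.info fields         : 256 * MiB  = 268435456 bytes
    #     balloon.free() argument  : delta * 1024, i.e. KiB
    #     memory/target in store   : 256 << 10  = 262144 KiB
    #     xc.domain_set_target_mem : 256 * 1024 = 262144 KiB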

    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            maxmem = int(limit) * 1024
            try:
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)


    def getVCPUInfo(self):
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                  self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))


    def getDomInfo(self):
        return dom_get(self.domid)


    # internal functions ... TODO: re-categorise


    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()

    def _update_consoles(self, transaction = None):
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                    XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break


    # Function to update xenstore /vm/*


    def _readVm(self, *args):
        return xstransact.Read(self.vmpath, *args)

    def _writeVm(self, *args):
        return xstransact.Write(self.vmpath, *args)

    def _removeVm(self, *args):
        return xstransact.Remove(self.vmpath, *args)

    def _gatherVm(self, *args):
        return xstransact.Gather(self.vmpath, *args)

    def _listRecursiveVm(self, *args):
        return xstransact.ListRecursive(self.vmpath, *args)

    def storeVm(self, *args):
        return xstransact.Store(self.vmpath, *args)

    def permissionsVm(self, *args):
        return xstransact.SetPermissions(self.vmpath, *args)


    # Function to update xenstore /dom/*


    def readDom(self, *args):
        return xstransact.Read(self.dompath, *args)

    def gatherDom(self, *args):
        return xstransact.Gather(self.dompath, *args)

    def _writeDom(self, *args):
        return xstransact.Write(self.dompath, *args)

    def _removeDom(self, *args):
        return xstransact.Remove(self.dompath, *args)

    def storeDom(self, *args):
        return xstransact.Store(self.dompath, *args)


    def readDomTxn(self, transaction, *args):
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.read(*paths)

    def gatherDomTxn(self, transaction, *args):
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.gather(*paths)

    def _writeDomTxn(self, transaction, *args):
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.write(*paths)

    def _removeDomTxn(self, transaction, *args):
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.remove(*paths)

    def storeDomTxn(self, transaction, *args):
        paths = map(lambda x: self.dompath + "/" + x, args)
        return transaction.store(*paths)
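
    # Example (illustrative): the helpers above simply prefix this
    # domain's xenstore paths, so
    #
    #     self.storeDom("control/shutdown", "reboot")
    #     reason = self.readDom("control/shutdown")
    #
    # read and write /local/domain/<domid>/control/shutdown via xstransact.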


    def _recreateDom(self):
        complete(self.dompath, lambda t: self._recreateDomFunc(t))

    def _recreateDomFunc(self, t):
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
        for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})

    def _storeDomDetails(self):
        to_store = {
            'domid':          str(self.domid),
            'vm':             self.vmpath,
            'name':           self.info['name_label'],
            'console/limit':  str(xoptions.get_console_limit() * 1024),
            'memory/target':  str(self.info['memory_dynamic_max'] / 1024),
            }

        def f(n, v):
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port',     self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type',     constype)
        f('store/port',       self.store_port)
        f('store/ring-ref',   self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)

    def _vcpuDomDetails(self):
        def availability(n):
            if self.info['vcpu_avail'] & (1 << n):
                return 'online'
            else:
                return 'offline'

        result = {}
        for v in range(0, self.info['VCPUs_max']):
            result["cpu/%d/availability" % v] = availability(v)
        return result
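
    # Example (illustrative): with VCPUs_max = 4 and vcpu_avail = 0b0101,
    # _vcpuDomDetails() yields
    #
    #     {'cpu/0/availability': 'online',
    #      'cpu/1/availability': 'offline',
    #      'cpu/2/availability': 'online',
    #      'cpu/3/availability': 'offline'}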


    # xenstore watches


    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date. This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)

    def _storeChanged(self, _):
        log.trace("XendDomainInfo.storeChanged")

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                          for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Update the rtc_timeoffset to be preserved across reboot.
        # NB. No need to update xenstore domain section.
        val = int(vm_details.get("rtc/timeoffset", 0))
        self.info["platform"]["rtc_timeoffset"] = val

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self._storeDomDetails()

        return 1

    def _handleShutdownWatch(self, _):
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True


    # Public Attributes for the VM


    def getDomid(self):
        return self.domid

    def setName(self, name, to_store = True):
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)

    def getName(self):
        return self.info['name_label']

    def getDomainPath(self):
        return self.dompath

    def getShutdownReason(self):
        return self.readDom('control/shutdown')

    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        return self.store_port

    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        return self.console_port

    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']

    def getVCpuCount(self):
        return self.info['VCPUs_max']

    def setVCpuCount(self, vcpus):
        def vcpus_valid(n):
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
        xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
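
    # Example (illustrative): setVCpuCount(2) publishes the availability
    # bitmask (1 << 2) - 1 = 0b11, i.e. VCPUs 0 and 1 online, and
    # _vcpuDomDetails() is rewritten accordingly for a running domain.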

    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        return self.info['memory_dynamic_max'] / 1024

    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024

    def getResume(self):
        return str(self._resume)

    def setResume(self, isresume):
        self._resume = isresume

    def getCpus(self):
        return self.info['cpus']

    def setCpus(self, cpumap):
        self.info['cpus'] = cpumap

    def getCap(self):
        return self.info['vcpus_params']['cap']

    def setCap(self, cpu_cap):
        self.info['vcpus_params']['cap'] = cpu_cap

    def getWeight(self):
        return self.info['vcpus_params']['weight']

    def setWeight(self, cpu_weight):
        self.info['vcpus_params']['weight'] = cpu_weight

    def getRestartCount(self):
        return self._readVm('xend/restart_count')
1765 def refreshShutdown(self, xeninfo = None):
1766 """ Checks the domain for whether a shutdown is required.
1768 Called from XendDomainInfo and also image.py for HVM images.
1769 """
1771 # If set at the end of this method, a restart is required, with the
1772 # given reason. This restart has to be done out of the scope of
1773 # refresh_shutdown_lock.
1774 restart_reason = None
1776 self.refresh_shutdown_lock.acquire()
1777 try:
1778 if xeninfo is None:
1779 xeninfo = dom_get(self.domid)
1780 if xeninfo is None:
1781 # The domain no longer exists. This will occur if we have
1782 # scheduled a timer to check for shutdown timeouts and the
1783 # shutdown succeeded. It will also occur if someone
1784 # destroys a domain beneath us. We clean up the domain,
1785 # just in case, but we can't clean up the VM, because that
1786 # VM may have migrated to a different domain on this
1787 # machine.
1788 self.cleanupDomain()
1789 self._stateSet(DOM_STATE_HALTED)
1790 return
1792 if xeninfo['dying']:
1793 # Dying means that a domain has been destroyed, but has not
1794 # yet been cleaned up by Xen. This state could persist
1795 # indefinitely if, for example, another domain has some of its
1796 # pages mapped. We might like to diagnose this problem in the
1797 # future, but for now all we do is make sure that it's not us
1798 # holding the pages, by calling cleanupDomain. We can't
1799 # clean up the VM, as above.
1800 self.cleanupDomain()
1801 self._stateSet(DOM_STATE_SHUTDOWN)
1802 return
1804 elif xeninfo['crashed']:
1805 if self.readDom('xend/shutdown_completed'):
1806 # We've seen this shutdown already, but we are preserving
1807 # the domain for debugging. Leave it alone.
1808 return
1810 log.warn('Domain has crashed: name=%s id=%d.',
1811 self.info['name_label'], self.domid)
1812 self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
1814 restart_reason = 'crash'
1815 self._stateSet(DOM_STATE_HALTED)
1817 elif xeninfo['shutdown']:
1818 self._stateSet(DOM_STATE_SHUTDOWN)
1819 if self.readDom('xend/shutdown_completed'):
1820 # We've seen this shutdown already, but we are preserving
1821 # the domain for debugging. Leave it alone.
1822 return
1824 else:
1825 reason = shutdown_reason(xeninfo['shutdown_reason'])
1827 log.info('Domain has shut down: name=%s id=%d reason=%s.',
1828 self.info['name_label'], self.domid, reason)
1829 self._writeVm(LAST_SHUTDOWN_REASON, reason)
1831 self._clearRestart()
1833 if reason == 'suspend':
1834 self._stateSet(DOM_STATE_SUSPENDED)
1835 # Don't destroy the domain. XendCheckpoint will do
1836 # this once it has finished. However, stop watching
1837 # the VM path now, otherwise we will end up with one
1838 # watch for the old domain, and one for the new.
1839 self._unwatchVm()
1840 elif reason in ('poweroff', 'reboot'):
1841 restart_reason = reason
1842 else:
1843 self.destroy()
1845 elif self.dompath is None:
1846 # We have yet to manage to call introduceDomain on this
1847 # domain. This can happen if a restore is in progress, or has
1848 # failed. Ignore this domain.
1849 pass
1850 else:
1851 # Domain is alive. If we are shutting it down, log a message
1852 # if it seems unresponsive.
1853 if xeninfo['paused']:
1854 self._stateSet(DOM_STATE_PAUSED)
1855 else:
1856 self._stateSet(DOM_STATE_RUNNING)
1858 if self.shutdownStartTime:
1859 timeout = (SHUTDOWN_TIMEOUT - time.time() +
1860 self.shutdownStartTime)
1861 if (timeout < 0 and not self.readDom('xend/unresponsive')):
1862 log.info(
1863 "Domain shutdown timeout expired: name=%s id=%s",
1864 self.info['name_label'], self.domid)
1865 self.storeDom('xend/unresponsive', 'True')
1866 finally:
1867 self.refresh_shutdown_lock.release()
1869 if restart_reason:
1870 threading.Thread(target = self._maybeRestart,
1871 args = (restart_reason,)).start()
1875 # Restart functions - handling whether we come back up on shutdown.
1878 def _clearRestart(self):
1879 self._removeDom("xend/shutdown_start_time")
1881 def _maybeDumpCore(self, reason):
1882 if reason == 'crash':
1883 if xoptions.get_enable_dump() or self.get_on_crash() \
1884 in ['coredump_and_destroy', 'coredump_and_restart']:
1885 try:
1886 self.dumpCore()
1887 except XendError:
1888 # This error has been logged -- there's nothing more
1889 # we can do in this context.
1890 pass
1892 def _maybeRestart(self, reason):
1893 # Before taking configured action, dump core if configured to do so.
1895 self._maybeDumpCore(reason)
1897 # Dispatch to the correct method based upon the configured on_{reason}
1898 # behaviour.
1899 actions = {"destroy" : self.destroy,
1900 "restart" : self._restart,
1901 "preserve" : self._preserve,
1902 "rename-restart" : self._renameRestart,
1903 "coredump-destroy" : self.destroy,
1904 "coredump-restart" : self._restart}
1906 action_conf = {
1907 'poweroff': 'actions_after_shutdown',
1908 'reboot': 'actions_after_reboot',
1909 'crash': 'actions_after_crash',
1912 action_target = self.info.get(action_conf.get(reason))
1913 func = actions.get(action_target, None)
1914 if func and callable(func):
1915 func()
1916 else:
1917 self.destroy() # default to destroy
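# Example (illustrative): a domain configured with
#     actions_after_reboot = 'restart'
# that exits with reason 'reboot' resolves via
#     action_conf['reboot'] -> 'actions_after_reboot' -> 'restart'
# to self._restart; unknown or missing settings fall through to destroy().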
1919 def _renameRestart(self):
1920 self._restart(True)
1922 def _restart(self, rename = False):
1923 """Restart the domain after it has exited.
1925 @param rename True if the old domain is to be renamed and preserved,
1926 False if it is to be destroyed.
1927 """
1928 from xen.xend import XendDomain
1930 if self._readVm(RESTART_IN_PROGRESS):
1931 log.error('Xend failed during restart of domain %s. '
1932 'Refusing to restart to avoid loops.',
1933 str(self.domid))
1934 self.destroy()
1935 return
1937 old_domid = self.domid
1938 self._writeVm(RESTART_IN_PROGRESS, 'True')
1940 now = time.time()
1941 rst = self._readVm('xend/previous_restart_time')
1942 if rst:
1943 rst = float(rst)
1944 timeout = now - rst
1945 if timeout < MINIMUM_RESTART_TIME:
1946 log.error(
1947 'VM %s restarting too fast (%f seconds since the last '
1948 'restart). Refusing to restart to avoid loops.',
1949 self.info['name_label'], timeout)
1950 self.destroy()
1951 return
1953 self._writeVm('xend/previous_restart_time', str(now))
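# Worked example (hypothetical times, assuming MINIMUM_RESTART_TIME were
# 60s): a previous restart at t=1000.0 and a new one at t=1020.0 give
#     timeout = 1020.0 - 1000.0 = 20.0 < 60
# so the domain is destroyed instead of restarted, breaking the loop.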
1955 prev_vm_xend = self._listRecursiveVm('xend')
1956 new_dom_info = self.info
1957 try:
1958 if rename:
1959 new_dom_info = self._preserveForRestart()
1960 else:
1961 self._unwatchVm()
1962 self.destroy()
1964 # new_dom's VM will be the same as this domain's VM, except where
1965 # the rename flag has instructed us to call preserveForRestart.
1966 # In that case, it is important that we remove the
1967 # RESTART_IN_PROGRESS node from the new domain, not the old one,
1968 # once the new one is available.
1970 new_dom = None
1971 try:
1972 new_dom = XendDomain.instance().domain_create_from_dict(
1973 new_dom_info)
1974 for x in prev_vm_xend[0][1]:
1975 new_dom._writeVm('xend/%s' % x[0], x[1])
1976 new_dom.waitForDevices()
1977 new_dom.unpause()
1978 rst_cnt = new_dom._readVm('xend/restart_count')
1979 rst_cnt = int(rst_cnt) + 1
1980 new_dom._writeVm('xend/restart_count', str(rst_cnt))
1981 new_dom._removeVm(RESTART_IN_PROGRESS)
1982 except:
1983 if new_dom:
1984 new_dom._removeVm(RESTART_IN_PROGRESS)
1985 new_dom.destroy()
1986 else:
1987 self._removeVm(RESTART_IN_PROGRESS)
1988 raise
1989 except:
1990 log.exception('Failed to restart domain %s.', str(old_domid))
1992 def _preserveForRestart(self):
1993 """Preserve a domain that has been shut down, by giving it a new UUID,
1994 cloning the VM details, and giving it a new name. This allows us to
1995 keep this domain for debugging, but restart a new one in its place
1996 preserving the restart semantics (name and UUID preserved).
1997 """
1999 new_uuid = uuid.createString()
2000 new_name = 'Domain-%s' % new_uuid
2001 log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
2002 self.info['name_label'], self.domid, self.info['uuid'],
2003 new_name, new_uuid)
2004 self._unwatchVm()
2005 self._releaseDevices()
2006 # Remove existing vm node in xenstore
2007 self._removeVm()
2008 new_dom_info = self.info.copy()
2009 new_dom_info['name_label'] = self.info['name_label']
2010 new_dom_info['uuid'] = self.info['uuid']
2011 self.info['name_label'] = new_name
2012 self.info['uuid'] = new_uuid
2013 self.vmpath = XS_VMROOT + new_uuid
2014 # Write out new vm node to xenstore
2015 self._storeVmDetails()
2016 self._preserve()
2017 return new_dom_info
2020 def _preserve(self):
2021 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2022 self.domid)
2023 self._unwatchVm()
2024 self.storeDom('xend/shutdown_completed', 'True')
2025 self._stateSet(DOM_STATE_HALTED)
2028 # Debugging ..
2031 def dumpCore(self, corefile = None):
2032 """Create a core dump for this domain.
2034 @raise: XendError if core dumping failed.
2035 """
2037 if not corefile:
2038 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
2039 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
2040 self.info['name_label'], self.domid)
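# Example (illustrative): a domain named 'vm1' with domid 3 dumped at
# 14:30.05 on 2009-04-06 would default to
#     /var/xen/dump/2009-0406-1430.05-vm1.3.core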
2042 if os.path.isdir(corefile):
2043 raise XendError("Cannot dump core in a directory: %s" %
2044 corefile)
2046 try:
2047 try:
2048 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2049 xc.domain_dumpcore(self.domid, corefile)
2050 except RuntimeError, ex:
2051 corefile_incomp = corefile+'-incomplete'
2052 try:
2053 os.rename(corefile, corefile_incomp)
2054 except:
2055 pass
2057 log.error("core dump failed: id = %s name = %s: %s",
2058 self.domid, self.info['name_label'], str(ex))
2059 raise XendError("Failed to dump core: %s" % str(ex))
2060 finally:
2061 self._removeVm(DUMPCORE_IN_PROGRESS)
2064 # Device creation/deletion functions
2067 def _createDevice(self, deviceClass, devConfig):
2068 return self.getDeviceController(deviceClass).createDevice(devConfig)
2070 def _waitForDevice(self, deviceClass, devid):
2071 return self.getDeviceController(deviceClass).waitForDevice(devid)
2073 def _waitForDeviceUUID(self, dev_uuid):
2074 deviceClass, config = self.info['devices'].get(dev_uuid)
2075 self._waitForDevice(deviceClass, config['devid'])
2077 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2078 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2079 devid, backpath)
2081 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2082 return self.getDeviceController(deviceClass).reconfigureDevice(
2083 devid, devconfig)
2085 def _createDevices(self):
2086 """Create the devices for a vm.
2088 @raise: VmError for invalid devices
2089 """
2090 if self.image:
2091 self.image.prepareEnvironment()
2093 vscsi_uuidlist = {}
2094 vscsi_devidlist = []
2095 ordered_refs = self.info.ordered_device_refs()
2096 for dev_uuid in ordered_refs:
2097 devclass, config = self.info['devices'][dev_uuid]
2098 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2099 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2100 dev_uuid = config.get('uuid')
2101 devid = self._createDevice(devclass, config)
2103 # store devid in XendConfig for caching reasons
2104 if dev_uuid in self.info['devices']:
2105 self.info['devices'][dev_uuid][1]['devid'] = devid
2107 elif devclass == 'vscsi':
2108 vscsi_config = config.get('devs', [])[0]
2109 devid = vscsi_config.get('devid', '')
2110 dev_uuid = config.get('uuid')
2111 vscsi_uuidlist[devid] = dev_uuid
2112 vscsi_devidlist.append(devid)
2114 # The devids must be sorted so the disks appear in the guest in the intended /dev/sdXX order.
2115 if len(vscsi_uuidlist) > 0:
2116 vscsi_devidlist.sort()
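# Example (illustrative): configured vscsi devids ['2', '0', '1'] sort to
# ['0', '1', '2'], so the corresponding disks appear in the guest in the
# intended /dev/sdXX order.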
2117 for vscsiid in vscsi_devidlist:
2118 dev_uuid = vscsi_uuidlist[vscsiid]
2119 devclass, config = self.info['devices'][dev_uuid]
2120 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2121 dev_uuid = config.get('uuid')
2122 devid = self._createDevice(devclass, config)
2123 # store devid in XendConfig for caching reasons
2124 if dev_uuid in self.info['devices']:
2125 self.info['devices'][dev_uuid][1]['devid'] = devid
2128 if self.image:
2129 self.image.createDeviceModel()
2131 # If there are pass-through devices, fetch the virtual PCI slot info from qemu.
2132 self.sync_pcidev_info()
2134 def _releaseDevices(self, suspend = False):
2135 """Release all domain's devices. Nothrow guarantee."""
2136 if self.image:
2137 try:
2138 log.debug("Destroying device model")
2139 self.image.destroyDeviceModel()
2140 except Exception, e:
2141 log.exception("Device model destroy failed %s" % str(e))
2142 else:
2143 log.debug("No device model")
2145 log.debug("Releasing devices")
2146 t = xstransact("%s/device" % self.dompath)
2147 try:
2148 for devclass in XendDevices.valid_devices():
2149 for dev in t.list(devclass):
2150 try:
2151 true_devclass = devclass
2152 if devclass == 'vbd':
2153 # In the case of "vbd", the true device class
2154 # may possibly be "tap". Just in case, verify
2155 # device class.
2156 devid = dev.split('/')[-1]
2157 true_devclass = self.getBlockDeviceClass(devid)
2158 log.debug("Removing %s", dev);
2159 self.destroyDevice(true_devclass, dev, False);
2160 except:
2161 # Log and swallow any exceptions in removal --
2162 # there's nothing more we can do.
2163 log.exception("Device release failed: %s; %s; %s",
2164 self.info['name_label'],
2165 true_devclass, dev)
2166 finally:
2167 t.abort()
2169 def getDeviceController(self, name):
2170 """Get the device controller for this domain, and if it
2171 doesn't exist, create it.
2173 @param name: device class name
2174 @type name: string
2175 @rtype: subclass of DevController
2176 """
2177 if name not in self._deviceControllers:
2178 devController = XendDevices.make_controller(name, self)
2179 if not devController:
2180 raise XendError("Unknown device type: %s" % name)
2181 self._deviceControllers[name] = devController
2183 return self._deviceControllers[name]
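# Usage sketch (illustrative): controllers are created lazily and cached,
# so repeated lookups return the same instance:
#     ctrl = dominfo.getDeviceController('vif')
#     assert ctrl is dominfo.getDeviceController('vif')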
2186 # Migration functions (public)
2189 def testMigrateDevices(self, network, dst):
2190 """ Notify all device about intention of migration
2191 @raise: XendError for a device that cannot be migrated
2192 """
2193 for (n, c) in self.info.all_devices_sxpr():
2194 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2195 if rc != 0:
2196 raise XendError("Device of type '%s' refuses migration." % n)
2198 def migrateDevices(self, network, dst, step, domName=''):
2199 """Notify the devices about migration
2200 """
2201 ctr = 0
2202 try:
2203 for (dev_type, dev_conf) in self.info.all_devices_sxpr():
2204 self.migrateDevice(dev_type, dev_conf, network, dst,
2205 step, domName)
2206 ctr = ctr + 1
2207 except:
2208 for dev_type, dev_conf in self.info.all_devices_sxpr():
2209 if ctr == 0:
2210 step = step - 1
2211 ctr = ctr - 1
2212 self._recoverMigrateDevice(dev_type, dev_conf, network,
2213 dst, step, domName)
2214 raise
2216 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2217 step, domName=''):
2218 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2219 network, dst, step, domName)
2221 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2222 dst, step, domName=''):
2223 return self.getDeviceController(deviceClass).recover_migrate(
2224 deviceConfig, network, dst, step, domName)
2227 ## private:
2229 def _constructDomain(self):
2230 """Construct the domain.
2232 @raise: VmError on error
2233 """
2235 log.debug('XendDomainInfo.constructDomain')
2237 self.shutdownStartTime = None
2239 hap = 0
2240 hvm = self.info.is_hvm()
2241 if hvm:
2242 hap = self.info.is_hap()
2243 info = xc.xeninfo()
2244 if 'hvm' not in info['xen_caps']:
2245 raise VmError("HVM guest support is unavailable: is VT/AMD-V "
2246 "supported by your CPU and enabled in your "
2247 "BIOS?")
2249 # Hack to pre-reserve some memory for initial domain creation.
2250 # There is an implicit memory overhead for any domain creation. This
2251 # overhead is greater for some types of domain than others. For
2252 # example, an x86 HVM domain will have a default shadow-pagetable
2253 # allocation of 1MB. We free up 4MB here to be on the safe side.
2254 # 2MB memory allocation was not enough in some cases, so it's 4MB now
2255 balloon.free(4*1024, self) # 4MB should be plenty
2257 ssidref = 0
2258 if security.on() == xsconstants.XS_POLICY_USE:
2259 ssidref = security.calc_dom_ssidref_from_info(self.info)
2260 if security.has_authorization(ssidref) == False:
2261 raise VmError("VM is not authorized to run.")
2263 s3_integrity = 0
2264 if self.info.has_key('s3_integrity'):
2265 s3_integrity = self.info['s3_integrity']
2266 flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)
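# Worked example (illustrative): an HVM guest with HAP enabled and no S3
# integrity packs to
#     flags = (1 << 0) | (1 << 1) | (0 << 2) = 0b011 = 3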
2268 try:
2269 self.domid = xc.domain_create(
2270 domid = 0,
2271 ssidref = ssidref,
2272 handle = uuid.fromString(self.info['uuid']),
2273 flags = flags,
2274 target = self.info.target())
2275 except Exception, e:
2276 # may get here if due to ACM the operation is not permitted
2277 if security.on() == xsconstants.XS_POLICY_ACM:
2278 raise VmError('Domain in conflict set with running domain?')
2280 if self.domid < 0:
2281 raise VmError('Creating domain failed: name=%s' %
2282 self.info['name_label'])
2284 self.dompath = GetDomainPath(self.domid)
2286 self._recreateDom()
2288 # Set timer configuration of domain
2289 timer_mode = self.info["platform"].get("timer_mode")
2290 if hvm and timer_mode is not None:
2291 xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
2292 long(timer_mode))
2294 # Set Viridian interface configuration of domain
2295 viridian = self.info["platform"].get("viridian")
2296 if arch.type == "x86" and hvm and viridian is not None:
2297 xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))
2299 # Optionally enable virtual HPET
2300 hpet = self.info["platform"].get("hpet")
2301 if hvm and hpet is not None:
2302 xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
2303 long(hpet))
2305 # Optionally enable periodic vpt aligning
2306 vpt_align = self.info["platform"].get("vpt_align")
2307 if hvm and vpt_align is not None:
2308 xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
2309 long(vpt_align))
2311 # Set maximum number of vcpus in domain
2312 xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))
2314 # Check for cpu_{cap|weight} validity for credit scheduler
2315 if XendNode.instance().xenschedinfo() == 'credit':
2316 cap = self.getCap()
2317 weight = self.getWeight()
2319 assert type(weight) == int
2320 assert type(cap) == int
2322 if weight < 1 or weight > 65535:
2323 raise VmError("Cpu weight out of range, valid values are within range from 1 to 65535")
2325 if cap < 0 or cap > self.getVCpuCount() * 100:
2326 raise VmError("Cpu cap out of range, valid range is from 0 to %s for specified number of vcpus" %
2327 (self.getVCpuCount() * 100))
2329 # Test whether the devices can be assigned with VT-d
2330 pci = self.info["platform"].get("pci")
2331 pci_str = ''
2332 if pci and len(pci) > 0:
2333 pci = map(lambda x: x[0:4], pci) # strip options
2334 pci_str = str(pci)
2335 if hvm and pci_str:
2336 bdf = xc.test_assign_device(self.domid, pci_str)
2337 if bdf != 0:
2338 if bdf == -1:
2339 raise VmError("failed to assign device: maybe the platform"
2340 " doesn't support VT-d, or VT-d isn't enabled"
2341 " properly?")
2342 bus = (bdf >> 16) & 0xff
2343 devfn = (bdf >> 8) & 0xff
2344 dev = (devfn >> 3) & 0x1f
2345 func = devfn & 0x7
2346 raise VmError("fail to assign device(%x:%x.%x): maybe it has"
2347 " already been assigned to other domain, or maybe"
2348 " it doesn't exist." % (bus, dev, func))
2350 # register the domain in the list
2351 from xen.xend import XendDomain
2352 XendDomain.instance().add_domain(self)
2354 def _introduceDomain(self):
2355 assert self.domid is not None
2356 assert self.store_mfn is not None
2357 assert self.store_port is not None
2359 try:
2360 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2361 except RuntimeError, exn:
2362 raise XendError(str(exn))
2364 def _setTarget(self, target):
2365 assert self.domid is not None
2367 try:
2368 SetTarget(self.domid, target)
2369 self.storeDom('target', target)
2370 except RuntimeError, exn:
2371 raise XendError(str(exn))
2374 def _setCPUAffinity(self):
2375 """ Repin domain vcpus if a restricted cpus list is provided
2376 """
2378 def has_cpus():
2379 if self.info['cpus'] is not None:
2380 for c in self.info['cpus']:
2381 if c:
2382 return True
2383 return False
2385 if has_cpus():
2386 for v in range(0, self.info['VCPUs_max']):
2387 if self.info['cpus'][v]:
2388 xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
2389 else:
2390 def find_relaxed_node(node_list):
2391 import sys
2392 nr_nodes = info['nr_nodes']
2393 if node_list is None:
2394 node_list = range(0, nr_nodes)
2395 nodeload = [0] * nr_nodes
2397 from xen.xend import XendDomain
2398 doms = XendDomain.instance().list('all')
2399 for dom in filter (lambda d: d.domid != self.domid, doms):
2400 cpuinfo = dom.getVCPUInfo()
2401 for vcpu in sxp.children(cpuinfo, 'vcpu'):
2402 if sxp.child_value(vcpu, 'online') == 0: continue
2403 cpumap = list(sxp.child_value(vcpu,'cpumap'))
2404 for i in range(0, nr_nodes):
2405 node_cpumask = info['node_to_cpu'][i]
2406 for j in node_cpumask:
2407 if j in cpumap:
2408 nodeload[i] += 1
2409 break
2410 for i in range(0, nr_nodes):
2411 if len(info['node_to_cpu'][i]) > 0 and i in node_list:
2412 nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
2413 else:
2414 nodeload[i] = sys.maxint
2415 index = nodeload.index( min(nodeload) )
2416 return index
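# Worked example (hypothetical 2-node box): raw loads [4, 2] with 4 CPUs
# per node scale to
#     [int(4 * 16 / 4), int(2 * 16 / 4)] = [16, 8]
# so node 1, the least-loaded candidate, is chosen.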
2418 info = xc.physinfo()
2419 if info['nr_nodes'] > 1:
2420 node_memory_list = info['node_to_memory']
2421 needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
2422 candidate_node_list = []
2423 for i in range(0, info['nr_nodes']):
2424 if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
2425 candidate_node_list.append(i)
2426 index = find_relaxed_node(candidate_node_list)
2427 cpumask = info['node_to_cpu'][index]
2428 for v in range(0, self.info['VCPUs_max']):
2429 xc.vcpu_setaffinity(self.domid, v, cpumask)
2432 def _initDomain(self):
2433 log.debug('XendDomainInfo.initDomain: %s %s',
2434 self.domid,
2435 self.info['vcpus_params']['weight'])
2437 self._configureBootloader()
2439 try:
2440 self.image = image.create(self, self.info)
2442 # repin domain vcpus if a restricted cpus list is provided
2443 # this is done prior to memory allocation to aid in memory
2444 # distribution for NUMA systems.
2445 self._setCPUAffinity()
2447 # Use architecture- and image-specific calculations to determine
2448 # the various headrooms necessary, given the raw configured
2449 # values. maxmem, memory, and shadow are all in KiB.
2450 # but memory_static_max etc are all stored in bytes now.
2451 memory = self.image.getRequiredAvailableMemory(
2452 self.info['memory_dynamic_max'] / 1024)
2453 maxmem = self.image.getRequiredAvailableMemory(
2454 self.info['memory_static_max'] / 1024)
2455 shadow = self.image.getRequiredShadowMemory(
2456 self.info['shadow_memory'] * 1024,
2457 self.info['memory_static_max'] / 1024)
2459 log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
2460 # Round shadow up to a multiple of a MiB, as shadow_mem_control
2461 # takes MiB and we must not round down and end up under-providing.
2462 shadow = ((shadow + 1023) / 1024) * 1024
2464 # set memory limit
2465 xc.domain_setmaxmem(self.domid, maxmem)
2467 # Reserve 1 page per MiB of RAM for separate VT-d page table.
2468 vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
2469 # Round vtd_mem up to a multiple of a MiB.
2470 vtd_mem = ((vtd_mem + 1023) / 1024) * 1024
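# Worked example (illustrative): rounding 2500 KiB up to a MiB boundary:
#     ((2500 + 1023) / 1024) * 1024 = 3 * 1024 = 3072 KiB (3 MiB)
# so we never under-provide shadow or VT-d memory by rounding down.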
2472 # Make sure there's enough RAM available for the domain
2473 balloon.free(memory + shadow + vtd_mem, self)
2475 # Set up the shadow memory
2476 shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
2477 self.info['shadow_memory'] = shadow_cur
2479 # machine address size
2480 if self.info.has_key('machine_address_size'):
2481 log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
2482 xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])
2484 if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
2485 log.debug("_initDomain: suppressing spurious page faults")
2486 xc.domain_suppress_spurious_page_faults(self.domid)
2488 self._createChannels()
2490 channel_details = self.image.createImage()
2492 self.store_mfn = channel_details['store_mfn']
2493 if 'console_mfn' in channel_details:
2494 self.console_mfn = channel_details['console_mfn']
2495 if 'notes' in channel_details:
2496 self.info.set_notes(channel_details['notes'])
2497 if 'native_protocol' in channel_details:
2498 self.native_protocol = channel_details['native_protocol']
2500 self._introduceDomain()
2501 if self.info.target():
2502 self._setTarget(self.info.target())
2504 self._createDevices()
2506 self.image.cleanupBootloading()
2508 self.info['start_time'] = time.time()
2510 self._stateSet(DOM_STATE_RUNNING)
2511 except VmError, exn:
2512 log.exception("XendDomainInfo.initDomain: exception occurred")
2513 if self.image:
2514 self.image.cleanupBootloading()
2515 raise exn
2516 except RuntimeError, exn:
2517 log.exception("XendDomainInfo.initDomain: exception occurred")
2518 if self.image:
2519 self.image.cleanupBootloading()
2520 raise VmError(str(exn))
2523 def cleanupDomain(self):
2524 """Cleanup domain resources; release devices. Idempotent. Nothrow
2525 guarantee."""
2527 self.refresh_shutdown_lock.acquire()
2528 try:
2529 self.unwatchShutdown()
2530 self._releaseDevices()
2531 bootloader_tidy(self)
2533 if self.image:
2534 self.image = None
2536 try:
2537 self._removeDom()
2538 except:
2539 log.exception("Removing domain path failed.")
2541 self._stateSet(DOM_STATE_HALTED)
2542 self.domid = None # Do not push into _stateSet()!
2543 finally:
2544 self.refresh_shutdown_lock.release()
2547 def unwatchShutdown(self):
2548 """Remove the watch on the domain's control/shutdown node, if any.
2549 Idempotent. Nothrow guarantee. Expects to be protected by the
2550 refresh_shutdown_lock."""
2552 try:
2553 try:
2554 if self.shutdownWatch:
2555 self.shutdownWatch.unwatch()
2556 finally:
2557 self.shutdownWatch = None
2558 except:
2559 log.exception("Unwatching control/shutdown failed.")
2561 def waitForShutdown(self):
2562 self.state_updated.acquire()
2563 try:
2564 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2565 self.state_updated.wait(timeout=1.0)
2566 finally:
2567 self.state_updated.release()
2569 def waitForSuspend(self):
2570 """Wait for the guest to respond to a suspend request by
2571 shutting down. If the guest hasn't re-written control/shutdown
2572 after a certain amount of time, it's obviously not listening and
2573 won't suspend, so we give up. HVM guests with no PV drivers
2574 should already be shut down.
2575 """
2576 state = "suspend"
2577 nr_tries = 60
2579 self.state_updated.acquire()
2580 try:
2581 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2582 self.state_updated.wait(1.0)
2583 if state == "suspend":
2584 if nr_tries == 0:
2585 msg = ('Timeout waiting for domain %s to suspend'
2586 % self.domid)
2587 self._writeDom('control/shutdown', '')
2588 raise XendError(msg)
2589 state = self.readDom('control/shutdown')
2590 nr_tries -= 1
2591 finally:
2592 self.state_updated.release()
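# Note (illustrative): with nr_tries = 60 and a 1.0s wait per iteration,
# a guest that never rewrites control/shutdown is given up on after
# roughly a minute.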
2595 # TODO: recategorise - called from XendCheckpoint
2598 def completeRestore(self, store_mfn, console_mfn):
2600 log.debug("XendDomainInfo.completeRestore")
2602 self.store_mfn = store_mfn
2603 self.console_mfn = console_mfn
2605 self._introduceDomain()
2606 self.image = image.create(self, self.info)
2607 if self.image:
2608 self.image.createDeviceModel(True)
2609 self._storeDomDetails()
2610 self._registerWatches()
2611 self.refreshShutdown()
2613 log.debug("XendDomainInfo.completeRestore done")
2616 def _endRestore(self):
2617 self.setResume(False)
2620 # VM Destroy
2623 def _prepare_phantom_paths(self):
2624 # get associated devices to destroy
2625 # build list of phantom devices to be removed after normal devices
2626 plist = []
2627 if self.domid is not None:
2628 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2629 try:
2630 for dev in t.list():
2631 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2632 % (self.dompath, dev))
2633 if backend_phantom_vbd is not None:
2634 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2635 % backend_phantom_vbd)
2636 plist.append(backend_phantom_vbd)
2637 plist.append(frontend_phantom_vbd)
2638 finally:
2639 t.abort()
2640 return plist
2642 def _cleanup_phantom_devs(self, plist):
2643 # remove phantom devices
2644 if plist:
2645 time.sleep(2)
2646 for paths in plist:
2647 if paths.find('backend') != -1:
2648 # Modify online status /before/ updating state (latter is watched by
2649 # drivers, so this ordering avoids a race).
2650 xstransact.Write(paths, 'online', "0")
2651 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2652 # force
2653 xstransact.Remove(paths)
2655 def destroy(self):
2656 """Cleanup VM and destroy domain. Nothrow guarantee."""
2658 if self.domid is None:
2659 return
2661 from xen.xend import XendDomain
2662 log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))
2664 paths = self._prepare_phantom_paths()
2666 if self.dompath is not None:
2667 try:
2668 xc.domain_destroy_hook(self.domid)
2669 xc.domain_pause(self.domid)
2670 do_FLR(self.domid)
2671 xc.domain_destroy(self.domid)
2672 for state in DOM_STATES_OLD:
2673 self.info[state] = 0
2674 self._stateSet(DOM_STATE_HALTED)
2675 except:
2676 log.exception("XendDomainInfo.destroy: domain destruction failed.")
2678 XendDomain.instance().remove_domain(self)
2679 self.cleanupDomain()
2681 self._cleanup_phantom_devs(paths)
2682 self._cleanupVm()
2684 if "transient" in self.info["other_config"] \
2685 and bool(self.info["other_config"]["transient"]):
2686 XendDomain.instance().domain_delete_by_dominfo(self)
2689 def resetDomain(self):
2690 log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))
2692 old_domid = self.domid
2693 prev_vm_xend = self._listRecursiveVm('xend')
2694 new_dom_info = self.info
2695 try:
2696 self._unwatchVm()
2697 self.destroy()
2699 new_dom = None
2700 try:
2701 from xen.xend import XendDomain
2702 new_dom_info['domid'] = None
2703 new_dom = XendDomain.instance().domain_create_from_dict(
2704 new_dom_info)
2705 for x in prev_vm_xend[0][1]:
2706 new_dom._writeVm('xend/%s' % x[0], x[1])
2707 new_dom.waitForDevices()
2708 new_dom.unpause()
2709 except:
2710 if new_dom:
2711 new_dom.destroy()
2712 raise
2713 except:
2714 log.exception('Failed to reset domain %s.', str(old_domid))
2717 def resumeDomain(self):
2718 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
2720 # resume a suspended domain (e.g. after live checkpoint, or after
2721 # a later error during save or migrate); checks first that the
2722 # domain is currently suspended, so it is safe to call from anywhere
2724 xeninfo = dom_get(self.domid)
2725 if xeninfo is None:
2726 return
2727 if not xeninfo['shutdown']:
2728 return
2729 reason = shutdown_reason(xeninfo['shutdown_reason'])
2730 if reason != 'suspend':
2731 return
2733 try:
2734 # could also fetch a parsed note from xenstore
2735 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
2736 if not fast:
2737 self._releaseDevices()
2738 self.testDeviceComplete()
2739 self.testvifsComplete()
2740 log.debug("XendDomainInfo.resumeDomain: devices released")
2742 self._resetChannels()
2744 self._removeDom('control/shutdown')
2745 self._removeDom('device-misc/vif/nextDeviceID')
2747 self._createChannels()
2748 self._introduceDomain()
2749 self._storeDomDetails()
2751 self._createDevices()
2752 log.debug("XendDomainInfo.resumeDomain: devices created")
2754 xc.domain_resume(self.domid, fast)
2755 ResumeDomain(self.domid)
2756 except:
2757 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
2758 self.image.resumeDeviceModel()
2759 log.debug("XendDomainInfo.resumeDomain: completed")
2763 # Channels for xenstore and console
2766 def _createChannels(self):
2767 """Create the channels to the domain.
2768 """
2769 self.store_port = self._createChannel()
2770 self.console_port = self._createChannel()
2773 def _createChannel(self):
2774 """Create an event channel to the domain.
2775 """
2776 try:
2777 if self.domid is not None:
2778 return xc.evtchn_alloc_unbound(domid = self.domid,
2779 remote_dom = 0)
2780 except:
2781 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2782 raise
2784 def _resetChannels(self):
2785 """Reset all event channels in the domain.
2786 """
2787 try:
2789 if self.domid is not None:
2789 return xc.evtchn_reset(dom = self.domid)
2790 except:
2791 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2792 raise
2796 # Bootloader configuration
2799 def _configureBootloader(self):
2800 """Run the bootloader if we're configured to do so."""
2802 blexec = self.info['PV_bootloader']
2803 bootloader_args = self.info['PV_bootloader_args']
2804 kernel = self.info['PV_kernel']
2805 ramdisk = self.info['PV_ramdisk']
2806 args = self.info['PV_args']
2807 boot = self.info['HVM_boot_policy']
2809 if boot:
2810 # HVM booting.
2811 pass
2812 elif not blexec and kernel:
2813 # Boot from dom0. Nothing left to do -- the kernel and ramdisk
2814 # will be picked up by image.py.
2815 pass
2816 else:
2817 # Boot using bootloader
2818 if not blexec or blexec == 'pygrub':
2819 blexec = osdep.pygrub_path
2821 blcfg = None
2822 disks = [x for x in self.info['vbd_refs']
2823 if self.info['devices'][x][1]['bootable']]
2825 if not disks:
2826 msg = "Had a bootloader specified, but no disks are bootable"
2827 log.error(msg)
2828 raise VmError(msg)
2830 devinfo = self.info['devices'][disks[0]]
2831 devtype = devinfo[0]
2832 disk = devinfo[1]['uname']
2834 fn = blkdev_uname_to_file(disk)
2835 taptype = blkdev_uname_to_taptype(disk)
2836 mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
2837 if mounted:
2838 # This is a file, not a device. pygrub can cope with a
2839 # file if it's raw, but if it's QCOW or other such formats
2840 # used through blktap, then we need to mount it first.
2842 log.info("Mounting %s on %s." %
2843 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2845 vbd = {
2846 'mode': 'RO',
2847 'device': BOOTLOADER_LOOPBACK_DEVICE,
2848 }
2850 from xen.xend import XendDomain
2851 dom0 = XendDomain.instance().privilegedDomain()
2852 dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
2853 fn = BOOTLOADER_LOOPBACK_DEVICE
2855 try:
2856 blcfg = bootloader(blexec, fn, self, False,
2857 bootloader_args, kernel, ramdisk, args)
2858 finally:
2859 if mounted:
2860 log.info("Unmounting %s from %s." %
2861 (fn, BOOTLOADER_LOOPBACK_DEVICE))
2863 dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
2865 if blcfg is None:
2866 msg = "Had a bootloader specified, but can't find disk"
2867 log.error(msg)
2868 raise VmError(msg)
2870 self.info.update_with_image_sxp(blcfg, True)
2874 # VM Functions
2877 def _readVMDetails(self, params):
2878 """Read the specified parameters from the store.
2879 """
2880 try:
2881 return self._gatherVm(*params)
2882 except ValueError:
2883 # One of the int/float entries in params has a corresponding store
2884 # entry that is invalid. We recover, because older versions of
2885 # Xend may have put the entry there (memory/target, for example),
2886 # but this is in general a bad situation to have reached.
2887 log.exception(
2888 "Store corrupted at %s! Domain %d's configuration may be "
2889 "affected.", self.vmpath, self.domid)
2890 return []
2892 def _cleanupVm(self):
2893 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2895 self._unwatchVm()
2897 try:
2898 self._removeVm()
2899 except:
2900 log.exception("Removing VM path failed.")
2903 def checkLiveMigrateMemory(self):
2904 """ Make sure there's enough memory to migrate this domain """
2905 overhead_kb = 0
2906 if arch.type == "x86":
2907 # 1MiB per vcpu plus 4KiB per MiB of RAM. This is higher than
2908 # the minimum that Xen would allocate if no value were given.
2909 overhead_kb = self.info['VCPUs_max'] * 1024 + \
2910 (self.info['memory_static_max'] / 1024 / 1024) * 4
2911 overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
2912 # The domain might already have some shadow memory
2913 overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
2914 if overhead_kb > 0:
2915 balloon.free(overhead_kb, self)
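# Worked example (hypothetical guest): 2 VCPUs and 1024 MiB of RAM give
#     overhead_kb = 2 * 1024 + 1024 * 4 = 6144 KiB
# already a 1 MiB multiple, so rounding leaves it unchanged; any shadow
# memory the domain already holds is then subtracted before ballooning.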
2917 def _unwatchVm(self):
2918 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2919 guarantee."""
2920 try:
2921 try:
2922 if self.vmWatch:
2923 self.vmWatch.unwatch()
2924 finally:
2925 self.vmWatch = None
2926 except:
2927 log.exception("Unwatching VM path failed.")
2929 def testDeviceComplete(self):
2930 """ For Block IO migration safety we must ensure that
2931 the devices have shut down correctly, i.e. all blocks are
2932 flushed to disk
2933 """
2934 start = time.time()
2935 while True:
2936 test = 0
2937 diff = time.time() - start
2938 vbds = self.getDeviceController('vbd').deviceIDs()
2939 taps = self.getDeviceController('tap').deviceIDs()
2940 for i in vbds + taps:
2941 test = 1
2942 log.info("Dev %s still active, looping...", i)
2943 time.sleep(0.1)
2945 if test == 0:
2946 break
2947 if diff >= MIGRATE_TIMEOUT:
2948 log.info("Dev still active but hit max loop timeout")
2949 break
2951 def testvifsComplete(self):
2952 """ In case vifs are released and then created for the same
2953 domain, we need to wait for the devices to shut down.
2954 """
2955 start = time.time()
2956 while True:
2957 test = 0
2958 diff = time.time() - start
2959 for i in self.getDeviceController('vif').deviceIDs():
2960 test = 1
2961 log.info("Dev %s still active, looping...", i)
2962 time.sleep(0.1)
2964 if test == 0:
2965 break
2966 if diff >= MIGRATE_TIMEOUT:
2967 log.info("Dev still active but hit max loop timeout")
2968 break
2970 def _storeVmDetails(self):
2971 to_store = {}
2973 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
2974 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
2975 if self._infoIsSet(info_key):
2976 to_store[key] = str(self.info[info_key])
2978 if self._infoIsSet("static_memory_min"):
2979 to_store["memory"] = str(self.info["static_memory_min"])
2980 if self._infoIsSet("static_memory_max"):
2981 to_store["maxmem"] = str(self.info["static_memory_max"])
2983 image_sxpr = self.info.image_sxpr()
2984 if image_sxpr:
2985 to_store['image'] = sxp.to_string(image_sxpr)
2987 if not self._readVm('xend/restart_count'):
2988 to_store['xend/restart_count'] = str(0)
2990 log.debug("Storing VM details: %s", scrub_password(to_store))
2992 self._writeVm(to_store)
2993 self._setVmPermissions()
2995 def _setVmPermissions(self):
2996 """Allow the guest domain to read its UUID. We don't allow it to
2997 access any other entry, for security."""
2998 xstransact.SetPermissions('%s/uuid' % self.vmpath,
2999 { 'dom' : self.domid,
3000 'read' : True,
3001 'write' : False })
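# Example (illustrative): for a domain with domid 5, this makes
# <vmpath>/uuid readable (but not writable) by domain 5, while every
# other VM entry stays inaccessible to the guest.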
3004 # Utility functions
3007 def __getattr__(self, name):
3008 if name == "state":
3009 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3010 log.warn("".join(traceback.format_stack()))
3011 return self._stateGet()
3012 else:
3013 raise AttributeError(name)
3015 def __setattr__(self, name, value):
3016 if name == "state":
3017 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3018 log.warn("".join(traceback.format_stack()))
3019 self._stateSet(value)
3020 else:
3021 self.__dict__[name] = value
3023 def _stateSet(self, state):
3024 self.state_updated.acquire()
3025 try:
3026 # TODO Not sure this is correct...
3027 # _stateGet is live now. Why not fire event
3028 # even when it hasn't changed?
3029 if self._stateGet() != state:
3030 self.state_updated.notifyAll()
3031 import XendAPI
3032 XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
3033 'power_state')
3034 finally:
3035 self.state_updated.release()
3037 def _stateGet(self):
3038 # Let's try to reconstitute the state from xc.
3039 # First, get the domain info from xc - this
3040 # will tell us whether the domain
3041 # exists.
3042 info = dom_get(self.getDomid())
3043 if info is None or info['shutdown']:
3044 # We are either HALTED or SUSPENDED
3045 # check saved image exists
3046 from xen.xend import XendDomain
3047 managed_config_path = \
3048 XendDomain.instance()._managed_check_point_path( \
3049 self.get_uuid())
3050 if os.path.exists(managed_config_path):
3051 return XEN_API_VM_POWER_STATE_SUSPENDED
3052 else:
3053 return XEN_API_VM_POWER_STATE_HALTED
3054 elif info['crashed']:
3055 # Crashed
3056 return XEN_API_VM_POWER_STATE_CRASHED
3057 else:
3058 # We are either RUNNING or PAUSED
3059 if info['paused']:
3060 return XEN_API_VM_POWER_STATE_PAUSED
3061 else:
3062 return XEN_API_VM_POWER_STATE_RUNNING
3064 def _infoIsSet(self, name):
3065 return name in self.info and self.info[name] is not None
3067 def _checkName(self, name):
3068 """Check if a vm name is valid. Valid names contain alphabetic
3069 characters, digits, or characters in '_-.:/+'.
3070 The same name cannot be used for more than one vm at the same time.
3072 @param name: name
3073 @raise: VmError if invalid
3074 """
3075 from xen.xend import XendDomain
3077 if name is None or name == '':
3078 raise VmError('Missing VM Name')
3080 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3081 raise VmError('Invalid VM Name')
3083 dom = XendDomain.instance().domain_lookup_nr(name)
3084 if dom and dom.info['uuid'] != self.info['uuid']:
3085 raise VmError("VM name '%s' already exists%s" %
3086 (name,
3087 dom.domid is not None and
3088 (" as domain %s" % str(dom.domid)) or ""))
3091 def update(self, info = None, refresh = True, transaction = None):
3092 """Update with info from xc.domain_getinfo().
3093 """
3094 log.trace("XendDomainInfo.update(%s) on domain %s", info,
3095 str(self.domid))
3097 if not info:
3098 info = dom_get(self.domid)
3099 if not info:
3100 return
3102 if info["maxmem_kb"] < 0:
3103 info["maxmem_kb"] = XendNode.instance() \
3104 .physinfo_dict()['total_memory'] * 1024
3106 # make sure state is reset for info
3107 # TODO: we should eventually get rid of old_dom_states
3109 self.info.update_config(info)
3110 self._update_consoles(transaction)
3112 if refresh:
3113 self.refreshShutdown(info)
3115 log.trace("XendDomainInfo.update done on domain %s: %s",
3116 str(self.domid), self.info)
3118 def sxpr(self, ignore_store = False, legacy_only = True):
3119 result = self.info.to_sxp(domain = self,
3120 ignore_devices = ignore_store,
3121 legacy_only = legacy_only)
3123 return result
3125 # Xen API
3126 # ----------------------------------------------------------------
3128 def get_uuid(self):
3129 dom_uuid = self.info.get('uuid')
3130 if not dom_uuid: # if it doesn't exist, make one up
3131 dom_uuid = uuid.createString()
3132 self.info['uuid'] = dom_uuid
3133 return dom_uuid
3135 def get_memory_static_max(self):
3136 return self.info.get('memory_static_max', 0)
3137 def get_memory_static_min(self):
3138 return self.info.get('memory_static_min', 0)
3139 def get_memory_dynamic_max(self):
3140 return self.info.get('memory_dynamic_max', 0)
3141 def get_memory_dynamic_min(self):
3142 return self.info.get('memory_dynamic_min', 0)
3144 # only update memory-related config values if they maintain sanity
3145 def _safe_set_memory(self, key, newval):
3146 oldval = self.info.get(key, 0)
3147 try:
3148 self.info[key] = newval
3149 self.info._memory_sanity_check()
3150 except Exception, ex:
3151 self.info[key] = oldval
3152 raise
3154 def set_memory_static_max(self, val):
3155 self._safe_set_memory('memory_static_max', val)
3156 def set_memory_static_min(self, val):
3157 self._safe_set_memory('memory_static_min', val)
3158 def set_memory_dynamic_max(self, val):
3159 self._safe_set_memory('memory_dynamic_max', val)
3160 def set_memory_dynamic_min(self, val):
3161 self._safe_set_memory('memory_dynamic_min', val)
3163 def get_vcpus_params(self):
3164 if self.getDomid() is None:
3165 return self.info['vcpus_params']
3167 retval = xc.sched_credit_domain_get(self.getDomid())
3168 return retval
3169 def get_power_state(self):
3170 return XEN_API_VM_POWER_STATE[self._stateGet()]
3171 def get_platform(self):
3172 return self.info.get('platform', {})
3173 def get_pci_bus(self):
3174 return self.info.get('pci_bus', '')
3175 def get_tools_version(self):
3176 return self.info.get('tools_version', {})
3177 def get_metrics(self):
3178 return self.metrics.get_uuid()
3181 def get_security_label(self, xspol=None):
3182 import xen.util.xsm.xsm as security
3183 label = security.get_security_label(self, xspol)
3184 return label
3186 def set_security_label(self, seclab, old_seclab, xspol=None,
3187 xspol_old=None):
3188 """
3189 Set the security label of a domain from its old to
3190 a new value.
3191 @param seclab New security label formatted in the form
3192 <policy type>:<policy name>:<vm label>
3193 @param old_seclab The current security label that the
3194 VM must have.
3195 @param xspol An optional policy under which this
3196 update should be done. If not given,
3197 then the current active policy is used.
3198 @param xspol_old The old policy; only to be passed during
3199 the updating of a policy
3200 @return Returns return code, a string with errors from
3201 the hypervisor's operation, old label of the
3202 domain
3203 """
3204 rc = 0
3205 errors = ""
3206 old_label = ""
3207 new_ssidref = 0
3208 domid = self.getDomid()
3209 res_labels = None
3210 is_policy_update = (xspol_old != None)
3212 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3214 state = self._stateGet()
3215 # Relabel only HALTED or RUNNING or PAUSED domains
3216 if domid != 0 and \
3217 state not in \
3218 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3219 DOM_STATE_SUSPENDED ]:
3220 log.warn("Relabeling domain not possible in state '%s'" %
3221 DOM_STATES[state])
3222 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3224 # Remove security label. Works only for halted or suspended domains
3225 if not seclab or seclab == "":
3226 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3227 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3229 if self.info.has_key('security_label'):
3230 old_label = self.info['security_label']
3231 # Check label against expected one.
3232 if old_label != old_seclab:
3233 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3234 del self.info['security_label']
3235 xen.xend.XendDomain.instance().managed_config_save(self)
3236 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3238 tmp = seclab.split(":")
3239 if len(tmp) != 3:
3240 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3241 typ, policy, label = tmp
3243 poladmin = XSPolicyAdminInstance()
3244 if not xspol:
3245 xspol = poladmin.get_policy_by_name(policy)
3247 try:
3248 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3250 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3251 #if domain is running or paused try to relabel in hypervisor
3252 if not xspol:
3253 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3255 if typ != xspol.get_type_name() or \
3256 policy != xspol.get_name():
3257 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3259 if typ == xsconstants.ACM_POLICY_ID:
3260 new_ssidref = xspol.vmlabel_to_ssidref(label)
3261 if new_ssidref == xsconstants.INVALID_SSIDREF:
3262 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3264 # Check that all used resources are accessible under the
3265 # new label
3266 if not is_policy_update and \
3267 not security.resources_compatible_with_vmlabel(xspol,
3268 self, label):
3269 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3271 #Check label against expected one. Can only do this
3272 # if the policy hasn't changed underneath in the meantime
3273 if xspol_old == None:
3274 old_label = self.get_security_label()
3275 if old_label != old_seclab:
3276 log.info("old_label != old_seclab: %s != %s" %
3277 (old_label, old_seclab))
3278 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3280 # relabel domain in the hypervisor
3281 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3282 log.info("rc from relabeling in HV: %d" % rc)
3283 else:
3284 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3286 if rc == 0:
3287 # HALTED, RUNNING or PAUSED
3288 if domid == 0:
3289 if xspol:
3290 self.info['security_label'] = seclab
3291 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3292 else:
3293 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3294 else:
3295 if self.info.has_key('security_label'):
3296 old_label = self.info['security_label']
3297 # Check label against expected one, unless wildcard
3298 if old_label != old_seclab:
3299 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3301 self.info['security_label'] = seclab
3303 try:
3304 xen.xend.XendDomain.instance().managed_config_save(self)
3305 except:
3306 pass
3307 return (rc, errors, old_label, new_ssidref)
3308 finally:
3309 xen.xend.XendDomain.instance().policy_lock.release()
3311 def get_on_shutdown(self):
3312 after_shutdown = self.info.get('actions_after_shutdown')
3313 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3314 return XEN_API_ON_NORMAL_EXIT[-1]
3315 return after_shutdown
3317 def get_on_reboot(self):
3318 after_reboot = self.info.get('actions_after_reboot')
3319 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3320 return XEN_API_ON_NORMAL_EXIT[-1]
3321 return after_reboot
3323 def get_on_suspend(self):
3324 # TODO: not supported
3325 after_suspend = self.info.get('actions_after_suspend')
3326 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3327 return XEN_API_ON_NORMAL_EXIT[-1]
3328 return after_suspend
3330 def get_on_crash(self):
3331 after_crash = self.info.get('actions_after_crash')
3332 if not after_crash or after_crash not in \
3333 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3334 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3335 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3337 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3338 """ Get's a device configuration either from XendConfig or
3339 from the DevController.
3341 @param dev_class: device class, either 'vbd' or 'vif'
3342 @param dev_uuid: device UUID
3344 @rtype: dictionary
3345 """
3346 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3348 # shortcut if the domain isn't started because
3349 # the devcontrollers will have no better information
3350 # than XendConfig.
3351 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3352 XEN_API_VM_POWER_STATE_SUSPENDED):
3353 if dev_config:
3354 return copy.deepcopy(dev_config)
3355 return None
3357 # instead of using dev_class, we use the dev_type
3358 # that is from XendConfig.
3359 controller = self.getDeviceController(dev_type)
3360 if not controller:
3361 return None
3363 all_configs = controller.getAllDeviceConfigurations()
3364 if not all_configs:
3365 return None
3367 updated_dev_config = copy.deepcopy(dev_config)
3368 for _devid, _devcfg in all_configs.items():
3369 if _devcfg.get('uuid') == dev_uuid:
3370 updated_dev_config.update(_devcfg)
3371 updated_dev_config['id'] = _devid
3372 return updated_dev_config
3374 return updated_dev_config
3376 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3377 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3378 if not config:
3379 return {}
3381 config['VM'] = self.get_uuid()
3383 if dev_class == 'vif':
3384 if not config.has_key('name'):
3385 config['name'] = config.get('vifname', '')
3386 if not config.has_key('MAC'):
3387 config['MAC'] = config.get('mac', '')
3388 if not config.has_key('type'):
3389 config['type'] = 'paravirtualised'
3390 if not config.has_key('device'):
3391 devid = config.get('id')
3392 if devid != None:
3393 config['device'] = 'eth%s' % devid
3394 else:
3395 config['device'] = ''
3397 if not config.has_key('network'):
3398 try:
3399 bridge = config.get('bridge', None)
3400 if bridge is None:
3401 from xen.util import Brctl
3402 if_to_br = dict([(i,b)
3403 for (b,ifs) in Brctl.get_state().items()
3404 for i in ifs])
3405 vifname = "vif%s.%s" % (self.getDomid(),
3406 config.get('id'))
3407 bridge = if_to_br.get(vifname, None)
3408 config['network'] = \
3409 XendNode.instance().bridge_to_network(
3410 bridge).get_uuid()
3411 except Exception:
3412 log.exception('bridge_to_network')
3413 # Ignore this for now -- it may happen if the device
3414 # has been specified using the legacy methods, but at
3415 # some point we're going to have to figure out how to
3416 # handle that properly.
3418 config['MTU'] = 1500 # TODO
3420 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3421 xennode = XendNode.instance()
3422 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3423 config['io_read_kbs'] = rx_bps/1024
3424 config['io_write_kbs'] = tx_bps/1024
3425 rx, tx = xennode.get_vif_stat(self.domid, devid)
3426 config['io_total_read_kbs'] = rx/1024
3427 config['io_total_write_kbs'] = tx/1024
3428 else:
3429 config['io_read_kbs'] = 0.0
3430 config['io_write_kbs'] = 0.0
3431 config['io_total_read_kbs'] = 0.0
3432 config['io_total_write_kbs'] = 0.0
3434 config['security_label'] = config.get('security_label', '')
3436 if dev_class == 'vbd':
3438 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3439 controller = self.getDeviceController(dev_class)
3440 devid, _1, _2 = controller.getDeviceDetails(config)
3441 xennode = XendNode.instance()
3442 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3443 config['io_read_kbs'] = rd_blkps
3444 config['io_write_kbs'] = wr_blkps
3445 else:
3446 config['io_read_kbs'] = 0.0
3447 config['io_write_kbs'] = 0.0
3449 config['VDI'] = config.get('VDI', '')
3450 config['device'] = config.get('dev', '')
3451 if ':' in config['device']:
3452 vbd_name, vbd_type = config['device'].split(':', 1)
3453 config['device'] = vbd_name
3454 if vbd_type == 'cdrom':
3455 config['type'] = XEN_API_VBD_TYPE[0]
3456 else:
3457 config['type'] = XEN_API_VBD_TYPE[1]
3459 config['driver'] = 'paravirtualised' # TODO
3460 config['image'] = config.get('uname', '')
3462 if config.get('mode', 'r') == 'r':
3463 config['mode'] = 'RO'
3464 else:
3465 config['mode'] = 'RW'
3467 if dev_class == 'vtpm':
3468 if not config.has_key('type'):
3469 config['type'] = 'paravirtualised' # TODO
3470 if not config.has_key('backend'):
3471 config['backend'] = "00000000-0000-0000-0000-000000000000"
3473 return config
3475 def get_dev_property(self, dev_class, dev_uuid, field):
3476 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3477 try:
3478 return config[field]
3479 except KeyError:
3480 raise XendError('Invalid property for device: %s' % field)
3482 def set_dev_property(self, dev_class, dev_uuid, field, value):
3483 self.info['devices'][dev_uuid][1][field] = value
3485 def get_vcpus_util(self):
3486 vcpu_util = {}
3487 xennode = XendNode.instance()
3488 if 'VCPUs_max' in self.info and self.domid != None:
3489 for i in range(0, self.info['VCPUs_max']):
3490 util = xennode.get_vcpu_util(self.domid, i)
3491 vcpu_util[str(i)] = util
3493 return vcpu_util
3495 def get_consoles(self):
3496 return self.info.get('console_refs', [])
3498 def get_vifs(self):
3499 return self.info.get('vif_refs', [])
3501 def get_vbds(self):
3502 return self.info.get('vbd_refs', [])
3504 def get_vtpms(self):
3505 return self.info.get('vtpm_refs', [])
3507 def get_dpcis(self):
3508 return XendDPCI.get_by_VM(self.info.get('uuid'))
3510 def get_dscsis(self):
3511 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3513 def create_vbd(self, xenapi_vbd, vdi_image_path):
3514 """Create a VBD using a VDI from XendStorageRepository.
3516 @param xenapi_vbd: vbd struct from the Xen API
3517 @param vdi_image_path: VDI UUID
3518 @rtype: string
3519 @return: uuid of the device
3520 """
3521 xenapi_vbd['image'] = vdi_image_path
3522 if vdi_image_path.startswith('tap'):
3523 dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
3524 else:
3525 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3527 if not dev_uuid:
3528 raise XendError('Failed to create device')
3530 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3531 XEN_API_VM_POWER_STATE_PAUSED):
3532 _, config = self.info['devices'][dev_uuid]
3534 if vdi_image_path.startswith('tap'):
3535 dev_control = self.getDeviceController('tap')
3536 else:
3537 dev_control = self.getDeviceController('vbd')
3539 try:
3540 devid = dev_control.createDevice(config)
3541 dev_control.waitForDevice(devid)
3542 self.info.device_update(dev_uuid,
3543 cfg_xenapi = {'devid': devid})
3544 except Exception, exn:
3545 log.exception(exn)
3546 del self.info['devices'][dev_uuid]
3547 self.info['vbd_refs'].remove(dev_uuid)
3548 raise
3550 return dev_uuid
3552 def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
3553 """Create a VBD using a VDI from XendStorageRepository.
3555 @param xenapi_vbd: vbd struct from the Xen API
3556 @param vdi_image_path: URI of the VDI image (a block-device uname such as 'tap:aio:/path')
3557 @rtype: int
3558 @return: devid of the phantom device (None if the domain is not running)
3559 """
3560 xenapi_vbd['image'] = vdi_image_path
3561 dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
3562 if not dev_uuid:
3563 raise XendError('Failed to create device')
3565 _, config = self.info['devices'][dev_uuid]
3566 if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
3567 config['devid'] = self.getDeviceController('tap').createDevice(config)
3569 return config.get('devid')
3571 def create_vif(self, xenapi_vif):
3572 """Create VIF device from the passed struct in Xen API format.
3574 @param xenapi_vif: Xen API VIF Struct.
3575 @rtype: string
3576 @return: UUID
3577 """
3578 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3579 if not dev_uuid:
3580 raise XendError('Failed to create device')
3582 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3583 XEN_API_VM_POWER_STATE_PAUSED):
3585 _, config = self.info['devices'][dev_uuid]
3586 dev_control = self.getDeviceController('vif')
3588 try:
3589 devid = dev_control.createDevice(config)
3590 dev_control.waitForDevice(devid)
3591 self.info.device_update(dev_uuid,
3592 cfg_xenapi = {'devid': devid})
3593 except Exception, exn:
3594 log.exception(exn)
3595 del self.info['devices'][dev_uuid]
3596 self.info['vif_refs'].remove(dev_uuid)
3597 raise
3599 return dev_uuid
3601 def create_vtpm(self, xenapi_vtpm):
3602 """Create a VTPM device from the passed struct in Xen API format.
3604 @return: uuid of the device
3605 @rtype: string
3606 """
3608 if self._stateGet() not in (DOM_STATE_HALTED,):
3609 raise VmError("Can only add vTPM to a halted domain.")
3610 if self.get_vtpms() != []:
3611 raise VmError('Domain already has a vTPM.')
3612 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3613 if not dev_uuid:
3614 raise XendError('Failed to create device')
3616 return dev_uuid
3618 def create_console(self, xenapi_console):
3619 """ Create a console device from a Xen API struct.
3621 @return: uuid of device
3622 @rtype: string
3623 """
3624 if self._stateGet() not in (DOM_STATE_HALTED,):
3625 raise VmError("Can only add console to a halted domain.")
3627 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3628 if not dev_uuid:
3629 raise XendError('Failed to create device')
3631 return dev_uuid
3633 def set_console_other_config(self, console_uuid, other_config):
3634 self.info.console_update(console_uuid, 'other_config', other_config)
3636 def create_dpci(self, xenapi_pci):
3637 """Create pci device from the passed struct in Xen API format.
3639 @param xenapi_pci: DPCI struct from Xen API
3640 @rtype: string
3641 @return: UUID of the created DPCI
3644 """
3646 dpci_uuid = uuid.createString()
3648 dpci_opts = []
3649 opts_dict = xenapi_pci.get('options', {})
3650 for k, v in opts_dict.items():
3651 dpci_opts.append([k, v])
3653 # Convert xenapi to sxp
3654 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3656 target_pci_sxp = \
3657 ['pci',
3658 ['dev',
3659 ['domain', '0x%02x' % ppci.get_domain()],
3660 ['bus', '0x%02x' % ppci.get_bus()],
3661 ['slot', '0x%02x' % ppci.get_slot()],
3662 ['func', '0x%1x' % ppci.get_func()],
3663 ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3664 ['opts', dpci_opts],
3665 ['uuid', dpci_uuid]
3666 ],
3667 ['state', 'Initialising']
3668 ]
3670 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3672 old_pci_sxp = self._getDeviceInfo_pci(0)
3674 if old_pci_sxp is None:
3675 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3676 if not dev_uuid:
3677 raise XendError('Failed to create device')
3679 else:
3680 new_pci_sxp = ['pci']
3681 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3682 new_pci_sxp.append(existing_dev)
3683 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3685 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3686 self.info.device_update(dev_uuid, new_pci_sxp)
3688 xen.xend.XendDomain.instance().managed_config_save(self)
3690 else:
3691 try:
3692 self.device_configure(target_pci_sxp)
3694 except Exception, exn:
3695 log.exception(exn)
3696 raise XendError('Failed to create device: %s' % exn)
3697 return dpci_uuid
3699 def create_dscsi(self, xenapi_dscsi):
3700 """Create scsi device from the passed struct in Xen API format.
3702 @param xenapi_dscsi: DSCSI struct from Xen API
3703 @rtype: string
3704 @return: UUID
3705 """
3707 dscsi_uuid = uuid.createString()
3709 # Convert xenapi to sxp
3710 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
3711 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
3712 target_vscsi_sxp = \
3713 ['vscsi',
3714 ['dev',
3715 ['devid', devid],
3716 ['p-devname', pscsi.get_dev_name()],
3717 ['p-dev', pscsi.get_physical_HCTL()],
3718 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
3719 ['state', xenbusState['Initialising']],
3720 ['uuid', dscsi_uuid]
3721 ],
3722 ['feature-host', 0]
3723 ]
3725 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3727 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
3729 if cur_vscsi_sxp is None:
3730 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
3731 if not dev_uuid:
3732 raise XendError('Failed to create device')
3734 else:
3735 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
3736 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
3737 new_vscsi_sxp.append(existing_dev)
3738 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
3740 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3741 self.info.device_update(dev_uuid, new_vscsi_sxp)
3743 xen.xend.XendDomain.instance().managed_config_save(self)
3745 else:
3746 try:
3747 self.device_configure(target_vscsi_sxp)
3749 except Exception, exn:
3750 log.exception(exn)
3751 raise XendError('Failed to create device: %s' % exn)
3752 return dscsi_uuid
3755 def destroy_device_by_uuid(self, dev_type, dev_uuid):
3756 if dev_uuid not in self.info['devices']:
3757 raise XendError('Device does not exist')
3759 try:
3760 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3761 XEN_API_VM_POWER_STATE_PAUSED):
3762 _, config = self.info['devices'][dev_uuid]
3763 devid = config.get('devid')
3764 if devid is not None:
3765 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
3766 else:
3767 raise XendError('Unable to get devid for device: %s:%s' %
3768 (dev_type, dev_uuid))
3769 finally:
3770 del self.info['devices'][dev_uuid]
3771 self.info['%s_refs' % dev_type].remove(dev_uuid)
3773 def destroy_vbd(self, dev_uuid):
3774 self.destroy_device_by_uuid('vbd', dev_uuid)
3776 def destroy_vif(self, dev_uuid):
3777 self.destroy_device_by_uuid('vif', dev_uuid)
3779 def destroy_vtpm(self, dev_uuid):
3780 self.destroy_device_by_uuid('vtpm', dev_uuid)
3782 def destroy_dpci(self, dev_uuid):
3784 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
3785 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
3787 old_pci_sxp = self._getDeviceInfo_pci(0)
3788 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3789 target_dev = None
3790 new_pci_sxp = ['pci']
3791 for dev in sxp.children(old_pci_sxp, 'dev'):
3792 domain = int(sxp.child_value(dev, 'domain'), 16)
3793 bus = int(sxp.child_value(dev, 'bus'), 16)
3794 slot = int(sxp.child_value(dev, 'slot'), 16)
3795 func = int(sxp.child_value(dev, 'func'), 16)
3796 name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
3797 if ppci.get_name() == name:
3798 target_dev = dev
3799 else:
3800 new_pci_sxp.append(dev)
3802 if target_dev is None:
3803 raise XendError('Failed to destroy device')
3805 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
3807 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3809 self.info.device_update(dev_uuid, new_pci_sxp)
3810 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
3811 del self.info['devices'][dev_uuid]
3812 xen.xend.XendDomain.instance().managed_config_save(self)
3814 else:
3815 try:
3816 self.device_configure(target_pci_sxp)
3818 except Exception, exn:
3819 log.exception(exn)
3820 raise XendError('Failed to destroy device: %s' % exn)
3821 def destroy_dscsi(self, dev_uuid):
3822 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
3823 devid = dscsi.get_virtual_host()
3824 vHCTL = dscsi.get_virtual_HCTL()
3825 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
3826 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3828 target_dev = None
3829 new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
3830 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
3831 if vHCTL == sxp.child_value(dev, 'v-dev'):
3832 target_dev = dev
3833 else:
3834 new_vscsi_sxp.append(dev)
3836 if target_dev is None:
3837 raise XendError('Failed to destroy device')
3839 target_dev.append(['state', xenbusState['Closing']])
3840 target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]
3842 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3844 self.info.device_update(dev_uuid, new_vscsi_sxp)
3845 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
3846 del self.info['devices'][dev_uuid]
3847 xen.xend.XendDomain.instance().managed_config_save(self)
3849 else:
3850 try:
3851 self.device_configure(target_vscsi_sxp)
3853 except Exception, exn:
3854 log.exception(exn)
3855 raise XendError('Failed to destroy device: %s' % exn)
3856 def destroy_xapi_instances(self):
3857 """Destroy Xen-API instances stored in XendAPIStore.
3858 """
3859 # Xen-API classes based on XendBase have their instances stored
3860 # in XendAPIStore. Clean up these instances here, if they are supposed
3861 # to be destroyed when the parent domain is dead.
3863 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3864 # XendBase and there's no need to remove them from XendAPIStore.
3866 from xen.xend import XendDomain
3867 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3868 # domain still exists.
3869 return
3871 # Destroy the VMMetrics instance.
3872 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3873 is not None:
3874 self.metrics.destroy()
3876 # Destroy DPCI instances.
3877 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3878 XendAPIStore.deregister(dpci_uuid, "DPCI")
3880 # Destroy DSCSI instances.
3881 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3882 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3884 def has_device(self, dev_class, dev_uuid):
3885 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3887 def __str__(self):
3888 return '<domain id=%s name=%s memory=%s state=%s>' % \
3889 (str(self.domid), self.info['name_label'],
3890 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3892 __repr__ = __str__