ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19460:2269d8704139

XenAPI: Implement {get,set}_auto_power_on

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Mar 31 11:48:07 2009 +0100 (2009-03-31)
parents f86ebd7f887a
children 156093ff8067
line source
#===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================

"""Representation of a single domain.
Includes support for domain construction, using
open-ended configurations.

Author: Mike Wray <mike.wray@hp.com>

"""
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
# Seconds to wait during migration before giving up
# (presumably consumed by the migration code elsewhere in xend — confirm).
MIGRATE_TIMEOUT = 30.0

# Loopback block device used when the bootloader needs to read a guest
# disk image from dom0 (assumption based on the name — TODO confirm).
BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'

# Single shared handle onto the low-level Xen control interface.
xc = xen.lowlevel.xc.xc()

# Global xend option settings (singleton accessor).
xoptions = XendOptions.instance()

# Module-level logger; uncomment the next line to force TRACE output.
log = logging.getLogger("xend.XendDomainInfo")
#log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    xd = XendDomain.instance()

    # Refuse to build a second running instance of the same domain: look
    # the name up first, then fall back to the UUID if the name matched
    # nothing that is actually live.
    clash = xd.domain_lookup_nr(domconfig["name_label"])
    if clash is None or clash.domid is None:
        clash = xd.domain_lookup_nr(domconfig["uuid"])
    if clash is not None and clash.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], clash.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Construction failed part-way through: tear down whatever was
        # built, then let the original exception propagate.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: An configuration dictionary.

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    cfg = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(cfg)
    try:
        vm.start()
    except:
        # Roll the partially-built domain back before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (dominfo from the hypervisor)
    @type  info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    # An already-existing domain is never auto-started again by xend
    # (presumably why this is forced off here — confirm against the
    # auto_power_on handling elsewhere).
    xeninfo['auto_power_on'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    #       abort or ignore, but there may be cases where xenstore's
    #       entry disappears (eg. xenstore-rm /)
    #
    try:
        # Any missing/mismatched xenstore state marks the domain for
        # reinitialisation and jumps straight past the remaining checks
        # by raising XendError('reinit'), caught below.
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the xenstore entries for this domain from scratch.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})

    @rtype:  XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domcfg = XendConfig.XendConfig(sxp_obj = config)
    vm = XendDomainInfo(domcfg, resume = True)
    try:
        vm.resume()
    except:
        # A failed restore leaves partial state behind; clean it up
        # before propagating the error.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XenDomainInfo without creating VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object

    @rtype:  XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # A domain id only makes sense for a running domain; drop any stale
    # one carried over in the configuration.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by its name.

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Get a shutdown reason from a code.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason
    @rtype: string
    """
    # Unknown codes map to the placeholder string "?".
    try:
        return DOMAIN_SHUTDOWN_REASONS[code]
    except KeyError:
        return "?"
def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type  dom: int
    @return: info or None
    @rtype: dictionary
    """
    try:
        domlist = xc.domain_getinfo(dom, 1)
        # The first entry may describe a different domain when `dom` no
        # longer exists (the id check below guards against that), so only
        # return it when the ids really match.
        if domlist and dom == domlist[0]['domid']:
            return domlist[0]
    except Exception, err:
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings recorded in xenstore for the PCI
    backend of the given domain.

    @param domid: domain id
    @type  domid: int
    @return: the 'dev-N' entries under the domain's PCI backend node, or
             an empty list when the node is absent.
    @rtype: list of strings
    """
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    if num_devs is None or num_devs == "":
        # No PCI backend node for this domain.
        return []
    # Build the list in a single pass; the original accumulated with
    # `lst = lst + [x]`, which is quadratic.
    return [xstransact.Read(path + 'dev-%i' % i)
            for i in range(int(num_devs))]
def do_FLR(domid):
    """Perform a Function Level Reset (FLR) on every PCI device currently
    assigned to domain `domid`.

    @raise VmError: a device string could not be resolved to a PciDevice.
    """
    from xen.xend.server.pciif import parse_pci_name, PciDevice
    dev_str_list = get_assigned_pci_devices(domid)

    for dev_str in dev_str_list:
        (dom, b, d, f) = parse_pci_name(dev_str)
        try:
            dev = PciDevice(dom, b, d, f)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        # The actual reset is delegated to the PciDevice helper.
        dev.do_FLR()
class XendDomainInfo:
    """An object represents a domain.

    @TODO: try to unify dom and domid, they mean the same thing, but
           xc refers to it as dom, and everywhere else, including
           xenstore it is domid. The best way is to change xc's
           python interface.

    @ivar info: Parsed configuration
    @type info: dictionary
    @ivar domid: Domain ID (if VM has started)
    @type domid: int or None
    @ivar vmpath: XenStore path to this VM.
    @type vmpath: string
    @ivar dompath: XenStore path to this Domain.
    @type dompath: string
    @ivar image: Reference to the VM Image.
    @type image: xen.xend.image.ImageHandler
    @ivar store_port: event channel to xenstored
    @type store_port: int
    @ivar console_port: event channel to xenconsoled
    @type console_port: int
    @ivar store_mfn: xenstored mfn
    @type store_mfn: int
    @ivar console_mfn: xenconsoled mfn
    @type console_mfn: int
    @ivar notes: OS image notes
    @type notes: dictionary
    @ivar vmWatch: reference to a watch on the xenstored vmpath
    @type vmWatch: xen.xend.xenstore.xswatch
    @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
    @type shutdownWatch: xen.xend.xenstore.xswatch
    @ivar shutdownStartTime: UNIX Time when domain started shutting down.
    @type shutdownStartTime: float or None
    # @ivar state: Domain state
    # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
    @ivar state_updated: lock for self.state
    @type state_updated: threading.Condition
    @ivar refresh_shutdown_lock: lock for polling shutdown state
    @type refresh_shutdown_lock: threading.Condition
    @ivar _deviceControllers: device controller cache for this domain
    @type _deviceControllers: dict 'string' to DevControllers
    """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type  info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type    domid: int
        @keyword dompath: Set initial dompath (if any)
        @type    dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type    augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type    priv: bool
        @keyword resume: Is this domain being resumed?
        @type    resume: bool
        """

        self.info = info
        # Prefer an explicitly supplied domid; otherwise take whatever the
        # configuration carries (may be None for a dormant domain).
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                # If a VM record already lives at this path, try the next
                # '-<i>' suffix.  A xenstore read failure is treated as
                # "path free" (hence the bare except).
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                pass

        self.dompath = dompath

        # Runtime state populated during domain construction.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # xenstore watches and shutdown tracking.
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        # Legacy state flags are all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
    #
    # Public functions available through XMLRPC
    #
    def start(self, is_managed = False):
        """Attempts to start the VM by do the appropriate
        initialisation if it not started.

        @keyword is_managed: save the running configuration if the domain
                             is managed by xend
        @type    is_managed: bool
        @raise XendError: the VM is already running.
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each construction phase reports progress through the
                # task framework.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply weight/cap when the credit scheduler is active.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                       self.getWeight(),
                                                       self.getCap())
            except:
                # Any failure during start-up tears the domain down again.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: the domain is not in a resumable state.
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                # Failed resume: destroy the partially-resumed domain and
                # propagate the original error.
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of the DOMAIN_SHUTDOWN_REASONS values
        @type  reason: string
        @raise XendError: domain is already down, is domain 0, or the
                          reason is not recognised.
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        self._removeVm('xend/previous_restart_time')
        # Signal the guest through the xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            # Without PV drivers (or while in an ACPI sleep state) fall
            # back to a hypervisor-enforced shutdown.
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
529 def pause(self):
530 """Pause domain
532 @raise XendError: Failed pausing a domain
533 """
534 try:
535 xc.domain_pause(self.domid)
536 self._stateSet(DOM_STATE_PAUSED)
537 except Exception, ex:
538 log.exception(ex)
539 raise XendError("Domain unable to be paused: %s" % str(ex))
541 def unpause(self):
542 """Unpause domain
544 @raise XendError: Failed unpausing a domain
545 """
546 try:
547 xc.domain_unpause(self.domid)
548 self._stateSet(DOM_STATE_RUNNING)
549 except Exception, ex:
550 log.exception(ex)
551 raise XendError("Domain unable to be unpaused: %s" % str(ex))
    def send_sysrq(self, key):
        """ Send a Sysrq equivalent key via xenstored.

        @param key: sysrq key (must be convertible to a single character)
        @raise XendError: the domain is neither running nor paused.
        """
        if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
            raise XendError("Domain '%s' is not started" % self.info['name_label'])

        asserts.isCharConvertible(key)
        self.storeDom("control/sysrq", '%c' % key)
561 def sync_pcidev_info(self):
563 if not self.info.is_hvm():
564 return
566 devid = '0'
567 dev_info = self._getDeviceInfo_pci(devid)
568 if dev_info is None:
569 return
571 # get the virtual slot info from xenstore
572 dev_uuid = sxp.child_value(dev_info, 'uuid')
573 pci_conf = self.info['devices'][dev_uuid][1]
574 pci_devs = pci_conf['devs']
576 count = 0
577 vslots = None
578 while vslots is None and count < 20:
579 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
580 % (self.getDomid(), devid))
581 time.sleep(0.1)
582 count += 1
583 if vslots is None:
584 log.error("Device model didn't tell the vslots for PCI device")
585 return
587 #delete last delim
588 if vslots[-1] == ";":
589 vslots = vslots[:-1]
591 slot_list = vslots.split(';')
592 if len(slot_list) != len(pci_devs):
593 log.error("Device model's pci dev num dismatch")
594 return
596 #update the vslot info
597 count = 0;
598 for x in pci_devs:
599 x['vslt'] = slot_list[count]
600 count += 1
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug a PCI device into a running HVM guest.

        Runs a series of safety checks (slot conflicts, VT-d
        assignability, pciback ownership, BAR alignment, co-assignment
        constraints) and only then sends the 'pci-ins' command to the
        device model.

        @param dev_config: parsed PCI configuration; only the first entry
                           of dev_config['devs'] is inserted.
        @raise VmError: the guest is not HVM, or any check fails.
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Reject an occupied virtual slot (slot 0 appears to mean
                # "auto" and is exempt — confirm against AUTO_PHP_SLOT).
                if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
                    int(x['vslt'], 16) != 0 ):
                    raise VmError("vslot %s already have a device." % (new_dev['vslt']))

                # Reject inserting the exact same physical device twice.
                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(self.domid, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Decode the failing BDF returned by the hypervisor.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Here, we duplicate some checkings (in some cases, we mustn't allow
        # a device to be hot-plugged into an HVM guest) that are also done in
        # pci_device_configure()'s self.device_create(dev_sxp) or
        # dev_control.reconfigureDevice(devid, dev_config).
        # We must make the checkings before sending the command 'pci-ins' to
        # ioemu.

        # Test whether the device is owned by pciback. For instance, we can't
        # hotplug a device being used by Dom0 itself to an HVM guest.
        from xen.xend.server.pciif import PciDevice, parse_pci_name
        domain = int(new_dev['domain'],16)
        bus = int(new_dev['bus'],16)
        dev = int(new_dev['slot'],16)
        func = int(new_dev['func'],16)
        try:
            pci_device = PciDevice(domain, bus, dev, func)
        except Exception, e:
            raise VmError("pci: failed to locate device and "+
                    "parse it's resources - "+str(e))
        if pci_device.driver!='pciback':
            raise VmError(("pci: PCI Backend does not own device "+ \
                    "%s\n"+ \
                    "See the pciback.hide kernel "+ \
                    "command-line parameter or\n"+ \
                    "bind your slot/device to the PCI backend using sysfs" \
                    )%(pci_device.name))

        # Check non-page-aligned MMIO BAR.
        if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
            raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
                          pci_device.name)

        # Check the co-assignment.
        # To pci-attach a device D to domN, we should ensure each of D's
        # co-assignment devices hasn't been assigned, or has been assigned to
        # domN.
        coassignment_list = pci_device.find_coassigned_devices()
        assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
        for pci_str in coassignment_list:
            (domain, bus, dev, func) = parse_pci_name(pci_str)
            dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
            if xc.test_assign_device(self.domid, dev_str) == 0:
                continue
            if not pci_str in assigned_pci_device_str_list:
                raise VmError(('pci: failed to pci-attach %s to dom%d" + \
                    " because one of its co-assignment device %s has been" + \
                    " assigned to other domain.' \
                    )% (pci_device.name, self.domid, pci_str))

        # Build the 'opts' suffix (key=value pairs) for the device model.
        opts = ''
        if 'opts' in new_dev and len(new_dev['opts']) > 0:
            config_opts = new_dev['opts']
            config_opts = map(lambda (x, y): x+'='+y, config_opts)
            opts = ',' + reduce(lambda x, y: x+','+y, config_opts)

        bdf_str = "%s:%s:%s.%s%s@%s" % (new_dev['domain'],
                                        new_dev['bus'],
                                        new_dev['slot'],
                                        new_dev['func'],
                                        opts,
                                        new_dev['vslt'])
        # Finally ask the device model (ioemu) to insert the device.
        self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        @return: SXP description of the device, from its controller
        @raise VmError: creation failed (the config change is rolled back)
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Register the device in our configuration first; rolled back
        # below if the actual hotplug fails.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if dev_type == 'vif':
            # Validate any MAC address supplied for a network interface.
            for x in dev_config:
                if x != 'vif' and x[0] == 'mac':
                    if not re.match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$', x[1], re.I):
                        log.error("Virtual network interface creation error - invalid MAC Address entered: %s", x[1])
                        raise VmError("Cannot create a new virtual network interface - MAC address is not valid!");

        if self.domid is not None:
            # Domain is running: hot-plug the device and wait for the
            # backend to come up.
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Undo the configuration changes made above, including any
                # per-device API store registrations, then re-raise.
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked in the vbd reference list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                self.hvm_pci_device_create(dev_config)
                # Update vslt: the device model reports the chosen virtual
                # slot through its xenstore 'parameter' node.
                vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                       % self.getDomid())
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt
            else:
                # HVM PCI device detachment: find the virtual slot the
                # device currently occupies, then unplug it.
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslt = AUTO_PHP_SLOT_STR
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslt = x['vslt']
                        break
                if vslt == AUTO_PHP_SLOT_STR:
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslt, 16))
                # Update vslt
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device.

        Modelled on ("quoted" from) the pci configure function above.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @return: True if the device was updated; False when dev_sxp is
                 not a vscsi configuration.
        @rtype: boolean
        """
        def _is_vscsi_defined(dev_info, p_devs = None, v_devs = None):
            # True when any 'dev' child of dev_info matches one of the
            # given physical (p-dev) or virtual (v-dev) device names.
            if not dev_info:
                return False
            for dev in sxp.children(dev_info, 'dev'):
                if p_devs is not None:
                    if sxp.child_value(dev, 'p-dev') in p_devs:
                        return True
                if v_devs is not None:
                    if sxp.child_value(dev, 'v-dev') in v_devs:
                        return True
            return False

        def _vscsi_be(be):
            # Normalise a backend reference to a domain-id string where
            # the referenced domain exists; otherwise echo it back.
            be_xdi = xen.xend.XendDomain.instance().domain_lookup_nr(be)
            if be_xdi is not None:
                be_domid = be_xdi.getDomid()
                if be_domid is not None:
                    return str(be_domid)
            return str(be)

        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        devs = dev_config['devs']
        v_devs = [d['v-dev'] for d in devs]
        state = devs[0]['state']
        req_devid = int(devs[0]['devid'])
        cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid)

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            p_devs = [d['p-dev'] for d in devs]
            for dev_type, dev_info in self.info.all_devices_sxpr():
                if dev_type != 'vscsi':
                    continue
                if _is_vscsi_defined(dev_info, p_devs = p_devs):
                    raise XendError('The physical device "%s" is already defined' % \
                                    p_devs[0])
            if cur_dev_sxp is None:
                self.device_create(dev_sxp)
                return True

            if _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError('The virtual device "%s" is already defined' % \
                                v_devs[0])

            # The feature-host mode of the new device must match the
            # existing controller's mode.
            if int(dev_config['feature-host']) != \
               int(sxp.child_value(cur_dev_sxp, 'feature-host')):
                raise XendError('The physical device "%s" cannot define '
                                'because mode is different' % devs[0]['p-dev'])

            # Likewise the backend domain must match.
            new_be = dev_config.get('backend', None)
            if new_be is not None:
                cur_be = sxp.child_value(cur_dev_sxp, 'backend', None)
                if cur_be is None:
                    cur_be = xen.xend.XendDomain.DOM0_ID
                new_be_dom = _vscsi_be(new_be)
                cur_be_dom = _vscsi_be(cur_be)
                if new_be_dom != cur_be_dom:
                    raise XendError('The physical device "%s" cannot define '
                                    'because backend is different' % devs[0]['p-dev'])

        elif state == xenbusState['Closing']:
            if not _is_vscsi_defined(cur_dev_sxp, v_devs = v_devs):
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Domain not running: rebuild the stored SXP directly, keeping
            # mode/backend, dropping closed devices and appending new ones.
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)
            try:
                cur_be = sxp.children(cur_dev_sxp, 'backend')[0]
                new_dev_sxp.append(cur_be)
            except IndexError:
                pass

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    if int(cur_mode[1]) == 1:
                        continue
                    if sxp.child_value(cur_dev, 'v-dev') in v_devs:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
985 def device_configure(self, dev_sxp, devid = None):
986 """Configure an existing device.
988 @param dev_config: device configuration
989 @type dev_config: SXP object (parsed config)
990 @param devid: device id
991 @type devid: int
992 @return: Returns True if successfully updated device
993 @rtype: boolean
994 """
996 # convert device sxp to a dict
997 dev_class = sxp.name(dev_sxp)
998 dev_config = {}
1000 if dev_class == 'pci':
1001 return self.pci_device_configure(dev_sxp)
1003 if dev_class == 'vscsi':
1004 return self.vscsi_device_configure(dev_sxp)
1006 for opt_val in dev_sxp[1:]:
1007 try:
1008 dev_config[opt_val[0]] = opt_val[1]
1009 except IndexError:
1010 pass
1012 # use DevController.reconfigureDevice to change device config
1013 dev_control = self.getDeviceController(dev_class)
1014 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
1016 # update XendConfig with new device info
1017 if dev_uuid:
1018 self.info.device_update(dev_uuid, dev_sxp)
1020 return True
1022 def waitForDevices(self):
1023 """Wait for this domain's configured devices to connect.
1025 @raise VmError: if any device fails to initialise.
1026 """
1027 for devclass in XendDevices.valid_devices():
1028 self.getDeviceController(devclass).waitForDevices()
1030 def hvm_destroyPCIDevice(self, vslot):
1031 log.debug("hvm_destroyPCIDevice called %s", vslot)
1033 if not self.info.is_hvm():
1034 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
1036 #all the PCI devs share one conf node
1037 devid = '0'
1038 vslot = int(vslot)
1039 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1040 dev_uuid = sxp.child_value(dev_info, 'uuid')
1042 #delete the pci bdf config under the pci device
1043 pci_conf = self.info['devices'][dev_uuid][1]
1044 pci_len = len(pci_conf['devs'])
1046 #find the pass-through device with the virtual slot
1047 devnum = 0
1048 for x in pci_conf['devs']:
1049 if int(x['vslt'], 16) == vslot:
1050 break
1051 devnum += 1
1053 if devnum >= pci_len:
1054 raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))
1056 if vslot == 0:
1057 raise VmError("Device @ vslot 0x%x do not support hotplug." % (vslot))
1059 # Check the co-assignment.
1060 # To pci-detach a device D from domN, we should ensure: for each DD in the
1061 # list of D's co-assignment devices, DD is not assigned (to domN).
1063 from xen.xend.server.pciif import PciDevice
1064 domain = int(x['domain'],16)
1065 bus = int(x['bus'],16)
1066 dev = int(x['slot'],16)
1067 func = int(x['func'],16)
1068 try:
1069 pci_device = PciDevice(domain, bus, dev, func)
1070 except Exception, e:
1071 raise VmError("pci: failed to locate device and "+
1072 "parse it's resources - "+str(e))
1073 coassignment_list = pci_device.find_coassigned_devices()
1074 coassignment_list.remove(pci_device.name)
1075 assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
1076 for pci_str in coassignment_list:
1077 if pci_str in assigned_pci_device_str_list:
1078 raise VmError(('pci: failed to pci-detach %s from dom%d" + \
1079 " because one of its co-assignment device %s is still " + \
1080 " assigned to the domain.' \
1081 )% (pci_device.name, self.domid, pci_str))
1084 bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
1085 log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
1087 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1089 return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Tear down a device of this domain, optionally removing its
        stored configuration as well.

        @param deviceClass: device class, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id (or a name convertible to a device number)
        @param force: if True, do not wait for clean backend teardown
        @param rm_cfg: if True, also delete the device from the managed
            configuration and save it
        @raise XendError: if rm_cfg is set and the device is not defined
        @return: result of the controller's destroyDevice, or None if the
            domain is not running
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                # NOTE(review): 'backend' is only bound when a matching
                # entry was found in sxprs above; otherwise this raises
                # NameError -- confirm callers guarantee a match.
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Recover the MAC from the saved sxprs, then look the
                    # device up by MAC in the stored configuration.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            # Drop the device from the stored config and persist it.
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
1153 def getDeviceSxprs(self, deviceClass):
1154 if deviceClass == 'pci':
1155 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1156 if dev_info is None:
1157 return []
1158 dev_uuid = sxp.child_value(dev_info, 'uuid')
1159 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1160 pci_len = len(pci_devs)
1161 return pci_devs
1162 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1163 return self.getDeviceController(deviceClass).sxprs()
1164 else:
1165 sxprs = []
1166 dev_num = 0
1167 for dev_type, dev_info in self.info.all_devices_sxpr():
1168 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
1169 (deviceClass != 'vbd' and dev_type != deviceClass):
1170 continue
1172 if deviceClass == 'vscsi':
1173 vscsi_devs = ['devs', []]
1174 for vscsi_dev in sxp.children(dev_info, 'dev'):
1175 vscsi_dev.append(['frontstate', None])
1176 vscsi_devs[1].append(vscsi_dev)
1177 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1178 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1179 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1180 elif deviceClass == 'vbd':
1181 dev = sxp.child_value(dev_info, 'dev')
1182 if 'ioemu:' in dev:
1183 (_, dev) = dev.split(':', 1)
1184 try:
1185 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1186 except ValueError:
1187 dev_name = dev
1188 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1189 sxprs.append([dev_num, dev_info])
1190 else:
1191 sxprs.append([dev_num, dev_info])
1192 dev_num += 1
1193 return sxprs
1195 def getBlockDeviceClass(self, devid):
1196 # To get a device number from the devid,
1197 # we temporarily use the device controller of VBD.
1198 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1199 dev_info = self._getDeviceInfo_vbd(dev)
1200 if dev_info:
1201 return dev_info[0]
1203 def _getDeviceInfo_vif(self, mac):
1204 for dev_type, dev_info in self.info.all_devices_sxpr():
1205 if dev_type != 'vif':
1206 continue
1207 if mac == sxp.child_value(dev_info, 'mac'):
1208 return dev_info
1210 def _getDeviceInfo_vbd(self, devid):
1211 for dev_type, dev_info in self.info.all_devices_sxpr():
1212 if dev_type != 'vbd' and dev_type != 'tap':
1213 continue
1214 dev = sxp.child_value(dev_info, 'dev')
1215 dev = dev.split(':')[0]
1216 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1217 if devid == dev:
1218 return dev_info
1220 def _getDeviceInfo_pci(self, devid):
1221 for dev_type, dev_info in self.info.all_devices_sxpr():
1222 if dev_type != 'pci':
1223 continue
1224 return dev_info
1225 return None
1227 def _getDeviceInfo_vscsi(self, devid):
1228 devid = int(devid)
1229 for dev_type, dev_info in self.info.all_devices_sxpr():
1230 if dev_type != 'vscsi':
1231 continue
1232 devs = sxp.children(dev_info, 'dev')
1233 if devid == int(sxp.child_value(devs[0], 'devid')):
1234 return dev_info
1235 return None
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.

        Updates the dynamic memory range in the config, frees memory via
        the balloon driver if growing a live domain, and pushes the new
        target to xenstore and the hypervisor.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never balloon dom0 below its configured safe minimum.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                # balloon.free takes KiB.
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # xenstore memory/target is in KiB (MiB << 10).
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new maximum (the
            stored value is rolled back first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # domain_setmaxmem expects KiB.
            maxmem = int(limit) * 1024
            try:
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the stored value on hypervisor failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        # NOTE(review): only reached for inactive domains (domid < 0);
        # for running domains the early return above skips this save --
        # confirm that is intentional.
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain.

        For a running domain the live per-vcpu state is read from Xen;
        for an inactive one placeholder values are reported, with the
        stored CPU affinity (or all 64 CPUs when none is stored).
        @raise XendError: if the hypervisor query fails.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    # cpu_time is reported by Xen in nanoseconds;
                    # convert to seconds.
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    # Inactive domain: report placeholders.
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                  self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        """Return the hypervisor's domain info record for this domain,
        as given by dom_get."""
        return dom_get(self.domid)
    # internal functions ... TODO: re-categorise
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0, whose memory/vcpu values come from
            Xen itself rather than the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # Dom0: ignore stored memory/vcpu entries (see below).
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Map legacy names onto XenAPI config keys where known.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only populate devices from the store when none are configured.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Sync the VT100 console and VNC ports from xenstore into
        self.info; reconfigure the vfb device if the VNC location moved.

        Skipped for dom0 and for domains without an id.
        @param transaction: optional open xenstore transaction to read
            through, instead of one-off reads.
        """
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # No serial console configured yet: create one.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            # Only the first vfb device is considered.
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', \
                                  XendOptions.instance().get_vnclisten_address())
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1434 # Function to update xenstore /vm/*
    def _readVm(self, *args):
        """Read value(s) under this VM's /vm/<uuid> xenstore path."""
        return xstransact.Read(self.vmpath, *args)
    def _writeVm(self, *args):
        """Write value(s) under this VM's /vm/<uuid> xenstore path."""
        return xstransact.Write(self.vmpath, *args)
    def _removeVm(self, *args):
        """Remove entries under this VM's /vm/<uuid> xenstore path."""
        return xstransact.Remove(self.vmpath, *args)
    def _gatherVm(self, *args):
        """Gather typed values under this VM's /vm/<uuid> xenstore path."""
        return xstransact.Gather(self.vmpath, *args)
    def _listRecursiveVm(self, *args):
        """Recursively list entries under this VM's xenstore path."""
        return xstransact.ListRecursive(self.vmpath, *args)
    def storeVm(self, *args):
        """Store key/value pair(s) under this VM's xenstore path."""
        return xstransact.Store(self.vmpath, *args)
    def permissionsVm(self, *args):
        """Set permissions on entries under this VM's xenstore path."""
        return xstransact.SetPermissions(self.vmpath, *args)
1459 # Function to update xenstore /dom/*
    def readDom(self, *args):
        """Read value(s) under this domain's /local/domain/<id> path."""
        return xstransact.Read(self.dompath, *args)
    def gatherDom(self, *args):
        """Gather typed values under this domain's xenstore path."""
        return xstransact.Gather(self.dompath, *args)
    def _writeDom(self, *args):
        """Write value(s) under this domain's xenstore path."""
        return xstransact.Write(self.dompath, *args)
    def _removeDom(self, *args):
        """Remove entries under this domain's xenstore path."""
        return xstransact.Remove(self.dompath, *args)
    def storeDom(self, *args):
        """Store key/value pair(s) under this domain's xenstore path."""
        return xstransact.Store(self.dompath, *args)
1478 def readDomTxn(self, transaction, *args):
1479 paths = map(lambda x: self.dompath + "/" + x, args)
1480 return transaction.read(*paths)
1482 def gatherDomTxn(self, transaction, *args):
1483 paths = map(lambda x: self.dompath + "/" + x, args)
1484 return transaction.gather(*paths)
1486 def _writeDomTxn(self, transaction, *args):
1487 paths = map(lambda x: self.dompath + "/" + x, args)
1488 return transaction.write(*paths)
1490 def _removeDomTxn(self, transaction, *args):
1491 paths = map(lambda x: self.dompath + "/" + x, args)
1492 return transaction.remove(*paths)
1494 def storeDomTxn(self, transaction, *args):
1495 paths = map(lambda x: self.dompath + "/" + x, args)
1496 return transaction.store(*paths)
1499 def _recreateDom(self):
1500 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1502 def _recreateDomFunc(self, t):
1503 t.remove()
1504 t.mkdir()
1505 t.set_permissions({'dom' : self.domid, 'read' : True})
1506 t.write('vm', self.vmpath)
1507 # NB. Solaris guests use guest/ and hvmpv/ xenstore directories
1508 for i in [ 'device', 'control', 'error', 'memory', 'guest', 'hvmpv' ]:
1509 t.mkdir(i)
1510 t.set_permissions(i, {'dom' : self.domid})
1512 def _storeDomDetails(self):
1513 to_store = {
1514 'domid': str(self.domid),
1515 'vm': self.vmpath,
1516 'name': self.info['name_label'],
1517 'console/limit': str(xoptions.get_console_limit() * 1024),
1518 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1521 def f(n, v):
1522 if v is not None:
1523 if type(v) == bool:
1524 to_store[n] = v and "1" or "0"
1525 else:
1526 to_store[n] = str(v)
1528 # Figure out if we need to tell xenconsoled to ignore this guest's
1529 # console - device model will handle console if it is running
1530 constype = "ioemu"
1531 if 'device_model' not in self.info['platform']:
1532 constype = "xenconsoled"
1534 f('console/port', self.console_port)
1535 f('console/ring-ref', self.console_mfn)
1536 f('console/type', constype)
1537 f('store/port', self.store_port)
1538 f('store/ring-ref', self.store_mfn)
1540 if arch.type == "x86":
1541 f('control/platform-feature-multiprocessor-suspend', True)
1543 # elfnotes
1544 for n, v in self.info.get_notes().iteritems():
1545 n = n.lower().replace('_', '-')
1546 if n == 'features':
1547 for v in v.split('|'):
1548 v = v.replace('_', '-')
1549 if v.startswith('!'):
1550 f('image/%s/%s' % (n, v[1:]), False)
1551 else:
1552 f('image/%s/%s' % (n, v), True)
1553 else:
1554 f('image/%s' % n, v)
1556 if self.info.has_key('security_label'):
1557 f('security_label', self.info['security_label'])
1559 to_store.update(self._vcpuDomDetails())
1561 log.debug("Storing domain details: %s", scrub_password(to_store))
1563 self._writeDom(to_store)
1565 def _vcpuDomDetails(self):
1566 def availability(n):
1567 if self.info['vcpu_avail'] & (1 << n):
1568 return 'online'
1569 else:
1570 return 'offline'
1572 result = {}
1573 for v in range(0, self.info['VCPUs_max']):
1574 result["cpu/%d/availability" % v] = availability(v)
1575 return result
1578 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date. This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        # Config changes under /vm/<uuid> -> _storeChanged.
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        # Guest-initiated shutdown requests -> _handleShutdownWatch.
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read legacy config values from the store,
        fold any changes back into self.info and refresh the domain's
        xenstore details if something changed.

        @return: 1 so the watch stays registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                          for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        if vm_details['rtc/timeoffset'] == None:
            vm_details['rtc/timeoffset'] = "0"

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Check if the rtc offset has changed
        if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
            self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
            changed = True

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self._storeDomDetails()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback for control/shutdown: schedule a
        refreshShutdown check, recording when the shutdown request was
        first seen so unresponsive guests can be detected later.

        @return: True so the watch stays registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # 'suspend' is handled by the checkpoint path, not here.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in progress; time out relative
                # to when it started.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1669 # Public Attributes for the VM
    def getDomid(self):
        """Return this domain's id (None when not instantiated)."""
        return self.domid
    def setName(self, name, to_store = True):
        """Rename the domain after validating the new name.

        @param to_store: also persist the name to the xenstore VM node.
        """
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)
    def getName(self):
        """Return the domain's name label."""
        return self.info['name_label']
    def getDomainPath(self):
        """Return this domain's xenstore path."""
        return self.dompath
    def getShutdownReason(self):
        """Return the current control/shutdown request from xenstore."""
        return self.readDom('control/shutdown')
    def getStorePort(self):
        """Return the xenstore channel port.

        For use only by image.py and XendCheckpoint.py."""
        return self.store_port
    def getConsolePort(self):
        """Return the console channel port.

        For use only by image.py and XendCheckpoint.py"""
        return self.console_port
    def getFeatures(self):
        """Return the configured guest features string.

        For use only by image.py."""
        return self.info['features']
    def getVCpuCount(self):
        """Return the maximum number of VCPUs configured for this domain."""
        return self.info['VCPUs_max']
1706 def setVCpuCount(self, vcpus):
1707 def vcpus_valid(n):
1708 if vcpus <= 0:
1709 raise XendError('Zero or less VCPUs is invalid')
1710 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1711 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1712 vcpus_valid(vcpus)
1714 self.info['vcpu_avail'] = (1 << vcpus) - 1
1715 if self.domid >= 0:
1716 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1717 self._writeDom(self._vcpuDomDetails())
1718 self.info['VCPUs_live'] = vcpus
1719 else:
1720 if self.info['VCPUs_max'] > vcpus:
1721 # decreasing
1722 del self.info['cpus'][vcpus:]
1723 elif self.info['VCPUs_max'] < vcpus:
1724 # increasing
1725 for c in range(self.info['VCPUs_max'], vcpus):
1726 self.info['cpus'].append(list())
1727 self.info['VCPUs_max'] = vcpus
1728 xen.xend.XendDomain.instance().managed_config_save(self)
1729 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1730 vcpus)
    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        # info stores memory in bytes.
        return self.info['memory_dynamic_max'] / 1024
    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024
    def getResume(self):
        """Return the resume flag as a string."""
        return str(self._resume)
    def setResume(self, isresume):
        """Set the resume flag."""
        self._resume = isresume
    def getCpus(self):
        """Return the per-vcpu CPU affinity lists."""
        return self.info['cpus']
    def setCpus(self, cpumap):
        """Set the per-vcpu CPU affinity lists."""
        self.info['cpus'] = cpumap
    def getCap(self):
        """Return the scheduler cap from vcpus_params."""
        return self.info['vcpus_params']['cap']
    def setCap(self, cpu_cap):
        """Set the scheduler cap in vcpus_params."""
        self.info['vcpus_params']['cap'] = cpu_cap
    def getWeight(self):
        """Return the scheduler weight from vcpus_params."""
        return self.info['vcpus_params']['weight']
    def setWeight(self, cpu_weight):
        """Set the scheduler weight in vcpus_params."""
        self.info['vcpus_params']['weight'] = cpu_weight
    def getRestartCount(self):
        """Read xend/restart_count from this VM's xenstore path."""
        return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor domain info; when
            None it is read via dom_get.  Any required restart is started
            on a separate thread, outside refresh_shutdown_lock.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason. This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists. This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded. It will also occur if someone
                    # destroys a domain beneath us. We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen. This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped. We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain. We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain. XendCheckpoint will do
                        # this once it has finished. However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain. This can happen if a restore is in progress, or has
                # failed. Ignore this domain.
                pass
            else:
                # Domain is alive. If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock -- _maybeRestart may take a long time.
        if restart_reason:
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1878 # Restart functions - handling whether we come back up on shutdown.
    def _clearRestart(self):
        """Remove the recorded shutdown start time from the domain store."""
        self._removeDom("xend/shutdown_start_time")
1884 def _maybeDumpCore(self, reason):
1885 if reason == 'crash':
1886 if xoptions.get_enable_dump() or self.get_on_crash() \
1887 in ['coredump_and_destroy', 'coredump_and_restart']:
1888 try:
1889 self.dumpCore()
1890 except XendError:
1891 # This error has been logged -- there's nothing more
1892 # we can do in this context.
1893 pass
1895 def _maybeRestart(self, reason):
1896 # Before taking configured action, dump core if configured to do so.
1898 self._maybeDumpCore(reason)
1900 # Dispatch to the correct method based upon the configured on_{reason}
1901 # behaviour.
1902 actions = {"destroy" : self.destroy,
1903 "restart" : self._restart,
1904 "preserve" : self._preserve,
1905 "rename-restart" : self._renameRestart,
1906 "coredump-destroy" : self.destroy,
1907 "coredump-restart" : self._restart}
1909 action_conf = {
1910 'poweroff': 'actions_after_shutdown',
1911 'reboot': 'actions_after_reboot',
1912 'crash': 'actions_after_crash',
1915 action_target = self.info.get(action_conf.get(reason))
1916 func = actions.get(action_target, None)
1917 if func and callable(func):
1918 func()
1919 else:
1920 self.destroy() # default to destroy
    def _renameRestart(self):
        """Restart with rename=True: the old domain is renamed and
        preserved for debugging while a replacement is created."""
        self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # Guard against restart loops: refuse if a previous restart never
        # completed ...
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # ... or if the last restart was too recent.
        now = time.time()
        rst = self._readVm('xend/previous_restart_time')
        if rst:
            rst = float(rst)
            timeout = now - rst
            if timeout < MINIMUM_RESTART_TIME:
                log.error(
                    'VM %s restarting too fast (%f seconds since the last '
                    'restart). Refusing to restart to avoid loops.',
                    self.info['name_label'], timeout)
                self.destroy()
                return

        self._writeVm('xend/previous_restart_time', str(now))

        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry the xend/* bookkeeping over to the new domain.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                # Clean up the in-progress marker (on whichever domain
                # owns it) before re-raising.
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name. This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: info dict carrying the ORIGINAL name and UUID, from which
            the replacement domain should be created
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the original identity for the replacement domain;
        # this (dead) domain takes the freshly generated name and UUID.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
2023 def _preserve(self):
2024 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
2025 self.domid)
2026 self._unwatchVm()
2027 self.storeDom('xend/shutdown_completed', 'True')
2028 self._stateSet(DOM_STATE_HALTED)
2031 # Debugging ..
    def dumpCore(self, corefile = None):
        """Create a core dump for this domain.

        @param corefile: optional destination path; when omitted, a
            timestamped file under /var/xen/dump is used
        @raise: XendError if core dumping failed.
        """

        if not corefile:
            this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
            corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
                self.info['name_label'], self.domid)

        if os.path.isdir(corefile):
            raise XendError("Cannot dump core in a directory: %s" %
                            corefile)

        try:
            try:
                # Flag the dump in the VM's xenstore tree while it is in
                # progress; always cleared in the finally clause below.
                self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
                xc.domain_dumpcore(self.domid, corefile)
            except RuntimeError, ex:
                # Rename a partial dump so it is not mistaken for a good one;
                # best-effort only (the file may not exist at all).
                corefile_incomp = corefile+'-incomplete'
                try:
                    os.rename(corefile, corefile_incomp)
                except:
                    pass

                log.error("core dump failed: id = %s name = %s: %s",
                          self.domid, self.info['name_label'], str(ex))
                raise XendError("Failed to dump core: %s" % str(ex))
        finally:
            self._removeVm(DUMPCORE_IN_PROGRESS)
2067 # Device creation/deletion functions
2070 def _createDevice(self, deviceClass, devConfig):
2071 return self.getDeviceController(deviceClass).createDevice(devConfig)
2073 def _waitForDevice(self, deviceClass, devid):
2074 return self.getDeviceController(deviceClass).waitForDevice(devid)
2076 def _waitForDeviceUUID(self, dev_uuid):
2077 deviceClass, config = self.info['devices'].get(dev_uuid)
2078 self._waitForDevice(deviceClass, config['devid'])
2080 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2081 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2082 devid, backpath)
2084 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2085 return self.getDeviceController(deviceClass).reconfigureDevice(
2086 devid, devconfig)
2088 def _createDevices(self):
2089 """Create the devices for a vm.
2091 @raise: VmError for invalid devices
2092 """
2093 if self.image:
2094 self.image.prepareEnvironment()
2096 vscsi_uuidlist = {}
2097 vscsi_devidlist = []
2098 ordered_refs = self.info.ordered_device_refs()
2099 for dev_uuid in ordered_refs:
2100 devclass, config = self.info['devices'][dev_uuid]
2101 if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
2102 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2103 dev_uuid = config.get('uuid')
2104 devid = self._createDevice(devclass, config)
2106 # store devid in XendConfig for caching reasons
2107 if dev_uuid in self.info['devices']:
2108 self.info['devices'][dev_uuid][1]['devid'] = devid
2110 elif devclass == 'vscsi':
2111 vscsi_config = config.get('devs', [])[0]
2112 devid = vscsi_config.get('devid', '')
2113 dev_uuid = config.get('uuid')
2114 vscsi_uuidlist[devid] = dev_uuid
2115 vscsi_devidlist.append(devid)
2117 #It is necessary to sorted it for /dev/sdxx in guest.
2118 if len(vscsi_uuidlist) > 0:
2119 vscsi_devidlist.sort()
2120 for vscsiid in vscsi_devidlist:
2121 dev_uuid = vscsi_uuidlist[vscsiid]
2122 devclass, config = self.info['devices'][dev_uuid]
2123 log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
2124 dev_uuid = config.get('uuid')
2125 devid = self._createDevice(devclass, config)
2126 # store devid in XendConfig for caching reasons
2127 if dev_uuid in self.info['devices']:
2128 self.info['devices'][dev_uuid][1]['devid'] = devid
2131 if self.image:
2132 self.image.createDeviceModel()
2134 #if have pass-through devs, need the virtual pci slots info from qemu
2135 self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices. Nothrow guarantee."""
        # NOTE(review): the 'suspend' parameter is not used in this body;
        # kept for interface compatibility with callers -- confirm.
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            # The transaction was only used for listing; abort, not commit.
            t.abort()
2172 def getDeviceController(self, name):
2173 """Get the device controller for this domain, and if it
2174 doesn't exist, create it.
2176 @param name: device class name
2177 @type name: string
2178 @rtype: subclass of DevController
2179 """
2180 if name not in self._deviceControllers:
2181 devController = XendDevices.make_controller(name, self)
2182 if not devController:
2183 raise XendError("Unknown device type: %s" % name)
2184 self._deviceControllers[name] = devController
2186 return self._deviceControllers[name]
2189 # Migration functions (public)
2192 def testMigrateDevices(self, network, dst):
2193 """ Notify all device about intention of migration
2194 @raise: XendError for a device that cannot be migrated
2195 """
2196 for (n, c) in self.info.all_devices_sxpr():
2197 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2198 if rc != 0:
2199 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, every device is asked to recover.  'ctr' counts how
        many devices completed the current step; devices beyond that
        count never saw this step, so they are recovered at step - 1.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                # Once the counter runs out, the remaining devices were not
                # notified at this step: recover them at the previous one.
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2219 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2220 step, domName=''):
2221 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2222 network, dst, step, domName)
2224 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2225 dst, step, domName=''):
2226 return self.getDeviceController(deviceClass).recover_migrate(
2227 deviceConfig, network, dst, step, domName)
2230 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the low-level domain via xc.domain_create, applies the
        platform tuning parameters (timer mode, Viridian, HPET, vpt
        alignment), sets the maximum vcpu count, checks VT-d device
        assignability, and registers the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 2MB here to be on the safe side.
        balloon.free(2*1024, self) # 2MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        s3_integrity = 0
        if self.info.has_key('s3_integrity'):
            s3_integrity = self.info['s3_integrity']
        # Flag bits passed to domain_create: 0 = HVM, 1 = HAP, 2 = S3 integrity.
        flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2)

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = flags,
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')

        # Covers both a failed hypercall return and the swallowed-exception
        # path above (domid left unset/None).
        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configuration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Test whether the devices can be assigned with VT-d
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci) # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(self.domid, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/dev/func from the BDF word.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
2341 def _introduceDomain(self):
2342 assert self.domid is not None
2343 assert self.store_mfn is not None
2344 assert self.store_port is not None
2346 try:
2347 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2348 except RuntimeError, exn:
2349 raise XendError(str(exn))
2351 def _setTarget(self, target):
2352 assert self.domid is not None
2354 try:
2355 SetTarget(self.domid, target)
2356 self.storeDom('target', target)
2357 except RuntimeError, exn:
2358 raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided

        If no explicit cpu list is configured, and the host has more than
        one NUMA node, pick the least-loaded node with enough free memory
        and pin all vcpus to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty explicit cpu list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Score each node by how many other domains' vcpus can run
                # on it (scaled by node size), then return the index of the
                # least loaded node from node_list.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    # Normalise load by node size; nodes with no cpus or not
                    # in the candidate list are disqualified with maxint.
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Memory needed for this domain, in KiB as reported by physinfo.
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Build the domain image and bring the domain to a runnable state.

        Runs the bootloader, pins vcpus, computes and reserves the
        memory/shadow/VT-d headroom, creates the event channels and
        devices, and finally marks the domain RUNNING.

        @raise VmError: on configuration or build errors (also wraps any
            RuntimeError from the hypercalls)
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            # Wall-clock offset for guests that run their RTC in localtime.
            if self.info['platform'].get('localtime', 0):
                if time.localtime(time.time())[8]:
                    self.info['platform']['rtc_timeoffset'] = -time.altzone
                else:
                    self.info['platform']['rtc_timeoffset'] = -time.timezone

            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            # Hypercall failures are surfaced to callers as VmError.
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
2516 def cleanupDomain(self):
2517 """Cleanup domain resources; release devices. Idempotent. Nothrow
2518 guarantee."""
2520 self.refresh_shutdown_lock.acquire()
2521 try:
2522 self.unwatchShutdown()
2523 self._releaseDevices()
2524 bootloader_tidy(self)
2526 if self.image:
2527 self.image = None
2529 try:
2530 self._removeDom()
2531 except:
2532 log.exception("Removing domain path failed.")
2534 self._stateSet(DOM_STATE_HALTED)
2535 self.domid = None # Do not push into _stateSet()!
2536 finally:
2537 self.refresh_shutdown_lock.release()
2540 def unwatchShutdown(self):
2541 """Remove the watch on the domain's control/shutdown node, if any.
2542 Idempotent. Nothrow guarantee. Expects to be protected by the
2543 refresh_shutdown_lock."""
2545 try:
2546 try:
2547 if self.shutdownWatch:
2548 self.shutdownWatch.unwatch()
2549 finally:
2550 self.shutdownWatch = None
2551 except:
2552 log.exception("Unwatching control/shutdown failed.")
2554 def waitForShutdown(self):
2555 self.state_updated.acquire()
2556 try:
2557 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2558 self.state_updated.wait(timeout=1.0)
2559 finally:
2560 self.state_updated.release()
    def waitForSuspend(self):
        """Wait for the guest to respond to a suspend request by
        shutting down. If the guest hasn't re-written control/shutdown
        after a certain amount of time, it's obviously not listening and
        won't suspend, so we give up. HVM guests with no PV drivers
        should already be shutdown.

        @raise XendError: if the guest never acknowledges the request
        """
        state = "suspend"
        nr_tries = 60    # roughly one minute at one wait per second

        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(1.0)
                # While control/shutdown still reads "suspend", the guest
                # has not acknowledged; count down towards the timeout.
                if state == "suspend":
                    if nr_tries == 0:
                        msg = ('Timeout waiting for domain %s to suspend'
                               % self.domid)
                        # Clear the pending request so the guest is not left
                        # with a stale suspend order.
                        self._writeDom('control/shutdown', '')
                        raise XendError(msg)
                    state = self.readDom('control/shutdown')
                    nr_tries -= 1
        finally:
            self.state_updated.release()
2588 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish bringing up a restored domain (called from XendCheckpoint).

        @param store_mfn: machine frame number of the xenstore ring
        @param console_mfn: machine frame number of the console ring
        """

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True => restoring an existing device model rather than booting
            # a fresh one.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
2609 def _endRestore(self):
2610 self.setResume(False)
2613 # VM Destroy
2616 def _prepare_phantom_paths(self):
2617 # get associated devices to destroy
2618 # build list of phantom devices to be removed after normal devices
2619 plist = []
2620 if self.domid is not None:
2621 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2622 try:
2623 for dev in t.list():
2624 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2625 % (self.dompath, dev))
2626 if backend_phantom_vbd is not None:
2627 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2628 % backend_phantom_vbd)
2629 plist.append(backend_phantom_vbd)
2630 plist.append(frontend_phantom_vbd)
2631 finally:
2632 t.abort()
2633 return plist
2635 def _cleanup_phantom_devs(self, plist):
2636 # remove phantom devices
2637 if not plist == []:
2638 time.sleep(2)
2639 for paths in plist:
2640 if paths.find('backend') != -1:
2641 # Modify online status /before/ updating state (latter is watched by
2642 # drivers, so this ordering avoids a race).
2643 xstransact.Write(paths, 'online', "0")
2644 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2645 # force
2646 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain. Nothrow guarantee."""

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Snapshot phantom vbd paths before the device tree disappears.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause first so the domain cannot run during teardown.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                # Nothrow guarantee: log and carry on with the cleanup.
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains disappear entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Destroy this domain and immediately recreate it from the same
        configuration, carrying over the xenstore 'xend' subtree.
        Nothrow guarantee (failures are logged)."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Preserve xend/* nodes (e.g. restart count) across the reset.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # domid must be cleared so a fresh one is allocated.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Tear down any half-created replacement before re-raising.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain.

        Used e.g. after a live checkpoint, or after a later error during
        save or migrate; checks that the domain is currently suspended
        first so it is safe to call from anywhere.
        """
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migate); checks that the domain
        # is currently suspended first so safe to call from anywhere
        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                # Full (slow) resume: devices were torn down at suspend time
                # and must be recreated from scratch.
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # NOTE(review): assumes self.image is set at this point -- confirm
        # for domains without a device model.
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2756 # Channels for xenstore and console
2759 def _createChannels(self):
2760 """Create the channels to the domain.
2761 """
2762 self.store_port = self._createChannel()
2763 self.console_port = self._createChannel()
2766 def _createChannel(self):
2767 """Create an event channel to the domain.
2768 """
2769 try:
2770 if self.domid != None:
2771 return xc.evtchn_alloc_unbound(domid = self.domid,
2772 remote_dom = 0)
2773 except:
2774 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2775 raise
2777 def _resetChannels(self):
2778 """Reset all event channels in the domain.
2779 """
2780 try:
2781 if self.domid != None:
2782 return xc.evtchn_reset(dom = self.domid)
2783 except:
2784 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2785 raise
2789 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        No-op for HVM boots and for direct dom0 kernel boots; otherwise
        runs the configured bootloader (defaulting to pygrub) against the
        first bootable disk and merges the resulting image config into
        self.info.

        @raise VmError: if no disk is bootable, or the bootloader produced
            no configuration
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0. Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = osdep.pygrub_path

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # A tap disk backed by a plain file (st_rdev == 0) in a format
            # other than raw must be mounted through the loopback vbd first.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device. pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always unmount the loopback device, even if the
                # bootloader failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2867 # VM Functions
2870 def _readVMDetails(self, params):
2871 """Read the specified parameters from the store.
2872 """
2873 try:
2874 return self._gatherVm(*params)
2875 except ValueError:
2876 # One of the int/float entries in params has a corresponding store
2877 # entry that is invalid. We recover, because older versions of
2878 # Xend may have put the entry there (memory/target, for example),
2879 # but this is in general a bad situation to have reached.
2880 log.exception(
2881 "Store corrupted at %s! Domain %d's configuration may be "
2882 "affected.", self.vmpath, self.domid)
2883 return []
2885 def _cleanupVm(self):
2886 """Cleanup VM resources. Idempotent. Nothrow guarantee."""
2888 self._unwatchVm()
2890 try:
2891 self._removeVm()
2892 except:
2893 log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole MiB.
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            balloon.free(overhead_kb, self)
2910 def _unwatchVm(self):
2911 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2912 guarantee."""
2913 try:
2914 try:
2915 if self.vmWatch:
2916 self.vmWatch.unwatch()
2917 finally:
2918 self.vmWatch = None
2919 except:
2920 log.exception("Unwatching VM path failed.")
2922 def testDeviceComplete(self):
2923 """ For Block IO migration safety we must ensure that
2924 the device has shutdown correctly, i.e. all blocks are
2925 flushed to disk
2926 """
2927 start = time.time()
2928 while True:
2929 test = 0
2930 diff = time.time() - start
2931 vbds = self.getDeviceController('vbd').deviceIDs()
2932 taps = self.getDeviceController('tap').deviceIDs()
2933 for i in vbds + taps:
2934 test = 1
2935 log.info("Dev %s still active, looping...", i)
2936 time.sleep(0.1)
2938 if test == 0:
2939 break
2940 if diff >= MIGRATE_TIMEOUT:
2941 log.info("Dev still active but hit max loop timeout")
2942 break
2944 def testvifsComplete(self):
2945 """ In case vifs are released and then created for the same
2946 domain, we need to wait the device shut down.
2947 """
2948 start = time.time()
2949 while True:
2950 test = 0
2951 diff = time.time() - start
2952 for i in self.getDeviceController('vif').deviceIDs():
2953 test = 1
2954 log.info("Dev %s still active, looping...", i)
2955 time.sleep(0.1)
2957 if test == 0:
2958 break
2959 if diff >= MIGRATE_TIMEOUT:
2960 log.info("Dev still active but hit max loop timeout")
2961 break
2963 def _storeVmDetails(self):
2964 to_store = {}
2966 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
2967 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
2968 if self._infoIsSet(info_key):
2969 to_store[key] = str(self.info[info_key])
2971 if self._infoIsSet("static_memory_min"):
2972 to_store["memory"] = str(self.info["static_memory_min"])
2973 if self._infoIsSet("static_memory_max"):
2974 to_store["maxmem"] = str(self.info["static_memory_max"])
2976 image_sxpr = self.info.image_sxpr()
2977 if image_sxpr:
2978 to_store['image'] = sxp.to_string(image_sxpr)
2980 if not self._readVm('xend/restart_count'):
2981 to_store['xend/restart_count'] = str(0)
2983 log.debug("Storing VM details: %s", scrub_password(to_store))
2985 self._writeVm(to_store)
2986 self._setVmPermissions()
2988 def _setVmPermissions(self):
2989 """Allow the guest domain to read its UUID. We don't allow it to
2990 access any other entry, for security."""
2991 xstransact.SetPermissions('%s/uuid' % self.vmpath,
2992 { 'dom' : self.domid,
2993 'read' : True,
2994 'write' : False })
2997 # Utility functions
3000 def __getattr__(self, name):
3001 if name == "state":
3002 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
3003 log.warn("".join(traceback.format_stack()))
3004 return self._stateGet()
3005 else:
3006 raise AttributeError(name)
3008 def __setattr__(self, name, value):
3009 if name == "state":
3010 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
3011 log.warn("".join(traceback.format_stack()))
3012 self._stateSet(value)
3013 else:
3014 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a new power state: wake any threads waiting on
        state_updated and fire a Xen-API 'power_state' event, but only
        when the live state actually differs from the requested one."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                # Imported here to avoid a circular import at module load.
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
3030 def _stateGet(self):
3031 # Lets try and reconsitute the state from xc
3032 # first lets try and get the domain info
3033 # from xc - this will tell us if the domain
3034 # exists
3035 info = dom_get(self.getDomid())
3036 if info is None or info['shutdown']:
3037 # We are either HALTED or SUSPENDED
3038 # check saved image exists
3039 from xen.xend import XendDomain
3040 managed_config_path = \
3041 XendDomain.instance()._managed_check_point_path( \
3042 self.get_uuid())
3043 if os.path.exists(managed_config_path):
3044 return XEN_API_VM_POWER_STATE_SUSPENDED
3045 else:
3046 return XEN_API_VM_POWER_STATE_HALTED
3047 elif info['crashed']:
3048 # Crashed
3049 return XEN_API_VM_POWER_STATE_CRASHED
3050 else:
3051 # We are either RUNNING or PAUSED
3052 if info['paused']:
3053 return XEN_API_VM_POWER_STATE_PAUSED
3054 else:
3055 return XEN_API_VM_POWER_STATE_RUNNING
3057 def _infoIsSet(self, name):
3058 return name in self.info and self.info[name] is not None
3060 def _checkName(self, name):
3061 """Check if a vm name is valid. Valid names contain alphabetic
3062 characters, digits, or characters in '_-.:/+'.
3063 The same name cannot be used for more than one vm at the same time.
3065 @param name: name
3066 @raise: VmError if invalid
3067 """
3068 from xen.xend import XendDomain
3070 if name is None or name == '':
3071 raise VmError('Missing VM Name')
3073 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
3074 raise VmError('Invalid VM Name')
3076 dom = XendDomain.instance().domain_lookup_nr(name)
3077 if dom and dom.info['uuid'] != self.info['uuid']:
3078 raise VmError("VM name '%s' already exists%s" %
3079 (name,
3080 dom.domid is not None and
3081 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: domain info dict from xc; fetched here when None
        @param refresh: when True, also re-evaluate shutdown state
        @param transaction: passed through to _update_consoles
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                # Domain has disappeared; nothing to update.
                return

        if info["maxmem_kb"] < 0:
            # Replace a negative maxmem_kb with the host's total memory.
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
3111 def sxpr(self, ignore_store = False, legacy_only = True):
3112 result = self.info.to_sxp(domain = self,
3113 ignore_devices = ignore_store,
3114 legacy_only = legacy_only)
3116 return result
3118 # Xen API
3119 # ----------------------------------------------------------------
3121 def get_uuid(self):
3122 dom_uuid = self.info.get('uuid')
3123 if not dom_uuid: # if it doesn't exist, make one up
3124 dom_uuid = uuid.createString()
3125 self.info['uuid'] = dom_uuid
3126 return dom_uuid
    def get_memory_static_max(self):
        """Return the configured memory_static_max (0 if unset)."""
        return self.info.get('memory_static_max', 0)
    def get_memory_static_min(self):
        """Return the configured memory_static_min (0 if unset)."""
        return self.info.get('memory_static_min', 0)
    def get_memory_dynamic_max(self):
        """Return the configured memory_dynamic_max (0 if unset)."""
        return self.info.get('memory_dynamic_max', 0)
    def get_memory_dynamic_min(self):
        """Return the configured memory_dynamic_min (0 if unset)."""
        return self.info.get('memory_dynamic_min', 0)
    # only update memory-related config values if they maintain sanity
    def _safe_set_memory(self, key, newval):
        """Set self.info[key] to newval; if the memory sanity check then
        fails, restore the previous value and re-raise."""
        oldval = self.info.get(key, 0)
        try:
            self.info[key] = newval
            self.info._memory_sanity_check()
        except Exception, ex:
            # Roll back so self.info stays internally consistent.
            self.info[key] = oldval
            raise
    def set_memory_static_max(self, val):
        """Set memory_static_max, rolling back if sanity checks fail."""
        self._safe_set_memory('memory_static_max', val)
    def set_memory_static_min(self, val):
        """Set memory_static_min, rolling back if sanity checks fail."""
        self._safe_set_memory('memory_static_min', val)
    def set_memory_dynamic_max(self, val):
        """Set memory_dynamic_max, rolling back if sanity checks fail."""
        self._safe_set_memory('memory_dynamic_max', val)
    def set_memory_dynamic_min(self, val):
        """Set memory_dynamic_min, rolling back if sanity checks fail."""
        self._safe_set_memory('memory_dynamic_min', val)
3156 def get_vcpus_params(self):
3157 if self.getDomid() is None:
3158 return self.info['vcpus_params']
3160 retval = xc.sched_credit_domain_get(self.getDomid())
3161 return retval
    def get_power_state(self):
        """Return the Xen-API power-state string for the live state."""
        return XEN_API_VM_POWER_STATE[self._stateGet()]
    def get_platform(self):
        """Return the 'platform' dict from the config ({} if unset)."""
        return self.info.get('platform', {})
    def get_pci_bus(self):
        """Return the PCI bus string from the config ('' if unset)."""
        return self.info.get('pci_bus', '')
    def get_tools_version(self):
        """Return the tools_version dict from the config ({} if unset)."""
        return self.info.get('tools_version', {})
3170 def get_metrics(self):
3171 return self.metrics.get_uuid();
    def get_security_label(self, xspol=None):
        """Return this domain's security label, evaluated under policy
        xspol if given, else under the currently active policy."""
        import xen.util.xsm.xsm as security
        label = security.get_security_label(self, xspol)
        return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                           VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                         the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
             [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
               DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # New label must be <policy type>:<policy name>:<vm label>.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # Hold the policy write lock for the whole relabel; released
            # in the finally clause below even on the early returns.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best-effort save of the managed config.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3304 def get_on_shutdown(self):
3305 after_shutdown = self.info.get('actions_after_shutdown')
3306 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3307 return XEN_API_ON_NORMAL_EXIT[-1]
3308 return after_shutdown
3310 def get_on_reboot(self):
3311 after_reboot = self.info.get('actions_after_reboot')
3312 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3313 return XEN_API_ON_NORMAL_EXIT[-1]
3314 return after_reboot
3316 def get_on_suspend(self):
3317 # TODO: not supported
3318 after_suspend = self.info.get('actions_after_suspend')
3319 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3320 return XEN_API_ON_NORMAL_EXIT[-1]
3321 return after_suspend
3323 def get_on_crash(self):
3324 after_crash = self.info.get('actions_after_crash')
3325 if not after_crash or after_crash not in \
3326 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3327 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3328 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
    def get_dev_config_by_uuid(self, dev_class, dev_uuid):
        """ Get's a device configuration either from XendConfig or
        from the DevController.

        @param dev_class: device class, either, 'vbd' or 'vif'
        @param dev_uuid: device UUID

        @rtype: dictionary
        """
        dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

        # shortcut if the domain isn't started because
        # the devcontrollers will have no better information
        # than XendConfig.

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                                XEN_API_VM_POWER_STATE_SUSPENDED):
            if dev_config:
                return copy.deepcopy(dev_config)
            return None

        # instead of using dev_class, we use the dev_type
        # that is from XendConfig.
        controller = self.getDeviceController(dev_type)
        if not controller:
            return None

        all_configs = controller.getAllDeviceConfigurations()
        if not all_configs:
            return None

        # Merge the live controller configuration over the stored one.
        updated_dev_config = copy.deepcopy(dev_config)
        for _devid, _devcfg in all_configs.items():
            if _devcfg.get('uuid') == dev_uuid:
                updated_dev_config.update(_devcfg)
                updated_dev_config['id'] = _devid
                return updated_dev_config

        # No live entry matched -- return the stored config unchanged.
        return updated_dev_config
3369 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3370 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3371 if not config:
3372 return {}
3374 config['VM'] = self.get_uuid()
3376 if dev_class == 'vif':
3377 if not config.has_key('name'):
3378 config['name'] = config.get('vifname', '')
3379 if not config.has_key('MAC'):
3380 config['MAC'] = config.get('mac', '')
3381 if not config.has_key('type'):
3382 config['type'] = 'paravirtualised'
3383 if not config.has_key('device'):
3384 devid = config.get('id')
3385 if devid != None:
3386 config['device'] = 'eth%s' % devid
3387 else:
3388 config['device'] = ''
3390 if not config.has_key('network'):
3391 try:
3392 bridge = config.get('bridge', None)
3393 if bridge is None:
3394 from xen.util import Brctl
3395 if_to_br = dict([(i,b)
3396 for (b,ifs) in Brctl.get_state().items()
3397 for i in ifs])
3398 vifname = "vif%s.%s" % (self.getDomid(),
3399 config.get('id'))
3400 bridge = if_to_br.get(vifname, None)
3401 config['network'] = \
3402 XendNode.instance().bridge_to_network(
3403 config.get('bridge')).get_uuid()
3404 except Exception:
3405 log.exception('bridge_to_network')
3406 # Ignore this for now -- it may happen if the device
3407 # has been specified using the legacy methods, but at
3408 # some point we're going to have to figure out how to
3409 # handle that properly.
3411 config['MTU'] = 1500 # TODO
3413 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3414 xennode = XendNode.instance()
3415 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3416 config['io_read_kbs'] = rx_bps/1024
3417 config['io_write_kbs'] = tx_bps/1024
3418 rx, tx = xennode.get_vif_stat(self.domid, devid)
3419 config['io_total_read_kbs'] = rx/1024
3420 config['io_total_write_kbs'] = tx/1024
3421 else:
3422 config['io_read_kbs'] = 0.0
3423 config['io_write_kbs'] = 0.0
3424 config['io_total_read_kbs'] = 0.0
3425 config['io_total_write_kbs'] = 0.0
3427 config['security_label'] = config.get('security_label', '')
3429 if dev_class == 'vbd':
3431 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3432 controller = self.getDeviceController(dev_class)
3433 devid, _1, _2 = controller.getDeviceDetails(config)
3434 xennode = XendNode.instance()
3435 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3436 config['io_read_kbs'] = rd_blkps
3437 config['io_write_kbs'] = wr_blkps
3438 else:
3439 config['io_read_kbs'] = 0.0
3440 config['io_write_kbs'] = 0.0
3442 config['VDI'] = config.get('VDI', '')
3443 config['device'] = config.get('dev', '')
3444 if ':' in config['device']:
3445 vbd_name, vbd_type = config['device'].split(':', 1)
3446 config['device'] = vbd_name
3447 if vbd_type == 'cdrom':
3448 config['type'] = XEN_API_VBD_TYPE[0]
3449 else:
3450 config['type'] = XEN_API_VBD_TYPE[1]
3452 config['driver'] = 'paravirtualised' # TODO
3453 config['image'] = config.get('uname', '')
3455 if config.get('mode', 'r') == 'r':
3456 config['mode'] = 'RO'
3457 else:
3458 config['mode'] = 'RW'
3460 if dev_class == 'vtpm':
3461 if not config.has_key('type'):
3462 config['type'] = 'paravirtualised' # TODO
3463 if not config.has_key('backend'):
3464 config['backend'] = "00000000-0000-0000-0000-000000000000"
3466 return config
3468 def get_dev_property(self, dev_class, dev_uuid, field):
3469 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3470 try:
3471 return config[field]
3472 except KeyError:
3473 raise XendError('Invalid property for device: %s' % field)
3475 def set_dev_property(self, dev_class, dev_uuid, field, value):
3476 self.info['devices'][dev_uuid][1][field] = value
3478 def get_vcpus_util(self):
3479 vcpu_util = {}
3480 xennode = XendNode.instance()
3481 if 'VCPUs_max' in self.info and self.domid != None:
3482 for i in range(0, self.info['VCPUs_max']):
3483 util = xennode.get_vcpu_util(self.domid, i)
3484 vcpu_util[str(i)] = util
3486 return vcpu_util
    def get_consoles(self):
        """Return the list of console refs ([] if unset)."""
        return self.info.get('console_refs', [])

    def get_vifs(self):
        """Return the list of VIF refs ([] if unset)."""
        return self.info.get('vif_refs', [])

    def get_vbds(self):
        """Return the list of VBD refs ([] if unset)."""
        return self.info.get('vbd_refs', [])

    def get_vtpms(self):
        """Return the list of vTPM refs ([] if unset)."""
        return self.info.get('vtpm_refs', [])
    def get_dpcis(self):
        """Return the DPCI UUIDs registered for this VM."""
        return XendDPCI.get_by_VM(self.info.get('uuid'))

    def get_dscsis(self):
        """Return the DSCSI UUIDs registered for this VM."""
        return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        # 'tap:...' images go through the blktap controller, everything
        # else through the plain vbd controller.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                log.exception(exn)
                # Roll back so a failed hotplug leaves no stale config.
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device

        NOTE(review): despite the docstring, this returns
        config['devid'], not the device uuid; and when the domain is
        not RUNNING, 'config' is never bound, so the final return would
        raise NameError -- confirm callers only use this on running
        domains.
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):

            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                log.exception(exn)
                # Roll back so a failed hotplug leaves no stale config.
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_vtpm(self, xenapi_vtpm):
        """Create a VTPM device from the passed struct in Xen API format.

        @return: uuid of the device
        @rtype: string
        @raise VmError: if the domain is not halted or already has a vTPM
        @raise XendError: if adding the device to the config fails
        """

        if self._stateGet() not in (DOM_STATE_HALTED,):
            raise VmError("Can only add vTPM to a halted domain.")
        # Only a single vTPM per domain is permitted.
        if self.get_vtpms() != []:
            raise VmError('Domain already has a vTPM.')
        dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
        if not dev_uuid:
            raise XendError('Failed to create device')

        return dev_uuid
    def create_console(self, xenapi_console):
        """ Create a console device from a Xen API struct.

        @return: uuid of device
        @rtype: string
        @raise VmError: if the domain is not halted
        @raise XendError: if adding the device to the config fails
        """
        if self._stateGet() not in (DOM_STATE_HALTED,):
            raise VmError("Can only add console to a halted domain.")

        dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
        if not dev_uuid:
            raise XendError('Failed to create device')

        return dev_uuid
    def set_console_other_config(self, console_uuid, other_config):
        """Replace the other_config map of the given console device."""
        self.info.console_update(console_uuid, 'other_config', other_config)
    def create_dpci(self, xenapi_pci):
        """Create pci device from the passed struct in Xen API format.

        @param xenapi_pci: DPCI struct from Xen API
        @rtype: bool
        #@rtype: string
        @return: True if successfully created device
        #@return: UUID
        """

        dpci_uuid = uuid.createString()

        # Flatten the options map into [key, value] pairs for sxp.
        dpci_opts = []
        opts_dict = xenapi_pci.get('options')
        for k in opts_dict.keys():
            dpci_opts.append([k, opts_dict[k]])

        # Convert xenapi to sxp
        ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

        target_pci_sxp = \
            ['pci',
                ['dev',
                    ['domain', '0x%02x' % ppci.get_domain()],
                    ['bus', '0x%02x' % ppci.get_bus()],
                    ['slot', '0x%02x' % ppci.get_slot()],
                    ['func', '0x%1x' % ppci.get_func()],
                    ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                    ['opts', dpci_opts],
                    ['uuid', dpci_uuid]
                ],
                ['state', 'Initialising']
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain inactive: merge the new device into the stored
            # 'pci' config entry (creating one if none exists yet).
            old_pci_sxp = self._getDeviceInfo_pci(0)

            if old_pci_sxp is None:
                dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                new_pci_sxp = ['pci']
                for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                    new_pci_sxp.append(existing_dev)
                new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

                dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_pci_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Domain running: hot-attach via device_configure.
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dpci_uuid
    def create_dscsi(self, xenapi_dscsi):
        """Create scsi device from the passed struct in Xen API format.

        @param xenapi_dscsi: DSCSI struct from Xen API
        @rtype: string
        @return: UUID
        """

        dscsi_uuid = uuid.createString()

        # Convert xenapi to sxp
        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
        # devid is the virtual host number, the first field of the
        # virtual H:C:T:L tuple.
        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
        target_vscsi_sxp = \
            ['vscsi',
                ['dev',
                    ['devid', devid],
                    ['p-devname', pscsi.get_dev_name()],
                    ['p-dev', pscsi.get_physical_HCTL()],
                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                    ['state', xenbusState['Initialising']],
                    ['uuid', dscsi_uuid]
                ],
                ['feature-host', 0]
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain inactive: merge the new device into the stored
            # 'vscsi' config entry (creating one if none exists yet).
            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)

            if cur_vscsi_sxp is None:
                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                    new_vscsi_sxp.append(existing_dev)
                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_vscsi_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Domain running: hot-attach via device_configure.
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Remove device dev_uuid of class dev_type from this domain.

        For a RUNNING or PAUSED domain the device is also torn down via
        its device controller; the stored config entry and the matching
        '<dev_type>_refs' entry are removed even if that teardown fails.

        @raise XendError: if the device is unknown or has no devid
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            # Always drop the stored config, even on teardown failure.
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
    def destroy_vbd(self, dev_uuid):
        """Destroy the VBD dev_uuid (see destroy_device_by_uuid)."""
        self.destroy_device_by_uuid('vbd', dev_uuid)

    def destroy_vif(self, dev_uuid):
        """Destroy the VIF dev_uuid (see destroy_device_by_uuid)."""
        self.destroy_device_by_uuid('vif', dev_uuid)

    def destroy_vtpm(self, dev_uuid):
        """Destroy the vTPM dev_uuid (see destroy_device_by_uuid)."""
        self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Detach the passed-through PCI device identified by dev_uuid.

        Rebuilds the domain's 'pci' sxp without the target device.  If
        the domain is running the device is put into the Closing state
        via device_configure; otherwise the stored config is updated
        and saved.

        @raise XendError: if the device cannot be found or detached
        """

        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        for dev in sxp.children(old_pci_sxp, 'dev'):
            # Match the device by its canonical domain:bus:slot.func name.
            domain = int(sxp.child_value(dev, 'domain'), 16)
            bus = int(sxp.child_value(dev, 'bus'), 16)
            slot = int(sxp.child_value(dev, 'slot'), 16)
            func = int(sxp.child_value(dev, 'func'), 16)
            name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
            if ppci.get_name() == name:
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            self.info.device_update(dev_uuid, new_pci_sxp)
            # Drop the whole 'pci' entry when no devices remain.
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach the virtual SCSI device identified by dev_uuid.

        Rebuilds the domain's 'vscsi' sxp without the target device.  If
        the domain is running the device is put into the Closing state
        via device_configure; otherwise the stored config is updated
        and saved.

        @raise XendError: if the device cannot be found or detached
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid)
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        # Match the device by its virtual H:C:T:L address.
        target_dev = None
        new_vscsi_sxp = ['vscsi', ['feature-host', 0]]
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev, ['feature-host', 0]]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            self.info.device_update(dev_uuid, new_vscsi_sxp)
            # Drop the whole 'vscsi' entry when no devices remain.
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
3849 def destroy_xapi_instances(self):
3850 """Destroy Xen-API instances stored in XendAPIStore.
3851 """
3852 # Xen-API classes based on XendBase have their instances stored
3853 # in XendAPIStore. Cleanup these instances here, if they are supposed
3854 # to be destroyed when the parent domain is dead.
3856 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3857 # XendBase and there's no need to remove them from XendAPIStore.
3859 from xen.xend import XendDomain
3860 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3861 # domain still exists.
3862 return
3864 # Destroy the VMMetrics instance.
3865 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3866 is not None:
3867 self.metrics.destroy()
3869 # Destroy DPCI instances.
3870 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3871 XendAPIStore.deregister(dpci_uuid, "DPCI")
3873 # Destroy DSCSI instances.
3874 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3875 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3877 def has_device(self, dev_class, dev_uuid):
3878 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
    def __str__(self):
        """Return a short human-readable summary of this domain."""
        return '<domain id=%s name=%s memory=%s state=%s>' % \
               (str(self.domid), self.info['name_label'],
                str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])

    # repr() shows the same summary as str().
    __repr__ = __str__