ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 18783:76e90ac5067e

xend: Restore CPU affinity on domain resume.

Move affinity-setting logic into its own function and call from
relevant places.

From: Jiri Denemark <jdenemar@redhat.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 11 11:06:27 2008 +0000 (2008-11-11)
parents ef202be3cf54
children 857bda0c15b3
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
65 MIGRATE_TIMEOUT = 30.0
66 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
68 xc = xen.lowlevel.xc.xc()
69 xoptions = XendOptions.instance()
71 log = logging.getLogger("xend.XendDomainInfo")
72 #log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from the supplied SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a duplicate: look the name up first, and fall back
    # to the UUID if no live domain holds that name.
    existing = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if existing is None or existing.domid is None:
        existing = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Tear down whatever was partially constructed, then propagate.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Create and start a VM described by an XAPI-style dictionary.

    @param config_dict: An configuration dictionary.

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    dominfo = XendDomainInfo(XendConfig.XendConfig(xapi = config_dict))
    try:
        dominfo.start()
    except:
        # Clean up the partially-built domain before propagating.
        log.exception('Domain construction failed')
        dominfo.destroy()
        raise
    return dominfo
def recreate(info, priv):
    """Create the VM object for an existing domain. The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (dominfo from the hypervisor)
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    # A dying domain's store paths are already gone; recreating them
    # would corrupt the store, so this is a hard precondition.
    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            # /local/domain/<d>/vm is gone: flag reinitialisation and
            # jump straight past the remaining checks via XendError.
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        # Cross-check the UUID stored under /vm against the one the
        # hypervisor reported for this domid.
        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    # NOTE(review): if the first Read above raised, 'vmpath' may be an
    # empty value here and the constructor will pick a fresh path — confirm.
    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild both the /local/domain and /vm trees from scratch.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    vm = XendDomainInfo(XendConfig.XendConfig(sxp_obj = config),
                        resume = True)
    try:
        vm.resume()
    except:
        # Resume failed: destroy the half-restored domain and re-raise.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating a VM.
    Used for persistent domain definitions that have not been started yet.

    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object

    @rtype: XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look up a domain by its name.

    @param name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    return XendDomain.instance().domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Map a hypervisor shutdown code to a human-readable reason.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason ("?" for an unknown code)
    @rtype: string
    """
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
276 def dom_get(dom):
277 """Get info from xen for an existing domain.
279 @param dom: domain id
280 @type dom: int
281 @return: info or None
282 @rtype: dictionary
283 """
284 try:
285 domlist = xc.domain_getinfo(dom, 1)
286 if domlist and dom == domlist[0]['domid']:
287 return domlist[0]
288 except Exception, err:
289 # ignore missing domain
290 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
291 return None
293 def do_FLR(domid):
294 from xen.xend.server.pciif import parse_pci_name, PciDevice
295 path = '/local/domain/0/backend/pci/%u/0/' % domid
296 num_devs = xstransact.Read(path + 'num_devs');
297 if num_devs is None or num_devs == "":
298 return;
300 num_devs = int(xstransact.Read(path + 'num_devs'));
302 dev_str_list = []
303 for i in range(num_devs):
304 dev_str = xstransact.Read(path + 'dev-%i' % i)
305 dev_str_list = dev_str_list + [dev_str]
307 for dev_str in dev_str_list:
308 (dom, b, d, f) = parse_pci_name(dev_str)
309 try:
310 dev = PciDevice(dom, b, d, f)
311 except Exception, e:
312 raise VmError("pci: failed to locate device and "+
313 "parse it's resources - "+str(e))
314 dev.do_FLR()
316 class XendDomainInfo:
317 """An object represents a domain.
319 @TODO: try to unify dom and domid, they mean the same thing, but
320 xc refers to it as dom, and everywhere else, including
321 xenstore it is domid. The best way is to change xc's
322 python interface.
324 @ivar info: Parsed configuration
325 @type info: dictionary
326 @ivar domid: Domain ID (if VM has started)
327 @type domid: int or None
328 @ivar vmpath: XenStore path to this VM.
329 @type vmpath: string
330 @ivar dompath: XenStore path to this Domain.
331 @type dompath: string
332 @ivar image: Reference to the VM Image.
333 @type image: xen.xend.image.ImageHandler
334 @ivar store_port: event channel to xenstored
335 @type store_port: int
336 @ivar console_port: event channel to xenconsoled
337 @type console_port: int
338 @ivar store_mfn: xenstored mfn
339 @type store_mfn: int
340 @ivar console_mfn: xenconsoled mfn
341 @type console_mfn: int
342 @ivar notes: OS image notes
343 @type notes: dictionary
344 @ivar vmWatch: reference to a watch on the xenstored vmpath
345 @type vmWatch: xen.xend.xenstore.xswatch
346 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
347 @type shutdownWatch: xen.xend.xenstore.xswatch
348 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
349 @type shutdownStartTime: float or None
350 # @ivar state: Domain state
351 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
352 @ivar state_updated: lock for self.state
353 @type state_updated: threading.Condition
354 @ivar refresh_shutdown_lock: lock for polling shutdown state
355 @type refresh_shutdown_lock: threading.Condition
356 @ivar _deviceControllers: device controller cache for this domain
357 @type _deviceControllers: dict 'string' to DevControllers
358 """
360 def __init__(self, info, domid = None, dompath = None, augment = False,
361 priv = False, resume = False, vmpath = None):
362 """Constructor for a domain
364 @param info: parsed configuration
365 @type info: dictionary
366 @keyword domid: Set initial domain id (if any)
367 @type domid: int
368 @keyword dompath: Set initial dompath (if any)
369 @type dompath: string
370 @keyword augment: Augment given info with xenstored VM info
371 @type augment: bool
372 @keyword priv: Is a privileged domain (Dom 0)
373 @type priv: bool
374 @keyword resume: Is this domain being resumed?
375 @type resume: bool
376 """
378 self.info = info
379 if domid == None:
380 self.domid = self.info.get('domid')
381 else:
382 self.domid = domid
384 #REMOVE: uuid is now generated in XendConfig
385 #if not self._infoIsSet('uuid'):
386 # self.info['uuid'] = uuid.toString(uuid.create())
388 # Find a unique /vm/<uuid>/<integer> path if not specified.
389 # This avoids conflict between pre-/post-migrate domains when doing
390 # localhost relocation.
391 self.vmpath = vmpath
392 i = 0
393 while self.vmpath == None:
394 self.vmpath = XS_VMROOT + self.info['uuid']
395 if i != 0:
396 self.vmpath = self.vmpath + '-' + str(i)
397 try:
398 if self._readVm("uuid"):
399 self.vmpath = None
400 i = i + 1
401 except:
402 pass
404 self.dompath = dompath
406 self.image = None
407 self.store_port = None
408 self.store_mfn = None
409 self.console_port = None
410 self.console_mfn = None
412 self.native_protocol = None
414 self.vmWatch = None
415 self.shutdownWatch = None
416 self.shutdownStartTime = None
417 self._resume = resume
419 self.state_updated = threading.Condition()
420 self.refresh_shutdown_lock = threading.Condition()
421 self._stateSet(DOM_STATE_HALTED)
423 self._deviceControllers = {}
425 for state in DOM_STATES_OLD:
426 self.info[state] = 0
428 if augment:
429 self._augmentInfo(priv)
431 self._checkName(self.info['name_label'])
433 self.metrics = XendVMMetrics(uuid.createString(), self)
436 #
437 # Public functions available through XMLRPC
438 #
441 def start(self, is_managed = False):
442 """Attempts to start the VM by do the appropriate
443 initialisation if it not started.
444 """
445 from xen.xend import XendDomain
447 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
448 try:
449 XendTask.log_progress(0, 30, self._constructDomain)
450 XendTask.log_progress(31, 60, self._initDomain)
452 XendTask.log_progress(61, 70, self._storeVmDetails)
453 XendTask.log_progress(71, 80, self._storeDomDetails)
454 XendTask.log_progress(81, 90, self._registerWatches)
455 XendTask.log_progress(91, 100, self.refreshShutdown)
457 xendomains = XendDomain.instance()
458 xennode = XendNode.instance()
460 # save running configuration if XendDomains believe domain is
461 # persistent
462 if is_managed:
463 xendomains.managed_config_save(self)
465 if xennode.xenschedinfo() == 'credit':
466 xendomains.domain_sched_credit_set(self.getDomid(),
467 self.getWeight(),
468 self.getCap())
469 except:
470 log.exception('VM start failed')
471 self.destroy()
472 raise
473 else:
474 raise XendError('VM already running')
476 def resume(self):
477 """Resumes a domain that has come back from suspension."""
478 state = self._stateGet()
479 if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
480 try:
481 self._constructDomain()
482 self._setCPUAffinity()
483 self._storeVmDetails()
484 self._createChannels()
485 self._createDevices()
486 self._storeDomDetails()
487 self._endRestore()
488 except:
489 log.exception('VM resume failed')
490 self.destroy()
491 raise
492 else:
493 raise XendError('VM is not suspended; it is %s'
494 % XEN_API_VM_POWER_STATE[state])
496 def shutdown(self, reason):
497 """Shutdown a domain by signalling this via xenstored."""
498 log.debug('XendDomainInfo.shutdown(%s)', reason)
499 if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
500 raise XendError('Domain cannot be shutdown')
502 if self.domid == 0:
503 raise XendError('Domain 0 cannot be shutdown')
505 if reason not in DOMAIN_SHUTDOWN_REASONS.values():
506 raise XendError('Invalid reason: %s' % reason)
507 self._removeVm('xend/previous_restart_time')
508 self.storeDom("control/shutdown", reason)
510 # HVM domain shuts itself down only if it has PV drivers
511 if self.info.is_hvm():
512 hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
513 if not hvm_pvdrv:
514 code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
515 log.info("HVM save:remote shutdown dom %d!", self.domid)
516 xc.domain_shutdown(self.domid, code)
518 def pause(self):
519 """Pause domain
521 @raise XendError: Failed pausing a domain
522 """
523 try:
524 xc.domain_pause(self.domid)
525 self._stateSet(DOM_STATE_PAUSED)
526 except Exception, ex:
527 log.exception(ex)
528 raise XendError("Domain unable to be paused: %s" % str(ex))
530 def unpause(self):
531 """Unpause domain
533 @raise XendError: Failed unpausing a domain
534 """
535 try:
536 xc.domain_unpause(self.domid)
537 self._stateSet(DOM_STATE_RUNNING)
538 except Exception, ex:
539 log.exception(ex)
540 raise XendError("Domain unable to be unpaused: %s" % str(ex))
542 def send_sysrq(self, key):
543 """ Send a Sysrq equivalent key via xenstored."""
544 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
545 raise XendError("Domain '%s' is not started" % self.info['name_label'])
547 asserts.isCharConvertible(key)
548 self.storeDom("control/sysrq", '%c' % key)
550 def sync_pcidev_info(self):
552 if not self.info.is_hvm():
553 return
555 devid = '0'
556 dev_info = self._getDeviceInfo_pci(devid)
557 if dev_info is None:
558 return
560 # get the virtual slot info from xenstore
561 dev_uuid = sxp.child_value(dev_info, 'uuid')
562 pci_conf = self.info['devices'][dev_uuid][1]
563 pci_devs = pci_conf['devs']
565 count = 0
566 vslots = None
567 while vslots is None and count < 20:
568 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
569 % (self.getDomid(), devid))
570 time.sleep(0.1)
571 count += 1
572 if vslots is None:
573 log.error("Device model didn't tell the vslots for PCI device")
574 return
576 #delete last delim
577 if vslots[-1] == ";":
578 vslots = vslots[:-1]
580 slot_list = vslots.split(';')
581 if len(slot_list) != len(pci_devs):
582 log.error("Device model's pci dev num dismatch")
583 return
585 #update the vslot info
586 count = 0;
587 for x in pci_devs:
588 x['vslt'] = slot_list[count]
589 count += 1
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug a PCI device into a running HVM guest.

        Checks the new device against already-assigned devices for slot
        and BDF conflicts, verifies VT-d assignability via the hypervisor,
        then signals the device model to insert it.

        @param dev_config: parsed device configuration with a 'devs' list
        @raise VmError: non-HVM guest, slot/device conflict, or the
                        device cannot be assigned.
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Same virtual slot already occupied (slot 0 is exempt).
                if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
                    int(x['vslt'], 16) != 0 ):
                    raise VmError("vslot %s already have a device." % (new_dev['vslt']))

                # Same physical device (domain/bus/slot/func) already in.
                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(self.domid, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Non-zero, non-(-1) return packs the failing BDF; unpack it
            # for the error message.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Tell the device model to insert the device at the virtual slot.
        bdf_str = "%s:%s:%s.%s@%s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'],
                                      new_dev['vslt'])
        self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
    def device_create(self, dev_config):
        """Create a new device.

        Registers the device in self.info, and if the domain is running,
        creates the backend/frontend entries and waits for the device to
        connect, rolling the config back on failure.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        @return: sxpr of the created device (from its device controller)
        @raise VmError: if device creation or connection fails.
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Roll back the config entry added above.
                del self.info['devices'][dev_uuid]
                # NOTE(review): 'pci' is a plain 'if' (not part of the
                # elif chain below), so a pci failure also falls through
                # to the final else and touches 'pci_refs' — confirm this
                # is intended.
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                if dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked under vbd_refs.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Domain not running: device is recorded but not instantiated.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        Handles both attach ('Initialising') and detach states; for HVM
        guests the device model is driven via hvm_pci_device_create /
        hvm_destroyPCIDevice and the virtual slot (vslt) is synced back
        into the sxp config.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                self.hvm_pci_device_create(dev_config)
                # Update vslt: the device model reports the assigned
                # virtual slot via its xenstore 'parameter' key.
                vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                       % self.getDomid())
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt
            else:
                # HVM PCI device detachment: find the virtual slot of the
                # BDF being removed among the currently attached devices.
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslt = '0x0'
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslt = x['vslt']
                        break
                # '0x0' doubles as the "not found" sentinel (slot 0 is
                # never hot-pluggable).
                if vslt == '0x0':
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslt, 16))
                # Update vslt
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device.

        Mirrors the pci configure path: handles attach (Initialising)
        and detach (Closing) for both running and dormant domains.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @return: True if the device was created or updated
        @rtype: boolean
        """
        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]
        req_devid = int(dev['devid'])
        existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
        state = dev['state']

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            if existing_dev_info is None:
                self.device_create(dev_sxp)
                return True
            elif existing_dev_info == "exists":
                raise XendError("The virtual device %s is already defined" % dev['v-dev'])

        elif state == xenbusState['Closing']:
            if existing_dev_info is None:
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # Running domain:
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Dormant domain: rebuild the sxp node by hand, dropping the
            # closing device and/or appending the new one.
            cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid, None)
            new_dev_sxp = ['vscsi']
            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    cur_dev_vdev = sxp.child_value(cur_dev, 'v-dev')
                    if cur_dev_vdev == dev['v-dev']:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                new_dev_sxp.append(sxp.child0(dev_sxp, 'dev'))

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
856 def device_configure(self, dev_sxp, devid = None):
857 """Configure an existing device.
859 @param dev_config: device configuration
860 @type dev_config: SXP object (parsed config)
861 @param devid: device id
862 @type devid: int
863 @return: Returns True if successfully updated device
864 @rtype: boolean
865 """
867 # convert device sxp to a dict
868 dev_class = sxp.name(dev_sxp)
869 dev_config = {}
871 if dev_class == 'pci':
872 return self.pci_device_configure(dev_sxp)
874 if dev_class == 'vscsi':
875 return self.vscsi_device_configure(dev_sxp)
877 for opt_val in dev_sxp[1:]:
878 try:
879 dev_config[opt_val[0]] = opt_val[1]
880 except IndexError:
881 pass
883 # use DevController.reconfigureDevice to change device config
884 dev_control = self.getDeviceController(dev_class)
885 dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
887 # update XendConfig with new device info
888 if dev_uuid:
889 self.info.device_update(dev_uuid, dev_sxp)
891 return True
893 def waitForDevices(self):
894 """Wait for this domain's configured devices to connect.
896 @raise VmError: if any device fails to initialise.
897 """
898 for devclass in XendDevices.valid_devices():
899 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Hot-unplug the PCI device at the given virtual slot from a
        running HVM guest by signalling the device model.

        @param vslot: virtual slot number of the device to remove
        @return: 0 on success
        @raise VmError: non-HVM guest, unknown slot, or slot 0 (not
                        hot-pluggable).
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            if int(x['vslt'], 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        if vslot == 0:
            raise VmError("Device @ vslot 0x%x do not support hotplug." % (vslot))

        # NOTE: 'x' deliberately leaks out of the loop above — it is the
        # matching device entry found before the break.
        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device, optionally removing its configuration.

        @param deviceClass: device class ('vif', 'vbd', 'tap', ...)
        @param devid: device id or name
        @keyword force: skip waiting for the backend to disappear
        @keyword rm_cfg: also remove the device from self.info
        @return: result of the device controller's destroyDevice, or None
                 if the domain is not running.
        @raise XendError: rm_cfg requested but the device is not defined.
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        # Pull the backend path out of the saved sxpr.
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running domain: recover the vif config via its MAC,
                    # captured from the saved sxprs before destruction.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else:  # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
999 def getDeviceSxprs(self, deviceClass):
1000 if deviceClass == 'pci':
1001 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1002 if dev_info is None:
1003 return []
1004 dev_uuid = sxp.child_value(dev_info, 'uuid')
1005 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1006 pci_len = len(pci_devs)
1007 return pci_devs
1008 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1009 return self.getDeviceController(deviceClass).sxprs()
1010 else:
1011 sxprs = []
1012 dev_num = 0
1013 for dev_type, dev_info in self.info.all_devices_sxpr():
1014 if dev_type != deviceClass:
1015 continue
1017 if deviceClass == 'vscsi':
1018 vscsi_devs = ['devs', []]
1019 for vscsi_dev in sxp.children(dev_info, 'dev'):
1020 vscsi_dev.append(['frontstate', None])
1021 vscsi_devs[1].append(vscsi_dev)
1022 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1023 sxprs.append([dev_num, [vscsi_devs]])
1024 else:
1025 sxprs.append([dev_num, dev_info])
1026 dev_num += 1
1027 return sxprs
1029 def getBlockDeviceClass(self, devid):
1030 # To get a device number from the devid,
1031 # we temporarily use the device controller of VBD.
1032 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1033 dev_info = self._getDeviceInfo_vbd(dev)
1034 if dev_info:
1035 return dev_info[0]
1037 def _getDeviceInfo_vif(self, mac):
1038 for dev_type, dev_info in self.info.all_devices_sxpr():
1039 if dev_type != 'vif':
1040 continue
1041 if mac == sxp.child_value(dev_info, 'mac'):
1042 return dev_info
1044 def _getDeviceInfo_vbd(self, devid):
1045 for dev_type, dev_info in self.info.all_devices_sxpr():
1046 if dev_type != 'vbd' and dev_type != 'tap':
1047 continue
1048 dev = sxp.child_value(dev_info, 'dev')
1049 dev = dev.split(':')[0]
1050 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1051 if devid == dev:
1052 return dev_info
1054 def _getDeviceInfo_pci(self, devid):
1055 for dev_type, dev_info in self.info.all_devices_sxpr():
1056 if dev_type != 'pci':
1057 continue
1058 return dev_info
1059 return None
1061 def _getDeviceInfo_vscsi(self, devid, vdev):
1062 devid = int(devid)
1063 for dev_type, dev_info in self.info.all_devices_sxpr():
1064 if dev_type != 'vscsi':
1065 continue
1066 existing_dev_uuid = sxp.child_value(dev_info, 'uuid')
1067 existing_conf = self.info['devices'][existing_dev_uuid][1]
1068 existing_dev = existing_conf['devs'][0]
1069 existing_devid = int(existing_dev['devid'])
1070 existing_vdev = existing_dev['v-dev']
1072 if vdev == existing_vdev:
1073 return "exists"
1075 if devid == existing_devid:
1076 return dev_info
1078 return None
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if the request would take dom0 below its
            configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024

        if self.domid == 0:
            # Refuse to shrink dom0 below the configured floor.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            memory_cur = self.get_memory_dynamic_max() / MiB
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            # Live domain: persist to /vm and poke the balloon driver via
            # memory/target (value is in KiB, hence the << 10).
            self.storeVm("memory", target)
            self.storeDom("memory/target", target << 10)
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new maximum (the
            previous value is restored first).
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        # Keep the old value so we can roll back on hypervisor failure.
        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # xc expects KiB.
            maxmem = int(limit) * 1024
            try:
                # NOTE(review): this early return means the
                # managed_config_save below only runs for non-running
                # (managed) domains — confirm that is intended.
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return a 'domain' sxpr describing every VCPU of this domain.

        For a live domain each VCPU's state is read from the hypervisor;
        otherwise zeroed placeholder entries are returned, with the cpumap
        taken from the stored affinity (or all 64 CPUs if unset).
        @raise XendError: wrapping any RuntimeError from xc.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 # hypervisor reports nanoseconds; convert
                                 # to seconds for the tools
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        # Low-level domain info dict from the hypervisor (None if the
        # domain no longer exists — see refreshShutdown's use of dom_get).
        return dom_get(self.domid)
1165 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0, whose memory/vcpu settings come from
            boot time rather than from the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0's sizing is authoritative from Xen, not the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    # translate legacy key to its Xen-API name
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only adopt store-discovered devices if the config has none yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh the VT100 and VNC console entries in self.info from the
        ports recorded in xenstore.

        @param transaction: optional open xenstore transaction to read
            through; plain reads are used when None.
        """
        # dom0 and not-yet-built domains have no console entries to sync.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # No serial console configured yet: create one.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', 'localhost')
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Nothing to do if the location is already current.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    # Only the first vfb is updated.
                    break
1272 # Function to update xenstore /vm/*
1275 def _readVm(self, *args):
1276 return xstransact.Read(self.vmpath, *args)
1278 def _writeVm(self, *args):
1279 return xstransact.Write(self.vmpath, *args)
1281 def _removeVm(self, *args):
1282 return xstransact.Remove(self.vmpath, *args)
1284 def _gatherVm(self, *args):
1285 return xstransact.Gather(self.vmpath, *args)
1287 def _listRecursiveVm(self, *args):
1288 return xstransact.ListRecursive(self.vmpath, *args)
1290 def storeVm(self, *args):
1291 return xstransact.Store(self.vmpath, *args)
1293 def permissionsVm(self, *args):
1294 return xstransact.SetPermissions(self.vmpath, *args)
1297 # Function to update xenstore /dom/*
1300 def readDom(self, *args):
1301 return xstransact.Read(self.dompath, *args)
1303 def gatherDom(self, *args):
1304 return xstransact.Gather(self.dompath, *args)
1306 def _writeDom(self, *args):
1307 return xstransact.Write(self.dompath, *args)
1309 def _removeDom(self, *args):
1310 return xstransact.Remove(self.dompath, *args)
1312 def storeDom(self, *args):
1313 return xstransact.Store(self.dompath, *args)
1316 def readDomTxn(self, transaction, *args):
1317 paths = map(lambda x: self.dompath + "/" + x, args)
1318 return transaction.read(*paths)
1320 def gatherDomTxn(self, transaction, *args):
1321 paths = map(lambda x: self.dompath + "/" + x, args)
1322 return transaction.gather(*paths)
1324 def _writeDomTxn(self, transaction, *args):
1325 paths = map(lambda x: self.dompath + "/" + x, args)
1326 return transaction.write(*paths)
1328 def _removeDomTxn(self, transaction, *args):
1329 paths = map(lambda x: self.dompath + "/" + x, args)
1330 return transaction.remove(*paths)
1332 def storeDomTxn(self, transaction, *args):
1333 paths = map(lambda x: self.dompath + "/" + x, args)
1334 return transaction.store(*paths)
1337 def _recreateDom(self):
1338 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1340 def _recreateDomFunc(self, t):
1341 t.remove()
1342 t.mkdir()
1343 t.set_permissions({'dom' : self.domid, 'read' : True})
1344 t.write('vm', self.vmpath)
1345 for i in [ 'device', 'control', 'error', 'memory' ]:
1346 t.mkdir(i)
1347 t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details into its /local/domain/<domid>
        xenstore tree (console/store rings, per-VCPU availability,
        ELF notes, security label)."""
        to_store = {
            'domid': str(self.domid),
            'vm': self.vmpath,
            'name': self.info['name_label'],
            'console/limit': str(xoptions.get_console_limit() * 1024),
            'memory/target': str(self.info['memory_dynamic_max'] / 1024),
            }

        # Helper: stringify v into to_store[n]; bools become "1"/"0";
        # None values are skipped entirely.
        def f(n, v):
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port',     self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type',     constype)
        f('store/port',       self.store_port)
        f('store/ring-ref',   self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes: normalise note names, and expand the 'features' note
        # into one boolean entry per feature ('!' prefix means disabled).
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1402 def _vcpuDomDetails(self):
1403 def availability(n):
1404 if self.info['vcpu_avail'] & (1 << n):
1405 return 'online'
1406 else:
1407 return 'offline'
1409 result = {}
1410 for v in range(0, self.info['VCPUs_max']):
1411 result["cpu/%d/availability" % v] = availability(v)
1412 return result
1415 # xenstore watches
1418 def _registerWatches(self):
1419 """Register a watch on this VM's entries in the store, and the
1420 domain's control/shutdown node, so that when they are changed
1421 externally, we keep up to date. This should only be called by {@link
1422 #create}, {@link #recreate}, or {@link #restore}, once the domain's
1423 details have been written, but before the new instance is returned."""
1424 self.vmWatch = xswatch(self.vmpath, self._storeChanged)
1425 self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
1426 self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: resync self.info with externally-changed
        /vm entries (name, on_* actions, rtc offset, image) and rewrite
        the /local/domain tree if anything changed.

        @return: 1 so the watch stays registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.
        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        # Missing rtc offset is treated as zero.
        if vm_details['rtc/timeoffset'] == None:
            vm_details['rtc/timeoffset'] = "0"

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Check if the rtc offset has changes
        if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
            self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
            changed = True

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self._storeDomDetails()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: when a non-suspend
        shutdown is requested, record (or reuse) the shutdown start time
        and schedule refreshShutdown to fire when the timeout expires.

        @return: True so the watch stays registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in progress; keep its deadline.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1506 # Public Attributes for the VM
    def getDomid(self):
        # Numeric domain id (None when the domain is not instantiated).
        return self.domid
1513 def setName(self, name, to_store = True):
1514 self._checkName(name)
1515 self.info['name_label'] = name
1516 if to_store:
1517 self.storeVm("name", name)
    def getName(self):
        # The domain's human-readable name.
        return self.info['name_label']
    def getDomainPath(self):
        # The /local/domain/<domid> xenstore path for this domain.
        return self.dompath
    def getShutdownReason(self):
        # Pending shutdown request, if any, from control/shutdown.
        return self.readDom('control/shutdown')
    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        # Event-channel port of the xenstore ring.
        return self.store_port
    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        # Event-channel port of the console ring.
        return self.console_port
    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']
    def getVCpuCount(self):
        # Maximum number of VCPUs configured for this domain.
        return self.info['VCPUs_max']
1543 def setVCpuCount(self, vcpus):
1544 def vcpus_valid(n):
1545 if vcpus <= 0:
1546 raise XendError('Zero or less VCPUs is invalid')
1547 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1548 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1549 vcpus_valid(vcpus)
1551 self.info['vcpu_avail'] = (1 << vcpus) - 1
1552 if self.domid >= 0:
1553 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1554 self._writeDom(self._vcpuDomDetails())
1555 self.info['VCPUs_live'] = vcpus
1556 else:
1557 if self.info['VCPUs_max'] > vcpus:
1558 # decreasing
1559 del self.info['cpus'][vcpus:]
1560 elif self.info['VCPUs_max'] < vcpus:
1561 # increasing
1562 for c in range(self.info['VCPUs_max'], vcpus):
1563 self.info['cpus'].append(list())
1564 self.info['VCPUs_max'] = vcpus
1565 xen.xend.XendDomain.instance().managed_config_save(self)
1566 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1567 vcpus)
    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        # info stores memory in bytes.
        return self.info['memory_dynamic_max'] / 1024
    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024
    def getResume(self):
        # String form of the resume flag.
        return str(self._resume)
    def setResume(self, isresume):
        # Record whether the next start is a resume.
        self._resume = isresume
    def getCpus(self):
        # Per-VCPU CPU-affinity lists.
        return self.info['cpus']
    def setCpus(self, cpumap):
        # Replace the per-VCPU CPU-affinity lists.
        self.info['cpus'] = cpumap
    def getCap(self):
        # Scheduler credit cap.
        return self.info['vcpus_params']['cap']
    def setCap(self, cpu_cap):
        # Set the scheduler credit cap (in-memory only).
        self.info['vcpus_params']['cap'] = cpu_cap
    def getWeight(self):
        # Scheduler credit weight.
        return self.info['vcpus_params']['weight']
    def setWeight(self, cpu_weight):
        # Set the scheduler credit weight (in-memory only).
        self.info['vcpus_params']['weight'] = cpu_weight
    def getRestartCount(self):
        # Number of restarts recorded in the /vm tree.
        return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor info dict; fetched
            via dom_get when None.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason. This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists. This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded. It will also occur if someone
                    # destroys a domain beneath us. We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen. This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped. We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain. We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain. XendCheckpoint will do
                        # this once it has finished. However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain. This can happen if a restore is in progress, or has
                # failed. Ignore this domain.
                pass
            else:
                # Domain is alive. If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock: _maybeRestart may destroy/recreate the
        # domain, which must not happen under refresh_shutdown_lock.
        if restart_reason:
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1715 # Restart functions - handling whether we come back up on shutdown.
    def _clearRestart(self):
        # Forget any recorded shutdown start time for this domain.
        self._removeDom("xend/shutdown_start_time")
1721 def _maybeDumpCore(self, reason):
1722 if reason == 'crash':
1723 if xoptions.get_enable_dump() or self.get_on_crash() \
1724 in ['coredump_and_destroy', 'coredump_and_restart']:
1725 try:
1726 self.dumpCore()
1727 except XendError:
1728 # This error has been logged -- there's nothing more
1729 # we can do in this context.
1730 pass
    def _maybeRestart(self, reason):
        """Take the configured after-shutdown action for the given reason
        ('poweroff', 'reboot' or 'crash'), defaulting to destroy."""
        # Before taking configured action, dump core if configured to do so.
        self._maybeDumpCore(reason)

        # Dispatch to the correct method based upon the configured on_{reason}
        # behaviour.
        actions = {"destroy" : self.destroy,
                   "restart" : self._restart,
                   "preserve" : self._preserve,
                   "rename-restart" : self._renameRestart,
                   "coredump-destroy" : self.destroy,
                   "coredump-restart" : self._restart}

        # Map shutdown reason to the config key holding the chosen action.
        action_conf = {
            'poweroff': 'actions_after_shutdown',
            'reboot': 'actions_after_reboot',
            'crash': 'actions_after_crash',
        }

        action_target = self.info.get(action_conf.get(reason))
        func = actions.get(action_target, None)
        if func and callable(func):
            func()
        else:
            self.destroy() # default to destroy
1759 def _renameRestart(self):
1760 self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # Refuse if a previous restart never completed (loop guard).
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Rate-limit: refuse restarts that come too soon after the last.
        now = time.time()
        rst = self._readVm('xend/previous_restart_time')
        if rst:
            rst = float(rst)
            timeout = now - rst
            if timeout < MINIMUM_RESTART_TIME:
                log.error(
                    'VM %s restarting too fast (%f seconds since the last '
                    'restart). Refusing to restart to avoid loops.',
                    self.info['name_label'], timeout)
                self.destroy()
                return

        self._writeVm('xend/previous_restart_time', str(now))

        # Keep the old /vm xend subtree so it can be copied to the new dom.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name. This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: the info dict (carrying the ORIGINAL name and UUID) from
            which the replacement domain should be created.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The clone keeps the original identity; this (dead) domain takes
        # the freshly generated name/UUID.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
1860 def _preserve(self):
1861 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
1862 self.domid)
1863 self._unwatchVm()
1864 self.storeDom('xend/shutdown_completed', 'True')
1865 self._stateSet(DOM_STATE_HALTED)
1868 # Debugging ..
    def dumpCore(self, corefile = None):
        """Create a core dump for this domain.

        @param corefile: target path; defaults to a timestamped file under
            /var/xen/dump.
        @raise: XendError if core dumping failed.
        """

        try:
            if not corefile:
                this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
                corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
                                  self.info['name_label'], self.domid)

            if os.path.isdir(corefile):
                raise XendError("Cannot dump core in a directory: %s" %
                                corefile)

            # Flag the dump so other tools can see one is in progress.
            self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
            xc.domain_dumpcore(self.domid, corefile)
            self._removeVm(DUMPCORE_IN_PROGRESS)
        except RuntimeError, ex:
            # NOTE(review): if xc failed before creating corefile, this
            # rename will itself raise — confirm a partial file always
            # exists on this path.
            corefile_incomp = corefile+'-incomplete'
            os.rename(corefile, corefile_incomp)
            self._removeVm(DUMPCORE_IN_PROGRESS)
            log.exception("XendDomainInfo.dumpCore failed: id = %s name = %s",
                          self.domid, self.info['name_label'])
            raise XendError("Failed to dump core: %s" % str(ex))
1899 # Device creation/deletion functions
1902 def _createDevice(self, deviceClass, devConfig):
1903 return self.getDeviceController(deviceClass).createDevice(devConfig)
1905 def _waitForDevice(self, deviceClass, devid):
1906 return self.getDeviceController(deviceClass).waitForDevice(devid)
1908 def _waitForDeviceUUID(self, dev_uuid):
1909 deviceClass, config = self.info['devices'].get(dev_uuid)
1910 self._waitForDevice(deviceClass, config['devid'])
1912 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
1913 return self.getDeviceController(deviceClass).waitForDevice_destroy(
1914 devid, backpath)
1916 def _reconfigureDevice(self, deviceClass, devid, devconfig):
1917 return self.getDeviceController(deviceClass).reconfigureDevice(
1918 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        vscsi devices are deferred and created in devid order so guest
        /dev/sdXX naming is stable; all other classes are created in the
        configured order.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Collect vscsi devices; created after sorting below.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        #It is necessary to sorted it for /dev/sdxx in guest.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices. Nothrow guarantee.

        NOTE(review): the `suspend` parameter is unused in this body —
        confirm whether callers rely on it elsewhere.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        log.debug("Removing %s", dev);
                        self.destroyDevice(devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'], devclass, dev)
        finally:
            # The transaction was only used for listing; nothing to commit.
            t.abort()
1996 def getDeviceController(self, name):
1997 """Get the device controller for this domain, and if it
1998 doesn't exist, create it.
2000 @param name: device class name
2001 @type name: string
2002 @rtype: subclass of DevController
2003 """
2004 if name not in self._deviceControllers:
2005 devController = XendDevices.make_controller(name, self)
2006 if not devController:
2007 raise XendError("Unknown device type: %s" % name)
2008 self._deviceControllers[name] = devController
2010 return self._deviceControllers[name]
2013 # Migration functions (public)
2016 def testMigrateDevices(self, network, dst):
2017 """ Notify all device about intention of migration
2018 @raise: XendError for a device that cannot be migrated
2019 """
2020 for (n, c) in self.info.all_devices_sxpr():
2021 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2022 if rc != 0:
2023 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        Runs the given migration step on every device.  On failure, walks
        the device list again to recover: devices that had already completed
        this step are recovered at 'step', the remainder at 'step - 1'
        (tracked via the ctr countdown below), then the original exception
        is re-raised.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                # ctr counts the devices that completed this step.
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                # Once the successfully-migrated devices are exhausted,
                # recover the rest at the previous step.
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2043 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2044 step, domName=''):
2045 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2046 network, dst, step, domName)
2048 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2049 dst, step, domName=''):
2050 return self.getDeviceController(deviceClass).recover_migrate(
2051 deviceConfig, network, dst, step, domName)
2054 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain in the hypervisor (xc.domain_create), records
        its domid and xenstore path, applies HVM platform parameters
        (timer mode, Viridian, HPET), sets the vcpu ceiling, pre-checks
        VT-d device assignment, and registers the domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 2MB here to be on the safe side.
        balloon.free(2*1024) # 2MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = (int(hvm) << 0) | (int(hap) << 1),
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')
            # NOTE(review): non-ACM creation failures are swallowed here;
            # we then rely on the domid check below (None < 0 is true in
            # Python 2) to turn the failure into a VmError.

        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configuration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Test whether the devices can be assigned with VT-d
        # NOTE(review): str() of a missing 'pci' entry yields the truthy
        # string "None", so this branch appears to run even without
        # pass-through devices configured -- confirm against
        # xc.test_assign_device's handling of that value.
        pci_str = str(self.info["platform"].get("pci"))
        if hvm and pci_str:
            bdf = xc.test_assign_device(self.domid, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/device/function for the
                # error message.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
2150 def _introduceDomain(self):
2151 assert self.domid is not None
2152 assert self.store_mfn is not None
2153 assert self.store_port is not None
2155 try:
2156 IntroduceDomain(self.domid, self.store_mfn, self.store_port)
2157 except RuntimeError, exn:
2158 raise XendError(str(exn))
2160 def _setTarget(self, target):
2161 assert self.domid is not None
2163 try:
2164 SetTarget(self.domid, target)
2165 self.storeDom('target', target)
2166 except RuntimeError, exn:
2167 raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided.

        If no explicit cpus list is configured, on multi-node (NUMA)
        hosts pick the least-loaded node with enough free memory and pin
        every vcpu to that node's cpus instead.
        """

        def has_cpus():
            # True iff the config carries at least one non-empty per-vcpu
            # cpu list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            # Explicit pinning: apply each vcpu's configured cpu list.
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Score each candidate node by how many other domains'
                # online vcpus can run on it (scaled by the node's cpu
                # count), and return the index of the least-loaded one.
                # Nodes outside node_list (or with no cpus) are excluded
                # via sys.maxint.  'info' is the enclosing scope's
                # xc.physinfo() result.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        # Normalize load by the node's cpu count (x16 to
                        # keep some precision with integer division).
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Node must have enough free memory for this domain (KiB).
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the constructed domain: run the bootloader, create
        the image, pin vcpus, size and reserve memory, build the kernel
        image and channels, and bring the domain to RUNNING.

        @raise: VmError on any failure (RuntimeErrors are converted);
            bootloader state is cleaned up on the error paths.
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            # localtime guests: bias the RTC by the host's UTC offset
            # (honouring DST via the localtime tuple's isdst flag).
            if self.info['platform'].get('localtime', 0):
                if time.localtime(time.time())[8]:
                    self.info['platform']['rtc_timeoffset'] = -time.altzone
                else:
                    self.info['platform']['rtc_timeoffset'] = -time.timezone

            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            # Build the domain image; returns the xenstore/console MFNs
            # and optional ELF notes / native protocol.
            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee."""

        # Hold the lock so refreshShutdown cannot run concurrently with
        # teardown.
        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                # Best-effort: keep the nothrow guarantee.
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None  # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
2349 def unwatchShutdown(self):
2350 """Remove the watch on the domain's control/shutdown node, if any.
2351 Idempotent. Nothrow guarantee. Expects to be protected by the
2352 refresh_shutdown_lock."""
2354 try:
2355 try:
2356 if self.shutdownWatch:
2357 self.shutdownWatch.unwatch()
2358 finally:
2359 self.shutdownWatch = None
2360 except:
2361 log.exception("Unwatching control/shutdown failed.")
2363 def waitForShutdown(self):
2364 self.state_updated.acquire()
2365 try:
2366 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2367 self.state_updated.wait(timeout=1.0)
2368 finally:
2369 self.state_updated.release()
2372 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish restoring the domain (called from XendCheckpoint):
        record the xenstore/console MFNs, reintroduce the domain to
        xenstored, recreate the image and device model, and re-establish
        watches."""

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True: the device model is restoring, not booting fresh.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        # Restore is finished: presumably clears a resume flag set for the
        # duration of the restore -- confirm against setResume/XendCheckpoint.
        self.setResume(False)
2397 # VM Destroy
2400 def _prepare_phantom_paths(self):
2401 # get associated devices to destroy
2402 # build list of phantom devices to be removed after normal devices
2403 plist = []
2404 if self.domid is not None:
2405 t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
2406 try:
2407 for dev in t.list():
2408 backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
2409 % (self.dompath, dev))
2410 if backend_phantom_vbd is not None:
2411 frontend_phantom_vbd = xstransact.Read("%s/frontend" \
2412 % backend_phantom_vbd)
2413 plist.append(backend_phantom_vbd)
2414 plist.append(frontend_phantom_vbd)
2415 finally:
2416 t.abort()
2417 return plist
2419 def _cleanup_phantom_devs(self, plist):
2420 # remove phantom devices
2421 if not plist == []:
2422 time.sleep(2)
2423 for paths in plist:
2424 if paths.find('backend') != -1:
2425 # Modify online status /before/ updating state (latter is watched by
2426 # drivers, so this ordering avoids a race).
2427 xstransact.Write(paths, 'online', "0")
2428 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2429 # force
2430 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee."""

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Snapshot phantom-vbd paths first; they are removed last, after the
        # normal devices are gone.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                # Pause before FLR/destroy so the guest cannot touch
                # pass-through devices while they are being reset.
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                # Nothrow guarantee: log and carry on with the cleanup.
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted outright once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Reset the domain: destroy it and recreate a fresh one from the
        same configuration, carrying over the xend/ VM-path entries.
        Failures are logged, not raised."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Preserve the xend/ subtree so it can be replayed into the new VM.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                # Clear the stale domid so creation allocates a new one.
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                # Creation failed part-way: tear down the half-built domain.
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
2494 def resumeDomain(self):
2495 log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))
2497 # resume a suspended domain (e.g. after live checkpoint, or after
2498 # a later error during save or migate); checks that the domain
2499 # is currently suspended first so safe to call from anywhere
2501 xeninfo = dom_get(self.domid)
2502 if xeninfo is None:
2503 return
2504 if not xeninfo['shutdown']:
2505 return
2506 reason = shutdown_reason(xeninfo['shutdown_reason'])
2507 if reason != 'suspend':
2508 return
2510 try:
2511 # could also fetch a parsed note from xenstore
2512 fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
2513 if not fast:
2514 self._releaseDevices()
2515 self.testDeviceComplete()
2516 self.testvifsComplete()
2517 log.debug("XendDomainInfo.resumeDomain: devices released")
2519 self._resetChannels()
2521 self._removeDom('control/shutdown')
2522 self._removeDom('device-misc/vif/nextDeviceID')
2524 self._createChannels()
2525 self._introduceDomain()
2526 self._storeDomDetails()
2528 self._createDevices()
2529 log.debug("XendDomainInfo.resumeDomain: devices created")
2531 xc.domain_resume(self.domid, fast)
2532 ResumeDomain(self.domid)
2533 except:
2534 log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
2535 self.image.resumeDeviceModel()
2536 log.debug("XendDomainInfo.resumeDomain: completed")
2540 # Channels for xenstore and console
2543 def _createChannels(self):
2544 """Create the channels to the domain.
2545 """
2546 self.store_port = self._createChannel()
2547 self.console_port = self._createChannel()
2550 def _createChannel(self):
2551 """Create an event channel to the domain.
2552 """
2553 try:
2554 if self.domid != None:
2555 return xc.evtchn_alloc_unbound(domid = self.domid,
2556 remote_dom = 0)
2557 except:
2558 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2559 raise
2561 def _resetChannels(self):
2562 """Reset all event channels in the domain.
2563 """
2564 try:
2565 if self.domid != None:
2566 return xc.evtchn_reset(dom = self.domid)
2567 except:
2568 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2569 raise
2573 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        For non-HVM, non-dom0-kernel boots, runs the configured bootloader
        (defaulting to pygrub) against the first bootable disk, mounting
        file-backed blktap disks on the loopback device first, and merges
        the resulting image config into self.info.

        @raise: VmError if no disk is bootable or the bootloader produced
            no configuration.
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = osdep.pygrub_path

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # st_rdev == 0 means a plain file rather than a device node.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device.  pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always unmount the loopback device if we mounted it above.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2651 # VM Functions
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        @param params: parameter specs accepted by _gatherVm
        @return: the gathered values, or [] if the store held an invalid
            int/float entry (logged as store corruption).
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid.  We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s!  Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
    def _cleanupVm(self):
        """Cleanup VM resources.  Idempotent.  Nothrow guarantee."""

        self._unwatchVm()

        try:
            self._removeVm()
        except:
            # Best-effort: keep the nothrow guarantee.
            log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain.

        Estimates the per-domain overhead (x86 only), subtracts shadow
        memory the domain already holds, and balloons dom0 down if more
        is still needed.
        """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole MiB (Python 2 integer division).
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            balloon.free(overhead_kb)
2694 def _unwatchVm(self):
2695 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2696 guarantee."""
2697 try:
2698 try:
2699 if self.vmWatch:
2700 self.vmWatch.unwatch()
2701 finally:
2702 self.vmWatch = None
2703 except:
2704 log.exception("Unwatching VM path failed.")
2706 def testDeviceComplete(self):
2707 """ For Block IO migration safety we must ensure that
2708 the device has shutdown correctly, i.e. all blocks are
2709 flushed to disk
2710 """
2711 start = time.time()
2712 while True:
2713 test = 0
2714 diff = time.time() - start
2715 for i in self.getDeviceController('vbd').deviceIDs():
2716 test = 1
2717 log.info("Dev %s still active, looping...", i)
2718 time.sleep(0.1)
2720 if test == 0:
2721 break
2722 if diff >= MIGRATE_TIMEOUT:
2723 log.info("Dev still active but hit max loop timeout")
2724 break
2726 def testvifsComplete(self):
2727 """ In case vifs are released and then created for the same
2728 domain, we need to wait the device shut down.
2729 """
2730 start = time.time()
2731 while True:
2732 test = 0
2733 diff = time.time() - start
2734 for i in self.getDeviceController('vif').deviceIDs():
2735 test = 1
2736 log.info("Dev %s still active, looping...", i)
2737 time.sleep(0.1)
2739 if test == 0:
2740 break
2741 if diff >= MIGRATE_TIMEOUT:
2742 log.info("Dev still active but hit max loop timeout")
2743 break
2745 def _storeVmDetails(self):
2746 to_store = {}
2748 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
2749 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
2750 if self._infoIsSet(info_key):
2751 to_store[key] = str(self.info[info_key])
2753 if self._infoIsSet("static_memory_min"):
2754 to_store["memory"] = str(self.info["static_memory_min"])
2755 if self._infoIsSet("static_memory_max"):
2756 to_store["maxmem"] = str(self.info["static_memory_max"])
2758 image_sxpr = self.info.image_sxpr()
2759 if image_sxpr:
2760 to_store['image'] = sxp.to_string(image_sxpr)
2762 if not self._readVm('xend/restart_count'):
2763 to_store['xend/restart_count'] = str(0)
2765 log.debug("Storing VM details: %s", scrub_password(to_store))
2767 self._writeVm(to_store)
2768 self._setVmPermissions()
    def _setVmPermissions(self):
        """Allow the guest domain to read its UUID.  We don't allow it to
        access any other entry, for security."""
        # Read-only grant on the single uuid node for this domain.
        xstransact.SetPermissions('%s/uuid' % self.vmpath,
                                  { 'dom' : self.domid,
                                    'read' : True,
                                    'write' : False })
2779 # Utility functions
2782 def __getattr__(self, name):
2783 if name == "state":
2784 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
2785 log.warn("".join(traceback.format_stack()))
2786 return self._stateGet()
2787 else:
2788 raise AttributeError(name)
2790 def __setattr__(self, name, value):
2791 if name == "state":
2792 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
2793 log.warn("".join(traceback.format_stack()))
2794 self._stateSet(value)
2795 else:
2796 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a power-state change: notify waiters on state_updated and
        dispatch a Xen API 'power_state' modification event, but only when
        the live state actually differs."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        """Reconstitute the VM's Xen API power state live from xc.

        Shut-down or nonexistent domains map to SUSPENDED when a managed
        checkpoint file exists, else HALTED; otherwise crashed/paused/
        running are reported directly from the domain info.
        """
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
2839 def _infoIsSet(self, name):
2840 return name in self.info and self.info[name] is not None
2842 def _checkName(self, name):
2843 """Check if a vm name is valid. Valid names contain alphabetic
2844 characters, digits, or characters in '_-.:/+'.
2845 The same name cannot be used for more than one vm at the same time.
2847 @param name: name
2848 @raise: VmError if invalid
2849 """
2850 from xen.xend import XendDomain
2852 if name is None or name == '':
2853 raise VmError('Missing VM Name')
2855 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
2856 raise VmError('Invalid VM Name')
2858 dom = XendDomain.instance().domain_lookup_nr(name)
2859 if dom and dom.info['uuid'] != self.info['uuid']:
2860 raise VmError("VM name '%s' already exists%s" %
2861 (name,
2862 dom.domid is not None and
2863 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: pre-fetched domain info dict; fetched via dom_get
            when omitted (a no-op if the domain no longer exists).
        @param refresh: also run refreshShutdown against the new info.
        @param transaction: passed through to _update_consoles.
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        # maxmem_kb < 0 means "unlimited"; substitute the host's total
        # memory so downstream consumers see a concrete figure.
        if info["maxmem_kb"] < 0:
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
2893 def sxpr(self, ignore_store = False, legacy_only = True):
2894 result = self.info.to_sxp(domain = self,
2895 ignore_devices = ignore_store,
2896 legacy_only = legacy_only)
2898 return result
2900 # Xen API
2901 # ----------------------------------------------------------------
2903 def get_uuid(self):
2904 dom_uuid = self.info.get('uuid')
2905 if not dom_uuid: # if it doesn't exist, make one up
2906 dom_uuid = uuid.createString()
2907 self.info['uuid'] = dom_uuid
2908 return dom_uuid
    # Memory limit accessors; values are stored in bytes (see the note in
    # _initDomain) and default to 0 when unset.
    def get_memory_static_max(self):
        return self.info.get('memory_static_max', 0)
    def get_memory_static_min(self):
        return self.info.get('memory_static_min', 0)
    def get_memory_dynamic_max(self):
        return self.info.get('memory_dynamic_max', 0)
    def get_memory_dynamic_min(self):
        return self.info.get('memory_dynamic_min', 0)
2919 # only update memory-related config values if they maintain sanity
2920 def _safe_set_memory(self, key, newval):
2921 oldval = self.info.get(key, 0)
2922 try:
2923 self.info[key] = newval
2924 self.info._memory_sanity_check()
2925 except Exception, ex:
2926 self.info[key] = oldval
2927 raise
    # Memory limit setters; each delegates to _safe_set_memory, which
    # rolls back the change if the memory sanity check fails.
    def set_memory_static_max(self, val):
        self._safe_set_memory('memory_static_max', val)
    def set_memory_static_min(self, val):
        self._safe_set_memory('memory_static_min', val)
    def set_memory_dynamic_max(self, val):
        self._safe_set_memory('memory_dynamic_max', val)
    def set_memory_dynamic_min(self, val):
        self._safe_set_memory('memory_dynamic_min', val)
2938 def get_vcpus_params(self):
2939 if self.getDomid() is None:
2940 return self.info['vcpus_params']
2942 retval = xc.sched_credit_domain_get(self.getDomid())
2943 return retval
2944 def get_power_state(self):
2945 return XEN_API_VM_POWER_STATE[self._stateGet()]
2946 def get_platform(self):
2947 return self.info.get('platform', {})
2948 def get_pci_bus(self):
2949 return self.info.get('pci_bus', '')
2950 def get_tools_version(self):
2951 return self.info.get('tools_version', {})
2952 def get_metrics(self):
2953 return self.metrics.get_uuid();
2956 def get_security_label(self, xspol=None):
2957 import xen.util.xsm.xsm as security
2958 label = security.get_security_label(self, xspol)
2959 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                       VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                       the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
           [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
             DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # A label is '<policy type>:<policy name>:<vm label>'.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        try:
            # NOTE(review): acquire_writer() sits inside the try whose
            # finally releases the lock -- if the acquire itself fails, the
            # release runs on an unheld lock; confirm policy_lock tolerates
            # that.
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best-effort save of the managed config.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3086 def get_on_shutdown(self):
3087 after_shutdown = self.info.get('actions_after_shutdown')
3088 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3089 return XEN_API_ON_NORMAL_EXIT[-1]
3090 return after_shutdown
3092 def get_on_reboot(self):
3093 after_reboot = self.info.get('actions_after_reboot')
3094 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3095 return XEN_API_ON_NORMAL_EXIT[-1]
3096 return after_reboot
3098 def get_on_suspend(self):
3099 # TODO: not supported
3100 after_suspend = self.info.get('actions_after_suspend')
3101 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3102 return XEN_API_ON_NORMAL_EXIT[-1]
3103 return after_suspend
3105 def get_on_crash(self):
3106 after_crash = self.info.get('actions_after_crash')
3107 if not after_crash or after_crash not in \
3108 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3109 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3110 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3112 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3113 """ Get's a device configuration either from XendConfig or
3114 from the DevController.
3116 @param dev_class: device class, either, 'vbd' or 'vif'
3117 @param dev_uuid: device UUID
3119 @rtype: dictionary
3120 """
3121 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3123 # shortcut if the domain isn't started because
3124 # the devcontrollers will have no better information
3125 # than XendConfig.
3126 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3127 XEN_API_VM_POWER_STATE_SUSPENDED):
3128 if dev_config:
3129 return copy.deepcopy(dev_config)
3130 return None
3132 # instead of using dev_class, we use the dev_type
3133 # that is from XendConfig.
3134 controller = self.getDeviceController(dev_type)
3135 if not controller:
3136 return None
3138 all_configs = controller.getAllDeviceConfigurations()
3139 if not all_configs:
3140 return None
3142 updated_dev_config = copy.deepcopy(dev_config)
3143 for _devid, _devcfg in all_configs.items():
3144 if _devcfg.get('uuid') == dev_uuid:
3145 updated_dev_config.update(_devcfg)
3146 updated_dev_config['id'] = _devid
3147 return updated_dev_config
3149 return updated_dev_config
3151 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3152 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3153 if not config:
3154 return {}
3156 config['VM'] = self.get_uuid()
3158 if dev_class == 'vif':
3159 if not config.has_key('name'):
3160 config['name'] = config.get('vifname', '')
3161 if not config.has_key('MAC'):
3162 config['MAC'] = config.get('mac', '')
3163 if not config.has_key('type'):
3164 config['type'] = 'paravirtualised'
3165 if not config.has_key('device'):
3166 devid = config.get('id')
3167 if devid != None:
3168 config['device'] = 'eth%s' % devid
3169 else:
3170 config['device'] = ''
3172 if not config.has_key('network'):
3173 try:
3174 bridge = config.get('bridge', None)
3175 if bridge is None:
3176 from xen.util import Brctl
3177 if_to_br = dict([(i,b)
3178 for (b,ifs) in Brctl.get_state().items()
3179 for i in ifs])
3180 vifname = "vif%s.%s" % (self.getDomid(),
3181 config.get('id'))
3182 bridge = if_to_br.get(vifname, None)
3183 config['network'] = \
3184 XendNode.instance().bridge_to_network(
3185 config.get('bridge')).get_uuid()
3186 except Exception:
3187 log.exception('bridge_to_network')
3188 # Ignore this for now -- it may happen if the device
3189 # has been specified using the legacy methods, but at
3190 # some point we're going to have to figure out how to
3191 # handle that properly.
3193 config['MTU'] = 1500 # TODO
3195 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3196 xennode = XendNode.instance()
3197 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3198 config['io_read_kbs'] = rx_bps/1024
3199 config['io_write_kbs'] = tx_bps/1024
3200 rx, tx = xennode.get_vif_stat(self.domid, devid)
3201 config['io_total_read_kbs'] = rx/1024
3202 config['io_total_write_kbs'] = tx/1024
3203 else:
3204 config['io_read_kbs'] = 0.0
3205 config['io_write_kbs'] = 0.0
3206 config['io_total_read_kbs'] = 0.0
3207 config['io_total_write_kbs'] = 0.0
3209 config['security_label'] = config.get('security_label', '')
3211 if dev_class == 'vbd':
3213 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3214 controller = self.getDeviceController(dev_class)
3215 devid, _1, _2 = controller.getDeviceDetails(config)
3216 xennode = XendNode.instance()
3217 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3218 config['io_read_kbs'] = rd_blkps
3219 config['io_write_kbs'] = wr_blkps
3220 else:
3221 config['io_read_kbs'] = 0.0
3222 config['io_write_kbs'] = 0.0
3224 config['VDI'] = config.get('VDI', '')
3225 config['device'] = config.get('dev', '')
3226 if ':' in config['device']:
3227 vbd_name, vbd_type = config['device'].split(':', 1)
3228 config['device'] = vbd_name
3229 if vbd_type == 'cdrom':
3230 config['type'] = XEN_API_VBD_TYPE[0]
3231 else:
3232 config['type'] = XEN_API_VBD_TYPE[1]
3234 config['driver'] = 'paravirtualised' # TODO
3235 config['image'] = config.get('uname', '')
3237 if config.get('mode', 'r') == 'r':
3238 config['mode'] = 'RO'
3239 else:
3240 config['mode'] = 'RW'
3242 if dev_class == 'vtpm':
3243 if not config.has_key('type'):
3244 config['type'] = 'paravirtualised' # TODO
3245 if not config.has_key('backend'):
3246 config['backend'] = "00000000-0000-0000-0000-000000000000"
3248 return config
3250 def get_dev_property(self, dev_class, dev_uuid, field):
3251 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3252 try:
3253 return config[field]
3254 except KeyError:
3255 raise XendError('Invalid property for device: %s' % field)
3257 def set_dev_property(self, dev_class, dev_uuid, field, value):
3258 self.info['devices'][dev_uuid][1][field] = value
3260 def get_vcpus_util(self):
3261 vcpu_util = {}
3262 xennode = XendNode.instance()
3263 if 'VCPUs_max' in self.info and self.domid != None:
3264 for i in range(0, self.info['VCPUs_max']):
3265 util = xennode.get_vcpu_util(self.domid, i)
3266 vcpu_util[str(i)] = util
3268 return vcpu_util
3270 def get_consoles(self):
3271 return self.info.get('console_refs', [])
3273 def get_vifs(self):
3274 return self.info.get('vif_refs', [])
3276 def get_vbds(self):
3277 return self.info.get('vbd_refs', [])
3279 def get_vtpms(self):
3280 return self.info.get('vtpm_refs', [])
3282 def get_dpcis(self):
3283 return XendDPCI.get_by_VM(self.info.get('uuid'))
3285 def get_dscsis(self):
3286 return XendDSCSI.get_by_VM(self.info.get('uuid'))
3288 def create_vbd(self, xenapi_vbd, vdi_image_path):
3289 """Create a VBD using a VDI from XendStorageRepository.
3291 @param xenapi_vbd: vbd struct from the Xen API
3292 @param vdi_image_path: VDI UUID
3293 @rtype: string
3294 @return: uuid of the device
3295 """
3296 xenapi_vbd['image'] = vdi_image_path
3297 if vdi_image_path.startswith('tap'):
3298 dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
3299 else:
3300 dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)
3302 if not dev_uuid:
3303 raise XendError('Failed to create device')
3305 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3306 XEN_API_VM_POWER_STATE_PAUSED):
3307 _, config = self.info['devices'][dev_uuid]
3309 if vdi_image_path.startswith('tap'):
3310 dev_control = self.getDeviceController('tap')
3311 else:
3312 dev_control = self.getDeviceController('vbd')
3314 try:
3315 devid = dev_control.createDevice(config)
3316 dev_control.waitForDevice(devid)
3317 self.info.device_update(dev_uuid,
3318 cfg_xenapi = {'devid': devid})
3319 except Exception, exn:
3320 log.exception(exn)
3321 del self.info['devices'][dev_uuid]
3322 self.info['vbd_refs'].remove(dev_uuid)
3323 raise
3325 return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            # Attach the device immediately and record the new devid.
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        # NOTE(review): 'config' is only bound in the RUNNING branch above,
        # so this line raises NameError for non-running domains; the
        # docstring also promises the device uuid while the devid is
        # returned here.  Left unchanged pending confirmation of what
        # callers expect.
        return config['devid']
3346 def create_vif(self, xenapi_vif):
3347 """Create VIF device from the passed struct in Xen API format.
3349 @param xenapi_vif: Xen API VIF Struct.
3350 @rtype: string
3351 @return: UUID
3352 """
3353 dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
3354 if not dev_uuid:
3355 raise XendError('Failed to create device')
3357 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3358 XEN_API_VM_POWER_STATE_PAUSED):
3360 _, config = self.info['devices'][dev_uuid]
3361 dev_control = self.getDeviceController('vif')
3363 try:
3364 devid = dev_control.createDevice(config)
3365 dev_control.waitForDevice(devid)
3366 self.info.device_update(dev_uuid,
3367 cfg_xenapi = {'devid': devid})
3368 except Exception, exn:
3369 log.exception(exn)
3370 del self.info['devices'][dev_uuid]
3371 self.info['vif_refs'].remove(dev_uuid)
3372 raise
3374 return dev_uuid
3376 def create_vtpm(self, xenapi_vtpm):
3377 """Create a VTPM device from the passed struct in Xen API format.
3379 @return: uuid of the device
3380 @rtype: string
3381 """
3383 if self._stateGet() not in (DOM_STATE_HALTED,):
3384 raise VmError("Can only add vTPM to a halted domain.")
3385 if self.get_vtpms() != []:
3386 raise VmError('Domain already has a vTPM.')
3387 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3388 if not dev_uuid:
3389 raise XendError('Failed to create device')
3391 return dev_uuid
3393 def create_console(self, xenapi_console):
3394 """ Create a console device from a Xen API struct.
3396 @return: uuid of device
3397 @rtype: string
3398 """
3399 if self._stateGet() not in (DOM_STATE_HALTED,):
3400 raise VmError("Can only add console to a halted domain.")
3402 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3403 if not dev_uuid:
3404 raise XendError('Failed to create device')
3406 return dev_uuid
3408 def set_console_other_config(self, console_uuid, other_config):
3409 self.info.console_update(console_uuid, 'other_config', other_config)
3411 def create_dpci(self, xenapi_pci):
3412 """Create pci device from the passed struct in Xen API format.
3414 @param xenapi_pci: DPCI struct from Xen API
3415 @rtype: bool
3416 #@rtype: string
3417 @return: True if successfully created device
3418 #@return: UUID
3419 """
3421 dpci_uuid = uuid.createString()
3423 # Convert xenapi to sxp
3424 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3426 target_pci_sxp = \
3427 ['pci',
3428 ['dev',
3429 ['domain', '0x%02x' % ppci.get_domain()],
3430 ['bus', '0x%02x' % ppci.get_bus()],
3431 ['slot', '0x%02x' % ppci.get_slot()],
3432 ['func', '0x%1x' % ppci.get_func()],
3433 ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3434 ['uuid', dpci_uuid]
3435 ],
3436 ['state', 'Initialising']
3439 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3441 old_pci_sxp = self._getDeviceInfo_pci(0)
3443 if old_pci_sxp is None:
3444 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3445 if not dev_uuid:
3446 raise XendError('Failed to create device')
3448 else:
3449 new_pci_sxp = ['pci']
3450 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3451 new_pci_sxp.append(existing_dev)
3452 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3454 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3455 self.info.device_update(dev_uuid, new_pci_sxp)
3457 xen.xend.XendDomain.instance().managed_config_save(self)
3459 else:
3460 try:
3461 self.device_configure(target_pci_sxp)
3463 except Exception, exn:
3464 raise XendError('Failed to create device')
3466 return dpci_uuid
3468 def create_dscsi(self, xenapi_dscsi):
3469 """Create scsi device from the passed struct in Xen API format.
3471 @param xenapi_dscsi: DSCSI struct from Xen API
3472 @rtype: string
3473 @return: UUID
3474 """
3476 dscsi_uuid = uuid.createString()
3478 # Convert xenapi to sxp
3479 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
3480 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
3481 target_vscsi_sxp = \
3482 ['vscsi',
3483 ['dev',
3484 ['devid', devid],
3485 ['p-devname', pscsi.get_dev_name()],
3486 ['p-dev', pscsi.get_physical_HCTL()],
3487 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
3488 ['state', xenbusState['Initialising']],
3489 ['uuid', dscsi_uuid]
3493 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3495 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
3497 if cur_vscsi_sxp is None:
3498 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
3499 if not dev_uuid:
3500 raise XendError('Failed to create device')
3502 else:
3503 new_vscsi_sxp = ['vscsi']
3504 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
3505 new_vscsi_sxp.append(existing_dev)
3506 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
3508 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3509 self.info.device_update(dev_uuid, new_vscsi_sxp)
3511 xen.xend.XendDomain.instance().managed_config_save(self)
3513 else:
3514 try:
3515 self.device_configure(target_vscsi_sxp)
3517 except Exception, exn:
3518 raise XendError('Failed to create device')
3520 return dscsi_uuid
3523 def destroy_device_by_uuid(self, dev_type, dev_uuid):
3524 if dev_uuid not in self.info['devices']:
3525 raise XendError('Device does not exist')
3527 try:
3528 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3529 XEN_API_VM_POWER_STATE_PAUSED):
3530 _, config = self.info['devices'][dev_uuid]
3531 devid = config.get('devid')
3532 if devid != None:
3533 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
3534 else:
3535 raise XendError('Unable to get devid for device: %s:%s' %
3536 (dev_type, dev_uuid))
3537 finally:
3538 del self.info['devices'][dev_uuid]
3539 self.info['%s_refs' % dev_type].remove(dev_uuid)
3541 def destroy_vbd(self, dev_uuid):
3542 self.destroy_device_by_uuid('vbd', dev_uuid)
3544 def destroy_vif(self, dev_uuid):
3545 self.destroy_device_by_uuid('vif', dev_uuid)
3547 def destroy_vtpm(self, dev_uuid):
3548 self.destroy_device_by_uuid('vtpm', dev_uuid)
3550 def destroy_dpci(self, dev_uuid):
3552 dpci = XendAPIStore.get(dev_uuid, 'DPCI')
3553 ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')
3555 old_pci_sxp = self._getDeviceInfo_pci(0)
3556 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3557 target_dev = None
3558 new_pci_sxp = ['pci']
3559 for dev in sxp.children(old_pci_sxp, 'dev'):
3560 domain = int(sxp.child_value(dev, 'domain'), 16)
3561 bus = int(sxp.child_value(dev, 'bus'), 16)
3562 slot = int(sxp.child_value(dev, 'slot'), 16)
3563 func = int(sxp.child_value(dev, 'func'), 16)
3564 name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
3565 if ppci.get_name() == name:
3566 target_dev = dev
3567 else:
3568 new_pci_sxp.append(dev)
3570 if target_dev is None:
3571 raise XendError('Failed to destroy device')
3573 target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]
3575 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3577 self.info.device_update(dev_uuid, new_pci_sxp)
3578 if len(sxp.children(new_pci_sxp, 'dev')) == 0:
3579 del self.info['devices'][dev_uuid]
3580 xen.xend.XendDomain.instance().managed_config_save(self)
3582 else:
3583 try:
3584 self.device_configure(target_pci_sxp)
3586 except Exception, exn:
3587 raise XendError('Failed to destroy device')
3589 def destroy_dscsi(self, dev_uuid):
3590 dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
3591 devid = dscsi.get_virtual_host()
3592 vHCTL = dscsi.get_virtual_HCTL()
3593 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
3594 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3596 target_dev = None
3597 new_vscsi_sxp = ['vscsi']
3598 for dev in sxp.children(cur_vscsi_sxp, 'dev'):
3599 if vHCTL == sxp.child_value(dev, 'v-dev'):
3600 target_dev = dev
3601 else:
3602 new_vscsi_sxp.append(dev)
3604 if target_dev is None:
3605 raise XendError('Failed to destroy device')
3607 target_dev.append(['state', xenbusState['Closing']])
3608 target_vscsi_sxp = ['vscsi', target_dev]
3610 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3612 self.info.device_update(dev_uuid, new_vscsi_sxp)
3613 if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
3614 del self.info['devices'][dev_uuid]
3615 xen.xend.XendDomain.instance().managed_config_save(self)
3617 else:
3618 try:
3619 self.device_configure(target_vscsi_sxp)
3621 except Exception, exn:
3622 raise XendError('Failed to destroy device')
3624 def destroy_xapi_instances(self):
3625 """Destroy Xen-API instances stored in XendAPIStore.
3626 """
3627 # Xen-API classes based on XendBase have their instances stored
3628 # in XendAPIStore. Cleanup these instances here, if they are supposed
3629 # to be destroyed when the parent domain is dead.
3631 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3632 # XendBase and there's no need to remove them from XendAPIStore.
3634 from xen.xend import XendDomain
3635 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3636 # domain still exists.
3637 return
3639 # Destroy the VMMetrics instance.
3640 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3641 is not None:
3642 self.metrics.destroy()
3644 # Destroy DPCI instances.
3645 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3646 XendAPIStore.deregister(dpci_uuid, "DPCI")
3648 # Destroy DSCSI instances.
3649 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3650 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3652 def has_device(self, dev_class, dev_uuid):
3653 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3655 def __str__(self):
3656 return '<domain id=%s name=%s memory=%s state=%s>' % \
3657 (str(self.domid), self.info['name_label'],
3658 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3660 __repr__ = __str__