ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 18844:8dbf23c89cc6

xend: Fix device release for tap devices

I saw an error message when I shut down a domain. The error
message showed that release of device(vbd/51712) failed. But the
device was tap, not vbd. I think the cause of the error message is
that _releaseDevices() calls destroyDevice() with the wrong device class.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Nov 28 13:05:58 2008 +0000 (2008-11-28)
parents 857bda0c15b3
children 1099be706d4e
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
65 MIGRATE_TIMEOUT = 30.0
66 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
68 xc = xen.lowlevel.xc.xc()
69 xoptions = XendOptions.instance()
71 log = logging.getLogger("xend.XendDomainInfo")
72 #log.setLevel(logging.TRACE)
def create(config):
    """Create and start a VM from an SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type  config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    xd = XendDomain.instance()

    # Refuse to create a duplicate: a running domain with the same name,
    # or failing that the same UUID, must not already exist.
    existing = xd.domain_lookup_nr(domconfig["name_label"])
    if existing is None or existing.domid is None:
        existing = xd.domain_lookup_nr(domconfig["uuid"])
    if existing is not None and existing.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], existing.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
        return vm
    except:
        # Construction failed part-way: tear down whatever was built.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def create_from_dict(config_dict):
    """Create and start a VM from a configuration dictionary.

    @param config_dict: A configuration dictionary.

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    domconfig = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
        return vm
    except:
        # Clean up the half-built domain before propagating the failure.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration of the existing domain
    @type  info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type  priv: bool

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """

    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        # Raising XendError('reinit') below is used as an early exit from
        # this verification sequence; the handler deliberately ignores it.
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # The xenstore records were missing/inconsistent: rebuild them
        # from the configuration we have.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type  config: list of lists. (see C{create})

    @rtype:  XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    vm = XendDomainInfo(domconfig, resume = True)
    try:
        vm.resume()
    except:
        # Resume failed: destroy the partially-restored domain.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XendDomainInfo without creating the VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type  domconfig: XendConfig object

    @rtype:  XendDomainInfo
    @return: A dormant (not running) XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look a domain up by its name.

    @param name: Name of the domain
    @type  name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Map a numeric shutdown code to its human-readable reason.

    @param code: shutdown code
    @type  code: int
    @return: shutdown reason ("?" for an unknown code)
    @rtype: string
    """
    try:
        return DOMAIN_SHUTDOWN_REASONS[code]
    except KeyError:
        return "?"
def dom_get(dom):
    """Get info from xen for an existing domain.

    @param dom: domain id
    @type  dom: int
    @return: info or None
    @rtype: dictionary
    """
    try:
        # Ask for a single record starting at 'dom'; the lookup only
        # counts if the first record returned really is that domid.
        domlist = xc.domain_getinfo(dom, 1)
        if domlist and dom == domlist[0]['domid']:
            return domlist[0]
    except Exception, err:
        # ignore missing domain
        log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
    return None
293 def do_FLR(domid):
294 from xen.xend.server.pciif import parse_pci_name, PciDevice
295 path = '/local/domain/0/backend/pci/%u/0/' % domid
296 num_devs = xstransact.Read(path + 'num_devs');
297 if num_devs is None or num_devs == "":
298 return;
300 num_devs = int(xstransact.Read(path + 'num_devs'));
302 dev_str_list = []
303 for i in range(num_devs):
304 dev_str = xstransact.Read(path + 'dev-%i' % i)
305 dev_str_list = dev_str_list + [dev_str]
307 for dev_str in dev_str_list:
308 (dom, b, d, f) = parse_pci_name(dev_str)
309 try:
310 dev = PciDevice(dom, b, d, f)
311 except Exception, e:
312 raise VmError("pci: failed to locate device and "+
313 "parse it's resources - "+str(e))
314 dev.do_FLR()
316 class XendDomainInfo:
317 """An object represents a domain.
319 @TODO: try to unify dom and domid, they mean the same thing, but
320 xc refers to it as dom, and everywhere else, including
321 xenstore it is domid. The best way is to change xc's
322 python interface.
324 @ivar info: Parsed configuration
325 @type info: dictionary
326 @ivar domid: Domain ID (if VM has started)
327 @type domid: int or None
328 @ivar vmpath: XenStore path to this VM.
329 @type vmpath: string
330 @ivar dompath: XenStore path to this Domain.
331 @type dompath: string
332 @ivar image: Reference to the VM Image.
333 @type image: xen.xend.image.ImageHandler
334 @ivar store_port: event channel to xenstored
335 @type store_port: int
336 @ivar console_port: event channel to xenconsoled
337 @type console_port: int
338 @ivar store_mfn: xenstored mfn
339 @type store_mfn: int
340 @ivar console_mfn: xenconsoled mfn
341 @type console_mfn: int
342 @ivar notes: OS image notes
343 @type notes: dictionary
344 @ivar vmWatch: reference to a watch on the xenstored vmpath
345 @type vmWatch: xen.xend.xenstore.xswatch
346 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
347 @type shutdownWatch: xen.xend.xenstore.xswatch
348 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
349 @type shutdownStartTime: float or None
350 # @ivar state: Domain state
351 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
352 @ivar state_updated: lock for self.state
353 @type state_updated: threading.Condition
354 @ivar refresh_shutdown_lock: lock for polling shutdown state
355 @type refresh_shutdown_lock: threading.Condition
356 @ivar _deviceControllers: device controller cache for this domain
357 @type _deviceControllers: dict 'string' to DevControllers
358 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type  info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type    domid: int
        @keyword dompath: Set initial dompath (if any)
        @type    dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type    augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type    priv: bool
        @keyword resume: Is this domain being resumed?
        @type    resume: bool
        """

        self.info = info
        # Fall back to the domid carried in the configuration when the
        # caller does not supply one explicitly.
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            # Probe /vm/<uuid>[-i] until we find a path whose "uuid" key
            # cannot be read, i.e. one not already claimed by another VM.
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                # Read failure means the candidate path is free; keep it.
                pass

        self.dompath = dompath

        # Runtime state filled in when the domain is constructed/started.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # xenstore watches and shutdown-tracking state.
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        # Legacy state flags, all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
436 #
437 # Public functions available through XMLRPC
438 #
    def start(self, is_managed = False):
        """Attempt to start the VM by doing the appropriate
        initialisation if it is not already started.

        @keyword is_managed: save the running configuration if True
        @type    is_managed: bool
        @raise XendError: the VM is already running.
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each stage reports task progress as a percentage range.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                      self.getWeight(),
                                                      self.getCap())
            except:
                # Any failure during start-up destroys the half-built domain.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resume a domain that has come back from suspension.

        @raise XendError: the domain is not suspended or halted.
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Rebuild xenstore records, channels and devices before
                # completing the restore.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of DOMAIN_SHUTDOWN_REASONS values
        @type  reason: string
        @raise XendError: domain is already shut down / halted, is
            Domain 0, or the reason is invalid.
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        self._removeVm('xend/previous_restart_time')
        # Signal the guest through the xenstore control node.
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            if not hvm_pvdrv:
                # No PV drivers: force the shutdown from the hypervisor side.
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
525 def pause(self):
526 """Pause domain
528 @raise XendError: Failed pausing a domain
529 """
530 try:
531 xc.domain_pause(self.domid)
532 self._stateSet(DOM_STATE_PAUSED)
533 except Exception, ex:
534 log.exception(ex)
535 raise XendError("Domain unable to be paused: %s" % str(ex))
537 def unpause(self):
538 """Unpause domain
540 @raise XendError: Failed unpausing a domain
541 """
542 try:
543 xc.domain_unpause(self.domid)
544 self._stateSet(DOM_STATE_RUNNING)
545 except Exception, ex:
546 log.exception(ex)
547 raise XendError("Domain unable to be unpaused: %s" % str(ex))
549 def send_sysrq(self, key):
550 """ Send a Sysrq equivalent key via xenstored."""
551 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
552 raise XendError("Domain '%s' is not started" % self.info['name_label'])
554 asserts.isCharConvertible(key)
555 self.storeDom("control/sysrq", '%c' % key)
    def sync_pcidev_info(self):
        """Synchronise the virtual-slot ('vslt') info of this HVM
        domain's passed-through PCI devices from xenstore into
        self.info.  No-op for PV guests or when no pci device node
        exists."""

        if not self.info.is_hvm():
            return

        #all the PCI devs share one conf node
        devid = '0'
        dev_info = self._getDeviceInfo_pci(devid)
        if dev_info is None:
            return

        # get the virtual slot info from xenstore
        dev_uuid = sxp.child_value(dev_info, 'uuid')
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_devs = pci_conf['devs']

        # Poll for up to ~2 seconds waiting for the device model to
        # publish the vslots key.
        count = 0
        vslots = None
        while vslots is None and count < 20:
            vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
                                     % (self.getDomid(), devid))
            time.sleep(0.1)
            count += 1
        if vslots is None:
            log.error("Device model didn't tell the vslots for PCI device")
            return

        #delete last delim
        if vslots[-1] == ";":
            vslots = vslots[:-1]

        slot_list = vslots.split(';')
        if len(slot_list) != len(pci_devs):
            log.error("Device model's pci dev num dismatch")
            return

        #update the vslot info
        count = 0;
        for x in pci_devs:
            x['vslt'] = slot_list[count]
            count += 1
    def hvm_pci_device_create(self, dev_config):
        """Hot-plug a PCI pass-through device into this HVM domain.

        @param dev_config: parsed pci device configuration; only the
            first entry of dev_config['devs'] is attached.
        @raise VmError: non-HVM guest, slot/device conflict, or the
            device cannot be assigned (VT-d).
        """
        log.debug("XendDomainInfo.hvm_pci_device_create: %s"
                  % scrub_password(dev_config))

        if not self.info.is_hvm():
            raise VmError("hvm_pci_device_create called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'

        new_dev = dev_config['devs'][0]
        dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']

        #check conflict before trigger hotplug event
        if dev_info is not None:
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            pci_conf = self.info['devices'][dev_uuid][1]
            pci_devs = pci_conf['devs']
            for x in pci_devs:
                # Reject if the requested virtual slot is already occupied
                # (slot 0 is exempt from this check) ...
                if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
                    int(x['vslt'], 16) != 0 ):
                    raise VmError("vslot %s already have a device." % (new_dev['vslt']))

                # ... or if the same physical BDF is already inserted.
                if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
                    int(x['bus'], 16) == int(new_dev['bus'], 16) and
                    int(x['slot'], 16) == int(new_dev['slot'], 16) and
                    int(x['func'], 16) == int(new_dev['func'], 16) ):
                    raise VmError("device is already inserted")

        # Test whether the devices can be assigned with VT-d
        pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'])
        bdf = xc.test_assign_device(self.domid, pci_str)
        if bdf != 0:
            if bdf == -1:
                raise VmError("failed to assign device: maybe the platform"
                              " doesn't support VT-d, or VT-d isn't enabled"
                              " properly?")
            # Non-zero, non -1 return encodes the conflicting BDF.
            bus = (bdf >> 16) & 0xff
            devfn = (bdf >> 8) & 0xff
            dev = (devfn >> 3) & 0x1f
            func = devfn & 0x7
            raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                          " already been assigned to other domain, or maybe"
                          " it doesn't exist." % (bus, dev, func))

        # Ask the device model to perform the actual insertion.
        bdf_str = "%s:%s:%s.%s@%s" % (new_dev['domain'],
                                      new_dev['bus'],
                                      new_dev['slot'],
                                      new_dev['func'],
                                      new_dev['vslt'])
        self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type  dev_config: SXP object (parsed config)
        @return: SXP representation of the created device
        @raise VmError: device creation failed (config rolled back).
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if self.domid is not None:
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Roll back the config added above before re-raising.
                del self.info['devices'][dev_uuid]
                # NOTE(review): the 'pci' branch below is a standalone if,
                # not part of the following if/elif chain, so for 'pci' the
                # trailing else also removes dev_uuid from 'pci_refs' after
                # deregistering the DPCIs — looks intentional, but confirm.
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                if dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked under the vbd refs.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                self.hvm_pci_device_create(dev_config)
                # Update vslt
                # The device model reports the assigned virtual slot here.
                vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                       % self.getDomid())
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt
            else:
                # HVM PCI device detachment
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                # Find the virtual slot of the BDF being detached; '0x0'
                # doubles as the "not found" sentinel.
                vslt = '0x0'
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslt = x['vslt']
                        break
                if vslt == '0x0':
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslt, 16))
                # Update vslt
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device.
        (Modelled on pci_device_configure above.)

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @return: True if the device was successfully updated
        @rtype: boolean
        """
        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]
        req_devid = int(dev['devid'])
        existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
        state = dev['state']

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            if existing_dev_info is None:
                self.device_create(dev_sxp)
                return True
            elif existing_dev_info == "exists":
                raise XendError("The virtual device %s is already defined" % dev['v-dev'])

        elif state == xenbusState['Closing']:
            if existing_dev_info is None:
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # Running domain: reconfigure through the device controller.
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Dormant domain: rebuild the device SXP directly in the config.
            cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid, None)
            new_dev_sxp = ['vscsi']
            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # Drop the entry being detached; keep all others.
                    cur_dev_vdev = sxp.child_value(cur_dev, 'v-dev')
                    if cur_dev_vdev == dev['v-dev']:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                new_dev_sxp.append(sxp.child0(dev_sxp, 'dev'))

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        @param dev_sxp: device configuration
        @type  dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type  devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """

        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        # pci and vscsi have their own dedicated configure paths.
        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                # Entries without a value are simply skipped.
                pass

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)

        # update XendConfig with new device info
        if dev_uuid:
            self.info.device_update(dev_uuid, dev_sxp)

        return True
900 def waitForDevices(self):
901 """Wait for this domain's configured devices to connect.
903 @raise VmError: if any device fails to initialise.
904 """
905 for devclass in XendDevices.valid_devices():
906 self.getDeviceController(devclass).waitForDevices()
    def hvm_destroyPCIDevice(self, vslot):
        """Hot-unplug the PCI pass-through device in the given virtual
        slot from this HVM domain.

        @param vslot: virtual slot number of the device to remove
        @raise VmError: non-HVM guest, unknown slot, or slot 0 (which
            does not support hotplug).
        """
        log.debug("hvm_destroyPCIDevice called %s", vslot)

        if not self.info.is_hvm():
            raise VmError("hvm_destroyPCIDevice called on non-HVM guest")

        #all the PCI devs share one conf node
        devid = '0'
        vslot = int(vslot)
        dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
        dev_uuid = sxp.child_value(dev_info, 'uuid')

        #delete the pci bdf config under the pci device
        pci_conf = self.info['devices'][dev_uuid][1]
        pci_len = len(pci_conf['devs'])

        #find the pass-through device with the virtual slot
        devnum = 0
        for x in pci_conf['devs']:
            if int(x['vslt'], 16) == vslot:
                break
            devnum += 1

        if devnum >= pci_len:
            raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))

        if vslot == 0:
            raise VmError("Device @ vslot 0x%x do not support hotplug." % (vslot))

        # NOTE: 'x' deliberately carries the matched entry out of the
        # loop above (the devnum bound check guarantees a match here).
        bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
        log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)

        # Ask the device model to remove the device.
        self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)

        return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Destroy a device of this domain, optionally removing its
        configuration as well.

        @param deviceClass: device class, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id
        @keyword force: destroy without waiting for the frontend
        @keyword rm_cfg: also remove the device's configuration
        @return: result of the controller's destroyDevice, or None if
            the domain is not running
        @raise XendError: rm_cfg requested but the device is not defined.
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running domain: find the vif config via its MAC,
                    # recovered from the saved sxprs.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
1006 def getDeviceSxprs(self, deviceClass):
1007 if deviceClass == 'pci':
1008 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1009 if dev_info is None:
1010 return []
1011 dev_uuid = sxp.child_value(dev_info, 'uuid')
1012 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1013 pci_len = len(pci_devs)
1014 return pci_devs
1015 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1016 return self.getDeviceController(deviceClass).sxprs()
1017 else:
1018 sxprs = []
1019 dev_num = 0
1020 for dev_type, dev_info in self.info.all_devices_sxpr():
1021 if dev_type != deviceClass:
1022 continue
1024 if deviceClass == 'vscsi':
1025 vscsi_devs = ['devs', []]
1026 for vscsi_dev in sxp.children(dev_info, 'dev'):
1027 vscsi_dev.append(['frontstate', None])
1028 vscsi_devs[1].append(vscsi_dev)
1029 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1030 sxprs.append([dev_num, [vscsi_devs]])
1031 else:
1032 sxprs.append([dev_num, dev_info])
1033 dev_num += 1
1034 return sxprs
1036 def getBlockDeviceClass(self, devid):
1037 # To get a device number from the devid,
1038 # we temporarily use the device controller of VBD.
1039 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1040 dev_info = self._getDeviceInfo_vbd(dev)
1041 if dev_info:
1042 return dev_info[0]
1044 def _getDeviceInfo_vif(self, mac):
1045 for dev_type, dev_info in self.info.all_devices_sxpr():
1046 if dev_type != 'vif':
1047 continue
1048 if mac == sxp.child_value(dev_info, 'mac'):
1049 return dev_info
1051 def _getDeviceInfo_vbd(self, devid):
1052 for dev_type, dev_info in self.info.all_devices_sxpr():
1053 if dev_type != 'vbd' and dev_type != 'tap':
1054 continue
1055 dev = sxp.child_value(dev_info, 'dev')
1056 dev = dev.split(':')[0]
1057 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1058 if devid == dev:
1059 return dev_info
1061 def _getDeviceInfo_pci(self, devid):
1062 for dev_type, dev_info in self.info.all_devices_sxpr():
1063 if dev_type != 'pci':
1064 continue
1065 return dev_info
1066 return None
1068 def _getDeviceInfo_vscsi(self, devid, vdev):
1069 devid = int(devid)
1070 for dev_type, dev_info in self.info.all_devices_sxpr():
1071 if dev_type != 'vscsi':
1072 continue
1073 existing_dev_uuid = sxp.child_value(dev_info, 'uuid')
1074 existing_conf = self.info['devices'][existing_dev_uuid][1]
1075 existing_dev = existing_conf['devs'][0]
1076 existing_devid = int(existing_dev['devid'])
1077 existing_vdev = existing_dev['v-dev']
1079 if vdev == existing_vdev:
1080 return "exists"
1082 if devid == existing_devid:
1083 return dev_info
1085 return None
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: for dom0, if the target is below both the current
            dynamic maximum and the configured dom0 minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024

        if self.domid == 0:
            # Refuse to balloon dom0 below its configured floor.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            memory_cur = self.get_memory_dynamic_max() / MiB
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            # memory/target is in KiB, hence the << 10 (MiB -> KiB).
            self.storeVm("memory", target)
            self.storeDom("memory/target", target << 10)
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor call fails; the previous
            static maximum is restored first.
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # xc expects KiB.
            maxmem = int(limit) * 1024
            try:
                # NOTE(review): returning here skips managed_config_save
                # below for running domains -- confirm this is intended
                # (setMemoryTarget does save).
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the stored value on hypervisor failure.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain.

        For a running domain the per-VCPU data comes from the hypervisor;
        otherwise placeholder values are synthesised from self.info.
        @raise XendError: wrapping any RuntimeError from xc.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    # Live data from the hypervisor; cpu_time is reported
                    # in nanoseconds, converted here to seconds.
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number',   i],
                                 ['online',   info['online']],
                                 ['blocked',  info['blocked']],
                                 ['running',  info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu',      info['cpu']],
                                 ['cpumap',   info['cpumap']]])
                else:
                    # Domain not running: report offline placeholders;
                    # fall back to a full 64-cpu map when no affinity is
                    # stored for this VCPU.
                    sxpr.append(['vcpu',
                                 ['number',   i],
                                 ['online',   0],
                                 ['blocked',  0],
                                 ['running',  0],
                                 ['cpu_time', 0.0],
                                 ['cpu',      -1],
                                 ['cpumap',   self.info['cpus'][i] and \
                                              self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
1168 def getDomInfo(self):
1169 return dom_get(self.domid)
1172 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store.  This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0, whose memory/vcpu settings come from
            Xen itself rather than the store.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # dom0: these are authoritative in Xen, not in the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                # Map legacy names to their XenAPI equivalents where one
                # exists; 'memory'/'maxmem' have dedicated fields.
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        # Only populate from the store when we have nothing recorded yet.
        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Refresh console records (VT100 port and VNC port) from xenstore.

        @param transaction: optional existing xenstore transaction; when
            None, standalone reads are used.
        """
        # Not applicable before the domain exists, and never for dom0.
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                # First sighting: create the console device record.
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', 'localhost')
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Nothing to do if the location is already current.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1279 # Function to update xenstore /vm/*
1282 def _readVm(self, *args):
1283 return xstransact.Read(self.vmpath, *args)
1285 def _writeVm(self, *args):
1286 return xstransact.Write(self.vmpath, *args)
1288 def _removeVm(self, *args):
1289 return xstransact.Remove(self.vmpath, *args)
1291 def _gatherVm(self, *args):
1292 return xstransact.Gather(self.vmpath, *args)
1294 def _listRecursiveVm(self, *args):
1295 return xstransact.ListRecursive(self.vmpath, *args)
1297 def storeVm(self, *args):
1298 return xstransact.Store(self.vmpath, *args)
1300 def permissionsVm(self, *args):
1301 return xstransact.SetPermissions(self.vmpath, *args)
1304 # Function to update xenstore /dom/*
1307 def readDom(self, *args):
1308 return xstransact.Read(self.dompath, *args)
1310 def gatherDom(self, *args):
1311 return xstransact.Gather(self.dompath, *args)
1313 def _writeDom(self, *args):
1314 return xstransact.Write(self.dompath, *args)
1316 def _removeDom(self, *args):
1317 return xstransact.Remove(self.dompath, *args)
1319 def storeDom(self, *args):
1320 return xstransact.Store(self.dompath, *args)
1323 def readDomTxn(self, transaction, *args):
1324 paths = map(lambda x: self.dompath + "/" + x, args)
1325 return transaction.read(*paths)
1327 def gatherDomTxn(self, transaction, *args):
1328 paths = map(lambda x: self.dompath + "/" + x, args)
1329 return transaction.gather(*paths)
1331 def _writeDomTxn(self, transaction, *args):
1332 paths = map(lambda x: self.dompath + "/" + x, args)
1333 return transaction.write(*paths)
1335 def _removeDomTxn(self, transaction, *args):
1336 paths = map(lambda x: self.dompath + "/" + x, args)
1337 return transaction.remove(*paths)
1339 def storeDomTxn(self, transaction, *args):
1340 paths = map(lambda x: self.dompath + "/" + x, args)
1341 return transaction.store(*paths)
1344 def _recreateDom(self):
1345 complete(self.dompath, lambda t: self._recreateDomFunc(t))
1347 def _recreateDomFunc(self, t):
1348 t.remove()
1349 t.mkdir()
1350 t.set_permissions({'dom' : self.domid, 'read' : True})
1351 t.write('vm', self.vmpath)
1352 for i in [ 'device', 'control', 'error', 'memory' ]:
1353 t.mkdir(i)
1354 t.set_permissions(i, {'dom' : self.domid})
1356 def _storeDomDetails(self):
1357 to_store = {
1358 'domid': str(self.domid),
1359 'vm': self.vmpath,
1360 'name': self.info['name_label'],
1361 'console/limit': str(xoptions.get_console_limit() * 1024),
1362 'memory/target': str(self.info['memory_dynamic_max'] / 1024),
1365 def f(n, v):
1366 if v is not None:
1367 if type(v) == bool:
1368 to_store[n] = v and "1" or "0"
1369 else:
1370 to_store[n] = str(v)
1372 # Figure out if we need to tell xenconsoled to ignore this guest's
1373 # console - device model will handle console if it is running
1374 constype = "ioemu"
1375 if 'device_model' not in self.info['platform']:
1376 constype = "xenconsoled"
1378 f('console/port', self.console_port)
1379 f('console/ring-ref', self.console_mfn)
1380 f('console/type', constype)
1381 f('store/port', self.store_port)
1382 f('store/ring-ref', self.store_mfn)
1384 if arch.type == "x86":
1385 f('control/platform-feature-multiprocessor-suspend', True)
1387 # elfnotes
1388 for n, v in self.info.get_notes().iteritems():
1389 n = n.lower().replace('_', '-')
1390 if n == 'features':
1391 for v in v.split('|'):
1392 v = v.replace('_', '-')
1393 if v.startswith('!'):
1394 f('image/%s/%s' % (n, v[1:]), False)
1395 else:
1396 f('image/%s/%s' % (n, v), True)
1397 else:
1398 f('image/%s' % n, v)
1400 if self.info.has_key('security_label'):
1401 f('security_label', self.info['security_label'])
1403 to_store.update(self._vcpuDomDetails())
1405 log.debug("Storing domain details: %s", scrub_password(to_store))
1407 self._writeDom(to_store)
1409 def _vcpuDomDetails(self):
1410 def availability(n):
1411 if self.info['vcpu_avail'] & (1 << n):
1412 return 'online'
1413 else:
1414 return 'offline'
1416 result = {}
1417 for v in range(0, self.info['VCPUs_max']):
1418 result["cpu/%d/availability" % v] = availability(v)
1419 return result
1422 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date.  This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        # Callbacks: _storeChanged re-syncs config, _handleShutdownWatch
        # schedules shutdown-timeout handling.
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read config values from xenstore and fold
        any changes back into self.info, rewriting the dom tree if needed.

        @return: 1 so the watch stays registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                           for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        # Treat an absent rtc offset as zero for the comparison below.
        if vm_details['rtc/timeoffset'] == None:
            vm_details['rtc/timeoffset'] = "0"

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Check if the rtc offset has changes
        if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
            self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
            changed = True

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self._storeDomDetails()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: record when the shutdown
        request started and schedule a refreshShutdown check at the
        timeout deadline.

        @return: True so the watch stays registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        # 'suspend' is handled by the checkpoint machinery, not here.
        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in flight: time out relative to
                # its recorded start.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1513 # Public Attributes for the VM
1517 def getDomid(self):
1518 return self.domid
1520 def setName(self, name, to_store = True):
1521 self._checkName(name)
1522 self.info['name_label'] = name
1523 if to_store:
1524 self.storeVm("name", name)
1526 def getName(self):
1527 return self.info['name_label']
1529 def getDomainPath(self):
1530 return self.dompath
1532 def getShutdownReason(self):
1533 return self.readDom('control/shutdown')
1535 def getStorePort(self):
1536 """For use only by image.py and XendCheckpoint.py."""
1537 return self.store_port
1539 def getConsolePort(self):
1540 """For use only by image.py and XendCheckpoint.py"""
1541 return self.console_port
1543 def getFeatures(self):
1544 """For use only by image.py."""
1545 return self.info['features']
1547 def getVCpuCount(self):
1548 return self.info['VCPUs_max']
1550 def setVCpuCount(self, vcpus):
1551 def vcpus_valid(n):
1552 if vcpus <= 0:
1553 raise XendError('Zero or less VCPUs is invalid')
1554 if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
1555 raise XendError('Cannot set vcpus greater than max vcpus on running domain')
1556 vcpus_valid(vcpus)
1558 self.info['vcpu_avail'] = (1 << vcpus) - 1
1559 if self.domid >= 0:
1560 self.storeVm('vcpu_avail', self.info['vcpu_avail'])
1561 self._writeDom(self._vcpuDomDetails())
1562 self.info['VCPUs_live'] = vcpus
1563 else:
1564 if self.info['VCPUs_max'] > vcpus:
1565 # decreasing
1566 del self.info['cpus'][vcpus:]
1567 elif self.info['VCPUs_max'] < vcpus:
1568 # increasing
1569 for c in range(self.info['VCPUs_max'], vcpus):
1570 self.info['cpus'].append(list())
1571 self.info['VCPUs_max'] = vcpus
1572 xen.xend.XendDomain.instance().managed_config_save(self)
1573 log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
1574 vcpus)
1576 def getMemoryTarget(self):
1577 """Get this domain's target memory size, in KB."""
1578 return self.info['memory_dynamic_max'] / 1024
1580 def getMemoryMaximum(self):
1581 """Get this domain's maximum memory size, in KB."""
1582 # remember, info now stores memory in bytes
1583 return self.info['memory_static_max'] / 1024
1585 def getResume(self):
1586 return str(self._resume)
1588 def setResume(self, isresume):
1589 self._resume = isresume
1591 def getCpus(self):
1592 return self.info['cpus']
1594 def setCpus(self, cpumap):
1595 self.info['cpus'] = cpumap
1597 def getCap(self):
1598 return self.info['vcpus_params']['cap']
1600 def setCap(self, cpu_cap):
1601 self.info['vcpus_params']['cap'] = cpu_cap
1603 def getWeight(self):
1604 return self.info['vcpus_params']['weight']
1606 def setWeight(self, cpu_weight):
1607 self.info['vcpus_params']['weight'] = cpu_weight
1609 def getRestartCount(self):
1610 return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor record; fetched
            via dom_get when None.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason.  This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists.  This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded.  It will also occur if someone
                    # destroys a domain beneath us.  We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen.  This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped.  We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain.  We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging.  Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain.  XendCheckpoint will do
                        # this once it has finished.  However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain.  This can happen if a restore is in progress, or has
                # failed.  Ignore this domain.
                pass
            else:
                # Domain is alive.  If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    # Flag the domain as unresponsive exactly once after
                    # the shutdown deadline has passed.
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock: _maybeRestart may take a long time and
        # may re-enter this machinery.
        if restart_reason:
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1722 # Restart functions - handling whether we come back up on shutdown.
1725 def _clearRestart(self):
1726 self._removeDom("xend/shutdown_start_time")
1728 def _maybeDumpCore(self, reason):
1729 if reason == 'crash':
1730 if xoptions.get_enable_dump() or self.get_on_crash() \
1731 in ['coredump_and_destroy', 'coredump_and_restart']:
1732 try:
1733 self.dumpCore()
1734 except XendError:
1735 # This error has been logged -- there's nothing more
1736 # we can do in this context.
1737 pass
1739 def _maybeRestart(self, reason):
1740 # Before taking configured action, dump core if configured to do so.
1742 self._maybeDumpCore(reason)
1744 # Dispatch to the correct method based upon the configured on_{reason}
1745 # behaviour.
1746 actions = {"destroy" : self.destroy,
1747 "restart" : self._restart,
1748 "preserve" : self._preserve,
1749 "rename-restart" : self._renameRestart,
1750 "coredump-destroy" : self.destroy,
1751 "coredump-restart" : self._restart}
1753 action_conf = {
1754 'poweroff': 'actions_after_shutdown',
1755 'reboot': 'actions_after_reboot',
1756 'crash': 'actions_after_crash',
1759 action_target = self.info.get(action_conf.get(reason))
1760 func = actions.get(action_target, None)
1761 if func and callable(func):
1762 func()
1763 else:
1764 self.destroy() # default to destroy
1766 def _renameRestart(self):
1767 self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # Guard against restart loops: a marker left over from a previous
        # attempt means xend died mid-restart.
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Rate-limit: refuse if the last restart was too recent.
        now = time.time()
        rst = self._readVm('xend/previous_restart_time')
        if rst:
            rst = float(rst)
            timeout = now - rst
            if timeout < MINIMUM_RESTART_TIME:
                log.error(
                    'VM %s restarting too fast (%f seconds since the last '
                    'restart). Refusing to restart to avoid loops.',
                    self.info['name_label'], timeout)
                self.destroy()
                return

        self._writeVm('xend/previous_restart_time', str(now))

        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry the xend/* bookkeeping nodes over to the new VM.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                # Clean up whichever VM currently owns the marker, then
                # re-raise for the outer logging handler.
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name.  This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: an info dict carrying the ORIGINAL name and UUID, for use
            when creating the replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The copy keeps the original identity; this (dead) domain takes
        # the fresh name/UUID so the replacement can reuse the old ones.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
1867 def _preserve(self):
1868 log.info("Preserving dead domain %s (%d).", self.info['name_label'],
1869 self.domid)
1870 self._unwatchVm()
1871 self.storeDom('xend/shutdown_completed', 'True')
1872 self._stateSet(DOM_STATE_HALTED)
1875 # Debugging ..
    def dumpCore(self, corefile = None):
        """Create a core dump for this domain.

        @param corefile: destination path; defaults to a timestamped file
            under /var/xen/dump.
        @raise: XendError if core dumping failed.
        """

        try:
            if not corefile:
                this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
                corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
                                  self.info['name_label'], self.domid)

            if os.path.isdir(corefile):
                raise XendError("Cannot dump core in a directory: %s" %
                                corefile)

            # Marker so other code can tell a dump is in flight.
            self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
            xc.domain_dumpcore(self.domid, corefile)
            self._removeVm(DUMPCORE_IN_PROGRESS)
        except RuntimeError, ex:
            # NOTE(review): if xc failed before creating corefile,
            # os.rename itself raises and masks the original error --
            # confirm whether that is acceptable here.
            corefile_incomp = corefile+'-incomplete'
            os.rename(corefile, corefile_incomp)
            self._removeVm(DUMPCORE_IN_PROGRESS)
            log.exception("XendDomainInfo.dumpCore failed: id = %s name = %s",
                          self.domid, self.info['name_label'])
            raise XendError("Failed to dump core: %s" % str(ex))
1906 # Device creation/deletion functions
1909 def _createDevice(self, deviceClass, devConfig):
1910 return self.getDeviceController(deviceClass).createDevice(devConfig)
1912 def _waitForDevice(self, deviceClass, devid):
1913 return self.getDeviceController(deviceClass).waitForDevice(devid)
1915 def _waitForDeviceUUID(self, dev_uuid):
1916 deviceClass, config = self.info['devices'].get(dev_uuid)
1917 self._waitForDevice(deviceClass, config['devid'])
1919 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
1920 return self.getDeviceController(deviceClass).waitForDevice_destroy(
1921 devid, backpath)
1923 def _reconfigureDevice(self, deviceClass, devid, devconfig):
1924 return self.getDeviceController(deviceClass).reconfigureDevice(
1925 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        vscsi devices are collected first and created in sorted devid
        order; all other classes are created in ordered_device_refs order.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Defer vscsi creation until all devids are known.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        #It is necessary to sorted it for /dev/sdxx in guest.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices. Nothrow guarantee."""
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            # Read-only transaction: always abort rather than commit.
            t.abort()
2011 def getDeviceController(self, name):
2012 """Get the device controller for this domain, and if it
2013 doesn't exist, create it.
2015 @param name: device class name
2016 @type name: string
2017 @rtype: subclass of DevController
2018 """
2019 if name not in self._deviceControllers:
2020 devController = XendDevices.make_controller(name, self)
2021 if not devController:
2022 raise XendError("Unknown device type: %s" % name)
2023 self._deviceControllers[name] = devController
2025 return self._deviceControllers[name]
2028 # Migration functions (public)
2031 def testMigrateDevices(self, network, dst):
2032 """ Notify all device about intention of migration
2033 @raise: XendError for a device that cannot be migrated
2034 """
2035 for (n, c) in self.info.all_devices_sxpr():
2036 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2037 if rc != 0:
2038 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, walks the device list again to undo: the first
        C{ctr} devices (those that completed this step) are recovered at
        the current C{step}; when C{ctr} reaches zero, C{step} is
        decremented once so the failing and remaining devices are
        recovered at the previous step.  The original exception is then
        re-raised.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                # ctr == 0 exactly once: at the first device that did
                # not complete the current step.
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2058 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2059 step, domName=''):
2060 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2061 network, dst, step, domName)
2063 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2064 dst, step, domName=''):
2065 return self.getDeviceController(deviceClass).recover_migrate(
2066 deviceConfig, network, dst, step, domName)
2069 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the hypervisor domain object (xc.domain_create), sets up
        its basic HVM parameters, vcpu count and VT-d pre-checks, and
        registers the new domain with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 2MB here to be on the safe side.
        balloon.free(2*1024) # 2MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = (int(hvm) << 0) | (int(hap) << 1),
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')
            # NOTE(review): when the failure is NOT ACM-related the
            # exception is swallowed here and we fall through to the
            # domid check below, which masks the original error --
            # confirm whether re-raising was intended.

        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Test whether the devices can be assigned with VT-d
        # NOTE(review): str(None) == 'None' is truthy, so pci_str is
        # non-empty even when no "pci" key exists -- confirm that
        # xc.test_assign_device tolerates this for HVM guests.
        pci_str = str(self.info["platform"].get("pci"))
        if hvm and pci_str:
            bdf = xc.test_assign_device(self.domid, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode the failing device's bus/dev/func from the BDF.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce this domain to xenstored.  Requires domid, store MFN
        and store event-channel port to have been set up already.

        @raise XendError: if the introduction fails
        """
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Record C{target} as this domain's memory target, both with the
        hypervisor (SetTarget) and in this domain's xenstore tree.

        @raise XendError: on hypervisor or xenstore failure
        """
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided

        If no explicit cpus list is configured, on multi-node (NUMA)
        hosts pick the least-loaded node with enough free memory and pin
        every vcpu to that node's cpus.
        """

        def has_cpus():
            # True when the config carries at least one non-empty
            # per-vcpu cpu list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            # Explicit pinning requested: apply each vcpu's list as-is.
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # Score every node by how many online vcpus of OTHER
                # domains can run on it, normalised by the node's cpu
                # count; return the index of the least-loaded candidate.
                # Nodes outside node_list (or without cpus) are excluded
                # via sys.maxint.  Note: closes over 'info' assigned below.
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Memory the domain will need, in the units reported by
                # physinfo (dynamic max plus image overhead).
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the constructed domain: run the bootloader, create
        the image, size and reserve memory (including shadow and VT-d
        overheads), create channels, build the image, introduce the
        domain to xenstored and create its devices.

        @raise VmError: on any failure (RuntimeErrors are wrapped)
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            # Guests configured for localtime get an RTC offset matching
            # dom0's current UTC offset (index 8 of localtime() is the
            # DST flag, selecting altzone vs timezone).
            if self.info['platform'].get('localtime', 0):
                if time.localtime(time.time())[8]:
                    self.info['platform']['rtc_timeoffset'] = -time.altzone
                else:
                    self.info['platform']['rtc_timeoffset'] = -time.timezone

            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            # Build the guest image; returns at least the store ring MFN.
            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices.  Idempotent.  Nothrow
        guarantee.

        Runs under refresh_shutdown_lock: stops the shutdown watch,
        releases all devices, tidies the bootloader, drops the image
        reference, removes the domain's xenstore path and marks the
        domain HALTED before clearing domid.
        """

        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None  # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
2364 def unwatchShutdown(self):
2365 """Remove the watch on the domain's control/shutdown node, if any.
2366 Idempotent. Nothrow guarantee. Expects to be protected by the
2367 refresh_shutdown_lock."""
2369 try:
2370 try:
2371 if self.shutdownWatch:
2372 self.shutdownWatch.unwatch()
2373 finally:
2374 self.shutdownWatch = None
2375 except:
2376 log.exception("Unwatching control/shutdown failed.")
    def waitForShutdown(self):
        """Block until this domain leaves the RUNNING/PAUSED states.

        Waits on the state_updated condition with a 1 second timeout so
        the (live) state is re-evaluated regularly even without a
        notification."""
        self.state_updated.acquire()
        try:
            while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
                self.state_updated.wait(timeout=1.0)
        finally:
            self.state_updated.release()
2387 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish a domain restore (called from XendCheckpoint): record
        the restored store/console ring MFNs, re-introduce the domain to
        xenstored, recreate the image object and device model, store the
        domain details and restart watches and shutdown monitoring.

        @param store_mfn: MFN of the restored xenstore ring page
        @param console_mfn: MFN of the restored console ring page
        """

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True presumably flags the restore (vs fresh-create) path
            # to the device model -- TODO confirm against image.py.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        # Restore is finished: clear the domain's resume flag.
        self.setResume(False)
2412 # VM Destroy
    def _prepare_phantom_paths(self):
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        #
        # Each vbd backend may carry a 'phantom_vbd' pointer; collect
        # that backend path and its frontend path for later removal by
        # _cleanup_phantom_devs().  Returns a (possibly empty) list.
        plist = []
        if self.domid is not None:
            # Read-only enumeration of this domain's vbd device nodes.
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd = xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                t.abort()
        return plist
2434 def _cleanup_phantom_devs(self, plist):
2435 # remove phantom devices
2436 if not plist == []:
2437 time.sleep(2)
2438 for paths in plist:
2439 if paths.find('backend') != -1:
2440 # Modify online status /before/ updating state (latter is watched by
2441 # drivers, so this ordering avoids a race).
2442 xstransact.Write(paths, 'online', "0")
2443 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2444 # force
2445 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain.  Nothrow guarantee.

        Pauses and destroys the hypervisor domain (with FLR of any
        passed-through PCI devices), removes it from XendDomain, cleans
        up domain and VM xenstore state, and finally deletes the VM
        entirely when it is marked transient.
        """

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Collect phantom vbd paths before devices start disappearing.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains disappear completely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Reset this domain: destroy it and create a fresh domain from
        the same configuration, carrying over the xend/ VM subtree, then
        wait for devices and unpause.  Failures are logged and swallowed;
        a partially-created replacement domain is destroyed."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot the xend/ subtree before the VM path is torn down.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Restore the saved xend/ entries into the new VM path.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain (e.g. after a live checkpoint, or a
        later error during save or migrate).

        Checks that the domain is currently suspended first, so this is
        safe to call from anywhere.  Unless the guest advertised
        SUSPEND_CANCEL (the fast path), devices and channels are torn
        down and recreated before the hypervisor resume."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migate); checks that the domain
        # is currently suspended first so safe to call from anywhere
        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        # Restart the device model regardless of resume success.
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2555 # Channels for xenstore and console
    def _createChannels(self):
        """Create the channels to the domain.

        Allocates one unbound event channel for the xenstore connection
        and one for the console, recording their local ports."""
        self.store_port = self._createChannel()
        self.console_port = self._createChannel()
2565 def _createChannel(self):
2566 """Create an event channel to the domain.
2567 """
2568 try:
2569 if self.domid != None:
2570 return xc.evtchn_alloc_unbound(domid = self.domid,
2571 remote_dom = 0)
2572 except:
2573 log.exception("Exception in alloc_unbound(%s)", str(self.domid))
2574 raise
2576 def _resetChannels(self):
2577 """Reset all event channels in the domain.
2578 """
2579 try:
2580 if self.domid != None:
2581 return xc.evtchn_reset(dom = self.domid)
2582 except:
2583 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2584 raise
2588 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        HVM boot and direct dom0 kernel boot need no bootloader run.
        Otherwise pygrub (or the configured bootloader) is run against
        the first bootable disk; non-raw blktap images are first mounted
        on the loopback device so the bootloader can read them.  The
        resulting boot configuration is merged into self.info.

        @raise VmError: when no disk is bootable or no boot config is found
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0.  Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = osdep.pygrub_path

            blcfg = None
            # Bootable disks only, in vbd_refs order: first one wins.
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # A tap device backed by a plain file (st_rdev == 0) in a
            # non-raw format must be mounted before pygrub can read it.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device.  pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always unmount what we mounted, even if the bootloader
                # run failed.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2666 # VM Functions
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        @param params: parameter specs accepted by _gatherVm
        @return: the gathered values, or [] when the store holds an
                 invalid (unparseable) entry
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid. We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s!  Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
    def _cleanupVm(self):
        """Cleanup VM resources.  Idempotent.  Nothrow guarantee.

        Drops the VM-path watch, then removes the /vm xenstore subtree,
        logging (but not propagating) any removal failure."""

        self._unwatchVm()

        try:
            self._removeVm()
        except:
            log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole MiB.
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            # Balloon dom0 down if needed to free the headroom.
            balloon.free(overhead_kb)
2709 def _unwatchVm(self):
2710 """Remove the watch on the VM path, if any. Idempotent. Nothrow
2711 guarantee."""
2712 try:
2713 try:
2714 if self.vmWatch:
2715 self.vmWatch.unwatch()
2716 finally:
2717 self.vmWatch = None
2718 except:
2719 log.exception("Unwatching VM path failed.")
2721 def testDeviceComplete(self):
2722 """ For Block IO migration safety we must ensure that
2723 the device has shutdown correctly, i.e. all blocks are
2724 flushed to disk
2725 """
2726 start = time.time()
2727 while True:
2728 test = 0
2729 diff = time.time() - start
2730 for i in self.getDeviceController('vbd').deviceIDs():
2731 test = 1
2732 log.info("Dev %s still active, looping...", i)
2733 time.sleep(0.1)
2735 if test == 0:
2736 break
2737 if diff >= MIGRATE_TIMEOUT:
2738 log.info("Dev still active but hit max loop timeout")
2739 break
2741 def testvifsComplete(self):
2742 """ In case vifs are released and then created for the same
2743 domain, we need to wait the device shut down.
2744 """
2745 start = time.time()
2746 while True:
2747 test = 0
2748 diff = time.time() - start
2749 for i in self.getDeviceController('vif').deviceIDs():
2750 test = 1
2751 log.info("Dev %s still active, looping...", i)
2752 time.sleep(0.1)
2754 if test == 0:
2755 break
2756 if diff >= MIGRATE_TIMEOUT:
2757 log.info("Dev still active but hit max loop timeout")
2758 break
2760 def _storeVmDetails(self):
2761 to_store = {}
2763 for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
2764 info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
2765 if self._infoIsSet(info_key):
2766 to_store[key] = str(self.info[info_key])
2768 if self._infoIsSet("static_memory_min"):
2769 to_store["memory"] = str(self.info["static_memory_min"])
2770 if self._infoIsSet("static_memory_max"):
2771 to_store["maxmem"] = str(self.info["static_memory_max"])
2773 image_sxpr = self.info.image_sxpr()
2774 if image_sxpr:
2775 to_store['image'] = sxp.to_string(image_sxpr)
2777 if not self._readVm('xend/restart_count'):
2778 to_store['xend/restart_count'] = str(0)
2780 log.debug("Storing VM details: %s", scrub_password(to_store))
2782 self._writeVm(to_store)
2783 self._setVmPermissions()
    def _setVmPermissions(self):
        """Allow the guest domain to read its UUID.  We don't allow it to
        access any other entry, for security."""
        # Read-only for the guest itself; everything else under /vm
        # keeps the default (dom0-only) permissions.
        xstransact.SetPermissions('%s/uuid' % self.vmpath,
                                  { 'dom' : self.domid,
                                    'read' : True,
                                    'write' : False })
2794 # Utility functions
2797 def __getattr__(self, name):
2798 if name == "state":
2799 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
2800 log.warn("".join(traceback.format_stack()))
2801 return self._stateGet()
2802 else:
2803 raise AttributeError(name)
2805 def __setattr__(self, name, value):
2806 if name == "state":
2807 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
2808 log.warn("".join(traceback.format_stack()))
2809 self._stateSet(value)
2810 else:
2811 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a power-state transition: wakes any waitForShutdown()
        waiters and dispatches a Xen-API 'power_state' event, but only
        when the live state actually differs from C{state}."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            # A managed checkpoint on disk distinguishes SUSPENDED
            # from plain HALTED.
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
2854 def _infoIsSet(self, name):
2855 return name in self.info and self.info[name] is not None
2857 def _checkName(self, name):
2858 """Check if a vm name is valid. Valid names contain alphabetic
2859 characters, digits, or characters in '_-.:/+'.
2860 The same name cannot be used for more than one vm at the same time.
2862 @param name: name
2863 @raise: VmError if invalid
2864 """
2865 from xen.xend import XendDomain
2867 if name is None or name == '':
2868 raise VmError('Missing VM Name')
2870 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
2871 raise VmError('Invalid VM Name')
2873 dom = XendDomain.instance().domain_lookup_nr(name)
2874 if dom and dom.info['uuid'] != self.info['uuid']:
2875 raise VmError("VM name '%s' already exists%s" %
2876 (name,
2877 dom.domid is not None and
2878 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: pre-fetched domain info dict; fetched via dom_get
                     when omitted (returning silently if the domain is gone)
        @param refresh: also re-evaluate shutdown state afterwards
        @param transaction: passed through to _update_consoles
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        # A negative maxmem_kb means "unlimited"; substitute the host's
        # total memory.
        if info["maxmem_kb"] < 0:
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
2908 def sxpr(self, ignore_store = False, legacy_only = True):
2909 result = self.info.to_sxp(domain = self,
2910 ignore_devices = ignore_store,
2911 legacy_only = legacy_only)
2913 return result
2915 # Xen API
2916 # ----------------------------------------------------------------
2918 def get_uuid(self):
2919 dom_uuid = self.info.get('uuid')
2920 if not dom_uuid: # if it doesn't exist, make one up
2921 dom_uuid = uuid.createString()
2922 self.info['uuid'] = dom_uuid
2923 return dom_uuid
    # Accessors for the four configured memory bounds.  Values are
    # stored in bytes (see _initDomain); 0 is returned when unset.
    def get_memory_static_max(self):
        return self.info.get('memory_static_max', 0)
    def get_memory_static_min(self):
        return self.info.get('memory_static_min', 0)
    def get_memory_dynamic_max(self):
        return self.info.get('memory_dynamic_max', 0)
    def get_memory_dynamic_min(self):
        return self.info.get('memory_dynamic_min', 0)
2934 # only update memory-related config values if they maintain sanity
2935 def _safe_set_memory(self, key, newval):
2936 oldval = self.info.get(key, 0)
2937 try:
2938 self.info[key] = newval
2939 self.info._memory_sanity_check()
2940 except Exception, ex:
2941 self.info[key] = oldval
2942 raise
    # Setters for the four memory bounds (values in bytes, see
    # _initDomain).  _safe_set_memory rolls back and re-raises when the
    # new combination fails the sanity check.
    def set_memory_static_max(self, val):
        self._safe_set_memory('memory_static_max', val)
    def set_memory_static_min(self, val):
        self._safe_set_memory('memory_static_min', val)
    def set_memory_dynamic_max(self, val):
        self._safe_set_memory('memory_dynamic_max', val)
    def set_memory_dynamic_min(self, val):
        self._safe_set_memory('memory_dynamic_min', val)
2953 def get_vcpus_params(self):
2954 if self.getDomid() is None:
2955 return self.info['vcpus_params']
2957 retval = xc.sched_credit_domain_get(self.getDomid())
2958 return retval
    def get_power_state(self):
        # Map the live internal state onto its Xen-API power-state name.
        return XEN_API_VM_POWER_STATE[self._stateGet()]
    def get_platform(self):
        # Platform (BIOS/device-model) options dict; {} when unset.
        return self.info.get('platform', {})
    def get_pci_bus(self):
        return self.info.get('pci_bus', '')
    def get_tools_version(self):
        return self.info.get('tools_version', {})
    def get_metrics(self):
        # UUID of the associated VM_metrics object.
        return self.metrics.get_uuid();
2971 def get_security_label(self, xspol=None):
2972 import xen.util.xsm.xsm as security
2973 label = security.get_security_label(self, xspol)
2974 return label
2976 def set_security_label(self, seclab, old_seclab, xspol=None,
2977 xspol_old=None):
2978 """
2979 Set the security label of a domain from its old to
2980 a new value.
2981 @param seclab New security label formatted in the form
2982 <policy type>:<policy name>:<vm label>
2983 @param old_seclab The current security label that the
2984 VM must have.
2985 @param xspol An optional policy under which this
2986 update should be done. If not given,
2987 then the current active policy is used.
2988 @param xspol_old The old policy; only to be passed during
2989 the updating of a policy
2990 @return Returns return code, a string with errors from
2991 the hypervisor's operation, old label of the
2992 domain
2993 """
2994 rc = 0
2995 errors = ""
2996 old_label = ""
2997 new_ssidref = 0
2998 domid = self.getDomid()
2999 res_labels = None
3000 is_policy_update = (xspol_old != None)
3002 from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
3004 state = self._stateGet()
3005 # Relabel only HALTED or RUNNING or PAUSED domains
3006 if domid != 0 and \
3007 state not in \
3008 [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
3009 DOM_STATE_SUSPENDED ]:
3010 log.warn("Relabeling domain not possible in state '%s'" %
3011 DOM_STATES[state])
3012 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3014 # Remove security label. Works only for halted or suspended domains
3015 if not seclab or seclab == "":
3016 if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
3017 return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)
3019 if self.info.has_key('security_label'):
3020 old_label = self.info['security_label']
3021 # Check label against expected one.
3022 if old_label != old_seclab:
3023 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3024 del self.info['security_label']
3025 xen.xend.XendDomain.instance().managed_config_save(self)
3026 return (xsconstants.XSERR_SUCCESS, "", "", 0)
3028 tmp = seclab.split(":")
3029 if len(tmp) != 3:
3030 return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
3031 typ, policy, label = tmp
3033 poladmin = XSPolicyAdminInstance()
3034 if not xspol:
3035 xspol = poladmin.get_policy_by_name(policy)
3037 try:
3038 xen.xend.XendDomain.instance().policy_lock.acquire_writer()
3040 if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
3041 #if domain is running or paused try to relabel in hypervisor
3042 if not xspol:
3043 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3045 if typ != xspol.get_type_name() or \
3046 policy != xspol.get_name():
3047 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3049 if typ == xsconstants.ACM_POLICY_ID:
3050 new_ssidref = xspol.vmlabel_to_ssidref(label)
3051 if new_ssidref == xsconstants.INVALID_SSIDREF:
3052 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3054 # Check that all used resources are accessible under the
3055 # new label
3056 if not is_policy_update and \
3057 not security.resources_compatible_with_vmlabel(xspol,
3058 self, label):
3059 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3061 #Check label against expected one. Can only do this
3062 # if the policy hasn't changed underneath in the meantime
3063 if xspol_old == None:
3064 old_label = self.get_security_label()
3065 if old_label != old_seclab:
3066 log.info("old_label != old_seclab: %s != %s" %
3067 (old_label, old_seclab))
3068 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3070 # relabel domain in the hypervisor
3071 rc, errors = security.relabel_domains([[domid, new_ssidref]])
3072 log.info("rc from relabeling in HV: %d" % rc)
3073 else:
3074 return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)
3076 if rc == 0:
3077 # HALTED, RUNNING or PAUSED
3078 if domid == 0:
3079 if xspol:
3080 self.info['security_label'] = seclab
3081 ssidref = poladmin.set_domain0_bootlabel(xspol, label)
3082 else:
3083 return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
3084 else:
3085 if self.info.has_key('security_label'):
3086 old_label = self.info['security_label']
3087 # Check label against expected one, unless wildcard
3088 if old_label != old_seclab:
3089 return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
3091 self.info['security_label'] = seclab
3093 try:
3094 xen.xend.XendDomain.instance().managed_config_save(self)
3095 except:
3096 pass
3097 return (rc, errors, old_label, new_ssidref)
3098 finally:
3099 xen.xend.XendDomain.instance().policy_lock.release()
3101 def get_on_shutdown(self):
3102 after_shutdown = self.info.get('actions_after_shutdown')
3103 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3104 return XEN_API_ON_NORMAL_EXIT[-1]
3105 return after_shutdown
3107 def get_on_reboot(self):
3108 after_reboot = self.info.get('actions_after_reboot')
3109 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3110 return XEN_API_ON_NORMAL_EXIT[-1]
3111 return after_reboot
3113 def get_on_suspend(self):
3114 # TODO: not supported
3115 after_suspend = self.info.get('actions_after_suspend')
3116 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3117 return XEN_API_ON_NORMAL_EXIT[-1]
3118 return after_suspend
3120 def get_on_crash(self):
3121 after_crash = self.info.get('actions_after_crash')
3122 if not after_crash or after_crash not in \
3123 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3124 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3125 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
3127 def get_dev_config_by_uuid(self, dev_class, dev_uuid):
3128 """ Get's a device configuration either from XendConfig or
3129 from the DevController.
3131 @param dev_class: device class, either, 'vbd' or 'vif'
3132 @param dev_uuid: device UUID
3134 @rtype: dictionary
3135 """
3136 dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))
3138 # shortcut if the domain isn't started because
3139 # the devcontrollers will have no better information
3140 # than XendConfig.
3141 if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
3142 XEN_API_VM_POWER_STATE_SUSPENDED):
3143 if dev_config:
3144 return copy.deepcopy(dev_config)
3145 return None
3147 # instead of using dev_class, we use the dev_type
3148 # that is from XendConfig.
3149 controller = self.getDeviceController(dev_type)
3150 if not controller:
3151 return None
3153 all_configs = controller.getAllDeviceConfigurations()
3154 if not all_configs:
3155 return None
3157 updated_dev_config = copy.deepcopy(dev_config)
3158 for _devid, _devcfg in all_configs.items():
3159 if _devcfg.get('uuid') == dev_uuid:
3160 updated_dev_config.update(_devcfg)
3161 updated_dev_config['id'] = _devid
3162 return updated_dev_config
3164 return updated_dev_config
3166 def get_dev_xenapi_config(self, dev_class, dev_uuid):
3167 config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
3168 if not config:
3169 return {}
3171 config['VM'] = self.get_uuid()
3173 if dev_class == 'vif':
3174 if not config.has_key('name'):
3175 config['name'] = config.get('vifname', '')
3176 if not config.has_key('MAC'):
3177 config['MAC'] = config.get('mac', '')
3178 if not config.has_key('type'):
3179 config['type'] = 'paravirtualised'
3180 if not config.has_key('device'):
3181 devid = config.get('id')
3182 if devid != None:
3183 config['device'] = 'eth%s' % devid
3184 else:
3185 config['device'] = ''
3187 if not config.has_key('network'):
3188 try:
3189 bridge = config.get('bridge', None)
3190 if bridge is None:
3191 from xen.util import Brctl
3192 if_to_br = dict([(i,b)
3193 for (b,ifs) in Brctl.get_state().items()
3194 for i in ifs])
3195 vifname = "vif%s.%s" % (self.getDomid(),
3196 config.get('id'))
3197 bridge = if_to_br.get(vifname, None)
3198 config['network'] = \
3199 XendNode.instance().bridge_to_network(
3200 config.get('bridge')).get_uuid()
3201 except Exception:
3202 log.exception('bridge_to_network')
3203 # Ignore this for now -- it may happen if the device
3204 # has been specified using the legacy methods, but at
3205 # some point we're going to have to figure out how to
3206 # handle that properly.
3208 config['MTU'] = 1500 # TODO
3210 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3211 xennode = XendNode.instance()
3212 rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
3213 config['io_read_kbs'] = rx_bps/1024
3214 config['io_write_kbs'] = tx_bps/1024
3215 rx, tx = xennode.get_vif_stat(self.domid, devid)
3216 config['io_total_read_kbs'] = rx/1024
3217 config['io_total_write_kbs'] = tx/1024
3218 else:
3219 config['io_read_kbs'] = 0.0
3220 config['io_write_kbs'] = 0.0
3221 config['io_total_read_kbs'] = 0.0
3222 config['io_total_write_kbs'] = 0.0
3224 config['security_label'] = config.get('security_label', '')
3226 if dev_class == 'vbd':
3228 if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
3229 controller = self.getDeviceController(dev_class)
3230 devid, _1, _2 = controller.getDeviceDetails(config)
3231 xennode = XendNode.instance()
3232 rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
3233 config['io_read_kbs'] = rd_blkps
3234 config['io_write_kbs'] = wr_blkps
3235 else:
3236 config['io_read_kbs'] = 0.0
3237 config['io_write_kbs'] = 0.0
3239 config['VDI'] = config.get('VDI', '')
3240 config['device'] = config.get('dev', '')
3241 if ':' in config['device']:
3242 vbd_name, vbd_type = config['device'].split(':', 1)
3243 config['device'] = vbd_name
3244 if vbd_type == 'cdrom':
3245 config['type'] = XEN_API_VBD_TYPE[0]
3246 else:
3247 config['type'] = XEN_API_VBD_TYPE[1]
3249 config['driver'] = 'paravirtualised' # TODO
3250 config['image'] = config.get('uname', '')
3252 if config.get('mode', 'r') == 'r':
3253 config['mode'] = 'RO'
3254 else:
3255 config['mode'] = 'RW'
3257 if dev_class == 'vtpm':
3258 if not config.has_key('type'):
3259 config['type'] = 'paravirtualised' # TODO
3260 if not config.has_key('backend'):
3261 config['backend'] = "00000000-0000-0000-0000-000000000000"
3263 return config
3265 def get_dev_property(self, dev_class, dev_uuid, field):
3266 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3267 try:
3268 return config[field]
3269 except KeyError:
3270 raise XendError('Invalid property for device: %s' % field)
3272 def set_dev_property(self, dev_class, dev_uuid, field, value):
3273 self.info['devices'][dev_uuid][1][field] = value
3275 def get_vcpus_util(self):
3276 vcpu_util = {}
3277 xennode = XendNode.instance()
3278 if 'VCPUs_max' in self.info and self.domid != None:
3279 for i in range(0, self.info['VCPUs_max']):
3280 util = xennode.get_vcpu_util(self.domid, i)
3281 vcpu_util[str(i)] = util
3283 return vcpu_util
    def get_consoles(self):
        """Return the list of console device references (UUIDs)."""
        return self.info.get('console_refs', [])
    def get_vifs(self):
        """Return the list of VIF device references (UUIDs)."""
        return self.info.get('vif_refs', [])
    def get_vbds(self):
        """Return the list of VBD device references (UUIDs)."""
        return self.info.get('vbd_refs', [])
    def get_vtpms(self):
        """Return the list of vTPM device references (UUIDs)."""
        return self.info.get('vtpm_refs', [])
    def get_dpcis(self):
        """Return the DPCI instance UUIDs registered for this VM."""
        return XendDPCI.get_by_VM(self.info.get('uuid'))
    def get_dscsis(self):
        """Return the DSCSI instance UUIDs registered for this VM."""
        return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        The device class is chosen from the image path: paths starting
        with 'tap' become blktap devices, everything else a plain vbd.
        If the domain is running or paused, the device is also created
        in the live domain; on failure the config entry is rolled back
        and the exception re-raised.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        @raise XendError: if the device could not be added to the config
        """
        xenapi_vbd['image'] = vdi_image_path
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            _, config = self.info['devices'][dev_uuid]

            # Must use the same class as device_add() above.
            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                log.exception(exn)
                # Roll back the config entry so a failed hotplug does not
                # leave a phantom device behind.
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        """
        # NOTE(review): despite the docstring, the running-domain path
        # below returns config['devid'], not the device uuid; and when
        # the domain is NOT running, 'config' is never bound, so the
        # final return raises NameError.  This appears to assume callers
        # only invoke it on a RUNNING domain -- confirm before changing.
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        If the domain is running or paused, the device is also created
        in the live domain; on failure the config entry is rolled back
        and the exception re-raised.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        @raise XendError: if the device could not be added to the config
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):

            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                log.exception(exn)
                # Roll back the config entry so a failed hotplug does not
                # leave a phantom device behind.
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
3391 def create_vtpm(self, xenapi_vtpm):
3392 """Create a VTPM device from the passed struct in Xen API format.
3394 @return: uuid of the device
3395 @rtype: string
3396 """
3398 if self._stateGet() not in (DOM_STATE_HALTED,):
3399 raise VmError("Can only add vTPM to a halted domain.")
3400 if self.get_vtpms() != []:
3401 raise VmError('Domain already has a vTPM.')
3402 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3403 if not dev_uuid:
3404 raise XendError('Failed to create device')
3406 return dev_uuid
3408 def create_console(self, xenapi_console):
3409 """ Create a console device from a Xen API struct.
3411 @return: uuid of device
3412 @rtype: string
3413 """
3414 if self._stateGet() not in (DOM_STATE_HALTED,):
3415 raise VmError("Can only add console to a halted domain.")
3417 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3418 if not dev_uuid:
3419 raise XendError('Failed to create device')
3421 return dev_uuid
    def set_console_other_config(self, console_uuid, other_config):
        """Replace the 'other_config' field of the given console device."""
        self.info.console_update(console_uuid, 'other_config', other_config)
    def create_dpci(self, xenapi_pci):
        """Create pci device from the passed struct in Xen API format.

        @param xenapi_pci: DPCI struct from Xen API
        @rtype: bool
        #@rtype: string
        @return: True if successfully created device
        #@return: UUID
        """
        dpci_uuid = uuid.createString()

        # Convert xenapi to sxp
        ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')

        target_pci_sxp = \
            ['pci',
                ['dev',
                    ['domain', '0x%02x' % ppci.get_domain()],
                    ['bus', '0x%02x' % ppci.get_bus()],
                    ['slot', '0x%02x' % ppci.get_slot()],
                    ['func', '0x%1x' % ppci.get_func()],
                    ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
                    ['uuid', dpci_uuid]
                ],
                ['state', 'Initialising']
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not live: merge the new dev into the stored config.
            # All pass-through PCI devices share one 'pci' entry (devid 0).
            old_pci_sxp = self._getDeviceInfo_pci(0)

            if old_pci_sxp is None:
                dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                # Rebuild the 'pci' entry with the new dev appended.
                new_pci_sxp = ['pci']
                for existing_dev in sxp.children(old_pci_sxp, 'dev'):
                    new_pci_sxp.append(existing_dev)
                new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))

                dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_pci_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hot-plug through device_configure().
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dpci_uuid
    def create_dscsi(self, xenapi_dscsi):
        """Create scsi device from the passed struct in Xen API format.

        @param xenapi_dscsi: DSCSI struct from Xen API
        @rtype: string
        @return: UUID
        """
        dscsi_uuid = uuid.createString()

        # Convert xenapi to sxp
        pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
        # devid is the virtual host number, i.e. the leading field of
        # the 'host:channel:target:lun' quadruple.
        devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
        target_vscsi_sxp = \
            ['vscsi',
                ['dev',
                    ['devid', devid],
                    ['p-devname', pscsi.get_dev_name()],
                    ['p-dev', pscsi.get_physical_HCTL()],
                    ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
                    ['state', xenbusState['Initialising']],
                    ['uuid', dscsi_uuid]
                ]
            ]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            # Domain not live: merge the new dev into the stored config.
            cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)

            if cur_vscsi_sxp is None:
                dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
                if not dev_uuid:
                    raise XendError('Failed to create device')

            else:
                # Rebuild the 'vscsi' entry with the new dev appended.
                new_vscsi_sxp = ['vscsi']
                for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
                    new_vscsi_sxp.append(existing_dev)
                new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))

                dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
                self.info.device_update(dev_uuid, new_vscsi_sxp)

            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hot-plug through device_configure().
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to create device')

        return dscsi_uuid
    def destroy_device_by_uuid(self, dev_type, dev_uuid):
        """Destroy the device identified by dev_uuid.

        For a live (running/paused) domain the device is also torn down
        through its device controller; in every case the device is
        removed from the stored configuration.

        @param dev_type: device class, used both for the controller
                         lookup and for the '<dev_type>_refs' list
        @param dev_uuid: device UUID
        @raise XendError: if the device is unknown, or if no devid is
                          recorded for a live teardown
        """
        if dev_uuid not in self.info['devices']:
            raise XendError('Device does not exist')

        try:
            if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                    XEN_API_VM_POWER_STATE_PAUSED):
                _, config = self.info['devices'][dev_uuid]
                devid = config.get('devid')
                if devid != None:
                    self.getDeviceController(dev_type).destroyDevice(devid, force = False)
                else:
                    raise XendError('Unable to get devid for device: %s:%s' %
                                    (dev_type, dev_uuid))
        finally:
            # Deliberately unconditional: even if the live teardown
            # fails, the device is dropped from the stored configuration.
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % dev_type].remove(dev_uuid)
    def destroy_vbd(self, dev_uuid):
        """Destroy the VBD with the given device UUID."""
        self.destroy_device_by_uuid('vbd', dev_uuid)
    def destroy_vif(self, dev_uuid):
        """Destroy the VIF with the given device UUID."""
        self.destroy_device_by_uuid('vif', dev_uuid)
    def destroy_vtpm(self, dev_uuid):
        """Destroy the vTPM with the given device UUID."""
        self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Remove a pass-through PCI device from this domain.

        @param dev_uuid: DPCI instance UUID
        @raise XendError: if the device cannot be found or removed
        """
        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        old_pci_sxp = self._getDeviceInfo_pci(0)
        # Note: dev_uuid is rebound here to the shared 'pci' entry's uuid.
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        # Match the PPCI by its canonical DDDD:BB:SS.F name; keep all
        # other devs for the rebuilt entry.
        for dev in sxp.children(old_pci_sxp, 'dev'):
            domain = int(sxp.child_value(dev, 'domain'), 16)
            bus = int(sxp.child_value(dev, 'bus'), 16)
            slot = int(sxp.child_value(dev, 'slot'), 16)
            func = int(sxp.child_value(dev, 'func'), 16)
            name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
            if ppci.get_name() == name:
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                # Last PCI dev removed -- drop the whole 'pci' entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hot-unplug through device_configure().
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Remove a virtual SCSI device from this domain.

        @param dev_uuid: DSCSI instance UUID
        @raise XendError: if the device cannot be found or removed
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
        # Note: dev_uuid is rebound here to the 'vscsi' entry's uuid.
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi']
        # Match by virtual HCTL; keep all other devs for the rebuilt entry.
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                # Last vscsi dev removed -- drop the whole entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            # Live domain: hot-unplug through device_configure().
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
3639 def destroy_xapi_instances(self):
3640 """Destroy Xen-API instances stored in XendAPIStore.
3641 """
3642 # Xen-API classes based on XendBase have their instances stored
3643 # in XendAPIStore. Cleanup these instances here, if they are supposed
3644 # to be destroyed when the parent domain is dead.
3646 # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
3647 # XendBase and there's no need to remove them from XendAPIStore.
3649 from xen.xend import XendDomain
3650 if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
3651 # domain still exists.
3652 return
3654 # Destroy the VMMetrics instance.
3655 if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
3656 is not None:
3657 self.metrics.destroy()
3659 # Destroy DPCI instances.
3660 for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
3661 XendAPIStore.deregister(dpci_uuid, "DPCI")
3663 # Destroy DSCSI instances.
3664 for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
3665 XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3667 def has_device(self, dev_class, dev_uuid):
3668 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
    def __str__(self):
        """Short human-readable summary: id, name, max memory and state."""
        return '<domain id=%s name=%s memory=%s state=%s>' % \
               (str(self.domid), self.info['name_label'],
                str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])

    # repr() intentionally shares the same summary format.
    __repr__ = __str__