ia64/xen-unstable

view tools/python/xen/xend/XendDomainInfo.py @ 19224:a35dffe6f1b6

pvSCSI, xend: add new device assignment mode

You can use "host" mode by specifying keyword "host" as virtual scsi
device. Following is usage example.

xm scsi-attach 1 2:0:3:4 host

In this case, all LUNs under host=2 are attached to guest domain 1.
The channel=0, target=3 and lun=4 are ignored.

Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 17 11:19:55 2009 +0000 (2009-02-17)
parents d9480422034b
children f8916c9bc149
line source
1 #===========================================================================
2 # This library is free software; you can redistribute it and/or
3 # modify it under the terms of version 2.1 of the GNU Lesser General Public
4 # License as published by the Free Software Foundation.
5 #
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
10 #
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the Free Software
13 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 #============================================================================
15 # Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
16 # Copyright (C) 2005-2007 XenSource Ltd
17 #============================================================================
19 """Representation of a single domain.
20 Includes support for domain construction, using
21 open-ended configurations.
23 Author: Mike Wray <mike.wray@hp.com>
25 """
27 import logging
28 import time
29 import threading
30 import re
31 import copy
32 import os
33 import traceback
34 from types import StringTypes
36 import xen.lowlevel.xc
37 from xen.util import asserts
38 from xen.util.blkif import blkdev_uname_to_file, blkdev_uname_to_taptype
39 import xen.util.xsm.xsm as security
40 from xen.util import xsconstants
42 from xen.xend import balloon, sxp, uuid, image, arch, osdep
43 from xen.xend import XendOptions, XendNode, XendConfig
45 from xen.xend.XendConfig import scrub_password
46 from xen.xend.XendBootloader import bootloader, bootloader_tidy
47 from xen.xend.XendError import XendError, VmError
48 from xen.xend.XendDevices import XendDevices
49 from xen.xend.XendTask import XendTask
50 from xen.xend.xenstore.xstransact import xstransact, complete
51 from xen.xend.xenstore.xsutil import GetDomainPath, IntroduceDomain, SetTarget, ResumeDomain
52 from xen.xend.xenstore.xswatch import xswatch
53 from xen.xend.XendConstants import *
54 from xen.xend.XendAPIConstants import *
55 from xen.xend.server.DevConstants import xenbusState
57 from xen.xend.XendVMMetrics import XendVMMetrics
59 from xen.xend import XendAPIStore
60 from xen.xend.XendPPCI import XendPPCI
61 from xen.xend.XendDPCI import XendDPCI
62 from xen.xend.XendPSCSI import XendPSCSI
63 from xen.xend.XendDSCSI import XendDSCSI
65 MIGRATE_TIMEOUT = 30.0
66 BOOTLOADER_LOOPBACK_DEVICE = '/dev/xvdp'
68 xc = xen.lowlevel.xc.xc()
69 xoptions = XendOptions.instance()
71 log = logging.getLogger("xend.XendDomainInfo")
72 #log.setLevel(logging.TRACE)
def create(config):
    """Build and start a new VM from an SXP configuration.

    @param config: A configuration object involving lists of tuples.
    @type config: list of lists, eg ['vm', ['image', 'xen.gz']]

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    from xen.xend import XendDomain

    domconfig = XendConfig.XendConfig(sxp_obj = config)

    # Refuse to create a second *live* domain with the same name or UUID;
    # a dormant entry (domid is None) does not count as a clash.
    clash = XendDomain.instance().domain_lookup_nr(domconfig["name_label"])
    if clash is None or clash.domid is None:
        clash = XendDomain.instance().domain_lookup_nr(domconfig["uuid"])
    if clash is not None and clash.domid is not None:
        raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], clash.domid))

    log.debug("XendDomainInfo.create(%s)", scrub_password(config))

    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Construction failed part-way: tear down whatever was built.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def create_from_dict(config_dict):
    """Build and start a new VM from an XML-RPC style dictionary.

    @param config_dict: An configuration dictionary.

    @rtype: XendDomainInfo
    @return: An up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    """
    log.debug("XendDomainInfo.create_from_dict(%s)",
              scrub_password(config_dict))

    domconfig = XendConfig.XendConfig(xapi = config_dict)
    vm = XendDomainInfo(domconfig)
    try:
        vm.start()
    except:
        # Clean up the partially-constructed domain before re-raising.
        log.exception('Domain construction failed')
        vm.destroy()
        raise
    return vm
def recreate(info, priv):
    """Create the VM object for an existing domain.  The domain must not
    be dying, as the paths in the store should already have been removed,
    and asking us to recreate them causes problems.

    @param info: Parsed configuration (xc dominfo)
    @type info: Dictionary
    @param priv: Is a privileged domain (Dom 0)
    @type priv: bool

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.recreate(%s)", scrub_password(info))

    assert not info['dying']

    xeninfo = XendConfig.XendConfig(dominfo = info)
    xeninfo['is_control_domain'] = priv
    xeninfo['is_a_template'] = False
    domid = xeninfo['domid']
    uuid1 = uuid.fromString(xeninfo['uuid'])
    needs_reinitialising = False

    dompath = GetDomainPath(domid)
    if not dompath:
        raise XendError('No domain path in store for existing '
                        'domain %d' % domid)

    log.info("Recreating domain %d, UUID %s. at %s" %
             (domid, xeninfo['uuid'], dompath))

    # need to verify the path and uuid if not Domain-0
    # if the required uuid and vm aren't set, then that means
    # we need to recreate the dom with our own values
    #
    # NOTE: this is probably not desirable, really we should just
    # abort or ignore, but there may be cases where xenstore's
    # entry disappears (eg. xenstore-rm /)
    #
    try:
        # Each failed check below sets needs_reinitialising and raises
        # XendError('reinit') purely to jump out of the try block early.
        vmpath = xstransact.Read(dompath, "vm")
        if not vmpath:
            if not priv:
                log.warn('/local/domain/%d/vm is missing. recreate is '
                         'confused, trying our best to recover' % domid)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2_str = xstransact.Read(vmpath, "uuid")
        if not uuid2_str:
            log.warn('%s/uuid/ is missing. recreate is confused, '
                     'trying our best to recover' % vmpath)
            needs_reinitialising = True
            raise XendError('reinit')

        uuid2 = uuid.fromString(uuid2_str)
        if uuid1 != uuid2:
            log.warn('UUID in /vm does not match the UUID in /dom/%d.'
                     'Trying out best to recover' % domid)
            needs_reinitialising = True
    except XendError:
        pass # our best shot at 'goto' in python :)

    vm = XendDomainInfo(xeninfo, domid, dompath, augment = True, priv = priv,
                        vmpath = vmpath)

    if needs_reinitialising:
        # Rebuild the xenstore entries from scratch for this domain.
        vm._recreateDom()
        vm._removeVm()
        vm._storeVmDetails()
        vm._storeDomDetails()

    vm.image = image.create(vm, vm.info)
    vm.image.recreate()

    vm._registerWatches()
    vm.refreshShutdown(xeninfo)

    # register the domain in the list
    from xen.xend import XendDomain
    XendDomain.instance().add_domain(vm)

    return vm
def restore(config):
    """Create a domain and a VM object to do a restore.

    @param config: Domain SXP configuration
    @type config: list of lists. (see C{create})

    @rtype: XendDomainInfo
    @return: A up and running XendDomainInfo instance
    @raise VmError: Invalid configuration or failure to start.
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.restore(%s)", scrub_password(config))

    domconfig = XendConfig.XendConfig(sxp_obj = config)
    vm = XendDomainInfo(domconfig, resume = True)
    try:
        vm.resume()
    except:
        # Resume failed; tear the half-built domain down before re-raising.
        vm.destroy()
        raise
    return vm
def createDormant(domconfig):
    """Create a dormant/inactive XenDomainInfo without creating VM.
    This is for creating instances of persistent domains that are not
    yet started.

    @param domconfig: Parsed configuration
    @type domconfig: XendConfig object

    @rtype: XendDomainInfo
    @return: A dormant XendDomainInfo instance
    @raise XendError: Errors with configuration.
    """
    log.debug("XendDomainInfo.createDormant(%s)", scrub_password(domconfig))

    # domid does not make sense for non-running domains.
    domconfig.pop('domid', None)
    return XendDomainInfo(domconfig)
def domain_by_name(name):
    """Look up a running domain by its name.

    @params name: Name of the domain
    @type name: string
    @return: XendDomainInfo or None
    """
    from xen.xend import XendDomain
    xd = XendDomain.instance()
    return xd.domain_lookup_by_name_nr(name)
def shutdown_reason(code):
    """Map a numeric shutdown code to its human-readable reason,
    or "?" when the code is unknown.

    @param code: shutdown code
    @type code: int
    @return: shutdown reason
    @rtype: string
    """
    return DOMAIN_SHUTDOWN_REASONS.get(code, "?")
276 def dom_get(dom):
277 """Get info from xen for an existing domain.
279 @param dom: domain id
280 @type dom: int
281 @return: info or None
282 @rtype: dictionary
283 """
284 try:
285 domlist = xc.domain_getinfo(dom, 1)
286 if domlist and dom == domlist[0]['domid']:
287 return domlist[0]
288 except Exception, err:
289 # ignore missing domain
290 log.trace("domain_getinfo(%d) failed, ignoring: %s", dom, str(err))
291 return None
def get_assigned_pci_devices(domid):
    """Return the PCI device strings currently assigned to a domain,
    as recorded under the dom0 pciback backend node in xenstore.

    @param domid: domain id
    @type domid: int
    @return: list of 'dev-N' xenstore values (may be empty)
    @rtype: list of strings
    """
    dev_str_list = []
    path = '/local/domain/0/backend/pci/%u/0/' % domid
    num_devs = xstransact.Read(path + 'num_devs')
    # No backend node (or empty value) means no devices are assigned.
    if num_devs is None or num_devs == "":
        return dev_str_list
    # Each assigned device is stored as a separate 'dev-<i>' key.
    for i in range(int(num_devs)):
        dev_str_list.append(xstransact.Read(path + 'dev-%i' % i))
    return dev_str_list
305 def do_FLR(domid):
306 from xen.xend.server.pciif import parse_pci_name, PciDevice
307 dev_str_list = get_assigned_pci_devices(domid)
309 for dev_str in dev_str_list:
310 (dom, b, d, f) = parse_pci_name(dev_str)
311 try:
312 dev = PciDevice(dom, b, d, f)
313 except Exception, e:
314 raise VmError("pci: failed to locate device and "+
315 "parse it's resources - "+str(e))
316 dev.do_FLR()
318 class XendDomainInfo:
319 """An object represents a domain.
321 @TODO: try to unify dom and domid, they mean the same thing, but
322 xc refers to it as dom, and everywhere else, including
323 xenstore it is domid. The best way is to change xc's
324 python interface.
326 @ivar info: Parsed configuration
327 @type info: dictionary
328 @ivar domid: Domain ID (if VM has started)
329 @type domid: int or None
330 @ivar vmpath: XenStore path to this VM.
331 @type vmpath: string
332 @ivar dompath: XenStore path to this Domain.
333 @type dompath: string
334 @ivar image: Reference to the VM Image.
335 @type image: xen.xend.image.ImageHandler
336 @ivar store_port: event channel to xenstored
337 @type store_port: int
338 @ivar console_port: event channel to xenconsoled
339 @type console_port: int
340 @ivar store_mfn: xenstored mfn
341 @type store_mfn: int
342 @ivar console_mfn: xenconsoled mfn
343 @type console_mfn: int
344 @ivar notes: OS image notes
345 @type notes: dictionary
346 @ivar vmWatch: reference to a watch on the xenstored vmpath
347 @type vmWatch: xen.xend.xenstore.xswatch
348 @ivar shutdownWatch: reference to watch on the xenstored domain shutdown
349 @type shutdownWatch: xen.xend.xenstore.xswatch
350 @ivar shutdownStartTime: UNIX Time when domain started shutting down.
351 @type shutdownStartTime: float or None
352 # @ivar state: Domain state
353 # @type state: enum(DOM_STATE_HALTED, DOM_STATE_RUNNING, ...)
354 @ivar state_updated: lock for self.state
355 @type state_updated: threading.Condition
356 @ivar refresh_shutdown_lock: lock for polling shutdown state
357 @type refresh_shutdown_lock: threading.Condition
358 @ivar _deviceControllers: device controller cache for this domain
359 @type _deviceControllers: dict 'string' to DevControllers
360 """
    def __init__(self, info, domid = None, dompath = None, augment = False,
                 priv = False, resume = False, vmpath = None):
        """Constructor for a domain

        @param info: parsed configuration
        @type info: dictionary
        @keyword domid: Set initial domain id (if any)
        @type domid: int
        @keyword dompath: Set initial dompath (if any)
        @type dompath: string
        @keyword augment: Augment given info with xenstored VM info
        @type augment: bool
        @keyword priv: Is a privileged domain (Dom 0)
        @type priv: bool
        @keyword resume: Is this domain being resumed?
        @type resume: bool
        """
        self.info = info
        # Fall back to the domid recorded in the configuration (if any)
        # when no explicit domid is supplied.
        if domid == None:
            self.domid = self.info.get('domid')
        else:
            self.domid = domid

        #REMOVE: uuid is now generated in XendConfig
        #if not self._infoIsSet('uuid'):
        #    self.info['uuid'] = uuid.toString(uuid.create())

        # Find a unique /vm/<uuid>/<integer> path if not specified.
        # This avoids conflict between pre-/post-migrate domains when doing
        # localhost relocation.
        self.vmpath = vmpath
        i = 0
        while self.vmpath == None:
            self.vmpath = XS_VMROOT + self.info['uuid']
            if i != 0:
                self.vmpath = self.vmpath + '-' + str(i)
            try:
                # If a VM already lives at this path, try the next suffix.
                if self._readVm("uuid"):
                    self.vmpath = None
                    i = i + 1
            except:
                # NOTE(review): a failed read is treated as "path free";
                # the candidate path is kept in that case.
                pass

        self.dompath = dompath

        # Runtime handles, filled in during construction/start.
        self.image = None
        self.store_port = None
        self.store_mfn = None
        self.console_port = None
        self.console_mfn = None

        self.native_protocol = None

        # xenstore watches and shutdown-polling state.
        self.vmWatch = None
        self.shutdownWatch = None
        self.shutdownStartTime = None
        self._resume = resume

        self.state_updated = threading.Condition()
        self.refresh_shutdown_lock = threading.Condition()
        self._stateSet(DOM_STATE_HALTED)

        self._deviceControllers = {}

        # Legacy state flags are all cleared initially.
        for state in DOM_STATES_OLD:
            self.info[state] = 0

        if augment:
            self._augmentInfo(priv)

        self._checkName(self.info['name_label'])

        self.metrics = XendVMMetrics(uuid.createString(), self)
438 #
439 # Public functions available through XMLRPC
440 #
    def start(self, is_managed = False):
        """Attempts to start the VM by doing the appropriate
        initialisation if it is not already started.

        @keyword is_managed: save the running config if the domain is managed
        @type is_managed: bool
        @raise XendError: the VM is already running.
        """
        from xen.xend import XendDomain

        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED):
            try:
                # Each step reports progress percentages through XendTask.
                XendTask.log_progress(0, 30, self._constructDomain)
                XendTask.log_progress(31, 60, self._initDomain)

                XendTask.log_progress(61, 70, self._storeVmDetails)
                XendTask.log_progress(71, 80, self._storeDomDetails)
                XendTask.log_progress(81, 90, self._registerWatches)
                XendTask.log_progress(91, 100, self.refreshShutdown)

                xendomains = XendDomain.instance()
                xennode = XendNode.instance()

                # save running configuration if XendDomains believe domain is
                # persistent
                if is_managed:
                    xendomains.managed_config_save(self)

                # Apply scheduler weight/cap when the credit scheduler is active.
                if xennode.xenschedinfo() == 'credit':
                    xendomains.domain_sched_credit_set(self.getDomid(),
                                                      self.getWeight(),
                                                      self.getCap())
            except:
                # Any failure during bring-up destroys the partial domain.
                log.exception('VM start failed')
                self.destroy()
                raise
        else:
            raise XendError('VM already running')
    def resume(self):
        """Resumes a domain that has come back from suspension.

        @raise XendError: the domain is not in a resumable state.
        """
        state = self._stateGet()
        if state in (DOM_STATE_SUSPENDED, DOM_STATE_HALTED):
            try:
                self._constructDomain()

                try:
                    self._setCPUAffinity()
                except:
                    # usually a CPU we want to set affinity to does not exist
                    # we just ignore it so that the domain can still be restored
                    log.warn("Cannot restore CPU affinity")

                # Re-establish xenstore records, channels and devices.
                self._storeVmDetails()
                self._createChannels()
                self._createDevices()
                self._storeDomDetails()
                self._endRestore()
            except:
                # Failure part-way through leaves an unusable domain: destroy it.
                log.exception('VM resume failed')
                self.destroy()
                raise
        else:
            raise XendError('VM is not suspended; it is %s'
                            % XEN_API_VM_POWER_STATE[state])
    def shutdown(self, reason):
        """Shutdown a domain by signalling this via xenstored.

        @param reason: one of DOMAIN_SHUTDOWN_REASONS values
        @type reason: string
        @raise XendError: domain is already down, is Domain-0, or the
            reason is invalid.
        """
        log.debug('XendDomainInfo.shutdown(%s)', reason)
        if self._stateGet() in (DOM_STATE_SHUTDOWN, DOM_STATE_HALTED,):
            raise XendError('Domain cannot be shutdown')

        if self.domid == 0:
            raise XendError('Domain 0 cannot be shutdown')

        if reason not in DOMAIN_SHUTDOWN_REASONS.values():
            raise XendError('Invalid reason: %s' % reason)
        # Forget the previous restart timestamp, then ask the guest to
        # shut down by writing the reason to its control node.
        self._removeVm('xend/previous_restart_time')
        self.storeDom("control/shutdown", reason)

        # HVM domain shuts itself down only if it has PV drivers
        if self.info.is_hvm():
            hvm_pvdrv = xc.hvm_get_param(self.domid, HVM_PARAM_CALLBACK_IRQ)
            hvm_s_state = xc.hvm_get_param(self.domid, HVM_PARAM_ACPI_S_STATE)
            # No PV drivers (or in an ACPI sleep state): have the
            # hypervisor shut the domain down directly.
            if not hvm_pvdrv or hvm_s_state != 0:
                code = REVERSE_DOMAIN_SHUTDOWN_REASONS[reason]
                log.info("HVM save:remote shutdown dom %d!", self.domid)
                xc.domain_shutdown(self.domid, code)
528 def pause(self):
529 """Pause domain
531 @raise XendError: Failed pausing a domain
532 """
533 try:
534 xc.domain_pause(self.domid)
535 self._stateSet(DOM_STATE_PAUSED)
536 except Exception, ex:
537 log.exception(ex)
538 raise XendError("Domain unable to be paused: %s" % str(ex))
540 def unpause(self):
541 """Unpause domain
543 @raise XendError: Failed unpausing a domain
544 """
545 try:
546 xc.domain_unpause(self.domid)
547 self._stateSet(DOM_STATE_RUNNING)
548 except Exception, ex:
549 log.exception(ex)
550 raise XendError("Domain unable to be unpaused: %s" % str(ex))
552 def send_sysrq(self, key):
553 """ Send a Sysrq equivalent key via xenstored."""
554 if self._stateGet() not in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
555 raise XendError("Domain '%s' is not started" % self.info['name_label'])
557 asserts.isCharConvertible(key)
558 self.storeDom("control/sysrq", '%c' % key)
560 def sync_pcidev_info(self):
562 if not self.info.is_hvm():
563 return
565 devid = '0'
566 dev_info = self._getDeviceInfo_pci(devid)
567 if dev_info is None:
568 return
570 # get the virtual slot info from xenstore
571 dev_uuid = sxp.child_value(dev_info, 'uuid')
572 pci_conf = self.info['devices'][dev_uuid][1]
573 pci_devs = pci_conf['devs']
575 count = 0
576 vslots = None
577 while vslots is None and count < 20:
578 vslots = xstransact.Read("/local/domain/0/backend/pci/%u/%s/vslots"
579 % (self.getDomid(), devid))
580 time.sleep(0.1)
581 count += 1
582 if vslots is None:
583 log.error("Device model didn't tell the vslots for PCI device")
584 return
586 #delete last delim
587 if vslots[-1] == ";":
588 vslots = vslots[:-1]
590 slot_list = vslots.split(';')
591 if len(slot_list) != len(pci_devs):
592 log.error("Device model's pci dev num dismatch")
593 return
595 #update the vslot info
596 count = 0;
597 for x in pci_devs:
598 x['vslt'] = slot_list[count]
599 count += 1
602 def hvm_pci_device_create(self, dev_config):
603 log.debug("XendDomainInfo.hvm_pci_device_create: %s"
604 % scrub_password(dev_config))
606 if not self.info.is_hvm():
607 raise VmError("hvm_pci_device_create called on non-HVM guest")
609 #all the PCI devs share one conf node
610 devid = '0'
612 new_dev = dev_config['devs'][0]
613 dev_info = self._getDeviceInfo_pci(devid)#from self.info['devices']
615 #check conflict before trigger hotplug event
616 if dev_info is not None:
617 dev_uuid = sxp.child_value(dev_info, 'uuid')
618 pci_conf = self.info['devices'][dev_uuid][1]
619 pci_devs = pci_conf['devs']
620 for x in pci_devs:
621 if (int(x['vslt'], 16) == int(new_dev['vslt'], 16) and
622 int(x['vslt'], 16) != 0 ):
623 raise VmError("vslot %s already have a device." % (new_dev['vslt']))
625 if (int(x['domain'], 16) == int(new_dev['domain'], 16) and
626 int(x['bus'], 16) == int(new_dev['bus'], 16) and
627 int(x['slot'], 16) == int(new_dev['slot'], 16) and
628 int(x['func'], 16) == int(new_dev['func'], 16) ):
629 raise VmError("device is already inserted")
631 # Test whether the devices can be assigned with VT-d
632 pci_str = "%s, %s, %s, %s" % (new_dev['domain'],
633 new_dev['bus'],
634 new_dev['slot'],
635 new_dev['func'])
636 bdf = xc.test_assign_device(self.domid, pci_str)
637 if bdf != 0:
638 if bdf == -1:
639 raise VmError("failed to assign device: maybe the platform"
640 " doesn't support VT-d, or VT-d isn't enabled"
641 " properly?")
642 bus = (bdf >> 16) & 0xff
643 devfn = (bdf >> 8) & 0xff
644 dev = (devfn >> 3) & 0x1f
645 func = devfn & 0x7
646 raise VmError("fail to assign device(%x:%x.%x): maybe it has"
647 " already been assigned to other domain, or maybe"
648 " it doesn't exist." % (bus, dev, func))
650 # Here, we duplicate some checkings (in some cases, we mustn't allow
651 # a device to be hot-plugged into an HVM guest) that are also done in
652 # pci_device_configure()'s self.device_create(dev_sxp) or
653 # dev_control.reconfigureDevice(devid, dev_config).
654 # We must make the checkings before sending the command 'pci-ins' to
655 # ioemu.
657 # Test whether the device is owned by pciback. For instance, we can't
658 # hotplug a device being used by Dom0 itself to an HVM guest.
659 from xen.xend.server.pciif import PciDevice, parse_pci_name
660 domain = int(new_dev['domain'],16)
661 bus = int(new_dev['bus'],16)
662 dev = int(new_dev['slot'],16)
663 func = int(new_dev['func'],16)
664 try:
665 pci_device = PciDevice(domain, bus, dev, func)
666 except Exception, e:
667 raise VmError("pci: failed to locate device and "+
668 "parse it's resources - "+str(e))
669 if pci_device.driver!='pciback':
670 raise VmError(("pci: PCI Backend does not own device "+ \
671 "%s\n"+ \
672 "See the pciback.hide kernel "+ \
673 "command-line parameter or\n"+ \
674 "bind your slot/device to the PCI backend using sysfs" \
675 )%(pci_device.name))
677 # Check non-page-aligned MMIO BAR.
678 if pci_device.has_non_page_aligned_bar and arch.type != "ia64":
679 raise VmError("pci: %s: non-page-aligned MMIO BAR found." % \
680 pci_device.name)
682 # Check the co-assignment.
683 # To pci-attach a device D to domN, we should ensure each of D's
684 # co-assignment devices hasn't been assigned, or has been assigned to
685 # domN.
686 coassignment_list = pci_device.find_coassigned_devices()
687 assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
688 for pci_str in coassignment_list:
689 (domain, bus, dev, func) = parse_pci_name(pci_str)
690 dev_str = '0x%x,0x%x,0x%x,0x%x' % (domain, bus, dev, func)
691 if xc.test_assign_device(self.domid, dev_str) == 0:
692 continue
693 if not pci_str in assigned_pci_device_str_list:
694 raise VmError(('pci: failed to pci-attach %s to dom%d" + \
695 " because one of its co-assignment device %s has been" + \
696 " assigned to other domain.' \
697 )% (pci_device.name, self.domid, pci_str))
699 opts = ''
700 if 'opts' in new_dev and len(new_dev['opts']) > 0:
701 config_opts = new_dev['opts']
702 config_opts = map(lambda (x, y): x+'='+y, config_opts)
703 opts = ',' + reduce(lambda x, y: x+','+y, config_opts)
705 bdf_str = "%s:%s:%s.%s%s@%s" % (new_dev['domain'],
706 new_dev['bus'],
707 new_dev['slot'],
708 new_dev['func'],
709 opts,
710 new_dev['vslt'])
711 self.image.signalDeviceModel('pci-ins', 'pci-inserted', bdf_str)
    def device_create(self, dev_config):
        """Create a new device.

        @param dev_config: device configuration
        @type dev_config: SXP object (parsed config)
        @return: SXP description of the created device
        """
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config))
        dev_type = sxp.name(dev_config)
        # Record the device in this domain's configuration first.
        dev_uuid = self.info.device_add(dev_type, cfg_sxp = dev_config)
        dev_config_dict = self.info['devices'][dev_uuid][1]
        log.debug("XendDomainInfo.device_create: %s" % scrub_password(dev_config_dict))

        if self.domid is not None:
            # Domain is live: actually create the device and wait for it.
            try:
                dev_config_dict['devid'] = devid = \
                    self._createDevice(dev_type, dev_config_dict)
                self._waitForDevice(dev_type, devid)
            except VmError, ex:
                # Roll back the config entry added above, including any
                # per-type side registrations.
                del self.info['devices'][dev_uuid]
                if dev_type == 'pci':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DPCI')
                elif dev_type == 'vscsi':
                    for dev in dev_config_dict['devs']:
                        XendAPIStore.deregister(dev['uuid'], 'DSCSI')
                elif dev_type == 'tap':
                    # tap devices are tracked under the vbd refs list.
                    self.info['vbd_refs'].remove(dev_uuid)
                else:
                    self.info['%s_refs' % dev_type].remove(dev_uuid)
                raise ex
        else:
            # Dormant domain: only the configuration is updated.
            devid = None

        xen.xend.XendDomain.instance().managed_config_save(self)
        return self.getDeviceController(dev_type).sxpr(devid)
    def pci_device_configure(self, dev_sxp, devid = 0):
        """Configure an existing pci device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        log.debug("XendDomainInfo.pci_device_configure: %s"
                  % scrub_password(dev_sxp))

        dev_class = sxp.name(dev_sxp)

        if dev_class != 'pci':
            return False

        pci_state = sxp.child_value(dev_sxp, 'state')
        existing_dev_info = self._getDeviceInfo_pci(devid)

        if existing_dev_info is None and pci_state != 'Initialising':
            raise XendError("Cannot detach when pci platform does not exist")

        pci_dev = sxp.children(dev_sxp, 'dev')[0]
        dev_config = self.info.pci_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]

        # Do HVM specific processing
        if self.info.is_hvm():
            if pci_state == 'Initialising':
                # HVM PCI device attachment
                self.hvm_pci_device_create(dev_config)
                # Update vslt with the slot the device model reported.
                vslt = xstransact.Read("/local/domain/0/device-model/%i/parameter"
                                       % self.getDomid())
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt
            else:
                # HVM PCI device detachment: find the virtual slot the
                # device currently occupies by matching its BDF.
                existing_dev_uuid = sxp.child_value(existing_dev_info, 'uuid')
                existing_pci_conf = self.info['devices'][existing_dev_uuid][1]
                existing_pci_devs = existing_pci_conf['devs']
                vslt = '0x0'
                for x in existing_pci_devs:
                    if ( int(x['domain'], 16) == int(dev['domain'], 16) and
                         int(x['bus'], 16) == int(dev['bus'], 16) and
                         int(x['slot'], 16) == int(dev['slot'], 16) and
                         int(x['func'], 16) == int(dev['func'], 16) ):
                        vslt = x['vslt']
                        break
                # '0x0' doubles as the "not found" sentinel: slot 0 is
                # never a hot-pluggable slot.
                if vslt == '0x0':
                    raise VmError("Device %04x:%02x:%02x.%01x is not connected"
                                  % (int(dev['domain'],16), int(dev['bus'],16),
                                     int(dev['slot'],16), int(dev['func'],16)))
                self.hvm_destroyPCIDevice(int(vslt, 16))
                # Update vslt
                dev['vslt'] = vslt
                for n in sxp.children(pci_dev):
                    if(n[0] == 'vslt'):
                        n[1] = vslt

        # If pci platform does not exist, create and exit.
        if existing_dev_info is None:
            self.device_create(dev_sxp)
            return True

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)
        if not self.info.is_hvm():
            # in PV case, wait until backend state becomes connected.
            dev_control.waitForDevice_reconfigure(devid)
        num_devs = dev_control.cleanupDevice(devid)

        # update XendConfig with new device info
        if dev_uuid:
            new_dev_sxp = dev_control.configuration(devid)
            self.info.device_update(dev_uuid, new_dev_sxp)

        # If there is no device left, destroy pci and remove config.
        if num_devs == 0:
            if self.info.is_hvm():
                self.destroyDevice('pci', devid, True)
                del self.info['devices'][dev_uuid]
                platform = self.info['platform']
                orig_dev_num = len(platform['pci'])
                # TODO: can use this to keep some info to ask high level
                # management tools to hot insert a new passthrough dev
                # after migration
                if orig_dev_num != 0:
                    #platform['pci'] = ["%dDEVs" % orig_dev_num]
                    platform['pci'] = []
            else:
                self.destroyDevice('pci', devid)
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def vscsi_device_configure(self, dev_sxp):
        """Configure an existing vscsi device (attach or detach a LUN).
        Modelled on pci_device_configure().

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @return: True on success, False if dev_sxp is not a vscsi config
        @rtype: boolean
        """
        dev_class = sxp.name(dev_sxp)
        if dev_class != 'vscsi':
            return False

        dev_config = self.info.vscsi_convert_sxp_to_dict(dev_sxp)
        dev = dev_config['devs'][0]
        req_devid = int(dev['devid'])
        existing_dev_info = self._getDeviceInfo_vscsi(req_devid, dev['v-dev'])
        state = dev['state']

        if state == xenbusState['Initialising']:
            # new create
            # If request devid does not exist, create and exit.
            if existing_dev_info is None:
                self.device_create(dev_sxp)
                return True
            elif existing_dev_info == "exists":
                raise XendError("The virtual device %s is already defined" % dev['v-dev'])

        elif state == xenbusState['Closing']:
            if existing_dev_info is None:
                raise XendError("Cannot detach vscsi device does not exist")

        if self.domid is not None:
            # Live domain: reconfigure the backend device directly.
            # use DevController.reconfigureDevice to change device config
            dev_control = self.getDeviceController(dev_class)
            dev_uuid = dev_control.reconfigureDevice(req_devid, dev_config)
            dev_control.waitForDevice_reconfigure(req_devid)
            num_devs = dev_control.cleanupDevice(req_devid)

            # update XendConfig with new device info
            if dev_uuid:
                new_dev_sxp = dev_control.configuration(req_devid)
                self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is no device left, destroy vscsi and remove config.
            if num_devs == 0:
                self.destroyDevice('vscsi', req_devid)
                del self.info['devices'][dev_uuid]

        else:
            # Dormant domain: rebuild the stored sxp by hand.
            cur_dev_sxp = self._getDeviceInfo_vscsi(req_devid, None)
            new_dev_sxp = ['vscsi']
            cur_mode = sxp.children(cur_dev_sxp, 'feature-host')[0]
            new_dev_sxp.append(cur_mode)

            for cur_dev in sxp.children(cur_dev_sxp, 'dev'):
                if state == xenbusState['Closing']:
                    # In "host" mode (feature-host == 1) individual LUNs
                    # cannot be detached: keep nothing.
                    if int(cur_mode[1]) == 1:
                        continue
                    # Skip (i.e. drop) the LUN being detached.
                    cur_dev_vdev = sxp.child_value(cur_dev, 'v-dev')
                    if cur_dev_vdev == dev['v-dev']:
                        continue
                new_dev_sxp.append(cur_dev)

            if state == xenbusState['Initialising']:
                # Attach: append the newly requested LUN entries.
                for new_dev in sxp.children(dev_sxp, 'dev'):
                    new_dev_sxp.append(new_dev)

            dev_uuid = sxp.child_value(cur_dev_sxp, 'uuid')
            self.info.device_update(dev_uuid, new_dev_sxp)

            # If there is only 'vscsi' in new_dev_sxp, remove the config.
            if len(sxp.children(new_dev_sxp, 'dev')) == 0:
                del self.info['devices'][dev_uuid]

        xen.xend.XendDomain.instance().managed_config_save(self)

        return True
    def device_configure(self, dev_sxp, devid = None):
        """Configure an existing device.

        @param dev_sxp: device configuration
        @type dev_sxp: SXP object (parsed config)
        @param devid: device id
        @type devid: int
        @return: Returns True if successfully updated device
        @rtype: boolean
        """
        # convert device sxp to a dict
        dev_class = sxp.name(dev_sxp)
        dev_config = {}

        # pci and vscsi have dedicated, more involved configure paths.
        if dev_class == 'pci':
            return self.pci_device_configure(dev_sxp)

        if dev_class == 'vscsi':
            return self.vscsi_device_configure(dev_sxp)

        for opt_val in dev_sxp[1:]:
            try:
                dev_config[opt_val[0]] = opt_val[1]
            except IndexError:
                # Entries without a value are simply skipped.
                pass

        # use DevController.reconfigureDevice to change device config
        dev_control = self.getDeviceController(dev_class)
        dev_uuid = dev_control.reconfigureDevice(devid, dev_config)

        # update XendConfig with new device info
        if dev_uuid:
            self.info.device_update(dev_uuid, dev_sxp)

        return True
965 def waitForDevices(self):
966 """Wait for this domain's configured devices to connect.
968 @raise VmError: if any device fails to initialise.
969 """
970 for devclass in XendDevices.valid_devices():
971 self.getDeviceController(devclass).waitForDevices()
973 def hvm_destroyPCIDevice(self, vslot):
974 log.debug("hvm_destroyPCIDevice called %s", vslot)
976 if not self.info.is_hvm():
977 raise VmError("hvm_destroyPCIDevice called on non-HVM guest")
979 #all the PCI devs share one conf node
980 devid = '0'
981 vslot = int(vslot)
982 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
983 dev_uuid = sxp.child_value(dev_info, 'uuid')
985 #delete the pci bdf config under the pci device
986 pci_conf = self.info['devices'][dev_uuid][1]
987 pci_len = len(pci_conf['devs'])
989 #find the pass-through device with the virtual slot
990 devnum = 0
991 for x in pci_conf['devs']:
992 if int(x['vslt'], 16) == vslot:
993 break
994 devnum += 1
996 if devnum >= pci_len:
997 raise VmError("Device @ vslot 0x%x doesn't exist." % (vslot))
999 if vslot == 0:
1000 raise VmError("Device @ vslot 0x%x do not support hotplug." % (vslot))
1002 # Check the co-assignment.
1003 # To pci-detach a device D from domN, we should ensure: for each DD in the
1004 # list of D's co-assignment devices, DD is not assigned (to domN).
1006 from xen.xend.server.pciif import PciDevice
1007 domain = int(x['domain'],16)
1008 bus = int(x['bus'],16)
1009 dev = int(x['slot'],16)
1010 func = int(x['func'],16)
1011 try:
1012 pci_device = PciDevice(domain, bus, dev, func)
1013 except Exception, e:
1014 raise VmError("pci: failed to locate device and "+
1015 "parse it's resources - "+str(e))
1016 coassignment_list = pci_device.find_coassigned_devices()
1017 coassignment_list.remove(pci_device.name)
1018 assigned_pci_device_str_list = get_assigned_pci_devices(self.domid)
1019 for pci_str in coassignment_list:
1020 if pci_str in assigned_pci_device_str_list:
1021 raise VmError(('pci: failed to pci-detach %s from dom%d" + \
1022 " because one of its co-assignment device %s is still " + \
1023 " assigned to the domain.' \
1024 )% (pci_device.name, self.domid, pci_str))
1027 bdf_str = "%s:%s:%s.%s" % (x['domain'], x['bus'], x['slot'], x['func'])
1028 log.info("hvm_destroyPCIDevice:%s:%s!", x, bdf_str)
1030 self.image.signalDeviceModel('pci-rem', 'pci-removed', bdf_str)
1032 return 0
    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
        """Tear down a device, optionally removing its saved configuration.

        @param deviceClass: device class, e.g. 'vif', 'vbd', 'tap'
        @param devid: device id or device name
        @param force: if True, do not wait for the teardown to complete
        @param rm_cfg: if True, also delete the device's configuration
        @return: result of the controller's destroyDevice, or None when
            the domain is not running
        @raise XendError: if rm_cfg is set and the device is not defined
        """
        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
                  deviceClass, devid)

        if rm_cfg:
            # Convert devid to device number. A device number is
            # needed to remove its configuration.
            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)

            # Save current sxprs. A device number and a backend
            # path are needed to remove its configuration but sxprs
            # do not have those after calling destroyDevice.
            sxprs = self.getDeviceSxprs(deviceClass)

        rc = None
        if self.domid is not None:
            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
            if not force and rm_cfg:
                # The backend path, other than the device itself,
                # has to be passed because its accompanied frontend
                # path may be void until its removal is actually
                # issued. It is probable because destroyDevice is
                # issued first.
                for dev_num, dev_info in sxprs:
                    dev_num = int(dev_num)
                    if dev_num == dev:
                        for x in dev_info:
                            if x[0] == 'backend':
                                backend = x[1]
                                break
                        break
                # NOTE(review): 'backend' is only bound if the loop above
                # found the device -- assumes dev is always in sxprs here.
                self._waitForDevice_destroy(deviceClass, devid, backend)

        if rm_cfg:
            if deviceClass == 'vif':
                if self.domid is not None:
                    # Running domain: look the vif up again by MAC address.
                    for dev_num, dev_info in sxprs:
                        dev_num = int(dev_num)
                        if dev_num == dev:
                            for x in dev_info:
                                if x[0] == 'mac':
                                    mac = x[1]
                                    break
                            break
                    dev_info = self._getDeviceInfo_vif(mac)
                else:
                    _, dev_info = sxprs[dev]
            else: # 'vbd' or 'tap'
                dev_info = self._getDeviceInfo_vbd(dev)
                # To remove the UUID of the device from refs,
                # deviceClass must be always 'vbd'.
                deviceClass = 'vbd'
            if dev_info is None:
                raise XendError("Device %s is not defined" % devid)

            # Drop the device from the managed configuration and persist.
            dev_uuid = sxp.child_value(dev_info, 'uuid')
            del self.info['devices'][dev_uuid]
            self.info['%s_refs' % deviceClass].remove(dev_uuid)
            xen.xend.XendDomain.instance().managed_config_save(self)

        return rc
1096 def getDeviceSxprs(self, deviceClass):
1097 if deviceClass == 'pci':
1098 dev_info = self._getDeviceInfo_pci('0')#from self.info['devices']
1099 if dev_info is None:
1100 return []
1101 dev_uuid = sxp.child_value(dev_info, 'uuid')
1102 pci_devs = self.info['devices'][dev_uuid][1]['devs']
1103 pci_len = len(pci_devs)
1104 return pci_devs
1105 if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED, DOM_STATE_CRASHED):
1106 return self.getDeviceController(deviceClass).sxprs()
1107 else:
1108 sxprs = []
1109 dev_num = 0
1110 for dev_type, dev_info in self.info.all_devices_sxpr():
1111 if (deviceClass == 'vbd' and dev_type not in ['vbd', 'tap']) or \
1112 (deviceClass != 'vbd' and dev_type != deviceClass):
1113 continue
1115 if deviceClass == 'vscsi':
1116 vscsi_devs = ['devs', []]
1117 for vscsi_dev in sxp.children(dev_info, 'dev'):
1118 vscsi_dev.append(['frontstate', None])
1119 vscsi_devs[1].append(vscsi_dev)
1120 dev_num = int(sxp.child_value(vscsi_dev, 'devid'))
1121 vscsi_mode = sxp.children(dev_info, 'feature-host')[0]
1122 sxprs.append([dev_num, [vscsi_devs, vscsi_mode]])
1123 elif deviceClass == 'vbd':
1124 dev = sxp.child_value(dev_info, 'dev')
1125 if 'ioemu:' in dev:
1126 (_, dev) = dev.split(':', 1)
1127 try:
1128 (dev_name, _) = dev.split(':', 1) # Remove ":disk" or ":cdrom"
1129 except ValueError:
1130 dev_name = dev
1131 dev_num = self.getDeviceController('vbd').convertToDeviceNumber(dev_name)
1132 sxprs.append([dev_num, dev_info])
1133 else:
1134 sxprs.append([dev_num, dev_info])
1135 dev_num += 1
1136 return sxprs
1138 def getBlockDeviceClass(self, devid):
1139 # To get a device number from the devid,
1140 # we temporarily use the device controller of VBD.
1141 dev = self.getDeviceController('vbd').convertToDeviceNumber(devid)
1142 dev_info = self._getDeviceInfo_vbd(dev)
1143 if dev_info:
1144 return dev_info[0]
1146 def _getDeviceInfo_vif(self, mac):
1147 for dev_type, dev_info in self.info.all_devices_sxpr():
1148 if dev_type != 'vif':
1149 continue
1150 if mac == sxp.child_value(dev_info, 'mac'):
1151 return dev_info
1153 def _getDeviceInfo_vbd(self, devid):
1154 for dev_type, dev_info in self.info.all_devices_sxpr():
1155 if dev_type != 'vbd' and dev_type != 'tap':
1156 continue
1157 dev = sxp.child_value(dev_info, 'dev')
1158 dev = dev.split(':')[0]
1159 dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
1160 if devid == dev:
1161 return dev_info
    def _getDeviceInfo_pci(self, devid):
        """Return the sxp info of the pci device node, or None if the
        domain has no pci devices.

        NOTE(review): devid is ignored -- all PCI devices share one
        config node (see hvm_destroyPCIDevice), so the first 'pci'
        entry is returned.
        """
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'pci':
                continue
            return dev_info
        return None
    def _getDeviceInfo_vscsi(self, devid, vdev):
        """Look up a vscsi device description.

        @return: the string "exists" if any configured vscsi device
            already uses virtual device vdev; the device sxp if one
            matches devid; otherwise None.
        """
        devid = int(devid)
        for dev_type, dev_info in self.info.all_devices_sxpr():
            if dev_type != 'vscsi':
                continue
            existing_dev_uuid = sxp.child_value(dev_info, 'uuid')
            existing_conf = self.info['devices'][existing_dev_uuid][1]
            # NOTE(review): only devs[0] is inspected -- assumes all devs
            # of one entry share devid/v-dev as needed; confirm.
            existing_dev = existing_conf['devs'][0]
            existing_devid = int(existing_dev['devid'])
            existing_vdev = existing_dev['v-dev']

            if vdev == existing_vdev:
                return "exists"

            if devid == existing_devid:
                return dev_info

        return None
    def setMemoryTarget(self, target):
        """Set the memory target of this domain.
        @param target: In MiB.
        @raise XendError: if shrinking dom0 below its configured minimum.
        """
        log.debug("Setting memory target of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), target)

        MiB = 1024 * 1024
        memory_cur = self.get_memory_dynamic_max() / MiB

        if self.domid == 0:
            # Never balloon dom0 below the configured minimum.
            dom0_min_mem = xoptions.get_dom0_min_mem()
            if target < memory_cur and dom0_min_mem > target:
                raise XendError("memory_dynamic_max too small")

        self._safe_set_memory('memory_dynamic_min', target * MiB)
        self._safe_set_memory('memory_dynamic_max', target * MiB)

        if self.domid >= 0:
            if target > memory_cur:
                # Growing: make sure enough free memory is available first.
                balloon.free((target - memory_cur) * 1024, self)
            self.storeVm("memory", target)
            # memory/target is in KiB (MiB << 10).
            self.storeDom("memory/target", target << 10)
            xc.domain_set_target_mem(self.domid,
                                     (target * 1024))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def setMemoryMaximum(self, limit):
        """Set the maximum memory limit of this domain
        @param limit: In MiB.
        @raise XendError: if the hypervisor rejects the new maximum.
        """
        log.debug("Setting memory maximum of domain %s (%s) to %d MiB.",
                  self.info['name_label'], str(self.domid), limit)

        maxmem_cur = self.get_memory_static_max()
        MiB = 1024 * 1024
        self._safe_set_memory('memory_static_max', limit * MiB)

        if self.domid >= 0:
            # Hypervisor expects KiB.
            maxmem = int(limit) * 1024
            try:
                # NOTE(review): returning here means managed_config_save()
                # below only runs for inactive domains -- confirm intended.
                return xc.domain_setmaxmem(self.domid, maxmem)
            except Exception, ex:
                # Roll back the config change if the hypervisor rejects it.
                self._safe_set_memory('memory_static_max', maxmem_cur)
                raise XendError(str(ex))
        xen.xend.XendDomain.instance().managed_config_save(self)
    def getVCPUInfo(self):
        """Return an sxpr describing every VCPU of this domain: live data
        via xc.vcpu_getinfo for an instantiated domain, zero/placeholder
        values otherwise.

        @raise XendError: if the hypervisor query fails.
        """
        try:
            # We include the domain name and ID, to help xm.
            sxpr = ['domain',
                    ['domid', self.domid],
                    ['name', self.info['name_label']],
                    ['vcpu_count', self.info['VCPUs_max']]]

            for i in range(0, self.info['VCPUs_max']):
                if self.domid is not None:
                    info = xc.vcpu_getinfo(self.domid, i)

                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', info['online']],
                                 ['blocked', info['blocked']],
                                 ['running', info['running']],
                                 ['cpu_time', info['cpu_time'] / 1e9],
                                 ['cpu', info['cpu']],
                                 ['cpumap', info['cpumap']]])
                else:
                    sxpr.append(['vcpu',
                                 ['number', i],
                                 ['online', 0],
                                 ['blocked', 0],
                                 ['running', 0],
                                 ['cpu_time', 0.0],
                                 ['cpu', -1],
                                 ['cpumap', self.info['cpus'][i] and \
                                            self.info['cpus'][i] or range(64)]])

            return sxpr

        except RuntimeError, exn:
            raise XendError(str(exn))
    def getDomInfo(self):
        """Return the hypervisor's info record for this domain id."""
        return dom_get(self.domid)
1278 # internal functions ... TODO: re-categorised
    def _augmentInfo(self, priv):
        """Augment self.info, as given to us through L{recreate}, with
        values taken from the store. This recovers those values known
        to xend but not to the hypervisor.

        @param priv: True for dom0 (privileged), which skips memory/vcpu
            entries and reads those from Xen instead.
        """
        augment_entries = XendConfig.LEGACY_XENSTORE_VM_PARAMS[:]
        if priv:
            # Dom0's memory/vcpu settings come from Xen, not the store.
            augment_entries.remove('memory')
            augment_entries.remove('maxmem')
            augment_entries.remove('vcpus')
            augment_entries.remove('vcpu_avail')

        vm_config = self._readVMDetails([(k, XendConfig.LEGACY_CFG_TYPES[k])
                                         for k in augment_entries])

        # make returned lists into a dictionary
        vm_config = dict(zip(augment_entries, vm_config))

        for arg in augment_entries:
            val = vm_config[arg]
            if val != None:
                if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                    xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                    self.info[xapiarg] = val
                elif arg == "memory":
                    self.info["static_memory_min"] = val
                elif arg == "maxmem":
                    self.info["static_memory_max"] = val
                else:
                    self.info[arg] = val

        # read CPU Affinity
        self.info['cpus'] = []
        vcpus_info = self.getVCPUInfo()
        for vcpu_info in sxp.children(vcpus_info, 'vcpu'):
            self.info['cpus'].append(sxp.child_value(vcpu_info, 'cpumap'))

        # For dom0, we ignore any stored value for the vcpus fields, and
        # read the current value from Xen instead. This allows boot-time
        # settings to take precedence over any entries in the store.
        if priv:
            xeninfo = dom_get(self.domid)
            self.info['VCPUs_max'] = xeninfo['online_vcpus']
            self.info['vcpu_avail'] = (1 << xeninfo['online_vcpus']) - 1

        # read image value
        image_sxp = self._readVm('image')
        if image_sxp:
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))

        # read devices
        devices = []
        for devclass in XendDevices.valid_devices():
            devconfig = self.getDeviceController(devclass).configurations()
            if devconfig:
                devices.extend(devconfig)

        if not self.info['devices'] and devices is not None:
            for device in devices:
                self.info.device_add(device[0], cfg_sxp = device)

        self._update_consoles()
    def _update_consoles(self, transaction = None):
        """Sync console and VNC ports from xenstore into this domain's
        console/vfb device configuration. No-op for dom0 and for
        domains without a domid.

        @param transaction: optional open xenstore transaction to read
            through instead of one-shot reads.
        """
        if self.domid == None or self.domid == 0:
            return

        # Update VT100 port if it exists
        if transaction is None:
            self.console_port = self.readDom('console/port')
        else:
            self.console_port = self.readDomTxn(transaction, 'console/port')
        if self.console_port is not None:
            serial_consoles = self.info.console_get_all('vt100')
            if not serial_consoles:
                cfg = self.info.console_add('vt100', self.console_port)
                self._createDevice('console', cfg)
            else:
                console_uuid = serial_consoles[0].get('uuid')
                self.info.console_update(console_uuid, 'location',
                                         self.console_port)

        # Update VNC port if it exists and write to xenstore
        if transaction is None:
            vnc_port = self.readDom('console/vnc-port')
        else:
            vnc_port = self.readDomTxn(transaction, 'console/vnc-port')
        if vnc_port is not None:
            for dev_uuid, (dev_type, dev_info) in self.info['devices'].items():
                if dev_type == 'vfb':
                    old_location = dev_info.get('location')
                    listen_host = dev_info.get('vnclisten', 'localhost')
                    new_location = '%s:%s' % (listen_host, str(vnc_port))
                    # Only the first vfb is considered; stop either way.
                    if old_location == new_location:
                        break

                    dev_info['location'] = new_location
                    self.info.device_update(dev_uuid, cfg_xenapi = dev_info)
                    vfb_ctrl = self.getDeviceController('vfb')
                    vfb_ctrl.reconfigureDevice(0, dev_info)
                    break
1385 # Function to update xenstore /vm/*
    def _readVm(self, *args):
        """Read value(s) from this VM's /vm/<uuid> xenstore subtree."""
        return xstransact.Read(self.vmpath, *args)
    def _writeVm(self, *args):
        """Write value(s) into this VM's /vm/<uuid> xenstore subtree."""
        return xstransact.Write(self.vmpath, *args)
    def _removeVm(self, *args):
        """Remove path(s) from this VM's /vm/<uuid> xenstore subtree."""
        return xstransact.Remove(self.vmpath, *args)
    def _gatherVm(self, *args):
        """Gather typed value(s) from this VM's /vm/<uuid> subtree."""
        return xstransact.Gather(self.vmpath, *args)
    def _listRecursiveVm(self, *args):
        """Recursively list entries under this VM's /vm/<uuid> subtree."""
        return xstransact.ListRecursive(self.vmpath, *args)
    def storeVm(self, *args):
        """Store (stringified) value(s) under this VM's /vm/<uuid> subtree."""
        return xstransact.Store(self.vmpath, *args)
    def permissionsVm(self, *args):
        """Set xenstore permissions on this VM's /vm/<uuid> subtree."""
        return xstransact.SetPermissions(self.vmpath, *args)
1410 # Function to update xenstore /dom/*
    def readDom(self, *args):
        """Read value(s) from this domain's /local/domain/<id> subtree."""
        return xstransact.Read(self.dompath, *args)
    def gatherDom(self, *args):
        """Gather typed value(s) from this domain's dompath subtree."""
        return xstransact.Gather(self.dompath, *args)
    def _writeDom(self, *args):
        """Write value(s) into this domain's dompath subtree."""
        return xstransact.Write(self.dompath, *args)
    def _removeDom(self, *args):
        """Remove path(s) from this domain's dompath subtree."""
        return xstransact.Remove(self.dompath, *args)
    def storeDom(self, *args):
        """Store (stringified) value(s) under this domain's dompath subtree."""
        return xstransact.Store(self.dompath, *args)
1429 def readDomTxn(self, transaction, *args):
1430 paths = map(lambda x: self.dompath + "/" + x, args)
1431 return transaction.read(*paths)
1433 def gatherDomTxn(self, transaction, *args):
1434 paths = map(lambda x: self.dompath + "/" + x, args)
1435 return transaction.gather(*paths)
1437 def _writeDomTxn(self, transaction, *args):
1438 paths = map(lambda x: self.dompath + "/" + x, args)
1439 return transaction.write(*paths)
1441 def _removeDomTxn(self, transaction, *args):
1442 paths = map(lambda x: self.dompath + "/" + x, args)
1443 return transaction.remove(*paths)
1445 def storeDomTxn(self, transaction, *args):
1446 paths = map(lambda x: self.dompath + "/" + x, args)
1447 return transaction.store(*paths)
    def _recreateDom(self):
        """Recreate this domain's dompath xenstore subtree inside a
        single completed transaction."""
        complete(self.dompath, lambda t: self._recreateDomFunc(t))
    def _recreateDomFunc(self, t):
        """Transaction body for _recreateDom: wipe and rebuild the
        dompath subtree, making selected subdirs guest-writable."""
        t.remove()
        t.mkdir()
        t.set_permissions({'dom' : self.domid, 'read' : True})
        t.write('vm', self.vmpath)
        # Subdirectories the guest itself needs to write into.
        for i in [ 'device', 'control', 'error', 'memory' ]:
            t.mkdir(i)
            t.set_permissions(i, {'dom' : self.domid})
    def _storeDomDetails(self):
        """Write this domain's details (console, store ring, image
        features, vcpu availability) into its dompath xenstore subtree."""
        to_store = {
            'domid':          str(self.domid),
            'vm':             self.vmpath,
            'name':           self.info['name_label'],
            'console/limit':  str(xoptions.get_console_limit() * 1024),
            'memory/target':  str(self.info['memory_dynamic_max'] / 1024),
            }

        def f(n, v):
            # Store only defined values; booleans become "1"/"0".
            if v is not None:
                if type(v) == bool:
                    to_store[n] = v and "1" or "0"
                else:
                    to_store[n] = str(v)

        # Figure out if we need to tell xenconsoled to ignore this guest's
        # console - device model will handle console if it is running
        constype = "ioemu"
        if 'device_model' not in self.info['platform']:
            constype = "xenconsoled"

        f('console/port',     self.console_port)
        f('console/ring-ref', self.console_mfn)
        f('console/type',     constype)
        f('store/port',       self.store_port)
        f('store/ring-ref',   self.store_mfn)

        if arch.type == "x86":
            f('control/platform-feature-multiprocessor-suspend', True)

        # elfnotes
        for n, v in self.info.get_notes().iteritems():
            n = n.lower().replace('_', '-')
            if n == 'features':
                # 'features' is a '|'-separated list; '!' negates a feature.
                for v in v.split('|'):
                    v = v.replace('_', '-')
                    if v.startswith('!'):
                        f('image/%s/%s' % (n, v[1:]), False)
                    else:
                        f('image/%s/%s' % (n, v), True)
            else:
                f('image/%s' % n, v)

        if self.info.has_key('security_label'):
            f('security_label', self.info['security_label'])

        to_store.update(self._vcpuDomDetails())

        log.debug("Storing domain details: %s", scrub_password(to_store))

        self._writeDom(to_store)
1515 def _vcpuDomDetails(self):
1516 def availability(n):
1517 if self.info['vcpu_avail'] & (1 << n):
1518 return 'online'
1519 else:
1520 return 'offline'
1522 result = {}
1523 for v in range(0, self.info['VCPUs_max']):
1524 result["cpu/%d/availability" % v] = availability(v)
1525 return result
1528 # xenstore watches
    def _registerWatches(self):
        """Register a watch on this VM's entries in the store, and the
        domain's control/shutdown node, so that when they are changed
        externally, we keep up to date. This should only be called by {@link
        #create}, {@link #recreate}, or {@link #restore}, once the domain's
        details have been written, but before the new instance is returned."""
        self.vmWatch = xswatch(self.vmpath, self._storeChanged)
        self.shutdownWatch = xswatch(self.dompath + '/control/shutdown',
                                     self._handleShutdownWatch)
    def _storeChanged(self, _):
        """xswatch callback: re-read the /vm details from xenstore and
        fold externally-made changes back into self.info, rewriting the
        domain store section if anything changed.

        @return: 1 so the watch stays registered.
        """
        log.trace("XendDomainInfo.storeChanged");

        changed = False

        # Check whether values in the configuration have
        # changed in Xenstore.

        cfg_vm = ['name', 'on_poweroff', 'on_reboot', 'on_crash',
                  'rtc/timeoffset']

        vm_details = self._readVMDetails([(k,XendConfig.LEGACY_CFG_TYPES[k])
                                          for k in cfg_vm])

        # convert two lists into a python dictionary
        vm_details = dict(zip(cfg_vm, vm_details))

        if vm_details['rtc/timeoffset'] == None:
            vm_details['rtc/timeoffset'] = "0"

        for arg, val in vm_details.items():
            if arg in XendConfig.LEGACY_CFG_TO_XENAPI_CFG:
                xapiarg = XendConfig.LEGACY_CFG_TO_XENAPI_CFG[arg]
                if val != None and val != self.info[xapiarg]:
                    self.info[xapiarg] = val
                    changed = True
            elif arg == "memory":
                if val != None and val != self.info["static_memory_min"]:
                    self.info["static_memory_min"] = val
                    changed = True
            elif arg == "maxmem":
                if val != None and val != self.info["static_memory_max"]:
                    self.info["static_memory_max"] = val
                    changed = True

        # Check whether image definition has been updated
        image_sxp = self._readVm('image')
        if image_sxp and image_sxp != sxp.to_string(self.info.image_sxpr()):
            self.info.update_with_image_sxp(sxp.from_string(image_sxp))
            changed = True

        # Check if the rtc offset has changes
        if vm_details.get("rtc/timeoffset", "0") != self.info["platform"].get("rtc_timeoffset", "0"):
            self.info["platform"]["rtc_timeoffset"] = vm_details.get("rtc/timeoffset", 0)
            changed = True

        if changed:
            # Update the domain section of the store, as this contains some
            # parameters derived from the VM configuration.
            self._storeDomDetails()

        return 1
    def _handleShutdownWatch(self, _):
        """xswatch callback on control/shutdown: for a non-suspend
        shutdown request, schedule refreshShutdown to fire when the
        shutdown timeout expires.

        @return: True so the watch stays registered.
        """
        log.debug('XendDomainInfo.handleShutdownWatch')

        reason = self.readDom('control/shutdown')

        if reason and reason != 'suspend':
            sst = self.readDom('xend/shutdown_start_time')
            now = time.time()
            if sst:
                # A shutdown is already in progress; time out relative
                # to its recorded start time.
                self.shutdownStartTime = float(sst)
                timeout = float(sst) + SHUTDOWN_TIMEOUT - now
            else:
                self.shutdownStartTime = now
                self.storeDom('xend/shutdown_start_time', now)
                timeout = SHUTDOWN_TIMEOUT

            log.trace(
                "Scheduling refreshShutdown on domain %d in %ds.",
                self.domid, timeout)
            threading.Timer(timeout, self.refreshShutdown).start()

        return True
1619 # Public Attributes for the VM
    def getDomid(self):
        """Return this domain's id (None when not instantiated)."""
        return self.domid
    def setName(self, name, to_store = True):
        """Rename the domain, optionally persisting the name to the
        /vm xenstore subtree.

        @raise VmError: if the name fails _checkName validation.
        """
        self._checkName(name)
        self.info['name_label'] = name
        if to_store:
            self.storeVm("name", name)
    def getName(self):
        """Return the domain's name label."""
        return self.info['name_label']
    def getDomainPath(self):
        """Return the domain's /local/domain/<id> xenstore path."""
        return self.dompath
    def getShutdownReason(self):
        """Return the pending control/shutdown request, if any."""
        return self.readDom('control/shutdown')
    def getStorePort(self):
        """For use only by image.py and XendCheckpoint.py."""
        return self.store_port
    def getConsolePort(self):
        """For use only by image.py and XendCheckpoint.py"""
        return self.console_port
    def getFeatures(self):
        """For use only by image.py."""
        return self.info['features']
    def getVCpuCount(self):
        """Return the maximum number of VCPUs configured."""
        return self.info['VCPUs_max']
    def setVCpuCount(self, vcpus):
        """Set the number of available VCPUs: updates vcpu_avail (and the
        live domain's xenstore entries), or resizes the stored cpus list
        for an inactive domain.

        @raise XendError: if vcpus <= 0, or exceeds VCPUs_max on a
            running domain.
        """
        def vcpus_valid(n):
            # NOTE(review): validates the closed-over 'vcpus'; the 'n'
            # parameter is unused.
            if vcpus <= 0:
                raise XendError('Zero or less VCPUs is invalid')
            if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
                raise XendError('Cannot set vcpus greater than max vcpus on running domain')
        vcpus_valid(vcpus)

        self.info['vcpu_avail'] = (1 << vcpus) - 1
        if self.domid >= 0:
            self.storeVm('vcpu_avail', self.info['vcpu_avail'])
            self._writeDom(self._vcpuDomDetails())
            self.info['VCPUs_live'] = vcpus
        else:
            if self.info['VCPUs_max'] > vcpus:
                # decreasing
                del self.info['cpus'][vcpus:]
            elif self.info['VCPUs_max'] < vcpus:
                # increasing
                for c in range(self.info['VCPUs_max'], vcpus):
                    self.info['cpus'].append(list())
            self.info['VCPUs_max'] = vcpus
        xen.xend.XendDomain.instance().managed_config_save(self)
        log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
                 vcpus)
    def getMemoryTarget(self):
        """Get this domain's target memory size, in KB."""
        return self.info['memory_dynamic_max'] / 1024
    def getMemoryMaximum(self):
        """Get this domain's maximum memory size, in KB."""
        # remember, info now stores memory in bytes
        return self.info['memory_static_max'] / 1024
    def getResume(self):
        """Return the resume flag as a string."""
        return str(self._resume)
    def setResume(self, isresume):
        """Set the resume flag."""
        self._resume = isresume
    def getCpus(self):
        """Return the per-VCPU cpu affinity list."""
        return self.info['cpus']
    def setCpus(self, cpumap):
        """Set the per-VCPU cpu affinity list."""
        self.info['cpus'] = cpumap
    def getCap(self):
        """Return the scheduler cap from vcpus_params."""
        return self.info['vcpus_params']['cap']
    def setCap(self, cpu_cap):
        """Set the scheduler cap in vcpus_params."""
        self.info['vcpus_params']['cap'] = cpu_cap
    def getWeight(self):
        """Return the scheduler weight from vcpus_params."""
        return self.info['vcpus_params']['weight']
    def setWeight(self, cpu_weight):
        """Set the scheduler weight in vcpus_params."""
        self.info['vcpus_params']['weight'] = cpu_weight
    def getRestartCount(self):
        """Return the restart count read from the /vm xenstore subtree."""
        return self._readVm('xend/restart_count')
    def refreshShutdown(self, xeninfo = None):
        """ Checks the domain for whether a shutdown is required.

        Called from XendDomainInfo and also image.py for HVM images.

        @param xeninfo: optional pre-fetched hypervisor info record;
            fetched via dom_get when omitted.
        """

        # If set at the end of this method, a restart is required, with the
        # given reason. This restart has to be done out of the scope of
        # refresh_shutdown_lock.
        restart_reason = None

        self.refresh_shutdown_lock.acquire()
        try:
            if xeninfo is None:
                xeninfo = dom_get(self.domid)
                if xeninfo is None:
                    # The domain no longer exists. This will occur if we have
                    # scheduled a timer to check for shutdown timeouts and the
                    # shutdown succeeded. It will also occur if someone
                    # destroys a domain beneath us. We clean up the domain,
                    # just in case, but we can't clean up the VM, because that
                    # VM may have migrated to a different domain on this
                    # machine.
                    self.cleanupDomain()
                    self._stateSet(DOM_STATE_HALTED)
                    return

            if xeninfo['dying']:
                # Dying means that a domain has been destroyed, but has not
                # yet been cleaned up by Xen. This state could persist
                # indefinitely if, for example, another domain has some of its
                # pages mapped. We might like to diagnose this problem in the
                # future, but for now all we do is make sure that it's not us
                # holding the pages, by calling cleanupDomain. We can't
                # clean up the VM, as above.
                self.cleanupDomain()
                self._stateSet(DOM_STATE_SHUTDOWN)
                return

            elif xeninfo['crashed']:
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                log.warn('Domain has crashed: name=%s id=%d.',
                         self.info['name_label'], self.domid)
                self._writeVm(LAST_SHUTDOWN_REASON, 'crash')

                restart_reason = 'crash'
                self._stateSet(DOM_STATE_HALTED)

            elif xeninfo['shutdown']:
                self._stateSet(DOM_STATE_SHUTDOWN)
                if self.readDom('xend/shutdown_completed'):
                    # We've seen this shutdown already, but we are preserving
                    # the domain for debugging. Leave it alone.
                    return

                else:
                    reason = shutdown_reason(xeninfo['shutdown_reason'])

                    log.info('Domain has shutdown: name=%s id=%d reason=%s.',
                             self.info['name_label'], self.domid, reason)
                    self._writeVm(LAST_SHUTDOWN_REASON, reason)

                    self._clearRestart()

                    if reason == 'suspend':
                        self._stateSet(DOM_STATE_SUSPENDED)
                        # Don't destroy the domain. XendCheckpoint will do
                        # this once it has finished. However, stop watching
                        # the VM path now, otherwise we will end up with one
                        # watch for the old domain, and one for the new.
                        self._unwatchVm()
                    elif reason in ('poweroff', 'reboot'):
                        restart_reason = reason
                    else:
                        self.destroy()

            elif self.dompath is None:
                # We have yet to manage to call introduceDomain on this
                # domain. This can happen if a restore is in progress, or has
                # failed. Ignore this domain.
                pass
            else:
                # Domain is alive. If we are shutting it down, log a message
                # if it seems unresponsive.
                if xeninfo['paused']:
                    self._stateSet(DOM_STATE_PAUSED)
                else:
                    self._stateSet(DOM_STATE_RUNNING)

                if self.shutdownStartTime:
                    timeout = (SHUTDOWN_TIMEOUT - time.time() +
                               self.shutdownStartTime)
                    if (timeout < 0 and not self.readDom('xend/unresponsive')):
                        log.info(
                            "Domain shutdown timeout expired: name=%s id=%s",
                            self.info['name_label'], self.domid)
                        self.storeDom('xend/unresponsive', 'True')
        finally:
            self.refresh_shutdown_lock.release()

        # Restart outside the lock: _maybeRestart may re-enter this object.
        if restart_reason:
            threading.Thread(target = self._maybeRestart,
                             args = (restart_reason,)).start()
1828 # Restart functions - handling whether we come back up on shutdown.
    def _clearRestart(self):
        """Clear the recorded shutdown start time for this domain."""
        self._removeDom("xend/shutdown_start_time")
1834 def _maybeDumpCore(self, reason):
1835 if reason == 'crash':
1836 if xoptions.get_enable_dump() or self.get_on_crash() \
1837 in ['coredump_and_destroy', 'coredump_and_restart']:
1838 try:
1839 self.dumpCore()
1840 except XendError:
1841 # This error has been logged -- there's nothing more
1842 # we can do in this context.
1843 pass
    def _maybeRestart(self, reason):
        """Take the configured after-shutdown action for the given reason
        ('poweroff', 'reboot' or 'crash'), defaulting to destroy."""
        # Before taking configured action, dump core if configured to do so.
        self._maybeDumpCore(reason)

        # Dispatch to the correct method based upon the configured on_{reason}
        # behaviour.
        actions =  {"destroy"        : self.destroy,
                    "restart"        : self._restart,
                    "preserve"       : self._preserve,
                    "rename-restart" : self._renameRestart,
                    "coredump-destroy" : self.destroy,
                    "coredump-restart" : self._restart}

        action_conf = {
            'poweroff': 'actions_after_shutdown',
            'reboot': 'actions_after_reboot',
            'crash': 'actions_after_crash',
        }

        action_target = self.info.get(action_conf.get(reason))
        func = actions.get(action_target, None)
        if func and callable(func):
            func()
        else:
            self.destroy() # default to destroy
    def _renameRestart(self):
        """Restart the domain, preserving the old one under a new name."""
        self._restart(True)
    def _restart(self, rename = False):
        """Restart the domain after it has exited.

        @param rename True if the old domain is to be renamed and preserved,
        False if it is to be destroyed.
        """
        from xen.xend import XendDomain

        # Guard against restart loops: refuse if a restart marker from a
        # failed previous attempt is still present.
        if self._readVm(RESTART_IN_PROGRESS):
            log.error('Xend failed during restart of domain %s. '
                      'Refusing to restart to avoid loops.',
                      str(self.domid))
            self.destroy()
            return

        old_domid = self.domid
        self._writeVm(RESTART_IN_PROGRESS, 'True')

        # Throttle: refuse restarts that come too soon after the last one.
        now = time.time()
        rst = self._readVm('xend/previous_restart_time')
        if rst:
            rst = float(rst)
            timeout = now - rst
            if timeout < MINIMUM_RESTART_TIME:
                log.error(
                    'VM %s restarting too fast (%f seconds since the last '
                    'restart). Refusing to restart to avoid loops.',
                    self.info['name_label'], timeout)
                self.destroy()
                return

        self._writeVm('xend/previous_restart_time', str(now))

        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            if rename:
                new_dom_info = self._preserveForRestart()
            else:
                self._unwatchVm()
                self.destroy()

            # new_dom's VM will be the same as this domain's VM, except where
            # the rename flag has instructed us to call preserveForRestart.
            # In that case, it is important that we remove the
            # RESTART_IN_PROGRESS node from the new domain, not the old one,
            # once the new one is available.

            new_dom = None
            try:
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                # Carry over the preserved xend/* values to the new VM node.
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
                rst_cnt = new_dom._readVm('xend/restart_count')
                rst_cnt = int(rst_cnt) + 1
                new_dom._writeVm('xend/restart_count', str(rst_cnt))
                new_dom._removeVm(RESTART_IN_PROGRESS)
            except:
                # NOTE: bare except is intentional -- clean up the marker
                # (and any half-created domain) and re-raise.
                if new_dom:
                    new_dom._removeVm(RESTART_IN_PROGRESS)
                    new_dom.destroy()
                else:
                    self._removeVm(RESTART_IN_PROGRESS)
                raise
        except:
            log.exception('Failed to restart domain %s.', str(old_domid))
    def _preserveForRestart(self):
        """Preserve a domain that has been shut down, by giving it a new UUID,
        cloning the VM details, and giving it a new name. This allows us to
        keep this domain for debugging, but restart a new one in its place
        preserving the restart semantics (name and UUID preserved).

        @return: the info dict (carrying the original name/uuid) for the
            replacement domain.
        """

        new_uuid = uuid.createString()
        new_name = 'Domain-%s' % new_uuid
        log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
                 self.info['name_label'], self.domid, self.info['uuid'],
                 new_name, new_uuid)
        self._unwatchVm()
        self._releaseDevices()
        # Remove existing vm node in xenstore
        self._removeVm()
        # The clone keeps the original identity; this (dead) domain takes
        # the freshly generated name/uuid.
        new_dom_info = self.info.copy()
        new_dom_info['name_label'] = self.info['name_label']
        new_dom_info['uuid'] = self.info['uuid']
        self.info['name_label'] = new_name
        self.info['uuid'] = new_uuid
        self.vmpath = XS_VMROOT + new_uuid
        # Write out new vm node to xenstore
        self._storeVmDetails()
        self._preserve()
        return new_dom_info
    def _preserve(self):
        """Keep the dead domain around for debugging: stop watching its
        VM path, mark the shutdown handled, and set state to halted."""
        log.info("Preserving dead domain %s (%d).", self.info['name_label'],
                 self.domid)
        self._unwatchVm()
        self.storeDom('xend/shutdown_completed', 'True')
        self._stateSet(DOM_STATE_HALTED)
1981 # Debugging ..
1984 def dumpCore(self, corefile = None):
1985 """Create a core dump for this domain.
1987 @raise: XendError if core dumping failed.
1988 """
1990 try:
1991 if not corefile:
1992 this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
1993 corefile = "/var/xen/dump/%s-%s.%s.core" % (this_time,
1994 self.info['name_label'], self.domid)
1996 if os.path.isdir(corefile):
1997 raise XendError("Cannot dump core in a directory: %s" %
1998 corefile)
2000 self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
2001 xc.domain_dumpcore(self.domid, corefile)
2002 self._removeVm(DUMPCORE_IN_PROGRESS)
2003 except RuntimeError, ex:
2004 corefile_incomp = corefile+'-incomplete'
2005 os.rename(corefile, corefile_incomp)
2006 self._removeVm(DUMPCORE_IN_PROGRESS)
2007 log.exception("XendDomainInfo.dumpCore failed: id = %s name = %s",
2008 self.domid, self.info['name_label'])
2009 raise XendError("Failed to dump core: %s" % str(ex))
2012 # Device creation/deletion functions
2015 def _createDevice(self, deviceClass, devConfig):
2016 return self.getDeviceController(deviceClass).createDevice(devConfig)
2018 def _waitForDevice(self, deviceClass, devid):
2019 return self.getDeviceController(deviceClass).waitForDevice(devid)
2021 def _waitForDeviceUUID(self, dev_uuid):
2022 deviceClass, config = self.info['devices'].get(dev_uuid)
2023 self._waitForDevice(deviceClass, config['devid'])
2025 def _waitForDevice_destroy(self, deviceClass, devid, backpath):
2026 return self.getDeviceController(deviceClass).waitForDevice_destroy(
2027 devid, backpath)
2029 def _reconfigureDevice(self, deviceClass, devid, devconfig):
2030 return self.getDeviceController(deviceClass).reconfigureDevice(
2031 devid, devconfig)
    def _createDevices(self):
        """Create the devices for a vm.

        Non-vscsi devices are created in self.info's ordered-ref order;
        vscsi devices are collected first and created sorted by devid.

        @raise: VmError for invalid devices
        """
        if self.image:
            self.image.prepareEnvironment()

        vscsi_uuidlist = {}
        vscsi_devidlist = []
        ordered_refs = self.info.ordered_device_refs()
        for dev_uuid in ordered_refs:
            devclass, config = self.info['devices'][dev_uuid]
            if devclass in XendDevices.valid_devices() and devclass != 'vscsi':
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)

                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

            elif devclass == 'vscsi':
                # Defer vscsi creation; just remember devid -> uuid here.
                vscsi_config = config.get('devs', [])[0]
                devid = vscsi_config.get('devid', '')
                dev_uuid = config.get('uuid')
                vscsi_uuidlist[devid] = dev_uuid
                vscsi_devidlist.append(devid)

        # Create vscsi devices sorted by devid so the guest sees stable
        # /dev/sdXX ordering.
        if len(vscsi_uuidlist) > 0:
            vscsi_devidlist.sort()
            for vscsiid in vscsi_devidlist:
                dev_uuid = vscsi_uuidlist[vscsiid]
                devclass, config = self.info['devices'][dev_uuid]
                log.info("createDevice: %s : %s" % (devclass, scrub_password(config)))
                dev_uuid = config.get('uuid')
                devid = self._createDevice(devclass, config)
                # store devid in XendConfig for caching reasons
                if dev_uuid in self.info['devices']:
                    self.info['devices'][dev_uuid][1]['devid'] = devid

        if self.image:
            self.image.createDeviceModel()

        #if have pass-through devs, need the virtual pci slots info from qemu
        self.sync_pcidev_info()
    def _releaseDevices(self, suspend = False):
        """Release all domain's devices. Nothrow guarantee.

        NOTE(review): the 'suspend' parameter is not referenced in this
        body -- kept for interface compatibility with callers.
        """
        if self.image:
            try:
                log.debug("Destroying device model")
                self.image.destroyDeviceModel()
            except Exception, e:
                # Best-effort: log and continue releasing the other devices.
                log.exception("Device model destroy failed %s" % str(e))
        else:
            log.debug("No device model")

        log.debug("Releasing devices")
        t = xstransact("%s/device" % self.dompath)
        try:
            for devclass in XendDevices.valid_devices():
                for dev in t.list(devclass):
                    try:
                        true_devclass = devclass
                        if devclass == 'vbd':
                            # In the case of "vbd", the true device class
                            # may possibly be "tap". Just in case, verify
                            # device class.
                            devid = dev.split('/')[-1]
                            true_devclass = self.getBlockDeviceClass(devid)
                        log.debug("Removing %s", dev);
                        self.destroyDevice(true_devclass, dev, False);
                    except:
                        # Log and swallow any exceptions in removal --
                        # there's nothing more we can do.
                        log.exception("Device release failed: %s; %s; %s",
                                      self.info['name_label'],
                                      true_devclass, dev)
        finally:
            # The transaction was only used for listing; never commit it.
            t.abort()
2117 def getDeviceController(self, name):
2118 """Get the device controller for this domain, and if it
2119 doesn't exist, create it.
2121 @param name: device class name
2122 @type name: string
2123 @rtype: subclass of DevController
2124 """
2125 if name not in self._deviceControllers:
2126 devController = XendDevices.make_controller(name, self)
2127 if not devController:
2128 raise XendError("Unknown device type: %s" % name)
2129 self._deviceControllers[name] = devController
2131 return self._deviceControllers[name]
2134 # Migration functions (public)
2137 def testMigrateDevices(self, network, dst):
2138 """ Notify all device about intention of migration
2139 @raise: XendError for a device that cannot be migrated
2140 """
2141 for (n, c) in self.info.all_devices_sxpr():
2142 rc = self.migrateDevice(n, c, network, dst, DEV_MIGRATE_TEST, self.getName())
2143 if rc != 0:
2144 raise XendError("Device of type '%s' refuses migration." % n)
    def migrateDevices(self, network, dst, step, domName=''):
        """Notify the devices about migration

        On failure, rolls back: the first 'ctr' devices (those already
        notified for this step) are recovered at 'step'; once the counter
        passes zero, the remaining devices are recovered at 'step - 1'.
        The original exception is then re-raised.
        """
        ctr = 0
        try:
            for (dev_type, dev_conf) in self.info.all_devices_sxpr():
                self.migrateDevice(dev_type, dev_conf, network, dst,
                                   step, domName)
                ctr = ctr + 1
        except:
            for dev_type, dev_conf in self.info.all_devices_sxpr():
                # 'step' is decremented exactly once, on the iteration
                # where ctr reaches zero (ctr goes negative afterwards).
                if ctr == 0:
                    step = step - 1
                ctr = ctr - 1
                self._recoverMigrateDevice(dev_type, dev_conf, network,
                                           dst, step, domName)
            raise
2164 def migrateDevice(self, deviceClass, deviceConfig, network, dst,
2165 step, domName=''):
2166 return self.getDeviceController(deviceClass).migrate(deviceConfig,
2167 network, dst, step, domName)
2169 def _recoverMigrateDevice(self, deviceClass, deviceConfig, network,
2170 dst, step, domName=''):
2171 return self.getDeviceController(deviceClass).recover_migrate(
2172 deviceConfig, network, dst, step, domName)
2175 ## private:
    def _constructDomain(self):
        """Construct the domain.

        Creates the domain in the hypervisor, sets its basic HVM/timer/PCI
        parameters, and registers it with XendDomain.

        @raise: VmError on error
        """

        log.debug('XendDomainInfo.constructDomain')

        self.shutdownStartTime = None

        hap = 0
        hvm = self.info.is_hvm()
        if hvm:
            hap = self.info.is_hap()
            info = xc.xeninfo()
            if 'hvm' not in info['xen_caps']:
                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
                              "supported by your CPU and enabled in your "
                              "BIOS?")

        # Hack to pre-reserve some memory for initial domain creation.
        # There is an implicit memory overhead for any domain creation. This
        # overhead is greater for some types of domain than others. For
        # example, an x86 HVM domain will have a default shadow-pagetable
        # allocation of 1MB. We free up 2MB here to be on the safe side.
        balloon.free(2*1024, self) # 2MB should be plenty

        ssidref = 0
        if security.on() == xsconstants.XS_POLICY_USE:
            ssidref = security.calc_dom_ssidref_from_info(self.info)
            if security.has_authorization(ssidref) == False:
                raise VmError("VM is not authorized to run.")

        try:
            self.domid = xc.domain_create(
                domid = 0,
                ssidref = ssidref,
                handle = uuid.fromString(self.info['uuid']),
                flags = (int(hvm) << 0) | (int(hap) << 1),
                target = self.info.target())
        except Exception, e:
            # may get here if due to ACM the operation is not permitted
            if security.on() == xsconstants.XS_POLICY_ACM:
                raise VmError('Domain in conflict set with running domain?')
            # NOTE(review): non-ACM creation failures are swallowed here;
            # they are only caught indirectly by the domid check below.

        if self.domid < 0:
            raise VmError('Creating domain failed: name=%s' %
                          self.info['name_label'])

        self.dompath = GetDomainPath(self.domid)

        self._recreateDom()

        # Set timer configuration of domain
        timer_mode = self.info["platform"].get("timer_mode")
        if hvm and timer_mode is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_TIMER_MODE,
                             long(timer_mode))

        # Set Viridian interface configuration of domain
        viridian = self.info["platform"].get("viridian")
        if arch.type == "x86" and hvm and viridian is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VIRIDIAN, long(viridian))

        # Optionally enable virtual HPET
        hpet = self.info["platform"].get("hpet")
        if hvm and hpet is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_HPET_ENABLED,
                             long(hpet))

        # Optionally enable periodic vpt aligning
        vpt_align = self.info["platform"].get("vpt_align")
        if hvm and vpt_align is not None:
            xc.hvm_set_param(self.domid, HVM_PARAM_VPT_ALIGN,
                             long(vpt_align))

        # Set maximum number of vcpus in domain
        xc.domain_max_vcpus(self.domid, int(self.info['VCPUs_max']))

        # Test whether the devices can be assigned with VT-d
        pci = self.info["platform"].get("pci")
        pci_str = ''
        if pci and len(pci) > 0:
            pci = map(lambda x: x[0:4], pci) # strip options
            pci_str = str(pci)
        if hvm and pci_str:
            bdf = xc.test_assign_device(self.domid, pci_str)
            if bdf != 0:
                if bdf == -1:
                    raise VmError("failed to assign device: maybe the platform"
                                  " doesn't support VT-d, or VT-d isn't enabled"
                                  " properly?")
                # Decode bus/device/function from the packed BDF value.
                bus = (bdf >> 16) & 0xff
                devfn = (bdf >> 8) & 0xff
                dev = (devfn >> 3) & 0x1f
                func = devfn & 0x7
                raise VmError("fail to assign device(%x:%x.%x): maybe it has"
                              " already been assigned to other domain, or maybe"
                              " it doesn't exist." % (bus, dev, func))

        # register the domain in the list
        from xen.xend import XendDomain
        XendDomain.instance().add_domain(self)
    def _introduceDomain(self):
        """Introduce the domain to xenstore (store ring + event channel).

        @raise: XendError wrapping any xenstore RuntimeError
        """
        assert self.domid is not None
        assert self.store_mfn is not None
        assert self.store_port is not None

        try:
            IntroduceDomain(self.domid, self.store_mfn, self.store_port)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setTarget(self, target):
        """Set this domain's stub-domain target and record it in xenstore.

        @raise: XendError wrapping any RuntimeError from the hypercall
        """
        assert self.domid is not None

        try:
            SetTarget(self.domid, target)
            self.storeDom('target', target)
        except RuntimeError, exn:
            raise XendError(str(exn))
    def _setCPUAffinity(self):
        """ Repin domain vcpus if a restricted cpus list is provided

        If no explicit cpus list is configured and the host has more than
        one NUMA node, pick the least-loaded node with enough free memory
        and pin all vcpus to that node's cpus.
        """

        def has_cpus():
            # True if any vcpu has a non-empty restriction list.
            if self.info['cpus'] is not None:
                for c in self.info['cpus']:
                    if c:
                        return True
            return False

        if has_cpus():
            for v in range(0, self.info['VCPUs_max']):
                if self.info['cpus'][v]:
                    xc.vcpu_setaffinity(self.domid, v, self.info['cpus'][v])
        else:
            def find_relaxed_node(node_list):
                # NOTE: closes over 'info', which is bound *after* this
                # definition (info = xc.physinfo() below).
                import sys
                nr_nodes = info['nr_nodes']
                if node_list is None:
                    node_list = range(0, nr_nodes)
                nodeload = [0]
                nodeload = nodeload * nr_nodes
                from xen.xend import XendDomain
                doms = XendDomain.instance().list('all')
                # Count, per node, how many other domains' online vcpus can
                # run on that node's cpus.
                for dom in filter (lambda d: d.domid != self.domid, doms):
                    cpuinfo = dom.getVCPUInfo()
                    for vcpu in sxp.children(cpuinfo, 'vcpu'):
                        if sxp.child_value(vcpu, 'online') == 0: continue
                        cpumap = list(sxp.child_value(vcpu,'cpumap'))
                        for i in range(0, nr_nodes):
                            node_cpumask = info['node_to_cpu'][i]
                            for j in node_cpumask:
                                if j in cpumap:
                                    nodeload[i] += 1
                                    break
                # Normalise load by node size; exclude empty or
                # non-candidate nodes by giving them an infinite load.
                for i in range(0, nr_nodes):
                    if len(info['node_to_cpu'][i]) > 0 and i in node_list:
                        nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
                    else:
                        nodeload[i] = sys.maxint
                index = nodeload.index( min(nodeload) )
                return index

            info = xc.physinfo()
            if info['nr_nodes'] > 1:
                node_memory_list = info['node_to_memory']
                # Memory needed for the domain, in KiB.
                needmem = self.image.getRequiredAvailableMemory(self.info['memory_dynamic_max']) / 1024
                candidate_node_list = []
                for i in range(0, info['nr_nodes']):
                    if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
                        candidate_node_list.append(i)
                index = find_relaxed_node(candidate_node_list)
                cpumask = info['node_to_cpu'][index]
                for v in range(0, self.info['VCPUs_max']):
                    xc.vcpu_setaffinity(self.domid, v, cpumask)
    def _initDomain(self):
        """Initialise the freshly constructed domain: bootloader, image,
        CPU affinity, memory/shadow allocation, channels and devices.

        @raise: VmError on any failure (RuntimeErrors are wrapped)
        """
        log.debug('XendDomainInfo.initDomain: %s %s',
                  self.domid,
                  self.info['vcpus_params']['weight'])

        self._configureBootloader()

        try:
            # Local-time guests need an RTC offset matching the host zone
            # (index 8 of localtime() is the DST flag).
            if self.info['platform'].get('localtime', 0):
                if time.localtime(time.time())[8]:
                    self.info['platform']['rtc_timeoffset'] = -time.altzone
                else:
                    self.info['platform']['rtc_timeoffset'] = -time.timezone

            self.image = image.create(self, self.info)

            # repin domain vcpus if a restricted cpus list is provided
            # this is done prior to memory allocation to aide in memory
            # distribution for NUMA systems.
            self._setCPUAffinity()

            # Use architecture- and image-specific calculations to determine
            # the various headrooms necessary, given the raw configured
            # values. maxmem, memory, and shadow are all in KiB.
            # but memory_static_max etc are all stored in bytes now.
            memory = self.image.getRequiredAvailableMemory(
                self.info['memory_dynamic_max'] / 1024)
            maxmem = self.image.getRequiredAvailableMemory(
                self.info['memory_static_max'] / 1024)
            shadow = self.image.getRequiredShadowMemory(
                self.info['shadow_memory'] * 1024,
                self.info['memory_static_max'] / 1024)

            log.debug("_initDomain:shadow_memory=0x%x, memory_static_max=0x%x, memory_static_min=0x%x.", self.info['shadow_memory'], self.info['memory_static_max'], self.info['memory_static_min'],)
            # Round shadow up to a multiple of a MiB, as shadow_mem_control
            # takes MiB and we must not round down and end up under-providing.
            shadow = ((shadow + 1023) / 1024) * 1024

            # set memory limit
            xc.domain_setmaxmem(self.domid, maxmem)

            # Reserve 1 page per MiB of RAM for separate VT-d page table.
            vtd_mem = 4 * (self.info['memory_static_max'] / 1024 / 1024)
            # Round vtd_mem up to a multiple of a MiB.
            vtd_mem = ((vtd_mem + 1023) / 1024) * 1024

            # Make sure there's enough RAM available for the domain
            balloon.free(memory + shadow + vtd_mem, self)

            # Set up the shadow memory
            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
            self.info['shadow_memory'] = shadow_cur

            # machine address size
            if self.info.has_key('machine_address_size'):
                log.debug("_initDomain: setting maximum machine address size %d" % self.info['machine_address_size'])
                xc.domain_set_machine_address_size(self.domid, self.info['machine_address_size'])

            if self.info.has_key('suppress_spurious_page_faults') and self.info['suppress_spurious_page_faults']:
                log.debug("_initDomain: suppressing spurious page faults")
                xc.domain_suppress_spurious_page_faults(self.domid)

            self._createChannels()

            channel_details = self.image.createImage()

            self.store_mfn = channel_details['store_mfn']
            if 'console_mfn' in channel_details:
                self.console_mfn = channel_details['console_mfn']
            if 'notes' in channel_details:
                self.info.set_notes(channel_details['notes'])
            if 'native_protocol' in channel_details:
                self.native_protocol = channel_details['native_protocol'];

            self._introduceDomain()
            if self.info.target():
                self._setTarget(self.info.target())

            self._createDevices()

            self.image.cleanupBootloading()

            self.info['start_time'] = time.time()

            self._stateSet(DOM_STATE_RUNNING)
        except VmError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise exn
        except RuntimeError, exn:
            log.exception("XendDomainInfo.initDomain: exception occurred")
            if self.image:
                self.image.cleanupBootloading()
            raise VmError(str(exn))
    def cleanupDomain(self):
        """Cleanup domain resources; release devices. Idempotent. Nothrow
        guarantee."""

        self.refresh_shutdown_lock.acquire()
        try:
            self.unwatchShutdown()
            self._releaseDevices()
            bootloader_tidy(self)

            if self.image:
                self.image = None

            try:
                self._removeDom()
            except:
                log.exception("Removing domain path failed.")

            self._stateSet(DOM_STATE_HALTED)
            self.domid = None # Do not push into _stateSet()!
        finally:
            self.refresh_shutdown_lock.release()
    def unwatchShutdown(self):
        """Remove the watch on the domain's control/shutdown node, if any.
        Idempotent. Nothrow guarantee. Expects to be protected by the
        refresh_shutdown_lock."""

        try:
            try:
                if self.shutdownWatch:
                    self.shutdownWatch.unwatch()
            finally:
                # Drop the reference even if unwatch() raised.
                self.shutdownWatch = None
        except:
            log.exception("Unwatching control/shutdown failed.")
2494 def waitForShutdown(self):
2495 self.state_updated.acquire()
2496 try:
2497 while self._stateGet() in (DOM_STATE_RUNNING,DOM_STATE_PAUSED):
2498 self.state_updated.wait(timeout=1.0)
2499 finally:
2500 self.state_updated.release()
2503 # TODO: recategorise - called from XendCheckpoint
    def completeRestore(self, store_mfn, console_mfn):
        """Finish an incoming restore: reattach the store/console rings,
        recreate the device model, and re-register watches.

        Called from XendCheckpoint once the memory image is in place.
        """

        log.debug("XendDomainInfo.completeRestore")

        self.store_mfn = store_mfn
        self.console_mfn = console_mfn

        self._introduceDomain()
        self.image = image.create(self, self.info)
        if self.image:
            # True flags a restore to the device model -- presumably so it
            # reconnects rather than cold-boots; confirm in image.py.
            self.image.createDeviceModel(True)
        self._storeDomDetails()
        self._registerWatches()
        self.refreshShutdown()

        log.debug("XendDomainInfo.completeRestore done")
    def _endRestore(self):
        """Mark the end of a restore: clear the domain's resume flag."""
        self.setResume(False)
2528 # VM Destroy
    def _prepare_phantom_paths(self):
        """Collect xenstore paths of phantom vbds (and their frontends)
        so they can be removed after the normal devices.

        @return: possibly-empty list of backend/frontend phantom paths
        """
        # get associated devices to destroy
        # build list of phantom devices to be removed after normal devices
        plist = []
        if self.domid is not None:
            t = xstransact("%s/device/vbd" % GetDomainPath(self.domid))
            try:
                for dev in t.list():
                    backend_phantom_vbd = xstransact.Read("%s/device/vbd/%s/phantom_vbd" \
                                          % (self.dompath, dev))
                    if backend_phantom_vbd is not None:
                        frontend_phantom_vbd = xstransact.Read("%s/frontend" \
                                          % backend_phantom_vbd)
                        plist.append(backend_phantom_vbd)
                        plist.append(frontend_phantom_vbd)
            finally:
                # Read-only transaction: always abort, never commit.
                t.abort()
        return plist
2550 def _cleanup_phantom_devs(self, plist):
2551 # remove phantom devices
2552 if not plist == []:
2553 time.sleep(2)
2554 for paths in plist:
2555 if paths.find('backend') != -1:
2556 # Modify online status /before/ updating state (latter is watched by
2557 # drivers, so this ordering avoids a race).
2558 xstransact.Write(paths, 'online', "0")
2559 xstransact.Write(paths, 'state', str(xenbusState['Closing']))
2560 # force
2561 xstransact.Remove(paths)
    def destroy(self):
        """Cleanup VM and destroy domain. Nothrow guarantee."""

        if self.domid is None:
            return

        from xen.xend import XendDomain
        log.debug("XendDomainInfo.destroy: domid=%s", str(self.domid))

        # Phantom vbd paths must be gathered before the domain goes away.
        paths = self._prepare_phantom_paths()

        if self.dompath is not None:
            try:
                xc.domain_destroy_hook(self.domid)
                xc.domain_pause(self.domid)
                do_FLR(self.domid)
                xc.domain_destroy(self.domid)
                for state in DOM_STATES_OLD:
                    self.info[state] = 0
                self._stateSet(DOM_STATE_HALTED)
            except:
                # Nothrow: log and carry on with the bookkeeping cleanup.
                log.exception("XendDomainInfo.destroy: domain destruction failed.")

            XendDomain.instance().remove_domain(self)
            self.cleanupDomain()

        self._cleanup_phantom_devs(paths)
        self._cleanupVm()

        # Transient domains are deleted entirely once destroyed.
        if "transient" in self.info["other_config"] \
           and bool(self.info["other_config"]["transient"]):
            XendDomain.instance().domain_delete_by_dominfo(self)
    def resetDomain(self):
        """Hard reset: destroy the domain and recreate it from the same
        config, carrying over the xend/* VM bookkeeping keys. Nothrow."""
        log.debug("XendDomainInfo.resetDomain(%s)", str(self.domid))

        old_domid = self.domid
        # Snapshot the xend/* subtree before the VM node is removed.
        prev_vm_xend = self._listRecursiveVm('xend')
        new_dom_info = self.info
        try:
            self._unwatchVm()
            self.destroy()

            new_dom = None
            try:
                from xen.xend import XendDomain
                new_dom_info['domid'] = None
                new_dom = XendDomain.instance().domain_create_from_dict(
                    new_dom_info)
                for x in prev_vm_xend[0][1]:
                    new_dom._writeVm('xend/%s' % x[0], x[1])
                new_dom.waitForDevices()
                new_dom.unpause()
            except:
                if new_dom:
                    new_dom.destroy()
                raise
        except:
            log.exception('Failed to reset domain %s.', str(old_domid))
    def resumeDomain(self):
        """Resume a suspended domain; safe to call from anywhere since it
        first verifies the domain really is suspended. Nothrow-ish: resume
        failures are logged, then the device model is resumed regardless."""
        log.debug("XendDomainInfo.resumeDomain(%s)", str(self.domid))

        # resume a suspended domain (e.g. after live checkpoint, or after
        # a later error during save or migrate); checks that the domain
        # is currently suspended first so safe to call from anywhere
        xeninfo = dom_get(self.domid)
        if xeninfo is None:
            return
        if not xeninfo['shutdown']:
            return
        reason = shutdown_reason(xeninfo['shutdown_reason'])
        if reason != 'suspend':
            return

        try:
            # could also fetch a parsed note from xenstore
            fast = self.info.get_notes().get('SUSPEND_CANCEL') and 1 or 0
            if not fast:
                # Slow path: fully tear down and recreate channels/devices.
                self._releaseDevices()
                self.testDeviceComplete()
                self.testvifsComplete()
                log.debug("XendDomainInfo.resumeDomain: devices released")

                self._resetChannels()

                self._removeDom('control/shutdown')
                self._removeDom('device-misc/vif/nextDeviceID')

                self._createChannels()
                self._introduceDomain()
                self._storeDomDetails()

                self._createDevices()
                log.debug("XendDomainInfo.resumeDomain: devices created")

            xc.domain_resume(self.domid, fast)
            ResumeDomain(self.domid)
        except:
            log.exception("XendDomainInfo.resume: xc.domain_resume failed on domain %s." % (str(self.domid)))
        self.image.resumeDeviceModel()
        log.debug("XendDomainInfo.resumeDomain: completed")
2671 # Channels for xenstore and console
    def _createChannels(self):
        """Create the channels to the domain.
        """
        # One unbound event channel each for the xenstore and console rings.
        self.store_port = self._createChannel()
        self.console_port = self._createChannel()
    def _createChannel(self):
        """Create an event channel to the domain.

        @return: the allocated local port, or (implicitly) None when
            self.domid is None -- the conditional has no else branch.
        """
        try:
            if self.domid != None:
                return xc.evtchn_alloc_unbound(domid = self.domid,
                                               remote_dom = 0)
        except:
            log.exception("Exception in alloc_unbound(%s)", str(self.domid))
            raise
2692 def _resetChannels(self):
2693 """Reset all event channels in the domain.
2694 """
2695 try:
2696 if self.domid != None:
2697 return xc.evtchn_reset(dom = self.domid)
2698 except:
2699 log.exception("Exception in evtcnh_reset(%s)", str(self.domid))
2700 raise
2704 # Bootloader configuration
    def _configureBootloader(self):
        """Run the bootloader if we're configured to do so.

        HVM and direct-kernel PV boots are no-ops here; otherwise the
        configured bootloader (default pygrub) is run against the first
        bootable disk and its output merged into self.info.

        @raise: VmError if no disk is bootable or the bootloader found none
        """

        blexec = self.info['PV_bootloader']
        bootloader_args = self.info['PV_bootloader_args']
        kernel = self.info['PV_kernel']
        ramdisk = self.info['PV_ramdisk']
        args = self.info['PV_args']
        boot = self.info['HVM_boot_policy']

        if boot:
            # HVM booting.
            pass
        elif not blexec and kernel:
            # Boot from dom0. Nothing left to do -- the kernel and ramdisk
            # will be picked up by image.py.
            pass
        else:
            # Boot using bootloader
            if not blexec or blexec == 'pygrub':
                blexec = osdep.pygrub_path

            blcfg = None
            disks = [x for x in self.info['vbd_refs']
                     if self.info['devices'][x][1]['bootable']]

            if not disks:
                msg = "Had a bootloader specified, but no disks are bootable"
                log.error(msg)
                raise VmError(msg)

            devinfo = self.info['devices'][disks[0]]
            devtype = devinfo[0]
            disk = devinfo[1]['uname']

            fn = blkdev_uname_to_file(disk)
            taptype = blkdev_uname_to_taptype(disk)
            # st_rdev == 0 means a plain file rather than a device node.
            mounted = devtype == 'tap' and taptype != 'aio' and taptype != 'sync' and not os.stat(fn).st_rdev
            if mounted:
                # This is a file, not a device. pygrub can cope with a
                # file if it's raw, but if it's QCOW or other such formats
                # used through blktap, then we need to mount it first.

                log.info("Mounting %s on %s." %
                         (fn, BOOTLOADER_LOOPBACK_DEVICE))

                vbd = {
                    'mode': 'RO',
                    'device': BOOTLOADER_LOOPBACK_DEVICE,
                    }

                from xen.xend import XendDomain
                dom0 = XendDomain.instance().privilegedDomain()
                dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
                fn = BOOTLOADER_LOOPBACK_DEVICE

            try:
                blcfg = bootloader(blexec, fn, self, False,
                                   bootloader_args, kernel, ramdisk, args)
            finally:
                # Always unmount the loopback device, even on failure.
                if mounted:
                    log.info("Unmounting %s from %s." %
                             (fn, BOOTLOADER_LOOPBACK_DEVICE))

                    dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)

            if blcfg is None:
                msg = "Had a bootloader specified, but can't find disk"
                log.error(msg)
                raise VmError(msg)

            self.info.update_with_image_sxp(blcfg, True)
2782 # VM Functions
    def _readVMDetails(self, params):
        """Read the specified parameters from the store.

        @param params: parameter spec forwarded verbatim to _gatherVm
        @return: the gathered values, or [] if the store data is corrupt
        """
        try:
            return self._gatherVm(*params)
        except ValueError:
            # One of the int/float entries in params has a corresponding store
            # entry that is invalid. We recover, because older versions of
            # Xend may have put the entry there (memory/target, for example),
            # but this is in general a bad situation to have reached.
            log.exception(
                "Store corrupted at %s! Domain %d's configuration may be "
                "affected.", self.vmpath, self.domid)
            return []
    def _cleanupVm(self):
        """Cleanup VM resources. Idempotent. Nothrow guarantee."""

        self._unwatchVm()

        try:
            self._removeVm()
        except:
            log.exception("Removing VM path failed.")
    def checkLiveMigrateMemory(self):
        """ Make sure there's enough memory to migrate this domain """
        overhead_kb = 0
        if arch.type == "x86":
            # 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
            # the minimum that Xen would allocate if no value were given.
            overhead_kb = self.info['VCPUs_max'] * 1024 + \
                          (self.info['memory_static_max'] / 1024 / 1024) * 4
            # Round up to a whole MiB.
            overhead_kb = ((overhead_kb + 1023) / 1024) * 1024
            # The domain might already have some shadow memory
            overhead_kb -= xc.shadow_mem_control(self.domid) * 1024
        if overhead_kb > 0:
            balloon.free(overhead_kb, self)
    def _unwatchVm(self):
        """Remove the watch on the VM path, if any. Idempotent. Nothrow
        guarantee."""
        try:
            try:
                if self.vmWatch:
                    self.vmWatch.unwatch()
            finally:
                # Drop the reference even if unwatch() raised.
                self.vmWatch = None
        except:
            log.exception("Unwatching VM path failed.")
2837 def testDeviceComplete(self):
2838 """ For Block IO migration safety we must ensure that
2839 the device has shutdown correctly, i.e. all blocks are
2840 flushed to disk
2841 """
2842 start = time.time()
2843 while True:
2844 test = 0
2845 diff = time.time() - start
2846 for i in self.getDeviceController('vbd').deviceIDs():
2847 test = 1
2848 log.info("Dev %s still active, looping...", i)
2849 time.sleep(0.1)
2851 if test == 0:
2852 break
2853 if diff >= MIGRATE_TIMEOUT:
2854 log.info("Dev still active but hit max loop timeout")
2855 break
2857 def testvifsComplete(self):
2858 """ In case vifs are released and then created for the same
2859 domain, we need to wait the device shut down.
2860 """
2861 start = time.time()
2862 while True:
2863 test = 0
2864 diff = time.time() - start
2865 for i in self.getDeviceController('vif').deviceIDs():
2866 test = 1
2867 log.info("Dev %s still active, looping...", i)
2868 time.sleep(0.1)
2870 if test == 0:
2871 break
2872 if diff >= MIGRATE_TIMEOUT:
2873 log.info("Dev still active but hit max loop timeout")
2874 break
    def _storeVmDetails(self):
        """Write the legacy VM parameters to the /vm/<uuid> xenstore record
        and then restrict its permissions."""
        to_store = {}

        for key in XendConfig.LEGACY_XENSTORE_VM_PARAMS:
            info_key = XendConfig.LEGACY_CFG_TO_XENAPI_CFG.get(key, key)
            if self._infoIsSet(info_key):
                to_store[key] = str(self.info[info_key])

        if self._infoIsSet("static_memory_min"):
            to_store["memory"] = str(self.info["static_memory_min"])
        if self._infoIsSet("static_memory_max"):
            to_store["maxmem"] = str(self.info["static_memory_max"])

        image_sxpr = self.info.image_sxpr()
        if image_sxpr:
            to_store['image'] = sxp.to_string(image_sxpr)

        # Initialise the restart counter only once, on first store.
        if not self._readVm('xend/restart_count'):
            to_store['xend/restart_count'] = str(0)

        log.debug("Storing VM details: %s", scrub_password(to_store))

        self._writeVm(to_store)
        self._setVmPermissions()
    def _setVmPermissions(self):
        """Allow the guest domain to read its UUID. We don't allow it to
        access any other entry, for security."""
        # Read-only for the guest; dom0/xend retain full access.
        xstransact.SetPermissions('%s/uuid' % self.vmpath,
                                  { 'dom' : self.domid,
                                    'read' : True,
                                    'write' : False })
2910 # Utility functions
2913 def __getattr__(self, name):
2914 if name == "state":
2915 log.warn("Somebody tried to read XendDomainInfo.state... should us _stateGet()!!!")
2916 log.warn("".join(traceback.format_stack()))
2917 return self._stateGet()
2918 else:
2919 raise AttributeError(name)
2921 def __setattr__(self, name, value):
2922 if name == "state":
2923 log.warn("Somebody tried to set XendDomainInfo.state... should us _stateGet()!!!")
2924 log.warn("".join(traceback.format_stack()))
2925 self._stateSet(value)
2926 else:
2927 self.__dict__[name] = value
    def _stateSet(self, state):
        """Record a power-state transition: wake any waiters and emit a
        Xen-API event, but only when the observed live state differs."""
        self.state_updated.acquire()
        try:
            # TODO Not sure this is correct...
            # _stateGet is live now. Why not fire event
            # even when it hasn't changed?
            if self._stateGet() != state:
                self.state_updated.notifyAll()
                import XendAPI
                XendAPI.event_dispatch('mod', 'VM', self.info['uuid'],
                                       'power_state')
        finally:
            self.state_updated.release()
    def _stateGet(self):
        """Reconstitute the Xen-API power state live from the hypervisor.

        @return: one of the XEN_API_VM_POWER_STATE_* constants
        """
        # Lets try and reconsitute the state from xc
        # first lets try and get the domain info
        # from xc - this will tell us if the domain
        # exists
        info = dom_get(self.getDomid())
        if info is None or info['shutdown']:
            # We are either HALTED or SUSPENDED
            # check saved image exists
            from xen.xend import XendDomain
            managed_config_path = \
                XendDomain.instance()._managed_check_point_path( \
                    self.get_uuid())
            if os.path.exists(managed_config_path):
                return XEN_API_VM_POWER_STATE_SUSPENDED
            else:
                return XEN_API_VM_POWER_STATE_HALTED
        elif info['crashed']:
            # Crashed
            return XEN_API_VM_POWER_STATE_CRASHED
        else:
            # We are either RUNNING or PAUSED
            if info['paused']:
                return XEN_API_VM_POWER_STATE_PAUSED
            else:
                return XEN_API_VM_POWER_STATE_RUNNING
2970 def _infoIsSet(self, name):
2971 return name in self.info and self.info[name] is not None
2973 def _checkName(self, name):
2974 """Check if a vm name is valid. Valid names contain alphabetic
2975 characters, digits, or characters in '_-.:/+'.
2976 The same name cannot be used for more than one vm at the same time.
2978 @param name: name
2979 @raise: VmError if invalid
2980 """
2981 from xen.xend import XendDomain
2983 if name is None or name == '':
2984 raise VmError('Missing VM Name')
2986 if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name):
2987 raise VmError('Invalid VM Name')
2989 dom = XendDomain.instance().domain_lookup_nr(name)
2990 if dom and dom.info['uuid'] != self.info['uuid']:
2991 raise VmError("VM name '%s' already exists%s" %
2992 (name,
2993 dom.domid is not None and
2994 (" as domain %s" % str(dom.domid)) or ""))
    def update(self, info = None, refresh = True, transaction = None):
        """Update with info from xc.domain_getinfo().

        @param info: pre-fetched domain info dict; fetched live if None
        @param refresh: also re-evaluate shutdown state when True
        @param transaction: passed through to _update_consoles
        """
        log.trace("XendDomainInfo.update(%s) on domain %s", info,
                  str(self.domid))

        if not info:
            info = dom_get(self.domid)
            if not info:
                return

        # A negative maxmem means "unlimited"; substitute total host memory.
        if info["maxmem_kb"] < 0:
            info["maxmem_kb"] = XendNode.instance() \
                                .physinfo_dict()['total_memory'] * 1024

        # make sure state is reset for info
        # TODO: we should eventually get rid of old_dom_states

        self.info.update_config(info)
        self._update_consoles(transaction)

        if refresh:
            self.refreshShutdown(info)

        log.trace("XendDomainInfo.update done on domain %s: %s",
                  str(self.domid), self.info)
3024 def sxpr(self, ignore_store = False, legacy_only = True):
3025 result = self.info.to_sxp(domain = self,
3026 ignore_devices = ignore_store,
3027 legacy_only = legacy_only)
3029 return result
3031 # Xen API
3032 # ----------------------------------------------------------------
3034 def get_uuid(self):
3035 dom_uuid = self.info.get('uuid')
3036 if not dom_uuid: # if it doesn't exist, make one up
3037 dom_uuid = uuid.createString()
3038 self.info['uuid'] = dom_uuid
3039 return dom_uuid
3041 def get_memory_static_max(self):
3042 return self.info.get('memory_static_max', 0)
3043 def get_memory_static_min(self):
3044 return self.info.get('memory_static_min', 0)
3045 def get_memory_dynamic_max(self):
3046 return self.info.get('memory_dynamic_max', 0)
3047 def get_memory_dynamic_min(self):
3048 return self.info.get('memory_dynamic_min', 0)
3050 # only update memory-related config values if they maintain sanity
3051 def _safe_set_memory(self, key, newval):
3052 oldval = self.info.get(key, 0)
3053 try:
3054 self.info[key] = newval
3055 self.info._memory_sanity_check()
3056 except Exception, ex:
3057 self.info[key] = oldval
3058 raise
3060 def set_memory_static_max(self, val):
3061 self._safe_set_memory('memory_static_max', val)
3062 def set_memory_static_min(self, val):
3063 self._safe_set_memory('memory_static_min', val)
3064 def set_memory_dynamic_max(self, val):
3065 self._safe_set_memory('memory_dynamic_max', val)
3066 def set_memory_dynamic_min(self, val):
3067 self._safe_set_memory('memory_dynamic_min', val)
3069 def get_vcpus_params(self):
3070 if self.getDomid() is None:
3071 return self.info['vcpus_params']
3073 retval = xc.sched_credit_domain_get(self.getDomid())
3074 return retval
3075 def get_power_state(self):
3076 return XEN_API_VM_POWER_STATE[self._stateGet()]
3077 def get_platform(self):
3078 return self.info.get('platform', {})
3079 def get_pci_bus(self):
3080 return self.info.get('pci_bus', '')
3081 def get_tools_version(self):
3082 return self.info.get('tools_version', {})
3083 def get_metrics(self):
3084 return self.metrics.get_uuid();
3087 def get_security_label(self, xspol=None):
3088 import xen.util.xsm.xsm as security
3089 label = security.get_security_label(self, xspol)
3090 return label
    def set_security_label(self, seclab, old_seclab, xspol=None,
                           xspol_old=None):
        """
        Set the security label of a domain from its old to
        a new value.
        @param seclab  New security label formatted in the form
                       <policy type>:<policy name>:<vm label>
        @param old_seclab  The current security label that the
                           VM must have.
        @param xspol   An optional policy under which this
                       update should be done. If not given,
                       then the current active policy is used.
        @param xspol_old The old policy; only to be passed during
                         the updating of a policy
        @return Returns return code, a string with errors from
                the hypervisor's operation, old label of the
                domain
        """
        rc = 0
        errors = ""
        old_label = ""
        new_ssidref = 0
        domid = self.getDomid()
        res_labels = None
        is_policy_update = (xspol_old != None)

        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance

        state = self._stateGet()
        # Relabel only HALTED or RUNNING or PAUSED domains
        if domid != 0 and \
           state not in \
               [ DOM_STATE_HALTED, DOM_STATE_RUNNING, DOM_STATE_PAUSED, \
                 DOM_STATE_SUSPENDED ]:
            log.warn("Relabeling domain not possible in state '%s'" %
                     DOM_STATES[state])
            return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

        # Remove security label. Works only for halted or suspended domains
        if not seclab or seclab == "":
            if state not in [ DOM_STATE_HALTED, DOM_STATE_SUSPENDED ]:
                return (-xsconstants.XSERR_VM_WRONG_STATE, "", "", 0)

            if self.info.has_key('security_label'):
                old_label = self.info['security_label']
                # Check label against expected one.
                if old_label != old_seclab:
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
                del self.info['security_label']
                xen.xend.XendDomain.instance().managed_config_save(self)
            return (xsconstants.XSERR_SUCCESS, "", "", 0)

        # A non-empty label must look like <type>:<policy>:<vmlabel>.
        tmp = seclab.split(":")
        if len(tmp) != 3:
            return (-xsconstants.XSERR_BAD_LABEL_FORMAT, "", "", 0)
        typ, policy, label = tmp

        poladmin = XSPolicyAdminInstance()
        if not xspol:
            xspol = poladmin.get_policy_by_name(policy)

        # All relabeling below happens under the xend policy write lock.
        try:
            xen.xend.XendDomain.instance().policy_lock.acquire_writer()

            if state in [ DOM_STATE_RUNNING, DOM_STATE_PAUSED ]:
                #if domain is running or paused try to relabel in hypervisor
                if not xspol:
                    return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)

                if typ != xspol.get_type_name() or \
                   policy != xspol.get_name():
                    return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                if typ == xsconstants.ACM_POLICY_ID:
                    new_ssidref = xspol.vmlabel_to_ssidref(label)
                    if new_ssidref == xsconstants.INVALID_SSIDREF:
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # Check that all used resources are accessible under the
                    # new label
                    if not is_policy_update and \
                       not security.resources_compatible_with_vmlabel(xspol,
                              self, label):
                        return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    #Check label against expected one. Can only do this
                    # if the policy hasn't changed underneath in the meantime
                    if xspol_old == None:
                        old_label = self.get_security_label()
                        if old_label != old_seclab:
                            log.info("old_label != old_seclab: %s != %s" %
                                     (old_label, old_seclab))
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    # relabel domain in the hypervisor
                    rc, errors = security.relabel_domains([[domid, new_ssidref]])
                    log.info("rc from relabeling in HV: %d" % rc)
                else:
                    # Only ACM supports live relabeling.
                    return (-xsconstants.XSERR_POLICY_TYPE_UNSUPPORTED, "", "", 0)

            if rc == 0:
                # HALTED, RUNNING or PAUSED
                if domid == 0:
                    # Domain-0's label is persisted as a boot parameter.
                    if xspol:
                        self.info['security_label'] = seclab
                        ssidref = poladmin.set_domain0_bootlabel(xspol, label)
                    else:
                        return (-xsconstants.XSERR_POLICY_NOT_LOADED, "", "", 0)
                else:
                    if self.info.has_key('security_label'):
                        old_label = self.info['security_label']
                        # Check label against expected one, unless wildcard
                        if old_label != old_seclab:
                            return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)

                    self.info['security_label'] = seclab

                    try:
                        xen.xend.XendDomain.instance().managed_config_save(self)
                    except:
                        # Best-effort persistence; failure to save the
                        # managed config is deliberately ignored here.
                        pass
            return (rc, errors, old_label, new_ssidref)
        finally:
            xen.xend.XendDomain.instance().policy_lock.release()
3217 def get_on_shutdown(self):
3218 after_shutdown = self.info.get('actions_after_shutdown')
3219 if not after_shutdown or after_shutdown not in XEN_API_ON_NORMAL_EXIT:
3220 return XEN_API_ON_NORMAL_EXIT[-1]
3221 return after_shutdown
3223 def get_on_reboot(self):
3224 after_reboot = self.info.get('actions_after_reboot')
3225 if not after_reboot or after_reboot not in XEN_API_ON_NORMAL_EXIT:
3226 return XEN_API_ON_NORMAL_EXIT[-1]
3227 return after_reboot
3229 def get_on_suspend(self):
3230 # TODO: not supported
3231 after_suspend = self.info.get('actions_after_suspend')
3232 if not after_suspend or after_suspend not in XEN_API_ON_NORMAL_EXIT:
3233 return XEN_API_ON_NORMAL_EXIT[-1]
3234 return after_suspend
3236 def get_on_crash(self):
3237 after_crash = self.info.get('actions_after_crash')
3238 if not after_crash or after_crash not in \
3239 XEN_API_ON_CRASH_BEHAVIOUR + restart_modes:
3240 return XEN_API_ON_CRASH_BEHAVIOUR[0]
3241 return XEN_API_ON_CRASH_BEHAVIOUR_FILTER[after_crash]
    def get_dev_config_by_uuid(self, dev_class, dev_uuid):
        """ Get's a device configuration either from XendConfig or
        from the DevController.

        @param dev_class: device class, either, 'vbd' or 'vif'
        @param dev_uuid: device UUID

        @rtype: dictionary
        """
        dev_type, dev_config = self.info['devices'].get(dev_uuid, (None, None))

        # shortcut if the domain isn't started because
        # the devcontrollers will have no better information
        # than XendConfig.
        if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED,
                                XEN_API_VM_POWER_STATE_SUSPENDED):
            if dev_config:
                # Copy so callers cannot mutate the stored config.
                return copy.deepcopy(dev_config)
            return None

        # instead of using dev_class, we use the dev_type
        # that is from XendConfig.
        controller = self.getDeviceController(dev_type)
        if not controller:
            return None

        all_configs = controller.getAllDeviceConfigurations()
        if not all_configs:
            return None

        # Overlay the controller's live view onto the stored config.
        updated_dev_config = copy.deepcopy(dev_config)
        for _devid, _devcfg in all_configs.items():
            if _devcfg.get('uuid') == dev_uuid:
                updated_dev_config.update(_devcfg)
                updated_dev_config['id'] = _devid
                return updated_dev_config

        # No live entry matched: return the (copied) stored config as-is.
        return updated_dev_config
    def get_dev_xenapi_config(self, dev_class, dev_uuid):
        """Return a device's configuration normalised into Xen-API field
        names, augmented with live I/O statistics when the domain is
        not halted.

        @param dev_class: 'vif', 'vbd' or 'vtpm'
        @param dev_uuid: device UUID
        @rtype: dictionary ({} when the device is unknown)
        """
        config = self.get_dev_config_by_uuid(dev_class, dev_uuid)
        if not config:
            return {}

        config['VM'] = self.get_uuid()

        if dev_class == 'vif':
            # Fill in Xen-API names from their legacy equivalents.
            if not config.has_key('name'):
                config['name'] = config.get('vifname', '')
            if not config.has_key('MAC'):
                config['MAC'] = config.get('mac', '')
            if not config.has_key('type'):
                config['type'] = 'paravirtualised'
            if not config.has_key('device'):
                devid = config.get('id')
                if devid != None:
                    config['device'] = 'eth%s' % devid
                else:
                    config['device'] = ''
            # NOTE(review): 'devid' is only bound on the branch above;
            # the statistics code below reads it unconditionally, so a
            # config that already has 'device' looks like it would hit
            # a NameError on a live domain -- confirm.

            if not config.has_key('network'):
                try:
                    # Derive the bridge from brctl state when not recorded.
                    bridge = config.get('bridge', None)
                    if bridge is None:
                        from xen.util import Brctl
                        if_to_br = dict([(i,b)
                                         for (b,ifs) in Brctl.get_state().items()
                                         for i in ifs])
                        vifname = "vif%s.%s" % (self.getDomid(),
                                                config.get('id'))
                        bridge = if_to_br.get(vifname, None)
                    config['network'] = \
                        XendNode.instance().bridge_to_network(
                        config.get('bridge')).get_uuid()
                except Exception:
                    log.exception('bridge_to_network')
                    # Ignore this for now -- it may happen if the device
                    # has been specified using the legacy methods, but at
                    # some point we're going to have to figure out how to
                    # handle that properly.

            config['MTU'] = 1500 # TODO

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                # Live domain: report current throughput and totals.
                xennode = XendNode.instance()
                rx_bps, tx_bps = xennode.get_vif_util(self.domid, devid)
                config['io_read_kbs'] = rx_bps/1024
                config['io_write_kbs'] = tx_bps/1024
                rx, tx = xennode.get_vif_stat(self.domid, devid)
                config['io_total_read_kbs'] = rx/1024
                config['io_total_write_kbs'] = tx/1024
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0
                config['io_total_read_kbs'] = 0.0
                config['io_total_write_kbs'] = 0.0

            config['security_label'] = config.get('security_label', '')

        if dev_class == 'vbd':

            if self._stateGet() not in (XEN_API_VM_POWER_STATE_HALTED,):
                controller = self.getDeviceController(dev_class)
                devid, _1, _2 = controller.getDeviceDetails(config)
                xennode = XendNode.instance()
                rd_blkps, wr_blkps = xennode.get_vbd_util(self.domid, devid)
                config['io_read_kbs'] = rd_blkps
                config['io_write_kbs'] = wr_blkps
            else:
                config['io_read_kbs'] = 0.0
                config['io_write_kbs'] = 0.0

            config['VDI'] = config.get('VDI', '')
            config['device'] = config.get('dev', '')
            # A "name:cdrom" style device spec encodes the VBD type.
            if ':' in config['device']:
                vbd_name, vbd_type = config['device'].split(':', 1)
                config['device'] = vbd_name
                if vbd_type == 'cdrom':
                    config['type'] = XEN_API_VBD_TYPE[0]
                else:
                    config['type'] = XEN_API_VBD_TYPE[1]

            config['driver'] = 'paravirtualised' # TODO
            config['image'] = config.get('uname', '')

            if config.get('mode', 'r') == 'r':
                config['mode'] = 'RO'
            else:
                config['mode'] = 'RW'

        if dev_class == 'vtpm':
            if not config.has_key('type'):
                config['type'] = 'paravirtualised' # TODO
            if not config.has_key('backend'):
                config['backend'] = "00000000-0000-0000-0000-000000000000"

        return config
3381 def get_dev_property(self, dev_class, dev_uuid, field):
3382 config = self.get_dev_xenapi_config(dev_class, dev_uuid)
3383 try:
3384 return config[field]
3385 except KeyError:
3386 raise XendError('Invalid property for device: %s' % field)
3388 def set_dev_property(self, dev_class, dev_uuid, field, value):
3389 self.info['devices'][dev_uuid][1][field] = value
3391 def get_vcpus_util(self):
3392 vcpu_util = {}
3393 xennode = XendNode.instance()
3394 if 'VCPUs_max' in self.info and self.domid != None:
3395 for i in range(0, self.info['VCPUs_max']):
3396 util = xennode.get_vcpu_util(self.domid, i)
3397 vcpu_util[str(i)] = util
3399 return vcpu_util
3401 def get_consoles(self):
3402 return self.info.get('console_refs', [])
3404 def get_vifs(self):
3405 return self.info.get('vif_refs', [])
3407 def get_vbds(self):
3408 return self.info.get('vbd_refs', [])
3410 def get_vtpms(self):
3411 return self.info.get('vtpm_refs', [])
3413 def get_dpcis(self):
3414 return XendDPCI.get_by_VM(self.info.get('uuid'))
3416 def get_dscsis(self):
3417 return XendDSCSI.get_by_VM(self.info.get('uuid'))
    def create_vbd(self, xenapi_vbd, vdi_image_path):
        """Create a VBD using a VDI from XendStorageRepository.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device
        @raise XendError: device could not be added to the config
        """
        xenapi_vbd['image'] = vdi_image_path
        # A 'tap...' image path selects the blktap controller.
        if vdi_image_path.startswith('tap'):
            dev_uuid = self.info.device_add('tap', cfg_xenapi = xenapi_vbd)
        else:
            dev_uuid = self.info.device_add('vbd', cfg_xenapi = xenapi_vbd)

        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            # Domain is live: also plug the device now.
            _, config = self.info['devices'][dev_uuid]

            if vdi_image_path.startswith('tap'):
                dev_control = self.getDeviceController('tap')
            else:
                dev_control = self.getDeviceController('vbd')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Hotplug failed: roll the config entry back, then re-raise.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vbd_refs'].remove(dev_uuid)
                raise

        return dev_uuid
    def create_phantom_vbd_with_vdi(self, xenapi_vbd, vdi_image_path):
        """Create a phantom (blktap) VBD backed by a VDI.

        @param xenapi_vbd: vbd struct from the Xen API
        @param vdi_image_path: VDI UUID
        @rtype: string
        @return: uuid of the device

        NOTE(review): despite the docstring, the final statement returns
        config['devid'] (a device id, not a uuid), and 'config' is only
        bound when the domain is RUNNING -- on any other state this
        looks like it would raise NameError. Confirm intended behaviour.
        """
        xenapi_vbd['image'] = vdi_image_path
        dev_uuid = self.info.phantom_device_add('tap', cfg_xenapi = xenapi_vbd)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() == XEN_API_VM_POWER_STATE_RUNNING:
            _, config = self.info['devices'][dev_uuid]
            config['devid'] = self.getDeviceController('tap').createDevice(config)

        return config['devid']
    def create_vif(self, xenapi_vif):
        """Create VIF device from the passed struct in Xen API format.

        @param xenapi_vif: Xen API VIF Struct.
        @rtype: string
        @return: UUID
        @raise XendError: device could not be added to the config
        """
        dev_uuid = self.info.device_add('vif', cfg_xenapi = xenapi_vif)
        if not dev_uuid:
            raise XendError('Failed to create device')

        if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
                                XEN_API_VM_POWER_STATE_PAUSED):
            # Domain is live: also plug the device now.
            _, config = self.info['devices'][dev_uuid]
            dev_control = self.getDeviceController('vif')

            try:
                devid = dev_control.createDevice(config)
                dev_control.waitForDevice(devid)
                self.info.device_update(dev_uuid,
                                        cfg_xenapi = {'devid': devid})
            except Exception, exn:
                # Hotplug failed: roll the config entry back, then re-raise.
                log.exception(exn)
                del self.info['devices'][dev_uuid]
                self.info['vif_refs'].remove(dev_uuid)
                raise

        return dev_uuid
3507 def create_vtpm(self, xenapi_vtpm):
3508 """Create a VTPM device from the passed struct in Xen API format.
3510 @return: uuid of the device
3511 @rtype: string
3512 """
3514 if self._stateGet() not in (DOM_STATE_HALTED,):
3515 raise VmError("Can only add vTPM to a halted domain.")
3516 if self.get_vtpms() != []:
3517 raise VmError('Domain already has a vTPM.')
3518 dev_uuid = self.info.device_add('vtpm', cfg_xenapi = xenapi_vtpm)
3519 if not dev_uuid:
3520 raise XendError('Failed to create device')
3522 return dev_uuid
3524 def create_console(self, xenapi_console):
3525 """ Create a console device from a Xen API struct.
3527 @return: uuid of device
3528 @rtype: string
3529 """
3530 if self._stateGet() not in (DOM_STATE_HALTED,):
3531 raise VmError("Can only add console to a halted domain.")
3533 dev_uuid = self.info.device_add('console', cfg_xenapi = xenapi_console)
3534 if not dev_uuid:
3535 raise XendError('Failed to create device')
3537 return dev_uuid
3539 def set_console_other_config(self, console_uuid, other_config):
3540 self.info.console_update(console_uuid, 'other_config', other_config)
3542 def create_dpci(self, xenapi_pci):
3543 """Create pci device from the passed struct in Xen API format.
3545 @param xenapi_pci: DPCI struct from Xen API
3546 @rtype: bool
3547 #@rtype: string
3548 @return: True if successfully created device
3549 #@return: UUID
3550 """
3552 dpci_uuid = uuid.createString()
3554 dpci_opts = []
3555 opts_dict = xenapi_pci.get('options')
3556 for k in opts_dict.keys():
3557 dpci_opts.append([k, opts_dict[k]])
3559 # Convert xenapi to sxp
3560 ppci = XendAPIStore.get(xenapi_pci.get('PPCI'), 'PPCI')
3562 target_pci_sxp = \
3563 ['pci',
3564 ['dev',
3565 ['domain', '0x%02x' % ppci.get_domain()],
3566 ['bus', '0x%02x' % ppci.get_bus()],
3567 ['slot', '0x%02x' % ppci.get_slot()],
3568 ['func', '0x%1x' % ppci.get_func()],
3569 ['vslt', '0x%02x' % xenapi_pci.get('hotplug_slot')],
3570 ['opts', dpci_opts],
3571 ['uuid', dpci_uuid]
3572 ],
3573 ['state', 'Initialising']
3576 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3578 old_pci_sxp = self._getDeviceInfo_pci(0)
3580 if old_pci_sxp is None:
3581 dev_uuid = self.info.device_add('pci', cfg_sxp = target_pci_sxp)
3582 if not dev_uuid:
3583 raise XendError('Failed to create device')
3585 else:
3586 new_pci_sxp = ['pci']
3587 for existing_dev in sxp.children(old_pci_sxp, 'dev'):
3588 new_pci_sxp.append(existing_dev)
3589 new_pci_sxp.append(sxp.child0(target_pci_sxp, 'dev'))
3591 dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
3592 self.info.device_update(dev_uuid, new_pci_sxp)
3594 xen.xend.XendDomain.instance().managed_config_save(self)
3596 else:
3597 try:
3598 self.device_configure(target_pci_sxp)
3600 except Exception, exn:
3601 raise XendError('Failed to create device')
3603 return dpci_uuid
3605 def create_dscsi(self, xenapi_dscsi):
3606 """Create scsi device from the passed struct in Xen API format.
3608 @param xenapi_dscsi: DSCSI struct from Xen API
3609 @rtype: string
3610 @return: UUID
3611 """
3613 dscsi_uuid = uuid.createString()
3615 # Convert xenapi to sxp
3616 pscsi = XendAPIStore.get(xenapi_dscsi.get('PSCSI'), 'PSCSI')
3617 devid = int(xenapi_dscsi.get('virtual_HCTL').split(':')[0])
3618 target_vscsi_sxp = \
3619 ['vscsi',
3620 ['dev',
3621 ['devid', devid],
3622 ['p-devname', pscsi.get_dev_name()],
3623 ['p-dev', pscsi.get_physical_HCTL()],
3624 ['v-dev', xenapi_dscsi.get('virtual_HCTL')],
3625 ['state', xenbusState['Initialising']],
3626 ['uuid', dscsi_uuid]
3630 if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:
3632 cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
3634 if cur_vscsi_sxp is None:
3635 dev_uuid = self.info.device_add('vscsi', cfg_sxp = target_vscsi_sxp)
3636 if not dev_uuid:
3637 raise XendError('Failed to create device')
3639 else:
3640 new_vscsi_sxp = ['vscsi']
3641 for existing_dev in sxp.children(cur_vscsi_sxp, 'dev'):
3642 new_vscsi_sxp.append(existing_dev)
3643 new_vscsi_sxp.append(sxp.child0(target_vscsi_sxp, 'dev'))
3645 dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')
3646 self.info.device_update(dev_uuid, new_vscsi_sxp)
3648 xen.xend.XendDomain.instance().managed_config_save(self)
3650 else:
3651 try:
3652 self.device_configure(target_vscsi_sxp)
3654 except Exception, exn:
3655 raise XendError('Failed to create device')
3657 return dscsi_uuid
3660 def destroy_device_by_uuid(self, dev_type, dev_uuid):
3661 if dev_uuid not in self.info['devices']:
3662 raise XendError('Device does not exist')
3664 try:
3665 if self._stateGet() in (XEN_API_VM_POWER_STATE_RUNNING,
3666 XEN_API_VM_POWER_STATE_PAUSED):
3667 _, config = self.info['devices'][dev_uuid]
3668 devid = config.get('devid')
3669 if devid != None:
3670 self.getDeviceController(dev_type).destroyDevice(devid, force = False)
3671 else:
3672 raise XendError('Unable to get devid for device: %s:%s' %
3673 (dev_type, dev_uuid))
3674 finally:
3675 del self.info['devices'][dev_uuid]
3676 self.info['%s_refs' % dev_type].remove(dev_uuid)
3678 def destroy_vbd(self, dev_uuid):
3679 self.destroy_device_by_uuid('vbd', dev_uuid)
3681 def destroy_vif(self, dev_uuid):
3682 self.destroy_device_by_uuid('vif', dev_uuid)
3684 def destroy_vtpm(self, dev_uuid):
3685 self.destroy_device_by_uuid('vtpm', dev_uuid)
    def destroy_dpci(self, dev_uuid):
        """Detach the PCI function behind DPCI dev_uuid from the domain.

        Rebuilds the domain's 'pci' sxp entry without the target
        function; on a live domain the function is additionally put
        into the 'Closing' state via device_configure.

        @raise XendError: function not found, or detach failed
        """
        dpci = XendAPIStore.get(dev_uuid, 'DPCI')
        ppci = XendAPIStore.get(dpci.get_PPCI(), 'PPCI')

        # All functions live under the single 'pci' entry (devid 0);
        # dev_uuid is rebound to that entry's uuid below.
        old_pci_sxp = self._getDeviceInfo_pci(0)
        dev_uuid = sxp.child_value(old_pci_sxp, 'uuid')
        target_dev = None
        new_pci_sxp = ['pci']
        for dev in sxp.children(old_pci_sxp, 'dev'):
            # Match on the canonical DDDD:BB:SS.F name.
            domain = int(sxp.child_value(dev, 'domain'), 16)
            bus = int(sxp.child_value(dev, 'bus'), 16)
            slot = int(sxp.child_value(dev, 'slot'), 16)
            func = int(sxp.child_value(dev, 'func'), 16)
            name = "%04x:%02x:%02x.%01x" % (domain, bus, slot, func)
            if ppci.get_name() == name:
                target_dev = dev
            else:
                new_pci_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_pci_sxp = ['pci', target_dev, ['state', 'Closing']]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            self.info.device_update(dev_uuid, new_pci_sxp)
            if len(sxp.children(new_pci_sxp, 'dev')) == 0:
                # Last function removed: drop the whole 'pci' entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            try:
                self.device_configure(target_pci_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_dscsi(self, dev_uuid):
        """Detach the virtual SCSI device behind DSCSI dev_uuid.

        Rebuilds the matching 'vscsi' sxp entry without the target LUN;
        on a live domain the LUN is additionally put into the 'Closing'
        state via device_configure.

        @raise XendError: LUN not found, or detach failed
        """
        dscsi = XendAPIStore.get(dev_uuid, 'DSCSI')
        devid = dscsi.get_virtual_host()
        vHCTL = dscsi.get_virtual_HCTL()
        # dev_uuid is rebound to the containing 'vscsi' entry's uuid.
        cur_vscsi_sxp = self._getDeviceInfo_vscsi(devid, None)
        dev_uuid = sxp.child_value(cur_vscsi_sxp, 'uuid')

        target_dev = None
        new_vscsi_sxp = ['vscsi']
        for dev in sxp.children(cur_vscsi_sxp, 'dev'):
            # Match on the virtual HCTL address.
            if vHCTL == sxp.child_value(dev, 'v-dev'):
                target_dev = dev
            else:
                new_vscsi_sxp.append(dev)

        if target_dev is None:
            raise XendError('Failed to destroy device')

        target_dev.append(['state', xenbusState['Closing']])
        target_vscsi_sxp = ['vscsi', target_dev]

        if self._stateGet() != XEN_API_VM_POWER_STATE_RUNNING:

            self.info.device_update(dev_uuid, new_vscsi_sxp)
            if len(sxp.children(new_vscsi_sxp, 'dev')) == 0:
                # Last LUN removed: drop the whole 'vscsi' entry.
                del self.info['devices'][dev_uuid]
            xen.xend.XendDomain.instance().managed_config_save(self)

        else:
            try:
                self.device_configure(target_vscsi_sxp)

            except Exception, exn:
                raise XendError('Failed to destroy device')
    def destroy_xapi_instances(self):
        """Destroy Xen-API instances stored in XendAPIStore.

        No-op while the domain is still registered with XendDomain.
        """
        # Xen-API classes based on XendBase have their instances stored
        # in XendAPIStore. Cleanup these instances here, if they are supposed
        # to be destroyed when the parent domain is dead.

        # Most of the virtual devices (vif, vbd, vfb, etc) are not based on
        # XendBase and there's no need to remove them from XendAPIStore.

        from xen.xend import XendDomain
        if XendDomain.instance().is_valid_vm(self.info.get('uuid')):
            # domain still exists.
            return

        # Destroy the VMMetrics instance.
        if XendAPIStore.get(self.metrics.get_uuid(), self.metrics.getClass()) \
                is not None:
            self.metrics.destroy()

        # Destroy DPCI instances.
        for dpci_uuid in XendDPCI.get_by_VM(self.info.get('uuid')):
            XendAPIStore.deregister(dpci_uuid, "DPCI")

        # Destroy DSCSI instances.
        for dscsi_uuid in XendDSCSI.get_by_VM(self.info.get('uuid')):
            XendAPIStore.deregister(dscsi_uuid, "DSCSI")
3789 def has_device(self, dev_class, dev_uuid):
3790 return (dev_uuid in self.info['%s_refs' % dev_class.lower()])
3792 def __str__(self):
3793 return '<domain id=%s name=%s memory=%s state=%s>' % \
3794 (str(self.domid), self.info['name_label'],
3795 str(self.info['memory_dynamic_max']), DOM_STATES[self._stateGet()])
3797 __repr__ = __str__