ia64/xen-unstable

view tools/python/xen/xend/image.py @ 19350:b4f3a52c359e

xend: fix regression in c/s 19330

The attached patch fixes a regression introduced in c/s 19330 which
prevented guests from starting on a Linux Dom0.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Mar 12 15:40:52 2009 +0000 (2009-03-12)
parents b3b6aee082d6
children 33270c9a3d2f
source
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================

import os, os.path, string
import re
import math
import time
import signal
import thread
import fcntl
import sys
import errno
import glob
import traceback
import platform

import xen.lowlevel.xc
from xen.xend.XendConstants import *
from xen.xend.XendError import VmError, XendError, HVMRequired
from xen.xend.XendLogging import log
from xen.xend.XendOptions import instance as xenopts
from xen.xend.xenstore.xstransact import xstransact
from xen.xend.xenstore.xswatch import xswatch
from xen.xend import arch
from xen.xend import XendOptions
from xen.util import oshelp
from xen.util import utils
from xen.xend import osdep

xc = xen.lowlevel.xc.xc()

MAX_GUEST_CMDLINE = 1024

sentinel_path_prefix = '/var/run/xend/dm-'
sentinel_fifos_inuse = { }
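
# Each device model gets a named "sentinel" fifo under /var/run/xend/.
# xend keeps the read end open (see _openSentinel) while the device-model
# process inherits the write end across exec, so EOF on the read end is how
# xend notices that the device model has gone away (see _sentinel_watch).
# cleanup_stale_sentinel_fifos() removes leftovers from earlier xend runs
# that are not registered in sentinel_fifos_inuse.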
def cleanup_stale_sentinel_fifos():
    for path in glob.glob(sentinel_path_prefix + '*.fifo'):
        if path in sentinel_fifos_inuse: continue
        try: os.unlink(path)
        except OSError, e:
            log.warning('could not delete stale fifo %s: %s',
                        path, utils.exception_string(e))


def create(vm, vmConfig):
    """Create an image handler for a vm.

    @return ImageHandler instance
    """
    return findImageHandlerClass(vmConfig)(vm, vmConfig)


class ImageHandler:
    """Abstract base class for image handlers.

    createImage() is called to configure and build the domain from its
    kernel image and ramdisk etc.

    The method buildDomain() is used to build the domain, and must be
    defined in a subclass. Usually this is the only method that needs
    defining in a subclass.

    The method createDeviceModel() is called to create the domain device
    model.

    The method destroyDeviceModel() is called to reap the device model
    """

    ostype = None


    def __init__(self, vm, vmConfig):
        self.vm = vm

        self.bootloader = False
        self.kernel = None
        self.ramdisk = None
        self.cmdline = None

        self.configure(vmConfig)

    def configure(self, vmConfig):
        """Config actions common to all unix-like domains."""
        if '_temp_using_bootloader' in vmConfig:
            self.bootloader = True
            self.kernel = vmConfig['_temp_kernel']
            self.cmdline = vmConfig['_temp_args']
            self.ramdisk = vmConfig['_temp_ramdisk']
        else:
            self.kernel = vmConfig['PV_kernel']
            self.cmdline = vmConfig['PV_args']
            self.ramdisk = vmConfig['PV_ramdisk']
        self.vm.storeVm(("image/ostype", self.ostype),
                        ("image/kernel", self.kernel),
                        ("image/cmdline", self.cmdline),
                        ("image/ramdisk", self.ramdisk))
        self.vm.permissionsVm("image/cmdline", { 'dom': self.vm.getDomid(), 'read': True } )

        self.device_model = vmConfig['platform'].get('device_model')

        self.display = vmConfig['platform'].get('display')
        self.xauthority = vmConfig['platform'].get('xauthority')
        self.vncconsole = int(vmConfig['platform'].get('vncconsole', 0))
        self.dmargs = self.parseDeviceModelArgs(vmConfig)
        self.pid = None
        rtc_timeoffset = vmConfig['platform'].get('rtc_timeoffset')
        if rtc_timeoffset is not None:
            xc.domain_set_time_offset(self.vm.getDomid(), int(rtc_timeoffset))

        self.cpuid = None
        self.cpuid_check = None
        if 'cpuid' in vmConfig:
            self.cpuid = vmConfig['cpuid']
        if 'cpuid_check' in vmConfig:
            self.cpuid_check = vmConfig['cpuid_check']

    def cleanupBootloading(self):
        if self.bootloader:
            self.unlink(self.kernel)
            self.unlink(self.ramdisk)


    def unlink(self, f):
        if not f: return
        try:
            os.unlink(f)
        except OSError, ex:
            log.warning("error removing bootloader file '%s': %s", f, ex)


    def createImage(self):
        """Entry point to create domain memory image.
        Override in subclass if needed.
        """
        return self.createDomain()


    def createDomain(self):
        """Build the domain boot image.
        """
        # Set params and call buildDomain().

        if self.kernel and not os.path.isfile(self.kernel):
            raise VmError('Kernel image does not exist: %s' % self.kernel)
        if self.ramdisk and not os.path.isfile(self.ramdisk):
            raise VmError('Kernel ramdisk does not exist: %s' % self.ramdisk)
        if len(self.cmdline) >= MAX_GUEST_CMDLINE:
            log.warning('kernel cmdline too long, domain %d',
                        self.vm.getDomid())

        log.info("buildDomain os=%s dom=%d vcpus=%d", self.ostype,
                 self.vm.getDomid(), self.vm.getVCpuCount())

        result = self.buildDomain()

        if isinstance(result, dict):
            return result
        else:
            raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
                          % (self.ostype, self.vm.getDomid(), str(result)))

    def getRequiredAvailableMemory(self, mem_kb):
        """@param mem_kb The configured maxmem or memory, in KiB.
        @return The corresponding required amount of memory for the domain,
        also in KiB. This is normally the given mem_kb, but architecture- or
        image-specific code may override this to add headroom where
        necessary."""
        return mem_kb

    def getRequiredInitialReservation(self):
        """@param mem_kb The configured memory, in KiB.
        @return The corresponding required amount of memory to be free, also
        in KiB. This is normally the same as getRequiredAvailableMemory, but
        architecture- or image-specific code may override this to
        add headroom where necessary."""
        return self.getRequiredAvailableMemory(self.vm.getMemoryTarget())

    def getRequiredMaximumReservation(self):
        """@param mem_kb The maximum possible memory, in KiB.
        @return The corresponding required amount of memory to be free, also
        in KiB. This is normally the same as getRequiredAvailableMemory, but
        architecture- or image-specific code may override this to
        add headroom where necessary."""
        return self.getRequiredAvailableMemory(self.vm.getMemoryMaximum())

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        """@param shadow_mem_kb The configured shadow memory, in KiB.
        @param maxmem_kb The configured maxmem, in KiB.
        @return The corresponding required amount of shadow memory, also in
        KiB."""
        # PV domains don't need any shadow memory
        return 0

    def buildDomain(self):
        """Build the domain. Define in subclass."""
        raise NotImplementedError()

    def prepareEnvironment(self):
        """Prepare the environment for the execution of the domain. This
        method is called before any devices are set up."""

        domid = self.vm.getDomid()

        # Delete left-over pipes
        try:
            os.unlink('/var/run/tap/qemu-read-%d' % domid)
            os.unlink('/var/run/tap/qemu-write-%d' % domid)
        except:
            pass

        # No device model, don't create pipes
        if self.device_model is None:
            return

        if platform.system() != 'SunOS':
            # If we use a device model, the pipes for communication between
            # blktapctrl and ioemu must be present before the devices are
            # created (blktapctrl must access them for new block devices)

            try:
                os.makedirs('/var/run/tap', 0755)
            except:
                pass

            try:
                os.mkfifo('/var/run/tap/qemu-read-%d' % domid, 0600)
                os.mkfifo('/var/run/tap/qemu-write-%d' % domid, 0600)
            except OSError, e:
                log.warn('Could not create blktap pipes for domain %d' % domid)
                log.exception(e)
                pass


    # Return a list of cmd line args to the device models based on the
    # xm config file
    def parseDeviceModelArgs(self, vmConfig):
        ret = ["-domain-name", str(self.vm.info['name_label'])]

        xen_extended_power_mgmt = int(vmConfig['platform'].get(
            'xen_extended_power_mgmt', 0))
        if xen_extended_power_mgmt != 0:
            xstransact.Store("/local/domain/0/device-model/%i"
                             % self.vm.getDomid(),
                             ('xen_extended_power_mgmt',
                              xen_extended_power_mgmt))

        # Find RFB console device, and if it exists, make QEMU enable
        # the VNC console.
        if int(vmConfig['platform'].get('nographic', 0)) != 0:
            # skip vnc init if nographic is set
            ret.append('-nographic')
            return ret

        vram = str(vmConfig['platform'].get('videoram', 4))
        ret.append('-videoram')
        ret.append(vram)

        vnc_config = {}
        has_vnc = int(vmConfig['platform'].get('vnc', 0)) != 0
        has_sdl = int(vmConfig['platform'].get('sdl', 0)) != 0
        opengl = 1
        keymap = vmConfig['platform'].get("keymap")
        for dev_uuid in vmConfig['console_refs']:
            dev_type, dev_info = vmConfig['devices'][dev_uuid]
            if dev_type == 'vfb':
                if 'keymap' in dev_info:
                    keymap = dev_info.get('keymap', {})
                if int(dev_info.get('vnc', 0)) != 0:
                    has_vnc = True
                if int(dev_info.get('sdl', 0)) != 0:
                    has_sdl = True
                if has_sdl:
                    self.display = dev_info.get('display', {})
                    self.xauthority = dev_info.get('xauthority', {})
                    opengl = int(dev_info.get('opengl', opengl))
                if has_vnc:
                    vnc_config = dev_info.get('other_config', {})
                break

        if keymap:
            ret.append("-k")
            ret.append(keymap)

        if has_vnc:
            if not vnc_config:
                for key in ('vncunused', 'vnclisten', 'vncdisplay',
                            'vncpasswd'):
                    if key in vmConfig['platform']:
                        vnc_config[key] = vmConfig['platform'][key]
            if vnc_config.has_key("vncpasswd"):
                passwd = vnc_config["vncpasswd"]
            else:
                passwd = XendOptions.instance().get_vncpasswd_default()
            vncopts = ""
            if passwd:
                self.vm.storeVm("vncpasswd", passwd)
                self.vm.permissionsVm("vncpasswd", { 'dom': self.vm.getDomid(), 'read': True } )
                vncopts = vncopts + ",password"
                log.debug("Stored a VNC password for vfb access")
            else:
                log.debug("No VNC passwd configured for vfb access")

            if XendOptions.instance().get_vnc_tls():
                vncx509certdir = XendOptions.instance().get_vnc_x509_cert_dir()
                vncx509verify = XendOptions.instance().get_vnc_x509_verify()

                if not os.path.exists(vncx509certdir):
                    raise VmError("VNC x509 certificate dir %s does not exist" % vncx509certdir)

                if vncx509verify:
                    vncopts = vncopts + ",tls,x509verify=%s" % vncx509certdir
                else:
                    vncopts = vncopts + ",tls,x509=%s" % vncx509certdir

            vnclisten = vnc_config.get('vnclisten',
                                       XendOptions.instance().get_vnclisten_address())
            vncdisplay = int(vnc_config.get('vncdisplay', 0))
            ret.append('-vnc')
            ret.append("%s:%s%s" % (vnclisten, vncdisplay, vncopts))

            if int(vnc_config.get('vncunused', 1)) != 0:
                ret.append('-vncunused')

        if has_sdl:
            ret.append('-sdl')
            if int(vmConfig['platform'].get('opengl', opengl)) != 1:
                ret.append('-disable-opengl')

        if not has_sdl and not has_vnc:
            ret.append('-nographic')

        if int(vmConfig['platform'].get('monitor', 0)) != 0:
            ret = ret + ['-monitor', 'vc']
        return ret
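
    # getDeviceModelArgs() prepends the configured device-model binary and
    # "-d <domid>" to the arguments assembled above; image-specific
    # subclasses then append the machine type ("-M xenpv" or "-M xenfv").
    # Purely illustrative example of the resulting command line:
    #   <device_model> -d 3 -domain-name guest -videoram 4 -vnc 127.0.0.1:0 -vncunused -M xenfv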
    def getDeviceModelArgs(self, restore = False):
        args = [self.device_model]
        args = args + ([ "-d", "%d" % self.vm.getDomid() ])
        args = args + self.dmargs
        return args

    def _openSentinel(self, sentinel_path_fifo):
        self.sentinel_fifo = file(sentinel_path_fifo, 'r')
        self.sentinel_lock = thread.allocate_lock()
        oshelp.fcntl_setfd_cloexec(self.sentinel_fifo, True)
        sentinel_fifos_inuse[sentinel_path_fifo] = 1
        self.sentinel_path_fifo = sentinel_path_fifo
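
    # createDeviceModel() forks and execs the device model: the child
    # redirects stdin to /dev/null and stdout/stderr to the (rotated)
    # qemu-dm logfile and keeps the inherited write end of the sentinel
    # fifo open across exec; the parent records the pid in xenstore and
    # starts _sentinel_watch() in a separate thread.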
    def createDeviceModel(self, restore = False):
        if self.device_model is None:
            return
        if self.pid:
            return
        # Execute device model.
        #todo: Error handling
        args = self.getDeviceModelArgs(restore)
        env = dict(os.environ)
        if self.display:
            env['DISPLAY'] = self.display
        if self.xauthority:
            env['XAUTHORITY'] = self.xauthority
        unique_id = "%i-%i" % (self.vm.getDomid(), time.time())
        sentinel_path = sentinel_path_prefix + unique_id
        sentinel_path_fifo = sentinel_path + '.fifo'
        os.mkfifo(sentinel_path_fifo, 0600)
        sentinel_write = file(sentinel_path_fifo, 'r+')
        self._openSentinel(sentinel_path_fifo)
        self.vm.storeDom("image/device-model-fifo", sentinel_path_fifo)
        xstransact.Mkdir("/local/domain/0/device-model/%i" % self.vm.getDomid())
        xstransact.SetPermissions("/local/domain/0/device-model/%i" % self.vm.getDomid(),
                                  { 'dom': self.vm.getDomid(), 'read': True, 'write': True })
        log.info("spawning device models: %s %s", self.device_model, args)
        # keep track of pid and spawned options to kill it later

        self.logfile = "/var/log/xen/qemu-dm-%s.log" % str(self.vm.info['name_label'])

        # rotate log
        logfile_mode = os.O_WRONLY|os.O_CREAT|os.O_APPEND
        logrotate_count = XendOptions.instance().get_qemu_dm_logrotate_count()
        if logrotate_count > 0:
            logfile_mode |= os.O_TRUNC
            if os.path.exists("%s.%d" % (self.logfile, logrotate_count)):
                os.unlink("%s.%d" % (self.logfile, logrotate_count))
            for n in range(logrotate_count - 1, 0, -1):
                if os.path.exists("%s.%d" % (self.logfile, n)):
                    os.rename("%s.%d" % (self.logfile, n),
                              "%s.%d" % (self.logfile, (n + 1)))
            if os.path.exists(self.logfile):
                os.rename(self.logfile, self.logfile + ".1")

        null = os.open("/dev/null", os.O_RDONLY)
        logfd = os.open(self.logfile, logfile_mode)

        sys.stderr.flush()
        contract = osdep.prefork("%s:%d" %
                                 (self.vm.getName(), self.vm.getDomid()))
        pid = os.fork()
        if pid == 0: #child
            try:
                osdep.postfork(contract)
                os.dup2(null, 0)
                os.dup2(logfd, 1)
                os.dup2(logfd, 2)
                os.close(null)
                os.close(logfd)
                self.sentinel_fifo.close()
                try:
                    os.execve(self.device_model, args, env)
                except Exception, e:
                    print >>sys.stderr, (
                        'failed to set up fds or execute dm %s: %s' %
                        (self.device_model, utils.exception_string(e)))
                    os._exit(126)
            except:
                os._exit(127)
        else:
            osdep.postfork(contract, abandon=True)
            self.pid = pid
            os.close(null)
            os.close(logfd)
        sentinel_write.close()
        self.vm.storeDom("image/device-model-pid", self.pid)
        log.info("device model pid: %d", self.pid)
        # we would very much prefer not to have a thread here and instead
        # have a callback but sadly we don't have Twisted in xend
        self.sentinel_thread = thread.start_new_thread(self._sentinel_watch,())
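
    # The running device model is controlled through xenstore: xend writes a
    # 'command' (and optional 'parameter') under
    # /local/domain/0/device-model/<domid> and polls the 'state' node every
    # 100ms, for up to roughly 10 seconds, until it shows the expected value;
    # the previous state is then written back.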
    def signalDeviceModel(self, cmd, ret, par = None):
        if self.device_model is None:
            return
        # Signal the device model to take action
        if cmd is '' or ret is '':
            raise VmError('need valid command and result when signalling device model')

        orig_state = xstransact.Read("/local/domain/0/device-model/%i/state"
                                     % self.vm.getDomid())

        if par is not None:
            xstransact.Store("/local/domain/0/device-model/%i"
                             % self.vm.getDomid(), ('parameter', par))

        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('command', cmd))
        # Wait for confirmation. Could do this with a watch but we'd
        # still end up spinning here waiting for the watch to fire.
        state = ''
        count = 0
        while state != ret:
            state = xstransact.Read("/local/domain/0/device-model/%i/state"
                                    % self.vm.getDomid())
            time.sleep(0.1)
            count += 1
            if count > 100:
                raise VmError('Timed out waiting for device model action')

        # restore orig state
        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('state', orig_state))
        log.info("signalDeviceModel: restore dm state to %s", orig_state)

    def saveDeviceModel(self):
        # Signal the device model to pause itself and save its state
        self.signalDeviceModel('save', 'paused')

    def resumeDeviceModel(self):
        if self.device_model is None:
            return
        # Signal the device model to resume activity after pausing to save.
        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('command', 'continue'))

    def _dmfailed(self, message):
        log.warning("domain %s: %s", self.vm.getName(), message)
        xc.domain_shutdown(self.vm.getDomid(), DOMAIN_CRASH)
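
    # recreate() is used after a xend restart to re-attach to an already
    # running device model: it re-opens the sentinel fifo recorded in
    # xenstore, re-reads the saved pid and restarts the watcher thread.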
    def recreate(self):
        if self.device_model is None:
            return
        name = self.vm.getName()
        sentinel_path_fifo = self.vm.readDom('image/device-model-fifo')
        fifo_fd = -1
        log.debug("rediscovering %s", sentinel_path_fifo)
        if sentinel_path_fifo is None:
            log.debug("%s device model no sentinel, cannot rediscover", name)
        else:
            try:
                # We open it O_WRONLY because that fails ENXIO if no-one
                # has it open for reading (see SuSv3). The dm process got
                # a read/write descriptor from our earlier invocation.
                fifo_fd = os.open(sentinel_path_fifo, os.O_WRONLY|os.O_NONBLOCK)
            except OSError, e:
                if e.errno == errno.ENXIO:
                    self._dmfailed("%s device model no longer running" % name)
                elif e.errno == errno.ENOENT:
                    log.debug("%s device model sentinel %s absent!",
                              name, sentinel_path_fifo)
                else:
                    raise
        if fifo_fd >= 0:
            self._openSentinel(sentinel_path_fifo)
            os.close(fifo_fd)
        self.pid = self.vm.gatherDom(('image/device-model-pid', int))
        log.debug("%s device model rediscovered, pid %s sentinel fifo %s",
                  name, self.pid, sentinel_path_fifo)
        self.sentinel_thread = thread.start_new_thread(self._sentinel_watch,())
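
    # _sentinel_watch() runs in its own thread. It blocks reading the
    # sentinel fifo until the device model closes it (normally by exiting),
    # then reaps or kills the process and reports the failure through
    # _dmfailed(), which marks the domain as crashed.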
    def _sentinel_watch(self):
        log.info("waiting for sentinel_fifo")
        try: self.sentinel_fifo.read(1)
        except OSError, e: pass
        self.sentinel_lock.acquire()
        try:
            if self.pid:
                (p, st) = os.waitpid(self.pid, os.WNOHANG)
                if p == self.pid:
                    message = oshelp.waitstatus_description(st)
                else:
                    # obviously it is malfunctioning, kill it now
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                        message = "malfunctioning (closed sentinel), killed"
                    except:
                        message = "malfunctioning or died ?"
                message = "pid %d: %s" % (self.pid, message)
            else:
                message = "no longer running"
        except Exception, e:
            message = "waitpid failed: %s" % utils.exception_string(e)
        message = "device model failure: %s" % message
        try: message += "; see %s " % self.logfile
        except: pass
        self._dmfailed(message)
        self.pid = None
        self.sentinel_lock.release()
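
    # destroyDeviceModel() asks the device model to exit with SIGHUP, tries
    # to reap it for up to 10 seconds, falls back to SIGKILL, and finally
    # cleans up its xenstore directory, the blktap pipes and the sentinel
    # fifo.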
    def destroyDeviceModel(self):
        if self.device_model is None:
            return
        if self.pid:
            self.sentinel_lock.acquire()
            try:
                try:
                    os.kill(self.pid, signal.SIGHUP)
                except OSError, exn:
                    log.exception(exn)
                # Try to reap the child every 100ms for 10s. Then SIGKILL it.
                for i in xrange(100):
                    try:
                        (p, rv) = os.waitpid(self.pid, os.WNOHANG)
                        if p == self.pid:
                            break
                    except OSError:
                        # This is expected if Xend has been restarted within
                        # the life of this domain. In this case, we can kill
                        # the process, but we can't wait for it because it's
                        # not our child. We continue this loop, and after it is
                        # terminated make really sure the process is going away
                        # (SIGKILL).
                        pass
                    time.sleep(0.1)
                else:
                    log.warning("DeviceModel %d took more than 10s "
                                "to terminate: sending SIGKILL" % self.pid)
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                        os.waitpid(self.pid, 0)
                    except OSError:
                        # This happens if the process doesn't exist.
                        pass
                state = xstransact.Remove("/local/domain/0/device-model/%i"
                                          % self.vm.getDomid())
            finally:
                self.pid = None
                self.sentinel_lock.release()

        try:
            os.unlink('/var/run/tap/qemu-read-%d' % self.vm.getDomid())
            os.unlink('/var/run/tap/qemu-write-%d' % self.vm.getDomid())
        except:
            pass
        try:
            del sentinel_fifos_inuse[self.sentinel_path_fifo]
            os.unlink(self.sentinel_path_fifo)
        except:
            pass
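
    # CPUID policy: the keys of self.cpuid / self.cpuid_check are CPUID
    # input leaves, optionally written as "leaf,subleaf"; the values are
    # register dictionaries handed straight to xc.domain_set_cpuid() /
    # xc.domain_check_cpuid(). The transformed results returned by libxc
    # replace the originals.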
    def setCpuid(self):
        xc.domain_set_policy_cpuid(self.vm.getDomid())

        if self.cpuid is not None:
            cpuid = self.cpuid
            transformed = {}
            for sinput, regs in cpuid.iteritems():
                inputs = sinput.split(',')
                input = long(inputs[0])
                sub_input = None
                if len(inputs) == 2:
                    sub_input = long(inputs[1])
                t = xc.domain_set_cpuid(self.vm.getDomid(),
                                        input, sub_input, regs)
                transformed[sinput] = t
            self.cpuid = transformed

        if self.cpuid_check is not None:
            cpuid_check = self.cpuid_check
            transformed = {}
            for sinput, regs_check in cpuid_check.iteritems():
                inputs = sinput.split(',')
                input = long(inputs[0])
                sub_input = None
                if len(inputs) == 2:
                    sub_input = long(inputs[1])
                t = xc.domain_check_cpuid(input, sub_input, regs_check)
                transformed[sinput] = t
            self.cpuid_check = transformed


class LinuxImageHandler(ImageHandler):

    ostype = "linux"
    flags = 0
    vhpt = 0

    def configure(self, vmConfig):
        ImageHandler.configure(self, vmConfig)
        self.vramsize = int(vmConfig['platform'].get('videoram', 4)) * 1024
        self.is_stubdom = (self.kernel.find('stubdom') >= 0)

    def buildDomain(self):
        store_evtchn = self.vm.getStorePort()
        console_evtchn = self.vm.getConsolePort()

        mem_mb = self.getRequiredInitialReservation() / 1024

        log.debug("domid          = %d", self.vm.getDomid())
        log.debug("memsize        = %d", mem_mb)
        log.debug("image          = %s", self.kernel)
        log.debug("store_evtchn   = %d", store_evtchn)
        log.debug("console_evtchn = %d", console_evtchn)
        log.debug("cmdline        = %s", self.cmdline)
        log.debug("ramdisk        = %s", self.ramdisk)
        log.debug("vcpus          = %d", self.vm.getVCpuCount())
        log.debug("features       = %s", self.vm.getFeatures())
        log.debug("flags          = %d", self.flags)
        if arch.type == "ia64":
            log.debug("vhpt           = %d", self.vhpt)

        return xc.linux_build(domid          = self.vm.getDomid(),
                              memsize        = mem_mb,
                              image          = self.kernel,
                              store_evtchn   = store_evtchn,
                              console_evtchn = console_evtchn,
                              cmdline        = self.cmdline,
                              ramdisk        = self.ramdisk,
                              features       = self.vm.getFeatures(),
                              flags          = self.flags,
                              vhpt           = self.vhpt)

    def getRequiredAvailableMemory(self, mem_kb):
        if self.is_stubdom:
            mem_kb += self.vramsize
        return mem_kb

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredMaximumReservation(self):
        return self.vm.getMemoryMaximum()

    def parseDeviceModelArgs(self, vmConfig):
        ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
        # Equivalent to old xenconsoled behaviour. Should make
        # it configurable in future
        ret = ret + ["-serial", "pty"]
        return ret
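
    # A PV guest's device model (when one is configured) is started with the
    # paravirtual machine type "-M xenpv" and a pty-backed serial port; HVM
    # guests use "-M xenfv" instead (see HVMImageHandler below).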
    def getDeviceModelArgs(self, restore = False):
        args = ImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-M", "xenpv"])
        return args


class HVMImageHandler(ImageHandler):

    ostype = "hvm"

    def __init__(self, vm, vmConfig):
        ImageHandler.__init__(self, vm, vmConfig)
        self.shutdownWatch = None
        self.rebootFeatureWatch = None

    def configure(self, vmConfig):
        ImageHandler.configure(self, vmConfig)

        self.loader = vmConfig['platform'].get('loader')

        info = xc.xeninfo()
        if 'hvm' not in info['xen_caps']:
            raise HVMRequired()

        rtc_timeoffset = vmConfig['platform'].get('rtc_timeoffset')

        if not self.display:
            self.display = ''
        self.vm.storeVm(("image/dmargs", " ".join(self.dmargs)),
                        ("image/device-model", self.device_model),
                        ("image/display", self.display))
        self.vm.permissionsVm("image/dmargs", { 'dom': self.vm.getDomid(), 'read': True } )
        self.vm.storeVm(("rtc/timeoffset", rtc_timeoffset))
        self.vm.permissionsVm("rtc/timeoffset", { 'dom': self.vm.getDomid(), 'read': True } )

        self.apic = int(vmConfig['platform'].get('apic', 0))
        self.acpi = int(vmConfig['platform'].get('acpi', 0))
        self.guest_os_type = vmConfig['platform'].get('guest_os_type')


    # Return a list of cmd line args to the device models based on the
    # xm config file
    def parseDeviceModelArgs(self, vmConfig):
        ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
        ret = ret + ['-vcpus', str(self.vm.getVCpuCount())]

        if self.kernel:
            log.debug("kernel         = %s", self.kernel)
            ret = ret + ['-kernel', self.kernel]
        if self.ramdisk:
            log.debug("ramdisk        = %s", self.ramdisk)
            ret = ret + ['-initrd', self.ramdisk]
        if self.cmdline:
            log.debug("cmdline        = %s", self.cmdline)
            ret = ret + ['-append', self.cmdline]

        dmargs = [ 'boot', 'fda', 'fdb', 'soundhw',
                   'localtime', 'serial', 'stdvga', 'isa',
                   'acpi', 'usb', 'usbdevice' ]

        for a in dmargs:
            v = vmConfig['platform'].get(a)

            # python doesn't allow '-' in variable names
            if a == 'stdvga': a = 'std-vga'
            if a == 'keymap': a = 'k'

            # Handle booleans gracefully
            if a in ['localtime', 'std-vga', 'isa', 'usb', 'acpi']:
                try:
                    if v != None: v = int(v)
                    if v: ret.append("-%s" % a)
                except (ValueError, TypeError):
                    pass # if we can't convert it to a sane type, ignore it
            else:
                if v:
                    ret.append("-%s" % a)
                    ret.append("%s" % v)

            if a in ['fda', 'fdb']:
                if v:
                    if not os.path.isabs(v):
                        raise VmError("Floppy file %s does not exist." % v)
            log.debug("args: %s, val: %s" % (a, v))

        # Handle disk/network related options
        mac = None
        nics = 0

        for devuuid in vmConfig['vbd_refs']:
            devinfo = vmConfig['devices'][devuuid][1]
            uname = devinfo.get('uname')
            if uname is not None and 'file:' in uname:
                (_, vbdparam) = string.split(uname, ':', 1)
                if not os.path.isfile(vbdparam):
                    raise VmError('Disk image does not exist: %s' %
                                  vbdparam)

        for devuuid in vmConfig['vif_refs']:
            devinfo = vmConfig['devices'][devuuid][1]
            dtype = devinfo.get('type', 'ioemu')
            if dtype != 'ioemu':
                continue
            nics += 1
            mac = devinfo.get('mac')
            if mac is None:
                raise VmError("MAC address not specified or generated.")
            bridge = devinfo.get('bridge', 'xenbr0')
            model = devinfo.get('model', 'rtl8139')
            ret.append("-net")
            ret.append("nic,vlan=%d,macaddr=%s,model=%s" %
                       (nics, mac, model))
            ret.append("-net")
            ret.append("tap,vlan=%d,ifname=tap%d.%d,bridge=%s" %
                       (nics, self.vm.getDomid(), nics-1, bridge))

        if nics == 0:
            ret.append("-net")
            ret.append("none")

        return ret

    def getDeviceModelArgs(self, restore = False):
        args = ImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-M", "xenfv"])
        if restore:
            args = args + ([ "-loadvm", "/var/lib/xen/qemu-save.%d" %
                             self.vm.getDomid() ])
        return args

    def buildDomain(self):
        store_evtchn = self.vm.getStorePort()

        memmax_mb = self.getRequiredMaximumReservation() / 1024
        mem_mb = self.getRequiredInitialReservation() / 1024

        log.debug("domid        = %d", self.vm.getDomid())
        log.debug("image        = %s", self.loader)
        log.debug("store_evtchn = %d", store_evtchn)
        log.debug("memsize      = %d", memmax_mb)
        log.debug("target       = %d", mem_mb)
        log.debug("vcpus        = %d", self.vm.getVCpuCount())
        log.debug("acpi         = %d", self.acpi)
        log.debug("apic         = %d", self.apic)

        rc = xc.hvm_build(domid   = self.vm.getDomid(),
                          image   = self.loader,
                          memsize = memmax_mb,
                          target  = mem_mb,
                          vcpus   = self.vm.getVCpuCount(),
                          acpi    = self.acpi,
                          apic    = self.apic)
        rc['notes'] = { 'SUSPEND_CANCEL': 1 }

        rc['store_mfn'] = xc.hvm_get_param(self.vm.getDomid(),
                                           HVM_PARAM_STORE_PFN)
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_STORE_EVTCHN,
                         store_evtchn)

        return rc


class IA64_HVM_ImageHandler(HVMImageHandler):

    def configure(self, vmConfig):
        HVMImageHandler.configure(self, vmConfig)
        self.vhpt = int(vmConfig['platform'].get('vhpt', 0))
        self.vramsize = int(vmConfig['platform'].get('videoram', 4)) * 1024

    def buildDomain(self):
        xc.nvram_init(self.vm.getName(), self.vm.getDomid())
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_VHPT_SIZE, self.vhpt)
        if self.guest_os_type is not None:
            xc.set_os_type(self.guest_os_type.lower(), self.vm.getDomid())
        return HVMImageHandler.buildDomain(self)

    def getRequiredAvailableMemory(self, mem_kb):
        page_kb = 16
        # ROM size for guest firmware, io page, xenstore page,
        # buffer io page, buffer pio page and memmap info page
        extra_pages = 1024 + 5
        mem_kb += extra_pages * page_kb
        mem_kb += self.vramsize
        return mem_kb

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        # Explicit shadow memory is not a concept
        return 0

    def getDeviceModelArgs(self, restore = False):
        args = HVMImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-m", "%s" %
                         (self.getRequiredInitialReservation() / 1024) ])
        return args

    def setCpuid(self):
        # Guest CPUID configuration is not implemented yet.
        return


class IA64_Linux_ImageHandler(LinuxImageHandler):

    def configure(self, vmConfig):
        LinuxImageHandler.configure(self, vmConfig)
        self.vhpt = int(vmConfig['platform'].get('vhpt', 0))

    def setCpuid(self):
        # Guest CPUID configuration is not implemented yet.
        return


class X86_HVM_ImageHandler(HVMImageHandler):

    def configure(self, vmConfig):
        HVMImageHandler.configure(self, vmConfig)
        self.pae = int(vmConfig['platform'].get('pae', 0))
        self.vramsize = int(vmConfig['platform'].get('videoram', 4)) * 1024

    def buildDomain(self):
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_PAE_ENABLED, self.pae)
        rc = HVMImageHandler.buildDomain(self)
        self.setCpuid()
        return rc

    def getRequiredAvailableMemory(self, mem_kb):
        return mem_kb + self.vramsize

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredMaximumReservation(self):
        return self.vm.getMemoryMaximum()

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        # 256 pages (1MB) per vcpu,
        # plus 1 page per MiB of RAM for the P2M map,
        # plus 1 page per MiB of RAM to shadow the resident processes.
        # This is higher than the minimum that Xen would allocate if no value
        # were given (but the Xen minimum is for safety, not performance).
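        # Worked example (illustrative): 2 vcpus and 2048 MiB of maxmem give
        # max(4 * (256 * 2 + 2 * 2048), shadow_mem_kb)
        #   = max(18432, shadow_mem_kb) KiB, i.e. at least 18 MiB.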
        return max(4 * (256 * self.vm.getVCpuCount() + 2 * (maxmem_kb / 1024)),
                   shadow_mem_kb)


class X86_Linux_ImageHandler(LinuxImageHandler):

    def buildDomain(self):
        # set physical mapping limit
        # add an 8MB slack to balance backend allocations.
        mem_kb = self.getRequiredMaximumReservation() + (8 * 1024)
        xc.domain_set_memmap_limit(self.vm.getDomid(), mem_kb)
        rc = LinuxImageHandler.buildDomain(self)
        self.setCpuid()
        return rc
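
# Table consulted by findImageHandlerClass() below:
# architecture type (from xen.xend.arch) -> image type -> handler class.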
_handlers = {
    "ia64": {
        "linux": IA64_Linux_ImageHandler,
        "hvm": IA64_HVM_ImageHandler,
    },
    "x86": {
        "linux": X86_Linux_ImageHandler,
        "hvm": X86_HVM_ImageHandler,
    },
}

def findImageHandlerClass(image):
    """Find the image handler class for an image config.

    @param image config
    @return ImageHandler subclass or None
    """
    image_type = image.image_type()
    try:
        return _handlers[arch.type][image_type]
    except KeyError:
        raise VmError('unknown image type: ' + image_type)