ia64/xen-unstable
tools/python/xen/xend/image.py @ 18266:0afe29785305

xend: vncconsole config option mustn't be a string
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author      Keir Fraser <keir.fraser@citrix.com>
date        Tue Aug 05 13:45:25 2008 +0100 (2008-08-05)
parents     18b41609a980
children    fa98f03a6bcd
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================

import os, os.path, string
import re
import math
import time
import signal
import thread
import fcntl
import sys
import errno
import glob
import traceback

import xen.lowlevel.xc
from xen.xend.XendConstants import *
from xen.xend.XendError import VmError, XendError, HVMRequired
from xen.xend.XendLogging import log
from xen.xend.XendOptions import instance as xenopts
from xen.xend.xenstore.xstransact import xstransact
from xen.xend.xenstore.xswatch import xswatch
from xen.xend import arch
from xen.xend import XendOptions
from xen.util import oshelp
from xen.util import utils

xc = xen.lowlevel.xc.xc()

MAX_GUEST_CMDLINE = 1024

sentinel_path_prefix = '/var/run/xend/dm-'
sentinel_fifos_inuse = { }


def cleanup_stale_sentinel_fifos():
    for path in glob.glob(sentinel_path_prefix + '*.fifo'):
        if path in sentinel_fifos_inuse: continue
        try: os.unlink(path)
        except OSError, e:
            log.warning('could not delete stale fifo %s: %s',
                        path, utils.exception_string(e))


def create(vm, vmConfig):
    """Create an image handler for a vm.

    @return ImageHandler instance
    """
    return findImageHandlerClass(vmConfig)(vm, vmConfig)


class ImageHandler:
    """Abstract base class for image handlers.

    createImage() is called to configure and build the domain from its
    kernel image and ramdisk etc.

    The method buildDomain() is used to build the domain, and must be
    defined in a subclass.  Usually this is the only method that needs
    defining in a subclass.

    The method createDeviceModel() is called to create the domain device
    model.

    The method destroyDeviceModel() is called to reap the device model
    """

    ostype = None
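
    # Illustrative note (not part of the original xend source): the minimal
    # shape of a concrete subclass.  Real subclasses follow later in this
    # file (LinuxImageHandler, HVMImageHandler and their per-arch variants).
    #
    #   class ExampleImageHandler(ImageHandler):
    #       ostype = "example"
    #       def buildDomain(self):
    #           # createDomain() treats any non-dict return value as an
    #           # error, so a successful build must return a dict of
    #           # build results (as xc.linux_build()/xc.hvm_build() do).
    #           return {}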

    def __init__(self, vm, vmConfig):
        self.vm = vm

        self.bootloader = False
        self.kernel = None
        self.ramdisk = None
        self.cmdline = None

        self.configure(vmConfig)

    def configure(self, vmConfig):
        """Config actions common to all unix-like domains."""
        if '_temp_using_bootloader' in vmConfig:
            self.bootloader = True
            self.kernel = vmConfig['_temp_kernel']
            self.cmdline = vmConfig['_temp_args']
            self.ramdisk = vmConfig['_temp_ramdisk']
        else:
            self.kernel = vmConfig['PV_kernel']
            self.cmdline = vmConfig['PV_args']
            self.ramdisk = vmConfig['PV_ramdisk']
        self.vm.storeVm(("image/ostype", self.ostype),
                        ("image/kernel", self.kernel),
                        ("image/cmdline", self.cmdline),
                        ("image/ramdisk", self.ramdisk))
        self.vm.permissionsVm("image/cmdline",
                              { 'dom': self.vm.getDomid(), 'read': True } )

        self.device_model = vmConfig['platform'].get('device_model')

        self.display = vmConfig['platform'].get('display')
        self.xauthority = vmConfig['platform'].get('xauthority')
        self.vncconsole = int(vmConfig['platform'].get('vncconsole', 0))
        self.dmargs = self.parseDeviceModelArgs(vmConfig)
        self.pid = None
        rtc_timeoffset = vmConfig['platform'].get('rtc_timeoffset')
        if rtc_timeoffset is not None:
            xc.domain_set_time_offset(self.vm.getDomid(), int(rtc_timeoffset))

        self.cpuid = None
        self.cpuid_check = None
        if 'cpuid' in vmConfig:
            self.cpuid = vmConfig['cpuid']
        if 'cpuid_check' in vmConfig:
            self.cpuid_check = vmConfig['cpuid_check']

    def cleanupBootloading(self):
        if self.bootloader:
            self.unlink(self.kernel)
            self.unlink(self.ramdisk)

    def unlink(self, f):
        if not f: return
        try:
            os.unlink(f)
        except OSError, ex:
            log.warning("error removing bootloader file '%s': %s", f, ex)

    def createImage(self):
        """Entry point to create domain memory image.
        Override in subclass if needed.
        """
        return self.createDomain()

    def createDomain(self):
        """Build the domain boot image.
        """
        # Set params and call buildDomain().

        if self.kernel and not os.path.isfile(self.kernel):
            raise VmError('Kernel image does not exist: %s' % self.kernel)
        if self.ramdisk and not os.path.isfile(self.ramdisk):
            raise VmError('Kernel ramdisk does not exist: %s' % self.ramdisk)
        if len(self.cmdline) >= MAX_GUEST_CMDLINE:
            log.warning('kernel cmdline too long, domain %d',
                        self.vm.getDomid())

        log.info("buildDomain os=%s dom=%d vcpus=%d", self.ostype,
                 self.vm.getDomid(), self.vm.getVCpuCount())

        result = self.buildDomain()

        if isinstance(result, dict):
            return result
        else:
            raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
                          % (self.ostype, self.vm.getDomid(), str(result)))

    def getRequiredAvailableMemory(self, mem_kb):
        """@param mem_kb The configured maxmem or memory, in KiB.
        @return The corresponding required amount of memory for the domain,
        also in KiB.  This is normally the given mem_kb, but architecture- or
        image-specific code may override this to add headroom where
        necessary."""
        return mem_kb

    def getRequiredInitialReservation(self):
        """@return The required amount of memory to be free for the
        configured memory target, in KiB.  This is normally the same as
        getRequiredAvailableMemory, but architecture- or image-specific code
        may override this to add headroom where necessary."""
        return self.getRequiredAvailableMemory(self.vm.getMemoryTarget())

    def getRequiredMaximumReservation(self):
        """@return The required amount of memory to be free for the maximum
        possible memory, in KiB.  This is normally the same as
        getRequiredAvailableMemory, but architecture- or image-specific code
        may override this to add headroom where necessary."""
        return self.getRequiredAvailableMemory(self.vm.getMemoryMaximum())

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        """@param shadow_mem_kb The configured shadow memory, in KiB.
        @param maxmem_kb The configured maxmem, in KiB.
        @return The corresponding required amount of shadow memory, also in
        KiB."""
        # PV domains don't need any shadow memory
        return 0

    def buildDomain(self):
        """Build the domain. Define in subclass."""
        raise NotImplementedError()

    def prepareEnvironment(self):
        """Prepare the environment for the execution of the domain. This
        method is called before any devices are set up."""

        domid = self.vm.getDomid()

        # Delete left-over pipes
        try:
            os.unlink('/var/run/tap/qemu-read-%d' % domid)
            os.unlink('/var/run/tap/qemu-write-%d' % domid)
        except:
            pass

        # No device model, don't create pipes
        if self.device_model is None:
            return

        # If we use a device model, the pipes for communication between
        # blktapctrl and ioemu must be present before the devices are
        # created (blktapctrl must access them for new block devices)

        # mkdir throws an exception if the path already exists
        try:
            os.mkdir('/var/run/tap', 0755)
        except:
            pass

        try:
            os.mkfifo('/var/run/tap/qemu-read-%d' % domid, 0600)
            os.mkfifo('/var/run/tap/qemu-write-%d' % domid, 0600)
        except OSError, e:
            log.warn('Could not create blktap pipes for domain %d' % domid)
            log.exception(e)
            pass

    # Return a list of cmd line args to the device models based on the
    # xm config file
    def parseDeviceModelArgs(self, vmConfig):
        ret = ["-domain-name", str(self.vm.info['name_label'])]

        # Find RFB console device, and if it exists, make QEMU enable
        # the VNC console.
        if int(vmConfig['platform'].get('nographic', 0)) != 0:
            # skip vnc init if nographic is set
            ret.append('-nographic')
            return ret

        vnc_config = {}
        has_vnc = int(vmConfig['platform'].get('vnc', 0)) != 0
        has_sdl = int(vmConfig['platform'].get('sdl', 0)) != 0
        opengl = 1
        keymap = vmConfig['platform'].get("keymap")
        for dev_uuid in vmConfig['console_refs']:
            dev_type, dev_info = vmConfig['devices'][dev_uuid]
            if dev_type == 'vfb':
                if 'keymap' in dev_info:
                    keymap = dev_info.get('keymap', {})
                vfb_type = dev_info.get('type', {})
                if vfb_type == 'sdl':
                    self.display = dev_info.get('display', {})
                    self.xauthority = dev_info.get('xauthority', {})
                    opengl = int(dev_info.get('opengl', opengl))
                    has_sdl = True
                else:
                    vnc_config = dev_info.get('other_config', {})
                    has_vnc = True
                break

        if keymap:
            ret.append("-k")
            ret.append(keymap)

        if has_vnc:
            if not vnc_config:
                for key in ('vncunused', 'vnclisten', 'vncdisplay',
                            'vncpasswd'):
                    if key in vmConfig['platform']:
                        vnc_config[key] = vmConfig['platform'][key]
            if vnc_config.has_key("vncpasswd"):
                passwd = vnc_config["vncpasswd"]
            else:
                passwd = XendOptions.instance().get_vncpasswd_default()
            vncopts = ""
            if passwd:
                self.vm.storeVm("vncpasswd", passwd)
                self.vm.permissionsVm("vncpasswd",
                                      { 'dom': self.vm.getDomid(), 'read': True } )
                vncopts = vncopts + ",password"
                log.debug("Stored a VNC password for vfb access")
            else:
                log.debug("No VNC passwd configured for vfb access")

            if XendOptions.instance().get_vnc_tls():
                vncx509certdir = XendOptions.instance().get_vnc_x509_cert_dir()
                vncx509verify = XendOptions.instance().get_vnc_x509_verify()

                if not os.path.exists(vncx509certdir):
                    raise VmError("VNC x509 certificate dir %s does not exist"
                                  % vncx509certdir)

                if vncx509verify:
                    vncopts = vncopts + ",tls,x509verify=%s" % vncx509certdir
                else:
                    vncopts = vncopts + ",tls,x509=%s" % vncx509certdir

            vnclisten = vnc_config.get('vnclisten',
                                       XendOptions.instance().get_vnclisten_address())
            vncdisplay = int(vnc_config.get('vncdisplay', 0))
            ret.append('-vnc')
            ret.append("%s:%s%s" % (vnclisten, vncdisplay, vncopts))

            if int(vnc_config.get('vncunused', 1)) != 0:
                ret.append('-vncunused')

        elif has_sdl:
            # SDL is default in QEMU.
            if int(vmConfig['platform'].get('opengl', opengl)) != 1:
                ret.append('-disable-opengl')
        else:
            ret.append('-nographic')

        if int(vmConfig['platform'].get('monitor', 0)) != 0:
            ret = ret + ['-monitor', 'vc']

        return ret
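
    # Illustrative example (assumed values, not from the original source):
    # for a guest configured with vnc=1, vncunused=1, keymap='en-us' and a
    # vnclisten address of 127.0.0.1, the list returned above would look
    # roughly like:
    #
    #   ['-domain-name', 'example-vm',
    #    '-k', 'en-us',
    #    '-vnc', '127.0.0.1:0',
    #    '-vncunused']
    #
    # with ',password' and/or ',tls,x509=...' appended to the -vnc option
    # when a VNC password or TLS certificates are configured.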

    def getDeviceModelArgs(self, restore = False):
        args = [self.device_model]
        args = args + ([ "-d", "%d" % self.vm.getDomid() ])
        args = args + self.dmargs
        return args

    def _openSentinel(self, sentinel_path_fifo):
        self.sentinel_fifo = file(sentinel_path_fifo, 'r')
        self.sentinel_lock = thread.allocate_lock()
        oshelp.fcntl_setfd_cloexec(self.sentinel_fifo, True)
        sentinel_fifos_inuse[sentinel_path_fifo] = 1
        self.sentinel_path_fifo = sentinel_path_fifo

    def createDeviceModel(self, restore = False):
        if self.device_model is None:
            return
        if self.pid:
            return
        # Execute device model.
        #todo: Error handling
        args = self.getDeviceModelArgs(restore)
        env = dict(os.environ)
        if self.display:
            env['DISPLAY'] = self.display
        if self.xauthority:
            env['XAUTHORITY'] = self.xauthority
        if self.vncconsole:
            args = args + ([ "-vncviewer" ])
        unique_id = "%i-%i" % (self.vm.getDomid(), time.time())
        sentinel_path = sentinel_path_prefix + unique_id
        sentinel_path_fifo = sentinel_path + '.fifo'
        os.mkfifo(sentinel_path_fifo, 0600)
        sentinel_write = file(sentinel_path_fifo, 'r+')
        self._openSentinel(sentinel_path_fifo)
        self.vm.storeDom("image/device-model-fifo", sentinel_path_fifo)
        xstransact.Mkdir("/local/domain/0/device-model/%i" % self.vm.getDomid())
        xstransact.SetPermissions("/local/domain/0/device-model/%i"
                                  % self.vm.getDomid(),
                                  { 'dom': self.vm.getDomid(),
                                    'read': True, 'write': True })
        log.info("spawning device models: %s %s", self.device_model, args)
        # keep track of pid and spawned options to kill it later

        self.logfile = "/var/log/xen/qemu-dm-%s.log" % str(self.vm.info['name_label'])

        # rotate log
        logfile_mode = os.O_WRONLY|os.O_CREAT|os.O_APPEND
        logrotate_count = XendOptions.instance().get_qemu_dm_logrotate_count()
        if logrotate_count > 0:
            logfile_mode |= os.O_TRUNC
            if os.path.exists("%s.%d" % (self.logfile, logrotate_count)):
                os.unlink("%s.%d" % (self.logfile, logrotate_count))
            for n in range(logrotate_count - 1, 0, -1):
                if os.path.exists("%s.%d" % (self.logfile, n)):
                    os.rename("%s.%d" % (self.logfile, n),
                              "%s.%d" % (self.logfile, (n + 1)))
            if os.path.exists(self.logfile):
                os.rename(self.logfile, self.logfile + ".1")

        null = os.open("/dev/null", os.O_RDONLY)
        logfd = os.open(self.logfile, logfile_mode)

        sys.stderr.flush()
        pid = os.fork()
        if pid == 0:  # child
            try:
                os.dup2(null, 0)
                os.dup2(logfd, 1)
                os.dup2(logfd, 2)
                os.close(null)
                os.close(logfd)
                self.sentinel_fifo.close()
                try:
                    os.execve(self.device_model, args, env)
                except Exception, e:
                    print >>sys.stderr, (
                        'failed to set up fds or execute dm %s: %s' %
                        (self.device_model, utils.exception_string(e)))
                    os._exit(126)
            except:
                os._exit(127)
        else:
            self.pid = pid
            os.close(null)
            os.close(logfd)
            sentinel_write.close()
            self.vm.storeDom("image/device-model-pid", self.pid)
            log.info("device model pid: %d", self.pid)
            # we would very much prefer not to have a thread here and instead
            # have a callback but sadly we don't have Twisted in xend
            self.sentinel_thread = thread.start_new_thread(self._sentinel_watch, ())

    def signalDeviceModel(self, cmd, ret, par = None):
        if self.device_model is None:
            return
        # Signal the device model to take action
        if cmd == '' or ret == '':
            raise VmError('need valid command and result when signalling '
                          'the device model')

        orig_state = xstransact.Read("/local/domain/0/device-model/%i/state"
                                     % self.vm.getDomid())

        if par is not None:
            xstransact.Store("/local/domain/0/device-model/%i"
                             % self.vm.getDomid(), ('parameter', par))

        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('command', cmd))
        # Wait for confirmation.  Could do this with a watch but we'd
        # still end up spinning here waiting for the watch to fire.
        state = ''
        count = 0
        while state != ret:
            state = xstransact.Read("/local/domain/0/device-model/%i/state"
                                    % self.vm.getDomid())
            time.sleep(0.1)
            count += 1
            if count > 100:
                raise VmError('Timed out waiting for device model action')

        # restore original state
        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('state', orig_state))
        log.info("signalDeviceModel: restore dm state to %s", orig_state)
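
    # Note on the handshake above (a summary of the code, not additional
    # xend documentation): xend and qemu-dm rendezvous through xenstore keys
    # under /local/domain/0/device-model/<domid>:
    #
    #   command    - written by xend (e.g. 'save' or 'continue')
    #   parameter  - optional argument for the command
    #   state      - written back by the device model; xend polls it every
    #                100ms and gives up after roughly 10 seconds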

    def saveDeviceModel(self):
        # Signal the device model to pause itself and save its state
        self.signalDeviceModel('save', 'paused')

    def resumeDeviceModel(self):
        if self.device_model is None:
            return
        # Signal the device model to resume activity after pausing to save.
        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('command', 'continue'))

    def _dmfailed(self, message):
        log.warning("domain %s: %s", self.vm.getName(), message)
        # ideally we would like to forcibly crash the domain with
        # something like
        #   xc.domain_shutdown(self.vm.getDomid(), DOMAIN_CRASH)
        # but this can easily lead to very rapid restart loops against
        # which we currently have no protection

    def recreate(self):
        if self.device_model is None:
            return
        name = self.vm.getName()
        sentinel_path_fifo = self.vm.readDom('image/device-model-fifo')
        fifo_fd = -1
        log.debug("rediscovering %s", sentinel_path_fifo)
        if sentinel_path_fifo is None:
            log.debug("%s device model no sentinel, cannot rediscover", name)
        else:
            try:
                # We open it O_WRONLY because that fails ENXIO if no-one
                # has it open for reading (see SuSv3).  The dm process got
                # a read/write descriptor from our earlier invocation.
                fifo_fd = os.open(sentinel_path_fifo, os.O_WRONLY|os.O_NONBLOCK)
            except OSError, e:
                if e.errno == errno.ENXIO:
                    self._dmfailed("%s device model no longer running" % name)
                elif e.errno == errno.ENOENT:
                    log.debug("%s device model sentinel %s absent!",
                              name, sentinel_path_fifo)
                else:
                    raise
        if fifo_fd >= 0:
            self._openSentinel(sentinel_path_fifo)
            os.close(fifo_fd)
            self.pid = self.vm.gatherDom(('image/device-model-pid', int))
            log.debug("%s device model rediscovered, pid %s sentinel fifo %s",
                      name, self.pid, sentinel_path_fifo)
            self.sentinel_thread = thread.start_new_thread(self._sentinel_watch, ())

    def _sentinel_watch(self):
        log.info("waiting for sentinel_fifo")
        try: self.sentinel_fifo.read(1)
        except OSError, e: pass
        self.sentinel_lock.acquire()
        try:
            if self.pid:
                (p, st) = os.waitpid(self.pid, os.WNOHANG)
                if p == self.pid:
                    message = oshelp.waitstatus_description(st)
                else:
                    # obviously it is malfunctioning, kill it now
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                        message = "malfunctioning (closed sentinel), killed"
                    except:
                        message = "malfunctioning or died ?"
                message = "pid %d: %s" % (self.pid, message)
            else:
                message = "no longer running"
        except Exception, e:
            message = "waitpid failed: %s" % utils.exception_string(e)
        message = "device model failure: %s" % message
        try: message += "; see %s " % self.logfile
        except: pass
        self._dmfailed(message)
        self.pid = None
        self.sentinel_lock.release()
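
    # How the sentinel works (a summary of the code above, for readers new
    # to it): createDeviceModel() creates a named fifo, keeps a read-only
    # descriptor (self.sentinel_fifo) and lets qemu-dm inherit a read/write
    # descriptor across exec.  The blocking read(1) here therefore returns
    # (with EOF) only once the device model exits and its end of the fifo is
    # closed, which is what triggers the cleanup and _dmfailed() reporting.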

    def destroyDeviceModel(self):
        if self.device_model is None:
            return
        if self.pid:
            self.sentinel_lock.acquire()
            try:
                try:
                    os.kill(self.pid, signal.SIGHUP)
                except OSError, exn:
                    log.exception(exn)
                try:
                    # Try to reap the child every 100ms for 10s. Then SIGKILL it.
                    for i in xrange(100):
                        (p, rv) = os.waitpid(self.pid, os.WNOHANG)
                        if p == self.pid:
                            break
                        time.sleep(0.1)
                    else:
                        log.warning("DeviceModel %d took more than 10s "
                                    "to terminate: sending SIGKILL" % self.pid)
                        os.kill(self.pid, signal.SIGKILL)
                        os.waitpid(self.pid, 0)
                except OSError, exn:
                    # This is expected if Xend has been restarted within the
                    # life of this domain.  In this case, we can kill the
                    # process, but we can't wait for it because it's not our
                    # child. We just make really sure it's going away
                    # (SIGKILL) first.
                    os.kill(self.pid, signal.SIGKILL)
                state = xstransact.Remove("/local/domain/0/device-model/%i"
                                          % self.vm.getDomid())
            finally:
                self.pid = None
                self.sentinel_lock.release()

        try:
            os.unlink('/var/run/tap/qemu-read-%d' % self.vm.getDomid())
            os.unlink('/var/run/tap/qemu-write-%d' % self.vm.getDomid())
        except:
            pass
        try:
            del sentinel_fifos_inuse[self.sentinel_path_fifo]
            os.unlink(self.sentinel_path_fifo)
        except:
            pass

    def setCpuid(self):
        xc.domain_set_policy_cpuid(self.vm.getDomid())

        if self.cpuid is not None:
            cpuid = self.cpuid
            transformed = {}
            for sinput, regs in cpuid.iteritems():
                inputs = sinput.split(',')
                input = long(inputs[0])
                sub_input = None
                if len(inputs) == 2:
                    sub_input = long(inputs[1])
                t = xc.domain_set_cpuid(self.vm.getDomid(),
                                        input, sub_input, regs)
                transformed[sinput] = t
            self.cpuid = transformed

        if self.cpuid_check is not None:
            cpuid_check = self.cpuid_check
            transformed = {}
            for sinput, regs_check in cpuid_check.iteritems():
                inputs = sinput.split(',')
                input = long(inputs[0])
                sub_input = None
                if len(inputs) == 2:
                    sub_input = long(inputs[1])
                t = xc.domain_check_cpuid(input, sub_input, regs_check)
                transformed[sinput] = t
            self.cpuid_check = transformed


class LinuxImageHandler(ImageHandler):

    ostype = "linux"
    flags = 0
    vhpt = 0

    def configure(self, vmConfig):
        ImageHandler.configure(self, vmConfig)

    def buildDomain(self):
        store_evtchn = self.vm.getStorePort()
        console_evtchn = self.vm.getConsolePort()

        mem_mb = self.getRequiredInitialReservation() / 1024

        log.debug("domid = %d", self.vm.getDomid())
        log.debug("memsize = %d", mem_mb)
        log.debug("image = %s", self.kernel)
        log.debug("store_evtchn = %d", store_evtchn)
        log.debug("console_evtchn = %d", console_evtchn)
        log.debug("cmdline = %s", self.cmdline)
        log.debug("ramdisk = %s", self.ramdisk)
        log.debug("vcpus = %d", self.vm.getVCpuCount())
        log.debug("features = %s", self.vm.getFeatures())
        if arch.type == "ia64":
            log.debug("vhpt = %d", self.flags)

        return xc.linux_build(domid = self.vm.getDomid(),
                              memsize = mem_mb,
                              image = self.kernel,
                              store_evtchn = store_evtchn,
                              console_evtchn = console_evtchn,
                              cmdline = self.cmdline,
                              ramdisk = self.ramdisk,
                              features = self.vm.getFeatures(),
                              flags = self.flags,
                              vhpt = self.vhpt)

    def parseDeviceModelArgs(self, vmConfig):
        ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
        # Equivalent to old xenconsoled behaviour. Should make
        # it configurable in future
        ret = ret + ["-serial", "pty"]
        return ret

    def getDeviceModelArgs(self, restore = False):
        args = ImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-M", "xenpv"])
        return args


class HVMImageHandler(ImageHandler):

    ostype = "hvm"

    def __init__(self, vm, vmConfig):
        ImageHandler.__init__(self, vm, vmConfig)
        self.shutdownWatch = None
        self.rebootFeatureWatch = None

    def configure(self, vmConfig):
        ImageHandler.configure(self, vmConfig)

        self.loader = vmConfig['platform'].get('loader')

        info = xc.xeninfo()
        if 'hvm' not in info['xen_caps']:
            raise HVMRequired()

        rtc_timeoffset = vmConfig['platform'].get('rtc_timeoffset')

        self.vm.storeVm(("image/dmargs", " ".join(self.dmargs)),
                        ("image/device-model", self.device_model),
                        ("image/display", self.display))
        self.vm.permissionsVm("image/dmargs",
                              { 'dom': self.vm.getDomid(), 'read': True } )
        self.vm.storeVm(("rtc/timeoffset", rtc_timeoffset))
        self.vm.permissionsVm("rtc/timeoffset",
                              { 'dom': self.vm.getDomid(), 'read': True } )

        self.apic = int(vmConfig['platform'].get('apic', 0))
        self.acpi = int(vmConfig['platform'].get('acpi', 0))
        self.guest_os_type = vmConfig['platform'].get('guest_os_type')

    # Return a list of cmd line args to the device models based on the
    # xm config file
    def parseDeviceModelArgs(self, vmConfig):
        ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
        ret = ret + ['-vcpus', str(self.vm.getVCpuCount())]

        if self.kernel:
            log.debug("kernel = %s", self.kernel)
            ret = ret + ['-kernel', self.kernel]
        if self.ramdisk:
            log.debug("ramdisk = %s", self.ramdisk)
            ret = ret + ['-initrd', self.ramdisk]
        if self.cmdline:
            log.debug("cmdline = %s", self.cmdline)
            ret = ret + ['-append', self.cmdline]

        dmargs = [ 'boot', 'fda', 'fdb', 'soundhw',
                   'localtime', 'serial', 'stdvga', 'isa',
                   'acpi', 'usb', 'usbdevice' ]

        for a in dmargs:
            v = vmConfig['platform'].get(a)

            # python doesn't allow '-' in variable names
            if a == 'stdvga': a = 'std-vga'
            if a == 'keymap': a = 'k'

            # Handle booleans gracefully
            if a in ['localtime', 'std-vga', 'isa', 'usb', 'acpi']:
                try:
                    if v != None: v = int(v)
                    if v: ret.append("-%s" % a)
                except (ValueError, TypeError):
                    pass # if we can't convert it to a sane type, ignore it
            else:
                if v:
                    ret.append("-%s" % a)
                    ret.append("%s" % v)

            if a in ['fda', 'fdb']:
                if v:
                    if not os.path.isabs(v):
                        raise VmError("Floppy file %s does not exist." % v)
            log.debug("args: %s, val: %s" % (a, v))

        # Handle disk/network related options
        mac = None
        nics = 0

        for devuuid in vmConfig['vbd_refs']:
            devinfo = vmConfig['devices'][devuuid][1]
            uname = devinfo.get('uname')
            if uname is not None and 'file:' in uname:
                (_, vbdparam) = string.split(uname, ':', 1)
                if not os.path.isfile(vbdparam):
                    raise VmError('Disk image does not exist: %s' %
                                  vbdparam)

        for devuuid in vmConfig['vif_refs']:
            devinfo = vmConfig['devices'][devuuid][1]
            dtype = devinfo.get('type', 'ioemu')
            if dtype != 'ioemu':
                continue
            nics += 1
            mac = devinfo.get('mac')
            if mac is None:
                raise VmError("MAC address not specified or generated.")
            bridge = devinfo.get('bridge', 'xenbr0')
            model = devinfo.get('model', 'rtl8139')
            ret.append("-net")
            ret.append("nic,vlan=%d,macaddr=%s,model=%s" %
                       (nics, mac, model))
            ret.append("-net")
            ret.append("tap,vlan=%d,ifname=tap%d.%d,bridge=%s" %
                       (nics, self.vm.getDomid(), nics-1, bridge))

        if nics == 0:
            ret.append("-net")
            ret.append("none")

        return ret

    def getDeviceModelArgs(self, restore = False):
        args = ImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-M", "xenfv"])
        if restore:
            args = args + ([ "-loadvm", "/var/lib/xen/qemu-save.%d" %
                             self.vm.getDomid() ])
        return args

    def buildDomain(self):
        store_evtchn = self.vm.getStorePort()

        mem_mb = self.getRequiredInitialReservation() / 1024

        log.debug("domid = %d", self.vm.getDomid())
        log.debug("image = %s", self.loader)
        log.debug("store_evtchn = %d", store_evtchn)
        log.debug("memsize = %d", mem_mb)
        log.debug("vcpus = %d", self.vm.getVCpuCount())
        log.debug("acpi = %d", self.acpi)
        log.debug("apic = %d", self.apic)

        rc = xc.hvm_build(domid = self.vm.getDomid(),
                          image = self.loader,
                          memsize = mem_mb,
                          vcpus = self.vm.getVCpuCount(),
                          acpi = self.acpi,
                          apic = self.apic)
        rc['notes'] = { 'SUSPEND_CANCEL': 1 }

        rc['store_mfn'] = xc.hvm_get_param(self.vm.getDomid(),
                                           HVM_PARAM_STORE_PFN)
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_STORE_EVTCHN,
                         store_evtchn)

        return rc


class IA64_HVM_ImageHandler(HVMImageHandler):

    def configure(self, vmConfig):
        HVMImageHandler.configure(self, vmConfig)
        self.vhpt = int(vmConfig['platform'].get('vhpt', 0))

    def buildDomain(self):
        xc.nvram_init(self.vm.getName(), self.vm.getDomid())
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_VHPT_SIZE, self.vhpt)
        if self.guest_os_type is not None:
            xc.set_os_type(self.guest_os_type.lower(), self.vm.getDomid())
        return HVMImageHandler.buildDomain(self)

    def getRequiredAvailableMemory(self, mem_kb):
        page_kb = 16
        # ROM size for guest firmware, io page, xenstore page
        # buffer io page, buffer pio page and memmap info page
        extra_pages = 1024 + 5
        mem_kb += extra_pages * page_kb
        # Add 8 MiB overhead for QEMU's video RAM.
        return mem_kb + 8192
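
    # Worked example of the headroom above (arithmetic only, not from the
    # original source): with 16 KiB pages, extra_pages = 1024 + 5 = 1029,
    # i.e. 1029 * 16 = 16464 KiB, plus 8192 KiB of video RAM, so roughly
    # 24 MiB is added on top of the configured memory.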

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        # Explicit shadow memory is not a concept
        return 0

    def getDeviceModelArgs(self, restore = False):
        args = HVMImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-m", "%s" %
                         (self.getRequiredInitialReservation() / 1024) ])
        return args

    def setCpuid(self):
        # Guest CPUID configuration is not implemented yet.
        return


class IA64_Linux_ImageHandler(LinuxImageHandler):

    def configure(self, vmConfig):
        LinuxImageHandler.configure(self, vmConfig)
        self.vhpt = int(vmConfig['platform'].get('vhpt', 0))

    def setCpuid(self):
        # Guest CPUID configuration is not implemented yet.
        return


class X86_HVM_ImageHandler(HVMImageHandler):

    def configure(self, vmConfig):
        HVMImageHandler.configure(self, vmConfig)
        self.pae = int(vmConfig['platform'].get('pae', 0))

    def buildDomain(self):
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_PAE_ENABLED, self.pae)
        rc = HVMImageHandler.buildDomain(self)
        self.setCpuid()
        return rc

    def getRequiredAvailableMemory(self, mem_kb):
        # Add 8 MiB overhead for QEMU's video RAM.
        return mem_kb + 8192

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredMaximumReservation(self):
        return self.vm.getMemoryMaximum()

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        # 256 pages (1MB) per vcpu,
        # plus 1 page per MiB of RAM for the P2M map,
        # plus 1 page per MiB of RAM to shadow the resident processes.
        # This is higher than the minimum that Xen would allocate if no value
        # were given (but the Xen minimum is for safety, not performance).
        return max(4 * (256 * self.vm.getVCpuCount() + 2 * (maxmem_kb / 1024)),
                   shadow_mem_kb)
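
    # Worked example of the shadow formula above (arithmetic only, values
    # assumed for illustration): for 2 VCPUs and a maxmem of 1 GiB
    # (maxmem_kb = 1048576), the computed floor is
    #   4 * (256 * 2 + 2 * (1048576 / 1024)) = 4 * (512 + 2048) = 10240 KiB,
    # i.e. 10 MiB, which is used unless the configured shadow_mem_kb is
    # larger.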


class X86_Linux_ImageHandler(LinuxImageHandler):

    def buildDomain(self):
        # set physical mapping limit
        # add an 8MB slack to balance backend allocations.
        mem_kb = self.getRequiredMaximumReservation() + (8 * 1024)
        xc.domain_set_memmap_limit(self.vm.getDomid(), mem_kb)
        rc = LinuxImageHandler.buildDomain(self)
        self.setCpuid()
        return rc


_handlers = {
    "ia64": {
        "linux": IA64_Linux_ImageHandler,
        "hvm": IA64_HVM_ImageHandler,
    },
    "x86": {
        "linux": X86_Linux_ImageHandler,
        "hvm": X86_HVM_ImageHandler,
    },
}


def findImageHandlerClass(image):
    """Find the image handler class for an image config.

    @param image config
    @return ImageHandler subclass or None
    """
    image_type = image.image_type()
    try:
        return _handlers[arch.type][image_type]
    except KeyError:
        raise VmError('unknown image type: ' + image_type)
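
# Illustrative call sequence (a sketch, not part of this file; the real
# driver is XendDomainInfo and its exact ordering may differ):
#
#   handler_cls = findImageHandlerClass(vmConfig)  # e.g. X86_HVM_ImageHandler
#   image = handler_cls(vm, vmConfig)              # what create() does
#   image.createImage()                            # builds the domain memory image
#   image.createDeviceModel()                      # spawns qemu-dm when configured
#   ...
#   image.destroyDeviceModel()                     # reaps qemu-dm on shutdown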