ia64/xen-unstable

view tools/python/xen/xend/image.py @ 18597:1e37779bf884

xend: Fix typo _gatherDom -> gatherDom.
Signed-off-by: Jim Fehlig <jfehlig@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Oct 09 10:06:50 2008 +0100 (2008-10-09)
parents 60937c4c5a67
children 551c3480beee
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#============================================================================
# Copyright (C) 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005-2007 XenSource Ltd
#============================================================================

import os, os.path, string
import re
import math
import time
import signal
import thread
import fcntl
import sys
import errno
import glob
import traceback

import xen.lowlevel.xc
from xen.xend.XendConstants import *
from xen.xend.XendError import VmError, XendError, HVMRequired
from xen.xend.XendLogging import log
from xen.xend.XendOptions import instance as xenopts
from xen.xend.xenstore.xstransact import xstransact
from xen.xend.xenstore.xswatch import xswatch
from xen.xend import arch
from xen.xend import XendOptions
from xen.util import oshelp
from xen.util import utils

xc = xen.lowlevel.xc.xc()

MAX_GUEST_CMDLINE = 1024

sentinel_path_prefix = '/var/run/xend/dm-'
sentinel_fifos_inuse = { }
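
# Each device model gets its own "sentinel" FIFO under sentinel_path_prefix.
# xend holds the read end open (see _openSentinel below) while qemu-dm keeps
# a read/write descriptor across exec; when the device model exits, the
# blocking read in _sentinel_watch returns and xend can reap the process and
# report the failure.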

def cleanup_stale_sentinel_fifos():
    for path in glob.glob(sentinel_path_prefix + '*.fifo'):
        if path in sentinel_fifos_inuse: continue
        try: os.unlink(path)
        except OSError, e:
            log.warning('could not delete stale fifo %s: %s',
                        path, utils.exception_string(e))

def create(vm, vmConfig):
    """Create an image handler for a vm.

    @return ImageHandler instance
    """
    return findImageHandlerClass(vmConfig)(vm, vmConfig)

class ImageHandler:
    """Abstract base class for image handlers.

    createImage() is called to configure and build the domain from its
    kernel image and ramdisk etc.

    The method buildDomain() is used to build the domain, and must be
    defined in a subclass.  Usually this is the only method that needs
    defining in a subclass.

    The method createDeviceModel() is called to create the domain device
    model.

    The method destroyDeviceModel() is called to reap the device model.
    """

    ostype = None

    def __init__(self, vm, vmConfig):
        self.vm = vm

        self.bootloader = False
        self.kernel = None
        self.ramdisk = None
        self.cmdline = None

        self.configure(vmConfig)

    def configure(self, vmConfig):
        """Config actions common to all unix-like domains."""
        if '_temp_using_bootloader' in vmConfig:
            self.bootloader = True
            self.kernel = vmConfig['_temp_kernel']
            self.cmdline = vmConfig['_temp_args']
            self.ramdisk = vmConfig['_temp_ramdisk']
        else:
            self.kernel = vmConfig['PV_kernel']
            self.cmdline = vmConfig['PV_args']
            self.ramdisk = vmConfig['PV_ramdisk']
        self.vm.storeVm(("image/ostype", self.ostype),
                        ("image/kernel", self.kernel),
                        ("image/cmdline", self.cmdline),
                        ("image/ramdisk", self.ramdisk))
        self.vm.permissionsVm("image/cmdline", { 'dom': self.vm.getDomid(), 'read': True } )

        self.device_model = vmConfig['platform'].get('device_model')

        self.display = vmConfig['platform'].get('display')
        self.xauthority = vmConfig['platform'].get('xauthority')
        self.vncconsole = int(vmConfig['platform'].get('vncconsole', 0))
        self.dmargs = self.parseDeviceModelArgs(vmConfig)
        self.pid = None
        rtc_timeoffset = vmConfig['platform'].get('rtc_timeoffset')
        if rtc_timeoffset is not None:
            xc.domain_set_time_offset(self.vm.getDomid(), int(rtc_timeoffset))

        self.cpuid = None
        self.cpuid_check = None
        if 'cpuid' in vmConfig:
            self.cpuid = vmConfig['cpuid']
        if 'cpuid_check' in vmConfig:
            self.cpuid_check = vmConfig['cpuid_check']

    def cleanupBootloading(self):
        if self.bootloader:
            self.unlink(self.kernel)
            self.unlink(self.ramdisk)

    def unlink(self, f):
        if not f: return
        try:
            os.unlink(f)
        except OSError, ex:
            log.warning("error removing bootloader file '%s': %s", f, ex)

    def createImage(self):
        """Entry point to create domain memory image.
        Override in subclass if needed.
        """
        return self.createDomain()

    def createDomain(self):
        """Build the domain boot image.
        """
        # Set params and call buildDomain().

        if self.kernel and not os.path.isfile(self.kernel):
            raise VmError('Kernel image does not exist: %s' % self.kernel)
        if self.ramdisk and not os.path.isfile(self.ramdisk):
            raise VmError('Kernel ramdisk does not exist: %s' % self.ramdisk)
        if len(self.cmdline) >= MAX_GUEST_CMDLINE:
            log.warning('kernel cmdline too long, domain %d',
                        self.vm.getDomid())

        log.info("buildDomain os=%s dom=%d vcpus=%d", self.ostype,
                 self.vm.getDomid(), self.vm.getVCpuCount())

        result = self.buildDomain()

        if isinstance(result, dict):
            return result
        else:
            raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
                          % (self.ostype, self.vm.getDomid(), str(result)))

    def getRequiredAvailableMemory(self, mem_kb):
        """@param mem_kb The configured maxmem or memory, in KiB.
        @return The corresponding required amount of memory for the domain,
        also in KiB.  This is normally the given mem_kb, but architecture- or
        image-specific code may override this to add headroom where
        necessary."""
        return mem_kb

    def getRequiredInitialReservation(self):
        """@param mem_kb The configured memory, in KiB.
        @return The corresponding required amount of memory to be free, also
        in KiB.  This is normally the same as getRequiredAvailableMemory, but
        architecture- or image-specific code may override this to
        add headroom where necessary."""
        return self.getRequiredAvailableMemory(self.vm.getMemoryTarget())

    def getRequiredMaximumReservation(self):
        """@param mem_kb The maximum possible memory, in KiB.
        @return The corresponding required amount of memory to be free, also
        in KiB.  This is normally the same as getRequiredAvailableMemory, but
        architecture- or image-specific code may override this to
        add headroom where necessary."""
        return self.getRequiredAvailableMemory(self.vm.getMemoryMaximum())

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        """@param shadow_mem_kb The configured shadow memory, in KiB.
        @param maxmem_kb The configured maxmem, in KiB.
        @return The corresponding required amount of shadow memory, also in
        KiB."""
        # PV domains don't need any shadow memory
        return 0

    def buildDomain(self):
        """Build the domain. Define in subclass."""
        raise NotImplementedError()

    def prepareEnvironment(self):
        """Prepare the environment for the execution of the domain.  This
        method is called before any devices are set up."""

        domid = self.vm.getDomid()

        # Delete left-over pipes
        try:
            os.unlink('/var/run/tap/qemu-read-%d' % domid)
            os.unlink('/var/run/tap/qemu-write-%d' % domid)
        except:
            pass

        # No device model, don't create pipes
        if self.device_model is None:
            return

        # If we use a device model, the pipes for communication between
        # blktapctrl and ioemu must be present before the devices are
        # created (blktapctrl must access them for new block devices)

        # mkdir throws an exception if the path already exists
        try:
            os.mkdir('/var/run/tap', 0755)
        except:
            pass

        try:
            os.mkfifo('/var/run/tap/qemu-read-%d' % domid, 0600)
            os.mkfifo('/var/run/tap/qemu-write-%d' % domid, 0600)
        except OSError, e:
            log.warn('Could not create blktap pipes for domain %d' % domid)
            log.exception(e)
            pass

    # Return a list of cmd line args to the device models based on the
    # xm config file
    def parseDeviceModelArgs(self, vmConfig):
        ret = ["-domain-name", str(self.vm.info['name_label'])]

        # Find RFB console device, and if it exists, make QEMU enable
        # the VNC console.
        if int(vmConfig['platform'].get('nographic', 0)) != 0:
            # skip vnc init if nographic is set
            ret.append('-nographic')
            return ret

        vnc_config = {}
        has_vnc = int(vmConfig['platform'].get('vnc', 0)) != 0
        has_sdl = int(vmConfig['platform'].get('sdl', 0)) != 0
        opengl = 1
        keymap = vmConfig['platform'].get("keymap")
        for dev_uuid in vmConfig['console_refs']:
            dev_type, dev_info = vmConfig['devices'][dev_uuid]
            if dev_type == 'vfb':
                if 'keymap' in dev_info:
                    keymap = dev_info.get('keymap', {})
                vfb_type = dev_info.get('type', {})
                if vfb_type == 'sdl':
                    self.display = dev_info.get('display', {})
                    self.xauthority = dev_info.get('xauthority', {})
                    opengl = int(dev_info.get('opengl', opengl))
                    has_sdl = True
                else:
                    vnc_config = dev_info.get('other_config', {})
                    has_vnc = True
                break

        if keymap:
            ret.append("-k")
            ret.append(keymap)

        if has_vnc:
            if not vnc_config:
                for key in ('vncunused', 'vnclisten', 'vncdisplay',
                            'vncpasswd'):
                    if key in vmConfig['platform']:
                        vnc_config[key] = vmConfig['platform'][key]
            if vnc_config.has_key("vncpasswd"):
                passwd = vnc_config["vncpasswd"]
            else:
                passwd = XendOptions.instance().get_vncpasswd_default()
            vncopts = ""
            if passwd:
                self.vm.storeVm("vncpasswd", passwd)
                self.vm.permissionsVm("vncpasswd", { 'dom': self.vm.getDomid(), 'read': True } )
                vncopts = vncopts + ",password"
                log.debug("Stored a VNC password for vfb access")
            else:
                log.debug("No VNC passwd configured for vfb access")

            if XendOptions.instance().get_vnc_tls():
                vncx509certdir = XendOptions.instance().get_vnc_x509_cert_dir()
                vncx509verify = XendOptions.instance().get_vnc_x509_verify()

                if not os.path.exists(vncx509certdir):
                    raise VmError("VNC x509 certificate dir %s does not exist" % vncx509certdir)

                if vncx509verify:
                    vncopts = vncopts + ",tls,x509verify=%s" % vncx509certdir
                else:
                    vncopts = vncopts + ",tls,x509=%s" % vncx509certdir

            vnclisten = vnc_config.get('vnclisten',
                                       XendOptions.instance().get_vnclisten_address())
            vncdisplay = int(vnc_config.get('vncdisplay', 0))
            ret.append('-vnc')
            ret.append("%s:%s%s" % (vnclisten, vncdisplay, vncopts))

            if int(vnc_config.get('vncunused', 1)) != 0:
                ret.append('-vncunused')

        elif has_sdl:
            # SDL is default in QEMU.
            if int(vmConfig['platform'].get('opengl', opengl)) != 1:
                ret.append('-disable-opengl')
        else:
            ret.append('-nographic')

        if int(vmConfig['platform'].get('monitor', 0)) != 0:
            ret = ret + ['-monitor', 'vc']
        return ret
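
    # Illustrative example (not taken from any particular config): a guest
    # named "guest1" with vnc=1, no password and xend's default vnclisten
    # address (typically 127.0.0.1) would end up with roughly
    #   ['-domain-name', 'guest1', '-vnc', '127.0.0.1:0', '-vncunused']
    # with a keymap, password or TLS suffix appended when configured.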

    def getDeviceModelArgs(self, restore = False):
        args = [self.device_model]
        args = args + ([ "-d", "%d" % self.vm.getDomid() ])
        args = args + self.dmargs
        return args

    def _openSentinel(self, sentinel_path_fifo):
        self.sentinel_fifo = file(sentinel_path_fifo, 'r')
        self.sentinel_lock = thread.allocate_lock()
        oshelp.fcntl_setfd_cloexec(self.sentinel_fifo, True)
        sentinel_fifos_inuse[sentinel_path_fifo] = 1
        self.sentinel_path_fifo = sentinel_path_fifo

    def createDeviceModel(self, restore = False):
        if self.device_model is None:
            return
        if self.pid:
            return
        # Execute device model.
        #todo: Error handling
        args = self.getDeviceModelArgs(restore)
        env = dict(os.environ)
        if self.display:
            env['DISPLAY'] = self.display
        if self.xauthority:
            env['XAUTHORITY'] = self.xauthority
        if self.vncconsole:
            args = args + ([ "-vncviewer" ])
        unique_id = "%i-%i" % (self.vm.getDomid(), time.time())
        sentinel_path = sentinel_path_prefix + unique_id
        sentinel_path_fifo = sentinel_path + '.fifo'
        os.mkfifo(sentinel_path_fifo, 0600)
        sentinel_write = file(sentinel_path_fifo, 'r+')
        self._openSentinel(sentinel_path_fifo)
        self.vm.storeDom("image/device-model-fifo", sentinel_path_fifo)
        xstransact.Mkdir("/local/domain/0/device-model/%i" % self.vm.getDomid())
        xstransact.SetPermissions("/local/domain/0/device-model/%i" % self.vm.getDomid(),
                                  { 'dom': self.vm.getDomid(), 'read': True, 'write': True })
        log.info("spawning device models: %s %s", self.device_model, args)
        # keep track of pid and spawned options to kill it later

        self.logfile = "/var/log/xen/qemu-dm-%s.log" % str(self.vm.info['name_label'])

        # rotate log
        logfile_mode = os.O_WRONLY|os.O_CREAT|os.O_APPEND
        logrotate_count = XendOptions.instance().get_qemu_dm_logrotate_count()
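        # Rotation scheme: qemu-dm-<name>.log is shifted to .1, .1 to .2 and
        # so on, keeping at most logrotate_count old files (as configured via
        # XendOptions); the rotated file beyond that count is deleted before
        # the shuffle, and the fresh log is opened truncated.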
        if logrotate_count > 0:
            logfile_mode |= os.O_TRUNC
            if os.path.exists("%s.%d" % (self.logfile, logrotate_count)):
                os.unlink("%s.%d" % (self.logfile, logrotate_count))
            for n in range(logrotate_count - 1, 0, -1):
                if os.path.exists("%s.%d" % (self.logfile, n)):
                    os.rename("%s.%d" % (self.logfile, n),
                              "%s.%d" % (self.logfile, (n + 1)))
            if os.path.exists(self.logfile):
                os.rename(self.logfile, self.logfile + ".1")

        null = os.open("/dev/null", os.O_RDONLY)
        logfd = os.open(self.logfile, logfile_mode)

        sys.stderr.flush()
        pid = os.fork()
        if pid == 0: #child
            try:
                os.dup2(null, 0)
                os.dup2(logfd, 1)
                os.dup2(logfd, 2)
                os.close(null)
                os.close(logfd)
                self.sentinel_fifo.close()
                try:
                    os.execve(self.device_model, args, env)
                except Exception, e:
                    print >>sys.stderr, (
                        'failed to set up fds or execute dm %s: %s' %
                        (self.device_model, utils.exception_string(e)))
                    os._exit(126)
            except:
                os._exit(127)
        else:
            self.pid = pid
            os.close(null)
            os.close(logfd)
            sentinel_write.close()
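            # The child kept the r+ descriptor on the fifo across exec (it is
            # not marked close-on-exec), so the device model now holds the
            # only write end; closing our copy here means the read in
            # _sentinel_watch returns exactly when qemu-dm exits.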
            self.vm.storeDom("image/device-model-pid", self.pid)
            log.info("device model pid: %d", self.pid)
            # we would very much prefer not to have a thread here and instead
            # have a callback but sadly we don't have Twisted in xend
            self.sentinel_thread = thread.start_new_thread(self._sentinel_watch,())

    def signalDeviceModel(self, cmd, ret, par = None):
        if self.device_model is None:
            return
        # Signal the device model to take action
        if cmd == '' or ret == '':
            raise VmError('need valid command and result when signalling device model')

        orig_state = xstransact.Read("/local/domain/0/device-model/%i/state"
                                     % self.vm.getDomid())

        if par is not None:
            xstransact.Store("/local/domain/0/device-model/%i"
                             % self.vm.getDomid(), ('parameter', par))

        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('command', cmd))
        # Wait for confirmation.  Could do this with a watch but we'd
        # still end up spinning here waiting for the watch to fire.
        state = ''
        count = 0
        while state != ret:
            state = xstransact.Read("/local/domain/0/device-model/%i/state"
                                    % self.vm.getDomid())
            time.sleep(0.1)
            count += 1
            if count > 100:
                raise VmError('Timed out waiting for device model action')

        # restore original state
        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('state', orig_state))
        log.info("signalDeviceModel:restore dm state to %s", orig_state)
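
    # The save/resume handshake with the device model goes through
    # /local/domain/0/device-model/<domid> in xenstore: xend writes
    # 'command' (and optionally 'parameter'), qemu-dm acknowledges by
    # writing 'state', and xend then writes the previous state back.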

    def saveDeviceModel(self):
        # Signal the device model to pause itself and save its state
        self.signalDeviceModel('save', 'paused')

    def resumeDeviceModel(self):
        if self.device_model is None:
            return
        # Signal the device model to resume activity after pausing to save.
        xstransact.Store("/local/domain/0/device-model/%i"
                         % self.vm.getDomid(), ('command', 'continue'))

    def _dmfailed(self, message):
        log.warning("domain %s: %s", self.vm.getName(), message)
        # ideally we would like to forcibly crash the domain with
        # something like
        #   xc.domain_shutdown(self.vm.getDomid(), DOMAIN_CRASH)
        # but this can easily lead to very rapid restart loops against
        # which we currently have no protection

    def recreate(self):
        if self.device_model is None:
            return
        name = self.vm.getName()
        sentinel_path_fifo = self.vm.readDom('image/device-model-fifo')
        fifo_fd = -1
        log.debug("rediscovering %s", sentinel_path_fifo)
        if sentinel_path_fifo is None:
            log.debug("%s device model no sentinel, cannot rediscover", name)
        else:
            try:
                # We open it O_WRONLY because that fails ENXIO if no-one
                # has it open for reading (see SuSv3).  The dm process got
                # a read/write descriptor from our earlier invocation.
                fifo_fd = os.open(sentinel_path_fifo, os.O_WRONLY|os.O_NONBLOCK)
            except OSError, e:
                if e.errno == errno.ENXIO:
                    self._dmfailed("%s device model no longer running" % name)
                elif e.errno == errno.ENOENT:
                    log.debug("%s device model sentinel %s absent!",
                              name, sentinel_path_fifo)
                else:
                    raise
        if fifo_fd >= 0:
            self._openSentinel(sentinel_path_fifo)
            os.close(fifo_fd)
            self.pid = self.vm.gatherDom(('image/device-model-pid', int))
            log.debug("%s device model rediscovered, pid %s sentinel fifo %s",
                      name, self.pid, sentinel_path_fifo)
            self.sentinel_thread = thread.start_new_thread(self._sentinel_watch,())

    def _sentinel_watch(self):
        log.info("waiting for sentinel_fifo")
        try: self.sentinel_fifo.read(1)
        except OSError, e: pass
        self.sentinel_lock.acquire()
        try:
            if self.pid:
                (p, st) = os.waitpid(self.pid, os.WNOHANG)
                if p == self.pid:
                    message = oshelp.waitstatus_description(st)
                else:
                    # obviously it is malfunctioning, kill it now
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                        message = "malfunctioning (closed sentinel), killed"
                    except:
                        message = "malfunctioning or died ?"
                message = "pid %d: %s" % (self.pid, message)
            else:
                message = "no longer running"
        except Exception, e:
            message = "waitpid failed: %s" % utils.exception_string(e)
        message = "device model failure: %s" % message
        try: message += "; see %s " % self.logfile
        except: pass
        self._dmfailed(message)
        self.pid = None
        self.sentinel_lock.release()

    def destroyDeviceModel(self):
        if self.device_model is None:
            return
        if self.pid:
            self.sentinel_lock.acquire()
            try:
                try:
                    os.kill(self.pid, signal.SIGHUP)
                except OSError, exn:
                    log.exception(exn)
                try:
                    # Try to reap the child every 100ms for 10s.  Then SIGKILL it.
                    for i in xrange(100):
                        (p, rv) = os.waitpid(self.pid, os.WNOHANG)
                        if p == self.pid:
                            break
                        time.sleep(0.1)
                    else:
                        log.warning("DeviceModel %d took more than 10s "
                                    "to terminate: sending SIGKILL" % self.pid)
                        os.kill(self.pid, signal.SIGKILL)
                        os.waitpid(self.pid, 0)
                except OSError, exn:
                    # This is expected if Xend has been restarted within the
                    # life of this domain.  In this case, we can kill the process,
                    # but we can't wait for it because it's not our child.
                    # We just make really sure it's going away (SIGKILL) first.
                    os.kill(self.pid, signal.SIGKILL)
                state = xstransact.Remove("/local/domain/0/device-model/%i"
                                          % self.vm.getDomid())
            finally:
                self.pid = None
                self.sentinel_lock.release()

        try:
            os.unlink('/var/run/tap/qemu-read-%d' % self.vm.getDomid())
            os.unlink('/var/run/tap/qemu-write-%d' % self.vm.getDomid())
        except:
            pass
        try:
            del sentinel_fifos_inuse[self.sentinel_path_fifo]
            os.unlink(self.sentinel_path_fifo)
        except:
            pass

    def setCpuid(self):
        xc.domain_set_policy_cpuid(self.vm.getDomid())
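
        # self.cpuid / self.cpuid_check map a CPUID leaf key of the form
        # 'input' or 'input,sub_input' (both parsed as longs below) to a
        # dict of register values that is handed to xc.domain_set_cpuid /
        # xc.domain_check_cpuid; the transformed values returned by libxc
        # replace the originals.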
        if self.cpuid is not None:
            cpuid = self.cpuid
            transformed = {}
            for sinput, regs in cpuid.iteritems():
                inputs = sinput.split(',')
                input = long(inputs[0])
                sub_input = None
                if len(inputs) == 2:
                    sub_input = long(inputs[1])
                t = xc.domain_set_cpuid(self.vm.getDomid(),
                                        input, sub_input, regs)
                transformed[sinput] = t
            self.cpuid = transformed

        if self.cpuid_check is not None:
            cpuid_check = self.cpuid_check
            transformed = {}
            for sinput, regs_check in cpuid_check.iteritems():
                inputs = sinput.split(',')
                input = long(inputs[0])
                sub_input = None
                if len(inputs) == 2:
                    sub_input = long(inputs[1])
                t = xc.domain_check_cpuid(input, sub_input, regs_check)
                transformed[sinput] = t
            self.cpuid_check = transformed


class LinuxImageHandler(ImageHandler):

    ostype = "linux"
    flags = 0
    vhpt = 0

    def configure(self, vmConfig):
        ImageHandler.configure(self, vmConfig)

    def buildDomain(self):
        store_evtchn = self.vm.getStorePort()
        console_evtchn = self.vm.getConsolePort()

        mem_mb = self.getRequiredInitialReservation() / 1024

        log.debug("domid = %d", self.vm.getDomid())
        log.debug("memsize = %d", mem_mb)
        log.debug("image = %s", self.kernel)
        log.debug("store_evtchn = %d", store_evtchn)
        log.debug("console_evtchn = %d", console_evtchn)
        log.debug("cmdline = %s", self.cmdline)
        log.debug("ramdisk = %s", self.ramdisk)
        log.debug("vcpus = %d", self.vm.getVCpuCount())
        log.debug("features = %s", self.vm.getFeatures())
        log.debug("flags = %d", self.flags)
        if arch.type == "ia64":
            log.debug("vhpt = %d", self.vhpt)

        return xc.linux_build(domid = self.vm.getDomid(),
                              memsize = mem_mb,
                              image = self.kernel,
                              store_evtchn = store_evtchn,
                              console_evtchn = console_evtchn,
                              cmdline = self.cmdline,
                              ramdisk = self.ramdisk,
                              features = self.vm.getFeatures(),
                              flags = self.flags,
                              vhpt = self.vhpt)

    def parseDeviceModelArgs(self, vmConfig):
        ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
        # Equivalent to old xenconsoled behaviour.  Should make
        # it configurable in future
        ret = ret + ["-serial", "pty"]
        return ret

    def getDeviceModelArgs(self, restore = False):
        args = ImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-M", "xenpv"])
        return args


class HVMImageHandler(ImageHandler):

    ostype = "hvm"

    def __init__(self, vm, vmConfig):
        ImageHandler.__init__(self, vm, vmConfig)
        self.shutdownWatch = None
        self.rebootFeatureWatch = None

    def configure(self, vmConfig):
        ImageHandler.configure(self, vmConfig)

        self.loader = vmConfig['platform'].get('loader')

        info = xc.xeninfo()
        if 'hvm' not in info['xen_caps']:
            raise HVMRequired()

        rtc_timeoffset = vmConfig['platform'].get('rtc_timeoffset')

        self.vm.storeVm(("image/dmargs", " ".join(self.dmargs)),
                        ("image/device-model", self.device_model),
                        ("image/display", self.display))
        self.vm.permissionsVm("image/dmargs", { 'dom': self.vm.getDomid(), 'read': True } )
        self.vm.storeVm(("rtc/timeoffset", rtc_timeoffset))
        self.vm.permissionsVm("rtc/timeoffset", { 'dom': self.vm.getDomid(), 'read': True } )

        self.apic = int(vmConfig['platform'].get('apic', 0))
        self.acpi = int(vmConfig['platform'].get('acpi', 0))
        self.guest_os_type = vmConfig['platform'].get('guest_os_type')

    # Return a list of cmd line args to the device models based on the
    # xm config file
    def parseDeviceModelArgs(self, vmConfig):
        ret = ImageHandler.parseDeviceModelArgs(self, vmConfig)
        ret = ret + ['-vcpus', str(self.vm.getVCpuCount())]

        if self.kernel:
            log.debug("kernel = %s", self.kernel)
            ret = ret + ['-kernel', self.kernel]
        if self.ramdisk:
            log.debug("ramdisk = %s", self.ramdisk)
            ret = ret + ['-initrd', self.ramdisk]
        if self.cmdline:
            log.debug("cmdline = %s", self.cmdline)
            ret = ret + ['-append', self.cmdline]

        dmargs = [ 'boot', 'fda', 'fdb', 'soundhw',
                   'localtime', 'serial', 'stdvga', 'isa',
                   'acpi', 'usb', 'usbdevice' ]

        for a in dmargs:
            v = vmConfig['platform'].get(a)

            # python doesn't allow '-' in variable names
            if a == 'stdvga': a = 'std-vga'
            if a == 'keymap': a = 'k'

            # Handle booleans gracefully
            if a in ['localtime', 'std-vga', 'isa', 'usb', 'acpi']:
                try:
                    if v != None: v = int(v)
                    if v: ret.append("-%s" % a)
                except (ValueError, TypeError):
                    pass # if we can't convert it to a sane type, ignore it
            else:
                if v:
                    ret.append("-%s" % a)
                    ret.append("%s" % v)

            if a in ['fda', 'fdb']:
                if v:
                    if not os.path.isabs(v):
                        raise VmError("Floppy file %s does not exist." % v)
            log.debug("args: %s, val: %s" % (a, v))

        # Handle disk/network related options
        mac = None
        nics = 0

        for devuuid in vmConfig['vbd_refs']:
            devinfo = vmConfig['devices'][devuuid][1]
            uname = devinfo.get('uname')
            if uname is not None and 'file:' in uname:
                (_, vbdparam) = string.split(uname, ':', 1)
                if not os.path.isfile(vbdparam):
                    raise VmError('Disk image does not exist: %s' %
                                  vbdparam)

        for devuuid in vmConfig['vif_refs']:
            devinfo = vmConfig['devices'][devuuid][1]
            dtype = devinfo.get('type', 'ioemu')
            if dtype != 'ioemu':
                continue
            nics += 1
            mac = devinfo.get('mac')
            if mac is None:
                raise VmError("MAC address not specified or generated.")
            bridge = devinfo.get('bridge', 'xenbr0')
            model = devinfo.get('model', 'rtl8139')
            ret.append("-net")
            ret.append("nic,vlan=%d,macaddr=%s,model=%s" %
                       (nics, mac, model))
            ret.append("-net")
            ret.append("tap,vlan=%d,ifname=tap%d.%d,bridge=%s" %
                       (nics, self.vm.getDomid(), nics-1, bridge))

        if nics == 0:
            ret.append("-net")
            ret.append("none")

        return ret

    def getDeviceModelArgs(self, restore = False):
        args = ImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-M", "xenfv"])
        if restore:
            args = args + ([ "-loadvm", "/var/lib/xen/qemu-save.%d" %
                             self.vm.getDomid() ])
        return args

    def buildDomain(self):
        store_evtchn = self.vm.getStorePort()

        mem_mb = self.getRequiredInitialReservation() / 1024

        log.debug("domid = %d", self.vm.getDomid())
        log.debug("image = %s", self.loader)
        log.debug("store_evtchn = %d", store_evtchn)
        log.debug("memsize = %d", mem_mb)
        log.debug("vcpus = %d", self.vm.getVCpuCount())
        log.debug("acpi = %d", self.acpi)
        log.debug("apic = %d", self.apic)

        rc = xc.hvm_build(domid = self.vm.getDomid(),
                          image = self.loader,
                          memsize = mem_mb,
                          vcpus = self.vm.getVCpuCount(),
                          acpi = self.acpi,
                          apic = self.apic)
        rc['notes'] = { 'SUSPEND_CANCEL': 1 }

        rc['store_mfn'] = xc.hvm_get_param(self.vm.getDomid(),
                                           HVM_PARAM_STORE_PFN)
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_STORE_EVTCHN,
                         store_evtchn)

        return rc


class IA64_HVM_ImageHandler(HVMImageHandler):

    def configure(self, vmConfig):
        HVMImageHandler.configure(self, vmConfig)
        self.vhpt = int(vmConfig['platform'].get('vhpt', 0))

    def buildDomain(self):
        xc.nvram_init(self.vm.getName(), self.vm.getDomid())
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_VHPT_SIZE, self.vhpt)
        if self.guest_os_type is not None:
            xc.set_os_type(self.guest_os_type.lower(), self.vm.getDomid())
        return HVMImageHandler.buildDomain(self)

    def getRequiredAvailableMemory(self, mem_kb):
        page_kb = 16
        # ROM size for guest firmware, io page, xenstore page
        # buffer io page, buffer pio page and memmap info page
        extra_pages = 1024 + 5
        mem_kb += extra_pages * page_kb
        # Add 8 MiB overhead for QEMU's video RAM.
        return mem_kb + 8192

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        # Explicit shadow memory is not a concept
        return 0

    def getDeviceModelArgs(self, restore = False):
        args = HVMImageHandler.getDeviceModelArgs(self, restore)
        args = args + ([ "-m", "%s" %
                         (self.getRequiredInitialReservation() / 1024) ])
        return args

    def setCpuid(self):
        # Guest CPUID configuration is not implemented yet.
        return

class IA64_Linux_ImageHandler(LinuxImageHandler):

    def configure(self, vmConfig):
        LinuxImageHandler.configure(self, vmConfig)
        self.vhpt = int(vmConfig['platform'].get('vhpt', 0))

    def setCpuid(self):
        # Guest CPUID configuration is not implemented yet.
        return

class X86_HVM_ImageHandler(HVMImageHandler):

    def configure(self, vmConfig):
        HVMImageHandler.configure(self, vmConfig)
        self.pae = int(vmConfig['platform'].get('pae', 0))

    def buildDomain(self):
        xc.hvm_set_param(self.vm.getDomid(), HVM_PARAM_PAE_ENABLED, self.pae)
        rc = HVMImageHandler.buildDomain(self)
        self.setCpuid()
        return rc

    def getRequiredAvailableMemory(self, mem_kb):
        # Add 8 MiB overhead for QEMU's video RAM.
        return mem_kb + 8192

    def getRequiredInitialReservation(self):
        return self.vm.getMemoryTarget()

    def getRequiredMaximumReservation(self):
        return self.vm.getMemoryMaximum()

    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
        # 256 pages (1MB) per vcpu,
        # plus 1 page per MiB of RAM for the P2M map,
        # plus 1 page per MiB of RAM to shadow the resident processes.
        # This is higher than the minimum that Xen would allocate if no value
        # were given (but the Xen minimum is for safety, not performance).
        return max(4 * (256 * self.vm.getVCpuCount() + 2 * (maxmem_kb / 1024)),
                   shadow_mem_kb)
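
    # Worked example of the formula above: a guest with 2 vcpus and a maxmem
    # of 1 GiB (1048576 KiB) needs
    #   4 * (256 * 2 + 2 * 1024) KiB = 10240 KiB = 10 MiB
    # of shadow memory, unless shadow_mem_kb asks for more.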


class X86_Linux_ImageHandler(LinuxImageHandler):

    def buildDomain(self):
        # set physical mapping limit
        # add an 8MB slack to balance backend allocations.
        mem_kb = self.getRequiredMaximumReservation() + (8 * 1024)
        xc.domain_set_memmap_limit(self.vm.getDomid(), mem_kb)
        rc = LinuxImageHandler.buildDomain(self)
        self.setCpuid()
        return rc

_handlers = {
    "ia64": {
        "linux": IA64_Linux_ImageHandler,
        "hvm": IA64_HVM_ImageHandler,
    },
    "x86": {
        "linux": X86_Linux_ImageHandler,
        "hvm": X86_HVM_ImageHandler,
    },
}

def findImageHandlerClass(image):
    """Find the image handler class for an image config.

    @param image config
    @return ImageHandler subclass or None
    """
    image_type = image.image_type()
    try:
        return _handlers[arch.type][image_type]
    except KeyError:
        raise VmError('unknown image type: ' + image_type)