direct-io.hg

changeset 11355:74db626d2fcf

Merge with xen-ia64-unstable.hg
author kaf24@firebug.cl.cam.ac.uk
date Wed Aug 30 21:38:34 2006 +0100 (2006-08-30)
parents 586c5fe8cf3e 8a0ad47713f1
children 50aea0ec406b
files xen/arch/ia64/xen/dom0_ops.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c	Tue Aug 29 09:08:29 2006 -0600
     1.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/kernel/setup-xen.c	Wed Aug 30 21:38:34 2006 +0100
     1.3 @@ -846,7 +846,7 @@ void __init setup_arch(char **cmdline_p)
     1.4  
     1.5  		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
     1.6  			/* Make sure we have a large enough P->M table. */
     1.7 -			phys_to_machine_mapping = alloc_bootmem(
     1.8 +			phys_to_machine_mapping = alloc_bootmem_pages(
     1.9  				end_pfn * sizeof(unsigned long));
    1.10  			memset(phys_to_machine_mapping, ~0,
    1.11  			       end_pfn * sizeof(unsigned long));
    1.12 @@ -863,7 +863,7 @@ void __init setup_arch(char **cmdline_p)
    1.13  			 * list of frames that make up the p2m table. Used by
    1.14                           * save/restore.
    1.15  			 */
    1.16 -			pfn_to_mfn_frame_list_list = alloc_bootmem(PAGE_SIZE);
    1.17 +			pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
    1.18  			HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
    1.19  				virt_to_mfn(pfn_to_mfn_frame_list_list);
    1.20  
    1.21 @@ -873,7 +873,7 @@ void __init setup_arch(char **cmdline_p)
    1.22  					k++;
    1.23  					BUG_ON(k>=fpp);
    1.24  					pfn_to_mfn_frame_list[k] =
    1.25 -						alloc_bootmem(PAGE_SIZE);
    1.26 +						alloc_bootmem_pages(PAGE_SIZE);
    1.27  					pfn_to_mfn_frame_list_list[k] =
    1.28  						virt_to_mfn(pfn_to_mfn_frame_list[k]);
    1.29  					j=0;
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Tue Aug 29 09:08:29 2006 -0600
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c	Wed Aug 30 21:38:34 2006 +0100
     2.3 @@ -114,8 +114,8 @@ typedef struct domid_translate {
     2.4  } domid_translate_t ;
     2.5  
     2.6  
     2.7 -domid_translate_t  translate_domid[MAX_TAP_DEV];
     2.8 -tap_blkif_t *tapfds[MAX_TAP_DEV];
     2.9 +static domid_translate_t  translate_domid[MAX_TAP_DEV];
    2.10 +static tap_blkif_t *tapfds[MAX_TAP_DEV];
    2.11  
    2.12  static int __init set_blkif_reqs(char *str)
    2.13  {
    2.14 @@ -1118,7 +1118,7 @@ static int do_block_io_op(blkif_t *blkif
    2.15  			       "ring does not exist!\n");
    2.16  			print_dbug = 0; /*We only print this message once*/
    2.17  		}
    2.18 -		return 1;
    2.19 +		return 0;
    2.20  	}
    2.21  
    2.22  	info = tapfds[blkif->dev_num];
    2.23 @@ -1127,7 +1127,7 @@ static int do_block_io_op(blkif_t *blkif
    2.24  			WPRINTK("Can't get UE info!\n");
    2.25  			print_dbug = 0;
    2.26  		}
    2.27 -		return 1;
    2.28 +		return 0;
    2.29  	}
    2.30  
    2.31  	while (rc != rp) {
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Aug 29 09:08:29 2006 -0600
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Aug 30 21:38:34 2006 +0100
     3.3 @@ -193,6 +193,7 @@ static void netfront_closing(struct xenb
     3.4  
     3.5  static void end_access(int, void *);
     3.6  static void netif_disconnect_backend(struct netfront_info *);
     3.7 +static int open_netdev(struct netfront_info *);
     3.8  static void close_netdev(struct netfront_info *);
     3.9  static void netif_free(struct netfront_info *);
    3.10  
    3.11 @@ -263,15 +264,22 @@ static int __devinit netfront_probe(stru
    3.12  	dev->dev.driver_data = info;
    3.13  
    3.14  	err = talk_to_backend(dev, info);
    3.15 -	if (err) {
    3.16 -		xennet_sysfs_delif(info->netdev);
    3.17 -		unregister_netdev(netdev);
    3.18 -		free_netdev(netdev);
    3.19 -		dev->dev.driver_data = NULL;
    3.20 -		return err;
    3.21 -	}
    3.22 +	if (err)
    3.23 +		goto fail_backend;
    3.24 +
    3.25 +	err = open_netdev(info);
    3.26 +	if (err)
    3.27 +		goto fail_open;
    3.28  
    3.29  	return 0;
    3.30 +
    3.31 + fail_open:
    3.32 +	xennet_sysfs_delif(info->netdev);
    3.33 +	unregister_netdev(netdev);
    3.34 + fail_backend:
    3.35 +	free_netdev(netdev);
    3.36 +	dev->dev.driver_data = NULL;
    3.37 +	return err;
    3.38  }
    3.39  
    3.40  
    3.41 @@ -1887,27 +1895,9 @@ create_netdev(int handle, int copying_re
    3.42  	SET_MODULE_OWNER(netdev);
    3.43  	SET_NETDEV_DEV(netdev, &dev->dev);
    3.44  
    3.45 -	err = register_netdev(netdev);
    3.46 -	if (err) {
    3.47 -		printk(KERN_WARNING "%s> register_netdev err=%d\n",
    3.48 -		       __FUNCTION__, err);
    3.49 -		goto exit_free_rx;
    3.50 -	}
    3.51 -
    3.52 -	err = xennet_sysfs_addif(netdev);
    3.53 -	if (err) {
    3.54 -		/* This can be non-fatal: it only means no tuning parameters */
    3.55 -		printk(KERN_WARNING "%s> add sysfs failed err=%d\n",
    3.56 -		       __FUNCTION__, err);
    3.57 -	}
    3.58 -
    3.59  	np->netdev = netdev;
    3.60 -
    3.61  	return netdev;
    3.62  
    3.63 -
    3.64 - exit_free_rx:
    3.65 -	gnttab_free_grant_references(np->gref_rx_head);
    3.66   exit_free_tx:
    3.67  	gnttab_free_grant_references(np->gref_tx_head);
    3.68   exit:
    3.69 @@ -1967,6 +1957,26 @@ static int __devexit netfront_remove(str
    3.70  }
    3.71  
    3.72  
    3.73 +static int open_netdev(struct netfront_info *info)
    3.74 +{
    3.75 +	int err;
    3.76 +	
    3.77 +	err = register_netdev(info->netdev);
    3.78 +	if (err) {
    3.79 +		printk(KERN_WARNING "%s: register_netdev err=%d\n",
    3.80 +		       __FUNCTION__, err);
    3.81 +		return err;
    3.82 +	}
    3.83 +
    3.84 +	err = xennet_sysfs_addif(info->netdev);
    3.85 +	if (err) {
    3.86 +		/* This can be non-fatal: it only means no tuning parameters */
    3.87 +		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
    3.88 +		       __FUNCTION__, err);
    3.89 +	}
    3.90 +	return 0;
    3.91 +}
    3.92 +
    3.93  static void close_netdev(struct netfront_info *info)
    3.94  {
    3.95  	del_timer_sync(&info->rx_refill_timer);
     4.1 --- a/tools/blktap/drivers/block-aio.c	Tue Aug 29 09:08:29 2006 -0600
     4.2 +++ b/tools/blktap/drivers/block-aio.c	Wed Aug 30 21:38:34 2006 +0100
     4.3 @@ -52,7 +52,7 @@
     4.4   */
     4.5  #define REQUEST_ASYNC_FD 1
     4.6  
     4.7 -#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ * 8)
     4.8 +#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ)
     4.9  
    4.10  struct pending_aio {
    4.11  	td_callback_t cb;
    4.12 @@ -146,7 +146,7 @@ int tdaio_open (struct td_state *s, cons
    4.13  	struct tdaio_state *prv = (struct tdaio_state *)s->private;
    4.14  	s->private = prv;
    4.15  
    4.16 -	DPRINTF("XXX: block-aio open('%s')", name);
    4.17 +	DPRINTF("block-aio open('%s')", name);
    4.18  	/* Initialize AIO */
    4.19  	prv->iocb_free_count = MAX_AIO_REQS;
    4.20  	prv->iocb_queued     = 0;
    4.21 @@ -156,9 +156,18 @@ int tdaio_open (struct td_state *s, cons
    4.22  
    4.23  	if (prv->poll_fd < 0) {
    4.24  		ret = prv->poll_fd;
    4.25 -		DPRINTF("Couldn't get fd for AIO poll support.  This is "
    4.26 -			"probably because your kernel does not have the "
    4.27 -			"aio-poll patch applied.\n");
    4.28 +                if (ret == -EAGAIN) {
    4.29 +                        DPRINTF("Couldn't setup AIO context.  If you are "
    4.30 +                                "trying to concurrently use a large number "
    4.31 +                                "of blktap-based disks, you may need to "
    4.32 +                                "increase the system-wide aio request limit. "
     4.33 +                                "(e.g. 'echo 1048576 > /proc/sys/fs/
    4.34 +                                "aio-max-nr')\n");
    4.35 +                } else {
    4.36 +                        DPRINTF("Couldn't get fd for AIO poll support.  This "
    4.37 +                                "is probably because your kernel does not "
    4.38 +                                "have the aio-poll patch applied.\n");
    4.39 +                }
    4.40  		goto done;
    4.41  	}
    4.42  
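
Note: both blktap drivers (block-aio.c above and block-qcow.c below) now distinguish io_setup() failing with -EAGAIN, meaning the system-wide AIO request limit is exhausted, from the older "kernel lacks the aio-poll patch" case, and both also drop the x8 factor from MAX_AIO_REQS, reducing pressure on that limit. The sketch below is illustrative only and not part of this changeset; it shows how the limit the new message refers to could be checked from Python before starting many blktap-based disks (the helper name and the request count passed in are invented):

    # Illustrative sketch, not part of this changeset.
    # /proc/sys/fs/aio-nr is the number of AIO requests currently allocated;
    # /proc/sys/fs/aio-max-nr is the system-wide limit io_setup() checks.
    def aio_headroom(wanted):
        f = open('/proc/sys/fs/aio-max-nr')
        max_nr = int(f.read())
        f.close()
        f = open('/proc/sys/fs/aio-nr')
        in_use = int(f.read())
        f.close()
        if in_use + wanted > max_nr:
            print "io_setup() is likely to fail with EAGAIN; raise the limit," \
                  " e.g.: echo 1048576 > /proc/sys/fs/aio-max-nr"
        return max_nr - in_use
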
     5.1 --- a/tools/blktap/drivers/block-qcow.c	Tue Aug 29 09:08:29 2006 -0600
     5.2 +++ b/tools/blktap/drivers/block-qcow.c	Wed Aug 30 21:38:34 2006 +0100
     5.3 @@ -51,7 +51,7 @@
     5.4  /******AIO DEFINES******/
     5.5  #define REQUEST_ASYNC_FD 1
     5.6  #define MAX_QCOW_IDS  0xFFFF
     5.7 -#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ * 8)
     5.8 +#define MAX_AIO_REQS (MAX_REQUESTS * MAX_SEGMENTS_PER_REQ)
     5.9  
    5.10  struct pending_aio {
    5.11          td_callback_t cb;
    5.12 @@ -176,10 +176,21 @@ static int init_aio_state(struct td_stat
    5.13          s->aio_ctx = (io_context_t) REQUEST_ASYNC_FD;   
    5.14          s->poll_fd = io_setup(MAX_AIO_REQS, &s->aio_ctx);
    5.15  
    5.16 -        if (s->poll_fd < 0) {
    5.17 -                DPRINTF("Retrieving Async poll fd failed\n");
    5.18 +	if (s->poll_fd < 0) {
    5.19 +                if (s->poll_fd == -EAGAIN) {
    5.20 +                        DPRINTF("Couldn't setup AIO context.  If you are "
    5.21 +                                "trying to concurrently use a large number "
    5.22 +                                "of blktap-based disks, you may need to "
    5.23 +                                "increase the system-wide aio request limit. "
     5.24 +                                "(e.g. 'echo 1048576 > /proc/sys/fs/
    5.25 +                                "aio-max-nr')\n");
    5.26 +                } else {
    5.27 +                        DPRINTF("Couldn't get fd for AIO poll support.  This "
    5.28 +                                "is probably because your kernel does not "
    5.29 +                                "have the aio-poll patch applied.\n");
    5.30 +                }
    5.31  		goto fail;
    5.32 -        }
    5.33 +	}
    5.34  
    5.35          for (i=0;i<MAX_AIO_REQS;i++)
    5.36                  s->iocb_free[i] = &s->iocb_list[i];
     6.1 --- a/tools/blktap/drivers/tapdisk.c	Tue Aug 29 09:08:29 2006 -0600
     6.2 +++ b/tools/blktap/drivers/tapdisk.c	Wed Aug 30 21:38:34 2006 +0100
     6.3 @@ -110,6 +110,7 @@ static void unmap_disk(struct td_state *
     6.4  	free(s->fd_entry);
     6.5  	free(s->blkif);
     6.6  	free(s->ring_info);
     6.7 +        free(s->private);
     6.8  	free(s);
     6.9  
    6.10  	return;
     7.1 --- a/tools/blktap/lib/xs_api.c	Tue Aug 29 09:08:29 2006 -0600
     7.2 +++ b/tools/blktap/lib/xs_api.c	Wed Aug 30 21:38:34 2006 +0100
     7.3 @@ -204,7 +204,7 @@ char *get_dom_domid(struct xs_handle *h,
     7.4  int convert_dev_name_to_num(char *name) {
     7.5  	char *p_sd, *p_hd, *p_xvd, *p_plx, *p, *alpha,*ptr;
     7.6  	int majors[10] = {3,22,33,34,56,57,88,89,90,91};
     7.7 -	int maj,i;
     7.8 +	int maj,i,ret = 0;
     7.9  
    7.10  	asprintf(&p_sd,"/dev/sd");
    7.11  	asprintf(&p_hd,"/dev/hd");
    7.12 @@ -221,7 +221,7 @@ int convert_dev_name_to_num(char *name) 
    7.13  			*ptr++;
    7.14  		}
    7.15  		*p++;
    7.16 -		return BASE_DEV_VAL + (16*i) + atoi(p);
    7.17 +		ret = BASE_DEV_VAL + (16*i) + atoi(p);
    7.18  	} else if (strstr(name, p_hd) != NULL) {
    7.19  		p = name + strlen(p_hd);
    7.20  		for (i = 0, ptr = alpha; i < strlen(alpha); i++) {
    7.21 @@ -229,7 +229,7 @@ int convert_dev_name_to_num(char *name) 
    7.22  			*ptr++;
    7.23  		}
    7.24  		*p++;
    7.25 -		return (majors[i/2]*256) + atoi(p);
    7.26 +		ret = (majors[i/2]*256) + atoi(p);
    7.27  
    7.28  	} else if (strstr(name, p_xvd) != NULL) {
    7.29  		p = name + strlen(p_xvd);
    7.30 @@ -238,17 +238,24 @@ int convert_dev_name_to_num(char *name) 
    7.31  			*ptr++;
    7.32  		}
    7.33  		*p++;
    7.34 -		return (202*256) + (16*i) + atoi(p);
    7.35 +		ret = (202*256) + (16*i) + atoi(p);
    7.36  
    7.37  	} else if (strstr(name, p_plx) != NULL) {
    7.38  		p = name + strlen(p_plx);
    7.39 -		return atoi(p);
    7.40 +		ret = atoi(p);
    7.41  
    7.42  	} else {
    7.43  		DPRINTF("Unknown device type, setting to default.\n");
    7.44 -		return BASE_DEV_VAL;
    7.45 +		ret = BASE_DEV_VAL;
    7.46  	}
    7.47 -	return 0;
    7.48 +
    7.49 +        free(p_sd);
    7.50 +        free(p_hd);
    7.51 +        free(p_xvd);
    7.52 +        free(p_plx);
    7.53 +        free(alpha);
    7.54 +        
    7.55 +	return ret;
    7.56  }
    7.57  
    7.58  /**
     8.1 --- a/tools/debugger/gdb/gdbbuild	Tue Aug 29 09:08:29 2006 -0600
     8.2 +++ b/tools/debugger/gdb/gdbbuild	Wed Aug 30 21:38:34 2006 +0100
     8.3 @@ -18,7 +18,7 @@ cd gdb-6.2.1-linux-i386-xen
     8.4  if [ "$MAKE" ]; then
     8.5      $MAKE
     8.6  elif which gmake ; then
     8.7 -    gmake -j4
     8.8 +    gmake -j4 CFLAGS=-D__XEN_TOOLS__
     8.9  else
    8.10 -    make -j4
    8.11 +    make -j4 CFLAGS=-D__XEN_TOOLS__
    8.12  fi
     9.1 --- a/tools/libxc/ia64/xc_ia64_stubs.c	Tue Aug 29 09:08:29 2006 -0600
     9.2 +++ b/tools/libxc/ia64/xc_ia64_stubs.c	Wed Aug 30 21:38:34 2006 +0100
     9.3 @@ -36,7 +36,6 @@ xc_ia64_get_pfn_list(int xc_handle, uint
     9.4      struct xen_domctl domctl;
     9.5      int num_pfns,ret;
     9.6      unsigned int __start_page, __nr_pages;
     9.7 -    unsigned long max_pfns;
     9.8      xen_pfn_t *__pfn_buf;
     9.9  
    9.10      __start_page = start_page;
    9.11 @@ -44,27 +43,22 @@ xc_ia64_get_pfn_list(int xc_handle, uint
    9.12      __pfn_buf = pfn_buf;
    9.13    
    9.14      while (__nr_pages) {
    9.15 -        max_pfns = ((unsigned long)__start_page << 32) | __nr_pages;
    9.16          domctl.cmd = XEN_DOMCTL_getmemlist;
    9.17 -        domctl.domain   = (domid_t)domid;
    9.18 -        domctl.u.getmemlist.max_pfns = max_pfns;
    9.19 +        domctl.domain = (domid_t)domid;
    9.20 +        domctl.u.getmemlist.max_pfns = __nr_pages;
    9.21 +        domctl.u.getmemlist.start_pfn =__start_page;
    9.22          domctl.u.getmemlist.num_pfns = 0;
    9.23          set_xen_guest_handle(domctl.u.getmemlist.buffer, __pfn_buf);
    9.24  
    9.25 -        if ((max_pfns != -1UL)
    9.26 -            && mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0) {
    9.27 +        if (mlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t)) != 0) {
    9.28              PERROR("Could not lock pfn list buffer");
    9.29              return -1;
    9.30          }
    9.31  
    9.32          ret = do_domctl(xc_handle, &domctl);
    9.33  
    9.34 -        if (max_pfns != -1UL)
    9.35 -            (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
    9.36 +        (void)munlock(__pfn_buf, __nr_pages * sizeof(xen_pfn_t));
    9.37  
    9.38 -        if (max_pfns == -1UL)
    9.39 -            return 0;
    9.40 -        
    9.41          num_pfns = domctl.u.getmemlist.num_pfns;
    9.42          __start_page += num_pfns;
    9.43          __nr_pages -= num_pfns;
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/tools/python/xen/xend/FlatDeviceTree.py	Wed Aug 30 21:38:34 2006 +0100
    10.3 @@ -0,0 +1,323 @@
    10.4 +#!/usr/bin/env python
    10.5 +#
    10.6 +# This library is free software; you can redistribute it and/or
    10.7 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
    10.8 +# License as published by the Free Software Foundation.
    10.9 +#
   10.10 +# This library is distributed in the hope that it will be useful,
   10.11 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
   10.12 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   10.13 +# Lesser General Public License for more details.
   10.14 +#
   10.15 +# You should have received a copy of the GNU Lesser General Public
   10.16 +# License along with this library; if not, write to the Free Software
   10.17 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   10.18 +#
   10.19 +# Copyright (C) IBM Corp. 2006
   10.20 +#
   10.21 +# Authors: Hollis Blanchard <hollisb@us.ibm.com>
   10.22 +
   10.23 +import os
   10.24 +import sys
   10.25 +import struct
   10.26 +import stat
   10.27 +import re
   10.28 +
   10.29 +_OF_DT_HEADER = int("d00dfeed", 16) # avoid signed/unsigned FutureWarning
   10.30 +_OF_DT_BEGIN_NODE = 0x1
   10.31 +_OF_DT_END_NODE = 0x2
   10.32 +_OF_DT_PROP = 0x3
   10.33 +_OF_DT_END = 0x9
   10.34 +
   10.35 +def _bincat(seq, separator=''):
   10.36 +    '''Concatenate the contents of seq into a bytestream.'''
   10.37 +    strs = []
   10.38 +    for item in seq:
   10.39 +        if type(item) == type(0):
   10.40 +            strs.append(struct.pack(">I", item))
   10.41 +        else:
   10.42 +            try:
   10.43 +                strs.append(item.to_bin())
   10.44 +            except AttributeError, e:
   10.45 +                strs.append(item)
   10.46 +    return separator.join(strs)
   10.47 +
   10.48 +def _alignup(val, alignment):
   10.49 +    return (val + alignment - 1) & ~(alignment - 1)
   10.50 +
   10.51 +def _pad(buf, alignment):
   10.52 +    '''Pad bytestream with NULLs to specified alignment.'''
   10.53 +    padlen = _alignup(len(buf), alignment)
   10.54 +    return buf + '\0' * (padlen - len(buf))
   10.55 +    # not present in Python 2.3:
   10.56 +    #return buf.ljust(_padlen, '\0')
   10.57 +
   10.58 +def _indent(item):
   10.59 +    indented = []
   10.60 +    for line in str(item).splitlines(True):
   10.61 +        indented.append('    ' + line)
   10.62 +    return ''.join(indented)
   10.63 +
   10.64 +class _Property:
   10.65 +    _nonprint = re.compile('[\000-\037\200-\377]')
   10.66 +    def __init__(self, node, name, value):
   10.67 +        self.node = node
   10.68 +        self.value = value
   10.69 +        self.name = name
   10.70 +        self.node.tree.stradd(name)
   10.71 +
   10.72 +    def __str__(self):
   10.73 +        result = self.name
   10.74 +        if self.value:
   10.75 +            searchtext = self.value
   10.76 +            # it's ok for a string to end in NULL
   10.77 +            if searchtext.find('\000') == len(searchtext)-1:
   10.78 +                searchtext = searchtext[:-1]
   10.79 +            m = self._nonprint.search(searchtext)
   10.80 +            if m:
   10.81 +                bytes = struct.unpack("B" * len(self.value), self.value)
   10.82 +                hexbytes = [ '%02x' % b for b in bytes ]
   10.83 +                words = []
   10.84 +                for i in range(0, len(self.value), 4):
   10.85 +                    words.append(''.join(hexbytes[i:i+4]))
   10.86 +                v = '<' + ' '.join(words) + '>'
   10.87 +            else:
   10.88 +                v = '"%s"' % self.value
   10.89 +            result += ': ' + v
   10.90 +        return result
   10.91 +
   10.92 +    def to_bin(self):
   10.93 +        offset = self.node.tree.stroffset(self.name)
   10.94 +        return struct.pack('>III', _OF_DT_PROP, len(self.value), offset) \
   10.95 +            + _pad(self.value, 4)
   10.96 +
   10.97 +class _Node:
   10.98 +    def __init__(self, tree, name):
   10.99 +        self.tree = tree
  10.100 +        self.name = name
  10.101 +        self.props = {}
  10.102 +        self.children = {}
  10.103 +        self.phandle = 0
  10.104 +
  10.105 +    def __str__(self):
  10.106 +        propstrs = [ _indent(prop) for prop in self.props.values() ]
  10.107 +        childstrs = [ _indent(child) for child in self.children.values() ]
  10.108 +        return '%s:\n%s\n%s' % (self.name, '\n'.join(propstrs),
  10.109 +            '\n'.join(childstrs))
  10.110 +
  10.111 +    def to_bin(self):
  10.112 +        name = _pad(self.name + '\0', 4)
  10.113 +        return struct.pack('>I', _OF_DT_BEGIN_NODE) + \
  10.114 +                name + \
  10.115 +                _bincat(self.props.values()) + \
  10.116 +                _bincat(self.children.values()) + \
  10.117 +                struct.pack('>I', _OF_DT_END_NODE)
  10.118 +
  10.119 +    def addprop(self, propname, *cells):
  10.120 +        '''setprop with duplicate error-checking.'''
  10.121 +        if propname in self.props:
  10.122 +            raise AttributeError('%s/%s already exists' % (self.name, propname))
  10.123 +        self.setprop(propname, *cells)
  10.124 +
  10.125 +    def setprop(self, propname, *cells):
  10.126 +        self.props[propname] = _Property(self, propname, _bincat(cells))
  10.127 +
  10.128 +    def addnode(self, nodename):
  10.129 +        '''newnode with duplicate error-checking.'''
  10.130 +        if nodename in self.children:
  10.131 +            raise AttributeError('%s/%s already exists' % (self.name, nodename))
  10.132 +        return self.newnode(nodename)
  10.133 +
  10.134 +    def newnode(self, nodename):
  10.135 +        node = _Node(self.tree, nodename)
  10.136 +        self.children[nodename] = node
  10.137 +        return node
  10.138 +
  10.139 +    def getprop(self, propname):
  10.140 +        return self.props[propname]
  10.141 +
  10.142 +    def getchild(self, nodename):
  10.143 +        return self.children[nodename]
  10.144 +
  10.145 +    def get_phandle(self):
  10.146 +        if self.phandle:
  10.147 +            return self.phandle
  10.148 +        self.phandle = self.tree.alloc_phandle()
  10.149 +        self.addprop('linux,phandle', self.phandle)
  10.150 +        return self.phandle
  10.151 +
  10.152 +class _Header:
  10.153 +    def __init__(self):
  10.154 +        self.magic = 0
  10.155 +        self.totalsize = 0
  10.156 +        self.off_dt_struct = 0
  10.157 +        self.off_dt_strings = 0
  10.158 +        self.off_mem_rsvmap = 0
  10.159 +        self.version = 0
  10.160 +        self.last_comp_version = 0
  10.161 +        self.boot_cpuid_phys = 0
  10.162 +        self.size_dt_strings = 0
  10.163 +    def to_bin(self):
  10.164 +        return struct.pack('>9I',
  10.165 +            self.magic,
  10.166 +            self.totalsize,
  10.167 +            self.off_dt_struct,
  10.168 +            self.off_dt_strings,
  10.169 +            self.off_mem_rsvmap,
  10.170 +            self.version,
  10.171 +            self.last_comp_version,
  10.172 +            self.boot_cpuid_phys,
  10.173 +            self.size_dt_strings)
  10.174 +
  10.175 +class _StringBlock:
  10.176 +    def __init__(self):
  10.177 +        self.table = []
  10.178 +    def to_bin(self):
  10.179 +        return _bincat(self.table, '\0') + '\0'
  10.180 +    def add(self, str):
  10.181 +        self.table.append(str)
  10.182 +    def getoffset(self, str):
  10.183 +        return self.to_bin().index(str + '\0')
  10.184 +
  10.185 +class Tree(_Node):
  10.186 +    def __init__(self):
  10.187 +        self.last_phandle = 0
  10.188 +        self.strings = _StringBlock()
  10.189 +        self.reserved = [(0, 0)]
  10.190 +        _Node.__init__(self, self, '\0')
  10.191 +
  10.192 +    def alloc_phandle(self):
  10.193 +        self.last_phandle += 1
  10.194 +        return self.last_phandle
  10.195 +
  10.196 +    def stradd(self, str):
  10.197 +        return self.strings.add(str)
  10.198 +
  10.199 +    def stroffset(self, str):
  10.200 +        return self.strings.getoffset(str)
  10.201 +
  10.202 +    def reserve(self, start, len):
  10.203 +        self.reserved.insert(0, (start, len))
  10.204 +
  10.205 +    def to_bin(self):
  10.206 +        # layout:
  10.207 +        #   header
  10.208 +        #   reservation map
  10.209 +        #   string block
  10.210 +        #   data block
  10.211 +
  10.212 +        datablock = _Node.to_bin(self)
  10.213 +
  10.214 +        r = [ struct.pack('>QQ', rsrv[0], rsrv[1]) for rsrv in self.reserved ]
  10.215 +        reserved = _bincat(r)
  10.216 +
  10.217 +        strblock = _pad(self.strings.to_bin(), 4)
  10.218 +        strblocklen = len(strblock)
  10.219 +
  10.220 +        header = _Header()
  10.221 +        header.magic = _OF_DT_HEADER
  10.222 +        header.off_mem_rsvmap = _alignup(len(header.to_bin()), 8)
  10.223 +        header.off_dt_strings = header.off_mem_rsvmap + len(reserved)
  10.224 +        header.off_dt_struct = header.off_dt_strings + strblocklen
  10.225 +        header.version = 0x10
  10.226 +        header.last_comp_version = 0x10
  10.227 +        header.boot_cpuid_phys = 0
  10.228 +        header.size_dt_strings = strblocklen
  10.229 +
  10.230 +        payload = reserved + \
  10.231 +                strblock + \
  10.232 +                datablock + \
  10.233 +                struct.pack('>I', _OF_DT_END)
  10.234 +        header.totalsize = len(payload) + _alignup(len(header.to_bin()), 8)
  10.235 +        return _pad(header.to_bin(), 8) + payload
  10.236 +
  10.237 +_host_devtree_root = '/proc/device-tree'
  10.238 +def _getprop(propname):
  10.239 +    '''Extract a property from the system's device tree.'''
  10.240 +    f = file(os.path.join(_host_devtree_root, propname), 'r')
  10.241 +    data = f.read()
  10.242 +    f.close()
  10.243 +    return data
  10.244 +
  10.245 +def _copynode(node, dirpath, propfilter):
  10.246 +    '''Extract all properties from a node in the system's device tree.'''
  10.247 +    dirents = os.listdir(dirpath)
  10.248 +    for dirent in dirents:
  10.249 +        fullpath = os.path.join(dirpath, dirent)
  10.250 +        st = os.lstat(fullpath)
  10.251 +        if stat.S_ISDIR(st.st_mode):
  10.252 +            child = node.addnode(dirent)
  10.253 +            _copytree(child, fullpath, propfilter)
  10.254 +        elif stat.S_ISREG(st.st_mode) and propfilter(fullpath):
  10.255 +            node.addprop(dirent, _getprop(fullpath))
  10.256 +
  10.257 +def _copytree(node, dirpath, propfilter):
  10.258 +    path = os.path.join(_host_devtree_root, dirpath)
  10.259 +    _copynode(node, path, propfilter)
  10.260 +
  10.261 +def build(imghandler):
  10.262 +    '''Construct a device tree by combining the domain's configuration and
  10.263 +    the host's device tree.'''
  10.264 +    root = Tree()
  10.265 +
  10.266 +    # 4 pages: start_info, console, store, shared_info
  10.267 +    root.reserve(0x3ffc000, 0x4000)
  10.268 +
  10.269 +    root.addprop('device_type', 'chrp-but-not-really\0')
  10.270 +    root.addprop('#size-cells', 2)
  10.271 +    root.addprop('#address-cells', 2)
  10.272 +    root.addprop('model', 'Momentum,Maple-D\0')
  10.273 +    root.addprop('compatible', 'Momentum,Maple\0')
  10.274 +
  10.275 +    xen = root.addnode('xen')
  10.276 +    xen.addprop('start-info', 0, 0x3ffc000, 0, 0x1000)
  10.277 +    xen.addprop('version', 'Xen-3.0-unstable\0')
  10.278 +    xen.addprop('reg', 0, imghandler.vm.domid, 0, 0)
  10.279 +    xen.addprop('domain-name', imghandler.vm.getName() + '\0')
  10.280 +    xencons = xen.addnode('console')
  10.281 +    xencons.addprop('interrupts', 1, 0)
  10.282 +
  10.283 +    # XXX split out RMA node
  10.284 +    mem = root.addnode('memory@0')
  10.285 +    totalmem = imghandler.vm.getMemoryTarget() * 1024
  10.286 +    mem.addprop('reg', 0, 0, 0, totalmem)
  10.287 +    mem.addprop('device_type', 'memory\0')
  10.288 +
  10.289 +    cpus = root.addnode('cpus')
  10.290 +    cpus.addprop('smp-enabled')
  10.291 +    cpus.addprop('#size-cells', 0)
  10.292 +    cpus.addprop('#address-cells', 1)
  10.293 +
  10.294 +    # Copy all properties the system firmware gave us, except for 'linux,'
  10.295 +    # properties, from 'cpus/@0', once for every vcpu. Hopefully all cpus are
  10.296 +    # identical...
  10.297 +    cpu0 = None
  10.298 +    def _nolinuxprops(fullpath):
  10.299 +        return not os.path.basename(fullpath).startswith('linux,')
  10.300 +    for i in range(imghandler.vm.getVCpuCount()):
  10.301 +        cpu = cpus.addnode('PowerPC,970@0')
  10.302 +        _copytree(cpu, 'cpus/PowerPC,970@0', _nolinuxprops)
  10.303 +        # and then overwrite what we need to
  10.304 +        pft_size = imghandler.vm.info.get('pft-size', 0x14)
  10.305 +        cpu.setprop('ibm,pft-size', 0, pft_size)
  10.306 +
  10.307 +        # set default CPU
  10.308 +        if cpu0 == None:
  10.309 +            cpu0 = cpu
  10.310 +
  10.311 +    chosen = root.addnode('chosen')
  10.312 +    chosen.addprop('cpu', cpu0.get_phandle())
  10.313 +    chosen.addprop('memory', mem.get_phandle())
  10.314 +    chosen.addprop('linux,stdout-path', '/xen/console\0')
  10.315 +    chosen.addprop('interrupt-controller', xen.get_phandle())
  10.316 +    chosen.addprop('bootargs', imghandler.cmdline + '\0')
  10.317 +    # xc_linux_load.c will overwrite these 64-bit properties later
  10.318 +    chosen.addprop('linux,initrd-start', 0, 0)
  10.319 +    chosen.addprop('linux,initrd-end', 0, 0)
  10.320 +
  10.321 +    if 1:
  10.322 +        f = file('/tmp/domU.dtb', 'w')
  10.323 +        f.write(root.to_bin())
  10.324 +        f.close()
  10.325 +
  10.326 +    return root
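
Note: FlatDeviceTree.Tree emits a flattened device-tree blob in the layout spelled out in to_bin() above (header, reservation map, string block, data block); build() assembles the real tree from the domain configuration and /proc/device-tree. The snippet below is only an illustrative, stand-alone exercise of the Tree class; the node names and property values here are invented:

    # Illustrative use of the Tree class above; values are invented.
    from xen.xend import FlatDeviceTree

    root = FlatDeviceTree.Tree()
    root.addprop('#address-cells', 2)          # integer cells are packed big-endian
    root.addprop('model', 'example,machine\0') # strings carry their own NUL
    cpus = root.addnode('cpus')
    cpus.addprop('#size-cells', 0)

    blob = root.to_bin()                       # header + rsvmap + strings + data
    f = open('/tmp/example.dtb', 'w')
    f.write(blob)
    f.close()
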
    11.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Tue Aug 29 09:08:29 2006 -0600
    11.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Wed Aug 30 21:38:34 2006 +0100
    11.3 @@ -161,10 +161,12 @@ def restore(xd, fd):
    11.4          if handler.store_mfn is None or handler.console_mfn is None:
    11.5              raise XendError('Could not read store/console MFN')
    11.6  
    11.7 +        #Block until src closes connection
    11.8 +        os.read(fd, 1)
    11.9          dominfo.unpause()
   11.10 -
   11.11 +        
   11.12          dominfo.completeRestore(handler.store_mfn, handler.console_mfn)
   11.13 -
   11.14 +        
   11.15          return dominfo
   11.16      except:
   11.17          dominfo.destroy()
    12.1 --- a/tools/python/xen/xend/XendDomain.py	Tue Aug 29 09:08:29 2006 -0600
    12.2 +++ b/tools/python/xen/xend/XendDomain.py	Wed Aug 30 21:38:34 2006 +0100
    12.3 @@ -431,7 +431,8 @@ class XendDomain:
    12.4          sock.send("receive\n")
    12.5          sock.recv(80)
    12.6          XendCheckpoint.save(sock.fileno(), dominfo, True, live, dst)
    12.7 -
    12.8 +        dominfo.testDeviceComplete()
    12.9 +        sock.close()
   12.10  
   12.11      def domain_save(self, domid, dst):
   12.12          """Start saving a domain to file.
    13.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Tue Aug 29 09:08:29 2006 -0600
    13.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Wed Aug 30 21:38:34 2006 +0100
    13.3 @@ -30,7 +30,6 @@ import string
    13.4  import time
    13.5  import threading
    13.6  import os
    13.7 -import math
    13.8  
    13.9  import xen.lowlevel.xc
   13.10  from xen.util import asserts
   13.11 @@ -703,6 +702,9 @@ class XendDomainInfo:
   13.12                  if security[idx][0] == 'ssidref':
   13.13                      to_store['security/ssidref'] = str(security[idx][1])
   13.14  
   13.15 +        if not self.readVm('xend/restart_count'):
   13.16 +            to_store['xend/restart_count'] = str(0)
   13.17 +
   13.18          log.debug("Storing VM details: %s", to_store)
   13.19  
   13.20          self.writeVm(to_store)
   13.21 @@ -824,6 +826,9 @@ class XendDomainInfo:
   13.22      def setResume(self, state):
   13.23          self.info['resume'] = state
   13.24  
   13.25 +    def getRestartCount(self):
   13.26 +        return self.readVm('xend/restart_count')
   13.27 +
   13.28      def refreshShutdown(self, xeninfo = None):
   13.29          # If set at the end of this method, a restart is required, with the
   13.30          # given reason.  This restart has to be done out of the scope of
   13.31 @@ -1280,34 +1285,28 @@ class XendDomainInfo:
   13.32                  for v in range(0, self.info['max_vcpu_id']+1):
   13.33                      xc.vcpu_setaffinity(self.domid, v, self.info['cpus'])
   13.34  
   13.35 -            # set domain maxmem in KiB
   13.36 -            xc.domain_setmaxmem(self.domid, self.info['maxmem'] * 1024)
   13.37 +            # set memory limit
   13.38 +            maxmem = self.image.getRequiredMemory(self.info['maxmem'] * 1024)
   13.39 +            xc.domain_setmaxmem(self.domid, maxmem)
   13.40  
   13.41 -            m = self.image.getDomainMemory(self.info['memory'] * 1024)
   13.42 +            mem_kb = self.image.getRequiredMemory(self.info['memory'] * 1024)
   13.43  
   13.44              # get the domain's shadow memory requirement
   13.45 -            sm = int(math.ceil(self.image.getDomainShadowMemory(m) / 1024.0))
   13.46 -            if self.info['shadow_memory'] > sm:
   13.47 -                sm = self.info['shadow_memory']
   13.48 +            shadow_kb = self.image.getRequiredShadowMemory(mem_kb)
   13.49 +            shadow_kb_req = self.info['shadow_memory'] * 1024
   13.50 +            if shadow_kb_req > shadow_kb:
   13.51 +                shadow_kb = shadow_kb_req
   13.52 +            shadow_mb = (shadow_kb + 1023) / 1024
   13.53  
   13.54              # Make sure there's enough RAM available for the domain
   13.55 -            balloon.free(m + sm * 1024)
   13.56 +            balloon.free(mem_kb + shadow_mb * 1024)
   13.57  
   13.58              # Set up the shadow memory
   13.59 -            sm = xc.shadow_mem_control(self.domid, mb=sm)
   13.60 -            self.info['shadow_memory'] = sm
   13.61 +            shadow_cur = xc.shadow_mem_control(self.domid, shadow_mb)
   13.62 +            self.info['shadow_memory'] = shadow_cur
   13.63  
   13.64 -            init_reservation = self.info['memory'] * 1024
   13.65 -            if os.uname()[4] in ('ia64', 'ppc64'):
   13.66 -                # Workaround for architectures that don't yet support
   13.67 -                # ballooning.
   13.68 -                init_reservation = m
   13.69 -                # Following line from xiantao.zhang@intel.com
   13.70 -                # Needed for IA64 until supports ballooning -- okay for PPC64?
   13.71 -                xc.domain_setmaxmem(self.domid, m)
   13.72 -
   13.73 -            xc.domain_memory_increase_reservation(self.domid, init_reservation,
   13.74 -                                                  0, 0)
   13.75 +            # initial memory allocation
   13.76 +            xc.domain_memory_increase_reservation(self.domid, mem_kb, 0, 0)
   13.77  
   13.78              self.createChannels()
   13.79  
   13.80 @@ -1495,6 +1494,21 @@ class XendDomainInfo:
   13.81              if rc != 0:
   13.82                  raise XendError("Device of type '%s' refuses migration." % n)
   13.83  
   13.84 +    def testDeviceComplete(self):
   13.85 +        """ For Block IO migration safety we must ensure that
   13.86 +        the device has shutdown correctly, i.e. all blocks are
   13.87 +        flushed to disk
   13.88 +        """
   13.89 +        while True:
   13.90 +            test = 0
   13.91 +            for i in self.getDeviceController('vbd').deviceIDs():
   13.92 +                test = 1
   13.93 +                log.info("Dev %s still active, looping...", i)
   13.94 +                time.sleep(0.1)
   13.95 +                
   13.96 +            if test == 0:
   13.97 +                break
   13.98 +
   13.99      def migrateDevices(self, network, dst, step, domName=''):
  13.100          """Notify the devices about migration
  13.101          """
  13.102 @@ -1615,6 +1629,9 @@ class XendDomainInfo:
  13.103              try:
  13.104                  new_dom = XendDomain.instance().domain_create(config)
  13.105                  new_dom.unpause()
  13.106 +                rst_cnt = self.readVm('xend/restart_count')
  13.107 +                rst_cnt = int(rst_cnt) + 1
  13.108 +                self.writeVm('xend/restart_count', str(rst_cnt))
  13.109                  new_dom.removeVm(RESTART_IN_PROGRESS)
  13.110              except:
  13.111                  if new_dom:
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/tools/python/xen/xend/arch.py	Wed Aug 30 21:38:34 2006 +0100
    14.3 @@ -0,0 +1,32 @@
    14.4 +#!/usr/bin/env python
    14.5 +#
    14.6 +# This library is free software; you can redistribute it and/or
    14.7 +# modify it under the terms of version 2.1 of the GNU Lesser General Public
    14.8 +# License as published by the Free Software Foundation.
    14.9 +#
   14.10 +# This library is distributed in the hope that it will be useful,
   14.11 +# but WITHOUT ANY WARRANTY; without even the implied warranty of
   14.12 +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   14.13 +# Lesser General Public License for more details.
   14.14 +#
   14.15 +# You should have received a copy of the GNU Lesser General Public
   14.16 +# License along with this library; if not, write to the Free Software
   14.17 +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   14.18 +#
   14.19 +# Copyright (C) IBM Corp. 2006
   14.20 +#
   14.21 +# Authors: Hollis Blanchard <hollisb@us.ibm.com>
   14.22 +
   14.23 +import os
   14.24 +
   14.25 +_types = {
   14.26 +    "i386": "x86",
   14.27 +    "i486": "x86",
   14.28 +    "i586": "x86",
   14.29 +    "i686": "x86",
   14.30 +    "x86_64": "x86",
   14.31 +    "ia64": "ia64",
   14.32 +    "ppc": "powerpc",
   14.33 +    "ppc64": "powerpc",
   14.34 +}
   14.35 +type = _types.get(os.uname()[4], "unknown")
    15.1 --- a/tools/python/xen/xend/image.py	Tue Aug 29 09:08:29 2006 -0600
    15.2 +++ b/tools/python/xen/xend/image.py	Wed Aug 30 21:38:34 2006 +0100
    15.3 @@ -27,6 +27,8 @@ from xen.xend.XendError import VmError
    15.4  from xen.xend.XendLogging import log
    15.5  from xen.xend.server.netif import randomMAC
    15.6  from xen.xend.xenstore.xswatch import xswatch
    15.7 +from xen.xend import arch
    15.8 +from xen.xend import FlatDeviceTree
    15.9  
   15.10  
   15.11  xc = xen.lowlevel.xc.xc()
   15.12 @@ -141,19 +143,10 @@ class ImageHandler:
   15.13              raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
   15.14                            % (self.ostype, self.vm.getDomid(), str(result)))
   15.15  
   15.16 -
   15.17 -    def getDomainMemory(self, mem_kb):
   15.18 -        """@return The memory required, in KiB, by the domain to store the
   15.19 -        given amount, also in KiB."""
   15.20 -        if os.uname()[4] != 'ia64':
   15.21 -            # A little extra because auto-ballooning is broken w.r.t. HVM
   15.22 -            # guests. Also, slack is necessary for live migration since that
   15.23 -            # uses shadow page tables.
   15.24 -            if 'hvm' in xc.xeninfo()['xen_caps']:
   15.25 -                mem_kb += 4*1024;
   15.26 +    def getRequiredMemory(self, mem_kb):
   15.27          return mem_kb
   15.28  
   15.29 -    def getDomainShadowMemory(self, mem_kb):
   15.30 +    def getRequiredShadowMemory(self, mem_kb):
   15.31          """@return The minimum shadow memory required, in KiB, for a domain 
   15.32          with mem_kb KiB of RAM."""
   15.33          # PV domains don't need any shadow memory
   15.34 @@ -197,9 +190,39 @@ class LinuxImageHandler(ImageHandler):
   15.35                                ramdisk        = self.ramdisk,
   15.36                                features       = self.vm.getFeatures())
   15.37  
   15.38 -class HVMImageHandler(ImageHandler):
   15.39 +class PPC_LinuxImageHandler(LinuxImageHandler):
   15.40 +
   15.41 +    ostype = "linux"
   15.42 +
   15.43 +    def configure(self, imageConfig, deviceConfig):
   15.44 +        LinuxImageHandler.configure(self, imageConfig, deviceConfig)
   15.45 +        self.imageConfig = imageConfig
   15.46 +
   15.47 +    def buildDomain(self):
   15.48 +        store_evtchn = self.vm.getStorePort()
   15.49 +        console_evtchn = self.vm.getConsolePort()
   15.50  
   15.51 -    ostype = "hvm"
   15.52 +        log.debug("dom            = %d", self.vm.getDomid())
   15.53 +        log.debug("image          = %s", self.kernel)
   15.54 +        log.debug("store_evtchn   = %d", store_evtchn)
   15.55 +        log.debug("console_evtchn = %d", console_evtchn)
   15.56 +        log.debug("cmdline        = %s", self.cmdline)
   15.57 +        log.debug("ramdisk        = %s", self.ramdisk)
   15.58 +        log.debug("vcpus          = %d", self.vm.getVCpuCount())
   15.59 +        log.debug("features       = %s", self.vm.getFeatures())
   15.60 +
   15.61 +        devtree = FlatDeviceTree.build(self)
   15.62 +
   15.63 +        return xc.linux_build(dom            = self.vm.getDomid(),
   15.64 +                              image          = self.kernel,
   15.65 +                              store_evtchn   = store_evtchn,
   15.66 +                              console_evtchn = console_evtchn,
   15.67 +                              cmdline        = self.cmdline,
   15.68 +                              ramdisk        = self.ramdisk,
   15.69 +                              features       = self.vm.getFeatures(),
   15.70 +                              arch_args      = devtree.to_bin())
   15.71 +
   15.72 +class HVMImageHandler(ImageHandler):
   15.73  
   15.74      def configure(self, imageConfig, deviceConfig):
   15.75          ImageHandler.configure(self, imageConfig, deviceConfig)
   15.76 @@ -282,7 +305,7 @@ class HVMImageHandler(ImageHandler):
   15.77          for (name, info) in deviceConfig:
   15.78              if name == 'vbd':
   15.79                  uname = sxp.child_value(info, 'uname')
   15.80 -                if 'file:' in uname:
   15.81 +                if uname is not None and 'file:' in uname:
   15.82                      (_, vbdparam) = string.split(uname, ':', 1)
   15.83                      if not os.path.isfile(vbdparam):
   15.84                          raise VmError('Disk image does not exist: %s' %
   15.85 @@ -355,32 +378,6 @@ class HVMImageHandler(ImageHandler):
   15.86          os.waitpid(self.pid, 0)
   15.87          self.pid = 0
   15.88  
   15.89 -    def getDomainMemory(self, mem_kb):
   15.90 -        """@see ImageHandler.getDomainMemory"""
   15.91 -        if os.uname()[4] == 'ia64':
   15.92 -            page_kb = 16
   15.93 -            # ROM size for guest firmware, ioreq page and xenstore page
   15.94 -            extra_pages = 1024 + 2
   15.95 -        else:
   15.96 -            page_kb = 4
   15.97 -            # This was derived emperically:
   15.98 -            #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
   15.99 -            #   + 4 to avoid low-memory condition
  15.100 -            extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
  15.101 -            extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
  15.102 -        return mem_kb + extra_pages * page_kb
  15.103 -
  15.104 -    def getDomainShadowMemory(self, mem_kb):
  15.105 -        """@return The minimum shadow memory required, in KiB, for a domain 
  15.106 -        with mem_kb KiB of RAM."""
  15.107 -        if os.uname()[4] in ('ia64', 'ppc64'):
  15.108 -            # Explicit shadow memory is not a concept 
  15.109 -            return 0
  15.110 -        else:
  15.111 -            # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than 
  15.112 -            # the minimum that Xen would allocate if no value were given.
  15.113 -            return 1024 * self.vm.getVCpuCount() + mem_kb / 256
  15.114 -
  15.115      def register_shutdown_watch(self):
  15.116          """ add xen store watch on control/shutdown """
  15.117          self.shutdownWatch = xswatch(self.vm.dompath + "/control/shutdown", \
  15.118 @@ -417,15 +414,51 @@ class HVMImageHandler(ImageHandler):
  15.119  
  15.120          return 1 # Keep watching
  15.121  
  15.122 -"""Table of image handler classes for virtual machine images.  Indexed by
  15.123 -image type.
  15.124 -"""
  15.125 -imageHandlerClasses = {}
  15.126 +class IA64_HVM_ImageHandler(HVMImageHandler):
  15.127 +
  15.128 +    ostype = "hvm"
  15.129 +
  15.130 +    def getRequiredMemory(self, mem_kb):
  15.131 +        page_kb = 16
  15.132 +        # ROM size for guest firmware, ioreq page and xenstore page
  15.133 +        extra_pages = 1024 + 2
  15.134 +        return mem_kb + extra_pages * page_kb
  15.135 +
  15.136 +    def getRequiredShadowMemory(self, mem_kb):
  15.137 +        # Explicit shadow memory is not a concept 
  15.138 +        return 0
  15.139 +
  15.140 +class X86_HVM_ImageHandler(HVMImageHandler):
  15.141 +
  15.142 +    ostype = "hvm"
  15.143  
  15.144 +    def getRequiredMemory(self, mem_kb):
  15.145 +        page_kb = 4
   15.146 +        # This was derived empirically:
  15.147 +        #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
  15.148 +        #   + 4 to avoid low-memory condition
  15.149 +        extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
  15.150 +        extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
  15.151 +        return mem_kb + extra_pages * page_kb
  15.152  
  15.153 -for h in LinuxImageHandler, HVMImageHandler:
  15.154 -    imageHandlerClasses[h.ostype] = h
  15.155 +    def getRequiredShadowMemory(self, mem_kb):
   15.156 +        # 1MB per vcpu plus 4KiB/MiB of RAM.  This is higher than 
  15.157 +        # the minimum that Xen would allocate if no value were given.
  15.158 +        return 1024 * self.vm.getVCpuCount() + mem_kb / 256
  15.159  
  15.160 +_handlers = {
  15.161 +    "powerpc": {
  15.162 +        "linux": PPC_LinuxImageHandler,
  15.163 +    },
  15.164 +    "ia64": {
  15.165 +        "linux": LinuxImageHandler,
  15.166 +        "hvm": IA64_HVM_ImageHandler,
  15.167 +    },
  15.168 +    "x86": {
  15.169 +        "linux": LinuxImageHandler,
  15.170 +        "hvm": X86_HVM_ImageHandler,
  15.171 +    },
  15.172 +}
  15.173  
  15.174  def findImageHandlerClass(image):
  15.175      """Find the image handler class for an image config.
  15.176 @@ -433,10 +466,10 @@ def findImageHandlerClass(image):
  15.177      @param image config
  15.178      @return ImageHandler subclass or None
  15.179      """
  15.180 -    ty = sxp.name(image)
  15.181 -    if ty is None:
  15.182 +    type = sxp.name(image)
  15.183 +    if type is None:
  15.184          raise VmError('missing image type')
  15.185 -    imageClass = imageHandlerClasses.get(ty)
  15.186 -    if imageClass is None:
  15.187 -        raise VmError('unknown image type: ' + ty)
  15.188 -    return imageClass
  15.189 +    try:
  15.190 +        return _handlers[arch.type][type]
  15.191 +    except KeyError:
  15.192 +        raise VmError('unknown image type: ' + type)
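
Note: getDomainMemory/getDomainShadowMemory are replaced by per-architecture getRequiredMemory/getRequiredShadowMemory handlers, and XendDomainInfo now works purely in KiB, rounding shadow memory up to MiB only when calling xc.shadow_mem_control(). As a worked example (not part of the patch), the X86_HVM_ImageHandler formulas above give the following for a hypothetical 1024 MB, 2-VCPU HVM guest:

    # Worked example of the X86_HVM_ImageHandler formulas above for a
    # hypothetical 1024 MB, 2-VCPU HVM guest; not part of the changeset.
    import math

    mem_kb = 1024 * 1024                              # 1024 MB of guest RAM, in KiB
    vcpus  = 2

    # getRequiredMemory: 2.4 MB per 1024 MB of RAM + 12 MB, in 4 KiB pages
    extra_mb    = (2.4 / 1024) * (mem_kb / 1024.0) + 12    # 14.4 MB
    extra_pages = int(math.ceil(extra_mb * 1024 / 4))      # 3687 pages
    maxmem_kb   = mem_kb + extra_pages * 4                 # 1063324 KiB

    # getRequiredShadowMemory: 1 MB per VCPU plus 4 KiB per MiB of RAM
    shadow_kb = 1024 * vcpus + mem_kb / 256                # 6144 KiB
    shadow_mb = (shadow_kb + 1023) / 1024                  # rounded up to 6 MiB
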
    16.1 --- a/tools/python/xen/xend/server/XMLRPCServer.py	Tue Aug 29 09:08:29 2006 -0600
    16.2 +++ b/tools/python/xen/xend/server/XMLRPCServer.py	Wed Aug 30 21:38:34 2006 +0100
    16.3 @@ -78,7 +78,8 @@ def get_log():
    16.4  methods = ['device_create', 'device_configure', 'destroyDevice',
    16.5             'getDeviceSxprs',
    16.6             'setMemoryTarget', 'setName', 'setVCpuCount', 'shutdown',
    16.7 -           'send_sysrq', 'getVCPUInfo', 'waitForDevices']
    16.8 +           'send_sysrq', 'getVCPUInfo', 'waitForDevices',
    16.9 +           'getRestartCount']
   16.10  
   16.11  exclude = ['domain_create', 'domain_restore']
   16.12  
    17.1 --- a/tools/python/xen/xend/server/blkif.py	Tue Aug 29 09:08:29 2006 -0600
    17.2 +++ b/tools/python/xen/xend/server/blkif.py	Wed Aug 30 21:38:34 2006 +0100
    17.3 @@ -52,10 +52,18 @@ class BlkifController(DevController):
    17.4          except ValueError:
    17.5              dev_type = "disk"
    17.6  
    17.7 -        try:
    17.8 -            (typ, params) = string.split(uname, ':', 1)
    17.9 -        except ValueError:
   17.10 -            (typ, params) = ("", "")
   17.11 +        if uname is None:
   17.12 +            if dev_type == 'cdrom':
   17.13 +                (typ, params) = ("", "")
   17.14 +            else:
   17.15 +                raise VmError(
   17.16 +                    'Block device must have physical details specified')
   17.17 +        else:
   17.18 +            try:
   17.19 +                (typ, params) = string.split(uname, ':', 1)
   17.20 +            except ValueError:
   17.21 +                (typ, params) = ("", "")
   17.22 +
   17.23          back = { 'dev'    : dev,
   17.24                   'type'   : typ,
   17.25                   'params' : params,
    18.1 --- a/tools/python/xen/xm/migrate.py	Tue Aug 29 09:08:29 2006 -0600
    18.2 +++ b/tools/python/xen/xm/migrate.py	Wed Aug 30 21:38:34 2006 +0100
    18.3 @@ -57,7 +57,8 @@ def main(argv):
    18.4          opts.usage()
    18.5          return
    18.6      if len(args) != 2:
    18.7 -        opts.err('Invalid arguments: ' + str(args))
    18.8 +        opts.usage()
    18.9 +        sys.exit(1)
   18.10      dom = args[0]
   18.11      dst = args[1]
   18.12      server.xend.domain.migrate(dom, dst, opts.vals.live, opts.vals.resource, opts.vals.port)
    19.1 --- a/tools/python/xen/xm/shutdown.py	Tue Aug 29 09:08:29 2006 -0600
    19.2 +++ b/tools/python/xen/xm/shutdown.py	Wed Aug 30 21:38:34 2006 +0100
    19.3 @@ -48,21 +48,48 @@ gopts.opt('reboot', short='R',
    19.4            fn=set_true, default=0,
    19.5            use='Shutdown and reboot.')
    19.6  
    19.7 +def wait_reboot(opts, doms, rcs):
    19.8 +    while doms:
    19.9 +        alive = server.xend.domains(0)
   19.10 +        reboot = []
   19.11 +        for d in doms:
   19.12 +            if d in alive:
   19.13 +                rc = server.xend.domain.getRestartCount(d)
   19.14 +                if rc == rcs[d]: continue
   19.15 +                reboot.append(d)
   19.16 +            else:
    19.17 +                opts.info("Domain %s was destroyed instead of rebooting" % d)
   19.18 +                doms.remove(d)
   19.19 +        for d in reboot:
   19.20 +            opts.info("Domain %s rebooted" % d)
   19.21 +            doms.remove(d)
   19.22 +        time.sleep(1)
   19.23 +    opts.info("All domains rebooted")
   19.24 +
   19.25 +def wait_shutdown(opts, doms):
   19.26 +    while doms:
   19.27 +        alive = server.xend.domains(0)
   19.28 +        dead = []
   19.29 +        for d in doms:
   19.30 +            if d in alive: continue
   19.31 +            dead.append(d)
   19.32 +        for d in dead:
   19.33 +            opts.info("Domain %s terminated" % d)
   19.34 +            doms.remove(d)
   19.35 +        time.sleep(1)
   19.36 +    opts.info("All domains terminated")
   19.37 +
   19.38  def shutdown(opts, doms, mode, wait):
   19.39 +    rcs = {}
   19.40      for d in doms:
   19.41 +        rcs[d] = server.xend.domain.getRestartCount(d)
   19.42          server.xend.domain.shutdown(d, mode)
   19.43 +
   19.44      if wait:
   19.45 -        while doms:
   19.46 -            alive = server.xend.domains(0)
   19.47 -            dead = []
   19.48 -            for d in doms:
   19.49 -                if d in alive: continue
   19.50 -                dead.append(d)
   19.51 -            for d in dead:
   19.52 -                opts.info("Domain %s terminated" % d)
   19.53 -                doms.remove(d)
   19.54 -            time.sleep(1)
   19.55 -        opts.info("All domains terminated")
   19.56 +        if mode == 'reboot':
   19.57 +            wait_reboot(opts, doms, rcs)
   19.58 +        else:
   19.59 +            wait_shutdown(opts, doms)
   19.60  
   19.61  def shutdown_mode(opts):
   19.62      if opts.vals.halt and opts.vals.reboot:
    20.1 --- a/xen/arch/ia64/xen/dom0_ops.c	Tue Aug 29 09:08:29 2006 -0600
    20.2 +++ b/xen/arch/ia64/xen/dom0_ops.c	Wed Aug 30 21:38:34 2006 +0100
    20.3 @@ -40,8 +40,8 @@ long arch_do_domctl(xen_domctl_t *op, XE
    20.4      {
    20.5          unsigned long i;
    20.6          struct domain *d = find_domain_by_id(op->domain);
    20.7 -        unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
    20.8 -        unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
    20.9 +        unsigned long start_page = op->u.getmemlist.start_pfn;
   20.10 +        unsigned long nr_pages = op->u.getmemlist.max_pfns;
   20.11          unsigned long mfn;
   20.12  
   20.13          if ( d == NULL ) {
    21.1 --- a/xen/arch/x86/physdev.c	Tue Aug 29 09:08:29 2006 -0600
    21.2 +++ b/xen/arch/x86/physdev.c	Wed Aug 30 21:38:34 2006 +0100
    21.3 @@ -96,10 +96,11 @@ long do_physdev_op(int cmd, XEN_GUEST_HA
    21.4          if ( !IS_PRIV(current->domain) )
    21.5              break;
    21.6  
    21.7 +        irq = irq_op.irq;
    21.8          ret = -EINVAL;
    21.9 -        if ( (irq = irq_op.irq) >= NR_IRQS )
   21.10 +        if ( (irq < 0) || (irq >= NR_IRQS) )
   21.11              break;
   21.12 -        
   21.13 +
   21.14          irq_op.vector = assign_irq_vector(irq);
   21.15          ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
   21.16          break;
    22.1 --- a/xen/include/public/domctl.h	Tue Aug 29 09:08:29 2006 -0600
    22.2 +++ b/xen/include/public/domctl.h	Wed Aug 30 21:38:34 2006 +0100
    22.3 @@ -16,7 +16,7 @@
    22.4  
    22.5  #include "xen.h"
    22.6  
    22.7 -#define XEN_DOMCTL_INTERFACE_VERSION 0x00000001
    22.8 +#define XEN_DOMCTL_INTERFACE_VERSION 0x00000002
    22.9  
   22.10  #define uint64_t uint64_aligned_t
   22.11  
   22.12 @@ -72,8 +72,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdo
   22.13  #define XEN_DOMCTL_getmemlist         6
   22.14  struct xen_domctl_getmemlist {
   22.15      /* IN variables. */
   22.16 +    /* Max entries to write to output buffer. */
   22.17      uint64_t max_pfns;
   22.18 -    XEN_GUEST_HANDLE_64(ulong) buffer;
   22.19 +    /* Start index in guest's page list. */
   22.20 +    uint64_t start_pfn;
   22.21 +    XEN_GUEST_HANDLE_64(xen_pfn_t) buffer;
   22.22      /* OUT variables. */
   22.23      uint64_t num_pfns;
   22.24  };
    23.1 --- a/xen/include/public/xen.h	Tue Aug 29 09:08:29 2006 -0600
    23.2 +++ b/xen/include/public/xen.h	Wed Aug 30 21:38:34 2006 +0100
    23.3 @@ -63,6 +63,7 @@
    23.4  #define __HYPERVISOR_hvm_op               34
    23.5  #define __HYPERVISOR_sysctl               35
    23.6  #define __HYPERVISOR_domctl               36
    23.7 +#define __HYPERVISOR_kexec_op             37
    23.8  
    23.9  /* Architecture-specific hypercall definitions. */
   23.10  #define __HYPERVISOR_arch_0               48