ia64/xen-unstable

changeset 1647:7ee821f4caea

bitkeeper revision 1.1043 (40e2f3efQjPxQusu8iQxE6VPLHlHvQ)

Merge boulderdash.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into boulderdash.cl.cam.ac.uk:/local/scratch/gm281/xeno-clone/xeno.bk
author gm281@boulderdash.cl.cam.ac.uk
date Wed Jun 30 17:10:07 2004 +0000 (2004-06-30)
parents 5c960c07a441 19a9925664d8
children 083178f6cdfa
files linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c
      tools/libxc/xc_linux_build.c
      tools/libxc/xc_linux_restore.c
      tools/libxc/xc_netbsd_build.c
      tools/libxc/xc_private.c
      tools/libxc/xc_private.h
      tools/python/xen/lowlevel/xu/xu.c
      tools/python/xen/xend/server/SrvDaemon.py
      tools/python/xen/xend/server/channel.py
      xen/common/dom0_ops.c
      xen/common/domain.c
      xen/common/keyhandler.c
      xen/common/memory.c
      xen/include/xen/mm.h
      xen/include/xen/sched.h
line diff
     1.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c	Wed Jun 30 13:10:43 2004 +0000
     1.2 +++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c	Wed Jun 30 17:10:07 2004 +0000
     1.3 @@ -17,6 +17,7 @@
     1.4  #include <linux/mman.h>
     1.5  #include <linux/smp_lock.h>
     1.6  #include <linux/pagemap.h>
     1.7 +#include <linux/vmalloc.h>
     1.8  
     1.9  #include <asm/hypervisor.h>
    1.10  #include <asm/pgalloc.h>
    1.11 @@ -33,7 +34,7 @@ typedef struct user_balloon_op {
    1.12  } user_balloon_op_t;
    1.13  /* END OF USER DEFINE */
    1.14  
    1.15 -/* Dead entry written into ballon-owned entries in the PMT. */
    1.16 +/* Dead entry written into balloon-owned entries in the PMT. */
    1.17  #define DEAD 0xdeadbeef
    1.18  
    1.19  static struct proc_dir_entry *balloon_pde;
    1.20 @@ -54,7 +55,7 @@ static inline pte_t *get_ptep(unsigned l
    1.21      return ptep;
    1.22  }
    1.23  
    1.24 -/* main function for relinquishing bit of memory */
    1.25 +/* Main function for relinquishing memory. */
    1.26  static unsigned long inflate_balloon(unsigned long num_pages)
    1.27  {
    1.28      unsigned long *parray;
    1.29 @@ -64,14 +65,19 @@ static unsigned long inflate_balloon(uns
    1.30      unsigned long vaddr;
    1.31      unsigned long i, j;
    1.32  
    1.33 -    parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long),
    1.34 -                                      GFP_KERNEL);
    1.35 +    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
    1.36 +    if ( parray == NULL )
    1.37 +    {
    1.38 +        printk("inflate_balloon: Unable to vmalloc parray\n");
    1.39 +        return 0;
    1.40 +    }
    1.41 +
    1.42      currp = parray;
    1.43  
    1.44      for ( i = 0; i < num_pages; i++ )
    1.45      {
    1.46 -        /* Try to obtain a free page (has to be done with GFP_ATOMIC). */
    1.47 -        vaddr = __get_free_page(GFP_ATOMIC);
    1.48 +        /* NB. Should be GFP_ATOMIC for a less aggressive inflation. */
    1.49 +        vaddr = __get_free_page(GFP_KERNEL);
    1.50  
    1.51          /* If allocation fails then free all reserved pages. */
    1.52          if ( vaddr == 0 )
    1.53 @@ -113,12 +119,13 @@ static unsigned long inflate_balloon(uns
    1.54      ret = num_pages;
    1.55  
    1.56   cleanup:
    1.57 -    kfree(parray);
    1.58 +    vfree(parray);
    1.59  
    1.60      return ret;
    1.61  }
    1.62  
    1.63 -/* install new mem pages obtained by deflate_balloon. function walks 
    1.64 +/*
    1.65 + * Install new mem pages obtained by deflate_balloon. function walks 
    1.66   * phys->machine mapping table looking for DEAD entries and populates
    1.67   * them.
    1.68   */
    1.69 @@ -143,8 +150,7 @@ static unsigned long process_new_pages(u
    1.70          if ( phys_to_machine_mapping[i] == DEAD )
    1.71          {
    1.72              phys_to_machine_mapping[i] = *curr;
    1.73 -            queue_l1_entry_update(
    1.74 -                (pte_t *)((i << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE), i);
    1.75 +            queue_machphys_update(*curr, i);
    1.76              queue_l1_entry_update(
    1.77                  get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
    1.78                  ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
    1.79 @@ -155,11 +161,12 @@ static unsigned long process_new_pages(u
    1.80          }
    1.81      }
    1.82  
    1.83 -    /* now, this is tricky (and will also change for machine addrs that 
    1.84 -      * are mapped to not previously released addresses). we free pages
    1.85 -      * that were allocated by get_free_page (the mappings are different 
    1.86 -      * now, of course).
    1.87 -      */
    1.88 +    /*
    1.89 +     * This is tricky (and will also change for machine addrs that 
    1.90 +     * are mapped to not previously released addresses). We free pages
    1.91 +     * that were allocated by get_free_page (the mappings are different 
    1.92 +     * now, of course).
    1.93 +     */
    1.94      curr = parray;
    1.95      for ( i = 0; i < num_installed; i++ )
    1.96      {
    1.97 @@ -181,8 +188,14 @@ unsigned long deflate_balloon(unsigned l
    1.98          return -EAGAIN;
    1.99      }
   1.100  
   1.101 -    parray = (unsigned long *)kmalloc(num_pages * sizeof(unsigned long), 
   1.102 -                                      GFP_KERNEL);
   1.103 +    parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
   1.104 +    if ( parray == NULL )
   1.105 +    {
   1.106 +        printk("inflate_balloon: Unable to vmalloc parray\n");
   1.107 +        return 0;
   1.108 +    }
   1.109 +
   1.110 +    XEN_flush_page_update_queue();
   1.111  
   1.112      ret = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, 
   1.113                                  parray, num_pages);
   1.114 @@ -203,7 +216,7 @@ unsigned long deflate_balloon(unsigned l
   1.115      credit -= num_pages;
   1.116  
   1.117   cleanup:
   1.118 -    kfree(parray);
   1.119 +    vfree(parray);
   1.120  
   1.121      return ret;
   1.122  }
   1.123 @@ -239,9 +252,6 @@ static int balloon_write(struct file *fi
   1.124      return sizeof(bop);
   1.125  }
   1.126  
   1.127 -/*
   1.128 - * main balloon driver initialization function.
   1.129 - */
   1.130  static int __init init_module(void)
   1.131  {
   1.132      printk(KERN_ALERT "Starting Xen Balloon driver\n");
   1.133 @@ -270,5 +280,3 @@ static void __exit cleanup_module(void)
   1.134  
   1.135  module_init(init_module);
   1.136  module_exit(cleanup_module);
   1.137 -
   1.138 -
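
The balloon.c hunks above replace kmalloc with vmalloc for the temporary pfn array (whose size scales with num_pages and can exceed what a single kmalloc will return), add a check for allocation failure, and take pages with GFP_KERNEL rather than GFP_ATOMIC. A minimal sketch of the same pattern follows; the helper name collect_and_release_pages is hypothetical and this is not the driver's own code:

    /* Sketch only: illustrates the vmalloc'd scratch-array pattern used by
     * inflate_balloon()/deflate_balloon() above. */
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static unsigned long collect_and_release_pages(unsigned long num_pages)
    {
        unsigned long *parray, vaddr, got = 0, i;

        /* Virtual contiguity is enough for a scratch array, and vmalloc has
         * no small per-allocation size limit the way kmalloc does. */
        parray = (unsigned long *)vmalloc(num_pages * sizeof(unsigned long));
        if ( parray == NULL )
        {
            printk("collect_and_release_pages: unable to vmalloc parray\n");
            return 0;
        }

        for ( i = 0; i < num_pages; i++ )
        {
            /* GFP_KERNEL may sleep; GFP_ATOMIC would be the less aggressive,
             * non-blocking choice mentioned in the comment above. */
            vaddr = __get_free_page(GFP_KERNEL);
            if ( vaddr == 0 )
                break;
            parray[got++] = vaddr;
        }

        for ( i = 0; i < got; i++ )
            free_page(parray[i]);

        vfree(parray);
        return got;
    }
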
     2.1 --- a/tools/libxc/xc_linux_build.c	Wed Jun 30 13:10:43 2004 +0000
     2.2 +++ b/tools/libxc/xc_linux_build.c	Wed Jun 30 17:10:07 2004 +0000
     2.3 @@ -18,7 +18,7 @@ static int readelfimage_base_and_size(ch
     2.4                                        unsigned long *pkernstart,
     2.5                                        unsigned long *pkernend,
     2.6                                        unsigned long *pkernentry);
     2.7 -static int loadelfimage(char *elfbase, int pmh, unsigned long *parray,
     2.8 +static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
     2.9                          unsigned long vstart);
    2.10  
    2.11  static long get_tot_pages(int xc_handle, u32 domid)
    2.12 @@ -53,7 +53,7 @@ static int get_pfn_list(int xc_handle,
    2.13      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
    2.14  }
    2.15  
    2.16 -static int copy_to_domain_page(int pm_handle,
    2.17 +static int copy_to_domain_page(void *pm_handle,
    2.18                                 unsigned long dst_pfn, 
    2.19                                 void *src_page)
    2.20  {
    2.21 @@ -86,7 +86,8 @@ static int setup_guestos(int xc_handle,
    2.22      extended_start_info_t *start_info;
    2.23      shared_info_t *shared_info;
    2.24      mmu_t *mmu = NULL;
    2.25 -    int pm_handle=-1, rc;
    2.26 +    void  *pm_handle=NULL;
    2.27 +    int rc;
    2.28  
    2.29      unsigned long nr_pt_pages;
    2.30      unsigned long ppt_alloc;
    2.31 @@ -165,7 +166,7 @@ static int setup_guestos(int xc_handle,
    2.32             v_start, v_end);
    2.33      printf(" ENTRY ADDRESS: %08lx\n", vkern_entry);
    2.34  
    2.35 -    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) < 0 )
    2.36 +    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
    2.37          goto error_out;
    2.38  
    2.39      if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
    2.40 @@ -307,7 +308,7 @@ static int setup_guestos(int xc_handle,
    2.41   error_out:
    2.42      if ( mmu != NULL )
    2.43          free(mmu);
    2.44 -    if ( pm_handle >= 0 )
    2.45 +    if ( pm_handle != NULL )
    2.46          (void)close_pfn_mapper(pm_handle);
    2.47      if ( page_array != NULL )
    2.48          free(page_array);
    2.49 @@ -629,7 +630,7 @@ static int readelfimage_base_and_size(ch
    2.50      return 0;
    2.51  }
    2.52  
    2.53 -static int loadelfimage(char *elfbase, int pmh, unsigned long *parray,
    2.54 +static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
    2.55                          unsigned long vstart)
    2.56  {
    2.57      Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase;
    2.58 @@ -649,11 +650,11 @@ static int loadelfimage(char *elfbase, i
    2.59          {
    2.60              pa = (phdr->p_vaddr + done) - vstart;
    2.61              va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
    2.62 -            va += pa & (PAGE_SIZE-1);
    2.63              chunksz = phdr->p_filesz - done;
    2.64              if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
    2.65                  chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
    2.66 -            memcpy(va, elfbase + phdr->p_offset + done, chunksz);
    2.67 +            memcpy(va + (pa & (PAGE_SIZE-1)),
    2.68 +                   elfbase + phdr->p_offset + done, chunksz);
    2.69              unmap_pfn(pmh, va);
    2.70          }
    2.71  
    2.72 @@ -661,11 +662,10 @@ static int loadelfimage(char *elfbase, i
    2.73          {
    2.74              pa = (phdr->p_vaddr + done) - vstart;
    2.75              va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
    2.76 -            va += pa & (PAGE_SIZE-1);
    2.77              chunksz = phdr->p_memsz - done;
    2.78              if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
    2.79                  chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
    2.80 -            memset(va, 0, chunksz);
    2.81 +            memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
    2.82              unmap_pfn(pmh, va);            
    2.83          }
    2.84      }
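
The loadelfimage() change above is more than reformatting: the old code advanced va by the intra-page offset before copying and then passed that offset pointer to unmap_pfn(). With the reworked unmap_pfn() in xc_private.c later in this changeset, which looks the address up in a table of mapped extents, only the exact pointer returned by map_pfn_writeable() can be unmapped, so the offset is now applied at the memcpy()/memset() call instead. A minimal sketch of the pattern, using the xc_private.h prototypes; the helper name copy_into_guest_page is hypothetical and the chunk is assumed not to cross a page boundary:

    /* Sketch only: keep the mapped base pointer intact and add the
     * intra-page offset at the point of use. */
    #include <string.h>
    #include "xc_private.h"

    static void copy_into_guest_page(void *pmh, unsigned long *parray,
                                     unsigned long pa,     /* pseudo-phys addr */
                                     const char *src, unsigned long chunksz)
    {
        /* chunksz is assumed to fit within the page containing pa. */
        char *va = map_pfn_writeable(pmh, parray[pa >> PAGE_SHIFT]);
        if ( va == NULL )
            return;
        memcpy(va + (pa & (PAGE_SIZE - 1)), src, chunksz);
        unmap_pfn(pmh, va);  /* must be the page-aligned base returned above */
    }
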
     3.1 --- a/tools/libxc/xc_linux_restore.c	Wed Jun 30 13:10:43 2004 +0000
     3.2 +++ b/tools/libxc/xc_linux_restore.c	Wed Jun 30 17:10:07 2004 +0000
     3.3 @@ -19,7 +19,6 @@
     3.4  #define DPRINTF(_f, _a...) ((void)0)
     3.5  #endif
     3.6  
     3.7 -
     3.8  static int get_pfn_list(int xc_handle,
     3.9                          u32 domain_id, 
    3.10                          unsigned long *pfn_buf, 
    3.11 @@ -53,23 +52,28 @@ static int get_pfn_list(int xc_handle,
    3.12   * @param ioctxt i/o context
    3.13   * @return 0 on success, non-zero on error.
    3.14   */
    3.15 -static int read_vmconfig(XcIOContext *ioctxt){
    3.16 +static int read_vmconfig(XcIOContext *ioctxt)
    3.17 +{
    3.18      int err = -1;
    3.19 -    if(xcio_read(ioctxt, &ioctxt->vmconfig_n, sizeof(ioctxt->vmconfig_n))){
    3.20 +
    3.21 +    if ( xcio_read(ioctxt, &ioctxt->vmconfig_n, sizeof(ioctxt->vmconfig_n)) )
    3.22          goto exit;
    3.23 -    }
    3.24 +
    3.25      ioctxt->vmconfig = malloc(ioctxt->vmconfig_n + 1);
    3.26 -    if(!ioctxt->vmconfig) goto exit;
    3.27 -    if(xcio_read(ioctxt, ioctxt->vmconfig, ioctxt->vmconfig_n)){
    3.28 +    if ( ioctxt->vmconfig == NULL ) 
    3.29          goto exit;
    3.30 -    }
    3.31 +
    3.32 +    if ( xcio_read(ioctxt, ioctxt->vmconfig, ioctxt->vmconfig_n) )
    3.33 +        goto exit;
    3.34 +
    3.35      ioctxt->vmconfig[ioctxt->vmconfig_n] = '\0';
    3.36      err = 0;
    3.37 +
    3.38    exit:
    3.39 -    if(err){
    3.40 -        if(ioctxt->vmconfig){
    3.41 +    if ( err )
    3.42 +    {
    3.43 +        if ( ioctxt->vmconfig != NULL )
    3.44              free(ioctxt->vmconfig);
    3.45 -        }
    3.46          ioctxt->vmconfig = NULL;
    3.47          ioctxt->vmconfig_n = 0;
    3.48      }
    3.49 @@ -126,12 +130,13 @@ int xc_linux_restore(int xc_handle, XcIO
    3.50  
    3.51      mmu_t *mmu = NULL;
    3.52  
    3.53 -    int pm_handle = -1;
    3.54 +    void *pm_handle = NULL;
    3.55  
    3.56      /* used by debug verify code */
    3.57      unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
    3.58  
    3.59 -    if ( mlock(&ctxt, sizeof(ctxt) ) ) {   
    3.60 +    if ( mlock(&ctxt, sizeof(ctxt) ) )
    3.61 +    {
    3.62          /* needed for when we do the build dom0 op, 
    3.63             but might as well do early */
    3.64          PERROR("Unable to mlock ctxt");
    3.65 @@ -140,24 +145,28 @@ int xc_linux_restore(int xc_handle, XcIO
    3.66  
    3.67      /* Start writing out the saved-domain record. */
    3.68      if ( xcio_read(ioctxt, signature, 16) ||
    3.69 -         (memcmp(signature, "LinuxGuestRecord", 16) != 0) ) {
    3.70 +         (memcmp(signature, "LinuxGuestRecord", 16) != 0) )
    3.71 +    {
    3.72          xcio_error(ioctxt, "Unrecognised state format -- no signature found");
    3.73          goto out;
    3.74      }
    3.75  
    3.76      if ( xcio_read(ioctxt, name,                  sizeof(name)) ||
    3.77           xcio_read(ioctxt, &nr_pfns,              sizeof(unsigned long)) ||
    3.78 -         xcio_read(ioctxt, pfn_to_mfn_frame_list, PAGE_SIZE) ) {
    3.79 +         xcio_read(ioctxt, pfn_to_mfn_frame_list, PAGE_SIZE) )
    3.80 +    {
    3.81          xcio_error(ioctxt, "Error reading header");
    3.82          goto out;
    3.83      }
    3.84  
    3.85 -    if(read_vmconfig(ioctxt)){
    3.86 +    if ( read_vmconfig(ioctxt) )
    3.87 +    {
    3.88          xcio_error(ioctxt, "Error writing vmconfig");
    3.89          goto out;
    3.90      }
    3.91  
    3.92 -    for ( i = 0; i < MAX_DOMAIN_NAME; i++ ) {
    3.93 +    for ( i = 0; i < MAX_DOMAIN_NAME; i++ ) 
    3.94 +    {
    3.95          if ( name[i] == '\0' ) break;
    3.96          if ( name[i] & 0x80 )
    3.97          {
    3.98 @@ -167,7 +176,8 @@ int xc_linux_restore(int xc_handle, XcIO
    3.99      }
   3.100      name[MAX_DOMAIN_NAME-1] = '\0';
   3.101  
   3.102 -    if ( nr_pfns > 1024*1024 ) {
   3.103 +    if ( nr_pfns > 1024*1024 )
   3.104 +    {
   3.105          xcio_error(ioctxt, "Invalid state file -- pfn count out of range");
   3.106          goto out;
   3.107      }
   3.108 @@ -177,19 +187,23 @@ int xc_linux_restore(int xc_handle, XcIO
   3.109      pfn_type         = calloc(1, 4 * nr_pfns);    
   3.110      region_mfn       = calloc(1, 4 * MAX_BATCH_SIZE);    
   3.111  
   3.112 -    if ( (pfn_to_mfn_table == NULL) || (pfn_type == NULL) || 
   3.113 -         (region_mfn == NULL) ) {
   3.114 +    if ( (pfn_to_mfn_table == NULL) ||
   3.115 +         (pfn_type == NULL) || 
   3.116 +         (region_mfn == NULL) ) 
   3.117 +    {
   3.118          errno = ENOMEM;
   3.119          goto out;
   3.120      }
   3.121      
   3.122 -    if ( mlock(region_mfn, 4 * MAX_BATCH_SIZE ) ) {
   3.123 +    if ( mlock(region_mfn, 4 * MAX_BATCH_SIZE ) )
   3.124 +    {
   3.125          xcio_error(ioctxt, "Could not mlock region_mfn");
   3.126          goto out;
   3.127      }
   3.128  
   3.129      /* Set the domain's name to that from the restore file */
   3.130 -    if ( xc_domain_setname( xc_handle, dom, name ) ) {
   3.131 +    if ( xc_domain_setname( xc_handle, dom, name ) )
   3.132 +    {
   3.133          xcio_error(ioctxt, "Could not set domain name");
   3.134          goto out;
   3.135      }
   3.136 @@ -208,24 +222,26 @@ int xc_linux_restore(int xc_handle, XcIO
   3.137      op.cmd = DOM0_GETDOMAININFO;
   3.138      op.u.getdomaininfo.domain = (domid_t)dom;
   3.139      op.u.getdomaininfo.ctxt = NULL;
   3.140 -    if ( do_dom0_op(xc_handle, &op) < 0 ) {
   3.141 +    if ( do_dom0_op(xc_handle, &op) < 0 )
   3.142 +    {
   3.143          xcio_error(ioctxt, "Could not get information on new domain");
   3.144          goto out;
   3.145      }
   3.146      shared_info_frame = op.u.getdomaininfo.shared_info_frame;
   3.147  
   3.148 -    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) < 0 )
   3.149 +    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
   3.150          goto out;
   3.151  
   3.152 -
   3.153 -
   3.154      /* Build the pfn-to-mfn table. We choose MFN ordering returned by Xen. */
   3.155 -    if ( get_pfn_list(xc_handle, dom, pfn_to_mfn_table, nr_pfns) != nr_pfns ) {
   3.156 -        xcio_error(ioctxt, "Did not read correct number of frame numbers for new dom");
   3.157 +    if ( get_pfn_list(xc_handle, dom, pfn_to_mfn_table, nr_pfns) != nr_pfns )
   3.158 +    {
   3.159 +        xcio_error(ioctxt, "Did not read correct number of frame "
   3.160 +                   "numbers for new dom");
   3.161          goto out;
   3.162      }
   3.163  
   3.164 -    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL ) {
   3.165 +    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
   3.166 +    {
   3.167          xcio_error(ioctxt, "Could not initialise for MMU updates");
   3.168          goto out;
   3.169      }
   3.170 @@ -238,33 +254,39 @@ int xc_linux_restore(int xc_handle, XcIO
   3.171       */
   3.172      prev_pc = 0;
   3.173  
   3.174 -    n=0;
   3.175 -    while(1) {
   3.176 +    n = 0;
   3.177 +    while ( 1 )
   3.178 +    {
   3.179          int j;
   3.180          unsigned long region_pfn_type[MAX_BATCH_SIZE];
   3.181  
   3.182          this_pc = (n * 100) / nr_pfns;
   3.183 -        if ( (this_pc - prev_pc) >= 5 ) {
   3.184 +        if ( (this_pc - prev_pc) >= 5 )
   3.185 +        {
   3.186              xcio_info(ioctxt, "\b\b\b\b%3d%%", this_pc);
   3.187              prev_pc = this_pc;
   3.188          }
   3.189  
   3.190 -        if ( xcio_read(ioctxt, &j, sizeof(int)) ) {
   3.191 +        if ( xcio_read(ioctxt, &j, sizeof(int)) )
   3.192 +        {
   3.193              xcio_error(ioctxt, "Error when reading from state file");
   3.194              goto out;
   3.195          }
   3.196  
   3.197          DPRINTF("batch %d\n",j);
   3.198   
   3.199 -        if ( j == -1 ) {
   3.200 +        if ( j == -1 )
   3.201 +        {
   3.202              verify = 1;
   3.203              printf("Entering page verify mode\n");
   3.204              continue;
   3.205          }
   3.206  
   3.207 -        if ( j == 0 ) break;  /* our work here is done */
   3.208 +        if ( j == 0 )
   3.209 +            break;  /* our work here is done */
   3.210  
   3.211 -        if( j > MAX_BATCH_SIZE ) {
   3.212 +        if ( j > MAX_BATCH_SIZE )
   3.213 +        {
   3.214              xcio_error(ioctxt, "Max batch size exceeded. Giving up.");
   3.215              goto out;
   3.216          }
   3.217 @@ -274,10 +296,14 @@ int xc_linux_restore(int xc_handle, XcIO
   3.218              goto out;
   3.219          }
   3.220  
   3.221 -        for(i=0; i<j; i++) {
   3.222 -            if ( (region_pfn_type[i] & LTAB_MASK) == XTAB) {
   3.223 +        for ( i = 0; i < j; i++ )
   3.224 +        {
   3.225 +            if ( (region_pfn_type[i] & LTAB_MASK) == XTAB)
   3.226 +            {
   3.227                  region_mfn[i] = 0; /* we know map will fail, but don't care */
   3.228 -            } else {  
   3.229 +            }
   3.230 +            else
   3.231 +            {  
   3.232                  pfn = region_pfn_type[i] & ~LTAB_MASK;
   3.233                  region_mfn[i] = pfn_to_mfn_table[pfn];
   3.234              }          
   3.235 @@ -286,19 +312,22 @@ int xc_linux_restore(int xc_handle, XcIO
   3.236          if ( (region_base = mfn_mapper_map_batch( xc_handle, dom, 
   3.237                                                    PROT_WRITE,
   3.238                                                    region_mfn,
   3.239 -                                                  j )) == 0) {
   3.240 +                                                  j )) == 0 )
   3.241 +        {
   3.242              xcio_error(ioctxt, "map batch failed");
   3.243              goto out;
   3.244          }
   3.245  
   3.246 -        for(i=0;i<j;i++) {
   3.247 +        for ( i = 0; i < j; i++ )
   3.248 +        {
   3.249              unsigned long *ppage;
   3.250  
   3.251              pfn = region_pfn_type[i] & ~LTAB_MASK;
   3.252  
   3.253              if ( (region_pfn_type[i] & LTAB_MASK) == XTAB) continue;
   3.254  
   3.255 -            if (pfn>nr_pfns) {
   3.256 +            if (pfn>nr_pfns)
   3.257 +            {
   3.258                  xcio_error(ioctxt, "pfn out of range");
   3.259                  goto out;
   3.260              }
   3.261 @@ -309,32 +338,36 @@ int xc_linux_restore(int xc_handle, XcIO
   3.262  
   3.263              mfn = pfn_to_mfn_table[pfn];
   3.264  
   3.265 -            if ( verify ) {
   3.266 +            if ( verify )
   3.267                  ppage = (unsigned long*) buf;  /* debug case */
   3.268 -            } else {
   3.269 +            else
   3.270                  ppage = (unsigned long*) (region_base + i*PAGE_SIZE);
   3.271 -            }
   3.272  
   3.273 -            if ( xcio_read(ioctxt, ppage, PAGE_SIZE) ) {
   3.274 +            if ( xcio_read(ioctxt, ppage, PAGE_SIZE) )
   3.275 +            {
   3.276                  xcio_error(ioctxt, "Error when reading from state file");
   3.277                  goto out;
   3.278              }
   3.279  
   3.280 -            switch( region_pfn_type[i] ) {
   3.281 +            switch( region_pfn_type[i] )
   3.282 +            {
   3.283              case 0:
   3.284                  break;
   3.285  
   3.286              case L1TAB:
   3.287              {
   3.288 -                for ( k = 0; k < 1024; k++ ) {
   3.289 -                    if ( ppage[k] & _PAGE_PRESENT ) {
   3.290 +                for ( k = 0; k < 1024; k++ ) 
   3.291 +                {
   3.292 +                    if ( ppage[k] & _PAGE_PRESENT ) 
   3.293 +                    {
   3.294                          xpfn = ppage[k] >> PAGE_SHIFT;
   3.295 -
   3.296 -                        if ( xpfn >= nr_pfns ) {
   3.297 -                            xcio_error(ioctxt, "Frame number in type %lu page table is "
   3.298 -                                  "out of range. i=%d k=%d pfn=0x%lx "
   3.299 -                                  "nr_pfns=%lu", region_pfn_type[i]>>28, i, 
   3.300 -                                  k, xpfn, nr_pfns);
   3.301 +                        if ( xpfn >= nr_pfns )
   3.302 +                        {
   3.303 +                            xcio_error(ioctxt, "Frame number in type %lu page "
   3.304 +                                       "table is out of range. i=%d k=%d "
   3.305 +                                       "pfn=0x%lx nr_pfns=%lu", 
   3.306 +                                       region_pfn_type[i]>>28, i, 
   3.307 +                                       k, xpfn, nr_pfns);
   3.308                              goto out;
   3.309                          }
   3.310  
   3.311 @@ -350,15 +383,19 @@ int xc_linux_restore(int xc_handle, XcIO
   3.312              {
   3.313                  for ( k = 0; 
   3.314                        k < (HYPERVISOR_VIRT_START>>L2_PAGETABLE_SHIFT); 
   3.315 -                      k++ ) {
   3.316 -                    if ( ppage[k] & _PAGE_PRESENT ) {
   3.317 +                      k++ )
   3.318 +                {
   3.319 +                    if ( ppage[k] & _PAGE_PRESENT )
   3.320 +                    {
   3.321                          xpfn = ppage[k] >> PAGE_SHIFT;
   3.322  
   3.323 -                        if ( xpfn >= nr_pfns ) {
   3.324 -                            xcio_error(ioctxt, "Frame number in type %lu page table is "
   3.325 -                                  "out of range. i=%d k=%d pfn=%lu nr_pfns=%lu",
   3.326 -                                  region_pfn_type[i]>>28, i, k, xpfn, nr_pfns);
   3.327 -
   3.328 +                        if ( xpfn >= nr_pfns )
   3.329 +                        {
   3.330 +                            xcio_error(ioctxt, "Frame number in type %lu page"
   3.331 +                                       " table is out of range. i=%d k=%d "
   3.332 +                                       "pfn=%lu nr_pfns=%lu",
   3.333 +                                       region_pfn_type[i]>>28, i, k, 
   3.334 +                                       xpfn, nr_pfns);
   3.335                              goto out;
   3.336                          }
   3.337  
   3.338 @@ -371,21 +408,25 @@ int xc_linux_restore(int xc_handle, XcIO
   3.339              break;
   3.340  
   3.341              default:
   3.342 -                xcio_error(ioctxt, "Bogus page type %lx page table is out of range."
   3.343 -                      " i=%d nr_pfns=%lu", region_pfn_type[i], i, nr_pfns);
   3.344 +                xcio_error(ioctxt, "Bogus page type %lx page table is "
   3.345 +                           "out of range. i=%d nr_pfns=%lu", 
   3.346 +                           region_pfn_type[i], i, nr_pfns);
   3.347                  goto out;
   3.348  
   3.349              } /* end of page type switch statement */
   3.350  
   3.351 -            if ( verify ) {
   3.352 +            if ( verify )
   3.353 +            {
   3.354                  int res = memcmp(buf, (region_base + i*PAGE_SIZE), PAGE_SIZE );
   3.355 -                if (res) {
   3.356 +                if ( res )
   3.357 +                {
   3.358                      int v;
   3.359                      printf("************** pfn=%lx type=%lx gotcs=%08lx "
   3.360                             "actualcs=%08lx\n", pfn, pfn_type[pfn], 
   3.361                             csum_page(region_base + i*PAGE_SIZE), 
   3.362                             csum_page(buf));
   3.363 -                    for ( v = 0; v < 4; v++ ) {
   3.364 +                    for ( v = 0; v < 4; v++ )
   3.365 +                    {
   3.366                          unsigned long *p = (unsigned long *)
   3.367                              (region_base + i*PAGE_SIZE);
   3.368                          if ( buf[v] != p[v] )
   3.369 @@ -396,7 +437,8 @@ int xc_linux_restore(int xc_handle, XcIO
   3.370              }
   3.371  
   3.372              if ( add_mmu_update(xc_handle, mmu,
   3.373 -                                (mfn<<PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, pfn) ) {
   3.374 +                                (mfn<<PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, pfn) )
   3.375 +            {
   3.376                  printf("machpys mfn=%ld pfn=%ld\n",mfn,pfn);
   3.377                  goto out;
   3.378              }
   3.379 @@ -407,16 +449,16 @@ int xc_linux_restore(int xc_handle, XcIO
   3.380          n+=j; /* crude stats */
   3.381      }
   3.382  
   3.383 -    printf("Received all pages\n");
   3.384 -
   3.385      DPRINTF("Received all pages\n");
   3.386  
   3.387      /*
   3.388       * Pin page tables. Do this after writing to them as otherwise Xen
   3.389       * will barf when doing the type-checking.
   3.390       */
   3.391 -    for ( i = 0; i < nr_pfns; i++ ) {
   3.392 -        if ( pfn_type[i] == L1TAB ) {
   3.393 +    for ( i = 0; i < nr_pfns; i++ )
   3.394 +    {
   3.395 +        if ( pfn_type[i] == L1TAB )
   3.396 +        {
   3.397              if ( add_mmu_update(xc_handle, mmu,
   3.398                                  (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 
   3.399                                  MMU_EXTENDED_COMMAND,
   3.400 @@ -425,11 +467,14 @@ int xc_linux_restore(int xc_handle, XcIO
   3.401                         (unsigned long)i, pfn_to_mfn_table[i]);
   3.402                  goto out;
   3.403              }
   3.404 -        } else if ( pfn_type[i] == L2TAB ) {
   3.405 +        }
   3.406 +        else if ( pfn_type[i] == L2TAB )
   3.407 +        {
   3.408              if ( add_mmu_update(xc_handle, mmu,
   3.409                                  (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 
   3.410                                  MMU_EXTENDED_COMMAND,
   3.411 -                                MMUEXT_PIN_L2_TABLE) ) {
   3.412 +                                MMUEXT_PIN_L2_TABLE) )
   3.413 +            {
   3.414                  printf("ERR pin L2 pfn=%lx mfn=%lx\n",
   3.415                         (unsigned long)i, pfn_to_mfn_table[i]);
   3.416                  goto out;
   3.417 @@ -442,8 +487,9 @@ int xc_linux_restore(int xc_handle, XcIO
   3.418      xcio_info(ioctxt, "\b\b\b\b100%%\nMemory reloaded.\n");
   3.419  
   3.420  
   3.421 -    if ( xcio_read(ioctxt, &ctxt,                 sizeof(ctxt)) ||
   3.422 -         xcio_read(ioctxt, shared_info,           PAGE_SIZE) ) {
   3.423 +    if ( xcio_read(ioctxt, &ctxt,       sizeof(ctxt)) ||
   3.424 +         xcio_read(ioctxt, shared_info, PAGE_SIZE) )
   3.425 +    {
   3.426          xcio_error(ioctxt, "Error when reading from state file");
   3.427          goto out;
   3.428      }
   3.429 @@ -463,13 +509,16 @@ int xc_linux_restore(int xc_handle, XcIO
   3.430      unmap_pfn(pm_handle, p_srec);
   3.431  
   3.432      /* Uncanonicalise each GDT frame number. */
   3.433 -    if ( ctxt.gdt_ents > 8192 ) {
   3.434 +    if ( ctxt.gdt_ents > 8192 )
   3.435 +    {
   3.436          xcio_error(ioctxt, "GDT entry count out of range");
   3.437          goto out;
   3.438      }
   3.439 -    for ( i = 0; i < ctxt.gdt_ents; i += 512 ) {
   3.440 +    for ( i = 0; i < ctxt.gdt_ents; i += 512 )
   3.441 +    {
   3.442          pfn = ctxt.gdt_frames[i];
   3.443 -        if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) ) {
   3.444 +        if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) )
   3.445 +        {
   3.446              xcio_error(ioctxt, "GDT frame number is bad");
   3.447              goto out;
   3.448          }
   3.449 @@ -478,7 +527,8 @@ int xc_linux_restore(int xc_handle, XcIO
   3.450  
   3.451      /* Uncanonicalise the page table base pointer. */
   3.452      pfn = ctxt.pt_base >> PAGE_SHIFT;
   3.453 -    if ( (pfn >= nr_pfns) || (pfn_type[pfn] != L2TAB) ) {
   3.454 +    if ( (pfn >= nr_pfns) || (pfn_type[pfn] != L2TAB) )
   3.455 +    {
   3.456          printf("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx\n",
   3.457                 pfn, nr_pfns, pfn_type[pfn], (unsigned long)L2TAB);
   3.458          xcio_error(ioctxt, "PT base is bad.");
   3.459 @@ -499,11 +549,13 @@ int xc_linux_restore(int xc_handle, XcIO
   3.460  
   3.461  
   3.462      /* Uncanonicalise the pfn-to-mfn table frame-number list. */
   3.463 -    for ( i = 0; i < (nr_pfns+1023)/1024; i++ ) {
   3.464 +    for ( i = 0; i < (nr_pfns+1023)/1024; i++ )
   3.465 +    {
   3.466          unsigned long pfn, mfn;
   3.467  
   3.468          pfn = pfn_to_mfn_frame_list[i];
   3.469 -        if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) ) {
   3.470 +        if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) )
   3.471 +        {
   3.472              xcio_error(ioctxt, "PFN-to-MFN frame number is bad");
   3.473              goto out;
   3.474          }
   3.475 @@ -515,15 +567,16 @@ int xc_linux_restore(int xc_handle, XcIO
   3.476            mfn_mapper_map_batch(xc_handle, dom, 
   3.477                                 PROT_WRITE,
   3.478                                 pfn_to_mfn_frame_list,
   3.479 -                               (nr_pfns+1023)/1024 )) == 0 ) {
   3.480 +                               (nr_pfns+1023)/1024 )) == 0 )
   3.481 +    {
   3.482          xcio_error(ioctxt, "Couldn't map pfn_to_mfn table");
   3.483          goto out;
   3.484      }
   3.485  
   3.486 -    memcpy( live_pfn_to_mfn_table, pfn_to_mfn_table, 
   3.487 -            nr_pfns*sizeof(unsigned long) );
   3.488 +    memcpy(live_pfn_to_mfn_table, pfn_to_mfn_table, 
   3.489 +           nr_pfns*sizeof(unsigned long) );
   3.490  
   3.491 -    munmap( live_pfn_to_mfn_table, ((nr_pfns+1023)/1024)*PAGE_SIZE );
   3.492 +    munmap(live_pfn_to_mfn_table, ((nr_pfns+1023)/1024)*PAGE_SIZE);
   3.493  
   3.494      /*
   3.495       * Safety checking of saved context:
   3.496 @@ -538,20 +591,18 @@ int xc_linux_restore(int xc_handle, XcIO
   3.497       *  9. debugregs are checked by Xen.
   3.498       *  10. callback code selectors need checking.
   3.499       */
   3.500 -    for ( i = 0; i < 256; i++ ) {
   3.501 +    for ( i = 0; i < 256; i++ )
   3.502 +    {
   3.503          ctxt.trap_ctxt[i].vector = i;
   3.504          if ( (ctxt.trap_ctxt[i].cs & 3) == 0 )
   3.505              ctxt.trap_ctxt[i].cs = FLAT_GUESTOS_CS;
   3.506      }
   3.507 -    if ( (ctxt.guestos_ss & 3) == 0 ){
   3.508 +    if ( (ctxt.guestos_ss & 3) == 0 )
   3.509          ctxt.guestos_ss = FLAT_GUESTOS_DS;
   3.510 -    }
   3.511 -    if ( (ctxt.event_callback_cs & 3) == 0 ){
   3.512 +    if ( (ctxt.event_callback_cs & 3) == 0 )
   3.513          ctxt.event_callback_cs = FLAT_GUESTOS_CS;
   3.514 -    }
   3.515 -    if ( (ctxt.failsafe_callback_cs & 3) == 0 ){
   3.516 +    if ( (ctxt.failsafe_callback_cs & 3) == 0 )
   3.517          ctxt.failsafe_callback_cs = FLAT_GUESTOS_CS;
   3.518 -    }
   3.519      if ( ((ctxt.ldt_base & (PAGE_SIZE - 1)) != 0) ||
   3.520           (ctxt.ldt_ents > 8192) ||
   3.521           (ctxt.ldt_base > HYPERVISOR_VIRT_START) ||
   3.522 @@ -568,7 +619,8 @@ int xc_linux_restore(int xc_handle, XcIO
   3.523  
   3.524      /* don't start the domain as we have console etc to set up */
   3.525    
   3.526 -    if( rc == 0 ) {
   3.527 +    if ( rc == 0 )
   3.528 +    {
   3.529          /* Success: print the domain id. */
   3.530          xcio_info(ioctxt, "DOM=%lu\n", dom);
   3.531          return 0;
   3.532 @@ -576,25 +628,20 @@ int xc_linux_restore(int xc_handle, XcIO
   3.533  
   3.534  
   3.535   out:
   3.536 -    if ( (rc != 0) && (dom != 0) ){
   3.537 +    if ( (rc != 0) && (dom != 0) )
   3.538          xc_domain_destroy(xc_handle, dom);
   3.539 -    }
   3.540 -    if ( mmu != NULL ){
   3.541 +    if ( mmu != NULL )
   3.542          free(mmu);
   3.543 -    }
   3.544 -    if ( pm_handle >= 0 ){
   3.545 +    if ( pm_handle != NULL )
   3.546          (void)close_pfn_mapper(pm_handle);
   3.547 -    }
   3.548 -    if ( pfn_to_mfn_table != NULL ){
   3.549 +    if ( pfn_to_mfn_table != NULL )
   3.550          free(pfn_to_mfn_table);
   3.551 -    }
   3.552 -    if ( pfn_type != NULL ){
   3.553 +    if ( pfn_type != NULL )
   3.554          free(pfn_type);
   3.555 -    }
   3.556  
   3.557 -    if ( rc == 0 ){
   3.558 +    if ( rc == 0 )
   3.559          ioctxt->domain = dom;
   3.560 -    }
   3.561 +
   3.562      DPRINTF("Restore exit with rc=%d\n",rc);
   3.563      return rc;
   3.564  }
     4.1 --- a/tools/libxc/xc_netbsd_build.c	Wed Jun 30 13:10:43 2004 +0000
     4.2 +++ b/tools/libxc/xc_netbsd_build.c	Wed Jun 30 17:10:07 2004 +0000
     4.3 @@ -13,7 +13,7 @@
     4.4  #define DPRINTF(x)
     4.5  #endif
     4.6  
     4.7 -static int loadelfimage(gzFile, int, unsigned long *, unsigned long,
     4.8 +static int loadelfimage(gzFile, void *, unsigned long *, unsigned long,
     4.9                          unsigned long *, unsigned long *,
    4.10                          unsigned long *, unsigned long *);
    4.11  
    4.12 @@ -77,9 +77,10 @@ static int setup_guestos(int xc_handle,
    4.13      shared_info_t *shared_info;
    4.14      unsigned long ksize;
    4.15      mmu_t *mmu = NULL;
    4.16 -    int pm_handle, i;
    4.17 +    void  *pm_handle = NULL;
    4.18 +    int i;
    4.19  
    4.20 -    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) < 0 )
    4.21 +    if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
    4.22          goto error_out;
    4.23  
    4.24      if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
    4.25 @@ -201,7 +202,7 @@ static int setup_guestos(int xc_handle,
    4.26   error_out:
    4.27      if ( mmu != NULL )
    4.28          free(mmu);
    4.29 -    if ( pm_handle >= 0 )
    4.30 +    if ( pm_handle != NULL )
    4.31          (void)close_pfn_mapper(pm_handle);
    4.32      if ( page_array == NULL )
    4.33          free(page_array);
    4.34 @@ -412,7 +413,7 @@ myseek(gzFile gfd, off_t offset, int whe
    4.35  #define IS_BSS(p) (p.p_filesz < p.p_memsz)
    4.36  
    4.37  static int
    4.38 -loadelfimage(gzFile kernel_gfd, int pm_handle, unsigned long *page_array,
    4.39 +loadelfimage(gzFile kernel_gfd, void *pm_handle, unsigned long *page_array,
    4.40               unsigned long tot_pages, unsigned long *virt_load_addr,
    4.41               unsigned long *ksize, unsigned long *symtab_addr,
    4.42               unsigned long *symtab_len)
     5.1 --- a/tools/libxc/xc_private.c	Wed Jun 30 13:10:43 2004 +0000
     5.2 +++ b/tools/libxc/xc_private.c	Wed Jun 30 17:10:07 2004 +0000
     5.3 @@ -6,51 +6,140 @@
     5.4  
     5.5  #include "xc_private.h"
     5.6  
     5.7 -int init_pfn_mapper(domid_t domid)
     5.8 +#define MAX_EXTENTS 8
     5.9 +typedef struct {
    5.10 +    int fd;
    5.11 +    struct {
    5.12 +        void         *base; 
    5.13 +        unsigned long length;
    5.14 +    } extent[MAX_EXTENTS];
    5.15 +} mapper_desc_t;
    5.16 +
    5.17 +void *init_pfn_mapper(domid_t domid)
    5.18  {
    5.19 -    int fd = open("/dev/mem", O_RDWR);
    5.20 -    if ( fd >= 0 )
    5.21 -        (void)ioctl(fd, _IO('M', 1), (unsigned long)domid);
    5.22 -    return fd;
    5.23 +    int            fd = open("/dev/mem", O_RDWR);
    5.24 +    mapper_desc_t *desc;
    5.25 +
    5.26 +    if ( fd < 0 )
    5.27 +        return NULL;
    5.28 +
    5.29 +    if ( (desc = malloc(sizeof(*desc))) == NULL )
    5.30 +    {
    5.31 +        close(fd);
    5.32 +        return NULL;
    5.33 +    }
    5.34 +
    5.35 +    (void)ioctl(fd, _IO('M', 1), (unsigned long)domid);
    5.36 +
    5.37 +    memset(desc, 0, sizeof(*desc));
    5.38 +    desc->fd = fd;
    5.39 +
    5.40 +    return desc;
    5.41  }
    5.42  
    5.43 -int close_pfn_mapper(int pm_handle)
    5.44 +int close_pfn_mapper(void *pm_handle)
    5.45  {
    5.46 -    return close(pm_handle);
    5.47 +    mapper_desc_t *desc = pm_handle;
    5.48 +    int            i;
    5.49 +
    5.50 +    for ( i = 0; i < MAX_EXTENTS; i++ )
    5.51 +    {
    5.52 +        if ( desc->extent[i].base != NULL )
    5.53 +            (void)munmap(desc->extent[i].base, desc->extent[i].length);
    5.54 +    }
    5.55 +
    5.56 +    close(desc->fd);
    5.57 +    free(desc);
    5.58 +
    5.59 +    return 0;
    5.60  }
    5.61  
    5.62 -void *map_pfn_writeable(int pm_handle, unsigned long pfn)
    5.63 +static int get_free_offset(mapper_desc_t *desc)
    5.64  {
    5.65 -    void *vaddr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
    5.66 -                       MAP_SHARED, pm_handle, pfn << PAGE_SHIFT);
    5.67 +    int i;
    5.68 +
    5.69 +    for ( i = 0; i < MAX_EXTENTS; i++ )
    5.70 +    {
    5.71 +        if ( desc->extent[i].base == NULL )
    5.72 +            break;
    5.73 +    }
    5.74 +
    5.75 +    if ( i == MAX_EXTENTS )
    5.76 +    {
    5.77 +        fprintf(stderr, "Extent overflow in map_pfn_*()!\n");
    5.78 +        fflush(stderr);
    5.79 +        *(int*)0=0; /* XXX */
    5.80 +    }
    5.81 +
    5.82 +    return i;
    5.83 +}
    5.84 +
    5.85 +void *map_pfn_writeable(void *pm_handle, unsigned long pfn)
    5.86 +{
    5.87 +    mapper_desc_t *desc = pm_handle;
    5.88 +    void          *vaddr;
    5.89 +    int            off;
    5.90 +
    5.91 +    vaddr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
    5.92 +                 MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
    5.93      if ( vaddr == MAP_FAILED )
    5.94          return NULL;
    5.95 +
    5.96 +    off = get_free_offset(desc);
    5.97 +    desc->extent[off].base   = vaddr;
    5.98 +    desc->extent[off].length = PAGE_SIZE;
    5.99 +
   5.100      return vaddr;
   5.101  }
   5.102  
   5.103 -void *map_pfn_readonly(int pm_handle, unsigned long pfn)
   5.104 +void *map_pfn_readonly(void *pm_handle, unsigned long pfn)
   5.105  {
   5.106 -    void *vaddr = mmap(NULL, PAGE_SIZE, PROT_READ,
   5.107 -                       MAP_SHARED, pm_handle, pfn << PAGE_SHIFT);
   5.108 +    mapper_desc_t *desc = pm_handle;
   5.109 +    void          *vaddr;
   5.110 +    int            off;
   5.111 +
   5.112 +    vaddr = mmap(NULL, PAGE_SIZE, PROT_READ,
   5.113 +                 MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
   5.114      if ( vaddr == MAP_FAILED )
   5.115          return NULL;
   5.116 +
   5.117 +    off = get_free_offset(desc);
   5.118 +    desc->extent[off].base   = vaddr;
   5.119 +    desc->extent[off].length = PAGE_SIZE;
   5.120 +
   5.121      return vaddr;
   5.122  }
   5.123  
   5.124 -void unmap_pfn(int pm_handle, void *vaddr)
   5.125 +void unmap_pfn(void *pm_handle, void *vaddr)
   5.126  {
   5.127 -    (void)munmap(vaddr, PAGE_SIZE);
   5.128 +    mapper_desc_t *desc = pm_handle;
   5.129 +    int            i;
   5.130 +    unsigned long  len = 0;
   5.131 +
   5.132 +    for ( i = 0; i < MAX_EXTENTS; i++ )
   5.133 +    {
   5.134 +        if ( desc->extent[i].base == vaddr )
   5.135 +        {
   5.136 +            desc->extent[i].base = NULL;
   5.137 +            len = desc->extent[i].length;
   5.138 +        }
   5.139 +    }
   5.140 +
   5.141 +    if ( len == 0 )
   5.142 +        *(int*)0 = 0; /* XXX */
   5.143 +
   5.144 +    (void)munmap(vaddr, len);
   5.145  }
   5.146  
   5.147  /*******************/
   5.148  
   5.149 -void * mfn_mapper_map_batch(int xc_handle, domid_t dom, int prot,
   5.150 -                            unsigned long *arr, int num )
   5.151 +void *mfn_mapper_map_batch(int xc_handle, domid_t dom, int prot,
   5.152 +                           unsigned long *arr, int num )
   5.153  {
   5.154      privcmd_mmapbatch_t ioctlx; 
   5.155      void *addr;
   5.156 -    addr = mmap( NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0 );
   5.157 -    if (addr)
   5.158 +    addr = mmap(NULL, num*PAGE_SIZE, prot, MAP_SHARED, xc_handle, 0);
   5.159 +    if ( addr != NULL )
   5.160      {
   5.161          ioctlx.num=num;
   5.162          ioctlx.dom=dom;
   5.163 @@ -69,15 +158,15 @@ void * mfn_mapper_map_batch(int xc_handl
   5.164  
   5.165  /*******************/
   5.166  
   5.167 -void * mfn_mapper_map_single(int xc_handle, domid_t dom,
   5.168 -                             int size, int prot,
   5.169 -                             unsigned long mfn )
   5.170 +void *mfn_mapper_map_single(int xc_handle, domid_t dom,
   5.171 +                            int size, int prot,
   5.172 +                            unsigned long mfn )
   5.173  {
   5.174      privcmd_mmap_t ioctlx; 
   5.175      privcmd_mmap_entry_t entry; 
   5.176      void *addr;
   5.177 -    addr = mmap( NULL, size, prot, MAP_SHARED, xc_handle, 0 );
   5.178 -    if (addr)
   5.179 +    addr = mmap(NULL, size, prot, MAP_SHARED, xc_handle, 0);
   5.180 +    if ( addr != NULL )
   5.181      {
   5.182          ioctlx.num=1;
   5.183          ioctlx.dom=dom;
   5.184 @@ -85,7 +174,7 @@ void * mfn_mapper_map_single(int xc_hand
   5.185          entry.va=(unsigned long) addr;
   5.186          entry.mfn=mfn;
   5.187          entry.npages=(size+PAGE_SIZE-1)>>PAGE_SHIFT;
   5.188 -        if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) <0 )
   5.189 +        if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) < 0 )
   5.190          {
   5.191              munmap(addr, size);
   5.192              return 0;
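
The xc_private.c rework above turns the pfn-mapper handle from a raw /dev/mem file descriptor into an opaque pointer to a mapper_desc_t, which records each extent handed out by map_pfn_writeable()/map_pfn_readonly() so that unmap_pfn() can recover the mapping length and close_pfn_mapper() can release anything still outstanding. A minimal caller sketch against the prototypes declared in xc_private.h below; the function name poke_guest_pfn is hypothetical:

    /* Sketch only: typical caller of the reworked void* pm_handle interface. */
    #include "xc_private.h"

    static int poke_guest_pfn(domid_t dom, unsigned long pfn, unsigned long val)
    {
        void *pm_handle, *page;
        int rc = -1;

        if ( (pm_handle = init_pfn_mapper(dom)) == NULL )
            return -1;

        if ( (page = map_pfn_writeable(pm_handle, pfn)) != NULL )
        {
            *(unsigned long *)page = val;
            unmap_pfn(pm_handle, page);  /* exact pointer returned by map_pfn_* */
            rc = 0;
        }

        /* Closes /dev/mem and unmaps any extents the caller left mapped. */
        (void)close_pfn_mapper(pm_handle);
        return rc;
    }
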
     6.1 --- a/tools/libxc/xc_private.h	Wed Jun 30 13:10:43 2004 +0000
     6.2 +++ b/tools/libxc/xc_private.h	Wed Jun 30 17:10:07 2004 +0000
     6.3 @@ -131,11 +131,11 @@ static inline int do_multicall_op(int xc
     6.4  /*
     6.5   * PFN mapping.
     6.6   */
     6.7 -int init_pfn_mapper(domid_t domid);
     6.8 -int close_pfn_mapper(int pm_handle);
     6.9 -void *map_pfn_writeable(int pm_handle, unsigned long pfn);
    6.10 -void *map_pfn_readonly(int pm_handle, unsigned long pfn);
    6.11 -void unmap_pfn(int pm_handle, void *vaddr);
    6.12 +void *init_pfn_mapper(domid_t domid);
    6.13 +int close_pfn_mapper(void *pm_handle);
    6.14 +void *map_pfn_writeable(void *pm_handle, unsigned long pfn);
    6.15 +void *map_pfn_readonly(void *pm_handle, unsigned long pfn);
    6.16 +void unmap_pfn(void *pm_handle, void *vaddr);
    6.17  int get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr);
    6.18  unsigned long csum_page (void * page);
    6.19  
     7.1 --- a/tools/python/xen/lowlevel/xu/xu.c	Wed Jun 30 13:10:43 2004 +0000
     7.2 +++ b/tools/python/xen/lowlevel/xu/xu.c	Wed Jun 30 17:10:07 2004 +0000
     7.3 @@ -38,9 +38,6 @@
     7.4  #define EVTCHN_DEV_NAME  "/dev/xen/evtchn"
     7.5  #define EVTCHN_DEV_MAJOR 10
     7.6  #define EVTCHN_DEV_MINOR 200
     7.7 -#define PORT_NORMAL     0x0000   /* A standard event notification.      */ 
     7.8 -#define PORT_EXCEPTION  0x8000   /* An exceptional notification.        */
     7.9 -#define PORTIDX_MASK    0x7fff   /* Strip subtype to obtain port index. */
    7.10  /* /dev/xen/evtchn ioctls: */
    7.11  /* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
    7.12  #define EVTCHN_RESET  _IO('E', 1)
    7.13 @@ -81,7 +78,7 @@ static PyObject *xu_notifier_read(PyObje
    7.14      }
    7.15      
    7.16      if ( bytes == sizeof(v) )
    7.17 -        return Py_BuildValue("(i,i)", v&PORTIDX_MASK, v&~PORTIDX_MASK);
    7.18 +        return PyInt_FromLong(v);
    7.19  
    7.20   none:
    7.21      Py_INCREF(Py_None);
    7.22 @@ -145,7 +142,7 @@ static PyMethodDef xu_notifier_methods[]
    7.23      { "read",
    7.24        (PyCFunction)xu_notifier_read,
    7.25        METH_VARARGS,
    7.26 -      "Read a (@port, @type) pair.\n" },
    7.27 +      "Read a @port with pending notifications.\n" },
    7.28  
    7.29      { "unmask", 
    7.30        (PyCFunction)xu_notifier_unmask,
    7.31 @@ -199,10 +196,6 @@ static PyObject *xu_notifier_new(PyObjec
    7.32  
    7.33  static PyObject *xu_notifier_getattr(PyObject *obj, char *name)
    7.34  {
    7.35 -    if ( strcmp(name, "EXCEPTION") == 0 )
    7.36 -        return PyInt_FromLong(PORT_EXCEPTION);
    7.37 -    if ( strcmp(name, "NORMAL") == 0 )
    7.38 -        return PyInt_FromLong(PORT_NORMAL);
    7.39      return Py_FindMethod(xu_notifier_methods, obj, name);
    7.40  }
    7.41  
    7.42 @@ -686,43 +679,6 @@ typedef struct xu_port_object {
    7.43  
    7.44  static PyObject *port_error;
    7.45  
    7.46 -static int xup_connect(xu_port_object *xup, domid_t dom,
    7.47 -                       int local_port, int remote_port){
    7.48 -    // From our prespective rx = producer, tx = consumer.
    7.49 -    int err = 0;
    7.50 -    printf("%s> dom=%u %d:%d\n", __FUNCTION__, (unsigned int)dom, 
    7.51 -           local_port, remote_port);
    7.52 -
    7.53 -    // Consumer = tx.
    7.54 -    //xup->interface->tx_resp_prod = 0;
    7.55 -    //xup->interface->tx_req_prod = 0;
    7.56 -    xup->tx_resp_prod = xup->interface->tx_resp_prod;
    7.57 -    xup->tx_req_cons = xup->interface->tx_resp_prod;
    7.58 -    printf("%s> tx: %u %u : %u %u\n", __FUNCTION__,
    7.59 -           (unsigned int)xup->interface->tx_resp_prod,
    7.60 -           (unsigned int)xup->tx_resp_prod,
    7.61 -           (unsigned int)xup->tx_req_cons,
    7.62 -           (unsigned int)xup->interface->tx_req_prod);
    7.63 -
    7.64 -    // Producer = rx.
    7.65 -    //xup->interface->rx_req_prod  = 0;
    7.66 -    //xup->interface->rx_resp_prod = 0;
    7.67 -    xup->rx_req_prod  = xup->interface->rx_req_prod;
    7.68 -    xup->rx_resp_cons = xup->interface->rx_resp_prod;
    7.69 -    printf("%s> rx: %u %u : %u %u\n", __FUNCTION__,
    7.70 -           (unsigned int)xup->rx_resp_cons,
    7.71 -           (unsigned int)xup->interface->rx_resp_prod,
    7.72 -           (unsigned int)xup->interface->rx_req_prod,
    7.73 -           (unsigned int)xup->rx_req_prod);
    7.74 -
    7.75 -    xup->remote_dom   = dom;
    7.76 -    xup->local_port   = local_port;
    7.77 -    xup->remote_port  = remote_port;
    7.78 -
    7.79 -    printf("%s< err=%d\n", __FUNCTION__, err);
    7.80 -    return err;
    7.81 -}
    7.82 -
    7.83  static PyObject *xu_port_notify(PyObject *self, PyObject *args)
    7.84  {
    7.85      xu_port_object *xup = (xu_port_object *)self;
    7.86 @@ -913,6 +869,86 @@ static PyObject *xu_port_space_to_write_
    7.87      return PyInt_FromLong(1);
    7.88  }
    7.89  
    7.90 +static int __xu_port_connect(xu_port_object *xup)
    7.91 +{
    7.92 +    xc_dominfo_t info;
    7.93 +
    7.94 +    if ( xup->mem_fd != -1 )
    7.95 +        return 0;
    7.96 +
    7.97 +    if ( (xup->mem_fd = open("/dev/mem", O_RDWR)) == -1 )
    7.98 +    {
    7.99 +        PyErr_SetString(port_error, "Could not open '/dev/mem'");
   7.100 +        return -1;
   7.101 +    }
   7.102 +
   7.103 +    /* Set the General-Purpose Subject whose page frame will be mapped. */
   7.104 +    (void)ioctl(xup->mem_fd, _IO('M', 1), (unsigned long)xup->remote_dom);
   7.105 +
   7.106 +    if ( (xc_domain_getinfo(xup->xc_handle, xup->remote_dom, 1, &info) != 1) ||
   7.107 +         (info.domid != xup->remote_dom) )
   7.108 +    {
   7.109 +        PyErr_SetString(port_error, "Failed to obtain domain status");
   7.110 +        (void)close(xup->mem_fd);
   7.111 +        xup->mem_fd = -1;
   7.112 +        return -1;
   7.113 +    }
   7.114 +
   7.115 +    xup->interface = 
   7.116 +        map_control_interface(xup->mem_fd, info.shared_info_frame);
   7.117 +    if ( xup->interface == NULL )
   7.118 +    {
   7.119 +        PyErr_SetString(port_error, "Failed to map domain control interface");
   7.120 +        (void)close(xup->mem_fd);
   7.121 +        xup->mem_fd = -1;
   7.122 +        return -1;
   7.123 +    }
   7.124 +
   7.125 +    /* Synchronise ring indexes. */
   7.126 +    xup->tx_resp_prod = xup->interface->tx_resp_prod;
   7.127 +    xup->tx_req_cons  = xup->interface->tx_resp_prod;
   7.128 +    xup->rx_req_prod  = xup->interface->rx_req_prod;
   7.129 +    xup->rx_resp_cons = xup->interface->rx_resp_prod;
   7.130 +
   7.131 +    return 0;
   7.132 +}
   7.133 +
   7.134 +static void __xu_port_disconnect(xu_port_object *xup)
   7.135 +{
   7.136 +    if ( xup->mem_fd == -1 )
   7.137 +        return;
   7.138 +    unmap_control_interface(xup->mem_fd, xup->interface);
   7.139 +    (void)close(xup->mem_fd);
   7.140 +    xup->mem_fd = -1;
   7.141 +}
   7.142 +
   7.143 +static PyObject *xu_port_connect(PyObject *self, PyObject *args)
   7.144 +{
   7.145 +    xu_port_object *xup = (xu_port_object *)self;
   7.146 +
   7.147 +    if ( !PyArg_ParseTuple(args, "") )
   7.148 +        return NULL;
   7.149 +
   7.150 +    if ( __xu_port_connect(xup) != 0 )
   7.151 +        return NULL;
   7.152 +
   7.153 +    Py_INCREF(Py_None);
   7.154 +    return Py_None;
   7.155 +}
   7.156 +
   7.157 +static PyObject *xu_port_disconnect(PyObject *self, PyObject *args)
   7.158 +{
   7.159 +    xu_port_object *xup = (xu_port_object *)self;
   7.160 +
   7.161 +    if ( !PyArg_ParseTuple(args, "") )
   7.162 +        return NULL;
   7.163 +
   7.164 +    __xu_port_disconnect(xup);
   7.165 +
   7.166 +    Py_INCREF(Py_None);
   7.167 +    return Py_None;
   7.168 +}
   7.169 +
   7.170  static PyMethodDef xu_port_methods[] = {
   7.171      { "notify",
   7.172        (PyCFunction)xu_port_notify,
   7.173 @@ -959,6 +995,16 @@ static PyMethodDef xu_port_methods[] = {
   7.174        METH_VARARGS,
   7.175        "Returns TRUE if there is space to write a response message.\n" },
   7.176  
   7.177 +    { "connect",
   7.178 +      (PyCFunction)xu_port_connect,
   7.179 +      METH_VARARGS,
   7.180 +      "Synchronously connect to remote domain.\n" },
   7.181 +
   7.182 +    { "disconnect",
   7.183 +      (PyCFunction)xu_port_disconnect,
   7.184 +      METH_VARARGS,
   7.185 +      "Synchronously disconnect from remote domain.\n" },
   7.186 +
   7.187      { NULL, NULL, 0, NULL }
   7.188  };
   7.189  
   7.190 @@ -969,26 +1015,19 @@ static PyObject *xu_port_new(PyObject *s
   7.191      xu_port_object *xup;
   7.192      u32 dom;
   7.193      int port1, port2;
   7.194 -    xc_dominfo_t info;
   7.195  
   7.196      if ( !PyArg_ParseTuple(args, "i", &dom) )
   7.197          return NULL;
   7.198  
   7.199      xup = PyObject_New(xu_port_object, &xu_port_type);
   7.200  
   7.201 -    if ( (xup->mem_fd = open("/dev/mem", O_RDWR)) == -1 )
   7.202 -    {
   7.203 -        PyErr_SetString(port_error, "Could not open '/dev/mem'");
   7.204 -        goto fail1;
   7.205 -    }
   7.206 -
   7.207 -    /* Set the General-Purpose Subject whose page frame will be mapped. */
   7.208 -    (void)ioctl(xup->mem_fd, _IO('M', 1), (unsigned long)dom);
   7.209 +    xup->remote_dom = dom;
   7.210 +    xup->mem_fd     = -1; /* currently disconnected */
   7.211  
   7.212      if ( (xup->xc_handle = xc_interface_open()) == -1 )
   7.213      {
   7.214          PyErr_SetString(port_error, "Could not open Xen control interface");
   7.215 -        goto fail2;
   7.216 +        goto fail1;
   7.217      }
   7.218  
   7.219      if ( dom == 0 )
   7.220 @@ -1002,7 +1041,7 @@ static PyObject *xu_port_new(PyObject *s
   7.221          if ( port1 < 0 )
   7.222          {
   7.223              PyErr_SetString(port_error, "Could not open channel to DOM0");
   7.224 -            goto fail3;
   7.225 +            goto fail2;
   7.226          }
   7.227      }
   7.228      else if ( xc_evtchn_bind_interdomain(xup->xc_handle, 
   7.229 @@ -1010,34 +1049,22 @@ static PyObject *xu_port_new(PyObject *s
   7.230                                           &port1, &port2) != 0 )
   7.231      {
   7.232          PyErr_SetString(port_error, "Could not open channel to domain");
   7.233 -        goto fail3;
   7.234 -    }
   7.235 -
   7.236 -    if ( (xc_domain_getinfo(xup->xc_handle, dom, 1, &info) != 1) ||
   7.237 -         (info.domid != dom) )
   7.238 -    {
   7.239 -        PyErr_SetString(port_error, "Failed to obtain domain status");
   7.240 -        goto fail4;
   7.241 +        goto fail2;
   7.242      }
   7.243  
   7.244 -    xup->interface = 
   7.245 -        map_control_interface(xup->mem_fd, info.shared_info_frame);
   7.246 -    if ( xup->interface == NULL )
   7.247 -    {
   7.248 -        PyErr_SetString(port_error, "Failed to map domain control interface");
   7.249 -        goto fail4;
   7.250 -    }
   7.251 +    xup->local_port  = port1;
   7.252 +    xup->remote_port = port2;
   7.253 +
   7.254 +    if ( __xu_port_connect(xup) != 0 )
   7.255 +        goto fail3;
   7.256  
   7.257 -    xup_connect(xup, dom, port1, port2);
   7.258      return (PyObject *)xup;
   7.259 -
   7.260      
   7.261 - fail4:
   7.262 -    (void)xc_evtchn_close(xup->xc_handle, DOMID_SELF, port1);
   7.263   fail3:
   7.264 -    (void)xc_interface_close(xup->xc_handle);
   7.265 +    if ( dom != 0 )
   7.266 +        (void)xc_evtchn_close(xup->xc_handle, DOMID_SELF, port1);
   7.267   fail2:
   7.268 -    (void)close(xup->mem_fd);
   7.269 +    (void)xc_interface_close(xup->xc_handle);
   7.270   fail1:
   7.271      PyObject_Del((PyObject *)xup);
   7.272      return NULL;        
   7.273 @@ -1058,11 +1085,10 @@ static PyObject *xu_port_getattr(PyObjec
   7.274  static void xu_port_dealloc(PyObject *self)
   7.275  {
   7.276      xu_port_object *xup = (xu_port_object *)self;
   7.277 -    unmap_control_interface(xup->mem_fd, xup->interface);
   7.278 +    __xu_port_disconnect(xup);
   7.279      if ( xup->remote_dom != 0 )
   7.280          (void)xc_evtchn_close(xup->xc_handle, DOMID_SELF, xup->local_port);
   7.281      (void)xc_interface_close(xup->xc_handle);
   7.282 -    (void)close(xup->mem_fd);
   7.283      PyObject_Del(self);
   7.284  }
   7.285  
     8.1 --- a/tools/python/xen/xend/server/SrvDaemon.py	Wed Jun 30 13:10:43 2004 +0000
     8.2 +++ b/tools/python/xen/xend/server/SrvDaemon.py	Wed Jun 30 17:10:07 2004 +0000
     8.3 @@ -169,13 +169,10 @@ class NotifierProtocol(protocol.Protocol
     8.4      def __init__(self, channelFactory):
     8.5          self.channelFactory = channelFactory
     8.6  
     8.7 -    def notificationReceived(self, idx, type):
     8.8 -        #print 'NotifierProtocol>notificationReceived>', idx, type
     8.9 +    def notificationReceived(self, idx):
    8.10          channel = self.channelFactory.getChannel(idx)
    8.11 -        if not channel:
    8.12 -            return
    8.13 -        #print 'NotifierProtocol>notificationReceived> channel', channel
    8.14 -        channel.notificationReceived(type)
    8.15 +        if channel:
    8.16 +            channel.notificationReceived()
    8.17  
    8.18      def connectionLost(self, reason=None):
    8.19          pass
    8.20 @@ -251,9 +248,8 @@ class NotifierPort(abstract.FileDescript
    8.21              notification = self.notifier.read()
    8.22              if not notification:
    8.23                  break
    8.24 -            (idx, type) = notification
    8.25 -            self.protocol.notificationReceived(idx, type)
    8.26 -            self.notifier.unmask(idx)
    8.27 +            self.protocol.notificationReceived(notification)
    8.28 +            self.notifier.unmask(notification)
    8.29              count += 1
    8.30          #print 'NotifierPort>doRead<'
    8.31  
     9.1 --- a/tools/python/xen/xend/server/channel.py	Wed Jun 30 13:10:43 2004 +0000
     9.2 +++ b/tools/python/xen/xend/server/channel.py	Wed Jun 30 17:10:07 2004 +0000
     9.3 @@ -107,22 +107,14 @@ class BaseChannel:
     9.4          """
     9.5          return self.idx
     9.6  
     9.7 -    def notificationReceived(self, type):
     9.8 +    def notificationReceived(self):
     9.9          """Called when a notification is received.
    9.10          Closes the channel on error, otherwise calls
    9.11          handleNotification(type), which should be defined
    9.12          in a subclass.
    9.13          """
    9.14 -        #print 'notificationReceived> type=', type, self
    9.15 -        if self.closed: return
    9.16 -        if type == self.factory.notifier.EXCEPTION:
    9.17 -            print 'notificationReceived> EXCEPTION'
    9.18 -            info = xc.evtchn_status(self.idx)
    9.19 -            if info['status'] == 'unbound':
    9.20 -                print 'notificationReceived> EXCEPTION closing...'
    9.21 -                self.close()
    9.22 -                return
    9.23 -        self.handleNotification(type)
    9.24 +        if not self.closed:
    9.25 +            self.handleNotification(type)
    9.26  
    9.27      def close(self):
    9.28          """Close the channel. Calls channelClosed() on the factory.
    10.1 --- a/xen/common/dom0_ops.c	Wed Jun 30 13:10:43 2004 +0000
    10.2 +++ b/xen/common/dom0_ops.c	Wed Jun 30 17:10:07 2004 +0000
    10.3 @@ -247,7 +247,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
    10.4          {
    10.5              ret = 0;
    10.6  
    10.7 -            spin_lock(&d->page_list_lock);
    10.8 +            spin_lock(&d->page_alloc_lock);
    10.9              list_ent = d->page_list.next;
   10.10              for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
   10.11              {
   10.12 @@ -261,7 +261,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
   10.13                  buffer++;
   10.14                  list_ent = frame_table[pfn].list.next;
   10.15              }
   10.16 -            spin_unlock(&d->page_list_lock);
   10.17 +            spin_unlock(&d->page_alloc_lock);
   10.18  
   10.19              op->u.getmemlist.num_pfns = i;
   10.20              copy_to_user(u_dom0_op, op, sizeof(*op));
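The getmemlist hunk above walks the domain's page list under the renamed page_alloc_lock while copying out at most max_pfns entries. A small userspace analogue of that walk-under-lock pattern, using a pthread mutex and a simplified list in place of Xen's list_head/frame_table (all names here are illustrative only):

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the domain's page list and its lock. */
struct page { unsigned long pfn; struct page *next; };

static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page pages[3] = {
    { 100, &pages[1] }, { 101, &pages[2] }, { 102, NULL }
};
static struct page *page_list = &pages[0];

/* Copy at most max_pfns entries out of the list while holding the
 * lock that protects it, mirroring the getmemlist walk. */
static unsigned int get_mem_list(unsigned long *buf, unsigned int max_pfns)
{
    unsigned int i = 0;
    pthread_mutex_lock(&page_alloc_lock);
    for ( struct page *p = page_list; (p != NULL) && (i < max_pfns); p = p->next )
        buf[i++] = p->pfn;
    pthread_mutex_unlock(&page_alloc_lock);
    return i;
}

int main(void)
{
    unsigned long buf[8];
    unsigned int n = get_mem_list(buf, 8);
    for ( unsigned int i = 0; i < n; i++ )
        printf("pfn %lu\n", buf[i]);
    return 0;
}
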
    11.1 --- a/xen/common/domain.c	Wed Jun 30 13:10:43 2004 +0000
    11.2 +++ b/xen/common/domain.c	Wed Jun 30 17:10:07 2004 +0000
    11.3 @@ -80,7 +80,7 @@ struct domain *do_createdomain(domid_t d
    11.4  
    11.5          d->addr_limit = USER_DS;
    11.6          
    11.7 -        spin_lock_init(&d->page_list_lock);
    11.8 +        spin_lock_init(&d->page_alloc_lock);
    11.9          INIT_LIST_HEAD(&d->page_list);
   11.10          d->max_pages = d->tot_pages = 0;
   11.11  
   11.12 @@ -260,19 +260,19 @@ struct pfn_info *alloc_domain_page(struc
   11.13      if ( d != NULL )
   11.14      {
   11.15          wmb(); /* Domain pointer must be visible before updating refcnt. */
   11.16 -        spin_lock(&d->page_list_lock);
   11.17 +        spin_lock(&d->page_alloc_lock);
   11.18          if ( unlikely(d->tot_pages >= d->max_pages) )
   11.19          {
   11.20              DPRINTK("Over-allocation for domain %u: %u >= %u\n",
   11.21                      d->domain, d->tot_pages, d->max_pages);
   11.22 -            spin_unlock(&d->page_list_lock);
   11.23 +            spin_unlock(&d->page_alloc_lock);
   11.24              goto free_and_exit;
   11.25          }
   11.26          list_add_tail(&page->list, &d->page_list);
   11.27          page->count_and_flags = PGC_allocated | 1;
   11.28          if ( unlikely(d->tot_pages++ == 0) )
   11.29              get_domain(d);
   11.30 -        spin_unlock(&d->page_list_lock);
   11.31 +        spin_unlock(&d->page_alloc_lock);
   11.32      }
   11.33  
   11.34      return page;
   11.35 @@ -291,27 +291,33 @@ void free_domain_page(struct pfn_info *p
   11.36      int            drop_dom_ref;
   11.37      struct domain *d = page->u.domain;
   11.38  
   11.39 -    /* Deallocation of such pages is handled out of band. */
   11.40      if ( unlikely(IS_XEN_HEAP_FRAME(page)) )
   11.41 -        return;
   11.42 +    {
   11.43 +        spin_lock_recursive(&d->page_alloc_lock);
   11.44 +        drop_dom_ref = (--d->xenheap_pages == 0);
   11.45 +        spin_unlock_recursive(&d->page_alloc_lock);
   11.46 +    }
   11.47 +    else
   11.48 +    {
   11.49 +        page->tlbflush_timestamp = tlbflush_clock;
   11.50 +        page->u.cpu_mask = 1 << d->processor;
   11.51 +        
   11.52 +        /* NB. May recursively lock from domain_relinquish_memory(). */
   11.53 +        spin_lock_recursive(&d->page_alloc_lock);
   11.54 +        list_del(&page->list);
   11.55 +        drop_dom_ref = (--d->tot_pages == 0);
   11.56 +        spin_unlock_recursive(&d->page_alloc_lock);
   11.57  
   11.58 -    page->tlbflush_timestamp = tlbflush_clock;
   11.59 -    page->u.cpu_mask = 1 << d->processor;
   11.60 +        page->count_and_flags = 0;
   11.61 +        
   11.62 +        spin_lock_irqsave(&free_list_lock, flags);
   11.63 +        list_add(&page->list, &free_list);
   11.64 +        free_pfns++;
   11.65 +        spin_unlock_irqrestore(&free_list_lock, flags);
   11.66 +    }
   11.67  
   11.68 -    /* NB. May recursively lock from domain_relinquish_memory(). */
   11.69 -    spin_lock_recursive(&d->page_list_lock);
   11.70 -    list_del(&page->list);
   11.71 -    drop_dom_ref = (--d->tot_pages == 0);
   11.72 -    spin_unlock_recursive(&d->page_list_lock);
   11.73      if ( drop_dom_ref )
   11.74          put_domain(d);
   11.75 -
   11.76 -    page->count_and_flags = 0;
   11.77 -    
   11.78 -    spin_lock_irqsave(&free_list_lock, flags);
   11.79 -    list_add(&page->list, &free_list);
   11.80 -    free_pfns++;
   11.81 -    spin_unlock_irqrestore(&free_list_lock, flags);
   11.82  }
   11.83  
   11.84  
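Two things stand out in the free_domain_page() rewrite above: the lock is taken with spin_lock_recursive() because domain_relinquish_memory() may already hold page_alloc_lock when a page's last reference is dropped, and the decision to drop the domain reference is made under the lock while put_domain() itself runs after the lock is released. A userspace analogue using a recursive pthread mutex (names and structure are illustrative, not the hypervisor's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_alloc_lock;

/* Inner path: normally entered with the lock not held, but it may also
 * be reached while relinquish_memory() below already holds it. */
static void free_domain_page(void)
{
    pthread_mutex_lock(&page_alloc_lock);   /* re-entry is safe: recursive */
    printf("page freed\n");
    pthread_mutex_unlock(&page_alloc_lock);
}

/* Outer path: holds the lock across the whole list walk and may call
 * back into free_domain_page() for some of the pages. */
static void relinquish_memory(void)
{
    pthread_mutex_lock(&page_alloc_lock);
    free_domain_page();
    pthread_mutex_unlock(&page_alloc_lock);
}

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&page_alloc_lock, &attr);

    relinquish_memory();
    free_domain_page();
    return 0;
}
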
   11.85 @@ -337,8 +343,13 @@ void domain_relinquish_memory(struct dom
   11.86          put_page_and_type(&frame_table[pagetable_val(d->mm.pagetable) >>
   11.87                                        PAGE_SHIFT]);
   11.88  
   11.89 +    /* Relinquish Xen-heap pages. Currently this can only be 'shared_info'. */
   11.90 +    page = virt_to_page(d->shared_info);
   11.91 +    if ( test_and_clear_bit(_PGC_allocated, &page->count_and_flags) )
   11.92 +        put_page(page);
   11.93 +
   11.94      /* Relinquish all pages on the domain's allocation list. */
   11.95 -    spin_lock_recursive(&d->page_list_lock); /* may enter free_domain_page() */
   11.96 +    spin_lock_recursive(&d->page_alloc_lock); /* may enter free_domain_page */
   11.97      list_for_each_safe ( ent, tmp, &d->page_list )
   11.98      {
   11.99          page = list_entry(ent, struct pfn_info, list);
  11.100 @@ -367,7 +378,7 @@ void domain_relinquish_memory(struct dom
  11.101          }
  11.102          while ( unlikely(y != x) );
  11.103      }
  11.104 -    spin_unlock_recursive(&d->page_list_lock);
  11.105 +    spin_unlock_recursive(&d->page_alloc_lock);
  11.106  }
  11.107  
  11.108  
  11.109 @@ -774,11 +785,11 @@ int construct_dom0(struct domain *p,
  11.110      }
  11.111  
  11.112      /* Construct a frame-allocation list for the initial domain. */
  11.113 -    for ( pfn = (alloc_start>>PAGE_SHIFT); 
  11.114 -          pfn < (alloc_end>>PAGE_SHIFT); 
  11.115 -          pfn++ )
  11.116 +    for ( mfn = (alloc_start>>PAGE_SHIFT); 
  11.117 +          mfn < (alloc_end>>PAGE_SHIFT); 
  11.118 +          mfn++ )
  11.119      {
  11.120 -        page = &frame_table[pfn];
  11.121 +        page = &frame_table[mfn];
  11.122          page->u.domain        = p;
  11.123          page->type_and_flags  = 0;
  11.124          page->count_and_flags = PGC_allocated | 1;
  11.125 @@ -890,9 +901,11 @@ int construct_dom0(struct domain *p,
  11.126      si->mfn_list     = vphysmap_start;
  11.127  
  11.128      /* Write the phys->machine and machine->phys table entries. */
  11.129 -    for ( pfn = 0; pfn < p->tot_pages; pfn++ )
  11.130 +    for ( mfn = (alloc_start>>PAGE_SHIFT); 
  11.131 +          mfn < (alloc_end>>PAGE_SHIFT); 
  11.132 +          mfn++ )
  11.133      {
  11.134 -        mfn = (alloc_start >> PAGE_SHIFT) + pfn;
  11.135 +        pfn = mfn - (alloc_start>>PAGE_SHIFT);
  11.136          ((unsigned long *)vphysmap_start)[pfn] = mfn;
  11.137          machine_to_phys_mapping[mfn] = pfn;
  11.138      }
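The rewritten construct_dom0 loop above iterates over machine frames (mfn) and derives each pseudo-physical frame number (pfn) as the offset from the start of dom0's contiguous allocation, filling both the phys-to-machine and machine-to-phys tables. A standalone sketch of the same bookkeeping; the frame numbers and array sizes are made up for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    /* Pretend dom0's contiguous allocation spans machine frames 0x100..0x103. */
    unsigned long alloc_start = 0x100UL << PAGE_SHIFT;
    unsigned long alloc_end   = 0x104UL << PAGE_SHIFT;

    unsigned long phys_to_machine[4];      /* indexed by pfn */
    unsigned long machine_to_phys[0x104];  /* indexed by mfn */

    /* Walk machine frames; the pseudo-physical frame number is simply
     * the offset from the start of the allocation. */
    for ( unsigned long mfn = alloc_start >> PAGE_SHIFT;
          mfn < (alloc_end >> PAGE_SHIFT);
          mfn++ )
    {
        unsigned long pfn = mfn - (alloc_start >> PAGE_SHIFT);
        phys_to_machine[pfn] = mfn;
        machine_to_phys[mfn] = pfn;
    }

    for ( unsigned long pfn = 0; pfn < 4; pfn++ )
        printf("pfn %lu -> mfn 0x%lx\n", pfn, phys_to_machine[pfn]);

    return 0;
}
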
    12.1 --- a/xen/common/keyhandler.c	Wed Jun 30 13:10:43 2004 +0000
    12.2 +++ b/xen/common/keyhandler.c	Wed Jun 30 17:10:07 2004 +0000
    12.3 @@ -60,6 +60,8 @@ void do_task_queues(unsigned char key, v
    12.4      unsigned long  flags;
    12.5      struct domain *d;
    12.6      s_time_t       now = NOW();
    12.7 +    struct list_head *ent;
    12.8 +    struct pfn_info  *page;
    12.9  
   12.10      printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
   12.11             (u32)(now>>32), (u32)now); 
   12.12 @@ -68,10 +70,28 @@ void do_task_queues(unsigned char key, v
   12.13  
   12.14      for_each_domain ( d )
   12.15      {
   12.16 -        printk("Xen: DOM %u, CPU %d [has=%c] refcnt=%d nr_pages=%d\n",
   12.17 +        printk("Xen: DOM %u, CPU %d [has=%c] refcnt=%d nr_pages=%d "
   12.18 +               "xenheap_pages=%d\n",
   12.19                 d->domain, d->processor, 
   12.20                 test_bit(DF_RUNNING, &d->flags) ? 'T':'F',
   12.21 -               atomic_read(&d->refcnt), d->tot_pages);
   12.22 +               atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
   12.23 +
   12.24 +        if ( d->tot_pages < 10 )
   12.25 +        {
   12.26 +            list_for_each ( ent, &d->page_list )
   12.27 +            {
   12.28 +                page = list_entry(ent, struct pfn_info, list);
   12.29 +                printk("Page %08x: caf=%08x, taf=%08x\n",
   12.30 +                       page_to_phys(page), page->count_and_flags,
   12.31 +                       page->type_and_flags);
   12.32 +            }
   12.33 +        }
   12.34 +
   12.35 +        page = virt_to_page(d->shared_info);
   12.36 +        printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
   12.37 +               page_to_phys(page), page->count_and_flags,
   12.38 +               page->type_and_flags);
   12.39 +               
   12.40          printk("Guest: upcall_pend = %02x, upcall_mask = %02x\n", 
   12.41                 d->shared_info->vcpu_data[0].evtchn_upcall_pending, 
   12.42                 d->shared_info->vcpu_data[0].evtchn_upcall_mask);
    13.1 --- a/xen/common/memory.c	Wed Jun 30 13:10:43 2004 +0000
    13.2 +++ b/xen/common/memory.c	Wed Jun 30 17:10:07 2004 +0000
    13.3 @@ -907,17 +907,18 @@ static int do_extended_command(unsigned 
    13.4           */
    13.5          if ( d < e )
    13.6          {
    13.7 -            spin_lock(&d->page_list_lock);
    13.8 -            spin_lock(&e->page_list_lock);
    13.9 +            spin_lock(&d->page_alloc_lock);
   13.10 +            spin_lock(&e->page_alloc_lock);
   13.11          }
   13.12          else
   13.13          {
   13.14 -            spin_lock(&e->page_list_lock);
   13.15 -            spin_lock(&d->page_list_lock);
   13.16 +            spin_lock(&e->page_alloc_lock);
   13.17 +            spin_lock(&d->page_alloc_lock);
   13.18          }
   13.19  
   13.20          /* A domain shouldn't have PGC_allocated pages when it is dying. */
   13.21 -        if ( unlikely(test_bit(DF_DYING, &e->flags)) )
   13.22 +        if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
   13.23 +             unlikely(IS_XEN_HEAP_FRAME(page)) )
   13.24          {
   13.25              okay = 0;
   13.26              goto reassign_fail;
   13.27 @@ -967,8 +968,8 @@ static int do_extended_command(unsigned 
   13.28          list_add_tail(&page->list, &e->page_list);
   13.29  
   13.30      reassign_fail:        
   13.31 -        spin_unlock(&d->page_list_lock);
   13.32 -        spin_unlock(&e->page_list_lock);
   13.33 +        spin_unlock(&d->page_alloc_lock);
   13.34 +        spin_unlock(&e->page_alloc_lock);
   13.35          break;
   13.36  
   13.37      case MMUEXT_RESET_SUBJECTDOM:
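The reassignment path above must hold both domains' page_alloc_locks at once; taking them in pointer order (the d < e test) gives every CPU the same acquisition order, so two reassignments between the same pair of domains cannot deadlock AB-BA style. A userspace sketch of address-ordered locking (struct domain here is a stand-in, not the hypervisor's):

#include <pthread.h>
#include <stdio.h>

struct domain { pthread_mutex_t page_alloc_lock; };

/* Always take the lock of the lower-addressed domain first, mirroring
 * the hypervisor's d < e check, so all callers agree on the order. */
static void lock_pair(struct domain *d, struct domain *e)
{
    if ( d < e )
    {
        pthread_mutex_lock(&d->page_alloc_lock);
        pthread_mutex_lock(&e->page_alloc_lock);
    }
    else
    {
        pthread_mutex_lock(&e->page_alloc_lock);
        pthread_mutex_lock(&d->page_alloc_lock);
    }
}

static void unlock_pair(struct domain *d, struct domain *e)
{
    pthread_mutex_unlock(&d->page_alloc_lock);
    pthread_mutex_unlock(&e->page_alloc_lock);
}

int main(void)
{
    struct domain a = { PTHREAD_MUTEX_INITIALIZER };
    struct domain b = { PTHREAD_MUTEX_INITIALIZER };

    lock_pair(&a, &b);
    printf("both page_alloc_locks held\n");
    unlock_pair(&a, &b);
    return 0;
}
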
    14.1 --- a/xen/include/xen/mm.h	Wed Jun 30 13:10:43 2004 +0000
    14.2 +++ b/xen/include/xen/mm.h	Wed Jun 30 17:10:07 2004 +0000
    14.3 @@ -91,11 +91,15 @@ struct pfn_info
    14.4  #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
    14.5      do {                                                                    \
    14.6          (_pfn)->u.domain = (_dom);                                          \
    14.7 +        /* The incremented type count is intended to pin to 'writeable'. */ \
    14.8 +        (_pfn)->type_and_flags  = PGT_writeable_page | PGT_validated | 1;   \
    14.9          wmb(); /* install valid domain ptr before updating refcnt. */       \
   14.10 +        spin_lock(&(_dom)->page_alloc_lock);                                \
   14.11          /* _dom holds an allocation reference */                            \
   14.12          (_pfn)->count_and_flags = PGC_allocated | 1;                        \
   14.13 -        /* The incremented type count is intended to pin to 'writeable'. */ \
   14.14 -        (_pfn)->type_and_flags  = PGT_writeable_page | PGT_validated | 1;   \
   14.15 +        if ( unlikely((_dom)->xenheap_pages++ == 0) )                       \
   14.16 +            get_domain(_dom);                                               \
   14.17 +        spin_unlock(&(_dom)->page_alloc_lock);                              \
   14.18      } while ( 0 )
   14.19  
   14.20  extern struct pfn_info *frame_table;
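SHARE_PFN_WITH_DOMAIN now accounts the shared frame as a Xen-heap page under page_alloc_lock and takes a domain reference when the first such page appears; the Xen-heap branch added to free_domain_page() is the matching release. A minimal sketch of that paired accounting with a pthread mutex (all identifiers are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int xenheap_pages;
static unsigned int domain_refcnt = 1;

static void get_domain(void) { domain_refcnt++; }
static void put_domain(void) { domain_refcnt--; }

/* Sharing the first Xen-heap page with the domain pins the domain... */
static void share_xenheap_page(void)
{
    pthread_mutex_lock(&page_alloc_lock);
    if ( xenheap_pages++ == 0 )
        get_domain();
    pthread_mutex_unlock(&page_alloc_lock);
}

/* ...and freeing the last one releases that reference, with the
 * decision made under the lock and put_domain() called outside it. */
static void free_xenheap_page(void)
{
    int drop_dom_ref;

    pthread_mutex_lock(&page_alloc_lock);
    drop_dom_ref = (--xenheap_pages == 0);
    pthread_mutex_unlock(&page_alloc_lock);

    if ( drop_dom_ref )
        put_domain();
}

int main(void)
{
    share_xenheap_page();
    free_xenheap_page();
    printf("domain_refcnt=%u xenheap_pages=%u\n", domain_refcnt, xenheap_pages);
    return 0;
}
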
    15.1 --- a/xen/include/xen/sched.h	Wed Jun 30 13:10:43 2004 +0000
    15.2 +++ b/xen/include/xen/sched.h	Wed Jun 30 17:10:07 2004 +0000
    15.3 @@ -89,10 +89,11 @@ struct domain
    15.4      char     name[MAX_DOMAIN_NAME];
    15.5      s_time_t create_time;
    15.6  
    15.7 -    spinlock_t       page_list_lock;
    15.8 -    struct list_head page_list;
    15.9 -    unsigned int     tot_pages; /* number of pages currently possesed */
   15.10 -    unsigned int     max_pages; /* max number of pages that can be possesed */
    15.11 +    spinlock_t       page_alloc_lock; /* protects all the following fields   */
    15.12 +    struct list_head page_list;       /* linked list, of size tot_pages      */
    15.13 +    unsigned int     tot_pages;       /* number of pages currently possessed */
    15.14 +    unsigned int     max_pages;       /* maximum value for tot_pages          */
    15.15 +    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap      */
   15.16  
   15.17      /* Scheduling. */
   15.18      struct list_head run_list;