ia64/xen-unstable

changeset 1133:cd5a94809f8a

bitkeeper revision 1.755 (403f5fffwYqT6Gw88yRJHe04lMgpjg)

Many files:
Further cleanups to the Xen pagetable interface.
author kaf24@scramble.cl.cam.ac.uk
date Fri Feb 27 15:19:27 2004 +0000 (2004-02-27)
parents 22160502f0cd
children c71a42306245
files tools/xc/lib/xc_linux_build.c tools/xc/lib/xc_linux_restore.c tools/xc/lib/xc_netbsd_build.c tools/xc/lib/xc_private.c tools/xc/lib/xc_private.h xen/common/event_channel.c xen/common/memory.c xen/include/asm-i386/page.h xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xeno/sched.h xenolinux-2.4.25-sparse/arch/xeno/mm/init.c xenolinux-2.4.25-sparse/arch/xeno/mm/ioremap.c xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h xenolinux-2.4.25-sparse/mm/memory.c
line diff
     1.1 --- a/tools/xc/lib/xc_linux_build.c	Thu Feb 26 20:21:31 2004 +0000
     1.2 +++ b/tools/xc/lib/xc_linux_build.c	Fri Feb 27 15:19:27 2004 +0000
     1.3 @@ -43,27 +43,6 @@ static int get_pfn_list(int xc_handle,
     1.4      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
     1.5  }
     1.6  
     1.7 -static int send_pgupdates(int xc_handle, mmu_update_t *updates, int nr_updates)
     1.8 -{
     1.9 -    int ret = -1;
    1.10 -    privcmd_hypercall_t hypercall;
    1.11 -
    1.12 -    hypercall.op     = __HYPERVISOR_mmu_update;
    1.13 -    hypercall.arg[0] = (unsigned long)updates;
    1.14 -    hypercall.arg[1] = (unsigned long)nr_updates;
    1.15 -
    1.16 -    if ( mlock(updates, nr_updates * sizeof(*updates)) != 0 )
    1.17 -        goto out1;
    1.18 -
    1.19 -    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    1.20 -        goto out2;
    1.21 -
    1.22 -    ret = 0;
    1.23 -
    1.24 - out2: (void)munlock(updates, nr_updates * sizeof(*updates));
    1.25 - out1: return ret;
    1.26 -}
    1.27 -
    1.28  /* Read the kernel header, extracting the image size and load address. */
    1.29  static int read_kernel_header(gzFile gfd, long dom_size, 
    1.30                                unsigned long *load_addr)
    1.31 @@ -109,16 +88,15 @@ static int setup_guestos(int xc_handle,
    1.32      l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    1.33      l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    1.34      unsigned long *page_array = NULL;
    1.35 -    mmu_update_t *pgt_update_arr = NULL, *pgt_updates = NULL;
    1.36      int alloc_index, num_pt_pages;
    1.37      unsigned long l2tab;
    1.38      unsigned long l1tab;
    1.39 -    unsigned long num_pgt_updates = 0;
    1.40      unsigned long count, pt_start, i, j;
    1.41      unsigned long initrd_addr = 0, initrd_len = 0;
    1.42      start_info_t *start_info;
    1.43      shared_info_t *shared_info;
    1.44      unsigned long ksize;
    1.45 +    mmu_t *mmu = NULL;
    1.46      int pm_handle;
    1.47  
    1.48      memset(builddomain, 0, sizeof(*builddomain));
    1.49 @@ -126,10 +104,7 @@ static int setup_guestos(int xc_handle,
    1.50      if ( (pm_handle = init_pfn_mapper()) < 0 )
    1.51          goto error_out;
    1.52  
    1.53 -    pgt_updates = malloc((tot_pages + 1) * sizeof(mmu_update_t));
    1.54 -    page_array = malloc(tot_pages * sizeof(unsigned long));
    1.55 -    pgt_update_arr = pgt_updates;
    1.56 -    if ( (pgt_update_arr == NULL) || (page_array == NULL) )
    1.57 +    if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
    1.58      {
    1.59          PERROR("Could not allocate memory");
    1.60          goto error_out;
    1.61 @@ -210,6 +185,9 @@ static int setup_guestos(int xc_handle,
    1.62      alloc_index--;
    1.63      builddomain->ctxt.pt_base = l2tab;
    1.64  
    1.65 +    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
    1.66 +        goto error_out;
    1.67 +
    1.68      /* Initialise the page tables. */
    1.69      if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
    1.70          goto error_out;
    1.71 @@ -236,11 +214,10 @@ static int setup_guestos(int xc_handle,
    1.72              *vl1e &= ~_PAGE_RW;
    1.73          vl1e++;
    1.74  
    1.75 -        pgt_updates->ptr = 
    1.76 -            (page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    1.77 -        pgt_updates->val = count;
    1.78 -        pgt_updates++;
    1.79 -        num_pgt_updates++;
    1.80 +        if ( add_mmu_update(xc_handle, mmu,
    1.81 +                            (page_array[count] << PAGE_SHIFT) | 
    1.82 +                            MMU_MACHPHYS_UPDATE, count) )
    1.83 +            goto error_out;
    1.84      }
    1.85      unmap_pfn(pm_handle, vl1tab);
    1.86      unmap_pfn(pm_handle, vl2tab);
    1.87 @@ -249,10 +226,9 @@ static int setup_guestos(int xc_handle,
    1.88       * Pin down l2tab addr as page dir page - causes hypervisor to provide
    1.89       * correct protection for the page
    1.90       */ 
    1.91 -    pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
    1.92 -    pgt_updates->val = MMUEXT_PIN_L2_TABLE;
    1.93 -    pgt_updates++;
    1.94 -    num_pgt_updates++;
    1.95 +    if ( add_mmu_update(xc_handle, mmu,
    1.96 +                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
    1.97 +        goto error_out;
    1.98  
    1.99      *virt_startinfo_addr =
   1.100          virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
   1.101 @@ -267,7 +243,6 @@ static int setup_guestos(int xc_handle,
   1.102      start_info->flags       = 0;
   1.103      strncpy(start_info->cmd_line, cmdline, MAX_CMD_LEN);
   1.104      start_info->cmd_line[MAX_CMD_LEN-1] = '\0';
   1.105 -
   1.106      unmap_pfn(pm_handle, start_info);
   1.107  
   1.108      /* shared_info page starts its life empty. */
   1.109 @@ -276,20 +251,21 @@ static int setup_guestos(int xc_handle,
   1.110      unmap_pfn(pm_handle, shared_info);
   1.111  
   1.112      /* Send the page update requests down to the hypervisor. */
   1.113 -    if ( send_pgupdates(xc_handle, pgt_update_arr, num_pgt_updates) < 0 )
   1.114 +    if ( finish_mmu_updates(xc_handle, mmu) )
   1.115          goto error_out;
   1.116  
   1.117 +    free(mmu);
   1.118 +    (void)close_pfn_mapper(pm_handle);
   1.119      free(page_array);
   1.120 -    free(pgt_update_arr);
   1.121      return 0;
   1.122  
   1.123   error_out:
   1.124 +    if ( mmu != NULL )
   1.125 +        free(mmu);
   1.126      if ( pm_handle >= 0 )
   1.127          (void)close_pfn_mapper(pm_handle);
   1.128 -    if ( page_array )
   1.129 +    if ( page_array != NULL )
   1.130          free(page_array);
   1.131 -    if ( pgt_update_arr )
   1.132 -        free(pgt_update_arr);
   1.133      return -1;
   1.134  }
   1.135  
     2.1 --- a/tools/xc/lib/xc_linux_restore.c	Thu Feb 26 20:21:31 2004 +0000
     2.2 +++ b/tools/xc/lib/xc_linux_restore.c	Fri Feb 27 15:19:27 2004 +0000
     2.3 @@ -43,56 +43,6 @@ static int get_pfn_list(int xc_handle,
     2.4      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
     2.5  }
     2.6  
     2.7 -#define MAX_MMU_UPDATES 1024
     2.8 -
     2.9 -static int flush_mmu_updates(int xc_handle,
    2.10 -                             mmu_update_t *mmu_updates,
    2.11 -                             int *mmu_update_idx)
    2.12 -{
    2.13 -    int err = 0;
    2.14 -    privcmd_hypercall_t hypercall;
    2.15 -
    2.16 -    if ( *mmu_update_idx == 0 )
    2.17 -        return 0;
    2.18 -
    2.19 -    hypercall.op     = __HYPERVISOR_mmu_update;
    2.20 -    hypercall.arg[0] = (unsigned long)mmu_updates;
    2.21 -    hypercall.arg[1] = (unsigned long)*mmu_update_idx;
    2.22 -
    2.23 -    if ( mlock(mmu_updates, sizeof(mmu_updates)) != 0 )
    2.24 -    {
    2.25 -        PERROR("Could not lock pagetable update array");
    2.26 -        err = 1;
    2.27 -        goto out;
    2.28 -    }
    2.29 -
    2.30 -    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    2.31 -    {
    2.32 -        ERROR("Failure when submitting mmu updates");
    2.33 -        err = 1;
    2.34 -    }
    2.35 -
    2.36 -    *mmu_update_idx = 0;
    2.37 -    
    2.38 -    (void)munlock(mmu_updates, sizeof(mmu_updates));
    2.39 -
    2.40 - out:
    2.41 -    return err;
    2.42 -}
    2.43 -
    2.44 -static int add_mmu_update(int xc_handle,
    2.45 -                          mmu_update_t *mmu_updates,
    2.46 -                          int *mmu_update_idx,
    2.47 -                          unsigned long ptr, 
    2.48 -                          unsigned long val)
    2.49 -{
    2.50 -    mmu_updates[*mmu_update_idx].ptr = ptr;
    2.51 -    mmu_updates[*mmu_update_idx].val = val;
    2.52 -    if ( ++*mmu_update_idx == MAX_MMU_UPDATES )
    2.53 -        return flush_mmu_updates(xc_handle, mmu_updates, mmu_update_idx);
    2.54 -    return 0;
    2.55 -}
    2.56 -
    2.57  static int checked_read(gzFile fd, void *buf, size_t count)
    2.58  {
    2.59      int rc;
    2.60 @@ -147,8 +97,7 @@ int xc_linux_restore(int xc_handle,
    2.61      int    fd;
    2.62      gzFile gfd;
    2.63  
    2.64 -    mmu_update_t mmu_updates[MAX_MMU_UPDATES];
    2.65 -    int mmu_update_idx = 0;
    2.66 +    mmu_t *mmu = NULL;
    2.67  
    2.68      int pm_handle = -1;
    2.69  
    2.70 @@ -252,6 +201,12 @@ int xc_linux_restore(int xc_handle,
    2.71          goto out;
    2.72      }
    2.73  
    2.74 +    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
    2.75 +    {
    2.76 +        ERROR("Could not initialise for MMU updates");
    2.77 +        goto out;
    2.78 +    }
    2.79 +
    2.80      verbose_printf("Reloading memory pages:   0%%");
    2.81  
    2.82      /*
    2.83 @@ -323,7 +278,7 @@ int xc_linux_restore(int xc_handle,
    2.84  
    2.85          unmap_pfn(pm_handle, ppage);
    2.86  
    2.87 -        if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
    2.88 +        if ( add_mmu_update(xc_handle, mmu,
    2.89                              (mfn<<PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, i) )
    2.90              goto out;
    2.91      }
    2.92 @@ -336,7 +291,7 @@ int xc_linux_restore(int xc_handle,
    2.93      {
    2.94          if ( pfn_type[i] == L1TAB )
    2.95          {
    2.96 -            if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
    2.97 +            if ( add_mmu_update(xc_handle, mmu,
    2.98                                  (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 
    2.99                                  MMU_EXTENDED_COMMAND,
   2.100                                  MMUEXT_PIN_L1_TABLE) )
   2.101 @@ -344,7 +299,7 @@ int xc_linux_restore(int xc_handle,
   2.102          }
   2.103          else if ( pfn_type[i] == L2TAB )
   2.104          {
   2.105 -            if ( add_mmu_update(xc_handle, mmu_updates, &mmu_update_idx,
   2.106 +            if ( add_mmu_update(xc_handle, mmu,
   2.107                                  (pfn_to_mfn_table[i]<<PAGE_SHIFT) | 
   2.108                                  MMU_EXTENDED_COMMAND,
   2.109                                  MMUEXT_PIN_L2_TABLE) )
   2.110 @@ -352,8 +307,7 @@ int xc_linux_restore(int xc_handle,
   2.111          }
   2.112      }
   2.113  
   2.114 -
   2.115 -    if ( flush_mmu_updates(xc_handle, mmu_updates, &mmu_update_idx) )
   2.116 +    if ( finish_mmu_updates(xc_handle, mmu) )
   2.117          goto out;
   2.118  
   2.119      verbose_printf("\b\b\b\b100%%\nMemory reloaded.\n");
   2.120 @@ -455,6 +409,9 @@ int xc_linux_restore(int xc_handle,
   2.121      rc = do_dom0_op(xc_handle, &op);
   2.122  
   2.123   out:
   2.124 +    if ( mmu != NULL )
   2.125 +        free(mmu);
   2.126 +
   2.127      if ( rc != 0 )
   2.128      {
   2.129          if ( dom != 0 )
     3.1 --- a/tools/xc/lib/xc_netbsd_build.c	Thu Feb 26 20:21:31 2004 +0000
     3.2 +++ b/tools/xc/lib/xc_netbsd_build.c	Fri Feb 27 15:19:27 2004 +0000
     3.3 @@ -53,27 +53,6 @@ static int get_pfn_list(int xc_handle,
     3.4      return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
     3.5  }
     3.6  
     3.7 -static int send_pgupdates(int xc_handle, mmu_update_t *updates, int nr_updates)
     3.8 -{
     3.9 -    int ret = -1;
    3.10 -    privcmd_hypercall_t hypercall;
    3.11 -
    3.12 -    hypercall.op     = __HYPERVISOR_mmu_update;
    3.13 -    hypercall.arg[0] = (unsigned long)updates;
    3.14 -    hypercall.arg[1] = (unsigned long)nr_updates;
    3.15 -
    3.16 -    if ( mlock(updates, nr_updates * sizeof(*updates)) != 0 )
    3.17 -        goto out1;
    3.18 -
    3.19 -    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    3.20 -        goto out2;
    3.21 -
    3.22 -    ret = 0;
    3.23 -
    3.24 - out2: (void)munlock(updates, nr_updates * sizeof(*updates));
    3.25 - out1: return ret;
    3.26 -}
    3.27 -
    3.28  static int setup_guestos(int xc_handle,
    3.29                           u64 dom, 
    3.30                           gzFile kernel_gfd, 
    3.31 @@ -87,16 +66,15 @@ static int setup_guestos(int xc_handle,
    3.32      l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
    3.33      l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
    3.34      unsigned long *page_array = NULL;
    3.35 -    mmu_update_t *pgt_update_arr = NULL, *pgt_updates = NULL;
    3.36      int alloc_index, num_pt_pages;
    3.37      unsigned long l2tab;
    3.38      unsigned long l1tab;
    3.39 -    unsigned long num_pgt_updates = 0;
    3.40      unsigned long count, pt_start;
    3.41      unsigned long symtab_addr = 0, symtab_len = 0;
    3.42      start_info_t *start_info;
    3.43      shared_info_t *shared_info;
    3.44      unsigned long ksize;
    3.45 +    mmu_t *mmu = NULL;
    3.46      int pm_handle;
    3.47  
    3.48      memset(builddomain, 0, sizeof(*builddomain));
    3.49 @@ -104,10 +82,7 @@ static int setup_guestos(int xc_handle,
    3.50      if ( (pm_handle = init_pfn_mapper()) < 0 )
    3.51          goto error_out;
    3.52  
    3.53 -    pgt_updates = malloc((tot_pages + 1) * sizeof(mmu_update_t));
    3.54 -    page_array = malloc(tot_pages * sizeof(unsigned long));
    3.55 -    pgt_update_arr = pgt_updates;
    3.56 -    if ( (pgt_update_arr == NULL) || (page_array == NULL) )
    3.57 +    if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
    3.58      {
    3.59          PERROR("Could not allocate memory");
    3.60          goto error_out;
    3.61 @@ -144,7 +119,10 @@ static int setup_guestos(int xc_handle,
    3.62      l2tab = page_array[alloc_index] << PAGE_SHIFT;
    3.63      alloc_index--;
    3.64      builddomain->ctxt.pt_base = l2tab;
    3.65 -
    3.66 +    
    3.67 +    if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
    3.68 +        goto error_out;
    3.69 +    
    3.70      /* Initialise the page tables. */
    3.71      if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
    3.72          goto error_out;
    3.73 @@ -171,11 +149,10 @@ static int setup_guestos(int xc_handle,
    3.74              *vl1e &= ~_PAGE_RW;
    3.75          vl1e++;
    3.76  
    3.77 -        pgt_updates->ptr = 
    3.78 -            (page_array[count] << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    3.79 -        pgt_updates->val = count;
    3.80 -        pgt_updates++;
    3.81 -        num_pgt_updates++;
    3.82 +        if ( add_mmu_update(xc_handle, mmu,
    3.83 +                            (page_array[count] << PAGE_SHIFT) | 
    3.84 +                            MMU_MACHPHYS_UPDATE, count) )
    3.85 +            goto error_out;
    3.86      }
    3.87      unmap_pfn(pm_handle, vl1tab);
    3.88      unmap_pfn(pm_handle, vl2tab);
    3.89 @@ -184,10 +161,9 @@ static int setup_guestos(int xc_handle,
    3.90       * Pin down l2tab addr as page dir page - causes hypervisor to provide
    3.91       * correct protection for the page
    3.92       */ 
    3.93 -    pgt_updates->ptr = l2tab | MMU_EXTENDED_COMMAND;
    3.94 -    pgt_updates->val = MMUEXT_PIN_L2_TABLE;
    3.95 -    pgt_updates++;
    3.96 -    num_pgt_updates++;
    3.97 +    if ( add_mmu_update(xc_handle, mmu,
    3.98 +                        l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
    3.99 +        goto error_out;
   3.100  
   3.101      *virt_startinfo_addr =
   3.102          *virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
   3.103 @@ -202,7 +178,6 @@ static int setup_guestos(int xc_handle,
   3.104      start_info->flags       = 0;
   3.105      strncpy(start_info->cmd_line, cmdline, MAX_CMD_LEN);
   3.106      start_info->cmd_line[MAX_CMD_LEN-1] = '\0';
   3.107 -
   3.108      unmap_pfn(pm_handle, start_info);
   3.109  
   3.110      /* shared_info page starts its life empty. */
   3.111 @@ -211,20 +186,21 @@ static int setup_guestos(int xc_handle,
   3.112      unmap_pfn(pm_handle, shared_info);
   3.113  
   3.114      /* Send the page update requests down to the hypervisor. */
   3.115 -    if ( send_pgupdates(xc_handle, pgt_update_arr, num_pgt_updates) < 0 )
   3.116 +    if ( finish_mmu_updates(xc_handle, mmu) )
   3.117          goto error_out;
   3.118  
   3.119 +    free(mmu);
   3.120 +    (void)close_pfn_mapper(pm_handle);
   3.121      free(page_array);
   3.122 -    free(pgt_update_arr);
   3.123      return 0;
   3.124  
   3.125   error_out:
   3.126 +    if ( mmu != NULL )
   3.127 +        free(mmu);
   3.128      if ( pm_handle >= 0 )
   3.129          (void)close_pfn_mapper(pm_handle);
   3.130      if ( page_array == NULL )
   3.131          free(page_array);
   3.132 -    if ( pgt_update_arr == NULL )
   3.133 -        free(pgt_update_arr);
   3.134      return -1;
   3.135  }
   3.136  
     4.1 --- a/tools/xc/lib/xc_private.c	Thu Feb 26 20:21:31 2004 +0000
     4.2 +++ b/tools/xc/lib/xc_private.c	Fri Feb 27 15:19:27 2004 +0000
     4.3 @@ -38,3 +38,73 @@ void unmap_pfn(int pm_handle, void *vadd
     4.4  {
     4.5      (void)munmap(vaddr, PAGE_SIZE);
     4.6  }
     4.7 +
     4.8 +#define FIRST_MMU_UPDATE 2
     4.9 +
    4.10 +static int flush_mmu_updates(int xc_handle, mmu_t *mmu)
    4.11 +{
    4.12 +    int err = 0;
    4.13 +    privcmd_hypercall_t hypercall;
    4.14 +
    4.15 +    if ( mmu->idx == FIRST_MMU_UPDATE )
    4.16 +        return 0;
    4.17 +
    4.18 +    /* The first two requests set the correct subject domain. */
    4.19 +    mmu->updates[0].val  = (unsigned long)(mmu->subject<<16) & ~0xFFFFUL;
    4.20 +    mmu->updates[0].ptr  = (unsigned long)(mmu->subject<< 0) & ~0xFFFFUL;
    4.21 +    mmu->updates[1].val  = (unsigned long)(mmu->subject>>16) & ~0xFFFFUL;
    4.22 +    mmu->updates[1].ptr  = (unsigned long)(mmu->subject>>32) & ~0xFFFFUL;
    4.23 +    mmu->updates[0].ptr |= MMU_EXTENDED_COMMAND;
    4.24 +    mmu->updates[0].val |= MMUEXT_SET_SUBJECTDOM_L;
    4.25 +    mmu->updates[1].ptr |= MMU_EXTENDED_COMMAND;
    4.26 +    mmu->updates[1].val |= MMUEXT_SET_SUBJECTDOM_H;
    4.27 +
    4.28 +    hypercall.op     = __HYPERVISOR_mmu_update;
    4.29 +    hypercall.arg[0] = (unsigned long)mmu->updates;
    4.30 +    hypercall.arg[1] = (unsigned long)mmu->idx;
    4.31 +
    4.32 +    if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 )
    4.33 +    {
    4.34 +        PERROR("Could not lock pagetable update array");
    4.35 +        err = 1;
    4.36 +        goto out;
    4.37 +    }
    4.38 +
    4.39 +    if ( do_xen_hypercall(xc_handle, &hypercall) < 0 )
    4.40 +    {
    4.41 +        ERROR("Failure when submitting mmu updates");
    4.42 +        err = 1;
    4.43 +    }
    4.44 +
    4.45 +    mmu->idx = FIRST_MMU_UPDATE;
    4.46 +    
    4.47 +    (void)munlock(mmu->updates, sizeof(mmu->updates));
    4.48 +
    4.49 + out:
    4.50 +    return err;
    4.51 +}
    4.52 +
    4.53 +mmu_t *init_mmu_updates(int xc_handle, domid_t dom)
    4.54 +{
    4.55 +    mmu_t *mmu = malloc(sizeof(mmu_t));
    4.56 +    if ( mmu == NULL )
    4.57 +        return mmu;
    4.58 +    mmu->idx     = FIRST_MMU_UPDATE;
    4.59 +    mmu->subject = dom;
    4.60 +    return mmu;
    4.61 +}
    4.62 +
    4.63 +int add_mmu_update(int xc_handle, mmu_t *mmu, 
    4.64 +                   unsigned long ptr, unsigned long val)
    4.65 +{
    4.66 +    mmu->updates[mmu->idx].ptr = ptr;
    4.67 +    mmu->updates[mmu->idx].val = val;
    4.68 +    if ( ++mmu->idx == MAX_MMU_UPDATES )
    4.69 +        return flush_mmu_updates(xc_handle, mmu);
    4.70 +    return 0;
    4.71 +}
    4.72 +
    4.73 +int finish_mmu_updates(int xc_handle, mmu_t *mmu)
    4.74 +{
    4.75 +    return flush_mmu_updates(xc_handle, mmu);
    4.76 +}
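
The two slots reserved by FIRST_MMU_UPDATE carry the subject domain:
flush_mmu_updates() packs the 64-bit domid into the upper halves of the
ptr and val fields of two MMUEXT_SET_SUBJECTDOM requests, leaving the
low 16 bits of ptr for MMU_EXTENDED_COMMAND and the low byte of val for
the subcommand. A minimal sketch of the round trip (the encode side
mirrors flush_mmu_updates() above, the decode side mirrors
do_extended_command() in xen/common/memory.c; the helper names are
illustrative, not part of the patch):

    /* Sketch only: how a 64-bit domid survives the two requests.
     * Per request, ptr[31:16] and val[31:16] carry domid bits;
     * ptr[15:0] holds MMU_EXTENDED_COMMAND, val[7:0] the subcommand. */
    static void encode_subjectdom(mmu_update_t u[2], domid_t dom)
    {
        u[0].ptr = ((unsigned long)(dom <<  0) & ~0xFFFFUL) | MMU_EXTENDED_COMMAND;
        u[0].val = ((unsigned long)(dom << 16) & ~0xFFFFUL) | MMUEXT_SET_SUBJECTDOM_L;
        u[1].ptr = ((unsigned long)(dom >> 32) & ~0xFFFFUL) | MMU_EXTENDED_COMMAND;
        u[1].val = ((unsigned long)(dom >> 16) & ~0xFFFFUL) | MMUEXT_SET_SUBJECTDOM_H;
    }

    static domid_t decode_subjectdom(const mmu_update_t u[2])
    {
        domid_t dom;
        dom  = (domid_t)((u[0].ptr & ~0xFFFFUL) | (u[0].val >> 16));       /* dom[31:0]  */
        dom |= (domid_t)((u[1].ptr & ~0xFFFFUL) | (u[1].val >> 16)) << 32; /* dom[63:32] */
        return dom;
    }
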
     5.1 --- a/tools/xc/lib/xc_private.h	Thu Feb 26 20:21:31 2004 +0000
     5.2 +++ b/tools/xc/lib/xc_private.h	Fri Feb 27 15:19:27 2004 +0000
     5.3 @@ -153,4 +153,18 @@ void *map_pfn_writeable(int pm_handle, u
     5.4  void *map_pfn_readonly(int pm_handle, unsigned long pfn);
     5.5  void unmap_pfn(int pm_handle, void *vaddr);
     5.6  
     5.7 +/*
     5.8 + * MMU updates.
     5.9 + */
    5.10 +#define MAX_MMU_UPDATES 1024
    5.11 +typedef struct {
    5.12 +    mmu_update_t updates[MAX_MMU_UPDATES];
    5.13 +    int          idx;
    5.14 +    domid_t      subject;
    5.15 +} mmu_t;
    5.16 +mmu_t *init_mmu_updates(int xc_handle, domid_t dom);
    5.17 +int add_mmu_update(int xc_handle, mmu_t *mmu, 
    5.18 +                   unsigned long ptr, unsigned long val);
    5.19 +int finish_mmu_updates(int xc_handle, mmu_t *mmu);
    5.20 +
    5.21  #endif /* __XC_PRIVATE_H__ */
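
The effect on callers is visible in the three tools files above: instead
of sizing a request array against tot_pages and carrying an index by
hand, a caller allocates one self-flushing batch. A minimal usage sketch
(the wrapper function is hypothetical; error paths are shortened):

    /* Hypothetical helper showing the intended call sequence for the
     * batched interface (cf. the loop in xc_linux_build.c above). */
    static int set_m2p_entries(int xc_handle, domid_t dom,
                               unsigned long *page_array,
                               unsigned long nr_pages)
    {
        mmu_t *mmu;
        unsigned long i;

        if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
            return -1;

        for ( i = 0; i < nr_pages; i++ )
        {
            /* add_mmu_update() flushes automatically once
             * MAX_MMU_UPDATES entries have accumulated. */
            if ( add_mmu_update(xc_handle, mmu,
                                (page_array[i] << PAGE_SHIFT) |
                                MMU_MACHPHYS_UPDATE, i) )
                goto fail;
        }

        /* Push out any requests still queued in the batch. */
        if ( finish_mmu_updates(xc_handle, mmu) )
            goto fail;

        free(mmu);
        return 0;

     fail:
        free(mmu);
        return -1;
    }
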
     6.1 --- a/xen/common/event_channel.c	Thu Feb 26 20:21:31 2004 +0000
     6.2 +++ b/xen/common/event_channel.c	Fri Feb 27 15:19:27 2004 +0000
     6.3 @@ -165,7 +165,7 @@ static long __event_channel_close(struct
     6.4      struct task_struct *p2 = NULL;
     6.5      event_channel_t    *chn1, *chn2;
     6.6      int                 port2;
     6.7 -    unsigned long       cpu_mask;
     6.8 +    unsigned long       cpu_mask = 0;
     6.9      long                rc = 0;
    6.10  
    6.11   again:
    6.12 @@ -214,19 +214,20 @@ static long __event_channel_close(struct
    6.13          if ( chn2[port2].remote_dom != p1 )
    6.14              BUG();
    6.15  
    6.16 -        chn2[port2].state       = ECS_ZOMBIE;
    6.17 +        chn2[port2].state       = ECS_DISCONNECTED;
    6.18          chn2[port2].remote_dom  = NULL;
    6.19          chn2[port2].remote_port = 0xFFFF;
    6.20  
    6.21 -        cpu_mask  = set_event_disc(p1, port1);
    6.22          cpu_mask |= set_event_disc(p2, port2);
    6.23 -        guest_event_notify(cpu_mask);
    6.24      }
    6.25  
    6.26      chn1[port1].state       = ECS_FREE;
    6.27      chn1[port1].remote_dom  = NULL;
    6.28      chn1[port1].remote_port = 0xFFFF;
    6.29      
    6.30 +    cpu_mask |= set_event_disc(p1, port1);
    6.31 +    guest_event_notify(cpu_mask);
    6.32 +
    6.33   out:
    6.34      spin_unlock(&p1->event_channel_lock);
    6.35      put_task_struct(p1);
    6.36 @@ -324,7 +325,7 @@ static long event_channel_status(evtchn_
    6.37      case ECS_FREE:
    6.38          status->status = EVTCHNSTAT_closed;
    6.39          break;
    6.40 -    case ECS_ZOMBIE:
    6.41 +    case ECS_DISCONNECTED:
    6.42          status->status = EVTCHNSTAT_disconnected;
    6.43          break;
    6.44      case ECS_CONNECTED:
     7.1 --- a/xen/common/memory.c	Thu Feb 26 20:21:31 2004 +0000
     7.2 +++ b/xen/common/memory.c	Fri Feb 27 15:19:27 2004 +0000
     7.3 @@ -1,7 +1,7 @@
     7.4  /******************************************************************************
     7.5   * memory.c
     7.6   * 
     7.7 - * Copyright (c) 2002 K A Fraser
     7.8 + * Copyright (c) 2002-2004 K A Fraser
     7.9   * 
    7.10   * This program is free software; you can redistribute it and/or modify
    7.11   * it under the terms of the GNU General Public License as published by
    7.12 @@ -149,9 +149,12 @@
    7.13  
    7.14  static int alloc_l2_table(struct pfn_info *page);
    7.15  static int alloc_l1_table(struct pfn_info *page);
    7.16 -static int get_page_from_pagenr(unsigned long page_nr);
    7.17 +static int get_page_from_pagenr(unsigned long page_nr, int check_level);
    7.18  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
    7.19 -                                         unsigned int type);
    7.20 +                                         unsigned int type,
    7.21 +                                         int check_level);
    7.22 +#define CHECK_STRICT 0 /* Subject domain must own the page                  */
    7.23 +#define CHECK_ANYDOM 1 /* Any domain may own the page (if subject is priv.) */
    7.24  
    7.25  static void free_l2_table(struct pfn_info *page);
    7.26  static void free_l1_table(struct pfn_info *page);
    7.27 @@ -172,9 +175,11 @@ unsigned int free_pfns;
    7.28  static struct {
    7.29  #define DOP_FLUSH_TLB   (1<<0) /* Flush the TLB.                 */
    7.30  #define DOP_RELOAD_LDT  (1<<1) /* Reload the LDT shadow mapping. */
    7.31 -    unsigned long flags;
    7.32 -    unsigned long cr0;
    7.33 -} deferred_op[NR_CPUS] __cacheline_aligned;
    7.34 +    unsigned long       deferred_ops;
    7.35 +    unsigned long       cr0;
    7.36 +    domid_t             subject_id;
    7.37 +    struct task_struct *subject_p;
    7.38 +} percpu_info[NR_CPUS] __cacheline_aligned;
    7.39  
    7.40  /*
    7.41   * init_frametable:
    7.42 @@ -187,7 +192,7 @@ void __init init_frametable(unsigned lon
    7.43      unsigned long page_index;
    7.44      unsigned long flags;
    7.45  
    7.46 -    memset(deferred_op, 0, sizeof(deferred_op));
    7.47 +    memset(percpu_info, 0, sizeof(percpu_info));
    7.48  
    7.49      max_page = nr_pages;
    7.50      frame_table_size = nr_pages * sizeof(struct pfn_info);
    7.51 @@ -232,7 +237,7 @@ static void __invalidate_shadow_ldt(stru
    7.52      }
    7.53  
    7.54      /* Dispose of the (now possibly invalid) mappings from the TLB.  */
    7.55 -    deferred_op[p->processor].flags |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
    7.56 +    percpu_info[p->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
    7.57  }
    7.58  
    7.59  
    7.60 @@ -286,39 +291,49 @@ int map_ldt_shadow_page(unsigned int off
    7.61  }
    7.62  
    7.63  
    7.64 -/* Domain 0 is allowed to build page tables on others' behalf. */
    7.65 -static inline int dom0_get_page(struct pfn_info *page)
    7.66 +static int get_page_from_pagenr(unsigned long page_nr, int check_level)
    7.67  {
    7.68 -    unsigned long x, nx, y = page->count_and_flags;
    7.69 +    struct task_struct *p = current;
    7.70 +    struct pfn_info *page = &frame_table[page_nr];
    7.71 +    unsigned long y, x, nx;
    7.72  
    7.73 -    do {
    7.74 -        x  = y;
    7.75 -        nx = x + 1;
    7.76 -        if ( unlikely((x & PGC_count_mask) == 0) ||
    7.77 -             unlikely((nx & PGC_count_mask) == 0) )
    7.78 -            return 0;
    7.79 -    }
    7.80 -    while ( unlikely((y = cmpxchg(&page->count_and_flags, x, nx)) != x) );
    7.81 -
    7.82 -    return 1;
    7.83 -}
    7.84 -
    7.85 -
    7.86 -static int get_page_from_pagenr(unsigned long page_nr)
    7.87 -{
    7.88 -    struct pfn_info *page = &frame_table[page_nr];
    7.89 -
    7.90 -    if ( unlikely(page_nr >= max_page) )
    7.91 +    if ( unlikely(!pfn_is_ram(page_nr)) )
    7.92      {
    7.93 -        MEM_LOG("Page out of range (%08lx>%08lx)", page_nr, max_page);
    7.94 +        MEM_LOG("Pfn %08lx is not RAM", page_nr);
    7.95          return 0;
    7.96      }
    7.97  
    7.98 -    if ( unlikely(!get_page(page, current)) &&
    7.99 -         unlikely((current->domain != 0) || !dom0_get_page(page)) )
   7.100 +    /* Find the correct subject domain. */
   7.101 +    if ( unlikely(percpu_info[p->processor].subject_p != NULL) )
   7.102 +        p = percpu_info[p->processor].subject_p;
   7.103 +
   7.104 +    /* Demote ANYDOM to STRICT if subject domain is not privileged. */
   7.105 +    if ( check_level == CHECK_ANYDOM && !IS_PRIV(p) )
   7.106 +        check_level = CHECK_STRICT;
   7.107 +
   7.108 +    switch ( check_level )
   7.109      {
   7.110 -        MEM_LOG("Could not get page reference for pfn %08lx\n", page_nr);
   7.111 -        return 0;
   7.112 +    case CHECK_STRICT:
   7.113 +        if ( unlikely(!get_page(page, p)) )
   7.114 +        {
   7.115 +            MEM_LOG("Could not get page ref for pfn %08lx\n", page_nr);
   7.116 +            return 0;
   7.117 +        }
   7.118 +        break;
   7.119 +    case CHECK_ANYDOM:
   7.120 +        y = page->count_and_flags;
   7.121 +        do {
   7.122 +            x  = y;
   7.123 +            nx = x + 1;
   7.124 +            if ( unlikely((x & PGC_count_mask) == 0) ||
   7.125 +                 unlikely((nx & PGC_count_mask) == 0) )
   7.126 +            {
   7.127 +                MEM_LOG("Could not get page ref for pfn %08lx\n", page_nr);
   7.128 +                return 0;
   7.129 +            }
   7.130 +        }
   7.131 +        while ( unlikely((y = cmpxchg(&page->count_and_flags, x, nx)) != x) );
   7.132 +        break;
   7.133      }
   7.134  
   7.135      return 1;
   7.136 @@ -326,11 +341,12 @@ static int get_page_from_pagenr(unsigned
   7.137  
   7.138  
   7.139  static int get_page_and_type_from_pagenr(unsigned long page_nr, 
   7.140 -                                         unsigned int type)
   7.141 +                                         unsigned int type,
   7.142 +                                         int check_level)
   7.143  {
   7.144      struct pfn_info *page = &frame_table[page_nr];
   7.145  
   7.146 -    if ( unlikely(!get_page_from_pagenr(page_nr)) )
   7.147 +    if ( unlikely(!get_page_from_pagenr(page_nr, check_level)) )
   7.148          return 0;
   7.149  
   7.150      if ( unlikely(!get_page_type(page, type)) )
   7.151 @@ -371,7 +387,8 @@ static int get_linear_pagetable(l2_pgent
   7.152      if ( (l2_pgentry_val(l2e) >> PAGE_SHIFT) != pfn )
   7.153      {
   7.154          /* Make sure the mapped frame belongs to the correct domain. */
   7.155 -        if ( unlikely(!get_page_from_pagenr(l2_pgentry_to_pagenr(l2e))) )
   7.156 +        if ( unlikely(!get_page_from_pagenr(l2_pgentry_to_pagenr(l2e), 
   7.157 +                                            CHECK_STRICT)) )
   7.158              return 0;
   7.159  
   7.160          /*
   7.161 @@ -399,33 +416,45 @@ static int get_linear_pagetable(l2_pgent
   7.162  
   7.163  static int get_page_from_l1e(l1_pgentry_t l1e)
   7.164  {
   7.165 -    ASSERT(l1_pgentry_val(l1e) & _PAGE_PRESENT);
   7.166 +    unsigned long l1v = l1_pgentry_val(l1e);
   7.167 +    unsigned long pfn = l1_pgentry_to_pagenr(l1e);
   7.168  
   7.169 -    if ( unlikely((l1_pgentry_val(l1e) & (_PAGE_GLOBAL|_PAGE_PAT))) )
   7.170 +    if ( !(l1v & _PAGE_PRESENT) )
   7.171 +        return 1;
   7.172 +
   7.173 +    if ( unlikely(l1v & (_PAGE_GLOBAL|_PAGE_PAT)) )
   7.174      {
   7.175 -        MEM_LOG("Bad L1 page type settings %04lx",
   7.176 -                l1_pgentry_val(l1e) & (_PAGE_GLOBAL|_PAGE_PAT));
   7.177 +        MEM_LOG("Bad L1 type settings %04lx", l1v & (_PAGE_GLOBAL|_PAGE_PAT));
   7.178          return 0;
   7.179      }
   7.180  
   7.181 -    if ( l1_pgentry_val(l1e) & _PAGE_RW )
   7.182 +    if ( unlikely(!pfn_is_ram(pfn)) )
   7.183 +    {
   7.184 +        if ( IS_PRIV(current) )
   7.185 +            return 1;
   7.186 +        MEM_LOG("Non-privileged attempt to map I/O space %08lx", pfn);
   7.187 +        return 0;
   7.188 +    }
   7.189 +
   7.190 +    if ( l1v & _PAGE_RW )
   7.191      {
   7.192          if ( unlikely(!get_page_and_type_from_pagenr(
   7.193 -            l1_pgentry_to_pagenr(l1e), PGT_writeable_page)) )
   7.194 +            pfn, PGT_writeable_page, CHECK_ANYDOM)) )
   7.195              return 0;
   7.196          set_bit(_PGC_tlb_flush_on_type_change, 
   7.197 -                &frame_table[l1_pgentry_to_pagenr(l1e)].count_and_flags);
   7.198 +                &frame_table[pfn].count_and_flags);
   7.199          return 1;
   7.200      }
   7.201  
   7.202 -    return get_page_from_pagenr(l1_pgentry_to_pagenr(l1e));
   7.203 +    return get_page_from_pagenr(pfn, CHECK_ANYDOM);
   7.204  }
   7.205  
   7.206  
   7.207  /* NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'. */
   7.208  static int get_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
   7.209  {
   7.210 -    ASSERT(l2_pgentry_val(l2e) & _PAGE_PRESENT);
   7.211 +    if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
   7.212 +        return 1;
   7.213  
   7.214      if ( unlikely((l2_pgentry_val(l2e) & (_PAGE_GLOBAL|_PAGE_PSE))) )
   7.215      {
   7.216 @@ -435,7 +464,7 @@ static int get_page_from_l2e(l2_pgentry_
   7.217      }
   7.218  
   7.219      if ( unlikely(!get_page_and_type_from_pagenr(
   7.220 -        l2_pgentry_to_pagenr(l2e), PGT_l1_page_table)) )
   7.221 +        l2_pgentry_to_pagenr(l2e), PGT_l1_page_table, CHECK_STRICT)) )
   7.222          return get_linear_pagetable(l2e, pfn);
   7.223  
   7.224      return 1;
   7.225 @@ -445,10 +474,12 @@ static int get_page_from_l2e(l2_pgentry_
   7.226  static void put_page_from_l1e(l1_pgentry_t l1e)
   7.227  {
   7.228      struct pfn_info *page = &frame_table[l1_pgentry_to_pagenr(l1e)];
   7.229 +    unsigned long    l1v  = l1_pgentry_val(l1e);
   7.230  
   7.231 -    ASSERT(l1_pgentry_val(l1e) & _PAGE_PRESENT);
   7.232 +    if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(l1v >> PAGE_SHIFT) )
   7.233 +        return;
   7.234  
   7.235 -    if ( l1_pgentry_val(l1e) & _PAGE_RW )
   7.236 +    if ( l1v & _PAGE_RW )
   7.237      {
   7.238          put_page_and_type(page);
   7.239      }
   7.240 @@ -470,8 +501,6 @@ static void put_page_from_l1e(l1_pgentry
   7.241   */
   7.242  static void put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
   7.243  {
   7.244 -    ASSERT(l2_pgentry_val(l2e) & _PAGE_PRESENT);
   7.245 -
   7.246      if ( (l2_pgentry_val(l2e) & _PAGE_PRESENT) && 
   7.247           ((l2_pgentry_val(l2e) >> PAGE_SHIFT) != pfn) )
   7.248          put_page_and_type(&frame_table[l2_pgentry_to_pagenr(l2e)]);
   7.249 @@ -481,21 +510,14 @@ static void put_page_from_l2e(l2_pgentry
   7.250  static int alloc_l2_table(struct pfn_info *page)
   7.251  {
   7.252      unsigned long page_nr = page - frame_table;
   7.253 -    l2_pgentry_t *pl2e, l2e;
   7.254 +    l2_pgentry_t *pl2e;
   7.255      int i;
   7.256     
   7.257      pl2e = map_domain_mem(page_nr << PAGE_SHIFT);
   7.258  
   7.259      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   7.260 -    {
   7.261 -        l2e = pl2e[i];
   7.262 -
   7.263 -        if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) ) 
   7.264 -            continue;
   7.265 -
   7.266 -        if ( unlikely(!get_page_from_l2e(l2e, page_nr)) )
   7.267 +        if ( unlikely(!get_page_from_l2e(pl2e[i], page_nr)) )
   7.268              goto fail;
   7.269 -    }
   7.270      
   7.271      /* Now we add our private high mappings. */
   7.272      memcpy(&pl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
   7.273 @@ -512,11 +534,7 @@ static int alloc_l2_table(struct pfn_inf
   7.274  
   7.275   fail:
   7.276      while ( i-- > 0 )
   7.277 -    {
   7.278 -        l2e = pl2e[i];
   7.279 -        if ( l2_pgentry_val(l2e) & _PAGE_PRESENT )
   7.280 -            put_page_from_l2e(l2e, page_nr);
   7.281 -    }
   7.282 +        put_page_from_l2e(pl2e[i], page_nr);
   7.283  
   7.284      unmap_domain_mem(pl2e);
   7.285      return 0;
   7.286 @@ -526,34 +544,21 @@ static int alloc_l2_table(struct pfn_inf
   7.287  static int alloc_l1_table(struct pfn_info *page)
   7.288  {
   7.289      unsigned long page_nr = page - frame_table;
   7.290 -    l1_pgentry_t *pl1e, l1e;
   7.291 +    l1_pgentry_t *pl1e;
   7.292      int i;
   7.293  
   7.294      pl1e = map_domain_mem(page_nr << PAGE_SHIFT);
   7.295  
   7.296      for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
   7.297 -    {
   7.298 -        l1e = pl1e[i];
   7.299 -
   7.300 -        if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) ) 
   7.301 -            continue;
   7.302 +        if ( unlikely(!get_page_from_l1e(pl1e[i])) )
   7.303 +            goto fail;
   7.304  
   7.305 -        if ( unlikely(!get_page_from_l1e(l1e)) )
   7.306 -            goto fail;
   7.307 -    }
   7.308 -
   7.309 -    /* Make sure we unmap the right page! */
   7.310      unmap_domain_mem(pl1e);
   7.311      return 1;
   7.312  
   7.313   fail:
   7.314      while ( i-- > 0 )
   7.315 -    {
   7.316 -        l1e = pl1e[i];
   7.317 -        if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
   7.318 -            continue;
   7.319 -        put_page_from_l1e(l1e);
   7.320 -    }
   7.321 +        put_page_from_l1e(pl1e[i]);
   7.322  
   7.323      unmap_domain_mem(pl1e);
   7.324      return 0;
   7.325 @@ -563,18 +568,13 @@ static int alloc_l1_table(struct pfn_inf
   7.326  static void free_l2_table(struct pfn_info *page)
   7.327  {
   7.328      unsigned long page_nr = page - frame_table;
   7.329 -    l2_pgentry_t *pl2e, l2e;
   7.330 +    l2_pgentry_t *pl2e;
   7.331      int i;
   7.332  
   7.333      pl2e = map_domain_mem(page_nr << PAGE_SHIFT);
   7.334  
   7.335      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   7.336 -    {
   7.337 -        l2e = pl2e[i];
   7.338 -        if ( (l2_pgentry_val(l2e) & _PAGE_PRESENT) &&
   7.339 -             unlikely((l2_pgentry_val(l2e) >> PAGE_SHIFT) != page_nr) )
   7.340 -            put_page_and_type(&frame_table[l2_pgentry_to_pagenr(l2e)]);
   7.341 -    }
   7.342 +        put_page_from_l2e(pl2e[i], page_nr);
   7.343  
   7.344      unmap_domain_mem(pl2e);
   7.345  }
   7.346 @@ -583,18 +583,13 @@ static void free_l2_table(struct pfn_inf
   7.347  static void free_l1_table(struct pfn_info *page)
   7.348  {
   7.349      unsigned long page_nr = page - frame_table;
   7.350 -    l1_pgentry_t *pl1e, l1e;
   7.351 +    l1_pgentry_t *pl1e;
   7.352      int i;
   7.353  
   7.354      pl1e = map_domain_mem(page_nr << PAGE_SHIFT);
   7.355  
   7.356      for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
   7.357 -    {
   7.358 -        l1e = pl1e[i];
   7.359 -        if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) ) 
   7.360 -            continue;
   7.361 -        put_page_from_l1e(l1e);
   7.362 -    }
   7.363 +        put_page_from_l1e(pl1e[i]);
   7.364  
   7.365      unmap_domain_mem(pl1e);
   7.366  }
   7.367 @@ -648,18 +643,14 @@ static int mod_l2_entry(l2_pgentry_t *pl
   7.368              return 0;
   7.369          }
   7.370          
   7.371 -        if ( l2_pgentry_val(ol2e) & _PAGE_PRESENT )
   7.372 -            put_page_from_l2e(ol2e, pfn);
   7.373 -        
   7.374 +        put_page_from_l2e(ol2e, pfn);
   7.375          return 1;
   7.376      }
   7.377  
   7.378      if ( unlikely(!update_l2e(pl2e, ol2e, nl2e)) )
   7.379          return 0;
   7.380  
   7.381 -    if ( l2_pgentry_val(ol2e) & _PAGE_PRESENT )
   7.382 -        put_page_from_l2e(ol2e, pfn);
   7.383 -
   7.384 +    put_page_from_l2e(ol2e, pfn);
   7.385      return 1;
   7.386  }
   7.387  
   7.388 @@ -712,18 +703,14 @@ static int mod_l1_entry(l1_pgentry_t *pl
   7.389              return 0;
   7.390          }
   7.391          
   7.392 -        if ( l1_pgentry_val(ol1e) & _PAGE_PRESENT )
   7.393 -            put_page_from_l1e(ol1e);
   7.394 -        
   7.395 +        put_page_from_l1e(ol1e);
   7.396          return 1;
   7.397      }
   7.398  
   7.399      if ( unlikely(!update_l1e(pl1e, ol1e, nl1e)) )
   7.400          return 0;
   7.401      
   7.402 -    if ( l1_pgentry_val(ol1e) & _PAGE_PRESENT )
   7.403 -        put_page_from_l1e(ol1e);
   7.404 -
   7.405 +    put_page_from_l1e(ol1e);
   7.406      return 1;
   7.407  }
   7.408  
   7.409 @@ -790,24 +777,18 @@ static int do_extended_command(unsigned 
   7.410      unsigned long pfn = ptr >> PAGE_SHIFT;
   7.411      struct pfn_info *page = &frame_table[pfn];
   7.412  
   7.413 -    /* 'ptr' must be in range except where it isn't a machine address. */
   7.414 -    if ( (pfn >= max_page) && (cmd != MMUEXT_SET_LDT) )
   7.415 -    {
   7.416 -        MEM_LOG("Ptr out of range for extended MMU command");
   7.417 -        return 1;
   7.418 -    }
   7.419 -
   7.420      switch ( cmd )
   7.421      {
   7.422      case MMUEXT_PIN_L1_TABLE:
   7.423      case MMUEXT_PIN_L2_TABLE:
   7.424 -        okay = get_page_and_type_from_pagenr(pfn, 
   7.425 -                                             (cmd == MMUEXT_PIN_L2_TABLE) ? 
   7.426 -                                             PGT_l2_page_table : 
   7.427 -                                             PGT_l1_page_table);
   7.428 +        okay = get_page_and_type_from_pagenr(
   7.429 +            pfn, (cmd == MMUEXT_PIN_L2_TABLE) ? PGT_l2_page_table : 
   7.430 +                                                PGT_l1_page_table,
   7.431 +            CHECK_STRICT);
   7.432          if ( unlikely(!okay) )
   7.433          {
   7.434              MEM_LOG("Error while pinning pfn %08lx", pfn);
   7.435 +            put_page(page);
   7.436              break;
   7.437          }
   7.438  
   7.439 @@ -823,7 +804,7 @@ static int do_extended_command(unsigned 
   7.440          break;
   7.441  
   7.442      case MMUEXT_UNPIN_TABLE:
   7.443 -        if ( unlikely(!(okay = get_page_from_pagenr(pfn))) )
   7.444 +        if ( unlikely(!(okay = get_page_from_pagenr(pfn, CHECK_STRICT))) )
   7.445          {
   7.446              MEM_LOG("Page %08lx bad domain (dom=%p)",
   7.447                      ptr, page->u.domain);
   7.448 @@ -843,14 +824,15 @@ static int do_extended_command(unsigned 
   7.449          break;
   7.450  
   7.451      case MMUEXT_NEW_BASEPTR:
   7.452 -        okay = get_page_and_type_from_pagenr(pfn, PGT_l2_page_table);
   7.453 +        okay = get_page_and_type_from_pagenr(pfn, PGT_l2_page_table, 
   7.454 +                                             CHECK_STRICT);
   7.455          if ( likely(okay) )
   7.456          {
   7.457              put_page_and_type(&frame_table[pagetable_val(current->mm.pagetable)
   7.458                                            >> PAGE_SHIFT]);
   7.459              current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
   7.460              invalidate_shadow_ldt();
   7.461 -            deferred_op[cpu].flags |= DOP_FLUSH_TLB;
   7.462 +            percpu_info[cpu].deferred_ops |= DOP_FLUSH_TLB;
   7.463          }
   7.464          else
   7.465          {
   7.466 @@ -859,7 +841,7 @@ static int do_extended_command(unsigned 
   7.467          break;
   7.468          
   7.469      case MMUEXT_TLB_FLUSH:
   7.470 -        deferred_op[cpu].flags |= DOP_FLUSH_TLB;
   7.471 +        percpu_info[cpu].deferred_ops |= DOP_FLUSH_TLB;
   7.472          break;
   7.473      
   7.474      case MMUEXT_INVLPG:
   7.475 @@ -884,13 +866,39 @@ static int do_extended_command(unsigned 
   7.476              current->mm.ldt_base = ptr;
   7.477              current->mm.ldt_ents = ents;
   7.478              load_LDT(current);
   7.479 -            deferred_op[cpu].flags &= ~DOP_RELOAD_LDT;
   7.480 +            percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
   7.481              if ( ents != 0 )
   7.482 -                deferred_op[cpu].flags |= DOP_RELOAD_LDT;
   7.483 +                percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
   7.484          }
   7.485          break;
   7.486      }
   7.487  
   7.488 +    case MMUEXT_SET_SUBJECTDOM_L:
   7.489 +        percpu_info[cpu].subject_id = (domid_t)((ptr&~0xFFFF)|(val>>16));
   7.490 +        break;
   7.491 +
   7.492 +    case MMUEXT_SET_SUBJECTDOM_H:
   7.493 +        percpu_info[cpu].subject_id |= (domid_t)((ptr&~0xFFFF)|(val>>16))<<32;
   7.494 +        if ( !IS_PRIV(current) )
   7.495 +        {
   7.496 +            MEM_LOG("Dom %llu has no privilege to set subject domain",
   7.497 +                    current->domain);
   7.498 +            okay = 0;
   7.499 +        }
   7.500 +        else
   7.501 +        {
   7.502 +            if ( percpu_info[cpu].subject_p != NULL )
   7.503 +                put_task_struct(percpu_info[cpu].subject_p);
   7.504 +            percpu_info[cpu].subject_p = find_domain_by_id(
   7.505 +                percpu_info[cpu].subject_id);
   7.506 +            if ( percpu_info[cpu].subject_p == NULL )
   7.507 +            {
   7.508 +                MEM_LOG("Unknown domain '%llu'", percpu_info[cpu].subject_id);
   7.509 +                okay = 0;
   7.510 +            }
   7.511 +        }
   7.512 +        break;
   7.513 +
   7.514      default:
   7.515          MEM_LOG("Invalid extended pt command 0x%08lx", val & MMUEXT_CMD_MASK);
   7.516          okay = 0;
   7.517 @@ -904,7 +912,7 @@ static int do_extended_command(unsigned 
   7.518  int do_mmu_update(mmu_update_t *ureqs, int count)
   7.519  {
   7.520      mmu_update_t req;
   7.521 -    unsigned long va = 0, flags, pfn, prev_pfn = 0;
   7.522 +    unsigned long va = 0, deferred_ops, pfn, prev_pfn = 0;
   7.523      struct pfn_info *page;
   7.524      int rc = 0, okay = 1, i, cpu = smp_processor_id();
   7.525      unsigned int cmd;
   7.526 @@ -932,16 +940,7 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.527               * MMU_NORMAL_PT_UPDATE: Normal update to any level of page table.
   7.528               */
   7.529          case MMU_NORMAL_PT_UPDATE:
   7.530 -            page = &frame_table[pfn];
   7.531 -
   7.532 -            if ( unlikely(pfn >= max_page) )
   7.533 -            {
   7.534 -                MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
   7.535 -                break;
   7.536 -            }
   7.537 -
   7.538 -            if ( unlikely(!get_page(page, current)) &&
   7.539 -                 ((current->domain != 0) || !dom0_get_page(page)) )
   7.540 +            if ( unlikely(!get_page_from_pagenr(pfn, CHECK_STRICT)) )
   7.541              {
   7.542                  MEM_LOG("Could not get page for normal update");
   7.543                  break;
   7.544 @@ -959,6 +958,7 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.545                  prev_pfn = pfn;
   7.546              }
   7.547  
   7.548 +            page = &frame_table[pfn];
   7.549              switch ( (page->type_and_flags & PGT_type_mask) )
   7.550              {
   7.551              case PGT_l1_page_table: 
   7.552 @@ -992,43 +992,16 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.553  
   7.554              break;
   7.555  
   7.556 -        case MMU_UNCHECKED_PT_UPDATE:
   7.557 -            req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
   7.558 -            if ( likely(IS_PRIV(current)) )
   7.559 +        case MMU_MACHPHYS_UPDATE:
   7.560 +            if ( unlikely(!get_page_from_pagenr(pfn, CHECK_STRICT)) )
   7.561              {
   7.562 -                if ( likely(prev_pfn == pfn) )
   7.563 -                {
   7.564 -                    va = (va & PAGE_MASK) | (req.ptr & ~PAGE_MASK);
   7.565 -                }
   7.566 -                else
   7.567 -                {
   7.568 -                    if ( prev_pfn != 0 )
   7.569 -                        unmap_domain_mem((void *)va);
   7.570 -                    va = (unsigned long)map_domain_mem(req.ptr);
   7.571 -                    prev_pfn = pfn;
   7.572 -                }
   7.573 -                *(unsigned long *)va = req.val;
   7.574 -                okay = 1;
   7.575 +                MEM_LOG("Could not get page for mach->phys update");
   7.576 +                break;
   7.577              }
   7.578 -            else
   7.579 -            {
   7.580 -                MEM_LOG("Bad unchecked update attempt");
   7.581 -            }
   7.582 -            break;
   7.583 -            
   7.584 -        case MMU_MACHPHYS_UPDATE:
   7.585 -            page = &frame_table[pfn];
   7.586 -            if ( unlikely(pfn >= max_page) )
   7.587 -            {
   7.588 -                MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
   7.589 -            }
   7.590 -            else if ( likely(get_page(page, current)) ||
   7.591 -                      ((current->domain == 0) && dom0_get_page(page)) )
   7.592 -            {
   7.593 -                machine_to_phys_mapping[pfn] = req.val;
   7.594 -                okay = 1;
   7.595 -                put_page(page);
   7.596 -            }
   7.597 +
   7.598 +            machine_to_phys_mapping[pfn] = req.val;
   7.599 +            okay = 1;
   7.600 +            put_page(&frame_table[pfn]);
   7.601              break;
   7.602  
   7.603              /*
   7.604 @@ -1057,27 +1030,33 @@ int do_mmu_update(mmu_update_t *ureqs, i
   7.605      if ( prev_pfn != 0 )
   7.606          unmap_domain_mem((void *)va);
   7.607  
   7.608 -    flags = deferred_op[cpu].flags;
   7.609 -    deferred_op[cpu].flags = 0;
   7.610 +    deferred_ops = percpu_info[cpu].deferred_ops;
   7.611 +    percpu_info[cpu].deferred_ops = 0;
   7.612  
   7.613 -    if ( flags & DOP_FLUSH_TLB )
   7.614 +    if ( deferred_ops & DOP_FLUSH_TLB )
   7.615          write_cr3_counted(pagetable_val(current->mm.pagetable));
   7.616  
   7.617 -    if ( flags & DOP_RELOAD_LDT )
   7.618 +    if ( deferred_ops & DOP_RELOAD_LDT )
   7.619          (void)map_ldt_shadow_page(0);
   7.620  
   7.621 +    if ( unlikely(percpu_info[cpu].subject_p != NULL) )
   7.622 +    {
   7.623 +        put_task_struct(percpu_info[cpu].subject_p);
   7.624 +        percpu_info[cpu].subject_p = NULL;
   7.625 +    }
   7.626 +
   7.627      return rc;
   7.628  }
   7.629  
   7.630  
   7.631  int do_update_va_mapping(unsigned long page_nr, 
   7.632                           unsigned long val, 
   7.633 -                         unsigned long caller_flags)
   7.634 +                         unsigned long flags)
   7.635  {
   7.636      struct task_struct *p = current;
   7.637      int err = 0;
   7.638      unsigned int cpu = p->processor;
   7.639 -    unsigned long defer_flags;
   7.640 +    unsigned long deferred_ops;
   7.641  
   7.642      if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) )
   7.643          return -EINVAL;
   7.644 @@ -1086,16 +1065,16 @@ int do_update_va_mapping(unsigned long p
   7.645                                  mk_l1_pgentry(val))) )
   7.646          err = -EINVAL;
   7.647  
   7.648 -    defer_flags = deferred_op[cpu].flags;
   7.649 -    deferred_op[cpu].flags = 0;
   7.650 +    deferred_ops = percpu_info[cpu].deferred_ops;
   7.651 +    percpu_info[cpu].deferred_ops = 0;
   7.652  
   7.653 -    if ( unlikely(defer_flags & DOP_FLUSH_TLB) || 
   7.654 -         unlikely(caller_flags & UVMF_FLUSH_TLB) )
   7.655 +    if ( unlikely(deferred_ops & DOP_FLUSH_TLB) || 
   7.656 +         unlikely(flags & UVMF_FLUSH_TLB) )
   7.657          write_cr3_counted(pagetable_val(p->mm.pagetable));
   7.658 -    else if ( unlikely(caller_flags & UVMF_INVLPG) )
   7.659 +    else if ( unlikely(flags & UVMF_INVLPG) )
   7.660          __flush_tlb_one(page_nr << PAGE_SHIFT);
   7.661  
   7.662 -    if ( unlikely(defer_flags & DOP_RELOAD_LDT) )
   7.663 +    if ( unlikely(deferred_ops & DOP_RELOAD_LDT) )
   7.664          (void)map_ldt_shadow_page(0);
   7.665      
   7.666      return err;
   7.667 @@ -1215,7 +1194,7 @@ void reaudit_pages(u_char key, void *dev
   7.668   * - check for pages with corrupt ref-count
    7.669   * Interrupts are disabled completely. Use with care.
   7.670   */
   7.671 -void audit_all_pages (u_char key, void *dev_id, struct pt_regs *regs)
   7.672 +void audit_all_pages(u_char key, void *dev_id, struct pt_regs *regs)
   7.673  {
   7.674      unsigned long     i, j, k;
   7.675      unsigned long     ref_count;
   7.676 @@ -1228,7 +1207,6 @@ void audit_all_pages (u_char key, void *
   7.677      /* walk the frame table */
   7.678      for ( i = 0; i < max_page; i++ )
   7.679      {
   7.680 -
   7.681          /* check for zombies */
   7.682          if ( ((frame_table[i].count_and_flags & PGC_count_mask) != 0) &&
   7.683               ((frame_table[i].count_and_flags & PGC_zombie) != 0) )
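
Putting the pieces together: a privileged domain building pagetables on
another domain's behalf now emits a request stream of the following
shape (illustrative trace, not literal output):

    /*
     * [0]    ptr = dom[31:16]<<16 | MMU_EXTENDED_COMMAND
     *        val = dom[15:0]<<16  | MMUEXT_SET_SUBJECTDOM_L
     * [1]    ptr = dom[63:48]<<16 | MMU_EXTENDED_COMMAND
     *        val = dom[47:32]<<16 | MMUEXT_SET_SUBJECTDOM_H
     * [2..n] ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, val = pfn
     * [n+1]  ptr = (l2_mfn << PAGE_SHIFT) | MMU_EXTENDED_COMMAND,
     *        val = MMUEXT_PIN_L2_TABLE
     *
     * SET_SUBJECTDOM_H checks IS_PRIV() and resolves subject_p; from
     * then on, every CHECK_STRICT ownership test in the batch is made
     * against the subject domain rather than the caller, and the
     * reference is dropped again at the end of do_mmu_update().
     */
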
     8.1 --- a/xen/include/asm-i386/page.h	Thu Feb 26 20:21:31 2004 +0000
     8.2 +++ b/xen/include/asm-i386/page.h	Fri Feb 27 15:19:27 2004 +0000
     8.3 @@ -1,14 +1,10 @@
     8.4  #ifndef _I386_PAGE_H
     8.5  #define _I386_PAGE_H
     8.6  
     8.7 -
     8.8 -#ifndef __ASSEMBLY__
     8.9  #define BUG() do {					\
    8.10  	printk("BUG at %s:%d\n", __FILE__, __LINE__);	\
    8.11  	__asm__ __volatile__("ud2");			\
    8.12  } while (0)
    8.13 -#endif /* __ASSEMBLY__ */
    8.14 -
    8.15  
    8.16  #define L1_PAGETABLE_SHIFT       12
    8.17  #define L2_PAGETABLE_SHIFT       22
    8.18 @@ -75,6 +71,13 @@ typedef struct { unsigned long pt_lo; } 
    8.19  #define virt_to_page(kaddr)	(frame_table + (__pa(kaddr) >> PAGE_SHIFT))
    8.20  #define VALID_PAGE(page)	((page - frame_table) < max_mapnr)
    8.21  
    8.22 +/*
    8.23 + * NB. We don't currently track I/O holes in the physical RAM space.
    8.24 + * For now we guess that I/O devices will be mapped in the first 1MB
    8.25 + * (e.g., VGA buffers) or beyond the end of physical RAM.
    8.26 + */
    8.27 +#define pfn_is_ram(_pfn)        (((_pfn) > 0x100) && ((_pfn) < max_page))
    8.28 +
    8.29  /* High table entries are reserved by the hypervisor. */
    8.30  #define DOMAIN_ENTRIES_PER_L2_PAGETABLE	    \
    8.31    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
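
Two worked cases of the heuristic (illustrative, assuming 4kB pages so
that pfn 0x100 sits at the 1MB boundary):

    ASSERT(!pfn_is_ram(0xB8));      /* VGA text buffer at MA 0xB8000:
                                       below 1MB, treated as I/O space */
    ASSERT(!pfn_is_ram(max_page));  /* first frame past the end of RAM:
                                       devices above RAM are also I/O */
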
     9.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Thu Feb 26 20:21:31 2004 +0000
     9.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Fri Feb 27 15:19:27 2004 +0000
     9.3 @@ -129,23 +129,24 @@
     9.4   *  which shifts the least bits out.
     9.5   */
     9.6  /* A normal page-table update request. */
     9.7 -#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.      */
     9.8 -/* DOM0 can make entirely unchecked updates which do not affect refcnts. */
     9.9 -#define MMU_UNCHECKED_PT_UPDATE  1 /* unchecked '*ptr = val'. ptr is MA.    */
    9.10 +#define MMU_NORMAL_PT_UPDATE     0 /* checked '*ptr = val'. ptr is MA.       */
    9.11  /* Update an entry in the machine->physical mapping table. */
    9.12 -#define MMU_MACHPHYS_UPDATE      2 /* ptr = MA of frame to modify entry for */
    9.13 +#define MMU_MACHPHYS_UPDATE      2 /* ptr = MA of frame to modify entry for  */
    9.14  /* An extended command. */
    9.15 -#define MMU_EXTENDED_COMMAND     3 /* least 8 bits of val demux further     */
    9.16 +#define MMU_EXTENDED_COMMAND     3 /* least 8 bits of val demux further      */
    9.17  /* Extended commands: */
    9.18 -#define MMUEXT_PIN_L1_TABLE      0 /* ptr = MA of frame to pin              */
    9.19 -#define MMUEXT_PIN_L2_TABLE      1 /* ptr = MA of frame to pin              */
    9.20 -#define MMUEXT_PIN_L3_TABLE      2 /* ptr = MA of frame to pin              */
    9.21 -#define MMUEXT_PIN_L4_TABLE      3 /* ptr = MA of frame to pin              */
    9.22 -#define MMUEXT_UNPIN_TABLE       4 /* ptr = MA of frame to unpin            */
    9.23 -#define MMUEXT_NEW_BASEPTR       5 /* ptr = MA of new pagetable base        */
    9.24 -#define MMUEXT_TLB_FLUSH         6 /* ptr = NULL                            */
    9.25 -#define MMUEXT_INVLPG            7 /* ptr = NULL ; val = VA to invalidate   */
    9.26 -#define MMUEXT_SET_LDT           8 /* ptr = VA of table; val = # entries    */
    9.27 +#define MMUEXT_PIN_L1_TABLE      0 /* ptr = MA of frame to pin               */
    9.28 +#define MMUEXT_PIN_L2_TABLE      1 /* ptr = MA of frame to pin               */
    9.29 +#define MMUEXT_PIN_L3_TABLE      2 /* ptr = MA of frame to pin               */
    9.30 +#define MMUEXT_PIN_L4_TABLE      3 /* ptr = MA of frame to pin               */
    9.31 +#define MMUEXT_UNPIN_TABLE       4 /* ptr = MA of frame to unpin             */
    9.32 +#define MMUEXT_NEW_BASEPTR       5 /* ptr = MA of new pagetable base         */
    9.33 +#define MMUEXT_TLB_FLUSH         6 /* ptr = NULL                             */
    9.34 +#define MMUEXT_INVLPG            7 /* ptr = NULL ; val = VA to invalidate    */
    9.35 +#define MMUEXT_SET_LDT           8 /* ptr = VA of table; val = # entries     */
    9.36 +/* NB. MMUEXT_SET_SUBJECTDOM must consist of *_L followed immediately by *_H */
     9.37 +#define MMUEXT_SET_SUBJECTDOM_L  9 /* (ptr[31:16],val[31:16]) = dom[31:0]    */
     9.38 +#define MMUEXT_SET_SUBJECTDOM_H 10 /* (ptr[31:16],val[31:16]) = dom[63:32]   */
    9.39  #define MMUEXT_CMD_MASK        255
    9.40  #define MMUEXT_CMD_SHIFT         8
    9.41  
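
For reference, the command type lives in the low bits of ptr and, for
extended commands, the low byte of val. A sketch of composing the three
request kinds used by the tools above (all arguments are placeholders):

    static void compose_examples(mmu_update_t req[3],
                                 unsigned long pte_ma, unsigned long new_pte,
                                 unsigned long mfn, unsigned long pfn,
                                 unsigned long l2_ma)
    {
        /* Checked pagetable write: ptr is the MA of the entry itself. */
        req[0].ptr = pte_ma | MMU_NORMAL_PT_UPDATE;
        req[0].val = new_pte;

        /* Machine->physical table update: ptr names the frame. */
        req[1].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
        req[1].val = pfn;

        /* Extended command: val[7:0] demuxes the subcommand. */
        req[2].ptr = l2_ma | MMU_EXTENDED_COMMAND;
        req[2].val = MMUEXT_PIN_L2_TABLE;
    }
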
    10.1 --- a/xen/include/xeno/sched.h	Thu Feb 26 20:21:31 2004 +0000
    10.2 +++ b/xen/include/xeno/sched.h	Fri Feb 27 15:19:27 2004 +0000
    10.3 @@ -51,9 +51,9 @@ typedef struct event_channel_st
    10.4  {
    10.5      struct task_struct *remote_dom;
    10.6      u16                 remote_port;
    10.7 -#define ECS_FREE      0 /* Available for use.                            */
    10.8 -#define ECS_ZOMBIE    1 /* Connection is closed. Remote is disconnected. */
    10.9 -#define ECS_CONNECTED 2 /* Connected to remote end.                      */
   10.10 +#define ECS_FREE         0 /* Available for use.                            */
   10.11 +#define ECS_DISCONNECTED 1 /* Connection is closed. Remote is disconnected. */
   10.12 +#define ECS_CONNECTED    2 /* Connected to remote end.                      */
   10.13      u16                 state;
   10.14  } event_channel_t;
   10.15  
    11.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/mm/init.c	Thu Feb 26 20:21:31 2004 +0000
    11.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/mm/init.c	Fri Feb 27 15:19:27 2004 +0000
    11.3 @@ -113,10 +113,7 @@ static inline void set_pte_phys (unsigne
    11.4      }
    11.5      pte = pte_offset(pmd, vaddr);
    11.6  
    11.7 -    if ( pte_io(*pte) || (pgprot_val(prot) & _PAGE_IO) )
    11.8 -        queue_unchecked_mmu_update(pte, phys | pgprot_val(prot));
    11.9 -    else
   11.10 -        queue_l1_entry_update(pte, phys | pgprot_val(prot));
   11.11 +    queue_l1_entry_update(pte, phys | pgprot_val(prot));
   11.12  
   11.13      /*
   11.14       * It's enough to flush this one mapping.
    12.1 --- a/xenolinux-2.4.25-sparse/arch/xeno/mm/ioremap.c	Thu Feb 26 20:21:31 2004 +0000
    12.2 +++ b/xenolinux-2.4.25-sparse/arch/xeno/mm/ioremap.c	Fri Feb 27 15:19:27 2004 +0000
    12.3 @@ -20,14 +20,13 @@
    12.4  
    12.5  #if defined(CONFIG_XENO_PRIV)
    12.6  
    12.7 -#define direct_set_pte(_p, _v) queue_unchecked_mmu_update((_p), (_v).pte_low)
    12.8 +/* These hacky macros avoid phys->machine translations. */
    12.9  #define __direct_pte(x) ((pte_t) { (x) } )
   12.10  #define __direct_mk_pte(page_nr,pgprot) \
   12.11    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
   12.12  #define direct_mk_pte_phys(physpage, pgprot) \
   12.13    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
   12.14  
   12.15 -
   12.16  static inline void direct_remap_area_pte(pte_t *pte, 
   12.17                                           unsigned long address, 
   12.18                                           unsigned long size,
   12.19 @@ -47,7 +46,7 @@ static inline void direct_remap_area_pte
   12.20              printk("direct_remap_area_pte: page already exists\n");
   12.21              BUG();
   12.22          }
   12.23 -        direct_set_pte(pte, pte_mkio(direct_mk_pte_phys(machine_addr, prot))); 
   12.24 +        set_pte(pte, pte_mkio(direct_mk_pte_phys(machine_addr, prot))); 
   12.25          address += PAGE_SIZE;
   12.26          machine_addr += PAGE_SIZE;
   12.27          pte++;
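
The "hacky" part is that machine_addr is already a machine address, so
the pte must be built without the phys-to-machine translation that the
regular mk_pte_phys() path would apply. Expanding the call above makes
this concrete:

    /* Illustrative expansion of the call in direct_remap_area_pte():
     *   direct_mk_pte_phys(machine_addr, prot)
     * = __direct_mk_pte(machine_addr >> PAGE_SHIFT, prot)
     * = __direct_pte(((machine_addr >> PAGE_SHIFT) << PAGE_SHIFT)
     *                | pgprot_val(prot))
     * The raw machine frame number lands in the pte unmodified. */
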
    13.1 --- a/xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h	Thu Feb 26 20:21:31 2004 +0000
    13.2 +++ b/xenolinux-2.4.25-sparse/include/asm-xeno/hypervisor.h	Fri Feb 27 15:19:27 2004 +0000
    13.3 @@ -49,9 +49,6 @@ void queue_pte_unpin(unsigned long ptr);
    13.4  void queue_set_ldt(unsigned long ptr, unsigned long bytes);
    13.5  #define MMU_UPDATE_DEBUG 0
    13.6  
    13.7 -#define queue_unchecked_mmu_update(_p,_v) queue_l1_entry_update( \
    13.8 -  (pte_t *)((unsigned long)(_p)|MMU_UNCHECKED_PT_UPDATE),(_v))
    13.9 -
   13.10  #if MMU_UPDATE_DEBUG > 0
   13.11  typedef struct {
   13.12      void *ptr;
    14.1 --- a/xenolinux-2.4.25-sparse/mm/memory.c	Thu Feb 26 20:21:31 2004 +0000
    14.2 +++ b/xenolinux-2.4.25-sparse/mm/memory.c	Fri Feb 27 15:19:27 2004 +0000
    14.3 @@ -320,7 +320,7 @@ static inline int zap_pte_range(mmu_gath
    14.4  			struct page *page = pte_page(pte);
    14.5  #if defined(CONFIG_XENO_PRIV)
    14.6  			if (pte_io(pte)) {
    14.7 -				queue_unchecked_mmu_update(ptep, 0);
    14.8 +				queue_l1_entry_update(ptep, 0);
    14.9  				continue;
   14.10  			}
   14.11  #endif