ia64/xen-unstable

changeset 798:101f79f13c44

bitkeeper revision 1.487 (3f82e75amEiS5NO9rXfHZdcC7zEzJw)

Many files:
Modify Xen pt update interface to take VA of PTEs to be modified.
author kaf24@scramble.cl.cam.ac.uk
date Tue Oct 07 16:18:34 2003 +0000 (2003-10-07)
parents 8f90e566a816
children 3d5ab61162dc
files tools/internal/xi_build.c xen/common/memory.c xen/include/asm-i386/page.h xen/include/hypervisor-ifs/hypervisor-if.h xenolinux-2.4.22-sparse/arch/xeno/drivers/balloon/balloon.c xenolinux-2.4.22-sparse/arch/xeno/mm/hypervisor.c xenolinux-2.4.22-sparse/arch/xeno/mm/init.c xenolinux-2.4.22-sparse/arch/xeno/mm/ioremap.c xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h xenolinux-2.4.22-sparse/include/asm-xeno/page.h xenolinux-2.4.22-sparse/include/asm-xeno/pgtable-2level.h xenolinux-2.4.22-sparse/include/asm-xeno/pgtable.h xenolinux-2.4.22-sparse/mm/memory.c
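
In outline: before this changeset, the 'ptr' field of a page_update_request_t carried the machine (physical) address of the page-table entry to be written, and Xen mapped that frame internally before modifying it. After this changeset, PGREQ_NORMAL_UPDATE and PGREQ_UNCHECKED_UPDATE instead carry the virtual (linear) address of the PTE; Xen resolves the VA through its linear page table and temporarily clears CR0.WP so it can write through read-only page-table mappings. The fragment below is a minimal caller-side sketch of the new convention, not part of the changeset; the struct layout and PGREQ_* values follow hypervisor-if.h as modified here, while the function and argument names are illustrative.

    /* Caller-side sketch of the VA-based update interface (illustrative). */
    typedef struct { unsigned long ptr, val; } page_update_request_t;

    #define PGREQ_NORMAL_UPDATE     0  /* checked '*ptr = val'; ptr is a VA   */
    #define PGREQ_UNCHECKED_UPDATE  1  /* unchecked '*ptr = val'; ptr is a VA */

    static void build_pte_update(page_update_request_t *req,
                                 unsigned long *pte_va,  /* VA of the PTE    */
                                 unsigned long new_val)  /* new PTE contents */
    {
        /* PTEs are word-aligned, so the command fits in the low two bits.   */
        req->ptr = (unsigned long)pte_va | PGREQ_NORMAL_UPDATE;
        req->val = new_val;
    }
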
line diff
     1.1 --- a/tools/internal/xi_build.c	Tue Oct 07 12:33:03 2003 +0000
     1.2 +++ b/tools/internal/xi_build.c	Tue Oct 07 16:18:34 2003 +0000
     1.3 @@ -130,16 +130,6 @@ static void unmap_pfn(void *vaddr)
     1.4      (void)munmap(vaddr, PAGE_SIZE);
     1.5  }
     1.6  
     1.7 -static int clear_domain_page(unsigned long pfn)
     1.8 -{
     1.9 -    void *vaddr = map_pfn(pfn);
    1.10 -    if ( vaddr == NULL )
    1.11 -        return -1;
    1.12 -    memset(vaddr, 0, PAGE_SIZE);
    1.13 -    unmap_pfn(vaddr);
    1.14 -    return 0;
    1.15 -}
    1.16 -
    1.17  static int copy_to_domain_page(unsigned long dst_pfn, void *src_page)
    1.18  {
    1.19      void *vaddr = map_pfn(dst_pfn);
    1.20 @@ -154,6 +144,8 @@ static int setup_guestos(
    1.21      int dom, int kernel_fd, int initrd_fd, unsigned long tot_pages,
    1.22      unsigned long virt_load_addr, size_t ksize, dom_meminfo_t *meminfo)
    1.23  {
    1.24 +    l1_pgentry_t *vl1tab = NULL, *vl1e = NULL;
    1.25 +    l2_pgentry_t *vl2tab = NULL, *vl2e = NULL;
    1.26      unsigned long *page_array = NULL;
    1.27      page_update_request_t *pgt_update_arr = NULL, *pgt_updates = NULL;
    1.28      int alloc_index, num_pt_pages;
    1.29 @@ -252,8 +244,6 @@ static int setup_guestos(
    1.30       * of the allocated physical address space.
    1.31       */
    1.32      l2tab = page_array[alloc_index] << PAGE_SHIFT;
    1.33 -    if ( clear_domain_page(page_array[alloc_index]) < 0 )
    1.34 -        goto error_out;
    1.35      alloc_index--;
    1.36      meminfo->l2_pgt_addr = l2tab;
    1.37      meminfo->virt_shinfo_addr = virt_load_addr + (tot_pages << PAGE_SHIFT);
    1.38 @@ -272,25 +262,29 @@ static int setup_guestos(
    1.39       * PTE -- we break out before filling in the entry, as that is done by
    1.40       * Xen during final setup.
    1.41       */
    1.42 -    l2tab += l2_table_offset(virt_load_addr) * sizeof(l2_pgentry_t);
    1.43 +    if ( (vl2tab = map_pfn(l2tab >> PAGE_SHIFT)) == NULL )
    1.44 +        goto error_out;
    1.45 +    memset(vl2tab, 0, PAGE_SIZE);
    1.46 +    vl2e = vl2tab + l2_table_offset(virt_load_addr);
    1.47      for ( count = 0; count < (tot_pages + 1); count++ )
    1.48      {    
    1.49 -        if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) ) 
    1.50 +        if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 ) 
    1.51          {
    1.52              l1tab = page_array[alloc_index] << PAGE_SHIFT;
    1.53 -            if ( clear_domain_page(page_array[alloc_index]) < 0 )
    1.54 +            if ( (vl1tab = map_pfn(l1tab >> PAGE_SHIFT)) == NULL )
    1.55                  goto error_out;
    1.56 +            memset(vl1tab, 0, PAGE_SIZE);
    1.57              alloc_index--;
    1.58  			
    1.59 -            l1tab += l1_table_offset(virt_load_addr + (count << PAGE_SHIFT)) 
    1.60 -                * sizeof(l1_pgentry_t);
    1.61 +            vl1e = vl1tab + l1_table_offset(virt_load_addr + 
    1.62 +                                            (count << PAGE_SHIFT));
    1.63  
    1.64              /* make apropriate entry in the page directory */
    1.65 -            pgt_updates->ptr = l2tab;
    1.66 +            pgt_updates->ptr = (unsigned long)vl2e;
    1.67              pgt_updates->val = l1tab | L2_PROT;
    1.68              pgt_updates++;
    1.69              num_pgt_updates++;
    1.70 -            l2tab += sizeof(l2_pgentry_t);
    1.71 +            vl2e++;
    1.72          }
    1.73  
    1.74          /* The last PTE we consider is filled in later by Xen. */
    1.75 @@ -298,20 +292,20 @@ static int setup_guestos(
    1.76  		
    1.77          if ( count < pt_start )
    1.78          {
    1.79 -            pgt_updates->ptr = l1tab;
    1.80 +            pgt_updates->ptr = (unsigned long)vl1e;
    1.81              pgt_updates->val = (page_array[count] << PAGE_SHIFT) | L1_PROT;
    1.82              pgt_updates++;
    1.83              num_pgt_updates++;
    1.84 -            l1tab += sizeof(l1_pgentry_t);
    1.85 +            vl1e++;
    1.86          }
    1.87          else
    1.88          {
    1.89 -            pgt_updates->ptr = l1tab;
    1.90 +            pgt_updates->ptr = (unsigned long)vl1e;
    1.91              pgt_updates->val = 
    1.92  		((page_array[count] << PAGE_SHIFT) | L1_PROT) & ~_PAGE_RW;
    1.93              pgt_updates++;
    1.94              num_pgt_updates++;
    1.95 -            l1tab += sizeof(l1_pgentry_t);
    1.96 +            vl1e++;
    1.97          }
    1.98  
    1.99          pgt_updates->ptr = 
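
Taken in isolation, the builder-side pattern above is: map the new page-table frame into the tool's own address space, zero it there (replacing the deleted clear_domain_page() helper), and hand Xen the mapped virtual address of the entry to fill. A condensed sketch, assuming the tools' map_pfn(), l1_pgentry_t, l1_table_offset() and L1_PROT behave as in xi_build.c; the function name is illustrative and the unmap/error paths are elided.

    static int point_request_at_l1e(page_update_request_t *req,
                                    unsigned long l1_frame_pfn, /* new L1 frame  */
                                    unsigned long map_va,       /* VA it maps    */
                                    unsigned long target_pfn)   /* frame to map  */
    {
        l1_pgentry_t *vl1tab, *vl1e;

        /* Map and zero the page-table frame from the builder itself.    */
        if ( (vl1tab = map_pfn(l1_frame_pfn)) == NULL )
            return -1;
        memset(vl1tab, 0, PAGE_SIZE);

        /* Point the request at the *virtual* address of the entry.      */
        vl1e = vl1tab + l1_table_offset(map_va);
        req->ptr = (unsigned long)vl1e;
        req->val = (target_pfn << PAGE_SHIFT) | L1_PROT;
        return 0;
    }
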
     2.1 --- a/xen/common/memory.c	Tue Oct 07 12:33:03 2003 +0000
     2.2 +++ b/xen/common/memory.c	Tue Oct 07 16:18:34 2003 +0000
     2.3 @@ -159,8 +159,8 @@ static void put_l1_table(unsigned long p
     2.4  static void put_page(unsigned long page_nr, int writeable);
     2.5  static int dec_page_refcnt(unsigned long page_nr, unsigned int type);
     2.6  
     2.7 -static int mod_l2_entry(unsigned long, l2_pgentry_t);
     2.8 -static int mod_l1_entry(unsigned long, l1_pgentry_t);
     2.9 +static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t);
    2.10 +static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
    2.11  
    2.12  /* frame table size and its size in pages */
    2.13  frame_table_t * frame_table;
    2.14 @@ -524,12 +524,9 @@ static void put_page(unsigned long page_
    2.15  }
    2.16  
    2.17  
    2.18 -static int mod_l2_entry(unsigned long pa, l2_pgentry_t new_l2_entry)
    2.19 +static int mod_l2_entry(l2_pgentry_t *p_l2_entry, l2_pgentry_t new_l2_entry)
    2.20  {
    2.21 -    l2_pgentry_t *p_l2_entry, old_l2_entry;
    2.22 -
    2.23 -    p_l2_entry = map_domain_mem(pa);
    2.24 -    old_l2_entry = *p_l2_entry;
    2.25 +    l2_pgentry_t old_l2_entry = *p_l2_entry;
    2.26  
    2.27      if ( (((unsigned long)p_l2_entry & (PAGE_SIZE-1)) >> 2) >=
    2.28           DOMAIN_ENTRIES_PER_L2_PAGETABLE )
    2.29 @@ -558,9 +555,14 @@ static int mod_l2_entry(unsigned long pa
    2.30              }
    2.31              
    2.32              /* Assume we're mapping an L1 table, falling back to twisted L2. */
    2.33 -            if ( get_l1_table(l2_pgentry_to_pagenr(new_l2_entry)) &&
    2.34 -                 get_twisted_l2_table(pa >> PAGE_SHIFT, new_l2_entry) )
    2.35 -                goto fail;
    2.36 +            if ( unlikely(get_l1_table(l2_pgentry_to_pagenr(new_l2_entry))) )
    2.37 +            {
    2.38 +                /* NB. No need to sanity-check the VA: done already. */
    2.39 +                unsigned long l1e = l1_pgentry_val(
    2.40 +                    linear_pg_table[(unsigned long)p_l2_entry >> PAGE_SHIFT]);
    2.41 +                if ( get_twisted_l2_table(l1e >> PAGE_SHIFT, new_l2_entry) )
    2.42 +                    goto fail;
    2.43 +            }
    2.44          } 
    2.45      }
    2.46      else if ( (l2_pgentry_val(old_l2_entry) & _PAGE_PRESENT) )
    2.47 @@ -569,21 +571,16 @@ static int mod_l2_entry(unsigned long pa
    2.48      }
    2.49      
    2.50      *p_l2_entry = new_l2_entry;
    2.51 -    unmap_domain_mem(p_l2_entry);
    2.52      return 0;
    2.53  
    2.54   fail:
    2.55 -    unmap_domain_mem(p_l2_entry);
    2.56      return -1;
    2.57  }
    2.58  
    2.59  
    2.60 -static int mod_l1_entry(unsigned long pa, l1_pgentry_t new_l1_entry)
    2.61 +static int mod_l1_entry(l1_pgentry_t *p_l1_entry, l1_pgentry_t new_l1_entry)
    2.62  {
    2.63 -    l1_pgentry_t *p_l1_entry, old_l1_entry;
    2.64 -
    2.65 -    p_l1_entry = map_domain_mem(pa);
    2.66 -    old_l1_entry = *p_l1_entry;
    2.67 +    l1_pgentry_t old_l1_entry = *p_l1_entry;
    2.68  
    2.69      if ( (l1_pgentry_val(new_l1_entry) & _PAGE_PRESENT) )
    2.70      {
    2.71 @@ -622,11 +619,9 @@ static int mod_l1_entry(unsigned long pa
    2.72      }
    2.73  
    2.74      *p_l1_entry = new_l1_entry;
    2.75 -    unmap_domain_mem(p_l1_entry);
    2.76      return 0;
    2.77  
    2.78   fail:
    2.79 -    unmap_domain_mem(p_l1_entry);
    2.80      return -1;
    2.81  }
    2.82  
    2.83 @@ -754,10 +749,14 @@ static int do_extended_command(unsigned 
    2.84  int do_process_page_updates(page_update_request_t *ureqs, int count)
    2.85  {
    2.86      page_update_request_t req;
    2.87 -    unsigned long flags, pfn, *ptr;
    2.88 +    unsigned long flags, pfn, l1e;
    2.89      struct pfn_info *page;
    2.90      int err = 0, i;
    2.91      unsigned int cmd;
    2.92 +    unsigned long cr0 = read_cr0();
    2.93 +
    2.94 +    /* Clear the WP bit so that we can write even read-only page mappings. */
    2.95 +    write_cr0(cr0 & ~X86_CR0_WP);
    2.96  
    2.97      for ( i = 0; i < count; i++ )
    2.98      {
    2.99 @@ -767,26 +766,43 @@ int do_process_page_updates(page_update_
   2.100          } 
   2.101  
   2.102          cmd = req.ptr & (sizeof(l1_pgentry_t)-1);
   2.103 -
   2.104 -        /* All normal commands must have 'ptr' in range. */
   2.105          pfn = req.ptr >> PAGE_SHIFT;
   2.106 -        if ( (pfn >= max_page) && (cmd != PGREQ_EXTENDED_COMMAND) )
   2.107 -        {
   2.108 -            MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
   2.109 -            kill_domain_with_errmsg("Page update request out of range");
   2.110 -        }
   2.111  
   2.112          err = 1;
   2.113  
   2.114 +        spin_lock_irq(&current->page_lock);
   2.115 +
   2.116 +        /* Get the page-frame number that a non-extended command references. */
   2.117 +        if ( likely(cmd != PGREQ_EXTENDED_COMMAND) )
   2.118 +        {
   2.119 +            if ( likely(cmd != PGREQ_MPT_UPDATE) )
   2.120 +            {
   2.121 +                /* Need to use 'get_user' since the VA's PGD may be absent. */
   2.122 +                __get_user(l1e, (unsigned long *)(linear_pg_table+pfn));
   2.123 +                /* Now check that the VA's PTE isn't absent. */
   2.124 +                if ( !(l1e & _PAGE_PRESENT) )
   2.125 +                {
   2.126 +                    MEM_LOG("L1E n.p. at VA %08lx (%08lx)", req.ptr&~3, l1e);
   2.127 +                    goto unlock;
   2.128 +                }
   2.129 +                /* Finally, get the underlying machine address. */
   2.130 +                pfn = l1e >> PAGE_SHIFT;
   2.131 +            }
   2.132 +            else if ( pfn >= max_page )
   2.133 +            {
   2.134 +                MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
   2.135 +                goto unlock;
   2.136 +            }
   2.137 +        }
   2.138 +
   2.139          /* Least significant bits of 'ptr' demux the operation type. */
   2.140 -        spin_lock_irq(&current->page_lock);
   2.141          switch ( cmd )
   2.142          {
   2.143              /*
   2.144 -             * PGREQ_NORMAL: Normal update to any level of page table.
   2.145 +             * PGREQ_NORMAL_UPDATE: Normal update to any level of page table.
   2.146               */
   2.147 -        case PGREQ_NORMAL:
   2.148 -            page = frame_table + pfn;
   2.149 +        case PGREQ_NORMAL_UPDATE:
   2.150 +            page  = frame_table + pfn;
   2.151              flags = page->flags;
   2.152  
   2.153              if ( DOMAIN_OKAY(flags) )
   2.154 @@ -794,16 +810,16 @@ int do_process_page_updates(page_update_
   2.155                  switch ( (flags & PG_type_mask) )
   2.156                  {
   2.157                  case PGT_l1_page_table: 
   2.158 -                    err = mod_l1_entry(req.ptr, mk_l1_pgentry(req.val)); 
   2.159 +                    err = mod_l1_entry((l1_pgentry_t *)req.ptr, 
   2.160 +                                       mk_l1_pgentry(req.val)); 
   2.161                      break;
   2.162                  case PGT_l2_page_table: 
   2.163 -                    err = mod_l2_entry(req.ptr, mk_l2_pgentry(req.val)); 
   2.164 +                    err = mod_l2_entry((l2_pgentry_t *)req.ptr, 
   2.165 +                                       mk_l2_pgentry(req.val)); 
   2.166                      break;
   2.167                  default:
   2.168                      MEM_LOG("Update to non-pt page %08lx", req.ptr);
   2.169 -                    ptr = map_domain_mem(req.ptr);
   2.170 -                    *ptr = req.val;
   2.171 -                    unmap_domain_mem(ptr);
   2.172 +                    *(unsigned long *)req.ptr = req.val;
   2.173                      err = 0;
   2.174                      break;
   2.175                  }
   2.176 @@ -815,6 +831,19 @@ int do_process_page_updates(page_update_
   2.177              }
   2.178              break;
   2.179  
   2.180 +        case PGREQ_UNCHECKED_UPDATE:
   2.181 +            req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
   2.182 +            if ( IS_PRIV(current) )
   2.183 +            {
   2.184 +                *(unsigned long *)req.ptr = req.val;
   2.185 +                err = 0;
   2.186 +            }
   2.187 +            else
   2.188 +            {
   2.189 +                MEM_LOG("Bad unchecked update attempt");
   2.190 +            }
   2.191 +            break;
   2.192 +            
   2.193          case PGREQ_MPT_UPDATE:
   2.194              page = frame_table + pfn;
   2.195              if ( DOMAIN_OKAY(page->flags) )
   2.196 @@ -838,31 +867,16 @@ int do_process_page_updates(page_update_
   2.197              err = do_extended_command(req.ptr, req.val);
   2.198              break;
   2.199  
   2.200 -        case PGREQ_UNCHECKED_UPDATE:
   2.201 -            req.ptr &= ~(sizeof(l1_pgentry_t) - 1);
   2.202 -            if ( current->domain == 0 )
   2.203 -            {
   2.204 -                ptr = map_domain_mem(req.ptr);
   2.205 -                *ptr = req.val;
   2.206 -                unmap_domain_mem(ptr);
   2.207 -                err = 0;
   2.208 -            }
   2.209 -            else
   2.210 -            {
   2.211 -                MEM_LOG("Bad unchecked update attempt");
   2.212 -            }
   2.213 -            break;
   2.214 -            
   2.215          default:
   2.216              MEM_LOG("Invalid page update command %08lx", req.ptr);
   2.217              break;
   2.218          }
   2.219 +
   2.220 +    unlock:
   2.221          spin_unlock_irq(&current->page_lock);
   2.222  
   2.223          if ( err )
   2.224 -        {
   2.225              kill_domain_with_errmsg("Illegal page update request");
   2.226 -        }
   2.227  
   2.228          ureqs++;
   2.229      }
   2.230 @@ -874,6 +888,9 @@ int do_process_page_updates(page_update_
   2.231  
   2.232      }
   2.233  
   2.234 +    /* Restore the WP bit before returning to guest. */
   2.235 +    write_cr0(cr0);
   2.236 +
   2.237      return 0;
   2.238  }
   2.239  
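
On the hypervisor side, the ordering shown above is the important part: do_process_page_updates() first clears CR0.WP so that page-table pages mapped read-only can still be written, then (for checked and unchecked updates) resolves the request's VA to a machine frame through the linear page table before dispatching on the command bits, and finally restores WP. A compressed sketch of that flow, assuming the patch's read_cr0()/write_cr0(), linear_pg_table and X86_CR0_WP definitions; the function names below are illustrative, and the real code toggles WP once around the whole request batch rather than per request.

    /* Illustrative only; memory.c open-codes this inside its request loop. */
    static int resolve_req_frame(unsigned long req_ptr, unsigned long *pfn_out)
    {
        unsigned long pfn = req_ptr >> PAGE_SHIFT, l1e = 0;

        /* The VA's PGD may be absent, so read the linear-page-table slot
         * with a fault-tolerant accessor rather than a plain dereference. */
        if ( __get_user(l1e, (unsigned long *)(linear_pg_table + pfn)) ||
             !(l1e & _PAGE_PRESENT) )
            return -1;                      /* no mapping behind this VA   */

        *pfn_out = l1e >> PAGE_SHIFT;       /* underlying machine frame    */
        return 0;
    }

    static void process_one_request(page_update_request_t *req)
    {
        unsigned long cr0 = read_cr0(), pfn;

        write_cr0(cr0 & ~X86_CR0_WP);       /* write through RO PT mappings */
        if ( resolve_req_frame(req->ptr, &pfn) == 0 )
        {
            /* ... dispatch on (req->ptr & 3) as in the switch above ...   */
        }
        write_cr0(cr0);                     /* restore WP before returning  */
    }
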
     3.1 --- a/xen/include/asm-i386/page.h	Tue Oct 07 12:33:03 2003 +0000
     3.2 +++ b/xen/include/asm-i386/page.h	Tue Oct 07 16:18:34 2003 +0000
     3.3 @@ -87,6 +87,8 @@ typedef struct { unsigned long pt_lo; } 
     3.4  #include <asm/bitops.h>
     3.5  #include <asm/flushtlb.h>
     3.6  
     3.7 +#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START)
     3.8 +
     3.9  extern l2_pgentry_t idle0_pg_table[ENTRIES_PER_L2_PAGETABLE];
    3.10  extern l2_pgentry_t *idle_pg_table[NR_CPUS];
    3.11  extern void paging_init(void);
     4.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Tue Oct 07 12:33:03 2003 +0000
     4.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Tue Oct 07 16:18:34 2003 +0000
     4.3 @@ -114,33 +114,33 @@
     4.4   * 
     4.5   * PGREQ_XXX: specified in least 2 bits of 'ptr' field. These bits are masked
     4.6   *  off to get the real 'ptr' value.
     4.7 - * All requests specify relevent machine address in 'ptr'.
     4.8 + * All requests specify relevent address in 'ptr'. This is either a
     4.9 + * machine/physical address (PA), or linear/virtual address (VA).
    4.10   * Normal requests specify update value in 'value'.
    4.11   * Extended requests specify command in least 8 bits of 'value'. These bits
    4.12   *  are masked off to get the real 'val' value. Except for PGEXT_SET_LDT 
    4.13   *  which shifts the least bits out.
    4.14   */
    4.15  /* A normal page-table update request. */
    4.16 -#define PGREQ_NORMAL		0 /* does a checked form of '*ptr = val'   */
    4.17 +#define PGREQ_NORMAL_UPDATE     0 /* checked '*ptr = val'. ptr is VA.      */
    4.18 +/* DOM0 can make entirely unchecked updates which do not affect refcnts. */
    4.19 +#define PGREQ_UNCHECKED_UPDATE  1 /* unchecked '*ptr = val'. ptr is VA.    */
    4.20  /* Update an entry in the machine->physical mapping table. */
    4.21 -#define PGREQ_MPT_UPDATE	1 /* ptr = frame to modify table entry for */
    4.22 +#define PGREQ_MPT_UPDATE        2 /* ptr = PA of frame to modify entry for */
    4.23  /* An extended command. */
    4.24 -#define PGREQ_EXTENDED_COMMAND	2 /* least 8 bits of val demux further     */
    4.25 -/* DOM0 can make entirely unchecked updates which do not affect refcnts. */
    4.26 -#define PGREQ_UNCHECKED_UPDATE	3 /* does an unchecked '*ptr = val'        */
    4.27 +#define PGREQ_EXTENDED_COMMAND  3 /* least 8 bits of val demux further     */
    4.28  /* Extended commands: */
    4.29 -#define PGEXT_PIN_L1_TABLE	0 /* ptr = frame to pin                    */
    4.30 -#define PGEXT_PIN_L2_TABLE	1 /* ptr = frame to pin                    */
    4.31 -#define PGEXT_PIN_L3_TABLE	2 /* ptr = frame to pin                    */
    4.32 -#define PGEXT_PIN_L4_TABLE	3 /* ptr = frame to pin                    */
    4.33 -#define PGEXT_UNPIN_TABLE	4 /* ptr = frame to unpin                  */
    4.34 -#define PGEXT_NEW_BASEPTR	5 /* ptr = new pagetable base to install   */
    4.35 -#define PGEXT_TLB_FLUSH		6 /* ptr = NULL                            */
    4.36 -#define PGEXT_INVLPG		7 /* ptr = NULL ; val = page to invalidate */
    4.37 -#define PGEXT_SET_LDT           8 /* ptr = linear address; val = # entries */
    4.38 -#define PGEXT_CMD_MASK	      255
    4.39 -#define PGEXT_CMD_SHIFT		8
    4.40 -
    4.41 +#define PGEXT_PIN_L1_TABLE      0 /* ptr = PA of frame to pin              */
    4.42 +#define PGEXT_PIN_L2_TABLE      1 /* ptr = PA of frame to pin              */
    4.43 +#define PGEXT_PIN_L3_TABLE      2 /* ptr = PA of frame to pin              */
    4.44 +#define PGEXT_PIN_L4_TABLE      3 /* ptr = PA of frame to pin              */
    4.45 +#define PGEXT_UNPIN_TABLE       4 /* ptr = PA of frame to unpin            */
    4.46 +#define PGEXT_NEW_BASEPTR       5 /* ptr = PA of new pagetable base        */
    4.47 +#define PGEXT_TLB_FLUSH         6 /* ptr = NULL                            */
    4.48 +#define PGEXT_INVLPG            7 /* ptr = NULL ; val = page to invalidate */
    4.49 +#define PGEXT_SET_LDT           8 /* ptr = VA of table; val = # entries    */
    4.50 +#define PGEXT_CMD_MASK        255
    4.51 +#define PGEXT_CMD_SHIFT         8
    4.52  
    4.53  /*
    4.54   * Master "switch" for enabling/disabling event delivery.
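
Because a PTE pointer is at least sizeof(l1_pgentry_t)-aligned, the command can ride in the two least-significant bits of 'ptr' without losing address information. Illustrative encode/decode helpers (these macros are not part of the interface; both sides open-code the masking):

    #define PGREQ_CMD_BITS   (sizeof(l1_pgentry_t) - 1)              /* 0x3 */

    #define PGREQ_ENCODE(_va, _cmd)  (((unsigned long)(_va)) | (_cmd))
    #define PGREQ_DECODE_CMD(_ptr)   ((_ptr) &  PGREQ_CMD_BITS)
    #define PGREQ_DECODE_PTR(_ptr)   ((_ptr) & ~PGREQ_CMD_BITS)

    /* e.g. req.ptr = PGREQ_ENCODE(ptep, PGREQ_UNCHECKED_UPDATE); */
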
     5.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/balloon/balloon.c	Tue Oct 07 12:33:03 2003 +0000
     5.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/balloon/balloon.c	Tue Oct 07 16:18:34 2003 +0000
     5.3 @@ -41,9 +41,8 @@ typedef struct user_balloon_op {
     5.4  static struct proc_dir_entry *balloon_pde;
     5.5  unsigned long credit;
     5.6  
     5.7 -static inline unsigned long get_ppte(unsigned long addr)
     5.8 +static inline pte_t *get_ptep(unsigned long addr)
     5.9  {
    5.10 -    unsigned long ppte;
    5.11      pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
    5.12      pgd = pgd_offset_k(addr);
    5.13  
    5.14 @@ -53,9 +52,8 @@ static inline unsigned long get_ppte(uns
    5.15      if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
    5.16  
    5.17      ptep = pte_offset(pmd, addr);
    5.18 -    ppte = (unsigned long)__pa(ptep);
    5.19  
    5.20 -    return ppte;
    5.21 +    return ptep;
    5.22  }
    5.23  
    5.24  /* main function for relinquishing bit of memory */
    5.25 @@ -100,7 +98,7 @@ static unsigned long inflate_balloon(uns
    5.26      {
    5.27          curraddr = *currp;
    5.28          *currp = virt_to_machine(*currp) >> PAGE_SHIFT;
    5.29 -        queue_l1_entry_update(get_ppte(curraddr) | PGREQ_NORMAL, 0);
    5.30 +        queue_l1_entry_update(get_ptep(curraddr), 0);
    5.31          phys_to_machine_mapping[__pa(curraddr) >> PAGE_SHIFT] = DEAD;
    5.32          currp++;
    5.33      }
    5.34 @@ -149,11 +147,11 @@ static unsigned long process_new_pages(u
    5.35      {
    5.36          if ( phys_to_machine_mapping[i] == DEAD )
    5.37          {
    5.38 -            printk(KERN_ALERT "bd240 debug: proc_new_pages: i %lx, mpt %lx, %lx\n", i, i << PAGE_SHIFT, get_ppte((unsigned long)__va(i << PAGE_SHIFT)) | PGREQ_NORMAL);
    5.39              phys_to_machine_mapping[i] = *curr;
    5.40 -            queue_l1_entry_update((i << PAGE_SHIFT) | PGREQ_MPT_UPDATE, i);
    5.41              queue_l1_entry_update(
    5.42 -                get_ppte((unsigned long)__va(i << PAGE_SHIFT)) | PGREQ_NORMAL, 
    5.43 +                (pte_t *)((i << PAGE_SHIFT) | PGREQ_MPT_UPDATE), i);
    5.44 +            queue_l1_entry_update(
    5.45 +                get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
    5.46                  ((*curr) << PAGE_SHIFT) | L1_PROT);
    5.47  
    5.48              *curr = (unsigned long)__va(i << PAGE_SHIFT);
     6.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/mm/hypervisor.c	Tue Oct 07 12:33:03 2003 +0000
     6.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/mm/hypervisor.c	Tue Oct 07 16:18:34 2003 +0000
     6.3 @@ -40,12 +40,12 @@ static void DEBUG_allow_pt_reads(void)
     6.4          pte = update_debug_queue[i].ptep;
     6.5          if ( pte == NULL ) continue;
     6.6          update_debug_queue[i].ptep = NULL;
     6.7 -        update.ptr = phys_to_machine(__pa(pte));
     6.8 +        update.ptr = pte;
     6.9          update.val = update_debug_queue[i].pteval;
    6.10          HYPERVISOR_pt_update(&update, 1);
    6.11      }
    6.12  }
    6.13 -static void DEBUG_disallow_pt_read(unsigned long pa)
    6.14 +static void DEBUG_disallow_pt_read(unsigned long va)
    6.15  {
    6.16      pte_t *pte;
    6.17      pmd_t *pmd;
    6.18 @@ -56,11 +56,10 @@ static void DEBUG_disallow_pt_read(unsig
    6.19       * That's okay -- it'll get fixed up in the fault handler.
    6.20       */
    6.21      page_update_request_t update;
    6.22 -    unsigned long va = (unsigned long)__va(pa);
    6.23      pgd = pgd_offset_k(va);
    6.24      pmd = pmd_offset(pgd, va);
    6.25      pte = pte_offset(pmd, va);
    6.26 -    update.ptr = phys_to_machine(__pa(pte));
    6.27 +    update.ptr = pte;
    6.28      pteval = *(unsigned long *)pte;
    6.29      update.val = pteval & ~_PAGE_PRESENT;
    6.30      HYPERVISOR_pt_update(&update, 1);
    6.31 @@ -128,24 +127,24 @@ static inline void increment_index(void)
    6.32      if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
    6.33  }
    6.34  
    6.35 -void queue_l1_entry_update(unsigned long ptr, unsigned long val)
    6.36 +void queue_l1_entry_update(pte_t *ptr, unsigned long val)
    6.37  {
    6.38      unsigned long flags;
    6.39      spin_lock_irqsave(&update_lock, flags);
    6.40  #if PT_UPDATE_DEBUG > 0
    6.41 -    DEBUG_disallow_pt_read(ptr);
    6.42 +    DEBUG_disallow_pt_read((unsigned long)ptr);
    6.43  #endif
    6.44 -    update_queue[idx].ptr = phys_to_machine(ptr);
    6.45 +    update_queue[idx].ptr = (unsigned long)ptr;
    6.46      update_queue[idx].val = val;
    6.47      increment_index();
    6.48      spin_unlock_irqrestore(&update_lock, flags);
    6.49  }
    6.50  
    6.51 -void queue_l2_entry_update(unsigned long ptr, unsigned long val)
    6.52 +void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
    6.53  {
    6.54      unsigned long flags;
    6.55      spin_lock_irqsave(&update_lock, flags);
    6.56 -    update_queue[idx].ptr = phys_to_machine(ptr);
    6.57 +    update_queue[idx].ptr = (unsigned long)ptr;
    6.58      update_queue[idx].val = val;
    6.59      increment_index();
    6.60      spin_unlock_irqrestore(&update_lock, flags);
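
For guest callers the visible change is simply that the queueing helpers now take pointer types and queue the kernel virtual address as-is; the __pa()/phys_to_machine() step disappears because Xen resolves the VA itself. A minimal usage sketch (queue_l1_entry_update() as declared in asm-xeno/hypervisor.h; the wrapper name is illustrative):

    /* Usage sketch, guest side. */
    static inline void example_clear_pte(pte_t *ptep)
    {
        /* Pass the PTE's kernel virtual address straight through. */
        queue_l1_entry_update(ptep, 0);
    }
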
     7.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c	Tue Oct 07 12:33:03 2003 +0000
     7.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c	Tue Oct 07 16:18:34 2003 +0000
     7.3 @@ -102,7 +102,7 @@ static inline void set_pte_phys (unsigne
     7.4  	pmd_t *pmd;
     7.5  	pte_t *pte;
     7.6  
     7.7 -    pgd = init_mm.pgd + __pgd_offset(vaddr);
     7.8 +	pgd = init_mm.pgd + __pgd_offset(vaddr);
     7.9  	if (pgd_none(*pgd)) {
    7.10  		printk("PAE BUG #00!\n");
    7.11  		return;
    7.12 @@ -120,7 +120,7 @@ static inline void set_pte_phys (unsigne
    7.13  	pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
    7.14  
    7.15  	/* We queue directly, avoiding hidden phys->machine translation. */
    7.16 -	queue_l1_entry_update(__pa(pte), phys | pgprot_val(prot));
    7.17 +	queue_l1_entry_update(pte, phys | pgprot_val(prot));
    7.18  
    7.19  	/*
    7.20  	 * It's enough to flush this one mapping.
    7.21 @@ -174,7 +174,7 @@ static void __init fixrange_init (unsign
    7.22  				kpgd = pgd_offset_k((unsigned long)pte);
    7.23                  		kpmd = pmd_offset(kpgd, (unsigned long)pte);
    7.24                  		kpte = pte_offset(kpmd, (unsigned long)pte);
    7.25 -                		queue_l1_entry_update(__pa(kpte),
    7.26 +                		queue_l1_entry_update(kpte,
    7.27                                        (*(unsigned long *)kpte)&~_PAGE_RW);
    7.28  
    7.29  				set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
     8.1 --- a/xenolinux-2.4.22-sparse/arch/xeno/mm/ioremap.c	Tue Oct 07 12:33:03 2003 +0000
     8.2 +++ b/xenolinux-2.4.22-sparse/arch/xeno/mm/ioremap.c	Tue Oct 07 16:18:34 2003 +0000
     8.3 @@ -20,8 +20,7 @@
     8.4  
     8.5  #if defined(CONFIG_XENO_PRIV)
     8.6  
     8.7 -#define direct_set_pte(pteptr, pteval) \
     8.8 -  queue_l1_entry_update(__pa(pteptr)|PGREQ_UNCHECKED_UPDATE, (pteval).pte_low)
     8.9 +#define direct_set_pte(_p, _v) queue_unchecked_pt_update((_p), (_v).pte_low)
    8.10  #define __direct_pte(x) ((pte_t) { (x) } )
    8.11  #define __direct_mk_pte(page_nr,pgprot) \
    8.12    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
     9.1 --- a/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h	Tue Oct 07 12:33:03 2003 +0000
     9.2 +++ b/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h	Tue Oct 07 16:18:34 2003 +0000
     9.3 @@ -12,6 +12,7 @@
     9.4  #include <linux/types.h>
     9.5  #include <asm/hypervisor-ifs/hypervisor-if.h>
     9.6  #include <asm/ptrace.h>
     9.7 +#include <asm/page.h>
     9.8  
     9.9  /* arch/xeno/kernel/setup.c */
    9.10  union start_info_union
    9.11 @@ -34,8 +35,8 @@ void do_hypervisor_callback(struct pt_re
    9.12  
    9.13  extern unsigned int pt_update_queue_idx;
    9.14  
    9.15 -void queue_l1_entry_update(unsigned long ptr, unsigned long val);
    9.16 -void queue_l2_entry_update(unsigned long ptr, unsigned long val);
    9.17 +void queue_l1_entry_update(pte_t *ptr, unsigned long val);
    9.18 +void queue_l2_entry_update(pmd_t *ptr, unsigned long val);
    9.19  void queue_pt_switch(unsigned long ptr);
    9.20  void queue_tlb_flush(void);
    9.21  void queue_invlpg(unsigned long ptr);
    9.22 @@ -46,9 +47,13 @@ void queue_pte_unpin(unsigned long ptr);
    9.23  void queue_set_ldt(unsigned long ptr, unsigned long bytes);
    9.24  #define PT_UPDATE_DEBUG 0
    9.25  
    9.26 +#define queue_unchecked_pt_update(_p,_v) queue_l1_entry_update( \
    9.27 +  (pte_t *)((unsigned long)(_p)|PGREQ_UNCHECKED_UPDATE),(_v))
    9.28 +
    9.29  #if PT_UPDATE_DEBUG > 0
    9.30  typedef struct {
    9.31 -    unsigned long ptr, val, pteval;
    9.32 +    void *ptr;
    9.33 +    unsigned long val, pteval;
    9.34      void *ptep;
    9.35      int line; char *file;
    9.36  } page_update_debug_t;
    9.37 @@ -78,7 +83,7 @@ extern page_update_debug_t update_debug_
    9.38   update_debug_queue[pt_update_queue_idx].line = __LINE__;         \
    9.39   update_debug_queue[pt_update_queue_idx].file = __FILE__;         \
    9.40   printk("L1 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
    9.41 -        phys_to_machine(_p), *(unsigned long *)__va(_p),          \
    9.42 +        (_p), pte_val(_p),                                        \
    9.43          (unsigned long)(_v));                                     \
    9.44   queue_l1_entry_update((_p),(_v));                                \
    9.45  })
    9.46 @@ -88,7 +93,7 @@ extern page_update_debug_t update_debug_
    9.47   update_debug_queue[pt_update_queue_idx].line = __LINE__;         \
    9.48   update_debug_queue[pt_update_queue_idx].file = __FILE__;         \
    9.49   printk("L2 %s %d: %08lx (%08lx -> %08lx)\n", __FILE__, __LINE__, \
    9.50 -        phys_to_machine(_p), *(unsigned long *)__va(_p),          \
    9.51 +        (_p), pmd_val(_p),                                        \
    9.52          (unsigned long)(_v));                                     \
    9.53   queue_l2_entry_update((_p),(_v));                                \
    9.54  })
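
The queue_unchecked_pt_update() macro above reuses the same queue by tagging the pointer with PGREQ_UNCHECKED_UPDATE before it is queued; Xen honours such requests only from a privileged domain (the IS_PRIV() check in memory.c). Roughly, a direct_set_pte() call from ioremap.c reduces to something like the following (illustrative expansion, not verbatim):

    /* Illustrative expansion of queue_unchecked_pt_update(ptep, val). */
    static inline void example_unchecked_set(pte_t *ptep, unsigned long val)
    {
        /* Tag the VA with the command; Xen strips the low two bits. */
        queue_l1_entry_update(
            (pte_t *)((unsigned long)ptep | PGREQ_UNCHECKED_UPDATE), val);
    }
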
    10.1 --- a/xenolinux-2.4.22-sparse/include/asm-xeno/page.h	Tue Oct 07 12:33:03 2003 +0000
    10.2 +++ b/xenolinux-2.4.22-sparse/include/asm-xeno/page.h	Tue Oct 07 16:18:34 2003 +0000
    10.3 @@ -10,7 +10,6 @@
    10.4  #ifndef __ASSEMBLY__
    10.5  
    10.6  #include <linux/config.h>
    10.7 -#include <asm/hypervisor.h>
    10.8  
    10.9  #ifdef CONFIG_X86_USE_3DNOW
   10.10  
    11.1 --- a/xenolinux-2.4.22-sparse/include/asm-xeno/pgtable-2level.h	Tue Oct 07 12:33:03 2003 +0000
    11.2 +++ b/xenolinux-2.4.22-sparse/include/asm-xeno/pgtable-2level.h	Tue Oct 07 16:18:34 2003 +0000
    11.3 @@ -34,9 +34,9 @@ static inline int pgd_bad(pgd_t pgd)		{ 
    11.4  static inline int pgd_present(pgd_t pgd)	{ return 1; }
    11.5  #define pgd_clear(xp)				do { } while (0)
    11.6  
    11.7 -#define set_pte(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
    11.8 -#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(__pa(pteptr), (pteval).pte_low)
    11.9 -#define set_pmd(pmdptr, pmdval) queue_l2_entry_update(__pa(pmdptr), (pmdval).pmd)
   11.10 +#define set_pte(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
   11.11 +#define set_pte_atomic(pteptr, pteval) queue_l1_entry_update(pteptr, (pteval).pte_low)
   11.12 +#define set_pmd(pmdptr, pmdval) queue_l2_entry_update((pmdptr), (pmdval).pmd)
   11.13  #define set_pgd(pgdptr, pgdval) ((void)0)
   11.14  
   11.15  #define pgd_page(pgd) \
   11.16 @@ -59,7 +59,7 @@ static inline pmd_t * pmd_offset(pgd_t *
   11.17  static inline pte_t ptep_get_and_clear(pte_t *xp)
   11.18  {
   11.19      pte_t pte = *xp;
   11.20 -    queue_l1_entry_update(__pa(xp), 0);
   11.21 +    queue_l1_entry_update(xp, 0);
   11.22      return pte;
   11.23  }
   11.24  
    12.1 --- a/xenolinux-2.4.22-sparse/include/asm-xeno/pgtable.h	Tue Oct 07 12:33:03 2003 +0000
    12.2 +++ b/xenolinux-2.4.22-sparse/include/asm-xeno/pgtable.h	Tue Oct 07 16:18:34 2003 +0000
    12.3 @@ -177,7 +177,7 @@ extern void * high_memory;
    12.4  #define __S111	PAGE_SHARED
    12.5  
    12.6  #define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
    12.7 -#define pte_clear(xp)	queue_l1_entry_update(__pa(xp), 0)
    12.8 +#define pte_clear(xp)	queue_l1_entry_update(xp, 0)
    12.9  
   12.10  #define pmd_none(x)	(!(x).pmd)
   12.11  #define pmd_present(x)	((x).pmd & _PAGE_PRESENT)
   12.12 @@ -214,27 +214,27 @@ static inline int ptep_test_and_clear_di
   12.13  {
   12.14      unsigned long pteval = *(unsigned long *)ptep;
   12.15      int ret = pteval & _PAGE_DIRTY;
   12.16 -    if ( ret ) queue_l1_entry_update(__pa(ptep), pteval & ~_PAGE_DIRTY);
   12.17 +    if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_DIRTY);
   12.18      return ret;
   12.19  }
   12.20  static inline  int ptep_test_and_clear_young(pte_t *ptep)
   12.21  {
   12.22      unsigned long pteval = *(unsigned long *)ptep;
   12.23      int ret = pteval & _PAGE_ACCESSED;
   12.24 -    if ( ret ) queue_l1_entry_update(__pa(ptep), pteval & ~_PAGE_ACCESSED);
   12.25 +    if ( ret ) queue_l1_entry_update(ptep, pteval & ~_PAGE_ACCESSED);
   12.26      return ret;
   12.27  }
   12.28  static inline void ptep_set_wrprotect(pte_t *ptep)
   12.29  {
   12.30      unsigned long pteval = *(unsigned long *)ptep;
   12.31      if ( (pteval & _PAGE_RW) )
   12.32 -        queue_l1_entry_update(__pa(ptep), pteval & ~_PAGE_RW);
   12.33 +        queue_l1_entry_update(ptep, pteval & ~_PAGE_RW);
   12.34  }
   12.35  static inline void ptep_mkdirty(pte_t *ptep)
   12.36  {
   12.37      unsigned long pteval = *(unsigned long *)ptep;
   12.38      if ( !(pteval & _PAGE_DIRTY) )
   12.39 -        queue_l1_entry_update(__pa(ptep), pteval | _PAGE_DIRTY);
   12.40 +        queue_l1_entry_update(ptep, pteval | _PAGE_DIRTY);
   12.41  }
   12.42  
   12.43  /*
   12.44 @@ -299,7 +299,7 @@ static inline void __make_page_readonly(
   12.45      pgd_t *pgd = pgd_offset_k((unsigned long)va);
   12.46      pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
   12.47      pte_t *pte = pte_offset(pmd, (unsigned long)va);
   12.48 -    queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)&~_PAGE_RW);
   12.49 +    queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
   12.50  }
   12.51  
   12.52  static inline void __make_page_writeable(void *va)
   12.53 @@ -307,7 +307,7 @@ static inline void __make_page_writeable
   12.54      pgd_t *pgd = pgd_offset_k((unsigned long)va);
   12.55      pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
   12.56      pte_t *pte = pte_offset(pmd, (unsigned long)va);
   12.57 -    queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)|_PAGE_RW);
   12.58 +    queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
   12.59  }
   12.60  
   12.61  static inline void make_page_readonly(void *va)
   12.62 @@ -315,7 +315,7 @@ static inline void make_page_readonly(vo
   12.63      pgd_t *pgd = pgd_offset_k((unsigned long)va);
   12.64      pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
   12.65      pte_t *pte = pte_offset(pmd, (unsigned long)va);
   12.66 -    queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)&~_PAGE_RW);
   12.67 +    queue_l1_entry_update(pte, (*(unsigned long *)pte)&~_PAGE_RW);
   12.68      if ( (unsigned long)va >= VMALLOC_START )
   12.69          __make_page_readonly(machine_to_virt(
   12.70              *(unsigned long *)pte&PAGE_MASK));
   12.71 @@ -326,7 +326,7 @@ static inline void make_page_writeable(v
   12.72      pgd_t *pgd = pgd_offset_k((unsigned long)va);
   12.73      pmd_t *pmd = pmd_offset(pgd, (unsigned long)va);
   12.74      pte_t *pte = pte_offset(pmd, (unsigned long)va);
   12.75 -    queue_l1_entry_update(__pa(pte), (*(unsigned long *)pte)|_PAGE_RW);
   12.76 +    queue_l1_entry_update(pte, (*(unsigned long *)pte)|_PAGE_RW);
   12.77      if ( (unsigned long)va >= VMALLOC_START )
   12.78          __make_page_writeable(machine_to_virt(
   12.79              *(unsigned long *)pte&PAGE_MASK));
    13.1 --- a/xenolinux-2.4.22-sparse/mm/memory.c	Tue Oct 07 12:33:03 2003 +0000
    13.2 +++ b/xenolinux-2.4.22-sparse/mm/memory.c	Tue Oct 07 16:18:34 2003 +0000
    13.3 @@ -320,8 +320,7 @@ static inline int zap_pte_range(mmu_gath
    13.4  			struct page *page = pte_page(pte);
    13.5  #if defined(CONFIG_XENO_PRIV)
    13.6  			if (pte_io(pte)) {
    13.7 -				queue_l1_entry_update(
    13.8 -					__pa(ptep)|PGREQ_UNCHECKED_UPDATE, 0);
    13.9 +				queue_unchecked_pt_update(ptep, 0);
   13.10  				continue;
   13.11  			}
   13.12  #endif