ia64/xen-unstable

changeset 5250:0e97dc32ddf6

bitkeeper revision 1.1624 (429d7aa7Gb7U1ztIGbXeQ0gPWlG51g)

Rename some of the pagetable access macros:
l?e_create_* -> l?e_from_*
*_phys -> *_paddr
*_value -> *_intpte

l?e_add_flags, l?e_remove_flags, l?e_has_changed now take ptes by value
rather than by reference.

The pagetable hypercalls are fixed to use l?e_from_intpte() rather than
abusing l?e_from_paddr(), which munged the NX bit incorrectly on x86/64.
Thanks to Scott Parish for spotting this one.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Jun 01 09:06:47 2005 +0000 (2005-06-01)
parents c00fe904876e
children a247de7b1fb0
files xen/arch/x86/audit.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/traps.c xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/domain_page.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/page-2level.h xen/include/asm-x86/x86_32/page-3level.h xen/include/asm-x86/x86_64/page.h
line diff
     1.1 --- a/xen/arch/x86/audit.c	Tue May 31 23:04:23 2005 +0000
     1.2 +++ b/xen/arch/x86/audit.c	Wed Jun 01 09:06:47 2005 +0000
     1.3 @@ -408,9 +408,9 @@ int audit_adjust_pgtables(struct domain 
     1.4  
     1.5          for_each_exec_domain(d, ed)
     1.6          {
     1.7 -            if ( pagetable_get_phys(ed->arch.guest_table) )
     1.8 +            if ( pagetable_get_paddr(ed->arch.guest_table) )
     1.9                  adjust(&frame_table[pagetable_get_pfn(ed->arch.guest_table)], 1);
    1.10 -            if ( pagetable_get_phys(ed->arch.shadow_table) )
    1.11 +            if ( pagetable_get_paddr(ed->arch.shadow_table) )
    1.12                  adjust(&frame_table[pagetable_get_pfn(ed->arch.shadow_table)], 0);
    1.13              if ( ed->arch.monitor_shadow_ref )
    1.14                  adjust(&frame_table[ed->arch.monitor_shadow_ref], 0);
     2.1 --- a/xen/arch/x86/dom0_ops.c	Tue May 31 23:04:23 2005 +0000
     2.2 +++ b/xen/arch/x86/dom0_ops.c	Wed Jun 01 09:06:47 2005 +0000
     2.3 @@ -405,7 +405,7 @@ void arch_getdomaininfo_ctxt(
     2.4          c->flags |= VGCF_VMX_GUEST;
     2.5  #endif
     2.6  
     2.7 -    c->pt_base = pagetable_get_phys(ed->arch.guest_table);
     2.8 +    c->pt_base = pagetable_get_paddr(ed->arch.guest_table);
     2.9  
    2.10      c->vm_assist = ed->domain->vm_assist;
    2.11  }
     3.1 --- a/xen/arch/x86/domain.c	Tue May 31 23:04:23 2005 +0000
     3.2 +++ b/xen/arch/x86/domain.c	Wed Jun 01 09:06:47 2005 +0000
     3.3 @@ -250,7 +250,7 @@ void arch_do_createdomain(struct exec_do
     3.4                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
     3.5      ed->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
     3.6      ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
     3.7 -        l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     3.8 +        l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     3.9  
    3.10      ed->arch.guest_vtable  = __linear_l2_table;
    3.11      ed->arch.shadow_vtable = __shadow_linear_l2_table;
    3.12 @@ -262,12 +262,12 @@ void arch_do_createdomain(struct exec_do
    3.13      d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
    3.14      memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
    3.15      d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
    3.16 -        l2e_create_page(virt_to_page(d->arch.mm_perdomain_pt),
    3.17 +        l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt),
    3.18                          __PAGE_HYPERVISOR);
    3.19      d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
    3.20      memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
    3.21      d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
    3.22 -        l3e_create_page(virt_to_page(d->arch.mm_perdomain_l2),
    3.23 +        l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
    3.24                              __PAGE_HYPERVISOR);
    3.25  #endif
    3.26      
    3.27 @@ -288,7 +288,7 @@ void arch_do_boot_vcpu(struct exec_domai
    3.28      ed->arch.perdomain_ptes =
    3.29          d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
    3.30      ed->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] =
    3.31 -        l1e_create_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    3.32 +        l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    3.33  }
    3.34  
    3.35  #ifdef CONFIG_VMX
    3.36 @@ -460,7 +460,7 @@ int arch_set_info_guest(
    3.37          //      trust the VMX domain builder.  Xen should validate this
    3.38          //      page table, and/or build the table itself, or ???
    3.39          //
    3.40 -        if ( !pagetable_get_phys(d->arch.phys_table) )
    3.41 +        if ( !pagetable_get_paddr(d->arch.phys_table) )
    3.42              d->arch.phys_table = ed->arch.guest_table;
    3.43  
    3.44          if ( (error = vmx_final_setup_guest(ed, c)) )
    3.45 @@ -660,7 +660,7 @@ long do_switch_to_user(void)
    3.46      struct exec_domain    *ed = current;
    3.47  
    3.48      if ( unlikely(copy_from_user(&stu, (void *)regs->rsp, sizeof(stu))) ||
    3.49 -         unlikely(pagetable_get_phys(ed->arch.guest_table_user) == 0) )
    3.50 +         unlikely(pagetable_get_paddr(ed->arch.guest_table_user) == 0) )
    3.51          return -EFAULT;
    3.52  
    3.53      toggle_guest_mode(ed);
    3.54 @@ -978,7 +978,7 @@ void domain_relinquish_resources(struct 
    3.55      /* Drop the in-use references to page-table bases. */
    3.56      for_each_exec_domain ( d, ed )
    3.57      {
    3.58 -        if ( pagetable_get_phys(ed->arch.guest_table) != 0 )
    3.59 +        if ( pagetable_get_paddr(ed->arch.guest_table) != 0 )
    3.60          {
    3.61              if ( shadow_mode_refcounts(d) )
    3.62                  put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table)]);
    3.63 @@ -988,7 +988,7 @@ void domain_relinquish_resources(struct 
    3.64              ed->arch.guest_table = mk_pagetable(0);
    3.65          }
    3.66  
    3.67 -        if ( pagetable_get_phys(ed->arch.guest_table_user) != 0 )
    3.68 +        if ( pagetable_get_paddr(ed->arch.guest_table_user) != 0 )
    3.69          {
    3.70              if ( shadow_mode_refcounts(d) )
    3.71                  put_page(&frame_table[pagetable_get_pfn(ed->arch.guest_table_user)]);
     4.1 --- a/xen/arch/x86/domain_build.c	Tue May 31 23:04:23 2005 +0000
     4.2 +++ b/xen/arch/x86/domain_build.c	Wed Jun 01 09:06:47 2005 +0000
     4.3 @@ -263,15 +263,15 @@ int construct_dom0(struct domain *d,
     4.4      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += 4*PAGE_SIZE;
     4.5      memcpy(l2tab, idle_pg_table_l2, 4*PAGE_SIZE);
     4.6      for (i = 0; i < 4; i++) {
     4.7 -        l3tab[i] = l3e_create_phys((u32)l2tab + i*PAGE_SIZE, L3_PROT);
     4.8 +        l3tab[i] = l3e_from_paddr((u32)l2tab + i*PAGE_SIZE, L3_PROT);
     4.9          l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
    4.10 -            l2e_create_phys((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
    4.11 +            l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
    4.12      }
    4.13      unsigned long v;
    4.14      for (v = PERDOMAIN_VIRT_START; v < PERDOMAIN_VIRT_END;
    4.15           v += (1 << L2_PAGETABLE_SHIFT)) {
    4.16          l2tab[v >> L2_PAGETABLE_SHIFT] =
    4.17 -            l2e_create_phys(__pa(d->arch.mm_perdomain_pt) + (v-PERDOMAIN_VIRT_START),
    4.18 +            l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) + (v-PERDOMAIN_VIRT_START),
    4.19                              __PAGE_HYPERVISOR);
    4.20      }
    4.21      ed->arch.guest_table = mk_pagetable((unsigned long)l3start);
    4.22 @@ -279,9 +279,9 @@ int construct_dom0(struct domain *d,
    4.23      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
    4.24      memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
    4.25      l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    4.26 -        l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
    4.27 +        l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
    4.28      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    4.29 -        l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
    4.30 +        l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
    4.31      ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
    4.32  #endif
    4.33  
    4.34 @@ -293,13 +293,13 @@ int construct_dom0(struct domain *d,
    4.35          {
    4.36              l1start = l1tab = (l1_pgentry_t *)mpt_alloc; 
    4.37              mpt_alloc += PAGE_SIZE;
    4.38 -            *l2tab = l2e_create_phys((unsigned long)l1start, L2_PROT);
    4.39 +            *l2tab = l2e_from_paddr((unsigned long)l1start, L2_PROT);
    4.40              l2tab++;
    4.41              clear_page(l1tab);
    4.42              if ( count == 0 )
    4.43                  l1tab += l1_table_offset(dsi.v_start);
    4.44          }
    4.45 -        *l1tab = l1e_create_pfn(mfn, L1_PROT);
    4.46 +        *l1tab = l1e_from_pfn(mfn, L1_PROT);
    4.47          l1tab++;
    4.48          
    4.49          page = &frame_table[mfn];
    4.50 @@ -311,13 +311,13 @@ int construct_dom0(struct domain *d,
    4.51  
    4.52      /* Pages that are part of page tables must be read only. */
    4.53      l2tab = l2start + l2_linear_offset(vpt_start);
    4.54 -    l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_phys(*l2tab);
    4.55 +    l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_paddr(*l2tab);
    4.56      l1tab += l1_table_offset(vpt_start);
    4.57      for ( count = 0; count < nr_pt_pages; count++ ) 
    4.58      {
    4.59          page = &frame_table[l1e_get_pfn(*l1tab)];
    4.60          if ( !opt_dom0_shadow )
    4.61 -            l1e_remove_flags(l1tab, _PAGE_RW);
    4.62 +            l1e_remove_flags(*l1tab, _PAGE_RW);
    4.63          else
    4.64              if ( !get_page_type(page, PGT_writable_page) )
    4.65                  BUG();
    4.66 @@ -384,7 +384,7 @@ int construct_dom0(struct domain *d,
    4.67          }
    4.68  #endif
    4.69          if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
    4.70 -            l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_phys(*++l2tab);
    4.71 +            l1start = l1tab = (l1_pgentry_t *)(u32)l2e_get_paddr(*++l2tab);
    4.72      }
    4.73  
    4.74  #elif defined(__x86_64__)
    4.75 @@ -402,9 +402,9 @@ int construct_dom0(struct domain *d,
    4.76      l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
    4.77      memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
    4.78      l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
    4.79 -        l4e_create_phys(__pa(l4start), __PAGE_HYPERVISOR);
    4.80 +        l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
    4.81      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
    4.82 -        l4e_create_phys(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    4.83 +        l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    4.84      ed->arch.guest_table = mk_pagetable(__pa(l4start));
    4.85  
    4.86      l4tab += l4_table_offset(dsi.v_start);
    4.87 @@ -433,16 +433,16 @@ int construct_dom0(struct domain *d,
    4.88                      clear_page(l3tab);
    4.89                      if ( count == 0 )
    4.90                          l3tab += l3_table_offset(dsi.v_start);
    4.91 -                    *l4tab = l4e_create_phys(__pa(l3start), L4_PROT);
    4.92 +                    *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
    4.93                      l4tab++;
    4.94                  }
    4.95 -                *l3tab = l3e_create_phys(__pa(l2start), L3_PROT);
    4.96 +                *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
    4.97                  l3tab++;
    4.98              }
    4.99 -            *l2tab = l2e_create_phys(__pa(l1start), L2_PROT);
   4.100 +            *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
   4.101              l2tab++;
   4.102          }
   4.103 -        *l1tab = l1e_create_pfn(mfn, L1_PROT);
   4.104 +        *l1tab = l1e_from_pfn(mfn, L1_PROT);
   4.105          l1tab++;
   4.106  
   4.107          page = &frame_table[mfn];
   4.108 @@ -463,7 +463,7 @@ int construct_dom0(struct domain *d,
   4.109      l1tab += l1_table_offset(vpt_start);
   4.110      for ( count = 0; count < nr_pt_pages; count++ ) 
   4.111      {
   4.112 -        l1e_remove_flags(l1tab, _PAGE_RW);
   4.113 +        l1e_remove_flags(*l1tab, _PAGE_RW);
   4.114          page = &frame_table[l1e_get_pfn(*l1tab)];
   4.115  
   4.116          /* Read-only mapping + PGC_allocated + page-table page. */
   4.117 @@ -633,10 +633,10 @@ int construct_dom0(struct domain *d,
   4.118              // map this domain's p2m table into current page table,
   4.119              // so that we can easily access it.
   4.120              //
   4.121 -            ASSERT( root_get_value(idle_pg_table[1]) == 0 );
   4.122 -            ASSERT( pagetable_get_phys(d->arch.phys_table) );
   4.123 -            idle_pg_table[1] = root_create_phys(
   4.124 -                pagetable_get_phys(d->arch.phys_table), __PAGE_HYPERVISOR);
   4.125 +            ASSERT( root_get_intpte(idle_pg_table[1]) == 0 );
   4.126 +            ASSERT( pagetable_get_paddr(d->arch.phys_table) );
   4.127 +            idle_pg_table[1] = root_from_paddr(
   4.128 +                pagetable_get_paddr(d->arch.phys_table), __PAGE_HYPERVISOR);
   4.129              translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
   4.130                                  pagetable_get_pfn(ed->arch.guest_table));
   4.131              idle_pg_table[1] = root_empty();
     5.1 --- a/xen/arch/x86/mm.c	Tue May 31 23:04:23 2005 +0000
     5.2 +++ b/xen/arch/x86/mm.c	Wed Jun 01 09:06:47 2005 +0000
     5.3 @@ -234,7 +234,7 @@ void arch_init_memory(void)
     5.4  
     5.5  void write_ptbase(struct exec_domain *ed)
     5.6  {
     5.7 -    write_cr3(pagetable_get_phys(ed->arch.monitor_table));
     5.8 +    write_cr3(pagetable_get_paddr(ed->arch.monitor_table));
     5.9  }
    5.10  
    5.11  void invalidate_shadow_ldt(struct exec_domain *d)
    5.12 @@ -333,7 +333,7 @@ int map_ldt_shadow_page(unsigned int off
    5.13      if ( unlikely(!res) )
    5.14          return 0;
    5.15  
    5.16 -    nl1e = l1e_create_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
    5.17 +    nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
    5.18  
    5.19      ed->arch.perdomain_ptes[off + 16] = nl1e;
    5.20      ed->arch.shadow_ldt_mapcnt++;
    5.21 @@ -699,13 +699,13 @@ static inline int fixup_pae_linear_mappi
    5.22          return 0;
    5.23      }
    5.24  
    5.25 -    pl2e = map_domain_mem(l3e_get_phys(pl3e[3]));
    5.26 +    pl2e = map_domain_mem(l3e_get_paddr(pl3e[3]));
    5.27      for (i = 0; i < 4; i++) {
    5.28          vaddr = LINEAR_PT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
    5.29          idx = (vaddr >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
    5.30          if (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) {
    5.31 -            pl2e[idx] = l2e_create_phys(l3e_get_phys(pl3e[i]),
    5.32 -                                        __PAGE_HYPERVISOR);
    5.33 +            pl2e[idx] = l2e_from_paddr(l3e_get_paddr(pl3e[i]),
    5.34 +                                       __PAGE_HYPERVISOR);
    5.35          } else
    5.36              pl2e[idx] = l2e_empty();
    5.37      }
    5.38 @@ -765,9 +765,9 @@ static int alloc_l2_table(struct pfn_inf
    5.39             &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
    5.40             L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
    5.41      pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
    5.42 -        l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
    5.43 +        l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
    5.44      pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
    5.45 -        l2e_create_page(
    5.46 +        l2e_from_page(
    5.47              virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
    5.48              __PAGE_HYPERVISOR);
    5.49  #endif
    5.50 @@ -784,7 +784,7 @@ static int alloc_l2_table(struct pfn_inf
    5.51               v += (1 << L2_PAGETABLE_SHIFT)) {
    5.52              dst = (v >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES-1);
    5.53              virt = page_get_owner(page)->arch.mm_perdomain_pt + (v-PERDOMAIN_VIRT_START);
    5.54 -            pl2e[dst] = l2e_create_page(virt_to_page(virt), __PAGE_HYPERVISOR);
    5.55 +            pl2e[dst] = l2e_from_page(virt_to_page(virt), __PAGE_HYPERVISOR);
    5.56          }
    5.57          /* see fixup_pae_linear_mappings() for linear pagetables */
    5.58      }
    5.59 @@ -865,9 +865,9 @@ static int alloc_l4_table(struct pfn_inf
    5.60             &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
    5.61             ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
    5.62      pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
    5.63 -        l4e_create_pfn(pfn, __PAGE_HYPERVISOR);
    5.64 +        l4e_from_pfn(pfn, __PAGE_HYPERVISOR);
    5.65      pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
    5.66 -        l4e_create_page(
    5.67 +        l4e_from_page(
    5.68              virt_to_page(page_get_owner(page)->arch.mm_perdomain_l3),
    5.69              __PAGE_HYPERVISOR);
    5.70  
    5.71 @@ -956,16 +956,16 @@ static inline int update_l1e(l1_pgentry_
    5.72                               l1_pgentry_t  ol1e, 
    5.73                               l1_pgentry_t  nl1e)
    5.74  {
    5.75 -    intpte_t o = l1e_get_value(ol1e);
    5.76 -    intpte_t n = l1e_get_value(nl1e);
    5.77 +    intpte_t o = l1e_get_intpte(ol1e);
    5.78 +    intpte_t n = l1e_get_intpte(nl1e);
    5.79  
    5.80      if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
    5.81 -         unlikely(o != l1e_get_value(ol1e)) )
    5.82 +         unlikely(o != l1e_get_intpte(ol1e)) )
    5.83      {
    5.84          MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
    5.85                  ": saw %" PRIpte "\n",
    5.86 -                l1e_get_value(ol1e),
    5.87 -                l1e_get_value(nl1e),
    5.88 +                l1e_get_intpte(ol1e),
    5.89 +                l1e_get_intpte(nl1e),
    5.90                  o);
    5.91          return 0;
    5.92      }
    5.93 @@ -995,7 +995,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
    5.94          }
    5.95  
    5.96          /* Fast path for identical mapping, r/w and presence. */
    5.97 -        if ( !l1e_has_changed(&ol1e, &nl1e, _PAGE_RW | _PAGE_PRESENT))
    5.98 +        if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT))
    5.99              return update_l1e(pl1e, ol1e, nl1e);
   5.100  
   5.101          if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
   5.102 @@ -1019,14 +1019,15 @@ static int mod_l1_entry(l1_pgentry_t *pl
   5.103  
   5.104  #define UPDATE_ENTRY(_t,_p,_o,_n) ({                                    \
   5.105      intpte_t __o = cmpxchg((intpte_t *)(_p),                            \
   5.106 -                           _t ## e_get_value(_o),                       \
   5.107 -                           _t ## e_get_value(_n));                      \
   5.108 -    if ( __o != _t ## e_get_value(_o) )                                 \
   5.109 -        MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte ": saw %" PRIpte "", \
   5.110 -                (_t ## e_get_value(_o)),                                \
   5.111 -                (_t ## e_get_value(_n)),                                \
   5.112 +                           _t ## e_get_intpte(_o),                      \
   5.113 +                           _t ## e_get_intpte(_n));                     \
   5.114 +    if ( __o != _t ## e_get_intpte(_o) )                                \
   5.115 +        MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte              \
   5.116 +                ": saw %" PRIpte "",                                    \
   5.117 +                (_t ## e_get_intpte(_o)),                               \
   5.118 +                (_t ## e_get_intpte(_n)),                               \
   5.119                  (__o));                                                 \
   5.120 -    (__o == _t ## e_get_value(_o)); })
   5.121 +    (__o == _t ## e_get_intpte(_o)); })
   5.122  
   5.123  /* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
   5.124  static int mod_l2_entry(l2_pgentry_t *pl2e, 
   5.125 @@ -1056,7 +1057,7 @@ static int mod_l2_entry(l2_pgentry_t *pl
   5.126          }
   5.127  
   5.128          /* Fast path for identical mapping and presence. */
   5.129 -        if ( !l2e_has_changed(&ol2e, &nl2e, _PAGE_PRESENT))
   5.130 +        if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT))
   5.131              return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e);
   5.132  
   5.133          vaddr = (((unsigned long)pl2e & ~PAGE_MASK) / sizeof(l2_pgentry_t))
   5.134 @@ -1111,7 +1112,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
   5.135          }
   5.136  
   5.137          /* Fast path for identical mapping and presence. */
   5.138 -        if (!l3e_has_changed(&ol3e, &nl3e, _PAGE_PRESENT))
   5.139 +        if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
   5.140              return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
   5.141  
   5.142          vaddr = (((unsigned long)pl3e & ~PAGE_MASK) / sizeof(l3_pgentry_t))
   5.143 @@ -1168,7 +1169,7 @@ static int mod_l4_entry(l4_pgentry_t *pl
   5.144          }
   5.145  
   5.146          /* Fast path for identical mapping and presence. */
   5.147 -        if (!l4e_has_changed(&ol4e, &nl4e, _PAGE_PRESENT))
   5.148 +        if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
   5.149              return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
   5.150  
   5.151          if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
   5.152 @@ -2029,7 +2030,7 @@ int do_mmu_update(
   5.153                      l1_pgentry_t l1e;
   5.154  
   5.155                      /* FIXME: doesn't work with PAE */
   5.156 -                    l1e = l1e_create_phys(req.val, req.val);
   5.157 +                    l1e = l1e_from_intpte(req.val);
   5.158                      okay = mod_l1_entry(va, l1e);
   5.159                      if ( okay && unlikely(shadow_mode_enabled(d)) )
   5.160                          shadow_l1_normal_pt_update(d, req.ptr, l1e, &sh_mapcache);
   5.161 @@ -2044,7 +2045,7 @@ int do_mmu_update(
   5.162                      l2_pgentry_t l2e;
   5.163  
   5.164                      /* FIXME: doesn't work with PAE */
   5.165 -                    l2e = l2e_create_phys(req.val, req.val);
   5.166 +                    l2e = l2e_from_intpte(req.val);
   5.167                      okay = mod_l2_entry((l2_pgentry_t *)va, l2e, mfn, type_info);
   5.168                      if ( okay && unlikely(shadow_mode_enabled(d)) )
   5.169                          shadow_l2_normal_pt_update(d, req.ptr, l2e, &sh_mapcache);
   5.170 @@ -2059,7 +2060,7 @@ int do_mmu_update(
   5.171                      l3_pgentry_t l3e;
   5.172  
   5.173                      /* FIXME: doesn't work with PAE */
   5.174 -                    l3e = l3e_create_phys(req.val,req.val);
   5.175 +                    l3e = l3e_from_intpte(req.val);
   5.176                      okay = mod_l3_entry(va, l3e, mfn);
   5.177                      if ( okay && unlikely(shadow_mode_enabled(d)) )
   5.178                          shadow_l3_normal_pt_update(d, req.ptr, l3e, &sh_mapcache);
   5.179 @@ -2074,7 +2075,7 @@ int do_mmu_update(
   5.180                  {
   5.181                      l4_pgentry_t l4e;
   5.182  
   5.183 -                    l4e = l4e_create_phys(req.val,req.val);
   5.184 +                    l4e = l4e_from_intpte(req.val);
   5.185                      okay = mod_l4_entry(va, l4e, mfn);
   5.186                      if ( okay && unlikely(shadow_mode_enabled(d)) )
   5.187                          shadow_l4_normal_pt_update(d, req.ptr, l4e, &sh_mapcache);
   5.188 @@ -2251,7 +2252,7 @@ int do_update_va_mapping(unsigned long v
   5.189                           unsigned long val32,
   5.190                           unsigned long flags)
   5.191  {
   5.192 -    l1_pgentry_t       val  = l1e_create_phys(val32,val32);
   5.193 +    l1_pgentry_t       val  = l1e_from_intpte(val32);
   5.194      struct exec_domain *ed  = current;
   5.195      struct domain      *d   = ed->domain;
   5.196      unsigned int        cpu = ed->processor;
   5.197 @@ -2420,7 +2421,7 @@ long set_gdt(struct exec_domain *ed,
   5.198      {
   5.199          ed->arch.guest_context.gdt_frames[i] = frames[i];
   5.200          ed->arch.perdomain_ptes[i] =
   5.201 -            l1e_create_pfn(frames[i], __PAGE_HYPERVISOR);
   5.202 +            l1e_from_pfn(frames[i], __PAGE_HYPERVISOR);
   5.203      }
   5.204  
   5.205      return 0;
   5.206 @@ -2562,7 +2563,7 @@ int revalidate_l1(struct domain *d, l1_p
   5.207          ol1e = snapshot[i];
   5.208          nl1e = l1page[i];
   5.209  
   5.210 -        if ( likely(l1e_get_value(ol1e) == l1e_get_value(nl1e)) )
   5.211 +        if ( likely(l1e_get_intpte(ol1e) == l1e_get_intpte(nl1e)) )
   5.212              continue;
   5.213  
   5.214          /* Update number of entries modified. */
   5.215 @@ -2572,7 +2573,7 @@ int revalidate_l1(struct domain *d, l1_p
   5.216           * Fast path for PTEs that have merely been write-protected
   5.217           * (e.g., during a Unix fork()). A strict reduction in privilege.
   5.218           */
   5.219 -        if ( likely(l1e_get_value(ol1e) == (l1e_get_value(nl1e)|_PAGE_RW)) )
   5.220 +        if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
   5.221          {
   5.222              if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
   5.223                  put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
   5.224 @@ -2666,7 +2667,7 @@ void ptwr_flush(struct domain *d, const 
   5.225      if ( which == PTWR_PT_ACTIVE )
   5.226      {
   5.227          pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
   5.228 -        l2e_add_flags(pl2e, _PAGE_PRESENT); 
   5.229 +        l2e_add_flags(*pl2e, _PAGE_PRESENT); 
   5.230      }
   5.231  
   5.232      /*
   5.233 @@ -2741,7 +2742,7 @@ static int ptwr_emulated_update(
   5.234      }
   5.235  
   5.236      /* Check the new PTE. */
   5.237 -    nl1e = l1e_create_phys(val, val & ~PAGE_MASK);
   5.238 +    nl1e = l1e_from_intpte(val);
   5.239      if ( unlikely(!get_page_from_l1e(nl1e, d)) )
   5.240          return X86EMUL_UNHANDLEABLE;
   5.241  
   5.242 @@ -2749,7 +2750,7 @@ static int ptwr_emulated_update(
   5.243      pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
   5.244      if ( do_cmpxchg )
   5.245      {
   5.246 -        ol1e = l1e_create_phys(old, old & ~PAGE_MASK);
   5.247 +        ol1e = l1e_from_intpte(old);
   5.248          if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
   5.249          {
   5.250              unmap_domain_mem(pl1e);
   5.251 @@ -2909,7 +2910,7 @@ int ptwr_do_page_fault(struct domain *d,
   5.252      /* For safety, disconnect the L1 p.t. page from current space. */
   5.253      if ( which == PTWR_PT_ACTIVE )
   5.254      {
   5.255 -        l2e_remove_flags(pl2e, _PAGE_PRESENT);
   5.256 +        l2e_remove_flags(*pl2e, _PAGE_PRESENT);
   5.257          flush_tlb_mask(d->cpuset);
   5.258      }
   5.259      
   5.260 @@ -2920,7 +2921,7 @@ int ptwr_do_page_fault(struct domain *d,
   5.261             L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
   5.262      
   5.263      /* Finally, make the p.t. page writable by the guest OS. */
   5.264 -    l1e_add_flags(&pte, _PAGE_RW);
   5.265 +    l1e_add_flags(pte, _PAGE_RW);
   5.266      if ( unlikely(__copy_to_user(&linear_pg_table[addr>>PAGE_SHIFT],
   5.267                                   &pte, sizeof(pte))) )
   5.268      {
   5.269 @@ -2993,7 +2994,7 @@ int map_pages_to_xen(
   5.270          {
   5.271              /* Super-page mapping. */
   5.272              ol2e  = *pl2e;
   5.273 -            *pl2e = l2e_create_pfn(pfn, flags|_PAGE_PSE);
   5.274 +            *pl2e = l2e_from_pfn(pfn, flags|_PAGE_PSE);
   5.275  
   5.276              if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
   5.277              {
   5.278 @@ -3013,22 +3014,22 @@ int map_pages_to_xen(
   5.279              {
   5.280                  pl1e = page_to_virt(alloc_xen_pagetable());
   5.281                  clear_page(pl1e);
   5.282 -                *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
   5.283 +                *pl2e = l2e_from_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
   5.284              }
   5.285              else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
   5.286              {
   5.287                  pl1e = page_to_virt(alloc_xen_pagetable());
   5.288                  for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   5.289 -                    pl1e[i] = l1e_create_pfn(
   5.290 +                    pl1e[i] = l1e_from_pfn(
   5.291                          l2e_get_pfn(*pl2e) + i,
   5.292                          l2e_get_flags(*pl2e) & ~_PAGE_PSE);
   5.293 -                *pl2e = l2e_create_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
   5.294 +                *pl2e = l2e_from_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
   5.295                  local_flush_tlb_pge();
   5.296              }
   5.297  
   5.298              pl1e  = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
   5.299              ol1e  = *pl1e;
   5.300 -            *pl1e = l1e_create_pfn(pfn, flags);
   5.301 +            *pl1e = l1e_from_pfn(pfn, flags);
   5.302              if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
   5.303                  local_flush_tlb_one(virt);
   5.304  
     6.1 --- a/xen/arch/x86/shadow.c	Tue May 31 23:04:23 2005 +0000
     6.2 +++ b/xen/arch/x86/shadow.c	Wed Jun 01 09:06:47 2005 +0000
     6.3 @@ -573,7 +573,7 @@ static void free_shadow_pages(struct dom
     6.4      //
     6.5      for_each_exec_domain(d, ed)
     6.6      {
     6.7 -        if ( pagetable_get_phys(ed->arch.shadow_table) )
     6.8 +        if ( pagetable_get_paddr(ed->arch.shadow_table) )
     6.9          {
    6.10              put_shadow_ref(pagetable_get_pfn(ed->arch.shadow_table));
    6.11              ed->arch.shadow_table = mk_pagetable(0);
    6.12 @@ -684,7 +684,7 @@ static void alloc_monitor_pagetable(stru
    6.13      struct pfn_info *mmfn_info;
    6.14      struct domain *d = ed->domain;
    6.15  
    6.16 -    ASSERT(pagetable_get_phys(ed->arch.monitor_table) == 0);
    6.17 +    ASSERT(pagetable_get_paddr(ed->arch.monitor_table) == 0);
    6.18  
    6.19      mmfn_info = alloc_domheap_page(NULL);
    6.20      ASSERT(mmfn_info != NULL);
    6.21 @@ -700,12 +700,12 @@ static void alloc_monitor_pagetable(stru
    6.22  #endif
    6.23  
    6.24      mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
    6.25 -        l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
    6.26 +        l2e_from_paddr(__pa(d->arch.mm_perdomain_pt),
    6.27                          __PAGE_HYPERVISOR);
    6.28  
    6.29      // map the phys_to_machine map into the Read-Only MPT space for this domain
    6.30      mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
    6.31 -        l2e_create_phys(pagetable_get_phys(d->arch.phys_table),
    6.32 +        l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
    6.33                          __PAGE_HYPERVISOR);
    6.34  
    6.35      // Don't (yet) have mappings for these...
    6.36 @@ -726,7 +726,7 @@ void free_monitor_pagetable(struct exec_
    6.37      l2_pgentry_t *mpl2e, hl2e, sl2e;
    6.38      unsigned long mfn;
    6.39  
    6.40 -    ASSERT( pagetable_get_phys(ed->arch.monitor_table) );
    6.41 +    ASSERT( pagetable_get_paddr(ed->arch.monitor_table) );
    6.42      
    6.43      mpl2e = ed->arch.monitor_vtable;
    6.44  
    6.45 @@ -766,7 +766,7 @@ set_p2m_entry(struct domain *d, unsigned
    6.46                struct map_dom_mem_cache *l2cache,
    6.47                struct map_dom_mem_cache *l1cache)
    6.48  {
    6.49 -    unsigned long phystab = pagetable_get_phys(d->arch.phys_table);
    6.50 +    unsigned long phystab = pagetable_get_paddr(d->arch.phys_table);
    6.51      l2_pgentry_t *l2, l2e;
    6.52      l1_pgentry_t *l1;
    6.53      struct pfn_info *l1page;
    6.54 @@ -789,13 +789,13 @@ set_p2m_entry(struct domain *d, unsigned
    6.55          memset(l1, 0, PAGE_SIZE);
    6.56          unmap_domain_mem_with_cache(l1, l1cache);
    6.57  
    6.58 -        l2e = l2e_create_page(l1page, __PAGE_HYPERVISOR);
    6.59 +        l2e = l2e_from_page(l1page, __PAGE_HYPERVISOR);
    6.60          l2[l2_table_offset(va)] = l2e;
    6.61      }
    6.62      unmap_domain_mem_with_cache(l2, l2cache);
    6.63  
    6.64 -    l1 = map_domain_mem_with_cache(l2e_get_phys(l2e), l1cache);
    6.65 -    l1[l1_table_offset(va)] = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
    6.66 +    l1 = map_domain_mem_with_cache(l2e_get_paddr(l2e), l1cache);
    6.67 +    l1[l1_table_offset(va)] = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
    6.68      unmap_domain_mem_with_cache(l1, l1cache);
    6.69  
    6.70      return 1;
    6.71 @@ -965,7 +965,7 @@ int __shadow_mode_enable(struct domain *
    6.72      {
    6.73          if ( !(new_modes & SHM_external) )
    6.74          {
    6.75 -            ASSERT( !pagetable_get_phys(d->arch.phys_table) );
    6.76 +            ASSERT( !pagetable_get_paddr(d->arch.phys_table) );
    6.77              if ( !alloc_p2m_table(d) )
    6.78              {
    6.79                  printk("alloc_p2m_table failed (out-of-memory?)\n");
    6.80 @@ -1051,7 +1051,7 @@ int __shadow_mode_enable(struct domain *
    6.81          d->arch.shadow_dirty_bitmap = NULL;
    6.82      }
    6.83      if ( (new_modes & SHM_translate) && !(new_modes & SHM_external) &&
    6.84 -         pagetable_get_phys(d->arch.phys_table) )
    6.85 +         pagetable_get_paddr(d->arch.phys_table) )
    6.86      {
    6.87          free_p2m_table(d);
    6.88      }
    6.89 @@ -1082,7 +1082,7 @@ translate_l1pgtable(struct domain *d, l1
    6.90              unsigned long mfn = l1e_get_pfn(l1[i]);
    6.91              unsigned long gpfn = __mfn_to_gpfn(d, mfn);
    6.92              ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
    6.93 -            l1[i] = l1e_create_pfn(gpfn, l1e_get_flags(l1[i]));
    6.94 +            l1[i] = l1e_from_pfn(gpfn, l1e_get_flags(l1[i]));
    6.95          }
    6.96      }
    6.97      unmap_domain_mem(l1);
    6.98 @@ -1110,7 +1110,7 @@ translate_l2pgtable(struct domain *d, l1
    6.99              unsigned long mfn = l2e_get_pfn(l2[i]);
   6.100              unsigned long gpfn = __mfn_to_gpfn(d, mfn);
   6.101              ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
   6.102 -            l2[i] = l2e_create_pfn(gpfn, l2e_get_flags(l2[i]));
   6.103 +            l2[i] = l2e_from_pfn(gpfn, l2e_get_flags(l2[i]));
   6.104              translate_l1pgtable(d, p2m, mfn);
   6.105          }
   6.106      }
   6.107 @@ -1404,17 +1404,17 @@ gpfn_to_mfn_foreign(struct domain *d, un
   6.108      perfc_incrc(gpfn_to_mfn_foreign);
   6.109  
   6.110      unsigned long va = gpfn << PAGE_SHIFT;
   6.111 -    unsigned long phystab = pagetable_get_phys(d->arch.phys_table);
   6.112 +    unsigned long phystab = pagetable_get_paddr(d->arch.phys_table);
   6.113      l2_pgentry_t *l2 = map_domain_mem(phystab);
   6.114      l2_pgentry_t l2e = l2[l2_table_offset(va)];
   6.115      unmap_domain_mem(l2);
   6.116      if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   6.117      {
   6.118          printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%" PRIpte "\n",
   6.119 -               d->domain_id, gpfn, l2e_get_value(l2e));
   6.120 +               d->domain_id, gpfn, l2e_get_intpte(l2e));
   6.121          return INVALID_MFN;
   6.122      }
   6.123 -    unsigned long l1tab = l2e_get_phys(l2e);
   6.124 +    unsigned long l1tab = l2e_get_paddr(l2e);
   6.125      l1_pgentry_t *l1 = map_domain_mem(l1tab);
   6.126      l1_pgentry_t l1e = l1[l1_table_offset(va)];
   6.127      unmap_domain_mem(l1);
   6.128 @@ -1427,7 +1427,7 @@ gpfn_to_mfn_foreign(struct domain *d, un
   6.129      if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
   6.130      {
   6.131          printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l1e=%" PRIpte "\n",
   6.132 -               d->domain_id, gpfn, l1e_get_value(l1e));
   6.133 +               d->domain_id, gpfn, l1e_get_intpte(l1e));
   6.134          return INVALID_MFN;
   6.135      }
   6.136  
   6.137 @@ -1476,11 +1476,11 @@ shadow_hl2_table(struct domain *d, unsig
   6.138          // Setup easy access to the GL2, SL2, and HL2 frames.
   6.139          //
   6.140          hl2[l2_table_offset(LINEAR_PT_VIRT_START)] =
   6.141 -            l1e_create_pfn(gmfn, __PAGE_HYPERVISOR);
   6.142 +            l1e_from_pfn(gmfn, __PAGE_HYPERVISOR);
   6.143          hl2[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   6.144 -            l1e_create_pfn(smfn, __PAGE_HYPERVISOR);
   6.145 +            l1e_from_pfn(smfn, __PAGE_HYPERVISOR);
   6.146          hl2[l2_table_offset(PERDOMAIN_VIRT_START)] =
   6.147 -            l1e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
   6.148 +            l1e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
   6.149      }
   6.150  
   6.151      unmap_domain_mem(hl2);
   6.152 @@ -1530,10 +1530,10 @@ static unsigned long shadow_l2_table(
   6.153                 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
   6.154  
   6.155          spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   6.156 -            l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
   6.157 +            l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
   6.158  
   6.159          spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
   6.160 -            l2e_create_phys(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
   6.161 +            l2e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
   6.162                              __PAGE_HYPERVISOR);
   6.163  
   6.164          if ( shadow_mode_translate(d) ) // NB: not external
   6.165 @@ -1541,7 +1541,7 @@ static unsigned long shadow_l2_table(
   6.166              unsigned long hl2mfn;
   6.167  
   6.168              spl2e[l2_table_offset(RO_MPT_VIRT_START)] =
   6.169 -                l2e_create_phys(pagetable_get_phys(d->arch.phys_table),
   6.170 +                l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table),
   6.171                                  __PAGE_HYPERVISOR);
   6.172  
   6.173              if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
   6.174 @@ -1554,11 +1554,11 @@ static unsigned long shadow_l2_table(
   6.175                  BUG();
   6.176              
   6.177              spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   6.178 -                l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
   6.179 +                l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
   6.180          }
   6.181          else
   6.182              spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   6.183 -                l2e_create_pfn(gmfn, __PAGE_HYPERVISOR);
   6.184 +                l2e_from_pfn(gmfn, __PAGE_HYPERVISOR);
   6.185      }
   6.186      else
   6.187      {
   6.188 @@ -1885,7 +1885,7 @@ void shadow_mark_va_out_of_sync(
   6.189  
   6.190      // NB: this is stored as a machine address.
   6.191      entry->writable_pl1e =
   6.192 -        l2e_get_phys(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
   6.193 +        l2e_get_paddr(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
   6.194      ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
   6.195  
   6.196      // Increment shadow's page count to represent the reference
   6.197 @@ -1920,7 +1920,7 @@ static int snapshot_entry_matches(
   6.198      // This could probably be smarter, but this is sufficent for
   6.199      // our current needs.
   6.200      //
   6.201 -    entries_match = !l1e_has_changed(&guest_pt[index], &snapshot[index],
   6.202 +    entries_match = !l1e_has_changed(guest_pt[index], snapshot[index],
   6.203                                       PAGE_FLAG_MASK);
   6.204  
   6.205      unmap_domain_mem(snapshot);
   6.206 @@ -2074,7 +2074,7 @@ static u32 remove_all_write_access_in_pt
   6.207          ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
   6.208           PGT_l1_shadow);
   6.209  
   6.210 -    match = l1e_create_pfn(readonly_gmfn, flags);
   6.211 +    match = l1e_from_pfn(readonly_gmfn, flags);
   6.212  
   6.213      // returns true if all refs have been found and fixed.
   6.214      //
   6.215 @@ -2083,7 +2083,7 @@ static u32 remove_all_write_access_in_pt
   6.216          l1_pgentry_t old = pt[i];
   6.217          l1_pgentry_t new = old;
   6.218  
   6.219 -        l1e_remove_flags(&new,_PAGE_RW);
   6.220 +        l1e_remove_flags(new,_PAGE_RW);
   6.221          if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
   6.222              BUG();
   6.223          found++;
   6.224 @@ -2101,7 +2101,7 @@ static u32 remove_all_write_access_in_pt
   6.225      }
   6.226  
   6.227      i = readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1);
   6.228 -    if ( !l1e_has_changed(&pt[i], &match, flags) && fix_entry(i) )
   6.229 +    if ( !l1e_has_changed(pt[i], match, flags) && fix_entry(i) )
   6.230      {
   6.231          perfc_incrc(remove_write_fast_exit);
   6.232          increase_writable_pte_prediction(d, readonly_gpfn, prediction);
   6.233 @@ -2111,7 +2111,7 @@ static u32 remove_all_write_access_in_pt
   6.234   
   6.235      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   6.236      {
   6.237 -        if ( unlikely(!l1e_has_changed(&pt[i], &match, flags)) && fix_entry(i) )
   6.238 +        if ( unlikely(!l1e_has_changed(pt[i], match, flags)) && fix_entry(i) )
   6.239              break;
   6.240      }
   6.241  
   6.242 @@ -2216,11 +2216,11 @@ static u32 remove_all_access_in_page(
   6.243          ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
   6.244           PGT_l1_shadow);
   6.245  
   6.246 -    match = l1e_create_pfn(forbidden_gmfn, flags);
   6.247 +    match = l1e_from_pfn(forbidden_gmfn, flags);
   6.248      
   6.249      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   6.250      {
   6.251 -        if ( unlikely(!l1e_has_changed(&pl1e[i], &match, flags) == 0) )
   6.252 +        if ( unlikely(!l1e_has_changed(pl1e[i], match, flags) == 0) )
   6.253          {
   6.254              l1_pgentry_t ol2e = pl1e[i];
   6.255              pl1e[i] = l1e_empty();
   6.256 @@ -2361,7 +2361,7 @@ static int resync_all(struct domain *d, 
   6.257              for ( i = min_shadow; i <= max_shadow; i++ )
   6.258              {
   6.259                  if ( (i < min_snapshot) || (i > max_snapshot) ||
   6.260 -                     l1e_has_changed(&guest1[i], &snapshot1[i], PAGE_FLAG_MASK) )
   6.261 +                     l1e_has_changed(guest1[i], snapshot1[i], PAGE_FLAG_MASK) )
   6.262                  {
   6.263                      need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
   6.264  
   6.265 @@ -2399,7 +2399,7 @@ static int resync_all(struct domain *d, 
   6.266                      continue;
   6.267  
   6.268                  l2_pgentry_t new_pde = guest2[i];
   6.269 -                if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK))
   6.270 +                if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK))
   6.271                  {
   6.272                      need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
   6.273  
   6.274 @@ -2410,13 +2410,13 @@ static int resync_all(struct domain *d, 
   6.275  
   6.276                      changed++;
   6.277                  }
   6.278 -                if ( l2e_get_value(new_pde) != 0 ) /* FIXME: check flags? */
   6.279 +                if ( l2e_get_intpte(new_pde) != 0 ) /* FIXME: check flags? */
   6.280                      max = i;
   6.281  
   6.282                  // XXX - This hack works for linux guests.
   6.283                  //       Need a better solution long term.
   6.284                  if ( !(l2e_get_flags(new_pde) & _PAGE_PRESENT) &&
   6.285 -                     unlikely(l2e_get_value(new_pde) != 0) &&
   6.286 +                     unlikely(l2e_get_intpte(new_pde) != 0) &&
   6.287                       !unshadow && MFN_PINNED(smfn) )
   6.288                      unshadow = 1;
   6.289              }
   6.290 @@ -2445,7 +2445,7 @@ static int resync_all(struct domain *d, 
   6.291                      continue;
   6.292  
   6.293                  l2_pgentry_t new_pde = guest2[i];
   6.294 -                if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK) )
   6.295 +                if ( l2e_has_changed(new_pde, snapshot2[i], PAGE_FLAG_MASK) )
   6.296                  {
   6.297                      need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
   6.298  
   6.299 @@ -2510,7 +2510,7 @@ void __shadow_sync_all(struct domain *d)
   6.300          l1_pgentry_t *ppte = map_domain_mem(entry->writable_pl1e);
   6.301          l1_pgentry_t opte = *ppte;
   6.302          l1_pgentry_t npte = opte;
   6.303 -        l1e_remove_flags(&npte, _PAGE_RW);
   6.304 +        l1e_remove_flags(npte, _PAGE_RW);
   6.305  
   6.306          if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
   6.307               !shadow_get_page_from_l1e(npte, d) )
   6.308 @@ -2595,7 +2595,7 @@ int shadow_fault(unsigned long va, struc
   6.309      if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_PRESENT)) )
   6.310      {
   6.311          SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",
   6.312 -                 l1e_get_value(gpte));
   6.313 +                 l1e_get_intpte(gpte));
   6.314          perfc_incrc(shadow_fault_bail_pte_not_present);
   6.315          goto fail;
   6.316      }
   6.317 @@ -2610,13 +2610,13 @@ int shadow_fault(unsigned long va, struc
   6.318              if ( shadow_mode_page_writable(d, l1e_get_pfn(gpte)) )
   6.319              {
   6.320                  allow_writes = 1;
   6.321 -                l1e_add_flags(&gpte, _PAGE_RW);
   6.322 +                l1e_add_flags(gpte, _PAGE_RW);
   6.323              }
   6.324              else
   6.325              {
   6.326                  /* Write fault on a read-only mapping. */
   6.327                  SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", 
   6.328 -                         l1e_get_value(gpte));
   6.329 +                         l1e_get_intpte(gpte));
   6.330                  perfc_incrc(shadow_fault_bail_ro_mapping);
   6.331                  goto fail;
   6.332              }
   6.333 @@ -2631,7 +2631,7 @@ int shadow_fault(unsigned long va, struc
   6.334          }
   6.335  
   6.336          if ( allow_writes )
   6.337 -            l1e_remove_flags(&gpte, _PAGE_RW);
   6.338 +            l1e_remove_flags(gpte, _PAGE_RW);
   6.339      }
   6.340      else
   6.341      {
   6.342 @@ -2647,7 +2647,7 @@ int shadow_fault(unsigned long va, struc
   6.343      /*
   6.344       * STEP 3. Write the modified shadow PTE and guest PTE back to the tables.
   6.345       */
   6.346 -    if ( l1e_has_changed(&orig_gpte, &gpte, PAGE_FLAG_MASK) )
   6.347 +    if ( l1e_has_changed(orig_gpte, gpte, PAGE_FLAG_MASK) )
   6.348      {
   6.349          /* XXX Watch out for read-only L2 entries! (not used in Linux). */
   6.350          if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
   6.351 @@ -2655,7 +2655,7 @@ int shadow_fault(unsigned long va, struc
   6.352          {
   6.353              printk("%s() failed, crashing domain %d "
   6.354                     "due to a read-only L2 page table (gpde=%" PRIpte "), va=%lx\n",
   6.355 -                   __func__,d->domain_id, l2e_get_value(gpde), va);
   6.356 +                   __func__,d->domain_id, l2e_get_intpte(gpde), va);
   6.357              domain_crash_synchronous();
   6.358          }
   6.359  
   6.360 @@ -2693,7 +2693,7 @@ void shadow_l1_normal_pt_update(
   6.361      if ( sl1mfn )
   6.362      {
   6.363          SH_VVLOG("shadow_l1_normal_pt_update pa=%p, gpte=%08lx",
   6.364 -                 (void *)pa, l1e_get_value(gpte));
   6.365 +                 (void *)pa, l1e_get_intpte(gpte));
   6.366          l1pte_propagate_from_guest(current->domain, gpte, &spte);
   6.367  
   6.368          spl1e = map_domain_mem_with_cache(sl1mfn << PAGE_SHIFT, cache);
   6.369 @@ -2718,7 +2718,7 @@ void shadow_l2_normal_pt_update(
   6.370      if ( sl2mfn )
   6.371      {
   6.372          SH_VVLOG("shadow_l2_normal_pt_update pa=%p, gpde=%08lx",
   6.373 -                 (void *)pa, l2e_get_value(gpde));
   6.374 +                 (void *)pa, l2e_get_intpte(gpde));
   6.375          spl2e = map_domain_mem_with_cache(sl2mfn << PAGE_SHIFT, cache);
   6.376          validate_pde_change(d, gpde,
   6.377                              &spl2e[(pa & ~PAGE_MASK) / sizeof(l2_pgentry_t)]);
   6.378 @@ -2758,7 +2758,7 @@ int shadow_do_update_va_mapping(unsigned
   6.379  
   6.380      shadow_lock(d);
   6.381  
   6.382 -    //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_value(val));
   6.383 +    //printk("%s(va=%p, val=%p)\n", __func__, (void *)va, (void *)l1e_get_intpte(val));
   6.384          
   6.385      // This is actually overkill - we don't need to sync the L1 itself,
   6.386      // just everything involved in getting to this L1 (i.e. we need
   6.387 @@ -2889,14 +2889,14 @@ void __update_pagetables(struct exec_dom
   6.388          if ( !get_shadow_ref(hl2mfn) )
   6.389              BUG();
   6.390          mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   6.391 -            l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
   6.392 +            l2e_from_pfn(hl2mfn, __PAGE_HYPERVISOR);
   6.393          if ( l2e_get_flags(old_hl2e) & _PAGE_PRESENT )
   6.394              put_shadow_ref(l2e_get_pfn(old_hl2e));
   6.395  
   6.396          if ( !get_shadow_ref(smfn) )
   6.397              BUG();
   6.398          mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   6.399 -            l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
   6.400 +            l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
   6.401          if ( l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
   6.402              put_shadow_ref(l2e_get_pfn(old_sl2e));
   6.403  
   6.404 @@ -2938,7 +2938,7 @@ mark_shadows_as_reflecting_snapshot(stru
   6.405          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   6.406              if ( is_guest_l1_slot(i) &&
   6.407                   (l1e_get_flags(l1e[i]) & _PAGE_PRESENT) )
   6.408 -                l1e_add_flags(&l1e[i], SHADOW_REFLECTS_SNAPSHOT);
   6.409 +                l1e_add_flags(l1e[i], SHADOW_REFLECTS_SNAPSHOT);
   6.410          unmap_domain_mem(l1e);
   6.411      }
   6.412  
   6.413 @@ -2948,7 +2948,7 @@ mark_shadows_as_reflecting_snapshot(stru
   6.414          for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   6.415              if ( is_guest_l2_slot(i) &&
   6.416                   (l2e_get_flags(l2e[i]) & _PAGE_PRESENT) )
   6.417 -                l2e_add_flags(&l2e[i], SHADOW_REFLECTS_SNAPSHOT);
   6.418 +                l2e_add_flags(l2e[i], SHADOW_REFLECTS_SNAPSHOT);
   6.419          unmap_domain_mem(l2e);
   6.420      }
   6.421  }
   6.422 @@ -2968,7 +2968,7 @@ int shadow_status_noswap;
   6.423          l1_pgentry_t _pte;                                                   \
   6.424          _pte = shadow_linear_pg_table[l1_linear_offset(_a)];                 \
   6.425          if ( l1e_get_flags(_pte) & _PAGE_PRESENT )                           \
   6.426 -            _pa = l1e_get_phys(_pte);                                        \
   6.427 +            _pa = l1e_get_paddr(_pte);                                       \
   6.428      }                                                                        \
   6.429      _pa | (_a & ~PAGE_MASK);                                                 \
   6.430  })
   6.431 @@ -2981,8 +2981,8 @@ int shadow_status_noswap;
   6.432          printk("guest_pte=%lx eff_guest_pte=%lx shadow_pte=%lx "             \
   6.433                 "snapshot_pte=%lx &guest=%p &shadow=%p &snap=%p "             \
   6.434                 "v2m(&guest)=%p v2m(&shadow)=%p v2m(&snap)=%p ea=%08x\n",     \
   6.435 -               l1e_get_value(guest_pte), l1e_get_value(eff_guest_pte),       \
   6.436 -               l1e_get_value(shadow_pte), l1e_get_value(snapshot_pte),       \
   6.437 +               l1e_get_intpte(guest_pte), l1e_get_intpte(eff_guest_pte),     \
   6.438 +               l1e_get_intpte(shadow_pte), l1e_get_intpte(snapshot_pte),     \
   6.439                 p_guest_pte, p_shadow_pte, p_snapshot_pte,                    \
   6.440                 (void *)v2m(ed, p_guest_pte), (void *)v2m(ed, p_shadow_pte),  \
   6.441                 (void *)v2m(ed, p_snapshot_pte),                              \
   6.442 @@ -3007,9 +3007,9 @@ static int check_pte(
   6.443      int errors = 0, guest_writable;
   6.444      int page_table_page;
   6.445  
   6.446 -    if ( (l1e_get_value(shadow_pte) == 0) ||
   6.447 -         (l1e_get_value(shadow_pte) == 0xdeadface) ||
   6.448 -         (l1e_get_value(shadow_pte) == 0x00000E00) )
   6.449 +    if ( (l1e_get_intpte(shadow_pte) == 0) ||
   6.450 +         (l1e_get_intpte(shadow_pte) == 0xdeadface) ||
   6.451 +         (l1e_get_intpte(shadow_pte) == 0x00000E00) )
   6.452          return errors;  /* always safe */
   6.453  
   6.454      if ( !(l1e_get_flags(shadow_pte) & _PAGE_PRESENT) )
   6.455 @@ -3028,7 +3028,7 @@ static int check_pte(
   6.456  
   6.457      mask = ~(_PAGE_GLOBAL|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|_PAGE_AVAIL|PAGE_MASK);
   6.458  
   6.459 -    if ( ((l1e_get_value(shadow_pte) & mask) != (l1e_get_value(eff_guest_pte) & mask)) )
   6.460 +    if ( ((l1e_get_intpte(shadow_pte) & mask) != (l1e_get_intpte(eff_guest_pte) & mask)) )
   6.461          FAIL("Corrupt?");
   6.462  
   6.463      if ( (level == 1) &&
   6.464 @@ -3049,7 +3049,7 @@ static int check_pte(
   6.465  
   6.466      if ( !VALID_MFN(eff_guest_mfn) && !shadow_mode_refcounts(d) )
   6.467          FAIL("%s: invalid eff_guest_pfn=%lx eff_guest_pte=%lx\n", __func__, eff_guest_pfn,
   6.468 -             l1e_get_value(eff_guest_pte));
   6.469 +             l1e_get_intpte(eff_guest_pte));
   6.470  
   6.471      page_table_page = mfn_is_page_table(eff_guest_mfn);
   6.472  
   6.473 @@ -3179,26 +3179,26 @@ int check_l2_table(
   6.474          FAILPT("hypervisor linear map inconsistent");
   6.475  #endif
   6.476  
   6.477 -    match = l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
   6.478 +    match = l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
   6.479      if ( !shadow_mode_external(d) &&
   6.480 -         l2e_has_changed(&spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
   6.481 -                         &match, PAGE_FLAG_MASK))
   6.482 +         l2e_has_changed(spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
   6.483 +                         match, PAGE_FLAG_MASK))
   6.484      {
   6.485          FAILPT("hypervisor shadow linear map inconsistent %lx %lx",
   6.486 -               l2e_get_value(spl2e[SH_LINEAR_PT_VIRT_START >>
   6.487 +               l2e_get_intpte(spl2e[SH_LINEAR_PT_VIRT_START >>
   6.488                                     L2_PAGETABLE_SHIFT]),
   6.489 -               l2e_get_value(match));
   6.490 +               l2e_get_intpte(match));
   6.491      }
   6.492  
   6.493 -    match = l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
   6.494 +    match = l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
   6.495      if ( !shadow_mode_external(d) &&
   6.496 -         l2e_has_changed(&spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
   6.497 -                         &match, PAGE_FLAG_MASK))
   6.498 +         l2e_has_changed(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
   6.499 +                         match, PAGE_FLAG_MASK))
   6.500      {
   6.501          FAILPT("hypervisor per-domain map inconsistent saw %lx, expected (va=%p) %lx",
   6.502 -               l2e_get_value(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
   6.503 +               l2e_get_intpte(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
   6.504                 d->arch.mm_perdomain_pt,
   6.505 -               l2e_get_value(match));
   6.506 +               l2e_get_intpte(match));
   6.507      }
   6.508  
   6.509  #ifdef __i386__
   6.510 @@ -3285,7 +3285,7 @@ int _check_pagetable(struct exec_domain 
   6.511          unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
   6.512          unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
   6.513  
   6.514 -        if ( l2e_get_value(spl2e[i]) != 0 )  /* FIXME: check flags? */
   6.515 +        if ( l2e_get_intpte(spl2e[i]) != 0 )  /* FIXME: check flags? */
   6.516          {
   6.517              errors += check_l1_table(ed, gl1pfn, gl1mfn, sl1mfn, i);
   6.518          }
     7.1 --- a/xen/arch/x86/traps.c	Tue May 31 23:04:23 2005 +0000
     7.2 +++ b/xen/arch/x86/traps.c	Wed Jun 01 09:06:47 2005 +0000
     7.3 @@ -797,7 +797,7 @@ static int emulate_privileged_op(struct 
     7.4              break;
     7.5              
     7.6          case 3: /* Read CR3 */
     7.7 -            *reg = pagetable_get_phys(ed->arch.guest_table);
     7.8 +            *reg = pagetable_get_paddr(ed->arch.guest_table);
     7.9              break;
    7.10  
    7.11          default:
     8.1 --- a/xen/arch/x86/vmx.c	Tue May 31 23:04:23 2005 +0000
     8.2 +++ b/xen/arch/x86/vmx.c	Wed Jun 01 09:06:47 2005 +0000
     8.3 @@ -142,7 +142,7 @@ static int vmx_do_page_fault(unsigned lo
     8.4      gpte = gva_to_gpte(va);
     8.5      if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
     8.6              return 0;
     8.7 -    gpa = l1e_get_phys(gpte) + (va & ~PAGE_MASK);
     8.8 +    gpa = l1e_get_paddr(gpte) + (va & ~PAGE_MASK);
     8.9  
    8.10      /* Use 1:1 page table to identify MMIO address space */
    8.11      if (mmio_space(gpa))
    8.12 @@ -567,7 +567,7 @@ vmx_world_restore(struct exec_domain *d,
    8.13  
    8.14      if (!vmx_paging_enabled(d)) {
    8.15  	VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
    8.16 -	__vmwrite(GUEST_CR3, pagetable_get_phys(d->domain->arch.phys_table));
    8.17 +	__vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
    8.18          goto skip_cr3;
    8.19      }
    8.20  
    8.21 @@ -603,7 +603,7 @@ vmx_world_restore(struct exec_domain *d,
    8.22  	 */
    8.23  	d->arch.arch_vmx.cpu_cr3 = c->cr3;
    8.24  	VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", c->cr3);
    8.25 -	__vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
    8.26 +	__vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
    8.27      }
    8.28  
    8.29  skip_cr3:
    8.30 @@ -769,7 +769,7 @@ static int vmx_set_cr0(unsigned long val
    8.31          VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
    8.32                  (unsigned long) (mfn << PAGE_SHIFT));
    8.33  
    8.34 -        __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
    8.35 +        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
    8.36          /* 
    8.37           * arch->shadow_table should hold the next CR3 for shadow
    8.38           */
    8.39 @@ -896,7 +896,7 @@ static int mov_to_cr(int gp, int cr, str
    8.40              d->arch.arch_vmx.cpu_cr3 = value;
    8.41              VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
    8.42                      value);
    8.43 -            __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
    8.44 +            __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
    8.45          }
    8.46          break;
    8.47      }
     9.1 --- a/xen/arch/x86/vmx_io.c	Tue May 31 23:04:23 2005 +0000
     9.2 +++ b/xen/arch/x86/vmx_io.c	Wed Jun 01 09:06:47 2005 +0000
     9.3 @@ -466,12 +466,12 @@ void vmx_do_resume(struct exec_domain *d
     9.4  {
     9.5      vmx_stts();
     9.6      if ( vmx_paging_enabled(d) )
     9.7 -        __vmwrite(GUEST_CR3, pagetable_get_phys(d->arch.shadow_table));
     9.8 +        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
     9.9      else
    9.10          // paging is not enabled in the guest
    9.11 -        __vmwrite(GUEST_CR3, pagetable_get_phys(d->domain->arch.phys_table));
    9.12 +        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
    9.13  
    9.14 -    __vmwrite(HOST_CR3, pagetable_get_phys(d->arch.monitor_table));
    9.15 +    __vmwrite(HOST_CR3, pagetable_get_paddr(d->arch.monitor_table));
    9.16      __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
    9.17  
    9.18      if (event_pending(d)) {
    10.1 --- a/xen/arch/x86/vmx_vmcs.c	Tue May 31 23:04:23 2005 +0000
    10.2 +++ b/xen/arch/x86/vmx_vmcs.c	Wed Jun 01 09:06:47 2005 +0000
    10.3 @@ -196,8 +196,8 @@ void vmx_do_launch(struct exec_domain *e
    10.4      error |= __vmwrite(GUEST_TR_BASE, 0);
    10.5      error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
    10.6  
    10.7 -    __vmwrite(GUEST_CR3, pagetable_get_phys(ed->arch.guest_table));
    10.8 -    __vmwrite(HOST_CR3, pagetable_get_phys(ed->arch.monitor_table));
    10.9 +    __vmwrite(GUEST_CR3, pagetable_get_paddr(ed->arch.guest_table));
   10.10 +    __vmwrite(HOST_CR3, pagetable_get_paddr(ed->arch.monitor_table));
   10.11      __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
   10.12  
   10.13      ed->arch.schedule_tail = arch_vmx_do_resume;
    11.1 --- a/xen/arch/x86/x86_32/domain_page.c	Tue May 31 23:04:23 2005 +0000
    11.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Wed Jun 01 09:06:47 2005 +0000
    11.3 @@ -74,7 +74,7 @@ void *map_domain_mem(unsigned long pa)
    11.4      }
    11.5      while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT );
    11.6  
    11.7 -    cache[idx] = l1e_create_phys(pa, __PAGE_HYPERVISOR);
    11.8 +    cache[idx] = l1e_from_paddr(pa, __PAGE_HYPERVISOR);
    11.9  
   11.10      spin_unlock(&map_lock);
   11.11  
   11.12 @@ -88,5 +88,5 @@ void unmap_domain_mem(void *va)
   11.13      ASSERT((void *)MAPCACHE_VIRT_START <= va);
   11.14      ASSERT(va < (void *)MAPCACHE_VIRT_END);
   11.15      idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
   11.16 -    l1e_add_flags(&mapcache[idx], READY_FOR_TLB_FLUSH);
   11.17 +    l1e_add_flags(mapcache[idx], READY_FOR_TLB_FLUSH);
   11.18  }
    12.1 --- a/xen/arch/x86/x86_32/mm.c	Tue May 31 23:04:23 2005 +0000
    12.2 +++ b/xen/arch/x86/x86_32/mm.c	Wed Jun 01 09:06:47 2005 +0000
    12.3 @@ -85,9 +85,9 @@ void __init paging_init(void)
    12.4          if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL )
    12.5              panic("Not enough memory to bootstrap Xen.\n");
    12.6          idle_pg_table_l2[l2_linear_offset(v)] =
    12.7 -            l2e_create_page(pg, __PAGE_HYPERVISOR | _PAGE_PSE);
    12.8 +            l2e_from_page(pg, __PAGE_HYPERVISOR | _PAGE_PSE);
    12.9          idle_pg_table_l2[l2_linear_offset(v2)] =
   12.10 -            l2e_create_page(pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
   12.11 +            l2e_from_page(pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
   12.12      }
   12.13      memset((void *)RDWR_MPT_VIRT_START, 0x55, mpt_size);
   12.14  
   12.15 @@ -99,7 +99,7 @@ void __init paging_init(void)
   12.16                  continue;
   12.17              if (v >= RO_MPT_VIRT_START && v < RO_MPT_VIRT_END)
   12.18                  continue;
   12.19 -            l2e_add_flags(&idle_pg_table_l2[l2_linear_offset(v)],
   12.20 +            l2e_add_flags(idle_pg_table_l2[l2_linear_offset(v)],
   12.21                            _PAGE_GLOBAL);
   12.22          }
   12.23      }
   12.24 @@ -109,7 +109,7 @@ void __init paging_init(void)
   12.25          ioremap_pt = (void *)alloc_xenheap_page();
   12.26          clear_page(ioremap_pt);
   12.27          idle_pg_table_l2[l2_linear_offset(v)] =
   12.28 -            l2e_create_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
   12.29 +            l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
   12.30      }
   12.31  
   12.32      /* Set up mapping cache for domain pages. */
   12.33 @@ -119,13 +119,13 @@ void __init paging_init(void)
   12.34           v += (1 << L2_PAGETABLE_SHIFT), i++) {
   12.35          clear_page(mapcache + i*L1_PAGETABLE_ENTRIES);
   12.36          idle_pg_table_l2[l2_linear_offset(v)] =
   12.37 -            l2e_create_page(virt_to_page(mapcache + i*L1_PAGETABLE_ENTRIES),
   12.38 +            l2e_from_page(virt_to_page(mapcache + i*L1_PAGETABLE_ENTRIES),
   12.39                              __PAGE_HYPERVISOR);
   12.40      }
   12.41  
   12.42      for (v = LINEAR_PT_VIRT_START; v != LINEAR_PT_VIRT_END; v += (1 << L2_PAGETABLE_SHIFT)) {
   12.43          idle_pg_table_l2[l2_linear_offset(v)] =
   12.44 -            l2e_create_page(virt_to_page(idle_pg_table_l2 + ((v-RDWR_MPT_VIRT_START) >> PAGETABLE_ORDER)),
   12.45 +            l2e_from_page(virt_to_page(idle_pg_table_l2 + ((v-RDWR_MPT_VIRT_START) >> PAGETABLE_ORDER)),
   12.46                              __PAGE_HYPERVISOR);
   12.47      }
   12.48  }
   12.49 @@ -139,7 +139,7 @@ void __init zap_low_mappings(l2_pgentry_
   12.50          addr = (i << L2_PAGETABLE_SHIFT);
   12.51          if (addr >= HYPERVISOR_VIRT_START)
   12.52              break;
   12.53 -        if (l2e_get_phys(base[i]) != addr)
   12.54 +        if (l2e_get_paddr(base[i]) != addr)
   12.55              continue;
   12.56          base[i] = l2e_empty();
   12.57      }
    13.1 --- a/xen/arch/x86/x86_32/traps.c	Tue May 31 23:04:23 2005 +0000
    13.2 +++ b/xen/arch/x86/x86_32/traps.c	Wed Jun 01 09:06:47 2005 +0000
    13.3 @@ -101,15 +101,15 @@ void show_page_walk(unsigned long addr)
    13.4      printk("Pagetable walk from %08lx:\n", addr);
    13.5      
    13.6      pmd = idle_pg_table_l2[l2_linear_offset(addr)];
    13.7 -    printk(" L2 = %08llx %s\n", (u64)l2e_get_value(pmd),
    13.8 +    printk(" L2 = %"PRIpte" %s\n", l2e_get_intpte(pmd),
    13.9             (l2e_get_flags(pmd) & _PAGE_PSE) ? "(2/4MB)" : "");
   13.10      if ( !(l2e_get_flags(pmd) & _PAGE_PRESENT) ||
   13.11           (l2e_get_flags(pmd) & _PAGE_PSE) )
   13.12          return;
   13.13  
   13.14 -    pte  = __va(l2e_get_phys(pmd));
   13.15 +    pte  = __va(l2e_get_paddr(pmd));
   13.16      pte += l1_table_offset(addr);
   13.17 -    printk("  L1 = %08llx\n", (u64)l1e_get_value(*pte));
   13.18 +    printk("  L1 = %"PRIpte"\n", l1e_get_intpte(*pte));
   13.19  }
   13.20  
   13.21  #define DOUBLEFAULT_STACK_SIZE 1024
    14.1 --- a/xen/arch/x86/x86_64/mm.c	Tue May 31 23:04:23 2005 +0000
    14.2 +++ b/xen/arch/x86/x86_64/mm.c	Wed Jun 01 09:06:47 2005 +0000
    14.3 @@ -1,21 +1,20 @@
    14.4  /******************************************************************************
    14.5   * arch/x86/x86_64/mm.c
    14.6   * 
    14.7 - * Modifications to Linux original are copyright (c) 2004, K A Fraser
    14.8 - * 
    14.9 - * This program is free software; you can redistribute it and/or modify
   14.10 - * it under the terms of the GNU General Public License as published by
   14.11 - * the Free Software Foundation; either version 2 of the License, or
   14.12 - * (at your option) any later version.
    14.13 + * Modifications to Linux original are copyright (c) 2004, K A Fraser. This 
   14.14 + * program is free software; you can redistribute it and/or modify it under 
   14.15 + * the terms of the GNU General Public License as published by the Free 
   14.16 + * Software Foundation; either version 2 of the License, or (at your option) 
   14.17 + * any later version.
   14.18   * 
   14.19 - * This program is distributed in the hope that it will be useful,
   14.20 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
   14.21 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   14.22 - * GNU General Public License for more details.
   14.23 + * This program is distributed in the hope that it will be useful, but WITHOUT 
   14.24 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
   14.25 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
   14.26 + * more details.
   14.27   * 
   14.28 - * You should have received a copy of the GNU General Public License
   14.29 - * along with this program; if not, write to the Free Software
   14.30 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   14.31 + * You should have received a copy of the GNU General Public License along 
   14.32 + * with this program; if not, write to the Free Software Foundation, Inc., 59 
   14.33 + * Temple Place, Suite 330, Boston, MA  02111-1307  USA
   14.34   */
   14.35  
   14.36  #include <xen/config.h>
   14.37 @@ -57,7 +56,7 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
   14.38      {
   14.39          pl3e = page_to_virt(alloc_xen_pagetable());
   14.40          clear_page(pl3e);
   14.41 -        *pl4e = l4e_create_phys(__pa(pl3e), __PAGE_HYPERVISOR);
   14.42 +        *pl4e = l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR);
   14.43      }
   14.44      
   14.45      pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(v);
   14.46 @@ -65,7 +64,7 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
   14.47      {
   14.48          pl2e = page_to_virt(alloc_xen_pagetable());
   14.49          clear_page(pl2e);
   14.50 -        *pl3e = l3e_create_phys(__pa(pl2e), __PAGE_HYPERVISOR);
   14.51 +        *pl3e = l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR);
   14.52      }
   14.53      
   14.54      pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
   14.55 @@ -85,12 +84,12 @@ void __init paging_init(void)
   14.56      l3_ro_mpt = (l3_pgentry_t *)alloc_xenheap_page();
   14.57      clear_page(l3_ro_mpt);
   14.58      idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
   14.59 -        l4e_create_page(
   14.60 +        l4e_from_page(
   14.61              virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
   14.62      l2_ro_mpt = (l2_pgentry_t *)alloc_xenheap_page();
   14.63      clear_page(l2_ro_mpt);
   14.64      l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
   14.65 -        l3e_create_page(
   14.66 +        l3e_from_page(
   14.67              virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
   14.68      l2_ro_mpt += l2_table_offset(RO_MPT_VIRT_START);
   14.69  
   14.70 @@ -109,14 +108,14 @@ void __init paging_init(void)
   14.71              PAGE_HYPERVISOR);
   14.72          memset((void *)(RDWR_MPT_VIRT_START + i*8), 0x55,
   14.73                 1UL << L2_PAGETABLE_SHIFT);
   14.74 -        *l2_ro_mpt++ = l2e_create_page(
   14.75 +        *l2_ro_mpt++ = l2e_from_page(
   14.76              pg, _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
   14.77          BUG_ON(((unsigned long)l2_ro_mpt & ~PAGE_MASK) == 0);
   14.78      }
   14.79  
   14.80      /* Set up linear page table mapping. */
   14.81      idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
   14.82 -        l4e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
   14.83 +        l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
   14.84  }
   14.85  
   14.86  void __init zap_low_mappings(void)
    15.1 --- a/xen/common/grant_table.c	Tue May 31 23:04:23 2005 +0000
    15.2 +++ b/xen/common/grant_table.c	Wed Jun 01 09:06:47 2005 +0000
    15.3 @@ -257,9 +257,9 @@ static int
    15.4      {
    15.5          /* Write update into the pagetable. */
    15.6          l1_pgentry_t pte;
    15.7 -        pte = l1e_create_pfn(frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
    15.8 +        pte = l1e_from_pfn(frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
    15.9          if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
   15.10 -            l1e_add_flags(&pte,_PAGE_RW);
   15.11 +            l1e_add_flags(pte,_PAGE_RW);
   15.12          rc = update_grant_va_mapping( host_virt_addr, pte, 
   15.13                         mapping_d, mapping_ed );
   15.14  
    16.1 --- a/xen/include/asm-x86/page.h	Tue May 31 23:04:23 2005 +0000
    16.2 +++ b/xen/include/asm-x86/page.h	Wed Jun 01 09:06:47 2005 +0000
    16.3 @@ -20,11 +20,11 @@
    16.4  # include <asm/x86_64/page.h>
    16.5  #endif
    16.6  
    16.7 -/* Get pte contents as an integer (intpte_t). */
    16.8 -#define l1e_get_value(x)           ((x).l1)
    16.9 -#define l2e_get_value(x)           ((x).l2)
   16.10 -#define l3e_get_value(x)           ((x).l3)
   16.11 -#define l4e_get_value(x)           ((x).l4)
   16.12 +/* Get direct integer representation of a pte's contents (intpte_t). */
   16.13 +#define l1e_get_intpte(x)          ((x).l1)
   16.14 +#define l2e_get_intpte(x)          ((x).l2)
   16.15 +#define l3e_get_intpte(x)          ((x).l3)
   16.16 +#define l4e_get_intpte(x)          ((x).l4)
   16.17  
   16.18  /* Get pfn mapped by pte (unsigned long). */
   16.19  #define l1e_get_pfn(x)             \
   16.20 @@ -37,15 +37,21 @@
   16.21      ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
   16.22  
   16.23  /* Get physical address of page mapped by pte (physaddr_t). */
   16.24 -#define l1e_get_phys(x)            \
   16.25 +#define l1e_get_paddr(x)           \
   16.26      ((physaddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
   16.27 -#define l2e_get_phys(x)            \
   16.28 +#define l2e_get_paddr(x)           \
   16.29      ((physaddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
   16.30 -#define l3e_get_phys(x)            \
   16.31 +#define l3e_get_paddr(x)           \
   16.32      ((physaddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
   16.33 -#define l4e_get_phys(x)            \
   16.34 +#define l4e_get_paddr(x)           \
   16.35      ((physaddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))
   16.36  
   16.37 +/* Get pointer to info structure of page mapped by pte (struct pfn_info *). */
   16.38 +#define l1e_get_page(x)           (pfn_to_page(l1e_get_pfn(x)))
   16.39 +#define l2e_get_page(x)           (pfn_to_page(l2e_get_pfn(x)))
   16.40 +#define l3e_get_page(x)           (pfn_to_page(l3e_get_pfn(x)))
   16.41 +#define l4e_get_page(x)           (pfn_to_page(l4e_get_pfn(x)))
   16.42 +
   16.43  /* Get pte access flags (unsigned int). */
   16.44  #define l1e_get_flags(x)           (get_pte_flags((x).l1))
   16.45  #define l2e_get_flags(x)           (get_pte_flags((x).l2))
   16.46 @@ -59,51 +65,63 @@
   16.47  #define l4e_empty()                ((l4_pgentry_t) { 0 })
   16.48  
   16.49  /* Construct a pte from a pfn and access flags. */
   16.50 -#define l1e_create_pfn(pfn, flags) \
   16.51 +#define l1e_from_pfn(pfn, flags)   \
   16.52      ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
   16.53 -#define l2e_create_pfn(pfn, flags) \
   16.54 +#define l2e_from_pfn(pfn, flags)   \
   16.55      ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
   16.56 -#define l3e_create_pfn(pfn, flags) \
   16.57 +#define l3e_from_pfn(pfn, flags)   \
   16.58      ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
   16.59 -#define l4e_create_pfn(pfn, flags) \
   16.60 +#define l4e_from_pfn(pfn, flags)   \
   16.61      ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
   16.62  
   16.63  /* Construct a pte from a physical address and access flags. */
   16.64 -#define l1e_create_phys(pa, flags) \
   16.65 +#define l1e_from_paddr(pa, flags)  \
   16.66      ((l1_pgentry_t) { (pa) | put_pte_flags(flags) })
   16.67 -#define l2e_create_phys(pa, flags) \
   16.68 +#define l2e_from_paddr(pa, flags)  \
   16.69      ((l2_pgentry_t) { (pa) | put_pte_flags(flags) })
   16.70 -#define l3e_create_phys(pa, flags) \
   16.71 +#define l3e_from_paddr(pa, flags)  \
   16.72      ((l3_pgentry_t) { (pa) | put_pte_flags(flags) })
   16.73 -#define l4e_create_phys(pa, flags) \
   16.74 +#define l4e_from_paddr(pa, flags)  \
   16.75      ((l4_pgentry_t) { (pa) | put_pte_flags(flags) })
   16.76  
   16.77 +/* Construct a pte from its direct integer representation. */
   16.78 +#define l1e_from_intpte(intpte)    ((l1_pgentry_t) { (intpte_t)(intpte) })
   16.79 +#define l2e_from_intpte(intpte)    ((l2_pgentry_t) { (intpte_t)(intpte) })
   16.80 +#define l3e_from_intpte(intpte)    ((l3_pgentry_t) { (intpte_t)(intpte) })
   16.81 +#define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })
   16.82 +
   16.83 +/* Construct a pte from a page pointer and access flags. */
   16.84 +#define l1e_from_page(page, flags) (l1e_from_pfn(page_to_pfn(page),(flags)))
   16.85 +#define l2e_from_page(page, flags) (l2e_from_pfn(page_to_pfn(page),(flags)))
   16.86 +#define l3e_from_page(page, flags) (l3e_from_pfn(page_to_pfn(page),(flags)))
   16.87 +#define l4e_from_page(page, flags) (l4e_from_pfn(page_to_pfn(page),(flags)))
   16.88 +
   16.89  /* Add extra flags to an existing pte. */
   16.90 -#define l1e_add_flags(x, flags)    ((x)->l1 |= put_pte_flags(flags))
   16.91 -#define l2e_add_flags(x, flags)    ((x)->l2 |= put_pte_flags(flags))
   16.92 -#define l3e_add_flags(x, flags)    ((x)->l3 |= put_pte_flags(flags))
   16.93 -#define l4e_add_flags(x, flags)    ((x)->l4 |= put_pte_flags(flags))
   16.94 +#define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
   16.95 +#define l2e_add_flags(x, flags)    ((x).l2 |= put_pte_flags(flags))
   16.96 +#define l3e_add_flags(x, flags)    ((x).l3 |= put_pte_flags(flags))
   16.97 +#define l4e_add_flags(x, flags)    ((x).l4 |= put_pte_flags(flags))
   16.98  
   16.99  /* Remove flags from an existing pte. */
  16.100 -#define l1e_remove_flags(x, flags) ((x)->l1 &= ~put_pte_flags(flags))
  16.101 -#define l2e_remove_flags(x, flags) ((x)->l2 &= ~put_pte_flags(flags))
  16.102 -#define l3e_remove_flags(x, flags) ((x)->l3 &= ~put_pte_flags(flags))
  16.103 -#define l4e_remove_flags(x, flags) ((x)->l4 &= ~put_pte_flags(flags))
  16.104 +#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
  16.105 +#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
  16.106 +#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
  16.107 +#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))
  16.108  
  16.109  /* Check if a pte's page mapping or significant access flags have changed. */
  16.110  #define l1e_has_changed(x,y,flags) \
  16.111 -    ( !!(((x)->l1 ^ (y)->l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.112 +    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.113  #define l2e_has_changed(x,y,flags) \
  16.114 -    ( !!(((x)->l2 ^ (y)->l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.115 +    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.116  #define l3e_has_changed(x,y,flags) \
  16.117 -    ( !!(((x)->l3 ^ (y)->l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.118 +    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.119  #define l4e_has_changed(x,y,flags) \
  16.120 -    ( !!(((x)->l4 ^ (y)->l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.121 +    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
  16.122  
  16.123  /* Pagetable walking. */
  16.124 -#define l2e_to_l1e(x)              ((l1_pgentry_t *)__va(l2e_get_phys(x)))
  16.125 -#define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_phys(x)))
  16.126 -#define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_phys(x)))
  16.127 +#define l2e_to_l1e(x)              ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
  16.128 +#define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
  16.129 +#define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
  16.130  
  16.131  /* Given a virtual address, get an entry offset into a page table. */
  16.132  #define l1_table_offset(a)         \
  16.133 @@ -116,7 +134,7 @@
  16.134      (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
  16.135  
  16.136  /* Convert a pointer to a page-table entry into pagetable slot index. */
  16.137 -#define pgentry_ptr_to_slot(_p) \
  16.138 +#define pgentry_ptr_to_slot(_p)    \
  16.139      (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))
  16.140  
  16.141  /* Page-table type. */
  16.142 @@ -131,9 +149,10 @@ typedef struct { u32 pfn; } pagetable_t;
  16.143  /* x86_64 */
  16.144  typedef struct { u64 pfn; } pagetable_t;
  16.145  #endif
  16.146 -#define pagetable_get_phys(_x) ((physaddr_t)(_x).pfn << PAGE_SHIFT)
  16.147 -#define pagetable_get_pfn(_x)  ((_x).pfn)
  16.148 -#define mk_pagetable(_phys)    ({ pagetable_t __p; __p.pfn = _phys >> PAGE_SHIFT; __p; })
  16.149 +#define pagetable_get_paddr(x) ((physaddr_t)(x).pfn << PAGE_SHIFT)
  16.150 +#define pagetable_get_pfn(x)   ((x).pfn)
  16.151 +#define mk_pagetable(pa)       \
  16.152 +    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
  16.153  #endif
  16.154  
  16.155  #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
  16.156 @@ -147,16 +166,6 @@ typedef struct { u64 pfn; } pagetable_t;
  16.157  #define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT))
  16.158  #define pfn_valid(_pfn)     ((_pfn) < max_page)
  16.159  
  16.160 -#define l1e_get_page(_x)    (pfn_to_page(l1e_get_pfn(_x)))
  16.161 -#define l2e_get_page(_x)    (pfn_to_page(l2e_get_pfn(_x)))
  16.162 -#define l3e_get_page(_x)    (pfn_to_page(l3e_get_pfn(_x)))
  16.163 -#define l4e_get_page(_x)    (pfn_to_page(l4e_get_pfn(_x)))
  16.164 -
  16.165 -#define l1e_create_page(_x,_y) (l1e_create_pfn(page_to_pfn(_x),(_y)))
  16.166 -#define l2e_create_page(_x,_y) (l2e_create_pfn(page_to_pfn(_x),(_y)))
  16.167 -#define l3e_create_page(_x,_y) (l3e_create_pfn(page_to_pfn(_x),(_y)))
  16.168 -#define l4e_create_page(_x,_y) (l4e_create_pfn(page_to_pfn(_x),(_y)))
  16.169 -
  16.170  /* High table entries are reserved by the hypervisor. */
  16.171  /* FIXME: this breaks with PAE -- kraxel */
  16.172  #define DOMAIN_ENTRIES_PER_L2_PAGETABLE     \
    17.1 --- a/xen/include/asm-x86/shadow.h	Tue May 31 23:04:23 2005 +0000
    17.2 +++ b/xen/include/asm-x86/shadow.h	Wed Jun 01 09:06:47 2005 +0000
    17.3 @@ -377,7 +377,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
    17.4          return 1;
    17.5  
    17.6      nl1e = l1e;
    17.7 -    l1e_remove_flags(&nl1e, _PAGE_GLOBAL);
    17.8 +    l1e_remove_flags(nl1e, _PAGE_GLOBAL);
    17.9      res = get_page_from_l1e(nl1e, d);
   17.10  
   17.11      if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
   17.12 @@ -398,7 +398,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
   17.13      {
   17.14          perfc_incrc(shadow_get_page_fail);
   17.15          FSH_LOG("%s failed to get ref l1e=%lx\n",
   17.16 -                __func__, l1e_get_value(l1e));
   17.17 +                __func__, l1e_get_intpte(l1e));
   17.18      }
   17.19  
   17.20      return res;
   17.21 @@ -558,13 +558,13 @@ update_hl2e(struct exec_domain *ed, unsi
   17.22  
   17.23      if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
   17.24           VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
   17.25 -        new_hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
   17.26 +        new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
   17.27      else
   17.28          new_hl2e = l1e_empty();
   17.29  
   17.30      // only do the ref counting if something has changed.
   17.31      //
   17.32 -    if ( (l1e_has_changed(&old_hl2e, &new_hl2e, PAGE_FLAG_MASK)) )
   17.33 +    if ( (l1e_has_changed(old_hl2e, new_hl2e, PAGE_FLAG_MASK)) )
   17.34      {
   17.35          if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
   17.36               !shadow_get_page(ed->domain, pfn_to_page(l1e_get_pfn(new_hl2e)),
   17.37 @@ -735,11 +735,11 @@ static inline int l1pte_write_fault(
   17.38      }
   17.39  
   17.40      ASSERT(l1e_get_flags(gpte) & _PAGE_RW);
   17.41 -    l1e_add_flags(&gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
   17.42 -    spte = l1e_create_pfn(gmfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
   17.43 +    l1e_add_flags(gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
   17.44 +    spte = l1e_from_pfn(gmfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
   17.45  
   17.46      SH_VVLOG("l1pte_write_fault: updating spte=0x%lx gpte=0x%lx",
   17.47 -             l1e_get_value(spte), l1e_get_value(gpte));
   17.48 +             l1e_get_intpte(spte), l1e_get_intpte(gpte));
   17.49  
   17.50      if ( shadow_mode_log_dirty(d) )
   17.51          __mark_dirty(d, gmfn);
   17.52 @@ -768,17 +768,17 @@ static inline int l1pte_read_fault(
   17.53          return 0;
   17.54      }
   17.55  
   17.56 -    l1e_add_flags(&gpte, _PAGE_ACCESSED);
   17.57 -    spte = l1e_create_pfn(mfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
   17.58 +    l1e_add_flags(gpte, _PAGE_ACCESSED);
   17.59 +    spte = l1e_from_pfn(mfn, l1e_get_flags(gpte) & ~_PAGE_GLOBAL);
   17.60  
   17.61      if ( shadow_mode_log_dirty(d) || !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
   17.62           mfn_is_page_table(mfn) )
   17.63      {
   17.64 -        l1e_remove_flags(&spte, _PAGE_RW);
   17.65 +        l1e_remove_flags(spte, _PAGE_RW);
   17.66      }
   17.67  
   17.68      SH_VVLOG("l1pte_read_fault: updating spte=0x%lx gpte=0x%lx",
   17.69 -             l1e_get_value(spte), l1e_get_value(gpte));
   17.70 +             l1e_get_intpte(spte), l1e_get_intpte(gpte));
   17.71      *gpte_p = gpte;
   17.72      *spte_p = spte;
   17.73  
   17.74 @@ -797,21 +797,20 @@ static inline void l1pte_propagate_from_
   17.75            (_PAGE_PRESENT|_PAGE_ACCESSED)) &&
   17.76           VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
   17.77      {
   17.78 -        spte = l1e_create_pfn(mfn,
   17.79 -                              l1e_get_flags(gpte) &
   17.80 -                              ~(_PAGE_GLOBAL | _PAGE_AVAIL));
   17.81 +        spte = l1e_from_pfn(
   17.82 +            mfn, l1e_get_flags(gpte) & ~(_PAGE_GLOBAL | _PAGE_AVAIL));
   17.83  
   17.84          if ( shadow_mode_log_dirty(d) ||
   17.85               !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
   17.86               mfn_is_page_table(mfn) )
   17.87          {
   17.88 -            l1e_remove_flags(&spte, _PAGE_RW);
   17.89 +            l1e_remove_flags(spte, _PAGE_RW);
   17.90          }
   17.91      }
   17.92  
   17.93 -    if ( l1e_get_value(spte) || l1e_get_value(gpte) )
   17.94 +    if ( l1e_get_intpte(spte) || l1e_get_intpte(gpte) )
   17.95          SH_VVVLOG("%s: gpte=%lx, new spte=%lx",
   17.96 -                  __func__, l1e_get_value(gpte), l1e_get_value(spte));
   17.97 +                  __func__, l1e_get_intpte(gpte), l1e_get_intpte(spte));
   17.98  
   17.99      *spte_p = spte;
  17.100  }
  17.101 @@ -840,12 +839,12 @@ static inline void hl2e_propagate_from_g
  17.102              mfn = __gpfn_to_mfn(d, pfn);
  17.103  
  17.104          if ( VALID_MFN(mfn) && (mfn < max_page) )
  17.105 -            hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
  17.106 +            hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR);
  17.107      }
  17.108  
  17.109 -    if ( l1e_get_value(hl2e) || l2e_get_value(gpde) )
  17.110 +    if ( l1e_get_intpte(hl2e) || l2e_get_intpte(gpde) )
  17.111          SH_VVLOG("%s: gpde=%lx hl2e=%lx", __func__,
  17.112 -                 l2e_get_value(gpde), l1e_get_value(hl2e));
  17.113 +                 l2e_get_intpte(gpde), l1e_get_intpte(hl2e));
  17.114  
  17.115      *hl2e_p = hl2e;
  17.116  }
  17.117 @@ -862,19 +861,19 @@ static inline void l2pde_general(
  17.118      spde = l2e_empty();
  17.119      if ( (l2e_get_flags(gpde) & _PAGE_PRESENT) && (sl1mfn != 0) )
  17.120      {
  17.121 -        spde = l2e_create_pfn(sl1mfn,
  17.122 -                              (l2e_get_flags(gpde) | _PAGE_RW | _PAGE_ACCESSED)
  17.123 -                              & ~(_PAGE_AVAIL));
  17.124 +        spde = l2e_from_pfn(
  17.125 +            sl1mfn, 
  17.126 +            (l2e_get_flags(gpde) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
  17.127  
  17.128          /* N.B. PDEs do not have a dirty bit. */
  17.129 -        l2e_add_flags(&gpde, _PAGE_ACCESSED);
  17.130 +        l2e_add_flags(gpde, _PAGE_ACCESSED);
  17.131  
  17.132          *gpde_p = gpde;
  17.133      }
  17.134  
  17.135 -    if ( l2e_get_value(spde) || l2e_get_value(gpde) )
  17.136 +    if ( l2e_get_intpte(spde) || l2e_get_intpte(gpde) )
  17.137          SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
  17.138 -                 l2e_get_value(gpde), l2e_get_value(spde));
  17.139 +                 l2e_get_intpte(gpde), l2e_get_intpte(spde));
  17.140  
  17.141      *spde_p = spde;
  17.142  }
  17.143 @@ -911,13 +910,13 @@ validate_pte_change(
  17.144      {
  17.145          old_spte = *shadow_pte_p;
  17.146  
  17.147 -        if ( l1e_get_value(old_spte) == l1e_get_value(new_spte) )
  17.148 +        if ( l1e_get_intpte(old_spte) == l1e_get_intpte(new_spte) )
  17.149          {
  17.150              // No accounting required...
  17.151              //
  17.152              perfc_incrc(validate_pte_changes1);
  17.153          }
  17.154 -        else if ( l1e_get_value(old_spte) == (l1e_get_value(new_spte)|_PAGE_RW) )
  17.155 +        else if ( l1e_get_intpte(old_spte) == (l1e_get_intpte(new_spte)|_PAGE_RW) )
  17.156          {
  17.157              // Fast path for PTEs that have merely been write-protected
  17.158              // (e.g., during a Unix fork()). A strict reduction in privilege.
  17.159 @@ -928,7 +927,7 @@ validate_pte_change(
  17.160          }
  17.161          else if ( ((l1e_get_flags(old_spte) | l1e_get_flags(new_spte)) &
  17.162                     _PAGE_PRESENT ) &&
  17.163 -                  l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
  17.164 +                  l1e_has_changed(old_spte, new_spte, _PAGE_RW | _PAGE_PRESENT) )
  17.165          {
  17.166              // only do the ref counting if something important changed.
  17.167              //
  17.168 @@ -973,7 +972,7 @@ validate_hl2e_change(
  17.169      // Only do the ref counting if something important changed.
  17.170      //
  17.171      if ( ((l1e_get_flags(old_hl2e) | l1e_get_flags(new_hl2e)) & _PAGE_PRESENT) &&
  17.172 -         l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT) )
  17.173 +         l1e_has_changed(old_hl2e, new_hl2e, _PAGE_PRESENT) )
  17.174      {
  17.175          perfc_incrc(validate_hl2e_changes);
  17.176  
  17.177 @@ -1010,8 +1009,8 @@ validate_pde_change(
  17.178  
  17.179      // Only do the ref counting if something important changed.
  17.180      //
  17.181 -    if ( ((l2e_get_value(old_spde) | l2e_get_value(new_spde)) & _PAGE_PRESENT) &&
  17.182 -         l2e_has_changed(&old_spde, &new_spde, _PAGE_PRESENT) )
  17.183 +    if ( ((l2e_get_intpte(old_spde) | l2e_get_intpte(new_spde)) & _PAGE_PRESENT) &&
  17.184 +         l2e_has_changed(old_spde, new_spde, _PAGE_PRESENT) )
  17.185      {
  17.186          perfc_incrc(validate_pde_changes);
  17.187  
  17.188 @@ -1590,7 +1589,7 @@ shadow_set_l1e(unsigned long va, l1_pgen
  17.189  
  17.190          // only do the ref counting if something important changed.
  17.191          //
  17.192 -        if ( l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
  17.193 +        if ( l1e_has_changed(old_spte, new_spte, _PAGE_RW | _PAGE_PRESENT) )
  17.194          {
  17.195              if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
  17.196                   !shadow_get_page_from_l1e(new_spte, d) )
  17.197 @@ -1664,7 +1663,7 @@ static inline unsigned long gva_to_gpa(u
  17.198      if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
  17.199          return 0;
  17.200  
  17.201 -    return l1e_get_phys(gpte) + (gva & ~PAGE_MASK); 
  17.202 +    return l1e_get_paddr(gpte) + (gva & ~PAGE_MASK); 
  17.203  }
  17.204  
  17.205  /************************************************************************/
  17.206 @@ -1684,7 +1683,7 @@ static inline void update_pagetables(str
  17.207          // HACK ALERT: there's currently no easy way to figure out if a domU
  17.208          // has set its arch.guest_table to zero, vs not yet initialized it.
  17.209          //
  17.210 -        paging_enabled = !!pagetable_get_phys(ed->arch.guest_table);
  17.211 +        paging_enabled = !!pagetable_get_paddr(ed->arch.guest_table);
  17.212  
  17.213      /*
  17.214       * We don't call __update_pagetables() when vmx guest paging is
    18.1 --- a/xen/include/asm-x86/x86_32/page-2level.h	Tue May 31 23:04:23 2005 +0000
    18.2 +++ b/xen/include/asm-x86/x86_32/page-2level.h	Wed Jun 01 09:06:47 2005 +0000
    18.3 @@ -31,9 +31,9 @@ typedef l2_pgentry_t root_pgentry_t;
    18.4  /* root table */
    18.5  #define root_get_pfn              l2e_get_pfn
    18.6  #define root_get_flags            l2e_get_flags
    18.7 -#define root_get_value            l2e_get_value
    18.8 +#define root_get_intpte           l2e_get_intpte
    18.9  #define root_empty                l2e_empty
   18.10 -#define root_create_phys          l2e_create_phys
   18.11 +#define root_from_paddr           l2e_from_paddr
   18.12  #define PGT_root_page_table       PGT_l2_page_table
   18.13  
   18.14  /* misc */
    19.1 --- a/xen/include/asm-x86/x86_32/page-3level.h	Tue May 31 23:04:23 2005 +0000
    19.2 +++ b/xen/include/asm-x86/x86_32/page-3level.h	Wed Jun 01 09:06:47 2005 +0000
    19.3 @@ -41,9 +41,9 @@ typedef l3_pgentry_t root_pgentry_t;
    19.4  /* root table */
    19.5  #define root_get_pfn              l3e_get_pfn
    19.6  #define root_get_flags            l3e_get_flags
    19.7 -#define root_get_value            l3e_get_value
    19.8 +#define root_get_intpte           l3e_get_intpte
    19.9  #define root_empty                l3e_empty
   19.10 -#define root_init_phys            l3e_create_phys
   19.11 +#define root_from_paddr           l3e_from_paddr
   19.12  #define PGT_root_page_table       PGT_l3_page_table
   19.13  
   19.14  /* misc */
    20.1 --- a/xen/include/asm-x86/x86_64/page.h	Tue May 31 23:04:23 2005 +0000
    20.2 +++ b/xen/include/asm-x86/x86_64/page.h	Wed Jun 01 09:06:47 2005 +0000
    20.3 @@ -53,10 +53,10 @@ typedef l4_pgentry_t root_pgentry_t;
    20.4  
    20.5  #define root_get_pfn              l4e_get_pfn
    20.6  #define root_get_flags            l4e_get_flags
    20.7 -#define root_get_value            l4e_get_value
    20.8 +#define root_get_intpte           l4e_get_intpte
    20.9  #define root_empty                l4e_empty
   20.10 -#define root_create_phys          l4e_create_phys
   20.11 -#define PGT_root_page_table PGT_l4_page_table
   20.12 +#define root_from_paddr           l4e_from_paddr
   20.13 +#define PGT_root_page_table       PGT_l4_page_table
   20.14  
   20.15  /*
   20.16   * PTE pfn and flags: