ia64/xen-unstable

changeset 4591:6375127fdf23

bitkeeper revision 1.1311.1.1 (426641eeBv97w6sl983zxeR4Dc3Utg)

Clean up page table handling. Add macros to access page table
entries, and fix up the many places in the code to use the page
table types instead of "unsigned long".
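
For reference, the new scheme wraps each page table entry in a small
struct, so the compiler catches accidental mixing with plain
"unsigned long", and goes through get/create/flag helpers instead of
open-coded shifts and masks. Below is a minimal sketch of the pattern
for L1 entries on non-PAE x86-32; the struct field name, the PAGE_*
constants, and the mask arithmetic are illustrative assumptions, not
the verbatim definitions (those live in the asm-x86 page.h headers
touched by this changeset):

    #define PAGE_SHIFT 12                 /* assumed 4K pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Typed entry: a struct, not a bare unsigned long. */
    typedef struct { unsigned long l1; } l1_pgentry_t;

    /* Readers. */
    #define l1e_get_value(x)  ((x).l1)               /* raw bits */
    #define l1e_get_pfn(x)    ((x).l1 >> PAGE_SHIFT) /* frame number */
    #define l1e_get_phys(x)   ((x).l1 & PAGE_MASK)   /* physical address */
    #define l1e_get_flags(x)  ((x).l1 & ~PAGE_MASK)  /* low flag bits */

    /* Constructors. */
    #define l1e_empty() ((l1_pgentry_t) { 0 })
    #define l1e_create_pfn(pfn, flags) \
        ((l1_pgentry_t) { ((pfn) << PAGE_SHIFT) | (flags) })
    #define l1e_create_phys(pa, flags) \
        ((l1_pgentry_t) { ((pa) & PAGE_MASK) | (flags) })

    /* In-place flag updates (take a pointer, as used below). */
    #define l1e_add_flags(e, flags)    ((e)->l1 |= (flags))
    #define l1e_remove_flags(e, flags) ((e)->l1 &= ~(flags))

    /* Nonzero iff the entries differ in the address bits or in any
     * of the given flag bits; used for the fast-path checks. */
    #define l1e_has_changed(e1, e2, flags) \
        (((e1)->l1 ^ (e2)->l1) & (PAGE_MASK | (flags)))

The same pattern repeats for the l2/l3/l4 entry types, which is why
the UPDATE_ENTRY macro in mm.c can paste "e_get_value" onto an
l2/l3/l4 prefix.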

Signed-off-by: Gerd Knorr <kraxel@bytesex.org>
Signed-off-by: Michael Fetterman <michael.fetterman@cl.cam.ac.uk>
author mafetter@fleming.research
date Wed Apr 20 11:50:06 2005 +0000 (2005-04-20)
parents 5a8c28c62a4d
children 1803018b3b05
files xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/vmx.c xen/arch/x86/vmx_platform.c xen/arch/x86/x86_32/domain_page.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/include/asm-x86/mm.h xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/domain_page.h xen/include/asm-x86/x86_32/page.h xen/include/asm-x86/x86_64/page.h
line diff
     1.1 --- a/xen/arch/x86/dom0_ops.c	Mon Apr 18 17:47:08 2005 +0000
     1.2 +++ b/xen/arch/x86/dom0_ops.c	Wed Apr 20 11:50:06 2005 +0000
     1.3 @@ -425,7 +425,7 @@ void arch_getdomaininfo_ctxt(
     1.4      {
     1.5          for ( i = 0; i < 16; i++ )
     1.6              c->gdt_frames[i] = 
     1.7 -                l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i]);
     1.8 +                l1e_get_pfn(ed->arch.perdomain_ptes[i]);
     1.9          c->gdt_ents = GET_GDT_ENTRIES(ed);
    1.10      }
    1.11      c->kernel_ss  = ed->arch.kernel_ss;
     2.1 --- a/xen/arch/x86/domain.c	Mon Apr 18 17:47:08 2005 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Wed Apr 20 11:50:06 2005 +0000
     2.3 @@ -260,11 +260,13 @@ void arch_do_createdomain(struct exec_do
     2.4          d->arch.mm_perdomain_l2 = (l2_pgentry_t *)alloc_xenheap_page();
     2.5          memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
     2.6          d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)] = 
     2.7 -            mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
     2.8 +            l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
     2.9 +                            __PAGE_HYPERVISOR);
    2.10          d->arch.mm_perdomain_l3 = (l3_pgentry_t *)alloc_xenheap_page();
    2.11          memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
    2.12          d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] = 
    2.13 -            mk_l3_pgentry(__pa(d->arch.mm_perdomain_l2) | __PAGE_HYPERVISOR);
    2.14 +            l3e_create_phys(__pa(d->arch.mm_perdomain_l2),
    2.15 +                            __PAGE_HYPERVISOR);
    2.16  #endif
    2.17  
    2.18          (void)ptwr_init(d);
     3.1 --- a/xen/arch/x86/domain_build.c	Mon Apr 18 17:47:08 2005 +0000
     3.2 +++ b/xen/arch/x86/domain_build.c	Wed Apr 20 11:50:06 2005 +0000
     3.3 @@ -244,9 +244,9 @@ int construct_dom0(struct domain *d,
     3.4      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
     3.5      memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
     3.6      l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
     3.7 -        mk_l2_pgentry((unsigned long)l2start | __PAGE_HYPERVISOR);
     3.8 +        l2e_create_phys((unsigned long)l2start, __PAGE_HYPERVISOR);
     3.9      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    3.10 -        mk_l2_pgentry(__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR);
    3.11 +        l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
    3.12      ed->arch.guest_table = mk_pagetable((unsigned long)l2start);
    3.13  
    3.14      l2tab += l2_table_offset(dsi.v_start);
    3.15 @@ -257,12 +257,14 @@ int construct_dom0(struct domain *d,
    3.16          {
    3.17              l1start = l1tab = (l1_pgentry_t *)mpt_alloc; 
    3.18              mpt_alloc += PAGE_SIZE;
    3.19 -            *l2tab++ = mk_l2_pgentry((unsigned long)l1start | L2_PROT);
    3.20 +            *l2tab = l2e_create_phys((unsigned long)l1start, L2_PROT);
    3.21 +            l2tab++;
    3.22              clear_page(l1tab);
    3.23              if ( count == 0 )
    3.24                  l1tab += l1_table_offset(dsi.v_start);
    3.25          }
    3.26 -        *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
    3.27 +        *l1tab = l1e_create_pfn(mfn, L1_PROT);
    3.28 +        l1tab++;
    3.29          
    3.30          page = &frame_table[mfn];
    3.31          if ( !get_page_and_type(page, d, PGT_writable_page) )
    3.32 @@ -273,13 +275,13 @@ int construct_dom0(struct domain *d,
    3.33  
    3.34      /* Pages that are part of page tables must be read only. */
    3.35      l2tab = l2start + l2_table_offset(vpt_start);
    3.36 -    l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*l2tab);
    3.37 +    l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*l2tab);
    3.38      l1tab += l1_table_offset(vpt_start);
    3.39      for ( count = 0; count < nr_pt_pages; count++ ) 
    3.40      {
    3.41 -        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
    3.42 +        page = &frame_table[l1e_get_pfn(*l1tab)];
    3.43          if ( !opt_dom0_shadow )
    3.44 -            *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
    3.45 +            l1e_remove_flags(l1tab, _PAGE_RW);
    3.46          else
    3.47              if ( !get_page_type(page, PGT_writable_page) )
    3.48                  BUG();
    3.49 @@ -317,7 +319,7 @@ int construct_dom0(struct domain *d,
    3.50              get_page(page, d); /* an extra ref because of readable mapping */
    3.51          }
    3.52          if ( !((unsigned long)++l1tab & (PAGE_SIZE - 1)) )
    3.53 -            l1start = l1tab = (l1_pgentry_t *)l2_pgentry_to_phys(*++l2tab);
    3.54 +            l1start = l1tab = (l1_pgentry_t *)l2e_get_phys(*++l2tab);
    3.55      }
    3.56  
    3.57  #elif defined(__x86_64__)
    3.58 @@ -335,9 +337,9 @@ int construct_dom0(struct domain *d,
    3.59      l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
    3.60      memcpy(l4tab, &idle_pg_table[0], PAGE_SIZE);
    3.61      l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
    3.62 -        mk_l4_pgentry(__pa(l4start) | __PAGE_HYPERVISOR);
    3.63 +        l4e_create_phys(__pa(l4start), __PAGE_HYPERVISOR);
    3.64      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
    3.65 -        mk_l4_pgentry(__pa(d->arch.mm_perdomain_l3) | __PAGE_HYPERVISOR);
    3.66 +        l4e_create_phys(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    3.67      ed->arch.guest_table = mk_pagetable(__pa(l4start));
    3.68  
    3.69      l4tab += l4_table_offset(dsi.v_start);
    3.70 @@ -366,13 +368,17 @@ int construct_dom0(struct domain *d,
    3.71                      clear_page(l3tab);
    3.72                      if ( count == 0 )
    3.73                          l3tab += l3_table_offset(dsi.v_start);
    3.74 -                    *l4tab++ = mk_l4_pgentry(__pa(l3start) | L4_PROT);
    3.75 +                    *l4tab = l4e_create_phys(__pa(l3start), L4_PROT);
    3.76 +                    l4tab++;
    3.77                  }
    3.78 -                *l3tab++ = mk_l3_pgentry(__pa(l2start) | L3_PROT);
    3.79 +                *l3tab = l3e_create_phys(__pa(l2start), L3_PROT);
    3.80 +                l3tab++;
    3.81              }
    3.82 -            *l2tab++ = mk_l2_pgentry(__pa(l1start) | L2_PROT);
    3.83 +            *l2tab = l2e_create_phys(__pa(l1start), L2_PROT);
    3.84 +            l2tab++;
    3.85          }
    3.86 -        *l1tab++ = mk_l1_pgentry((mfn << PAGE_SHIFT) | L1_PROT);
    3.87 +        *l1tab = l1e_create_pfn(mfn, L1_PROT);
    3.88 +        l1tab++;
    3.89  
    3.90          page = &frame_table[mfn];
    3.91          if ( (page->u.inuse.type_info == 0) &&
    3.92 @@ -384,16 +390,16 @@ int construct_dom0(struct domain *d,
    3.93  
    3.94      /* Pages that are part of page tables must be read only. */
    3.95      l4tab = l4start + l4_table_offset(vpt_start);
    3.96 -    l3start = l3tab = l4_pgentry_to_l3(*l4tab);
    3.97 +    l3start = l3tab = l4e_to_l3e(*l4tab);
    3.98      l3tab += l3_table_offset(vpt_start);
    3.99 -    l2start = l2tab = l3_pgentry_to_l2(*l3tab);
   3.100 +    l2start = l2tab = l3e_to_l2e(*l3tab);
   3.101      l2tab += l2_table_offset(vpt_start);
   3.102 -    l1start = l1tab = l2_pgentry_to_l1(*l2tab);
   3.103 +    l1start = l1tab = l2e_to_l1e(*l2tab);
   3.104      l1tab += l1_table_offset(vpt_start);
   3.105      for ( count = 0; count < nr_pt_pages; count++ ) 
   3.106      {
   3.107 -        *l1tab = mk_l1_pgentry(l1_pgentry_val(*l1tab) & ~_PAGE_RW);
   3.108 -        page = &frame_table[l1_pgentry_to_pfn(*l1tab)];
   3.109 +        l1e_remove_flags(l1tab, _PAGE_RW);
   3.110 +        page = &frame_table[l1e_get_pfn(*l1tab)];
   3.111  
   3.112          /* Read-only mapping + PGC_allocated + page-table page. */
   3.113          page->count_info         = PGC_allocated | 3;
   3.114 @@ -412,10 +418,10 @@ int construct_dom0(struct domain *d,
   3.115              if ( !((unsigned long)++l2tab & (PAGE_SIZE - 1)) )
   3.116              {
   3.117                  if ( !((unsigned long)++l3tab & (PAGE_SIZE - 1)) )
   3.118 -                    l3start = l3tab = l4_pgentry_to_l3(*++l4tab); 
   3.119 -                l2start = l2tab = l3_pgentry_to_l2(*l3tab);
   3.120 +                    l3start = l3tab = l4e_to_l3e(*++l4tab); 
   3.121 +                l2start = l2tab = l3e_to_l2e(*l3tab);
   3.122              }
   3.123 -            l1start = l1tab = l2_pgentry_to_l1(*l2tab);
   3.124 +            l1start = l1tab = l2e_to_l1e(*l2tab);
   3.125          }
   3.126      }
   3.127  
   3.128 @@ -525,8 +531,8 @@ int construct_dom0(struct domain *d,
   3.129  #if defined(__i386__)
   3.130      /* Destroy low mappings - they were only for our convenience. */
   3.131      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   3.132 -        if ( l2_pgentry_val(l2start[i]) & _PAGE_PSE )
   3.133 -            l2start[i] = mk_l2_pgentry(0);
   3.134 +        if ( l2e_get_flags(l2start[i]) & _PAGE_PSE )
   3.135 +            l2start[i] = l2e_empty();
   3.136      zap_low_mappings(); /* Do the same for the idle page tables. */
   3.137  #endif
   3.138      
   3.139 @@ -544,17 +550,27 @@ int construct_dom0(struct domain *d,
   3.140                                 : SHM_enable));
   3.141          if ( opt_dom0_translate )
   3.142          {
    3.143 +            /* Hmm, what does this do?
    3.144 +               Looks like it isn't portable across 32/64-bit and PAE/non-PAE ...
    3.145 +               -- kraxel */
    3.146 +
    3.147 +            /* mafetter: This code is mostly a hack in order to be able to
    3.148 +             * test with dom0s which are running with shadow translate.
    3.149 +             * I expect we'll rip this out once we have a stable set of
    3.150 +             * domU clients which use the various shadow modes, but it's
    3.151 +             * useful to leave this here for now...
    3.152 +             */
   3.153 +
   3.154              // map this domain's p2m table into current page table,
   3.155              // so that we can easily access it.
   3.156              //
   3.157 -            ASSERT( root_pgentry_val(idle_pg_table[1]) == 0 );
   3.158 +            ASSERT( root_get_value(idle_pg_table[1]) == 0 );
   3.159              ASSERT( pagetable_val(d->arch.phys_table) );
   3.160 -            idle_pg_table[1] = mk_root_pgentry(
   3.161 -                pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);
   3.162 +            idle_pg_table[1] = root_create_phys(pagetable_val(d->arch.phys_table),
   3.163 +                                                __PAGE_HYPERVISOR);
   3.164              translate_l2pgtable(d, (l1_pgentry_t *)(1u << L2_PAGETABLE_SHIFT),
   3.165 -                                pagetable_val(ed->arch.guest_table)
   3.166 -                                >> PAGE_SHIFT);
   3.167 -            idle_pg_table[1] = mk_root_pgentry(0);
   3.168 +                                pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT);
   3.169 +            idle_pg_table[1] = root_empty();
   3.170              local_flush_tlb();
   3.171          }
   3.172  
     4.1 --- a/xen/arch/x86/mm.c	Mon Apr 18 17:47:08 2005 +0000
     4.2 +++ b/xen/arch/x86/mm.c	Wed Apr 20 11:50:06 2005 +0000
     4.3 @@ -244,9 +244,9 @@ void invalidate_shadow_ldt(struct exec_d
     4.4  
     4.5      for ( i = 16; i < 32; i++ )
     4.6      {
     4.7 -        pfn = l1_pgentry_to_pfn(d->arch.perdomain_ptes[i]);
     4.8 +        pfn = l1e_get_pfn(d->arch.perdomain_ptes[i]);
     4.9          if ( pfn == 0 ) continue;
    4.10 -        d->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
    4.11 +        d->arch.perdomain_ptes[i] = l1e_empty();
    4.12          page = &frame_table[pfn];
    4.13          ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
    4.14          ASSERT_PAGE_IS_DOMAIN(page, d->domain);
    4.15 @@ -283,7 +283,8 @@ int map_ldt_shadow_page(unsigned int off
    4.16  {
    4.17      struct exec_domain *ed = current;
    4.18      struct domain *d = ed->domain;
    4.19 -    unsigned long l1e, nl1e, gpfn, gmfn;
    4.20 +    unsigned long gpfn, gmfn;
    4.21 +    l1_pgentry_t l1e, nl1e;
    4.22      unsigned gva = ed->arch.ldt_base + (off << PAGE_SHIFT);
    4.23      int res;
    4.24  
    4.25 @@ -301,13 +302,14 @@ int map_ldt_shadow_page(unsigned int off
    4.26      shadow_sync_va(ed, gva);
    4.27  
    4.28      TOGGLE_MODE();
    4.29 -    __get_user(l1e, (unsigned long *)&linear_pg_table[l1_linear_offset(gva)]);
    4.30 +    __copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
    4.31 +                     sizeof(l1e));
    4.32      TOGGLE_MODE();
    4.33  
    4.34 -    if ( unlikely(!(l1e & _PAGE_PRESENT)) )
    4.35 +    if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
    4.36          return 0;
    4.37  
    4.38 -    gpfn = l1_pgentry_to_pfn(mk_l1_pgentry(l1e));
    4.39 +    gpfn = l1e_get_pfn(l1e);
    4.40      gmfn = __gpfn_to_mfn(d, gpfn);
    4.41      if ( unlikely(!VALID_MFN(gmfn)) )
    4.42          return 0;
    4.43 @@ -325,9 +327,9 @@ int map_ldt_shadow_page(unsigned int off
    4.44      if ( unlikely(!res) )
    4.45          return 0;
    4.46  
    4.47 -    nl1e = (l1e & ~PAGE_MASK) | (gmfn << PAGE_SHIFT) | _PAGE_RW;
    4.48 -
    4.49 -    ed->arch.perdomain_ptes[off + 16] = mk_l1_pgentry(nl1e);
    4.50 +    nl1e = l1e_create_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
    4.51 +
    4.52 +    ed->arch.perdomain_ptes[off + 16] = nl1e;
    4.53      ed->arch.shadow_ldt_mapcnt++;
    4.54  
    4.55      return 1;
    4.56 @@ -392,13 +394,13 @@ get_linear_pagetable(
    4.57  
    4.58      ASSERT( !shadow_mode_enabled(d) );
    4.59  
    4.60 -    if ( (root_pgentry_val(re) & _PAGE_RW) )
    4.61 +    if ( (root_get_flags(re) & _PAGE_RW) )
    4.62      {
    4.63          MEM_LOG("Attempt to create linear p.t. with write perms");
    4.64          return 0;
    4.65      }
    4.66  
    4.67 -    if ( (pfn = root_pgentry_to_pfn(re)) != re_pfn )
    4.68 +    if ( (pfn = root_get_pfn(re)) != re_pfn )
    4.69      {
    4.70          /* Make sure the mapped frame belongs to the correct domain. */
    4.71          if ( unlikely(!get_page_from_pagenr(pfn, d)) )
    4.72 @@ -431,17 +433,17 @@ int
    4.73  get_page_from_l1e(
    4.74      l1_pgentry_t l1e, struct domain *d)
    4.75  {
    4.76 -    unsigned long l1v = l1_pgentry_val(l1e);
    4.77 -    unsigned long mfn = l1_pgentry_to_pfn(l1e);
    4.78 +    unsigned long mfn = l1e_get_pfn(l1e);
    4.79      struct pfn_info *page = &frame_table[mfn];
    4.80      extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
    4.81  
    4.82 -    if ( !(l1v & _PAGE_PRESENT) )
    4.83 +    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
    4.84          return 1;
    4.85  
    4.86 -    if ( unlikely(l1v & L1_DISALLOW_MASK) )
    4.87 +    if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
    4.88      {
    4.89 -        MEM_LOG("Bad L1 type settings %p %p", l1v, l1v & L1_DISALLOW_MASK);
    4.90 +        MEM_LOG("Bad L1 type settings %p %p", l1e_get_value(l1e),
    4.91 +                l1e_get_value(l1e) & L1_DISALLOW_MASK);
    4.92          return 0;
    4.93      }
    4.94  
    4.95 @@ -466,7 +468,7 @@ get_page_from_l1e(
    4.96          d = dom_io;
    4.97      }
    4.98  
    4.99 -    return ((l1v & _PAGE_RW) ?
   4.100 +    return ((l1e_get_flags(l1e) & _PAGE_RW) ?
   4.101              get_page_and_type(page, d, PGT_writable_page) :
   4.102              get_page(page, d));
   4.103  }
   4.104 @@ -482,18 +484,18 @@ get_page_from_l2e(
   4.105  
   4.106      ASSERT( !shadow_mode_enabled(d) );
   4.107  
   4.108 -    if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
   4.109 +    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   4.110          return 1;
   4.111  
   4.112 -    if ( unlikely((l2_pgentry_val(l2e) & L2_DISALLOW_MASK)) )
   4.113 +    if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) )
   4.114      {
   4.115          MEM_LOG("Bad L2 page type settings %p",
   4.116 -                l2_pgentry_val(l2e) & L2_DISALLOW_MASK);
   4.117 +                l2e_get_value(l2e) & L2_DISALLOW_MASK);
   4.118          return 0;
   4.119      }
   4.120  
   4.121      rc = get_page_and_type_from_pagenr(
   4.122 -        l2_pgentry_to_pfn(l2e), 
   4.123 +        l2e_get_pfn(l2e), 
   4.124          PGT_l1_page_table | (va_idx<<PGT_va_shift), d);
   4.125  
   4.126  #if defined(__i386__)
   4.127 @@ -510,18 +512,18 @@ static int
   4.128  get_page_from_l3e(
   4.129      l3_pgentry_t l3e, unsigned long pfn, struct domain *d)
   4.130  {
   4.131 -    if ( !(l3_pgentry_val(l3e) & _PAGE_PRESENT) )
   4.132 +    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
   4.133          return 1;
   4.134  
   4.135 -    if ( unlikely((l3_pgentry_val(l3e) & L3_DISALLOW_MASK)) )
   4.136 +    if ( unlikely((l3e_get_flags(l3e) & L3_DISALLOW_MASK)) )
   4.137      {
   4.138          MEM_LOG("Bad L3 page type settings %p",
   4.139 -                l3_pgentry_val(l3e) & L3_DISALLOW_MASK);
   4.140 +                l3e_get_value(l3e) & L3_DISALLOW_MASK);
   4.141          return 0;
   4.142      }
   4.143  
   4.144      return get_page_and_type_from_pagenr(
   4.145 -        l3_pgentry_to_pfn(l3e), PGT_l2_page_table, d);
   4.146 +        l3e_get_pfn(l3e), PGT_l2_page_table, d);
   4.147  }
   4.148  
   4.149  
   4.150 @@ -531,18 +533,18 @@ get_page_from_l4e(
   4.151  {
   4.152      int rc;
   4.153  
   4.154 -    if ( !(l4_pgentry_val(l4e) & _PAGE_PRESENT) )
   4.155 +    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
   4.156          return 1;
   4.157  
   4.158 -    if ( unlikely((l4_pgentry_val(l4e) & L4_DISALLOW_MASK)) )
   4.159 +    if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) )
   4.160      {
   4.161          MEM_LOG("Bad L4 page type settings %p",
   4.162 -                l4_pgentry_val(l4e) & L4_DISALLOW_MASK);
   4.163 +                l4e_get_value(l4e) & L4_DISALLOW_MASK);
   4.164          return 0;
   4.165      }
   4.166  
   4.167      rc = get_page_and_type_from_pagenr(
   4.168 -        l4_pgentry_to_pfn(l4e), PGT_l3_page_table, d);
   4.169 +        l4e_get_pfn(l4e), PGT_l3_page_table, d);
   4.170  
   4.171      if ( unlikely(!rc) )
   4.172          return get_linear_pagetable(l4e, pfn, d);
   4.173 @@ -555,12 +557,11 @@ get_page_from_l4e(
   4.174  
   4.175  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
   4.176  {
   4.177 -    unsigned long    l1v  = l1_pgentry_val(l1e);
   4.178 -    unsigned long    pfn  = l1_pgentry_to_pfn(l1e);
   4.179 +    unsigned long    pfn  = l1e_get_pfn(l1e);
   4.180      struct pfn_info *page = &frame_table[pfn];
   4.181      struct domain   *e;
   4.182  
   4.183 -    if ( !(l1v & _PAGE_PRESENT) || !pfn_valid(pfn) )
   4.184 +    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !pfn_valid(pfn) )
   4.185          return;
   4.186  
   4.187      e = page_get_owner(page);
   4.188 @@ -577,12 +578,13 @@ void put_page_from_l1e(l1_pgentry_t l1e,
   4.189           * mappings and which unmappings are counted via the grant entry, but
   4.190           * really it doesn't matter as privileged domains have carte blanche.
   4.191           */
   4.192 -        if ( likely(gnttab_check_unmap(e, d, pfn, !(l1v & _PAGE_RW))) )
   4.193 +        if (likely(gnttab_check_unmap(e, d, pfn,
   4.194 +                                      !(l1e_get_flags(l1e) & _PAGE_RW))))
   4.195              return;
   4.196          /* Assume this mapping was made via MMUEXT_SET_FOREIGNDOM... */
   4.197      }
   4.198  
   4.199 -    if ( l1v & _PAGE_RW )
   4.200 +    if ( l1e_get_flags(l1e) & _PAGE_RW )
   4.201      {
   4.202          put_page_and_type(page);
   4.203      }
   4.204 @@ -606,9 +608,9 @@ void put_page_from_l1e(l1_pgentry_t l1e,
   4.205   */
   4.206  static void put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
   4.207  {
   4.208 -    if ( (l2_pgentry_val(l2e) & _PAGE_PRESENT) && 
   4.209 -         (l2_pgentry_to_pfn(l2e) != pfn) )
   4.210 -        put_page_and_type(&frame_table[l2_pgentry_to_pfn(l2e)]);
   4.211 +    if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) && 
   4.212 +         (l2e_get_pfn(l2e) != pfn) )
   4.213 +        put_page_and_type(&frame_table[l2e_get_pfn(l2e)]);
   4.214  }
   4.215  
   4.216  
   4.217 @@ -616,17 +618,17 @@ static void put_page_from_l2e(l2_pgentry
   4.218  
   4.219  static void put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn)
   4.220  {
   4.221 -    if ( (l3_pgentry_val(l3e) & _PAGE_PRESENT) && 
   4.222 -         (l3_pgentry_to_pfn(l3e) != pfn) )
   4.223 -        put_page_and_type(&frame_table[l3_pgentry_to_pfn(l3e)]);
   4.224 +    if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) && 
   4.225 +         (l3e_get_pfn(l3e) != pfn) )
   4.226 +        put_page_and_type(&frame_table[l3e_get_pfn(l3e)]);
   4.227  }
   4.228  
   4.229  
   4.230  static void put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn)
   4.231  {
   4.232 -    if ( (l4_pgentry_val(l4e) & _PAGE_PRESENT) && 
   4.233 -         (l4_pgentry_to_pfn(l4e) != pfn) )
   4.234 -        put_page_and_type(&frame_table[l4_pgentry_to_pfn(l4e)]);
   4.235 +    if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
   4.236 +         (l4e_get_pfn(l4e) != pfn) )
   4.237 +        put_page_and_type(&frame_table[l4e_get_pfn(l4e)]);
   4.238  }
   4.239  
   4.240  #endif /* __x86_64__ */
   4.241 @@ -686,10 +688,10 @@ static int alloc_l2_table(struct pfn_inf
   4.242             &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
   4.243             ROOT_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
   4.244      pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   4.245 -        mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   4.246 +        l2e_create_pfn(pfn, __PAGE_HYPERVISOR);
   4.247      pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
   4.248 -        mk_l2_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_pt) | 
   4.249 -                      __PAGE_HYPERVISOR);
   4.250 +        l2e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_pt),
   4.251 +                        __PAGE_HYPERVISOR);
   4.252  #endif
   4.253  
   4.254      unmap_domain_mem(pl2e);
   4.255 @@ -754,10 +756,10 @@ static int alloc_l4_table(struct pfn_inf
   4.256             &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
   4.257             ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
   4.258      pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
   4.259 -        mk_l4_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   4.260 +        l4e_create_pfn(pfn, __PAGE_HYPERVISOR);
   4.261      pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
   4.262 -        mk_l4_pgentry(__pa(page_get_owner(page)->arch.mm_perdomain_l3) | 
   4.263 -                      __PAGE_HYPERVISOR);
   4.264 +        l4e_create_phys(__pa(page_get_owner(page)->arch.mm_perdomain_l3),
   4.265 +                        __PAGE_HYPERVISOR);
   4.266  
   4.267      return 1;
   4.268  
   4.269 @@ -837,14 +839,15 @@ static inline int update_l1e(l1_pgentry_
   4.270                               l1_pgentry_t  ol1e, 
   4.271                               l1_pgentry_t  nl1e)
   4.272  {
   4.273 -    unsigned long o = l1_pgentry_val(ol1e);
   4.274 -    unsigned long n = l1_pgentry_val(nl1e);
   4.275 +    /* FIXME: breaks with PAE */
   4.276 +    unsigned long o = l1e_get_value(ol1e);
   4.277 +    unsigned long n = l1e_get_value(nl1e);
   4.278  
   4.279      if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
   4.280 -         unlikely(o != l1_pgentry_val(ol1e)) )
   4.281 +         unlikely(o != l1e_get_value(ol1e)) )
   4.282      {
   4.283          MEM_LOG("Failed to update %p -> %p: saw %p",
   4.284 -                l1_pgentry_val(ol1e), l1_pgentry_val(nl1e), o);
   4.285 +                l1e_get_value(ol1e), l1e_get_value(nl1e), o);
   4.286          return 0;
   4.287      }
   4.288  
   4.289 @@ -856,27 +859,24 @@ static inline int update_l1e(l1_pgentry_
   4.290  static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
   4.291  {
   4.292      l1_pgentry_t ol1e;
   4.293 -    unsigned long _ol1e;
   4.294      struct domain *d = current->domain;
   4.295  
   4.296      ASSERT( !shadow_mode_enabled(d) );
   4.297  
   4.298 -    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
   4.299 +    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
   4.300          return 0;
   4.301 -    ol1e = mk_l1_pgentry(_ol1e);
   4.302 -
   4.303 -    if ( l1_pgentry_val(nl1e) & _PAGE_PRESENT )
   4.304 +
   4.305 +    if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
   4.306      {
   4.307 -        if ( unlikely(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) )
   4.308 +        if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) )
   4.309          {
   4.310              MEM_LOG("Bad L1 type settings %p", 
   4.311 -                    l1_pgentry_val(nl1e) & L1_DISALLOW_MASK);
   4.312 +                    l1e_get_value(nl1e) & L1_DISALLOW_MASK);
   4.313              return 0;
   4.314          }
   4.315  
   4.316          /* Fast path for identical mapping, r/w and presence. */
   4.317 -        if ( ((l1_pgentry_val(ol1e) ^ l1_pgentry_val(nl1e)) & 
   4.318 -              ((PADDR_MASK & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT)) == 0 )
   4.319 +        if ( !l1e_has_changed(&ol1e, &nl1e, _PAGE_RW | _PAGE_PRESENT))
   4.320              return update_l1e(pl1e, ol1e, nl1e);
   4.321  
   4.322          if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
   4.323 @@ -901,12 +901,12 @@ static int mod_l1_entry(l1_pgentry_t *pl
   4.324  
   4.325  #define UPDATE_ENTRY(_t,_p,_o,_n) ({                                    \
   4.326      unsigned long __o = cmpxchg((unsigned long *)(_p),                  \
   4.327 -                                _t ## _pgentry_val(_o),                 \
   4.328 -                                _t ## _pgentry_val(_n));                \
   4.329 -    if ( __o != _t ## _pgentry_val(_o) )                                \
   4.330 +                                _t ## e_get_value(_o),                  \
   4.331 +                                _t ## e_get_value(_n));                 \
   4.332 +    if ( __o != _t ## e_get_value(_o) )                                 \
   4.333          MEM_LOG("Failed to update %p -> %p: saw %p",                    \
   4.334 -                _t ## _pgentry_val(_o), _t ## _pgentry_val(_n), __o);   \
   4.335 -    (__o == _t ## _pgentry_val(_o)); })
   4.336 +                _t ## e_get_value(_o), _t ## e_get_value(_n), __o);     \
   4.337 +    (__o == _t ## e_get_value(_o)); })
   4.338  
   4.339  
   4.340  /* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
   4.341 @@ -915,7 +915,6 @@ static int mod_l2_entry(l2_pgentry_t *pl
   4.342                          unsigned long pfn)
   4.343  {
   4.344      l2_pgentry_t ol2e;
   4.345 -    unsigned long _ol2e;
   4.346  
   4.347      if ( unlikely(!is_guest_l2_slot(pgentry_ptr_to_slot(pl2e))) )
   4.348      {
   4.349 @@ -923,22 +922,20 @@ static int mod_l2_entry(l2_pgentry_t *pl
   4.350          return 0;
   4.351      }
   4.352  
   4.353 -    if ( unlikely(__get_user(_ol2e, (unsigned long *)pl2e) != 0) )
   4.354 +    if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
   4.355          return 0;
   4.356 -    ol2e = mk_l2_pgentry(_ol2e);
   4.357 -
   4.358 -    if ( l2_pgentry_val(nl2e) & _PAGE_PRESENT )
   4.359 +
   4.360 +    if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
   4.361      {
   4.362 -        if ( unlikely(l2_pgentry_val(nl2e) & L2_DISALLOW_MASK) )
   4.363 +        if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
   4.364          {
   4.365              MEM_LOG("Bad L2 type settings %p", 
   4.366 -                    l2_pgentry_val(nl2e) & L2_DISALLOW_MASK);
   4.367 +                    l2e_get_value(nl2e) & L2_DISALLOW_MASK);
   4.368              return 0;
   4.369          }
   4.370  
   4.371          /* Fast path for identical mapping and presence. */
   4.372 -        if ( ((l2_pgentry_val(ol2e) ^ l2_pgentry_val(nl2e)) & 
   4.373 -              ((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
   4.374 +        if ( !l2e_has_changed(&ol2e, &nl2e, _PAGE_PRESENT))
   4.375              return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e);
   4.376  
   4.377          if ( unlikely(!get_page_from_l2e(nl2e, pfn, current->domain,
   4.378 @@ -971,7 +968,6 @@ static int mod_l3_entry(l3_pgentry_t *pl
   4.379                          unsigned long pfn)
   4.380  {
   4.381      l3_pgentry_t ol3e;
   4.382 -    unsigned long _ol3e;
   4.383  
   4.384      if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
   4.385      {
   4.386 @@ -979,22 +975,20 @@ static int mod_l3_entry(l3_pgentry_t *pl
   4.387          return 0;
   4.388      }
   4.389  
   4.390 -    if ( unlikely(__get_user(_ol3e, (unsigned long *)pl3e) != 0) )
   4.391 +    if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
   4.392          return 0;
   4.393 -    ol3e = mk_l3_pgentry(_ol3e);
   4.394 -
   4.395 -    if ( l3_pgentry_val(nl3e) & _PAGE_PRESENT )
   4.396 +
   4.397 +    if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
   4.398      {
   4.399 -        if ( unlikely(l3_pgentry_val(nl3e) & L3_DISALLOW_MASK) )
   4.400 +        if ( unlikely(l3e_get_flags(nl3e) & L3_DISALLOW_MASK) )
   4.401          {
   4.402              MEM_LOG("Bad L3 type settings %p", 
   4.403 -                    l3_pgentry_val(nl3e) & L3_DISALLOW_MASK);
   4.404 +                    l3e_get_value(nl3e) & L3_DISALLOW_MASK);
   4.405              return 0;
   4.406          }
   4.407  
   4.408          /* Fast path for identical mapping and presence. */
   4.409 -        if ( ((l3_pgentry_val(ol3e) ^ l3_pgentry_val(nl3e)) & 
   4.410 -              ((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
   4.411 +        if (!l3e_has_changed(&ol3e, &nl3e, _PAGE_PRESENT))
   4.412              return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
   4.413  
   4.414          if ( unlikely(!get_page_from_l3e(nl3e, pfn, current->domain)) )
   4.415 @@ -1024,7 +1018,6 @@ static int mod_l4_entry(l4_pgentry_t *pl
   4.416                          unsigned long pfn)
   4.417  {
   4.418      l4_pgentry_t ol4e;
   4.419 -    unsigned long _ol4e;
   4.420  
   4.421      if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
   4.422      {
   4.423 @@ -1032,22 +1025,20 @@ static int mod_l4_entry(l4_pgentry_t *pl
   4.424          return 0;
   4.425      }
   4.426  
   4.427 -    if ( unlikely(__get_user(_ol4e, (unsigned long *)pl4e) != 0) )
   4.428 +    if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
   4.429          return 0;
   4.430 -    ol4e = mk_l4_pgentry(_ol4e);
   4.431 -
   4.432 -    if ( l4_pgentry_val(nl4e) & _PAGE_PRESENT )
   4.433 +
   4.434 +    if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
   4.435      {
   4.436 -        if ( unlikely(l4_pgentry_val(nl4e) & L4_DISALLOW_MASK) )
   4.437 +        if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
   4.438          {
   4.439              MEM_LOG("Bad L4 type settings %p", 
   4.440 -                    l4_pgentry_val(nl4e) & L4_DISALLOW_MASK);
   4.441 +                    l4e_get_value(nl4e) & L4_DISALLOW_MASK);
   4.442              return 0;
   4.443          }
   4.444  
   4.445          /* Fast path for identical mapping and presence. */
   4.446 -        if ( ((l4_pgentry_val(ol4e) ^ l4_pgentry_val(nl4e)) & 
   4.447 -              ((PADDR_MASK & PAGE_MASK) | _PAGE_PRESENT)) == 0 )
   4.448 +        if (!l4e_has_changed(&ol4e, &nl4e, _PAGE_PRESENT))
   4.449              return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
   4.450  
   4.451          if ( unlikely(!get_page_from_l4e(nl4e, pfn, current->domain)) )
   4.452 @@ -1881,8 +1872,11 @@ int do_mmu_update(
   4.453                  if ( likely(get_page_type(
   4.454                      page, type_info & (PGT_type_mask|PGT_va_mask))) )
   4.455                  {
   4.456 -                    okay = mod_l1_entry((l1_pgentry_t *)va, 
   4.457 -                                        mk_l1_pgentry(req.val));
   4.458 +                    l1_pgentry_t pte;
   4.459 +
   4.460 +                    /* FIXME: doesn't work with PAE */
   4.461 +                    pte = l1e_create_phys(req.val, req.val);
   4.462 +                    okay = mod_l1_entry((l1_pgentry_t *)va, pte);
   4.463                      put_page_type(page);
   4.464                  }
   4.465                  break;
   4.466 @@ -1890,9 +1884,11 @@ int do_mmu_update(
   4.467                  ASSERT(!shadow_mode_enabled(d));
   4.468                  if ( likely(get_page_type(page, PGT_l2_page_table)) )
   4.469                  {
   4.470 -                    okay = mod_l2_entry((l2_pgentry_t *)va, 
   4.471 -                                        mk_l2_pgentry(req.val),
   4.472 -                                        mfn);
   4.473 +                    l2_pgentry_t l2e;
   4.474 +
   4.475 +                    /* FIXME: doesn't work with PAE */
   4.476 +                    l2e = l2e_create_phys(req.val, req.val);
   4.477 +                    okay = mod_l2_entry((l2_pgentry_t *)va, l2e, mfn);
   4.478                      put_page_type(page);
   4.479                  }
   4.480                  break;
   4.481 @@ -1901,9 +1897,11 @@ int do_mmu_update(
   4.482                  ASSERT(!shadow_mode_enabled(d));
   4.483                  if ( likely(get_page_type(page, PGT_l3_page_table)) )
   4.484                  {
   4.485 -                    okay = mod_l3_entry((l3_pgentry_t *)va, 
   4.486 -                                        mk_l3_pgentry(req.val),
   4.487 -                                        mfn);
   4.488 +                    l3_pgentry_t l3e;
   4.489 +
   4.490 +                    /* FIXME: doesn't work with PAE */
   4.491 +                    l3e = l3e_create_phys(req.val,req.val);
   4.492 +                    okay = mod_l3_entry((l3_pgentry_t *)va, l3e, mfn);
   4.493                      put_page_type(page);
   4.494                  }
   4.495                  break;
   4.496 @@ -1911,9 +1909,10 @@ int do_mmu_update(
   4.497                  ASSERT(!shadow_mode_enabled(d));
   4.498                  if ( likely(get_page_type(page, PGT_l4_page_table)) )
   4.499                  {
   4.500 -                    okay = mod_l4_entry((l4_pgentry_t *)va, 
   4.501 -                                        mk_l4_pgentry(req.val),
   4.502 -                                        mfn);
   4.503 +                    l4_pgentry_t l4e;
   4.504 +
   4.505 +                    l4e = l4e_create_phys(req.val,req.val);
   4.506 +                    okay = mod_l4_entry((l4_pgentry_t *)va, l4e, mfn);
   4.507                      put_page_type(page);
   4.508                  }
   4.509                  break;
   4.510 @@ -2028,12 +2027,12 @@ int do_mmu_update(
   4.511   * and is running in a shadow mode
   4.512   */
   4.513  int update_shadow_va_mapping(unsigned long va,
   4.514 -                             unsigned long val,
   4.515 +                             l1_pgentry_t val,
   4.516                               struct exec_domain *ed,
   4.517                               struct domain *d)
   4.518  {
   4.519      unsigned long l1mfn;
   4.520 -    unsigned long spte;
   4.521 +    l1_pgentry_t spte;
   4.522      int rc = 0;
   4.523  
   4.524      check_pagetable(ed, "pre-va"); /* debug */
   4.525 @@ -2059,8 +2058,7 @@ int update_shadow_va_mapping(unsigned lo
   4.526       *    to teach it about this boundary case.
   4.527       * So we flush this L1 page, if it's out of sync.
   4.528       */
   4.529 -    l1mfn = (l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]) >>
   4.530 -             PAGE_SHIFT);
   4.531 +    l1mfn = l2e_get_pfn(linear_l2_table(ed)[l2_table_offset(va)]);
   4.532      if ( mfn_out_of_sync(l1mfn) )
   4.533      {
   4.534          perfc_incrc(extra_va_update_sync);
   4.535 @@ -2068,8 +2066,8 @@ int update_shadow_va_mapping(unsigned lo
   4.536      }
   4.537  #endif /* keep check_pagetables() happy */
   4.538  
   4.539 -    if ( unlikely(__put_user(val, &l1_pgentry_val(
   4.540 -                                 linear_pg_table[l1_linear_offset(va)]))) )
   4.541 +    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
   4.542 +                                 &val, sizeof(val))))
   4.543      {
   4.544          rc = -EINVAL;
   4.545          goto out;
   4.546 @@ -2096,7 +2094,7 @@ int update_shadow_va_mapping(unsigned lo
   4.547  }
   4.548  
   4.549  int update_grant_va_mapping(unsigned long va,
   4.550 -                            unsigned long _nl1e, 
   4.551 +                            l1_pgentry_t _nl1e, 
   4.552                              struct domain *d,
   4.553                              struct exec_domain *ed)
   4.554  {
   4.555 @@ -2109,22 +2107,20 @@ int update_grant_va_mapping(unsigned lon
   4.556  
   4.557      int             rc = 0;
   4.558      l1_pgentry_t   *pl1e;
   4.559 -    unsigned long   _ol1e;
   4.560 -
   4.561 +    l1_pgentry_t    ol1e;
   4.562 +    
   4.563      cleanup_writable_pagetable(d);
   4.564  
   4.565      pl1e = &linear_pg_table[l1_linear_offset(va)];
   4.566  
   4.567 -    if ( unlikely(__get_user(_ol1e, (unsigned long *)pl1e) != 0) )
   4.568 +    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
   4.569          rc = -EINVAL;
   4.570      else
   4.571      {
   4.572 -        l1_pgentry_t ol1e = mk_l1_pgentry(_ol1e);
   4.573 -
   4.574 -        if ( update_l1e(pl1e, ol1e, mk_l1_pgentry(_nl1e)) )
   4.575 +        if ( update_l1e(pl1e, ol1e, _nl1e) )
   4.576          {
   4.577              put_page_from_l1e(ol1e, d);
   4.578 -            if ( _ol1e & _PAGE_PRESENT )
   4.579 +            if ( l1e_get_flags(ol1e) & _PAGE_PRESENT )
   4.580                  rc = 0; /* Caller needs to invalidate TLB entry */
   4.581              else
   4.582                  rc = 1; /* Caller need not invalidate TLB entry */
   4.583 @@ -2141,7 +2137,7 @@ int update_grant_va_mapping(unsigned lon
   4.584  
   4.585  
   4.586  int do_update_va_mapping(unsigned long va,
   4.587 -                         unsigned long val, 
   4.588 +                         l1_pgentry_t  val, 
   4.589                           unsigned long flags)
   4.590  {
   4.591      struct exec_domain *ed  = current;
   4.592 @@ -2175,7 +2171,7 @@ int do_update_va_mapping(unsigned long v
   4.593          rc = update_shadow_va_mapping(va, val, ed, d);
   4.594      }
   4.595      else if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
   4.596 -                                     mk_l1_pgentry(val))) )
   4.597 +                                     val)) )
   4.598          rc = -EINVAL;
   4.599  
   4.600      switch ( flags & UVMF_FLUSHTYPE_MASK )
   4.601 @@ -2232,7 +2228,7 @@ int do_update_va_mapping(unsigned long v
   4.602  }
   4.603  
   4.604  int do_update_va_mapping_otherdomain(unsigned long va,
   4.605 -                                     unsigned long val, 
   4.606 +                                     l1_pgentry_t  val, 
   4.607                                       unsigned long flags,
   4.608                                       domid_t domid)
   4.609  {
   4.610 @@ -2268,9 +2264,9 @@ void destroy_gdt(struct exec_domain *ed)
   4.611  
   4.612      for ( i = 0; i < 16; i++ )
   4.613      {
   4.614 -        if ( (pfn = l1_pgentry_to_pfn(ed->arch.perdomain_ptes[i])) != 0 )
   4.615 +        if ( (pfn = l1e_get_pfn(ed->arch.perdomain_ptes[i])) != 0 )
   4.616              put_page_and_type(&frame_table[pfn]);
   4.617 -        ed->arch.perdomain_ptes[i] = mk_l1_pgentry(0);
   4.618 +        ed->arch.perdomain_ptes[i] = l1e_empty();
   4.619      }
   4.620  }
   4.621  
   4.622 @@ -2327,7 +2323,7 @@ long set_gdt(struct exec_domain *ed,
   4.623      /* Install the new GDT. */
   4.624      for ( i = 0; i < nr_pages; i++ )
   4.625          ed->arch.perdomain_ptes[i] =
   4.626 -            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   4.627 +            l1e_create_pfn(frames[i], __PAGE_HYPERVISOR);
   4.628  
   4.629      SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
   4.630      SET_GDT_ENTRIES(ed, entries);
   4.631 @@ -2403,7 +2399,7 @@ long do_update_descriptor(unsigned long 
   4.632      case PGT_gdt_page:
   4.633          /* Disallow updates of Xen-reserved descriptors in the current GDT. */
   4.634          for_each_exec_domain(dom, ed) {
   4.635 -            if ( (l1_pgentry_to_pfn(ed->arch.perdomain_ptes[0]) == mfn) &&
   4.636 +            if ( (l1e_get_pfn(ed->arch.perdomain_ptes[0]) == mfn) &&
   4.637                   (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
   4.638                   (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
   4.639                  goto out;
   4.640 @@ -2526,7 +2522,7 @@ void ptwr_flush(struct domain *d, const 
   4.641          ol1e = d->arch.ptwr[which].page[i];
   4.642          nl1e = pl1e[i];
   4.643  
   4.644 -        if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
   4.645 +        if ( likely(l1e_get_value(ol1e) == l1e_get_value(nl1e)) )
   4.646              continue;
   4.647  
   4.648          /* Update number of entries modified. */
   4.649 @@ -2536,10 +2532,10 @@ void ptwr_flush(struct domain *d, const 
   4.650           * Fast path for PTEs that have merely been write-protected
   4.651           * (e.g., during a Unix fork()). A strict reduction in privilege.
   4.652           */
   4.653 -        if ( likely(l1_pgentry_val(ol1e) == (l1_pgentry_val(nl1e)|_PAGE_RW)) )
   4.654 +        if ( likely(l1e_get_value(ol1e) == (l1e_get_value(nl1e)|_PAGE_RW)) )
   4.655          {
   4.656 -            if ( likely(l1_pgentry_val(nl1e) & _PAGE_PRESENT) )
   4.657 -                put_page_type(&frame_table[l1_pgentry_to_pfn(nl1e)]);
   4.658 +            if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
   4.659 +                put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
   4.660              continue;
   4.661          }
   4.662  
   4.663 @@ -2570,7 +2566,7 @@ void ptwr_flush(struct domain *d, const 
   4.664      if ( which == PTWR_PT_ACTIVE )
   4.665      {
   4.666          pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
   4.667 -        *pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT); 
   4.668 +        l2e_add_flags(pl2e, _PAGE_PRESENT); 
   4.669      }
   4.670  
   4.671      /*
   4.672 @@ -2587,9 +2583,9 @@ static int ptwr_emulated_update(
   4.673      unsigned int bytes,
   4.674      unsigned int do_cmpxchg)
   4.675  {
   4.676 -    unsigned long pte, pfn;
   4.677 +    unsigned long pfn;
   4.678      struct pfn_info *page;
   4.679 -    l1_pgentry_t ol1e, nl1e, *pl1e;
   4.680 +    l1_pgentry_t pte, ol1e, nl1e, *pl1e;
   4.681      struct domain *d = current->domain;
   4.682  
   4.683      /* Aligned access only, thank you. */
   4.684 @@ -2601,6 +2597,7 @@ static int ptwr_emulated_update(
   4.685      }
   4.686  
   4.687      /* Turn a sub-word access into a full-word access. */
   4.688 +    /* FIXME: needs tweaks for PAE */
   4.689      if ( (addr & ((BITS_PER_LONG/8)-1)) != 0 )
   4.690      {
   4.691          int           rc;
   4.692 @@ -2619,18 +2616,18 @@ static int ptwr_emulated_update(
   4.693      }
   4.694  
   4.695      /* Read the PTE that maps the page being updated. */
   4.696 -    if ( __get_user(pte, (unsigned long *)
   4.697 -                    &linear_pg_table[l1_linear_offset(addr)]) )
   4.698 +    if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)],
   4.699 +                         sizeof(pte)))
   4.700      {
   4.701          MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n");
   4.702          return X86EMUL_UNHANDLEABLE;
   4.703      }
   4.704  
   4.705 -    pfn  = pte >> PAGE_SHIFT;
   4.706 +    pfn  = l1e_get_pfn(pte);
   4.707      page = &frame_table[pfn];
   4.708  
   4.709      /* We are looking only for read-only mappings of p.t. pages. */
   4.710 -    if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
   4.711 +    if ( ((l1e_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
   4.712           ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
   4.713           (page_get_owner(page) != d) )
   4.714      {
   4.715 @@ -2640,7 +2637,7 @@ static int ptwr_emulated_update(
   4.716      }
   4.717  
   4.718      /* Check the new PTE. */
   4.719 -    nl1e = mk_l1_pgentry(val);
   4.720 +    nl1e = l1e_create_phys(val, val & ~PAGE_MASK);
   4.721      if ( unlikely(!get_page_from_l1e(nl1e, d)) )
   4.722          return X86EMUL_UNHANDLEABLE;
   4.723  
   4.724 @@ -2648,7 +2645,7 @@ static int ptwr_emulated_update(
   4.725      pl1e = map_domain_mem(page_to_phys(page) + (addr & ~PAGE_MASK));
   4.726      if ( do_cmpxchg )
   4.727      {
   4.728 -        ol1e = mk_l1_pgentry(old);
   4.729 +        ol1e = l1e_create_phys(old, old & ~PAGE_MASK);
   4.730          if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
   4.731          {
   4.732              unmap_domain_mem(pl1e);
   4.733 @@ -2673,8 +2670,7 @@ static int ptwr_emulated_update(
   4.734          {
   4.735              sl1e = map_domain_mem(
   4.736                  ((sstat & PSH_pfn_mask) << PAGE_SHIFT) + (addr & ~PAGE_MASK));
   4.737 -            l1pte_propagate_from_guest(
   4.738 -                d, &l1_pgentry_val(nl1e), &l1_pgentry_val(*sl1e));
   4.739 +            l1pte_propagate_from_guest(d, &nl1e, sl1e);
   4.740              unmap_domain_mem(sl1e);
   4.741          }
   4.742  #endif
   4.743 @@ -2714,8 +2710,9 @@ static struct x86_mem_emulator ptwr_mem_
   4.744  /* Write page fault handler: check if guest is trying to modify a PTE. */
   4.745  int ptwr_do_page_fault(struct domain *d, unsigned long addr)
   4.746  {
   4.747 -    unsigned long    pte, pfn, l2e;
   4.748 +    unsigned long    pfn;
   4.749      struct pfn_info *page;
   4.750 +    l1_pgentry_t     pte;
   4.751      l2_pgentry_t    *pl2e;
   4.752      int              which;
   4.753      u32              l2_idx;
   4.754 @@ -2727,19 +2724,19 @@ int ptwr_do_page_fault(struct domain *d,
   4.755       * Attempt to read the PTE that maps the VA being accessed. By checking for
   4.756       * PDE validity in the L2 we avoid many expensive fixups in __get_user().
   4.757       */
   4.758 -    if ( !(l2_pgentry_val(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
   4.759 +    if ( !(l2e_get_flags(__linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
   4.760             _PAGE_PRESENT) ||
   4.761 -         __get_user(pte, (unsigned long *)
   4.762 -                    &linear_pg_table[l1_linear_offset(addr)]) )
   4.763 +         __copy_from_user(&pte,&linear_pg_table[l1_linear_offset(addr)],
   4.764 +                          sizeof(pte)) )
   4.765      {
   4.766          return 0;
   4.767      }
   4.768  
   4.769 -    pfn  = pte >> PAGE_SHIFT;
   4.770 +    pfn  = l1e_get_pfn(pte);
   4.771      page = &frame_table[pfn];
   4.772  
   4.773      /* We are looking only for read-only mappings of p.t. pages. */
   4.774 -    if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
   4.775 +    if ( ((l1e_get_flags(pte) & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
   4.776           ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
   4.777           (page_get_owner(page) != d) )
   4.778      {
   4.779 @@ -2769,9 +2766,8 @@ int ptwr_do_page_fault(struct domain *d,
   4.780       * an ACTIVE p.t., otherwise it is INACTIVE.
   4.781       */
   4.782      pl2e = &__linear_l2_table[l2_idx];
   4.783 -    l2e  = l2_pgentry_val(*pl2e);
   4.784      which = PTWR_PT_INACTIVE;
   4.785 -    if ( (l2e >> PAGE_SHIFT) == pfn )
   4.786 +    if ( (l2e_get_pfn(*pl2e)) == pfn )
   4.787      {
   4.788          /*
   4.789           * Check the PRESENT bit to set ACTIVE mode.
   4.790 @@ -2779,7 +2775,7 @@ int ptwr_do_page_fault(struct domain *d,
   4.791           * ACTIVE p.t. (it may be the same p.t. mapped at another virt addr).
   4.792           * The ptwr_flush call below will restore the PRESENT bit.
   4.793           */
   4.794 -        if ( likely(l2e & _PAGE_PRESENT) ||
   4.795 +        if ( likely(l2e_get_flags(*pl2e) & _PAGE_PRESENT) ||
   4.796               (d->arch.ptwr[PTWR_PT_ACTIVE].l1va &&
   4.797                (l2_idx == d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx)) )
   4.798              which = PTWR_PT_ACTIVE;
   4.799 @@ -2809,7 +2805,7 @@ int ptwr_do_page_fault(struct domain *d,
   4.800      /* For safety, disconnect the L1 p.t. page from current space. */
   4.801      if ( which == PTWR_PT_ACTIVE )
   4.802      {
   4.803 -        *pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
   4.804 +        l2e_remove_flags(pl2e, _PAGE_PRESENT);
   4.805          local_flush_tlb(); /* XXX Multi-CPU guests? */
   4.806      }
   4.807      
   4.808 @@ -2820,11 +2816,11 @@ int ptwr_do_page_fault(struct domain *d,
   4.809             L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
   4.810      
   4.811      /* Finally, make the p.t. page writable by the guest OS. */
   4.812 -    pte |= _PAGE_RW;
   4.813 +    l1e_add_flags(&pte, _PAGE_RW);
   4.814      PTWR_PRINTK("[%c] update %p pte to %p\n", PTWR_PRINT_WHICH,
   4.815                  &linear_pg_table[addr>>PAGE_SHIFT], pte);
   4.816 -    if ( unlikely(__put_user(pte, (unsigned long *)
   4.817 -                             &linear_pg_table[addr>>PAGE_SHIFT])) )
   4.818 +    if ( unlikely(__copy_to_user(&linear_pg_table[addr>>PAGE_SHIFT],
   4.819 +                                 &pte, sizeof(pte))) )
   4.820      {
   4.821          MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
   4.822                  &linear_pg_table[addr>>PAGE_SHIFT]);
     5.1 --- a/xen/arch/x86/shadow.c	Mon Apr 18 17:47:08 2005 +0000
     5.2 +++ b/xen/arch/x86/shadow.c	Wed Apr 20 11:50:06 2005 +0000
     5.3 @@ -312,7 +312,7 @@ free_shadow_l1_table(struct domain *d, u
     5.4      for ( i = min; i <= max; i++ )
     5.5      {
     5.6          put_page_from_l1e(pl1e[i], d);
     5.7 -        pl1e[i] = mk_l1_pgentry(0);
     5.8 +        pl1e[i] = l1e_empty();
     5.9      }
    5.10  
    5.11      unmap_domain_mem(pl1e);
    5.12 @@ -337,9 +337,8 @@ free_shadow_hl2_table(struct domain *d, 
    5.13  
    5.14      for ( i = 0; i < limit; i++ )
    5.15      {
    5.16 -        unsigned long hl2e = l1_pgentry_val(hl2[i]);
    5.17 -        if ( hl2e & _PAGE_PRESENT )
    5.18 -            put_page(pfn_to_page(hl2e >> PAGE_SHIFT));
    5.19 +        if ( l1e_get_flags(hl2[i]) & _PAGE_PRESENT )
    5.20 +            put_page(pfn_to_page(l1e_get_pfn(hl2[i])));
    5.21      }
    5.22  
    5.23      unmap_domain_mem(hl2);
    5.24 @@ -557,15 +556,16 @@ static void free_shadow_pages(struct dom
    5.25              l2_pgentry_t *mpl2e = ed->arch.monitor_vtable;
    5.26              l2_pgentry_t hl2e = mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)];
    5.27              l2_pgentry_t smfn = mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)];
    5.28 -            if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT )
    5.29 +
    5.30 +            if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
    5.31              {
    5.32 -                put_shadow_ref(l2_pgentry_val(hl2e) >> PAGE_SHIFT);
    5.33 -                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
    5.34 +                put_shadow_ref(l2e_get_pfn(hl2e));
    5.35 +                mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
    5.36              }
    5.37 -            if ( l2_pgentry_val(smfn) & _PAGE_PRESENT )
    5.38 +            if ( l2e_get_flags(smfn) & _PAGE_PRESENT )
    5.39              {
    5.40 -                put_shadow_ref(l2_pgentry_val(smfn) >> PAGE_SHIFT);
    5.41 -                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
    5.42 +                put_shadow_ref(l2e_get_pfn(smfn));
    5.43 +                mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
    5.44              }
    5.45          }
    5.46      }
    5.47 @@ -648,18 +648,19 @@ static void alloc_monitor_pagetable(stru
    5.48  #endif
    5.49  
    5.50      mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
    5.51 -        mk_l2_pgentry((__pa(d->arch.mm_perdomain_pt) & PAGE_MASK) 
    5.52 -                      | __PAGE_HYPERVISOR);
    5.53 +        l2e_create_phys(__pa(d->arch.mm_perdomain_pt),
    5.54 +                        __PAGE_HYPERVISOR);
    5.55  
    5.56      // map the phys_to_machine map into the Read-Only MPT space for this domain
    5.57      mpl2e[l2_table_offset(RO_MPT_VIRT_START)] =
    5.58 -        mk_l2_pgentry(pagetable_val(d->arch.phys_table) | __PAGE_HYPERVISOR);
    5.59 +        l2e_create_phys(pagetable_val(d->arch.phys_table),
    5.60 +                        __PAGE_HYPERVISOR);
    5.61  
    5.62      // Don't (yet) have mappings for these...
    5.63      // Don't want to accidentally see the idle_pg_table's linear mapping.
    5.64      //
    5.65 -    mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
    5.66 -    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = mk_l2_pgentry(0);
    5.67 +    mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] = l2e_empty();
    5.68 +    mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
    5.69  
    5.70      ed->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
    5.71      ed->arch.monitor_vtable = mpl2e;
    5.72 @@ -682,17 +683,17 @@ void free_monitor_pagetable(struct exec_
    5.73       * First get the mfn for hl2_table by looking at monitor_table
    5.74       */
    5.75      hl2e = mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
    5.76 -    if ( l2_pgentry_val(hl2e) & _PAGE_PRESENT )
    5.77 +    if ( l2e_get_flags(hl2e) & _PAGE_PRESENT )
    5.78      {
    5.79 -        mfn = l2_pgentry_val(hl2e) >> PAGE_SHIFT;
    5.80 +        mfn = l2e_get_pfn(hl2e);
    5.81          ASSERT(mfn);
    5.82          put_shadow_ref(mfn);
    5.83      }
    5.84  
    5.85      sl2e = mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT];
    5.86 -    if ( l2_pgentry_val(sl2e) & _PAGE_PRESENT )
    5.87 +    if ( l2e_get_flags(sl2e) & _PAGE_PRESENT )
    5.88      {
    5.89 -        mfn = l2_pgentry_val(sl2e) >> PAGE_SHIFT;
    5.90 +        mfn = l2e_get_pfn(sl2e);
    5.91          ASSERT(mfn);
    5.92          put_shadow_ref(mfn);
    5.93      }
    5.94 @@ -721,7 +722,8 @@ set_p2m_entry(struct domain *d, unsigned
    5.95      ASSERT( phystab );
    5.96  
    5.97      l2 = map_domain_mem(phystab);
    5.98 -    if ( !l2_pgentry_val(l2e = l2[l2_table_offset(va)]) )
    5.99 +    l2e = l2[l2_table_offset(va)];
   5.100 +    if ( !l2e_get_value(l2e) ) /* FIXME: check present bit? */
   5.101      {
   5.102          l1page = alloc_domheap_page(NULL);
   5.103          if ( !l1page )
   5.104 @@ -731,15 +733,13 @@ set_p2m_entry(struct domain *d, unsigned
   5.105          memset(l1, 0, PAGE_SIZE);
   5.106          unmap_domain_mem(l1);
   5.107  
   5.108 -        l2e = l2[l2_table_offset(va)] =
   5.109 -            mk_l2_pgentry((page_to_pfn(l1page) << PAGE_SHIFT) |
   5.110 -                          __PAGE_HYPERVISOR);
   5.111 +        l2e = l2e_create_pfn(page_to_pfn(l1page), __PAGE_HYPERVISOR);
   5.112 +        l2[l2_table_offset(va)] = l2e;
   5.113      }
   5.114      unmap_domain_mem(l2);
   5.115  
   5.116 -    l1 = map_domain_mem(l2_pgentry_val(l2e) & PAGE_MASK);
   5.117 -    l1[l1_table_offset(va)] = mk_l1_pgentry((mfn << PAGE_SHIFT) |
   5.118 -                                            __PAGE_HYPERVISOR);
   5.119 +    l1 = map_domain_mem(l2e_get_phys(l2e));
   5.120 +    l1[l1_table_offset(va)] = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
   5.121      unmap_domain_mem(l1);
   5.122  
   5.123      return 1;
   5.124 @@ -1015,13 +1015,12 @@ translate_l1pgtable(struct domain *d, l1
   5.125      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   5.126      {
   5.127          if ( is_guest_l1_slot(i) &&
   5.128 -             (l1_pgentry_val(l1[i]) & _PAGE_PRESENT) )
   5.129 +             (l1e_get_flags(l1[i]) & _PAGE_PRESENT) )
   5.130          {
   5.131 -            unsigned long mfn = l1_pgentry_val(l1[i]) >> PAGE_SHIFT;
   5.132 +            unsigned long mfn = l1e_get_pfn(l1[i]);
   5.133              unsigned long gpfn = __mfn_to_gpfn(d, mfn);
   5.134 -            ASSERT((l1_pgentry_val(p2m[gpfn]) >> PAGE_SHIFT) == mfn);
   5.135 -            l1[i] = mk_l1_pgentry((gpfn << PAGE_SHIFT) |
   5.136 -                                  (l1_pgentry_val(l1[i]) & ~PAGE_MASK));
   5.137 +            ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
   5.138 +            l1[i] = l1e_create_pfn(gpfn, l1e_get_flags(l1[i]));
   5.139          }
   5.140      }
   5.141      unmap_domain_mem(l1);
   5.142 @@ -1043,13 +1042,12 @@ translate_l2pgtable(struct domain *d, l1
   5.143      for (i = 0; i < L2_PAGETABLE_ENTRIES; i++)
   5.144      {
   5.145          if ( is_guest_l2_slot(i) &&
   5.146 -             (l2_pgentry_val(l2[i]) & _PAGE_PRESENT) )
   5.147 +             (l2e_get_flags(l2[i]) & _PAGE_PRESENT) )
   5.148          {
   5.149 -            unsigned long mfn = l2_pgentry_val(l2[i]) >> PAGE_SHIFT;
   5.150 +            unsigned long mfn = l2e_get_pfn(l2[i]);
   5.151              unsigned long gpfn = __mfn_to_gpfn(d, mfn);
   5.152 -            ASSERT((l1_pgentry_val(p2m[gpfn]) >> PAGE_SHIFT) == mfn);
   5.153 -            l2[i] = mk_l2_pgentry((gpfn << PAGE_SHIFT) |
   5.154 -                                  (l2_pgentry_val(l2[i]) & ~PAGE_MASK));
   5.155 +            ASSERT(l1e_get_pfn(p2m[gpfn]) == mfn);
   5.156 +            l2[i] = l2e_create_pfn(gpfn, l2e_get_flags(l2[i]));
   5.157              translate_l1pgtable(d, p2m, mfn);
   5.158          }
   5.159      }
   5.160 @@ -1321,13 +1319,13 @@ gpfn_to_mfn_foreign(struct domain *d, un
   5.161      l2_pgentry_t *l2 = map_domain_mem(phystab);
   5.162      l2_pgentry_t l2e = l2[l2_table_offset(va)];
   5.163      unmap_domain_mem(l2);
   5.164 -    if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
   5.165 +    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   5.166      {
   5.167          printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%p) => 0 l2e=%p\n",
   5.168 -               d->id, gpfn, l2_pgentry_val(l2e));
   5.169 +               d->id, gpfn, l2e_get_value(l2e));
   5.170          return INVALID_MFN;
   5.171      }
   5.172 -    unsigned long l1tab = l2_pgentry_val(l2e) & PAGE_MASK;
   5.173 +    unsigned long l1tab = l2e_get_phys(l2e);
   5.174      l1_pgentry_t *l1 = map_domain_mem(l1tab);
   5.175      l1_pgentry_t l1e = l1[l1_table_offset(va)];
   5.176      unmap_domain_mem(l1);
   5.177 @@ -1337,14 +1335,14 @@ gpfn_to_mfn_foreign(struct domain *d, un
   5.178             d->id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
   5.179  #endif
   5.180  
   5.181 -    if ( !(l1_pgentry_val(l1e) & _PAGE_PRESENT) )
   5.182 +    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
   5.183      {
   5.184          printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%p) => 0 l1e=%p\n",
   5.185 -               d->id, gpfn, l1_pgentry_val(l1e));
   5.186 +               d->id, gpfn, l1e_get_value(l1e));
   5.187          return INVALID_MFN;
   5.188      }
   5.189  
   5.190 -    return l1_pgentry_val(l1e) >> PAGE_SHIFT;
   5.191 +    return l1e_get_pfn(l1e);
   5.192  }
   5.193  
   5.194  static unsigned long
   5.195 @@ -1388,11 +1386,11 @@ shadow_hl2_table(struct domain *d, unsig
   5.196          // Setup easy access to the GL2, SL2, and HL2 frames.
   5.197          //
   5.198          hl2[l2_table_offset(LINEAR_PT_VIRT_START)] =
   5.199 -            mk_l1_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.200 +            l1e_create_pfn(gmfn, __PAGE_HYPERVISOR);
   5.201          hl2[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   5.202 -            mk_l1_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.203 +            l1e_create_pfn(smfn, __PAGE_HYPERVISOR);
   5.204          hl2[l2_table_offset(PERDOMAIN_VIRT_START)] =
   5.205 -            mk_l1_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.206 +            l1e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
   5.207      }
   5.208  
   5.209      unmap_domain_mem(hl2);
   5.210 @@ -1441,20 +1439,19 @@ static unsigned long shadow_l2_table(
   5.211                 HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
   5.212  
   5.213          spl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   5.214 -            mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.215 +            l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
   5.216  
   5.217          spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
   5.218 -            mk_l2_pgentry(__pa(page_get_owner(
   5.219 -                &frame_table[gmfn])->arch.mm_perdomain_pt) |
   5.220 -                          __PAGE_HYPERVISOR);
   5.221 +            l2e_create_phys(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
   5.222 +                            __PAGE_HYPERVISOR);
   5.223  
   5.224          if ( shadow_mode_translate(d) ) // NB: not external
   5.225          {
   5.226              unsigned long hl2mfn;
   5.227  
   5.228              spl2e[l2_table_offset(RO_MPT_VIRT_START)] =
   5.229 -                mk_l2_pgentry(pagetable_val(d->arch.phys_table) |
   5.230 -                              __PAGE_HYPERVISOR);
   5.231 +                l2e_create_phys(pagetable_val(d->arch.phys_table),
   5.232 +                                __PAGE_HYPERVISOR);
   5.233  
   5.234              if ( unlikely(!(hl2mfn = __shadow_status(d, gpfn, PGT_hl2_shadow))) )
   5.235                  hl2mfn = shadow_hl2_table(d, gpfn, gmfn, smfn);
   5.236 @@ -1466,11 +1463,11 @@ static unsigned long shadow_l2_table(
   5.237                  BUG();
   5.238              
   5.239              spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   5.240 -                mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.241 +                l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
   5.242          }
   5.243          else
   5.244              spl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   5.245 -                mk_l2_pgentry((gmfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.246 +                l2e_create_pfn(gmfn, __PAGE_HYPERVISOR);
   5.247      }
   5.248      else
   5.249      {
   5.250 @@ -1487,12 +1484,14 @@ void shadow_map_l1_into_current_l2(unsig
   5.251  { 
   5.252      struct exec_domain *ed = current;
   5.253      struct domain *d = ed->domain;
   5.254 -    unsigned long    *gpl1e, *spl1e, gl2e, sl2e, gl1pfn, gl1mfn, sl1mfn;
   5.255 +    l1_pgentry_t *gpl1e, *spl1e;
   5.256 +    l2_pgentry_t gl2e, sl2e;
   5.257 +    unsigned long gl1pfn, gl1mfn, sl1mfn;
   5.258      int i, init_table = 0;
   5.259  
   5.260      __guest_get_l2e(ed, va, &gl2e);
   5.261 -    ASSERT(gl2e & _PAGE_PRESENT);
   5.262 -    gl1pfn = gl2e >> PAGE_SHIFT;
   5.263 +    ASSERT(l2e_get_flags(gl2e) & _PAGE_PRESENT);
   5.264 +    gl1pfn = l2e_get_pfn(gl2e);
   5.265  
   5.266      if ( !(sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow)) )
   5.267      {
   5.268 @@ -1525,9 +1524,9 @@ void shadow_map_l1_into_current_l2(unsig
   5.269      }
   5.270  
   5.271  #ifndef NDEBUG
   5.272 -    unsigned long old_sl2e;
   5.273 +    l2_pgentry_t old_sl2e;
   5.274      __shadow_get_l2e(ed, va, &old_sl2e);
   5.275 -    ASSERT( !(old_sl2e & _PAGE_PRESENT) );
   5.276 +    ASSERT( !(l2e_get_flags(old_sl2e) & _PAGE_PRESENT) );
   5.277  #endif
   5.278  
   5.279      if ( !get_shadow_ref(sl1mfn) )
   5.280 @@ -1538,25 +1537,23 @@ void shadow_map_l1_into_current_l2(unsig
   5.281  
   5.282      if ( init_table )
   5.283      {
   5.284 -        gpl1e = (unsigned long *)
   5.285 -            &(linear_pg_table[l1_linear_offset(va) &
   5.286 +        gpl1e = &(linear_pg_table[l1_linear_offset(va) &
   5.287                                ~(L1_PAGETABLE_ENTRIES-1)]);
   5.288  
   5.289 -        spl1e = (unsigned long *)
   5.290 -            &(shadow_linear_pg_table[l1_linear_offset(va) &
   5.291 +        spl1e = &(shadow_linear_pg_table[l1_linear_offset(va) &
   5.292                                       ~(L1_PAGETABLE_ENTRIES-1)]);
   5.293  
   5.294 -        unsigned long sl1e;
   5.295 +        l1_pgentry_t sl1e;
   5.296          int index = l1_table_offset(va);
   5.297          int min = 1, max = 0;
   5.298  
   5.299          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
   5.300          {
   5.301              l1pte_propagate_from_guest(d, gpl1e[i], &sl1e);
   5.302 -            if ( (sl1e & _PAGE_PRESENT) &&
   5.303 -                 !shadow_get_page_from_l1e(mk_l1_pgentry(sl1e), d) )
   5.304 -                sl1e = 0;
   5.305 -            if ( sl1e == 0 )
   5.306 +            if ( (l1e_get_flags(sl1e) & _PAGE_PRESENT) &&
   5.307 +                 !shadow_get_page_from_l1e(sl1e, d) )
   5.308 +                sl1e = l1e_empty();
   5.309 +            if ( l1e_get_value(sl1e) == 0 ) /* FIXME: check flags? */
   5.310              {
   5.311                  // First copy entries from 0 until first invalid.
   5.312                  // Then copy entries from index until first invalid.
   5.313 @@ -1582,7 +1579,7 @@ void shadow_map_l1_into_current_l2(unsig
   5.314  void shadow_invlpg(struct exec_domain *ed, unsigned long va)
   5.315  {
   5.316      struct domain *d = ed->domain;
   5.317 -    unsigned long gpte, spte;
   5.318 +    l1_pgentry_t gpte, spte;
   5.319  
   5.320      ASSERT(shadow_mode_enabled(d));
   5.321  
   5.322 @@ -1595,8 +1592,8 @@ void shadow_invlpg(struct exec_domain *e
   5.323      // It's not strictly necessary to update the shadow here,
   5.324      // but it might save a fault later.
   5.325      //
   5.326 -    if (__get_user(gpte, (unsigned long *)
   5.327 -                   &linear_pg_table[va >> PAGE_SHIFT])) {
   5.328 +    if (__copy_from_user(&gpte, &linear_pg_table[va >> PAGE_SHIFT],
   5.329 +                         sizeof(gpte))) {
   5.330          perfc_incrc(shadow_invlpg_faults);
   5.331          return;
   5.332      }
   5.333 @@ -1764,31 +1761,30 @@ void shadow_mark_va_out_of_sync(
   5.334  {
   5.335      struct out_of_sync_entry *entry =
   5.336          shadow_mark_mfn_out_of_sync(ed, gpfn, mfn);
   5.337 -    unsigned long sl2e;
   5.338 +    l2_pgentry_t sl2e;
   5.339  
   5.340      // We need the address of shadow PTE that maps @va.
   5.341      // It might not exist yet.  Make sure it's there.
   5.342      //
   5.343      __shadow_get_l2e(ed, va, &sl2e);
   5.344 -    if ( !(sl2e & _PAGE_PRESENT) )
   5.345 +    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
   5.346      {
   5.347          // either this L1 isn't shadowed yet, or the shadow isn't linked into
   5.348          // the current L2.
   5.349          shadow_map_l1_into_current_l2(va);
   5.350          __shadow_get_l2e(ed, va, &sl2e);
   5.351      }
   5.352 -    ASSERT(sl2e & _PAGE_PRESENT);
   5.353 +    ASSERT(l2e_get_flags(sl2e) & _PAGE_PRESENT);
   5.354  
   5.355      // NB: this is stored as a machine address.
   5.356      entry->writable_pl1e =
   5.357 -        ((sl2e & PAGE_MASK) |
   5.358 -         (sizeof(l1_pgentry_t) * l1_table_offset(va)));
   5.359 +        l2e_get_phys(sl2e) | (sizeof(l1_pgentry_t) * l1_table_offset(va));
   5.360      ASSERT( !(entry->writable_pl1e & (sizeof(l1_pgentry_t)-1)) );
   5.361  
   5.362      // Increment shadow's page count to represent the reference
   5.363      // inherent in entry->writable_pl1e
   5.364      //
   5.365 -    if ( !get_shadow_ref(sl2e >> PAGE_SHIFT) )
   5.366 +    if ( !get_shadow_ref(l2e_get_pfn(sl2e)) )
   5.367          BUG();
   5.368  
   5.369      FSH_LOG("mark_out_of_sync(va=%p -> writable_pl1e=%p)",
   5.370 @@ -1841,7 +1837,7 @@ int __shadow_out_of_sync(struct exec_dom
   5.371  {
   5.372      struct domain *d = ed->domain;
   5.373      unsigned long l2mfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
   5.374 -    unsigned long l2e;
   5.375 +    l2_pgentry_t l2e;
   5.376      unsigned long l1mfn;
   5.377  
   5.378      ASSERT(spin_is_locked(&d->arch.shadow_lock));
   5.379 @@ -1853,10 +1849,10 @@ int __shadow_out_of_sync(struct exec_dom
   5.380          return 1;
   5.381  
   5.382      __guest_get_l2e(ed, va, &l2e);
   5.383 -    if ( !(l2e & _PAGE_PRESENT) )
   5.384 +    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
   5.385          return 0;
   5.386  
   5.387 -    l1mfn = __gpfn_to_mfn(d, l2e >> PAGE_SHIFT);
   5.388 +    l1mfn = __gpfn_to_mfn(d, l2e_get_pfn(l2e));
   5.389  
   5.390      // If the l1 pfn is invalid, it can't be out of sync...
   5.391      if ( !VALID_MFN(l1mfn) )
   5.392 @@ -1923,31 +1919,31 @@ static u32 remove_all_write_access_in_pt
   5.393      unsigned long readonly_gpfn, unsigned long readonly_gmfn,
   5.394      u32 max_refs_to_find, unsigned long prediction)
   5.395  {
   5.396 -    unsigned long *pt = map_domain_mem(pt_mfn << PAGE_SHIFT);
   5.397 -    unsigned long match =
   5.398 -        (readonly_gmfn << PAGE_SHIFT) | _PAGE_RW | _PAGE_PRESENT;
   5.399 -    unsigned long mask = PAGE_MASK | _PAGE_RW | _PAGE_PRESENT;
   5.400 +    l1_pgentry_t *pt = map_domain_mem(pt_mfn << PAGE_SHIFT);
   5.401 +    l1_pgentry_t match;
   5.402 +    unsigned long flags = _PAGE_RW | _PAGE_PRESENT;
   5.403      int i;
   5.404      u32 found = 0;
   5.405      int is_l1_shadow =
   5.406          ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
   5.407           PGT_l1_shadow);
   5.408  
   5.409 -#define MATCH_ENTRY(_i) (((pt[_i] ^ match) & mask) == 0)
   5.410 +    match = l1e_create_pfn(readonly_gmfn, flags);
   5.411  
   5.412      // returns true if all refs have been found and fixed.
   5.413      //
   5.414      int fix_entry(int i)
   5.415      {
   5.416 -        unsigned long old = pt[i];
   5.417 -        unsigned long new = old & ~_PAGE_RW;
   5.418 -
   5.419 -        if ( is_l1_shadow && !shadow_get_page_from_l1e(mk_l1_pgentry(new), d) )
   5.420 +        l1_pgentry_t old = pt[i];
   5.421 +        l1_pgentry_t new = old;
   5.422 +
    5.423 +        l1e_remove_flags(&new, _PAGE_RW);
   5.424 +        if ( is_l1_shadow && !shadow_get_page_from_l1e(new, d) )
   5.425              BUG();
   5.426          found++;
   5.427          pt[i] = new;
   5.428          if ( is_l1_shadow )
   5.429 -            put_page_from_l1e(mk_l1_pgentry(old), d);
   5.430 +            put_page_from_l1e(old, d);
   5.431  
   5.432  #if 0
   5.433          printk("removed write access to pfn=%p mfn=%p in smfn=%p entry %x "
   5.434 @@ -1958,8 +1954,8 @@ static u32 remove_all_write_access_in_pt
   5.435          return (found == max_refs_to_find);
   5.436      }
   5.437  
   5.438 -    if ( MATCH_ENTRY(readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1)) &&
   5.439 -         fix_entry(readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1)) )
   5.440 +    i = readonly_gpfn & (L1_PAGETABLE_ENTRIES - 1);
   5.441 +    if ( !l1e_has_changed(&pt[i], &match, flags) && fix_entry(i) )
   5.442      {
   5.443          perfc_incrc(remove_write_fast_exit);
   5.444          increase_writable_pte_prediction(d, readonly_gpfn, prediction);
   5.445 @@ -1969,7 +1965,7 @@ static u32 remove_all_write_access_in_pt
   5.446   
   5.447      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   5.448      {
   5.449 -        if ( unlikely(MATCH_ENTRY(i)) && fix_entry(i) )
   5.450 +        if ( unlikely(!l1e_has_changed(&pt[i], &match, flags)) && fix_entry(i) )
   5.451              break;
   5.452      }
   5.453  
   5.454 @@ -2066,25 +2062,27 @@ int shadow_remove_all_write_access(
   5.455  static u32 remove_all_access_in_page(
   5.456      struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn)
   5.457  {
   5.458 -    unsigned long *pl1e = map_domain_mem(l1mfn << PAGE_SHIFT);
   5.459 -    unsigned long match = (forbidden_gmfn << PAGE_SHIFT) | _PAGE_PRESENT;
   5.460 -    unsigned long mask  = PAGE_MASK | _PAGE_PRESENT;
   5.461 +    l1_pgentry_t *pl1e = map_domain_mem(l1mfn << PAGE_SHIFT);
   5.462 +    l1_pgentry_t match;
   5.463 +    unsigned long flags  = _PAGE_PRESENT;
   5.464      int i;
   5.465      u32 count = 0;
   5.466      int is_l1_shadow =
   5.467          ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
   5.468           PGT_l1_shadow);
   5.469  
   5.470 +    match = l1e_create_pfn(forbidden_gmfn, flags);
    5.471 +
   5.472      for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
   5.473      {
   5.474 -        if ( unlikely(((pl1e[i] ^ match) & mask) == 0) )
    5.475 +        if ( unlikely(!l1e_has_changed(&pl1e[i], &match, flags)) )
   5.476          {
   5.477 -            unsigned long ol2e = pl1e[i];
   5.478 -            pl1e[i] = 0;
   5.479 +            l1_pgentry_t ol2e = pl1e[i];
   5.480 +            pl1e[i] = l1e_empty();
   5.481              count++;
   5.482  
   5.483              if ( is_l1_shadow )
   5.484 -                put_page_from_l1e(mk_l1_pgentry(ol2e), d);
   5.485 +                put_page_from_l1e(ol2e, d);
   5.486              else /* must be an hl2 page */
   5.487                  put_page(&frame_table[forbidden_gmfn]);
   5.488          }
   5.489 @@ -2138,7 +2136,7 @@ static int resync_all(struct domain *d, 
   5.490      struct out_of_sync_entry *entry;
   5.491      unsigned i;
   5.492      unsigned long smfn;
   5.493 -    unsigned long *guest, *shadow, *snapshot;
   5.494 +    void *guest, *shadow, *snapshot;
   5.495      int need_flush = 0, external = shadow_mode_external(d);
   5.496      int unshadow;
   5.497      int changed;
   5.498 @@ -2176,14 +2174,18 @@ static int resync_all(struct domain *d, 
   5.499              int min_snapshot = SHADOW_MIN(min_max_snapshot);
   5.500              int max_snapshot = SHADOW_MAX(min_max_snapshot);
   5.501  
   5.502 +            l1_pgentry_t *guest1 = guest;
   5.503 +            l1_pgentry_t *shadow1 = shadow;
   5.504 +            l1_pgentry_t *snapshot1 = snapshot;
   5.505 +
   5.506              changed = 0;
   5.507  
   5.508              for ( i = min_shadow; i <= max_shadow; i++ )
   5.509              {
   5.510                  if ( (i < min_snapshot) || (i > max_snapshot) ||
   5.511 -                     (guest[i] != snapshot[i]) )
   5.512 +                     l1e_has_changed(&guest1[i], &snapshot1[i], PAGE_FLAG_MASK) )
   5.513                  {
   5.514 -                    need_flush |= validate_pte_change(d, guest[i], &shadow[i]);
   5.515 +                    need_flush |= validate_pte_change(d, guest1[i], &shadow1[i]);
   5.516  
   5.517                      // can't update snapshots of linear page tables -- they
   5.518                      // are used multiple times...
   5.519 @@ -2202,16 +2204,20 @@ static int resync_all(struct domain *d, 
   5.520          {
   5.521              int max = -1;
   5.522  
   5.523 +            l2_pgentry_t *guest2 = guest;
   5.524 +            l2_pgentry_t *shadow2 = shadow;
   5.525 +            l2_pgentry_t *snapshot2 = snapshot;
   5.526 +
   5.527              changed = 0;
   5.528              for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   5.529              {
   5.530                  if ( !is_guest_l2_slot(i) && !external )
   5.531                      continue;
   5.532  
   5.533 -                unsigned long new_pde = guest[i];
   5.534 -                if ( new_pde != snapshot[i] )
   5.535 +                l2_pgentry_t new_pde = guest2[i];
   5.536 +                if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK))
   5.537                  {
   5.538 -                    need_flush |= validate_pde_change(d, new_pde, &shadow[i]);
   5.539 +                    need_flush |= validate_pde_change(d, new_pde, &shadow2[i]);
   5.540  
   5.541                      // can't update snapshots of linear page tables -- they
   5.542                      // are used multiple times...
   5.543 @@ -2220,12 +2226,13 @@ static int resync_all(struct domain *d, 
   5.544  
   5.545                      changed++;
   5.546                  }
   5.547 -                if ( new_pde != 0 )
   5.548 +                if ( l2e_get_value(new_pde) != 0 ) /* FIXME: check flags? */
   5.549                      max = i;
   5.550  
   5.551                  // XXX - This hack works for linux guests.
   5.552                  //       Need a better solution long term.
   5.553 -                if ( !(new_pde & _PAGE_PRESENT) && unlikely(new_pde != 0) &&
   5.554 +                if ( !(l2e_get_flags(new_pde) & _PAGE_PRESENT) &&
   5.555 +                     unlikely(l2e_get_value(new_pde) != 0) &&
   5.556                       !unshadow &&
   5.557                       (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
   5.558                      unshadow = 1;
   5.559 @@ -2237,16 +2244,21 @@ static int resync_all(struct domain *d, 
   5.560              break;
   5.561          }
   5.562          case PGT_hl2_shadow:
   5.563 +        {
   5.564 +            l2_pgentry_t *guest2 = guest;
   5.565 +            l2_pgentry_t *snapshot2 = snapshot;
   5.566 +            l1_pgentry_t *shadow2 = shadow;
   5.567 +            
   5.568              changed = 0;
   5.569              for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
   5.570              {
   5.571                  if ( !is_guest_l2_slot(i) && !external )
   5.572                      continue;
   5.573  
   5.574 -                unsigned long new_pde = guest[i];
   5.575 -                if ( new_pde != snapshot[i] )
   5.576 +                l2_pgentry_t new_pde = guest2[i];
   5.577 +                if ( l2e_has_changed(&new_pde, &snapshot2[i], PAGE_FLAG_MASK) )
   5.578                  {
   5.579 -                    need_flush |= validate_hl2e_change(d, new_pde, &shadow[i]);
   5.580 +                    need_flush |= validate_hl2e_change(d, new_pde, &shadow2[i]);
   5.581  
   5.582                      // can't update snapshots of linear page tables -- they
   5.583                      // are used multiple times...
   5.584 @@ -2259,6 +2271,7 @@ static int resync_all(struct domain *d, 
   5.585              perfc_incrc(resync_hl2);
   5.586              perfc_incr_histo(shm_hl2_updates, changed, PT_UPDATES);
   5.587              break;
   5.588 +        }
   5.589          default:
   5.590              BUG();
   5.591          }
   5.592 @@ -2304,15 +2317,16 @@ void __shadow_sync_all(struct domain *d)
   5.593          if ( entry->writable_pl1e & (sizeof(l1_pgentry_t)-1) )
   5.594              continue;
   5.595  
   5.596 -        unsigned long *ppte = map_domain_mem(entry->writable_pl1e);
   5.597 -        unsigned long opte = *ppte;
   5.598 -        unsigned long npte = opte & ~_PAGE_RW;
   5.599 -
   5.600 -        if ( (npte & _PAGE_PRESENT) &&
   5.601 -             !shadow_get_page_from_l1e(mk_l1_pgentry(npte), d) )
   5.602 +        l1_pgentry_t *ppte = map_domain_mem(entry->writable_pl1e);
   5.603 +        l1_pgentry_t opte = *ppte;
   5.604 +        l1_pgentry_t npte = opte;
    5.605 +        l1e_remove_flags(&npte, _PAGE_RW);
   5.606 +
   5.607 +        if ( (l1e_get_flags(npte) & _PAGE_PRESENT) &&
   5.608 +             !shadow_get_page_from_l1e(npte, d) )
   5.609              BUG();
   5.610          *ppte = npte;
   5.611 -        put_page_from_l1e(mk_l1_pgentry(opte), d);
   5.612 +        put_page_from_l1e(opte, d);
   5.613  
   5.614          unmap_domain_mem(ppte);
   5.615      }
   5.616 @@ -2347,10 +2361,12 @@ void __shadow_sync_all(struct domain *d)
   5.617  
   5.618  int shadow_fault(unsigned long va, struct xen_regs *regs)
   5.619  {
   5.620 -    unsigned long gpte, spte = 0, orig_gpte;
   5.621 +    l1_pgentry_t gpte, spte, orig_gpte;
   5.622      struct exec_domain *ed = current;
   5.623      struct domain *d = ed->domain;
   5.624 -    unsigned long gpde;
   5.625 +    l2_pgentry_t gpde;
   5.626 +
   5.627 +    spte = l1e_empty();
   5.628  
   5.629      SH_VVLOG("shadow_fault( va=%p, code=%lu )", va, regs->error_code );
   5.630      perfc_incrc(shadow_fault_calls);
   5.631 @@ -2373,7 +2389,7 @@ int shadow_fault(unsigned long va, struc
   5.632       * STEP 2. Check the guest PTE.
   5.633       */
   5.634      __guest_get_l2e(ed, va, &gpde);
   5.635 -    if ( unlikely(!(gpde & _PAGE_PRESENT)) )
    5.636 +    if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
   5.637      {
   5.638          SH_VVLOG("shadow_fault - EXIT: L1 not present" );
   5.639          perfc_incrc(shadow_fault_bail_pde_not_present);
   5.640 @@ -2384,8 +2400,8 @@ int shadow_fault(unsigned long va, struc
   5.641      // the mapping is in-sync, so the check of the PDE's present bit, above,
   5.642      // covers this access.
   5.643      //
   5.644 -    orig_gpte = gpte = l1_pgentry_val(linear_pg_table[l1_linear_offset(va)]);
   5.645 -    if ( unlikely(!(gpte & _PAGE_PRESENT)) )
   5.646 +    orig_gpte = gpte = linear_pg_table[l1_linear_offset(va)];
   5.647 +    if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_PRESENT)) )
   5.648      {
   5.649          SH_VVLOG("shadow_fault - EXIT: gpte not present (%lx)",gpte );
   5.650          perfc_incrc(shadow_fault_bail_pte_not_present);
   5.651 @@ -2395,7 +2411,7 @@ int shadow_fault(unsigned long va, struc
   5.652      /* Write fault? */
   5.653      if ( regs->error_code & 2 )  
   5.654      {
   5.655 -        if ( unlikely(!(gpte & _PAGE_RW)) )
   5.656 +        if ( unlikely(!(l1e_get_flags(gpte) & _PAGE_RW)) )
   5.657          {
   5.658              /* Write fault on a read-only mapping. */
   5.659              SH_VVLOG("shadow_fault - EXIT: wr fault on RO page (%lx)", gpte);
   5.660 @@ -2427,8 +2443,8 @@ int shadow_fault(unsigned long va, struc
   5.661       */
   5.662  
   5.663      /* XXX Watch out for read-only L2 entries! (not used in Linux). */
   5.664 -    if ( unlikely(__put_user(gpte, (unsigned long *)
   5.665 -                             &linear_pg_table[l1_linear_offset(va)])) )
   5.666 +    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(va)],
   5.667 +                                 &gpte, sizeof(gpte))) )
   5.668      {
   5.669          printk("shadow_fault() failed, crashing domain %d "
   5.670                 "due to a read-only L2 page table (gpde=%p), va=%p\n",
   5.671 @@ -2437,8 +2453,9 @@ int shadow_fault(unsigned long va, struc
   5.672      }
   5.673  
   5.674      // if necessary, record the page table page as dirty
   5.675 -    if ( unlikely(shadow_mode_log_dirty(d)) && (orig_gpte != gpte) )
   5.676 -        mark_dirty(d, __gpfn_to_mfn(d, gpde >> PAGE_SHIFT));
   5.677 +    if ( unlikely(shadow_mode_log_dirty(d)) &&
   5.678 +         l1e_has_changed(&orig_gpte, &gpte, PAGE_FLAG_MASK))
   5.679 +        mark_dirty(d, __gpfn_to_mfn(d, l2e_get_pfn(gpde)));
   5.680  
   5.681      shadow_set_l1e(va, spte, 1);
   5.682  
   5.683 @@ -2560,16 +2577,16 @@ void __update_pagetables(struct exec_dom
   5.684          if ( !get_shadow_ref(hl2mfn) )
   5.685              BUG();
   5.686          mpl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
   5.687 -            mk_l2_pgentry((hl2mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.688 -        if ( l2_pgentry_val(old_hl2e) & _PAGE_PRESENT )
   5.689 -            put_shadow_ref(l2_pgentry_val(old_hl2e) >> PAGE_SHIFT);
   5.690 +            l2e_create_pfn(hl2mfn, __PAGE_HYPERVISOR);
   5.691 +        if ( l2e_get_flags(old_hl2e) & _PAGE_PRESENT )
   5.692 +            put_shadow_ref(l2e_get_pfn(old_hl2e));
   5.693  
   5.694          if ( !get_shadow_ref(smfn) )
   5.695              BUG();
   5.696          mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
   5.697 -            mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.698 -        if ( l2_pgentry_val(old_sl2e) & _PAGE_PRESENT )
   5.699 -            put_shadow_ref(l2_pgentry_val(old_sl2e) >> PAGE_SHIFT);
   5.700 +            l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
   5.701 +        if ( l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
   5.702 +            put_shadow_ref(l2e_get_pfn(old_sl2e));
   5.703  
   5.704          // XXX - maybe this can be optimized somewhat??
   5.705          local_flush_tlb();
   5.706 @@ -2590,10 +2607,9 @@ char * sh_check_name;
   5.707  int shadow_status_noswap;
   5.708  
   5.709  #define v2m(adr) ({                                                      \
   5.710 -    unsigned long _a = (unsigned long)(adr);                             \
   5.711 -    unsigned long _pte = l1_pgentry_val(                                 \
   5.712 -                            shadow_linear_pg_table[_a >> PAGE_SHIFT]);   \
   5.713 -    unsigned long _pa = _pte & PAGE_MASK;                                \
   5.714 +    unsigned long _a  = (unsigned long)(adr);                            \
   5.715 +    l1_pgentry_t _pte = shadow_linear_pg_table[_a >> PAGE_SHIFT];        \
   5.716 +    unsigned long _pa = l1e_get_phys(_pte);                              \
   5.717      _pa | (_a & ~PAGE_MASK);                                             \
   5.718  })
   5.719  
   5.720 @@ -2611,49 +2627,55 @@ int shadow_status_noswap;
   5.721      } while ( 0 )
   5.722  
   5.723  static int check_pte(
   5.724 -    struct domain *d, unsigned long *pgpte, unsigned long *pspte, 
   5.725 +    struct domain *d, l1_pgentry_t *pgpte, l1_pgentry_t *pspte, 
   5.726      int level, int l2_idx, int l1_idx, int oos_ptes)
   5.727  {
   5.728 -    unsigned gpte = *pgpte;
   5.729 -    unsigned spte = *pspte;
   5.730 +    l1_pgentry_t gpte = *pgpte;
   5.731 +    l1_pgentry_t spte = *pspte;
   5.732      unsigned long mask, gpfn, smfn, gmfn;
   5.733      int errors = 0;
   5.734      int page_table_page;
   5.735  
   5.736 -    if ( (spte == 0) || (spte == 0xdeadface) || (spte == 0x00000E00) )
   5.737 +    if ( (l1e_get_value(spte) == 0) ||
   5.738 +         (l1e_get_value(spte) == 0xdeadface) ||
   5.739 +         (l1e_get_value(spte) == 0x00000E00) )
   5.740          return errors;  /* always safe */
   5.741  
   5.742 -    if ( !(spte & _PAGE_PRESENT) )
   5.743 +    if ( !(l1e_get_flags(spte) & _PAGE_PRESENT) )
   5.744          FAIL("Non zero not present spte");
   5.745  
   5.746      if ( level == 2 ) sh_l2_present++;
   5.747      if ( level == 1 ) sh_l1_present++;
   5.748  
   5.749 -    if ( !(gpte & _PAGE_PRESENT) )
   5.750 +    if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
   5.751          FAIL("Guest not present yet shadow is");
   5.752  
   5.753 -    mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW|PAGE_MASK);
   5.754 -
   5.755 -    if ( (spte & mask) != (gpte & mask) )
   5.756 +    mask = ~(_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW);
   5.757 +
   5.758 +    if ( l1e_has_changed(&spte, &gpte, mask) )
   5.759          FAIL("Corrupt?");
   5.760  
   5.761      if ( (level == 1) &&
   5.762 -         (spte & _PAGE_DIRTY ) && !(gpte & _PAGE_DIRTY) && !oos_ptes )
   5.763 +         (l1e_get_flags(spte) & _PAGE_DIRTY ) &&
   5.764 +         !(l1e_get_flags(gpte) & _PAGE_DIRTY) && !oos_ptes )
   5.765          FAIL("Dirty coherence");
   5.766  
   5.767 -    if ( (spte & _PAGE_ACCESSED ) && !(gpte & _PAGE_ACCESSED) && !oos_ptes )
   5.768 +    if ( (l1e_get_flags(spte) & _PAGE_ACCESSED ) &&
   5.769 +         !(l1e_get_flags(gpte) & _PAGE_ACCESSED) && !oos_ptes )
   5.770          FAIL("Accessed coherence");
   5.771  
   5.772 -    smfn = spte >> PAGE_SHIFT;
   5.773 -    gpfn = gpte >> PAGE_SHIFT;
   5.774 +    smfn = l1e_get_pfn(spte);
   5.775 +    gpfn = l1e_get_pfn(gpte);
   5.776      gmfn = __gpfn_to_mfn(d, gpfn);
   5.777  
   5.778      if ( !VALID_MFN(gmfn) )
   5.779 -        FAIL("invalid gpfn=%p gpte=%p\n", __func__, gpfn, gpte);
    5.780 +        FAIL("invalid gpfn=%p gpte=%p\n", gpfn,
    5.781 +             l1e_get_value(gpte));
   5.782  
   5.783      page_table_page = mfn_is_page_table(gmfn);
   5.784  
   5.785 -    if ( (spte & _PAGE_RW ) && !(gpte & _PAGE_RW) && !oos_ptes )
   5.786 +    if ( (l1e_get_flags(spte) & _PAGE_RW ) &&
   5.787 +         !(l1e_get_flags(gpte) & _PAGE_RW) && !oos_ptes )
   5.788      {
   5.789          printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d "
   5.790                 "oos_ptes=%d\n",
   5.791 @@ -2664,8 +2686,9 @@ static int check_pte(
   5.792      }
   5.793  
   5.794      if ( (level == 1) &&
   5.795 -         (spte & _PAGE_RW ) &&
   5.796 -         !((gpte & _PAGE_RW) && (gpte & _PAGE_DIRTY)) &&
   5.797 +         (l1e_get_flags(spte) & _PAGE_RW ) &&
   5.798 +         !((l1e_get_flags(gpte) & _PAGE_RW) &&
   5.799 +           (l1e_get_flags(gpte) & _PAGE_DIRTY)) &&
   5.800           !oos_ptes )
   5.801      {
   5.802          printk("gpfn=%p gmfn=%p smfn=%p t=0x%08x page_table_page=%d "
   5.803 @@ -2704,7 +2727,7 @@ static int check_l1_table(
   5.804      unsigned long gmfn, unsigned long smfn, unsigned l2_idx)
   5.805  {
   5.806      int i;
   5.807 -    unsigned long *gpl1e, *spl1e;
   5.808 +    l1_pgentry_t *gpl1e, *spl1e;
   5.809      int errors = 0, oos_ptes = 0;
   5.810  
   5.811      if ( page_out_of_sync(pfn_to_page(gmfn)) )
   5.812 @@ -2737,6 +2760,7 @@ int check_l2_table(
   5.813  {
   5.814      l2_pgentry_t *gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
   5.815      l2_pgentry_t *spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
   5.816 +    l2_pgentry_t match;
   5.817      int i;
   5.818      int errors = 0;
   5.819      int limit;
   5.820 @@ -2768,25 +2792,26 @@ int check_l2_table(
   5.821          FAILPT("hypervisor linear map inconsistent");
   5.822  #endif
   5.823  
   5.824 +    match = l2e_create_pfn(smfn, __PAGE_HYPERVISOR);
   5.825      if ( !shadow_mode_external(d) &&
   5.826 -         (l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >> 
   5.827 -                               L2_PAGETABLE_SHIFT]) != 
   5.828 -          ((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR)) )
   5.829 +         l2e_has_changed(&spl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT],
   5.830 +                         &match, PAGE_FLAG_MASK))
   5.831      {
   5.832          FAILPT("hypervisor shadow linear map inconsistent %p %p",
   5.833 -               l2_pgentry_val(spl2e[SH_LINEAR_PT_VIRT_START >>
   5.834 -                                    L2_PAGETABLE_SHIFT]),
   5.835 -               (smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.836 +               l2e_get_value(spl2e[SH_LINEAR_PT_VIRT_START >>
   5.837 +                                   L2_PAGETABLE_SHIFT]),
   5.838 +               l2e_get_value(match));
   5.839      }
   5.840  
   5.841 +    match = l2e_create_phys(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
   5.842      if ( !shadow_mode_external(d) &&
   5.843 -         (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
   5.844 -              ((__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR))) )
   5.845 +         l2e_has_changed(&spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT],
   5.846 +                         &match, PAGE_FLAG_MASK))
   5.847      {
   5.848          FAILPT("hypervisor per-domain map inconsistent saw %p, expected (va=%p) %p",
   5.849 -               l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
   5.850 +               l2e_get_value(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]),
   5.851                 d->arch.mm_perdomain_pt,
   5.852 -               (__pa(d->arch.mm_perdomain_pt) | __PAGE_HYPERVISOR));
   5.853 +               l2e_get_value(match));
   5.854      }
   5.855  
   5.856  #ifdef __i386__
   5.857 @@ -2800,7 +2825,10 @@ int check_l2_table(
   5.858  
   5.859      /* Check the whole L2. */
   5.860      for ( i = 0; i < limit; i++ )
   5.861 -        errors += check_pte(d, &l2_pgentry_val(gpl2e[i]), &l2_pgentry_val(spl2e[i]), 2, i, 0, 0);
   5.862 +        errors += check_pte(d,
   5.863 +                            (l1_pgentry_t*)(&gpl2e[i]), /* Hmm, dirty ... */
   5.864 +                            (l1_pgentry_t*)(&spl2e[i]),
   5.865 +                            2, i, 0, 0);
   5.866  
   5.867      unmap_domain_mem(spl2e);
   5.868      unmap_domain_mem(gpl2e);
   5.869 @@ -2864,11 +2892,11 @@ int _check_pagetable(struct exec_domain 
   5.870  
   5.871      for ( i = 0; i < limit; i++ )
   5.872      {
   5.873 -        unsigned long gl1pfn = l2_pgentry_val(gpl2e[i]) >> PAGE_SHIFT;
   5.874 +        unsigned long gl1pfn = l2e_get_pfn(gpl2e[i]);
   5.875          unsigned long gl1mfn = __gpfn_to_mfn(d, gl1pfn);
   5.876 -        unsigned long sl1mfn = l2_pgentry_val(spl2e[i]) >> PAGE_SHIFT;
   5.877 -
   5.878 -        if ( l2_pgentry_val(spl2e[i]) != 0 )
   5.879 +        unsigned long sl1mfn = l2e_get_pfn(spl2e[i]);
   5.880 +
   5.881 +        if ( l2e_get_value(spl2e[i]) != 0 )  /* FIXME: check flags? */
   5.882          {
   5.883              errors += check_l1_table(d, gl1pfn, gl1mfn, sl1mfn, i);
   5.884          }
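
For readers following the shadow.c conversion above: the new l1e_*/l2e_* helpers
come from the page.h headers touched later in this changeset. As a reading aid,
here is a minimal, compilable model of the l1 flavour that is consistent with the
call sites in this diff -- the struct wrapper, the constant values, and the exact
mask inside l1e_has_changed are assumptions for illustration, not the committed
definitions:

#include <stdio.h>

/* Illustrative constants for 32-bit non-PAE x86 (assumed values). */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_FLAG_MASK  (~PAGE_MASK)
#define _PAGE_PRESENT   0x001UL
#define _PAGE_RW        0x002UL

/* A one-member struct: mixing entries with plain integers now fails to
 * compile, which is what makes a sweep like this changeset checkable. */
typedef struct { unsigned long l1_lo; } l1_pgentry_t;

#define l1e_get_value(e)  ((e).l1_lo)
#define l1e_get_pfn(e)    ((e).l1_lo >> PAGE_SHIFT)
#define l1e_get_phys(e)   ((e).l1_lo & PAGE_MASK)
#define l1e_get_flags(e)  ((e).l1_lo & PAGE_FLAG_MASK)

#define l1e_empty()            ((l1_pgentry_t) { 0 })
#define l1e_create_pfn(pfn, f) ((l1_pgentry_t) { ((pfn) << PAGE_SHIFT) | (f) })
#define l1e_create_phys(pa, f) ((l1_pgentry_t) { ((pa) & PAGE_MASK) | (f) })

static inline void l1e_add_flags(l1_pgentry_t *e, unsigned long f)
    { e->l1_lo |= f; }
static inline void l1e_remove_flags(l1_pgentry_t *e, unsigned long f)
    { e->l1_lo &= ~f; }

/* Entries "differ" if the frame address or any of the requested flag bits
 * differ; unrequested bits (e.g. accessed/dirty) are ignored, which is why
 * callers above pass PAGE_FLAG_MASK or _PAGE_RW|_PAGE_PRESENT explicitly. */
static inline int l1e_has_changed(l1_pgentry_t *a, l1_pgentry_t *b,
                                  unsigned long flags)
    { return ((a->l1_lo ^ b->l1_lo) & (PAGE_MASK | flags)) != 0; }

int main(void)
{
    l1_pgentry_t e = l1e_create_pfn(0x1234UL, _PAGE_PRESENT | _PAGE_RW);
    l1_pgentry_t f = e;
    l1e_remove_flags(&f, _PAGE_RW);
    printf("pfn=%lx flags=%lx changed=%d\n", l1e_get_pfn(e),
           l1e_get_flags(e), l1e_has_changed(&e, &f, _PAGE_RW));
    /* prints: pfn=1234 flags=3 changed=1 */
    return 0;
}

The one-member struct is the design point: any leftover arithmetic that treats a
page-table entry as a bare unsigned long becomes a compile error, so the compiler
itself audits a mass conversion like this one.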
     6.1 --- a/xen/arch/x86/vmx.c	Mon Apr 18 17:47:08 2005 +0000
     6.2 +++ b/xen/arch/x86/vmx.c	Wed Apr 20 11:50:06 2005 +0000
     6.3 @@ -109,7 +109,8 @@ static int vmx_do_page_fault(unsigned lo
     6.4  {
     6.5      struct exec_domain *ed = current;
     6.6      unsigned long eip;
     6.7 -    unsigned long gpte, gpa;
     6.8 +    l1_pgentry_t gpte;
     6.9 +    unsigned long gpa; /* FIXME: PAE */
    6.10      int result;
    6.11  
    6.12  #if VMX_DEBUG
    6.13 @@ -132,9 +133,9 @@ static int vmx_do_page_fault(unsigned lo
    6.14      }
    6.15  
    6.16      gpte = gva_to_gpte(va);
    6.17 -    if (!(gpte & _PAGE_PRESENT) )
    6.18 +    if (!(l1e_get_flags(gpte) & _PAGE_PRESENT) )
    6.19              return 0;
    6.20 -    gpa = (gpte & PAGE_MASK) + (va & ~PAGE_MASK);
    6.21 +    gpa = l1e_get_phys(gpte) + (va & ~PAGE_MASK);
    6.22  
    6.23      /* Use 1:1 page table to identify MMIO address space */
    6.24      if (mmio_space(gpa))
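
The gpa computed above splices the frame address from the guest PTE together with
the page offset of the faulting address; l1e_get_phys() is exactly the
(pte & PAGE_MASK) step. A worked example with made-up values, assuming 4KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
    unsigned long va  = 0xc0123456UL;                    /* faulting VA */
    unsigned long pte = (0x5678UL << PAGE_SHIFT) | 0x67; /* pfn 0x5678, P|RW|A|D */
    unsigned long gpa = (pte & PAGE_MASK) + (va & ~PAGE_MASK);
    printf("gpa = %#lx\n", gpa);  /* 0x5678456 = frame base + offset 0x456 */
    return 0;
}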
     7.1 --- a/xen/arch/x86/vmx_platform.c	Mon Apr 18 17:47:08 2005 +0000
     7.2 +++ b/xen/arch/x86/vmx_platform.c	Wed Apr 20 11:50:06 2005 +0000
     7.3 @@ -408,7 +408,7 @@ static int vmx_decode(const unsigned cha
     7.4  
     7.5  static int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
     7.6  {
     7.7 -    unsigned long gpte;
     7.8 +    l1_pgentry_t gpte;
     7.9      unsigned long mfn;
    7.10      unsigned long ma;
    7.11      unsigned char * inst_start;
    7.12 @@ -419,7 +419,7 @@ static int inst_copy_from_guest(unsigned
    7.13  
    7.14      if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
    7.15          gpte = gva_to_gpte(guest_eip);
    7.16 -        mfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
    7.17 +        mfn = phys_to_machine_mapping(l1e_get_pfn(gpte));
    7.18          ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
    7.19          inst_start = (unsigned char *)map_domain_mem(ma);
    7.20                  
     8.1 --- a/xen/arch/x86/x86_32/domain_page.c	Mon Apr 18 17:47:08 2005 +0000
     8.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Wed Apr 20 11:50:06 2005 +0000
     8.3 @@ -19,7 +19,7 @@
     8.4  #include <asm/flushtlb.h>
     8.5  #include <asm/hardirq.h>
     8.6  
     8.7 -unsigned long *mapcache;
     8.8 +l1_pgentry_t *mapcache;
     8.9  static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
    8.10  static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
    8.11  
    8.12 @@ -28,12 +28,12 @@ static spinlock_t map_lock = SPIN_LOCK_U
    8.13  
    8.14  static void flush_all_ready_maps(void)
    8.15  {
    8.16 -    unsigned long *cache = mapcache;
    8.17 +    l1_pgentry_t *cache = mapcache;
    8.18  
    8.19      /* A bit skanky -- depends on having an aligned PAGE_SIZE set of PTEs. */
    8.20      do {
    8.21 -        if ( (*cache & READY_FOR_TLB_FLUSH) )
    8.22 -            *cache = 0;
    8.23 +        if ( (l1e_get_flags(*cache) & READY_FOR_TLB_FLUSH) )
    8.24 +            *cache = l1e_empty();
    8.25      }
    8.26      while ( ((unsigned long)(++cache) & ~PAGE_MASK) != 0 );
    8.27  }
    8.28 @@ -43,7 +43,7 @@ void *map_domain_mem(unsigned long pa)
    8.29  {
    8.30      unsigned long va;
    8.31      unsigned int idx, cpu = smp_processor_id();
    8.32 -    unsigned long *cache = mapcache;
    8.33 +    l1_pgentry_t *cache = mapcache;
    8.34  #ifndef NDEBUG
    8.35      unsigned int flush_count = 0;
    8.36  #endif
    8.37 @@ -72,9 +72,9 @@ void *map_domain_mem(unsigned long pa)
    8.38              shadow_epoch[cpu] = ++epoch;
    8.39          }
    8.40      }
    8.41 -    while ( cache[idx] != 0 );
    8.42 +    while ( l1e_get_value(cache[idx]) != 0 );
    8.43  
    8.44 -    cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;
    8.45 +    cache[idx] = l1e_create_phys(pa, __PAGE_HYPERVISOR);
    8.46  
    8.47      spin_unlock(&map_lock);
    8.48  
    8.49 @@ -88,5 +88,5 @@ void unmap_domain_mem(void *va)
    8.50      ASSERT((void *)MAPCACHE_VIRT_START <= va);
    8.51      ASSERT(va < (void *)MAPCACHE_VIRT_END);
    8.52      idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    8.53 -    mapcache[idx] |= READY_FOR_TLB_FLUSH;
    8.54 +    l1e_add_flags(&mapcache[idx], READY_FOR_TLB_FLUSH);
    8.55  }
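
The mapcache keeps its deferred-flush scheme, now spelled with the typed helpers:
unmap_domain_mem() only tags the PTE, and reclamation plus a single TLB flush
happen in batch once the allocator wraps (the epoch logic above). A sketch of one
slot's lifecycle, reusing the l1_pgentry_t model from the earlier sketch; the
READY_FOR_TLB_FLUSH bit value below is a placeholder, not the real definition:

#define READY_FOR_TLB_FLUSH (1UL << 10)  /* placeholder: a sw-available PTE bit */

static void mapcache_slot_lifecycle(l1_pgentry_t *slot, unsigned long pa)
{
    /* map_domain_mem(): claim a free slot (value 0) with a new mapping. */
    *slot = l1e_create_phys(pa, _PAGE_PRESENT | _PAGE_RW);

    /* unmap_domain_mem(): no TLB flush yet -- just mark the slot stale. */
    l1e_add_flags(slot, READY_FOR_TLB_FLUSH);

    /* flush_all_ready_maps(): reclaim every marked slot; one global TLB
     * flush then covers all of them at once. */
    if ( l1e_get_flags(*slot) & READY_FOR_TLB_FLUSH )
        *slot = l1e_empty();
}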
     9.1 --- a/xen/arch/x86/x86_32/mm.c	Mon Apr 18 17:47:08 2005 +0000
     9.2 +++ b/xen/arch/x86/x86_32/mm.c	Wed Apr 20 11:50:06 2005 +0000
     9.3 @@ -47,9 +47,9 @@ int map_pages(
     9.4          if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
     9.5          {
     9.6              /* Super-page mapping. */
     9.7 -            if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
     9.8 +            if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
     9.9                  local_flush_tlb_pge();
    9.10 -            *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
    9.11 +            *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
    9.12  
    9.13              v += 1 << L2_PAGETABLE_SHIFT;
    9.14              p += 1 << L2_PAGETABLE_SHIFT;
    9.15 @@ -58,16 +58,16 @@ int map_pages(
    9.16          else
    9.17          {
    9.18              /* Normal page mapping. */
    9.19 -            if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
    9.20 +            if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
    9.21              {
    9.22                  newpg = (void *)alloc_xenheap_page();
    9.23                  clear_page(newpg);
    9.24 -                *pl2e = mk_l2_pgentry(__pa(newpg) | (flags & __PTE_MASK));
    9.25 +                *pl2e = l2e_create_phys(__pa(newpg), flags & __PTE_MASK);
    9.26              }
    9.27 -            pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
    9.28 -            if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
    9.29 +            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
    9.30 +            if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
    9.31                  local_flush_tlb_one(v);
    9.32 -            *pl1e = mk_l1_pgentry(p|flags);
    9.33 +            *pl1e = l1e_create_phys(p, flags);
    9.34  
    9.35              v += 1 << L1_PAGETABLE_SHIFT;
    9.36              p += 1 << L1_PAGETABLE_SHIFT;
    9.37 @@ -90,14 +90,14 @@ void __set_fixmap(
    9.38  void __init paging_init(void)
    9.39  {
    9.40      void *ioremap_pt;
    9.41 -    unsigned long v, l2e;
    9.42 +    unsigned long v;
    9.43      struct pfn_info *pg;
    9.44  
    9.45      /* Allocate and map the machine-to-phys table. */
    9.46      if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
    9.47          panic("Not enough memory to bootstrap Xen.\n");
    9.48      idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)] =
    9.49 -        mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
    9.50 +        l2e_create_phys(page_to_phys(pg), __PAGE_HYPERVISOR | _PAGE_PSE);
    9.51      memset((void *)RDWR_MPT_VIRT_START, 0x55, 4UL << 20);
    9.52  
    9.53      /* Xen 4MB mappings can all be GLOBAL. */
    9.54 @@ -105,10 +105,9 @@ void __init paging_init(void)
    9.55      {
    9.56          for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
    9.57          {
    9.58 -             l2e = l2_pgentry_val(idle_pg_table[l2_table_offset(v)]);
    9.59 -             if ( l2e & _PAGE_PSE )
    9.60 -                 l2e |= _PAGE_GLOBAL;
    9.61 -             idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
    9.62 +            if (l2e_get_flags(idle_pg_table[l2_table_offset(v)]) & _PAGE_PSE)
    9.63 +                l2e_add_flags(&idle_pg_table[l2_table_offset(v)],
    9.64 +                              _PAGE_GLOBAL);
    9.65          }
    9.66      }
    9.67  
    9.68 @@ -116,33 +115,33 @@ void __init paging_init(void)
    9.69      ioremap_pt = (void *)alloc_xenheap_page();
    9.70      clear_page(ioremap_pt);
    9.71      idle_pg_table[l2_table_offset(IOREMAP_VIRT_START)] =
    9.72 -        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
    9.73 +        l2e_create_phys(__pa(ioremap_pt), __PAGE_HYPERVISOR);
    9.74  
    9.75      /* Create read-only mapping of MPT for guest-OS use.
    9.76       * NB. Remove the global bit so that shadow_mode_translate()==true domains
    9.77       *     can reused this address space for their phys-to-machine mapping.
    9.78       */
    9.79      idle_pg_table[l2_table_offset(RO_MPT_VIRT_START)] =
    9.80 -        mk_l2_pgentry(l2_pgentry_val(
    9.81 -                          idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]) &
    9.82 -                      ~(_PAGE_RW | _PAGE_GLOBAL));
    9.83 +        l2e_create_pfn(l2e_get_pfn(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]),
    9.84 +                       l2e_get_flags(idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)])
    9.85 +                       & ~(_PAGE_RW | _PAGE_GLOBAL));
    9.86  
    9.87      /* Set up mapping cache for domain pages. */
    9.88 -    mapcache = (unsigned long *)alloc_xenheap_page();
    9.89 +    mapcache = (l1_pgentry_t *)alloc_xenheap_page();
    9.90      clear_page(mapcache);
    9.91      idle_pg_table[l2_table_offset(MAPCACHE_VIRT_START)] =
    9.92 -        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
    9.93 +        l2e_create_phys(__pa(mapcache), __PAGE_HYPERVISOR);
    9.94  
    9.95      /* Set up linear page table mapping. */
    9.96      idle_pg_table[l2_table_offset(LINEAR_PT_VIRT_START)] =
    9.97 -        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
    9.98 +        l2e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
    9.99  }
   9.100  
   9.101  void __init zap_low_mappings(void)
   9.102  {
   9.103      int i;
   9.104      for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
   9.105 -        idle_pg_table[i] = mk_l2_pgentry(0);
   9.106 +        idle_pg_table[i] = l2e_empty();
   9.107      flush_tlb_all_pge();
   9.108  }
   9.109  
   9.110 @@ -168,7 +167,7 @@ void subarch_init_memory(struct domain *
   9.111      }
   9.112  
   9.113      /* M2P table is mappable read-only by privileged domains. */
   9.114 -    m2p_start_mfn = l2_pgentry_to_pfn(
   9.115 +    m2p_start_mfn = l2e_get_pfn(
   9.116          idle_pg_table[l2_table_offset(RDWR_MPT_VIRT_START)]);
   9.117      for ( i = 0; i < 1024; i++ )
   9.118      {
   9.119 @@ -318,11 +317,11 @@ void *memguard_init(void *heap_start)
   9.120          l1 = (l1_pgentry_t *)heap_start;
   9.121          heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
   9.122          for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
   9.123 -            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
   9.124 -                                   (j << L1_PAGETABLE_SHIFT) | 
   9.125 -                                  __PAGE_HYPERVISOR);
   9.126 +            l1[j] = l1e_create_phys((i << L2_PAGETABLE_SHIFT) |
   9.127 +                                    (j << L1_PAGETABLE_SHIFT),
   9.128 +                                    __PAGE_HYPERVISOR);
   9.129          idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
   9.130 -            mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
   9.131 +            l2e_create_phys(virt_to_phys(l1), __PAGE_HYPERVISOR);
   9.132      }
   9.133  
   9.134      return heap_start;
   9.135 @@ -344,11 +343,11 @@ static void __memguard_change_range(void
   9.136      while ( _l != 0 )
   9.137      {
   9.138          l2  = &idle_pg_table[l2_table_offset(_p)];
   9.139 -        l1  = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
   9.140 +        l1  = l2e_to_l1e(*l2) + l1_table_offset(_p);
   9.141          if ( guard )
   9.142 -            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
   9.143 +            l1e_remove_flags(l1, _PAGE_PRESENT);
   9.144          else
   9.145 -            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
   9.146 +            l1e_add_flags(l1, _PAGE_PRESENT);
   9.147          _p += PAGE_SIZE;
   9.148          _l -= PAGE_SIZE;
   9.149      }
    10.1 --- a/xen/arch/x86/x86_32/traps.c	Mon Apr 18 17:47:08 2005 +0000
    10.2 +++ b/xen/arch/x86/x86_32/traps.c	Wed Apr 20 11:50:06 2005 +0000
    10.3 @@ -163,7 +163,7 @@ void show_page_walk(unsigned long addr)
    10.4  
    10.5      printk("Pagetable walk from %p:\n", addr);
    10.6      
    10.7 -    page = l2_pgentry_val(idle_pg_table[l2_table_offset(addr)]);
    10.8 +    page = l2e_get_value(idle_pg_table[l2_table_offset(addr)]);
    10.9      printk(" L2 = %p %s\n", page, (page & _PAGE_PSE) ? "(4MB)" : "");
   10.10      if ( !(page & _PAGE_PRESENT) || (page & _PAGE_PSE) )
   10.11          return;
    11.1 --- a/xen/arch/x86/x86_64/mm.c	Mon Apr 18 17:47:08 2005 +0000
    11.2 +++ b/xen/arch/x86/x86_64/mm.c	Wed Apr 20 11:50:06 2005 +0000
    11.3 @@ -69,29 +69,29 @@ int map_pages(
    11.4      while ( s != 0 )
    11.5      {
    11.6          pl4e = &pt[l4_table_offset(v)];
    11.7 -        if ( !(l4_pgentry_val(*pl4e) & _PAGE_PRESENT) )
    11.8 +        if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
    11.9          {
   11.10              newpg = safe_page_alloc();
   11.11              clear_page(newpg);
   11.12 -            *pl4e = mk_l4_pgentry(__pa(newpg) | (flags & __PTE_MASK));
   11.13 +            *pl4e = l4e_create_phys(__pa(newpg), flags & __PTE_MASK);
   11.14          }
   11.15  
   11.16 -        pl3e = l4_pgentry_to_l3(*pl4e) + l3_table_offset(v);
   11.17 -        if ( !(l3_pgentry_val(*pl3e) & _PAGE_PRESENT) )
   11.18 +        pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(v);
   11.19 +        if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
   11.20          {
   11.21              newpg = safe_page_alloc();
   11.22              clear_page(newpg);
   11.23 -            *pl3e = mk_l3_pgentry(__pa(newpg) | (flags & __PTE_MASK));
   11.24 +            *pl3e = l3e_create_phys(__pa(newpg), flags & __PTE_MASK);
   11.25          }
   11.26  
   11.27 -        pl2e = l3_pgentry_to_l2(*pl3e) + l2_table_offset(v);
   11.28 +        pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
   11.29  
   11.30          if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
   11.31          {
   11.32              /* Super-page mapping. */
   11.33 -            if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
   11.34 +            if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
   11.35                  local_flush_tlb_pge();
   11.36 -            *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
   11.37 +            *pl2e = l2e_create_phys(p, flags|_PAGE_PSE);
   11.38  
   11.39              v += 1 << L2_PAGETABLE_SHIFT;
   11.40              p += 1 << L2_PAGETABLE_SHIFT;
   11.41 @@ -100,16 +100,16 @@ int map_pages(
   11.42          else
   11.43          {
   11.44              /* Normal page mapping. */
   11.45 -            if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
   11.46 +            if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
   11.47              {
   11.48                  newpg = safe_page_alloc();
   11.49                  clear_page(newpg);
   11.50 -                *pl2e = mk_l2_pgentry(__pa(newpg) | (flags & __PTE_MASK));
   11.51 +                *pl2e = l2e_create_phys(__pa(newpg), flags & __PTE_MASK);
   11.52              }
   11.53 -            pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
   11.54 -            if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
   11.55 +            pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
   11.56 +            if ( (l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
   11.57                  local_flush_tlb_one(v);
   11.58 -            *pl1e = mk_l1_pgentry(p|flags);
   11.59 +            *pl1e = l1e_create_phys(p, flags);
   11.60  
   11.61              v += 1 << L1_PAGETABLE_SHIFT;
   11.62              p += 1 << L1_PAGETABLE_SHIFT;
   11.63 @@ -161,19 +161,18 @@ void __init paging_init(void)
   11.64       * Above we mapped the M2P table as user-accessible and read-writable.
   11.65       * Fix security by denying user access at the top level of the page table.
   11.66       */
   11.67 -    idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)] =
   11.68 -        mk_l4_pgentry(l4_pgentry_val(
   11.69 -            idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]) & 
   11.70 -                      ~_PAGE_USER);
   11.71 +    l4e_remove_flags(&idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)],
   11.72 +                     _PAGE_USER);
   11.73  
   11.74      /* Create read-only mapping of MPT for guest-OS use. */
   11.75      l3ro = (l3_pgentry_t *)alloc_xenheap_page();
   11.76      clear_page(l3ro);
   11.77      idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
   11.78 -        mk_l4_pgentry((__pa(l3ro) | __PAGE_HYPERVISOR | _PAGE_USER) &
   11.79 -                      ~_PAGE_RW);
   11.80 +        l4e_create_phys(__pa(l3ro),
   11.81 +                        (__PAGE_HYPERVISOR | _PAGE_USER) & ~_PAGE_RW);
   11.82 +
   11.83      /* Copy the L3 mappings from the RDWR_MPT area. */
   11.84 -    l3rw = l4_pgentry_to_l3(
   11.85 +    l3rw = l4e_to_l3e(
   11.86          idle_pg_table[l4_table_offset(RDWR_MPT_VIRT_START)]);
   11.87      l3rw += l3_table_offset(RDWR_MPT_VIRT_START);
   11.88      l3ro += l3_table_offset(RO_MPT_VIRT_START);
   11.89 @@ -182,12 +181,12 @@ void __init paging_init(void)
   11.90  
   11.91      /* Set up linear page table mapping. */
   11.92      idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
   11.93 -        mk_l4_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
   11.94 +        l4e_create_phys(__pa(idle_pg_table), __PAGE_HYPERVISOR);
   11.95  }
   11.96  
   11.97  void __init zap_low_mappings(void)
   11.98  {
   11.99 -    idle_pg_table[0] = mk_l4_pgentry(0);
  11.100 +    idle_pg_table[0] = l4e_empty();
  11.101      flush_tlb_all_pge();
  11.102  }
  11.103  
  11.104 @@ -217,14 +216,14 @@ void subarch_init_memory(struct domain *
  11.105            v != RDWR_MPT_VIRT_END;
  11.106            v += 1 << L2_PAGETABLE_SHIFT )
  11.107      {
  11.108 -        l3e = l4_pgentry_to_l3(idle_pg_table[l4_table_offset(v)])[
  11.109 +        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
  11.110              l3_table_offset(v)];
  11.111 -        if ( !(l3_pgentry_val(l3e) & _PAGE_PRESENT) )
  11.112 +        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
  11.113              continue;
  11.114 -        l2e = l3_pgentry_to_l2(l3e)[l2_table_offset(v)];
  11.115 -        if ( !(l2_pgentry_val(l2e) & _PAGE_PRESENT) )
  11.116 +        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
  11.117 +        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
  11.118              continue;
  11.119 -        m2p_start_mfn = l2_pgentry_to_pfn(l2e);
  11.120 +        m2p_start_mfn = l2e_get_pfn(l2e);
  11.121  
  11.122          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
  11.123          {
  11.124 @@ -361,20 +360,20 @@ void *memguard_init(void *heap_start)
  11.125      {
  11.126          ALLOC_PT(l1);
  11.127          for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
  11.128 -            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
  11.129 -                                   (j << L1_PAGETABLE_SHIFT) | 
  11.130 -                                  __PAGE_HYPERVISOR);
  11.131 +            l1[j] = l1e_create_phys((i << L2_PAGETABLE_SHIFT) |
  11.132 +                                    (j << L1_PAGETABLE_SHIFT),
  11.133 +                                    __PAGE_HYPERVISOR);
  11.134          if ( !((unsigned long)l2 & (PAGE_SIZE-1)) )
  11.135          {
  11.136              ALLOC_PT(l2);
  11.137              if ( !((unsigned long)l3 & (PAGE_SIZE-1)) )
  11.138              {
  11.139                  ALLOC_PT(l3);
  11.140 -                *l4++ = mk_l4_pgentry(virt_to_phys(l3) | __PAGE_HYPERVISOR);
  11.141 +                *l4++ = l4e_create_phys(virt_to_phys(l3), __PAGE_HYPERVISOR);
  11.142              }
  11.143 -            *l3++ = mk_l3_pgentry(virt_to_phys(l2) | __PAGE_HYPERVISOR);
  11.144 +            *l3++ = l3e_create_phys(virt_to_phys(l2), __PAGE_HYPERVISOR);
  11.145          }
  11.146 -        *l2++ = mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
  11.147 +        *l2++ = l2e_create_phys(virt_to_phys(l1), __PAGE_HYPERVISOR);
  11.148      }
  11.149  
  11.150      return heap_start;
  11.151 @@ -398,13 +397,13 @@ static void __memguard_change_range(void
  11.152      while ( _l != 0 )
  11.153      {
  11.154          l4 = &idle_pg_table[l4_table_offset(_p)];
  11.155 -        l3 = l4_pgentry_to_l3(*l4) + l3_table_offset(_p);
  11.156 -        l2 = l3_pgentry_to_l2(*l3) + l2_table_offset(_p);
  11.157 -        l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
  11.158 +        l3 = l4e_to_l3e(*l4) + l3_table_offset(_p);
  11.159 +        l2 = l3e_to_l2e(*l3) + l2_table_offset(_p);
  11.160 +        l1 = l2e_to_l1e(*l2) + l1_table_offset(_p);
  11.161          if ( guard )
  11.162 -            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
  11.163 +            l1e_remove_flags(l1, _PAGE_PRESENT);
  11.164          else
  11.165 -            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
  11.166 +            l1e_add_flags(l1, _PAGE_PRESENT);
  11.167          _p += PAGE_SIZE;
  11.168          _l -= PAGE_SIZE;
  11.169      }
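
The __memguard_change_range() hunk above shows the main payoff of the new
API: a read-modify-write that used to be spelled
mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT) collapses into a single
helper call. A minimal sketch of the same pattern (illustrative only, not
part of the patch), using the walk and flag helpers this changeset
introduces:

    /* Toggle the PRESENT bit on one 4-level mapping, as memguard does. */
    static void toggle_guard(unsigned long va, int guard)
    {
        l4_pgentry_t *l4 = &idle_pg_table[l4_table_offset(va)];
        l3_pgentry_t *l3 = l4e_to_l3e(*l4) + l3_table_offset(va);
        l2_pgentry_t *l2 = l3e_to_l2e(*l3) + l2_table_offset(va);
        l1_pgentry_t *l1 = l2e_to_l1e(*l2) + l1_table_offset(va);

        if ( guard )
            l1e_remove_flags(l1, _PAGE_PRESENT); /* accesses now fault */
        else
            l1e_add_flags(l1, _PAGE_PRESENT);    /* mapping restored   */
    }
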
    12.1 --- a/xen/common/grant_table.c	Mon Apr 18 17:47:08 2005 +0000
    12.2 +++ b/xen/common/grant_table.c	Wed Apr 20 11:50:06 2005 +0000
    12.3 @@ -253,12 +253,12 @@ static int
    12.4      {
    12.5          /* Write update into the pagetable
    12.6           */
    12.7 +        l1_pgentry_t pte;
    12.8  
    12.9 -        rc = update_grant_va_mapping( host_virt_addr,
   12.10 -                                (frame << PAGE_SHIFT) | _PAGE_PRESENT  |
   12.11 -                                                        _PAGE_ACCESSED |
   12.12 -                                                        _PAGE_DIRTY    |
   12.13 -                       ((dev_hst_ro_flags & GNTMAP_readonly) ? 0 : _PAGE_RW),
   12.14 +        pte = l1e_create_pfn(frame, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
   12.15 +        if ( !(dev_hst_ro_flags & GNTMAP_readonly) )
    12.16 +            l1e_add_flags(&pte, _PAGE_RW);
   12.17 +        rc = update_grant_va_mapping( host_virt_addr, pte, 
   12.18                         mapping_d, mapping_ed );
   12.19  
   12.20          /* IMPORTANT: (rc == 0) => must flush / invalidate entry in TLB.
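
The grant-table change is representative of how call sites are converted:
the old code OR'ed flags into a raw address, the new code builds a typed
entry and adds the conditional flag afterwards. Both produce the same bit
pattern; a sketch (illustrative only, for a frame number 'frame' already
in hand):

    unsigned long raw = (frame << PAGE_SHIFT) | _PAGE_PRESENT |
                        _PAGE_ACCESSED | _PAGE_DIRTY;
    l1_pgentry_t  pte = l1e_create_pfn(frame, _PAGE_PRESENT |
                                       _PAGE_ACCESSED | _PAGE_DIRTY);
    ASSERT(l1e_get_value(pte) == raw);
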
    13.1 --- a/xen/include/asm-x86/mm.h	Mon Apr 18 17:47:08 2005 +0000
    13.2 +++ b/xen/include/asm-x86/mm.h	Wed Apr 20 11:50:06 2005 +0000
    13.3 @@ -263,13 +263,14 @@ static inline unsigned long phys_to_mach
    13.4      unsigned long mfn;
    13.5      l1_pgentry_t pte;
    13.6  
    13.7 -   if ( !__get_user(l1_pgentry_val(pte), (__phys_to_machine_mapping + pfn)) &&
    13.8 -        (l1_pgentry_val(pte) & _PAGE_PRESENT) )
    13.9 -       mfn = l1_pgentry_to_phys(pte) >> PAGE_SHIFT;
   13.10 -   else
   13.11 -       mfn = INVALID_MFN;
   13.12 -
   13.13 -   return mfn; 
    13.14 +    if ( !__copy_from_user(&pte, (__phys_to_machine_mapping + pfn),
    13.15 +                           sizeof(pte)) &&
    13.16 +         (l1e_get_flags(pte) & _PAGE_PRESENT) )
    13.17 +        mfn = l1e_get_pfn(pte);
    13.18 +    else
    13.19 +        mfn = INVALID_MFN;
    13.20 +
    13.21 +    return mfn;
   13.22  }
   13.23  #define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn)
   13.24  
   13.25 @@ -352,7 +353,7 @@ void propagate_page_fault(unsigned long 
   13.26   * hold a reference to the page.
   13.27   */
   13.28  int update_grant_va_mapping(unsigned long va,
   13.29 -                            unsigned long val,
   13.30 +                            l1_pgentry_t _nl1e, 
   13.31                              struct domain *d,
   13.32                              struct exec_domain *ed);
   13.33  #endif /* __ASM_X86_MM_H__ */
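
Note why phys_to_machine_mapping() switches from __get_user() to
__copy_from_user(): the old code fetched through the raw-value lvalue
l1_pgentry_val(pte), and with that accessor gone the entry must be copied
as a structure with an explicit size. A sketch of the new fetch
(illustrative only; 'p' stands for the table slot pointer):

    l1_pgentry_t pte;

    /* old: __get_user(l1_pgentry_val(pte), p);  -- raw lvalue, removed */
    if ( __copy_from_user(&pte, p, sizeof(pte)) )
        return INVALID_MFN;                     /* fetch faulted */
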
    14.1 --- a/xen/include/asm-x86/page.h	Mon Apr 18 17:47:08 2005 +0000
    14.2 +++ b/xen/include/asm-x86/page.h	Wed Apr 20 11:50:06 2005 +0000
    14.3 @@ -2,6 +2,13 @@
    14.4  #ifndef __X86_PAGE_H__
    14.5  #define __X86_PAGE_H__
    14.6  
    14.7 +#ifndef __ASSEMBLY__
    14.8 +#define PAGE_SIZE           (1UL << PAGE_SHIFT)
    14.9 +#else
   14.10 +#define PAGE_SIZE           (1 << PAGE_SHIFT)
   14.11 +#endif
   14.12 +#define PAGE_MASK           (~(PAGE_SIZE-1))
   14.13 +
   14.14  #if defined(__i386__)
   14.15  #include <asm/x86_32/page.h>
   14.16  #elif defined(__x86_64__)
   14.17 @@ -19,13 +26,6 @@ typedef struct { unsigned long pt_lo; } 
   14.18  #define mk_pagetable(_x)    ( (pagetable_t) { (_x) } )
   14.19  #endif
   14.20  
   14.21 -#ifndef __ASSEMBLY__
   14.22 -#define PAGE_SIZE           (1UL << PAGE_SHIFT)
   14.23 -#else
   14.24 -#define PAGE_SIZE           (1 << PAGE_SHIFT)
   14.25 -#endif
   14.26 -#define PAGE_MASK           (~(PAGE_SIZE-1))
   14.27 -
   14.28  #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
   14.29  #define copy_page(_t,_f)    memcpy((void *)(_t), (void *)(_f), PAGE_SIZE)
   14.30  
   14.31 @@ -70,7 +70,7 @@ typedef struct { unsigned long pt_lo; } 
   14.32  #define linear_l4_table(_ed) ((_ed)->arch.guest_vl4table)
   14.33  
   14.34  #define va_to_l1mfn(_ed, _va) \
   14.35 -    (l2_pgentry_val(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
   14.36 +    (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
   14.37  
   14.38  extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
   14.39  
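
The PAGE_SIZE/PAGE_MASK hoist at the top of this header is a dependency
fix rather than a cleanup: the subarch headers included immediately
afterwards now define static inline constructors whose bodies reference
PAGE_MASK, so the macro must already be visible when those headers are
parsed. Roughly (illustrative only):

    #define PAGE_SIZE (1UL << PAGE_SHIFT)
    #define PAGE_MASK (~(PAGE_SIZE-1))
    #include <asm/x86_32/page.h> /* l1e_create_phys() masks with PAGE_MASK */
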
    15.1 --- a/xen/include/asm-x86/shadow.h	Mon Apr 18 17:47:08 2005 +0000
    15.2 +++ b/xen/include/asm-x86/shadow.h	Wed Apr 20 11:50:06 2005 +0000
    15.3 @@ -270,18 +270,22 @@ extern int shadow_status_noswap;
    15.4  static inline int
    15.5  shadow_get_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
    15.6  {
    15.7 -    l1_pgentry_t nl1e = mk_l1_pgentry(l1_pgentry_val(l1e) & ~_PAGE_GLOBAL);
    15.8 -    int res = get_page_from_l1e(nl1e, d);
    15.9 +    l1_pgentry_t nl1e;
   15.10 +    int res;
   15.11      unsigned long mfn;
   15.12      struct domain *owner;
   15.13  
   15.14 -    ASSERT( l1_pgentry_val(nl1e) & _PAGE_PRESENT );
   15.15 +    ASSERT(l1e_get_flags(l1e) & _PAGE_PRESENT);
   15.16 +
   15.17 +    nl1e = l1e;
   15.18 +    l1e_remove_flags(&nl1e, _PAGE_GLOBAL);
   15.19 +    res = get_page_from_l1e(nl1e, d);
   15.20  
   15.21      if ( unlikely(!res) && IS_PRIV(d) && !shadow_mode_translate(d) &&
   15.22 -         !(l1_pgentry_val(nl1e) & L1_DISALLOW_MASK) &&
   15.23 -         (mfn = l1_pgentry_to_pfn(nl1e)) &&
   15.24 +         !(l1e_get_flags(l1e) & L1_DISALLOW_MASK) &&
   15.25 +         (mfn = l1e_get_pfn(l1e)) &&
   15.26           pfn_valid(mfn) &&
   15.27 -         (owner = page_get_owner(pfn_to_page(l1_pgentry_to_pfn(nl1e)))) &&
   15.28 +         (owner = page_get_owner(pfn_to_page(l1e_get_pfn(l1e)))) &&
   15.29           (d != owner) )
   15.30      {
   15.31          res = get_page_from_l1e(nl1e, owner);
   15.32 @@ -293,7 +297,7 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
   15.33      if ( unlikely(!res) )
   15.34      {
   15.35          perfc_incrc(shadow_get_page_fail);
   15.36 -        FSH_LOG("%s failed to get ref l1e=%p", __func__, l1_pgentry_val(l1e));
   15.37 +        FSH_LOG("%s failed to get ref l1e=%p\n", __func__, l1e_get_value(l1e));
   15.38      }
   15.39  
   15.40      return res;
   15.41 @@ -303,34 +307,34 @@ shadow_get_page_from_l1e(l1_pgentry_t l1
   15.42  
   15.43  static inline void
   15.44  __shadow_get_l2e(
   15.45 -    struct exec_domain *ed, unsigned long va, unsigned long *psl2e)
   15.46 +    struct exec_domain *ed, unsigned long va, l2_pgentry_t *psl2e)
   15.47  {
   15.48      ASSERT(shadow_mode_enabled(ed->domain));
   15.49  
   15.50 -    *psl2e = l2_pgentry_val( ed->arch.shadow_vtable[l2_table_offset(va)]);
   15.51 +    *psl2e = ed->arch.shadow_vtable[l2_table_offset(va)];
   15.52  }
   15.53  
   15.54  static inline void
   15.55  __shadow_set_l2e(
   15.56 -    struct exec_domain *ed, unsigned long va, unsigned long value)
   15.57 +    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
   15.58  {
   15.59      ASSERT(shadow_mode_enabled(ed->domain));
   15.60  
   15.61 -    ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
   15.62 +    ed->arch.shadow_vtable[l2_table_offset(va)] = value;
   15.63  }
   15.64  
   15.65  static inline void
   15.66  __guest_get_l2e(
   15.67 -    struct exec_domain *ed, unsigned long va, unsigned long *pl2e)
   15.68 +    struct exec_domain *ed, unsigned long va, l2_pgentry_t *pl2e)
   15.69  {
   15.70 -    *pl2e = l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]);
   15.71 +    *pl2e = ed->arch.guest_vtable[l2_table_offset(va)];
   15.72  }
   15.73  
   15.74  static inline void
   15.75  __guest_set_l2e(
   15.76 -    struct exec_domain *ed, unsigned long va, unsigned long value)
   15.77 +    struct exec_domain *ed, unsigned long va, l2_pgentry_t value)
   15.78  {
   15.79 -    ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
   15.80 +    ed->arch.guest_vtable[l2_table_offset(va)] = value;
   15.81  
   15.82      if ( unlikely(shadow_mode_translate(ed->domain)) )
   15.83          update_hl2e(ed, va);
   15.84 @@ -340,36 +344,36 @@ static inline void
   15.85  update_hl2e(struct exec_domain *ed, unsigned long va)
   15.86  {
   15.87      int index = l2_table_offset(va);
   15.88 -    unsigned long gl2e = l2_pgentry_val(ed->arch.guest_vtable[index]);
   15.89      unsigned long mfn;
   15.90 -    unsigned long old_hl2e, new_hl2e;
   15.91 +    l2_pgentry_t gl2e = ed->arch.guest_vtable[index];
   15.92 +    l1_pgentry_t old_hl2e, new_hl2e;
   15.93      int need_flush = 0;
   15.94  
   15.95      ASSERT(shadow_mode_translate(ed->domain));
   15.96  
   15.97 -    old_hl2e = l1_pgentry_val(ed->arch.hl2_vtable[index]);
   15.98 +    old_hl2e = ed->arch.hl2_vtable[index];
   15.99  
  15.100 -    if ( (gl2e & _PAGE_PRESENT) &&
  15.101 -         VALID_MFN(mfn = phys_to_machine_mapping(gl2e >> PAGE_SHIFT)) )
  15.102 -        new_hl2e = (mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR;
  15.103 +    if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) &&
   15.104 +         VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) )
  15.105 +        new_hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
  15.106      else
  15.107 -        new_hl2e = 0;
  15.108 +        new_hl2e = l1e_empty();
  15.109  
  15.110      // only do the ref counting if something important changed.
  15.111      //
  15.112 -    if ( (old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT) )
   15.113 +    if ( l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT) )
  15.114      {
  15.115 -        if ( (new_hl2e & _PAGE_PRESENT) &&
  15.116 -             !get_page(pfn_to_page(new_hl2e >> PAGE_SHIFT), ed->domain) )
  15.117 -            new_hl2e = 0;
  15.118 -        if ( old_hl2e & _PAGE_PRESENT )
  15.119 +        if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
  15.120 +             !get_page(pfn_to_page(l1e_get_pfn(new_hl2e)), ed->domain) )
  15.121 +            new_hl2e = l1e_empty();
  15.122 +        if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
  15.123          {
  15.124 -            put_page(pfn_to_page(old_hl2e >> PAGE_SHIFT));
  15.125 +            put_page(pfn_to_page(l1e_get_pfn(old_hl2e)));
  15.126              need_flush = 1;
  15.127          }
  15.128      }
  15.129  
  15.130 -    ed->arch.hl2_vtable[l2_table_offset(va)] = mk_l1_pgentry(new_hl2e);
  15.131 +    ed->arch.hl2_vtable[l2_table_offset(va)] = new_hl2e;
  15.132  
  15.133      if ( need_flush )
  15.134      {
  15.135 @@ -564,13 +568,13 @@ extern void shadow_mark_va_out_of_sync(
  15.136      unsigned long va);
  15.137  
  15.138  static inline int l1pte_write_fault(
  15.139 -    struct exec_domain *ed, unsigned long *gpte_p, unsigned long *spte_p,
  15.140 +    struct exec_domain *ed, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p,
  15.141      unsigned long va)
  15.142  {
  15.143      struct domain *d = ed->domain;
  15.144 -    unsigned long gpte = *gpte_p;
  15.145 -    unsigned long spte;
  15.146 -    unsigned long gpfn = gpte >> PAGE_SHIFT;
  15.147 +    l1_pgentry_t gpte = *gpte_p;
  15.148 +    l1_pgentry_t spte;
  15.149 +    unsigned long gpfn = l1e_get_pfn(gpte);
  15.150      unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
  15.151  
  15.152      //printk("l1pte_write_fault gmfn=%p\n", gmfn);
  15.153 @@ -578,15 +582,16 @@ static inline int l1pte_write_fault(
  15.154      if ( unlikely(!VALID_MFN(gmfn)) )
  15.155      {
  15.156          SH_LOG("l1pte_write_fault: invalid gpfn=%p", gpfn);
  15.157 -        *spte_p = 0;
  15.158 +        *spte_p = l1e_empty();
  15.159          return 0;
  15.160      }
  15.161  
  15.162 -    ASSERT(gpte & _PAGE_RW);
  15.163 -    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
  15.164 -    spte = (gmfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
  15.165 +    ASSERT(l1e_get_flags(gpte) & _PAGE_RW);
  15.166 +    l1e_add_flags(&gpte, _PAGE_DIRTY | _PAGE_ACCESSED);
  15.167 +    spte = l1e_create_pfn(gmfn, l1e_get_flags(gpte));
  15.168  
  15.169 -    SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
  15.170 +    SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p",
  15.171 +             l1e_get_value(spte), l1e_get_value(gpte));
  15.172  
  15.173      if ( shadow_mode_log_dirty(d) )
  15.174          __mark_dirty(d, gmfn);
  15.175 @@ -601,30 +606,31 @@ static inline int l1pte_write_fault(
  15.176  }
  15.177  
  15.178  static inline int l1pte_read_fault(
  15.179 -    struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
  15.180 +    struct domain *d, l1_pgentry_t *gpte_p, l1_pgentry_t *spte_p)
  15.181  { 
  15.182 -    unsigned long gpte = *gpte_p;
  15.183 -    unsigned long spte = *spte_p;
  15.184 -    unsigned long pfn = gpte >> PAGE_SHIFT;
  15.185 +    l1_pgentry_t gpte = *gpte_p;
  15.186 +    l1_pgentry_t spte = *spte_p;
  15.187 +    unsigned long pfn = l1e_get_pfn(gpte);
  15.188      unsigned long mfn = __gpfn_to_mfn(d, pfn);
  15.189  
  15.190      if ( unlikely(!VALID_MFN(mfn)) )
  15.191      {
  15.192          SH_LOG("l1pte_read_fault: invalid gpfn=%p", pfn);
  15.193 -        *spte_p = 0;
  15.194 +        *spte_p = l1e_empty();
  15.195          return 0;
  15.196      }
  15.197  
  15.198 -    gpte |= _PAGE_ACCESSED;
  15.199 -    spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
  15.200 +    l1e_add_flags(&gpte, _PAGE_ACCESSED);
  15.201 +    spte = l1e_create_pfn(mfn, l1e_get_flags(gpte));
  15.202  
  15.203 -    if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) ||
  15.204 +    if ( shadow_mode_log_dirty(d) || !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
  15.205           mfn_is_page_table(mfn) )
  15.206      {
  15.207 -        spte &= ~_PAGE_RW;
  15.208 +        l1e_remove_flags(&spte, _PAGE_RW);
  15.209      }
  15.210  
  15.211 -    SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
  15.212 +    SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p",
  15.213 +             l1e_get_value(spte), l1e_get_value(gpte));
  15.214      *gpte_p = gpte;
  15.215      *spte_p = spte;
  15.216  
  15.217 @@ -632,23 +638,24 @@ static inline int l1pte_read_fault(
  15.218  }
  15.219  
  15.220  static inline void l1pte_propagate_from_guest(
  15.221 -    struct domain *d, unsigned long gpte, unsigned long *spte_p)
  15.222 +    struct domain *d, l1_pgentry_t gpte, l1_pgentry_t *spte_p)
  15.223  { 
  15.224 -    unsigned long mfn, spte;
  15.225 -
  15.226 -    spte = 0;
  15.227 +    unsigned long mfn;
  15.228 +    l1_pgentry_t spte;
  15.229  
  15.230 -    if ( ((gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
  15.231 +    spte = l1e_empty();
  15.232 +
  15.233 +    if ( ((l1e_get_flags(gpte) & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
  15.234            (_PAGE_PRESENT|_PAGE_ACCESSED)) &&
  15.235 -         VALID_MFN(mfn = __gpfn_to_mfn(d, gpte >> PAGE_SHIFT)) )
  15.236 +         VALID_MFN(mfn = __gpfn_to_mfn(d, l1e_get_pfn(gpte))) )
  15.237      {
  15.238 -        spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
  15.239 +        spte = l1e_create_pfn(mfn, l1e_get_flags(gpte));
  15.240          
  15.241          if ( shadow_mode_log_dirty(d) ||
  15.242 -             !(gpte & _PAGE_DIRTY) ||
  15.243 +             !(l1e_get_flags(gpte) & _PAGE_DIRTY) ||
  15.244               mfn_is_page_table(mfn) )
  15.245          {
  15.246 -            spte &= ~_PAGE_RW;
  15.247 +            l1e_remove_flags(&spte, _PAGE_RW);
  15.248          }
  15.249      }
  15.250  
  15.251 @@ -661,14 +668,15 @@ static inline void l1pte_propagate_from_
  15.252  }
  15.253  
  15.254  static inline void hl2e_propagate_from_guest(
  15.255 -    struct domain *d, unsigned long gpde, unsigned long *hl2e_p)
  15.256 +    struct domain *d, l2_pgentry_t gpde, l1_pgentry_t *hl2e_p)
  15.257  {
  15.258 -    unsigned long pfn = gpde >> PAGE_SHIFT;
  15.259 -    unsigned long mfn, hl2e;
  15.260 +    unsigned long pfn = l2e_get_pfn(gpde);
  15.261 +    unsigned long mfn;
  15.262 +    l1_pgentry_t hl2e;
  15.263 +    
  15.264 +    hl2e = l1e_empty();
  15.265  
  15.266 -    hl2e = 0;
  15.267 -
  15.268 -    if ( gpde & _PAGE_PRESENT )
  15.269 +    if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
  15.270      {
  15.271          if ( unlikely((current->domain != d) && !shadow_mode_external(d)) )
  15.272          {
  15.273 @@ -683,30 +691,31 @@ static inline void hl2e_propagate_from_g
  15.274              mfn = __gpfn_to_mfn(d, pfn);
  15.275  
  15.276          if ( VALID_MFN(mfn) && (mfn < max_page) )
  15.277 -            hl2e = (mfn << PAGE_SHIFT) | __PAGE_HYPERVISOR;
  15.278 +            hl2e = l1e_create_pfn(mfn, __PAGE_HYPERVISOR);
  15.279      }
  15.280  
  15.281 -    if ( hl2e || gpde )
  15.282 -        SH_VVLOG("%s: gpde=%p hl2e=%p", __func__, gpde, hl2e);
  15.283 +    if ( l1e_get_value(hl2e) || l2e_get_value(gpde) )
  15.284 +        SH_VVLOG("%s: gpde=%p hl2e=%p", __func__,
  15.285 +                 l2e_get_value(gpde), l1e_get_value(hl2e));
  15.286  
  15.287      *hl2e_p = hl2e;
  15.288  }
  15.289  
  15.290  static inline void l2pde_general(
  15.291      struct domain *d,
  15.292 -    unsigned long *gpde_p,
  15.293 -    unsigned long *spde_p,
  15.294 +    l2_pgentry_t *gpde_p,
  15.295 +    l2_pgentry_t *spde_p,
  15.296      unsigned long sl1mfn)
  15.297  {
  15.298 -    unsigned long gpde = *gpde_p;
  15.299 -    unsigned long spde;
  15.300 +    l2_pgentry_t gpde = *gpde_p;
  15.301 +    l2_pgentry_t spde;
  15.302  
  15.303 -    spde = 0;
  15.304 -    if ( (gpde & _PAGE_PRESENT) && (sl1mfn != 0) )
  15.305 +    spde = l2e_empty();
  15.306 +    if ( (l2e_get_flags(gpde) & _PAGE_PRESENT) && (sl1mfn != 0) )
  15.307      {
  15.308 -        spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) | 
  15.309 -            _PAGE_RW | _PAGE_ACCESSED;
  15.310 -        gpde |= _PAGE_ACCESSED; /* N.B. PDEs do not have a dirty bit. */
  15.311 +        spde = l2e_create_pfn(sl1mfn, 
  15.312 +                              l2e_get_flags(gpde) | _PAGE_RW | _PAGE_ACCESSED);
  15.313 +        l2e_add_flags(&gpde, _PAGE_ACCESSED); /* N.B. PDEs do not have a dirty bit. */
  15.314  
  15.315          // XXX mafetter: Hmm...
  15.316          //     Shouldn't the dirty log be checked/updated here?
  15.317 @@ -715,19 +724,21 @@ static inline void l2pde_general(
  15.318          *gpde_p = gpde;
  15.319      }
  15.320  
  15.321 -    if ( spde || gpde )
  15.322 -        SH_VVLOG("%s: gpde=%p, new spde=%p", __func__, gpde, spde);
  15.323 +    if ( l2e_get_value(spde) || l2e_get_value(gpde) )
  15.324 +        SH_VVLOG("%s: gpde=%p, new spde=%p", __func__,
  15.325 +                 l2e_get_value(gpde), l2e_get_value(spde));
  15.326  
  15.327      *spde_p = spde;
  15.328  }
  15.329  
  15.330  static inline void l2pde_propagate_from_guest(
  15.331 -    struct domain *d, unsigned long *gpde_p, unsigned long *spde_p)
  15.332 +    struct domain *d, l2_pgentry_t *gpde_p, l2_pgentry_t *spde_p)
  15.333  {
  15.334 -    unsigned long gpde = *gpde_p, sl1mfn = 0;
  15.335 +    l2_pgentry_t gpde = *gpde_p;
  15.336 +    unsigned long sl1mfn = 0;
  15.337  
  15.338 -    if ( gpde & _PAGE_PRESENT )
  15.339 -        sl1mfn =  __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
  15.340 +    if ( l2e_get_flags(gpde) & _PAGE_PRESENT )
   15.341 +        sl1mfn = __shadow_status(d, l2e_get_pfn(gpde), PGT_l1_shadow);
  15.342      l2pde_general(d, gpde_p, spde_p, sl1mfn);
  15.343  }
  15.344      
  15.345 @@ -738,10 +749,10 @@ static inline void l2pde_propagate_from_
  15.346  static int inline
  15.347  validate_pte_change(
  15.348      struct domain *d,
  15.349 -    unsigned long new_pte,
  15.350 -    unsigned long *shadow_pte_p)
  15.351 +    l1_pgentry_t new_pte,
  15.352 +    l1_pgentry_t *shadow_pte_p)
  15.353  {
  15.354 -    unsigned long old_spte, new_spte;
  15.355 +    l1_pgentry_t old_spte, new_spte;
  15.356  
  15.357      perfc_incrc(validate_pte_calls);
  15.358  
  15.359 @@ -754,16 +765,16 @@ validate_pte_change(
  15.360  
  15.361      // only do the ref counting if something important changed.
  15.362      //
  15.363 -    if ( ((old_spte | new_spte) & _PAGE_PRESENT ) &&
  15.364 -         ((old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT)) )
  15.365 +    if ( ((l1e_get_value(old_spte) | l1e_get_value(new_spte)) & _PAGE_PRESENT ) &&
  15.366 +         l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
  15.367      {
  15.368          perfc_incrc(validate_pte_changes);
  15.369  
  15.370 -        if ( (new_spte & _PAGE_PRESENT) &&
  15.371 -             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
  15.372 -            new_spte = 0;
  15.373 -        if ( old_spte & _PAGE_PRESENT )
  15.374 -            put_page_from_l1e(mk_l1_pgentry(old_spte), d);
  15.375 +        if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
  15.376 +             !shadow_get_page_from_l1e(new_spte, d) )
  15.377 +            new_spte = l1e_empty();
  15.378 +        if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
  15.379 +            put_page_from_l1e(old_spte, d);
  15.380      }
  15.381  
  15.382      *shadow_pte_p = new_spte;
  15.383 @@ -777,10 +788,10 @@ validate_pte_change(
  15.384  static int inline
  15.385  validate_hl2e_change(
  15.386      struct domain *d,
  15.387 -    unsigned long new_gpde,
  15.388 -    unsigned long *shadow_hl2e_p)
  15.389 +    l2_pgentry_t new_gpde,
  15.390 +    l1_pgentry_t *shadow_hl2e_p)
  15.391  {
  15.392 -    unsigned long old_hl2e, new_hl2e;
  15.393 +    l1_pgentry_t old_hl2e, new_hl2e;
  15.394  
  15.395      perfc_incrc(validate_hl2e_calls);
  15.396  
  15.397 @@ -789,16 +800,16 @@ validate_hl2e_change(
  15.398  
  15.399      // Only do the ref counting if something important changed.
  15.400      //
  15.401 -    if ( ((old_hl2e | new_hl2e) & _PAGE_PRESENT) &&
  15.402 -         ((old_hl2e ^ new_hl2e) & (PAGE_MASK | _PAGE_PRESENT)) )
  15.403 +    if ( ((l1e_get_flags(old_hl2e) | l1e_get_flags(new_hl2e)) & _PAGE_PRESENT) &&
  15.404 +         l1e_has_changed(&old_hl2e, &new_hl2e, _PAGE_PRESENT) )
  15.405      {
  15.406          perfc_incrc(validate_hl2e_changes);
  15.407  
  15.408 -        if ( (new_hl2e & _PAGE_PRESENT) &&
  15.409 -             !get_page(pfn_to_page(new_hl2e >> PAGE_SHIFT), d) )
  15.410 -            new_hl2e = 0;
  15.411 -        if ( old_hl2e & _PAGE_PRESENT )
  15.412 -            put_page(pfn_to_page(old_hl2e >> PAGE_SHIFT));
  15.413 +        if ( (l1e_get_flags(new_hl2e) & _PAGE_PRESENT) &&
  15.414 +             !get_page(pfn_to_page(l1e_get_pfn(new_hl2e)), d) )
  15.415 +            new_hl2e = l1e_empty();
  15.416 +        if ( l1e_get_flags(old_hl2e) & _PAGE_PRESENT )
  15.417 +            put_page(pfn_to_page(l1e_get_pfn(old_hl2e)));
  15.418      }
  15.419  
  15.420      *shadow_hl2e_p = new_hl2e;
  15.421 @@ -813,10 +824,10 @@ validate_hl2e_change(
  15.422  static int inline
  15.423  validate_pde_change(
  15.424      struct domain *d,
  15.425 -    unsigned long new_gpde,
  15.426 -    unsigned long *shadow_pde_p)
  15.427 +    l2_pgentry_t new_gpde,
  15.428 +    l2_pgentry_t *shadow_pde_p)
  15.429  {
  15.430 -    unsigned long old_spde, new_spde;
  15.431 +    l2_pgentry_t old_spde, new_spde;
  15.432  
  15.433      perfc_incrc(validate_pde_calls);
  15.434  
  15.435 @@ -828,16 +839,16 @@ validate_pde_change(
  15.436  
  15.437      // Only do the ref counting if something important changed.
  15.438      //
  15.439 -    if ( ((old_spde | new_spde) & _PAGE_PRESENT) &&
  15.440 -         ((old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT)) )
  15.441 +    if ( ((l2e_get_value(old_spde) | l2e_get_value(new_spde)) & _PAGE_PRESENT) &&
  15.442 +         l2e_has_changed(&old_spde, &new_spde, _PAGE_PRESENT) )
  15.443      {
  15.444          perfc_incrc(validate_pde_changes);
  15.445  
  15.446 -        if ( (new_spde & _PAGE_PRESENT) &&
  15.447 -             !get_shadow_ref(new_spde >> PAGE_SHIFT) )
  15.448 +        if ( (l2e_get_flags(new_spde) & _PAGE_PRESENT) &&
  15.449 +             !get_shadow_ref(l2e_get_pfn(new_spde)) )
  15.450              BUG();
  15.451 -        if ( old_spde & _PAGE_PRESENT )
  15.452 -            put_shadow_ref(old_spde >> PAGE_SHIFT);
  15.453 +        if ( l2e_get_flags(old_spde) & _PAGE_PRESENT )
  15.454 +            put_shadow_ref(l2e_get_pfn(old_spde));
  15.455      }
  15.456  
  15.457      *shadow_pde_p = new_spde;
  15.458 @@ -1347,19 +1358,20 @@ shadow_update_min_max(unsigned long smfn
  15.459  extern void shadow_map_l1_into_current_l2(unsigned long va);
  15.460  
  15.461  void static inline
  15.462 -shadow_set_l1e(unsigned long va, unsigned long new_spte, int create_l1_shadow)
  15.463 +shadow_set_l1e(unsigned long va, l1_pgentry_t new_spte, int create_l1_shadow)
  15.464  {
  15.465      struct exec_domain *ed = current;
  15.466      struct domain *d = ed->domain;
  15.467 -    unsigned long sl2e, old_spte;
  15.468 +    l2_pgentry_t sl2e;
  15.469 +    l1_pgentry_t old_spte;
  15.470  
  15.471  #if 0
  15.472      printk("shadow_set_l1e(va=%p, new_spte=%p, create=%d)\n",
  15.473 -           va, new_spte, create_l1_shadow);
  15.474 +           va, l1e_get_value(new_spte), create_l1_shadow);
  15.475  #endif
  15.476  
  15.477      __shadow_get_l2e(ed, va, &sl2e);
  15.478 -    if ( !(sl2e & _PAGE_PRESENT) )
  15.479 +    if ( !(l2e_get_flags(sl2e) & _PAGE_PRESENT) )
  15.480      {
  15.481          /*
  15.482           * Either the L1 is not shadowed, or the shadow isn't linked into
  15.483 @@ -1372,12 +1384,11 @@ shadow_set_l1e(unsigned long va, unsigne
  15.484          }
  15.485          else /* check to see if it exists; if so, link it in */
  15.486          {
  15.487 -            unsigned long gpde =
  15.488 -                l2_pgentry_val(linear_l2_table(ed)[l2_table_offset(va)]);
  15.489 -            unsigned long gl1pfn = gpde >> PAGE_SHIFT;
  15.490 +            l2_pgentry_t gpde = linear_l2_table(ed)[l2_table_offset(va)];
  15.491 +            unsigned long gl1pfn = l2e_get_pfn(gpde);
  15.492              unsigned long sl1mfn = __shadow_status(d, gl1pfn, PGT_l1_shadow);
  15.493  
  15.494 -            ASSERT( gpde & _PAGE_PRESENT );
  15.495 +            ASSERT( l2e_get_flags(gpde) & _PAGE_PRESENT );
  15.496  
  15.497              if ( sl1mfn )
  15.498              {
  15.499 @@ -1397,47 +1408,49 @@ shadow_set_l1e(unsigned long va, unsigne
  15.500          }
  15.501      }
  15.502  
  15.503 -    old_spte = l1_pgentry_val(shadow_linear_pg_table[l1_linear_offset(va)]);
  15.504 +    old_spte = shadow_linear_pg_table[l1_linear_offset(va)];
  15.505  
  15.506      // only do the ref counting if something important changed.
  15.507      //
  15.508 -    if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
  15.509 +    if ( l1e_has_changed(&old_spte, &new_spte, _PAGE_RW | _PAGE_PRESENT) )
  15.510      {
  15.511 -        if ( (new_spte & _PAGE_PRESENT) &&
  15.512 -             !shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d) )
  15.513 -            new_spte = 0;
  15.514 -        if ( old_spte & _PAGE_PRESENT )
  15.515 -            put_page_from_l1e(mk_l1_pgentry(old_spte), d);
  15.516 +        if ( (l1e_get_flags(new_spte) & _PAGE_PRESENT) &&
  15.517 +             !shadow_get_page_from_l1e(new_spte, d) )
  15.518 +            new_spte = l1e_empty();
  15.519 +        if ( l1e_get_flags(old_spte) & _PAGE_PRESENT )
  15.520 +            put_page_from_l1e(old_spte, d);
  15.521      }
  15.522  
  15.523 -    shadow_linear_pg_table[l1_linear_offset(va)] = mk_l1_pgentry(new_spte);
  15.524 +    shadow_linear_pg_table[l1_linear_offset(va)] = new_spte;
  15.525  
  15.526 -    shadow_update_min_max(sl2e >> PAGE_SHIFT, l1_table_offset(va));
  15.527 +    shadow_update_min_max(l2e_get_pfn(sl2e), l1_table_offset(va));
  15.528  }
  15.529  
  15.530  /************************************************************************/
  15.531  
  15.532 -static inline unsigned long gva_to_gpte(unsigned long gva)
  15.533 +static inline l1_pgentry_t gva_to_gpte(unsigned long gva)
  15.534  {
  15.535 -    unsigned long gpde, gpte;
  15.536 +    l2_pgentry_t gpde;
  15.537 +    l1_pgentry_t gpte;
  15.538      struct exec_domain *ed = current;
  15.539  
  15.540      ASSERT( shadow_mode_translate(current->domain) );
  15.541  
  15.542      __guest_get_l2e(ed, gva, &gpde);
  15.543 -    if ( unlikely(!(gpde & _PAGE_PRESENT)) )
  15.544 -        return 0;
  15.545 +    if ( unlikely(!(l2e_get_flags(gpde) & _PAGE_PRESENT)) )
   15.546 +        return l1e_empty();
  15.547  
  15.548      // This is actually overkill - we only need to make sure the hl2
  15.549      // is in-sync.
  15.550      //
  15.551      shadow_sync_va(ed, gva);
  15.552  
  15.553 -    if ( unlikely(__get_user(gpte, (unsigned long *)
  15.554 -                             &linear_pg_table[gva >> PAGE_SHIFT])) )
  15.555 +    if ( unlikely(__copy_from_user(&gpte,
  15.556 +                                   &linear_pg_table[gva >> PAGE_SHIFT],
  15.557 +                                   sizeof(gpte))) )
  15.558      {
  15.559          FSH_LOG("gva_to_gpte got a fault on gva=%p", gva);
  15.560 -        return 0;
  15.561 +        return l1e_empty();
  15.562      }
  15.563  
  15.564      return gpte;
  15.565 @@ -1445,13 +1458,13 @@ static inline unsigned long gva_to_gpte(
  15.566  
  15.567  static inline unsigned long gva_to_gpa(unsigned long gva)
  15.568  {
  15.569 -    unsigned long gpte;
  15.570 +    l1_pgentry_t gpte;
  15.571  
  15.572      gpte = gva_to_gpte(gva);
  15.573 -    if ( !(gpte & _PAGE_PRESENT) )
  15.574 +    if ( !(l1e_get_flags(gpte) & _PAGE_PRESENT) )
  15.575          return 0;
  15.576  
  15.577 -    return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK); 
  15.578 +    return l1e_get_phys(gpte) + (gva & ~PAGE_MASK); 
  15.579  }
  15.580  
  15.581  /************************************************************************/
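
Several hunks above replace open-coded XOR tests with l?e_has_changed().
The helper packages the recurring "(old ^ new) & (PAGE_MASK | flags)"
test: it reports a change in the frame-address bits or in exactly the flag
bits the caller names, and ignores everything else. A sketch (illustrative
only, for some valid frame number mfn):

    l1_pgentry_t a = l1e_create_pfn(mfn, _PAGE_PRESENT | _PAGE_ACCESSED);
    l1_pgentry_t b = l1e_create_pfn(mfn, _PAGE_PRESENT);

    ASSERT(!l1e_has_changed(&a, &b, _PAGE_PRESENT)); /* ACCESSED ignored */
    ASSERT(l1e_has_changed(&a, &b, _PAGE_ACCESSED)); /* now significant  */
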
    16.1 --- a/xen/include/asm-x86/x86_32/domain_page.h	Mon Apr 18 17:47:08 2005 +0000
    16.2 +++ b/xen/include/asm-x86/x86_32/domain_page.h	Wed Apr 20 11:50:06 2005 +0000
    16.3 @@ -10,7 +10,7 @@
    16.4  #include <xen/config.h>
    16.5  #include <xen/sched.h>
    16.6  
    16.7 -extern unsigned long *mapcache;
    16.8 +extern l1_pgentry_t *mapcache;
    16.9  #define MAPCACHE_ENTRIES        1024
   16.10  
   16.11  /*
    17.1 --- a/xen/include/asm-x86/x86_32/page.h	Mon Apr 18 17:47:08 2005 +0000
    17.2 +++ b/xen/include/asm-x86/x86_32/page.h	Wed Apr 20 11:50:06 2005 +0000
    17.3 @@ -19,36 +19,93 @@
    17.4  #define PADDR_MASK              (~0UL)
    17.5  #define VADDR_MASK              (~0UL)
    17.6  
    17.7 +#define _PAGE_NX                0UL
    17.8 +#define PAGE_FLAG_MASK          0xfff
    17.9 +
   17.10  #ifndef __ASSEMBLY__
   17.11  #include <xen/config.h>
   17.12 -typedef struct { unsigned long l1_lo; } l1_pgentry_t;
   17.13 -typedef struct { unsigned long l2_lo; } l2_pgentry_t;
   17.14 +#include <asm/types.h>
   17.15 +typedef struct { u32 l1_lo; } l1_pgentry_t;
   17.16 +typedef struct { u32 l2_lo; } l2_pgentry_t;
   17.17  typedef l2_pgentry_t root_pgentry_t;
   17.18 +
    17.19 +/* read access (deprecated) */
   17.20 +#define l1e_get_value(_x)         ((_x).l1_lo)
   17.21 +#define l2e_get_value(_x)         ((_x).l2_lo)
   17.22 +
   17.23 +/* read access */
   17.24 +#define l1e_get_pfn(_x)           ((_x).l1_lo >> PAGE_SHIFT)
   17.25 +#define l1e_get_phys(_x)          ((_x).l1_lo &  PAGE_MASK)
   17.26 +#define l1e_get_flags(_x)         ((_x).l1_lo &  PAGE_FLAG_MASK)
   17.27 +
   17.28 +#define l2e_get_pfn(_x)           ((_x).l2_lo >> PAGE_SHIFT)
   17.29 +#define l2e_get_phys(_x)          ((_x).l2_lo &  PAGE_MASK)
   17.30 +#define l2e_get_flags(_x)         ((_x).l2_lo &  PAGE_FLAG_MASK)
   17.31 +
   17.32 +/* write access */
   17.33 +static inline l1_pgentry_t l1e_empty(void)
   17.34 +{
   17.35 +    l1_pgentry_t e = { .l1_lo = 0 };
   17.36 +    return e;
   17.37 +}
   17.38 +static inline l1_pgentry_t l1e_create_pfn(u32 pfn, u32 flags)
   17.39 +{
   17.40 +    l1_pgentry_t e = { .l1_lo = (pfn << PAGE_SHIFT) | flags };
   17.41 +    return e;
   17.42 +}
   17.43 +static inline l1_pgentry_t l1e_create_phys(u32 addr, u32 flags)
   17.44 +{
   17.45 +    l1_pgentry_t e = { .l1_lo = (addr & PAGE_MASK) | flags };
   17.46 +    return e;
   17.47 +}
   17.48 +static inline void l1e_add_flags(l1_pgentry_t *e, u32 flags)
   17.49 +{
   17.50 +    e->l1_lo |= flags;
   17.51 +}
   17.52 +static inline void l1e_remove_flags(l1_pgentry_t *e, u32 flags)
   17.53 +{
   17.54 +    e->l1_lo &= ~flags;
   17.55 +}
   17.56 +
   17.57 +static inline l2_pgentry_t l2e_empty(void)
   17.58 +{
   17.59 +    l2_pgentry_t e = { .l2_lo = 0 };
   17.60 +    return e;
   17.61 +}
   17.62 +static inline l2_pgentry_t l2e_create_pfn(u32 pfn, u32 flags)
   17.63 +{
   17.64 +    l2_pgentry_t e = { .l2_lo = (pfn << PAGE_SHIFT) | flags };
   17.65 +    return e;
   17.66 +}
   17.67 +static inline l2_pgentry_t l2e_create_phys(u32 addr, u32 flags)
   17.68 +{
   17.69 +    l2_pgentry_t e = { .l2_lo = (addr & PAGE_MASK) | flags };
   17.70 +    return e;
   17.71 +}
   17.72 +static inline void l2e_add_flags(l2_pgentry_t *e, u32 flags)
   17.73 +{
   17.74 +    e->l2_lo |= flags;
   17.75 +}
   17.76 +static inline void l2e_remove_flags(l2_pgentry_t *e, u32 flags)
   17.77 +{
   17.78 +    e->l2_lo &= ~flags;
   17.79 +}
   17.80 +
   17.81 +/* check entries */
   17.82 +static inline int l1e_has_changed(l1_pgentry_t *e1, l1_pgentry_t *e2, u32 flags)
   17.83 +{
   17.84 +    return ((e1->l1_lo ^ e2->l1_lo) & (PAGE_MASK | flags)) != 0;
   17.85 +}
   17.86 +static inline int l2e_has_changed(l2_pgentry_t *e1, l2_pgentry_t *e2, u32 flags)
   17.87 +{
   17.88 +    return ((e1->l2_lo ^ e2->l2_lo) & (PAGE_MASK | flags)) != 0;
   17.89 +}
   17.90 +
   17.91  #endif /* !__ASSEMBLY__ */
   17.92  
   17.93 -/* Strip type from a table entry. */
   17.94 -#define l1_pgentry_val(_x)   ((_x).l1_lo)
   17.95 -#define l2_pgentry_val(_x)   ((_x).l2_lo)
   17.96 -#define root_pgentry_val(_x) (l2_pgentry_val(_x))
   17.97 -
   17.98 -/* Add type to a table entry. */
   17.99 -#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
  17.100 -#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
  17.101 -#define mk_root_pgentry(_x) (mk_l2_pgentry(_x))
  17.102 -
  17.103 -/* Turn a typed table entry into a physical address. */
  17.104 -#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & PAGE_MASK)
  17.105 -#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & PAGE_MASK)
  17.106 -#define root_pgentry_to_phys(_x) (l2_pgentry_to_phys(_x))
  17.107 -
  17.108 -/* Turn a typed table entry into a page index. */
  17.109 -#define l1_pgentry_to_pfn(_x)   (l1_pgentry_val(_x) >> PAGE_SHIFT) 
  17.110 -#define l2_pgentry_to_pfn(_x)   (l2_pgentry_val(_x) >> PAGE_SHIFT)
  17.111 -#define root_pgentry_to_pfn(_x) (l2_pgentry_to_pfn(_x))
  17.112 -
  17.113  /* Pagetable walking. */
  17.114 -#define l2_pgentry_to_l1(_x) \
  17.115 -  ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
  17.116 +#define l2e_to_l1e(_x) \
  17.117 +  ((l1_pgentry_t *)__va(l2e_get_phys(_x)))
  17.118  
  17.119  /* Given a virtual address, get an entry offset into a page table. */
  17.120  #define l1_table_offset(_a) \
  17.121 @@ -62,9 +119,12 @@ typedef l2_pgentry_t root_pgentry_t;
  17.122  #define is_guest_l1_slot(_s) (1)
  17.123  #define is_guest_l2_slot(_s) ((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT)
  17.124  
  17.125 -#define PGT_root_page_table PGT_l2_page_table
  17.126 -
  17.127 -#define _PAGE_NX         0UL
  17.128 +#define root_get_pfn              l2e_get_pfn
  17.129 +#define root_get_flags            l2e_get_flags
  17.130 +#define root_get_value            l2e_get_value
  17.131 +#define root_empty                l2e_empty
  17.132 +#define root_create_phys          l2e_create_phys
  17.133 +#define PGT_root_page_table       PGT_l2_page_table
  17.134  
  17.135  #define L1_DISALLOW_MASK (3UL << 7)
  17.136  #define L2_DISALLOW_MASK (7UL << 7)
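
The new accessors round-trip as expected on 32-bit entries; a quick sanity
sketch (illustrative only; 0x1000 is an arbitrary frame number):

    l1_pgentry_t e = l1e_create_pfn(0x1000, __PAGE_HYPERVISOR);

    ASSERT(l1e_get_pfn(e)   == 0x1000);
    ASSERT(l1e_get_phys(e)  == (0x1000UL << PAGE_SHIFT));
    ASSERT(l1e_get_flags(e) == __PAGE_HYPERVISOR);

    l1e_add_flags(&e, _PAGE_USER);  /* flags mutate in place...          */
    l1e_remove_flags(&e, _PAGE_RW); /* ...the frame address is untouched */
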
    18.1 --- a/xen/include/asm-x86/x86_64/page.h	Mon Apr 18 17:47:08 2005 +0000
    18.2 +++ b/xen/include/asm-x86/x86_64/page.h	Wed Apr 20 11:50:06 2005 +0000
    18.3 @@ -24,50 +24,165 @@
    18.4  #define PADDR_MASK              ((1UL << PADDR_BITS)-1)
    18.5  #define VADDR_MASK              ((1UL << VADDR_BITS)-1)
    18.6  
    18.7 +#define _PAGE_NX                (cpu_has_nx ? (1UL<<63) : 0UL)
    18.8 +#define PAGE_FLAG_MASK          0xfff
    18.9 +
   18.10  #ifndef __ASSEMBLY__
   18.11  #include <xen/config.h>
   18.12 -typedef struct { unsigned long l1_lo; } l1_pgentry_t;
   18.13 -typedef struct { unsigned long l2_lo; } l2_pgentry_t;
   18.14 -typedef struct { unsigned long l3_lo; } l3_pgentry_t;
   18.15 -typedef struct { unsigned long l4_lo; } l4_pgentry_t;
   18.16 +#include <asm/types.h>
   18.17 +typedef struct { u64 l1_lo; } l1_pgentry_t;
   18.18 +typedef struct { u64 l2_lo; } l2_pgentry_t;
   18.19 +typedef struct { u64 l3_lo; } l3_pgentry_t;
   18.20 +typedef struct { u64 l4_lo; } l4_pgentry_t;
   18.21  typedef l4_pgentry_t root_pgentry_t;
   18.22 +
    18.23 +/* read access (deprecated) */
   18.24 +#define l1e_get_value(_x)         ((_x).l1_lo)
   18.25 +#define l2e_get_value(_x)         ((_x).l2_lo)
   18.26 +#define l3e_get_value(_x)         ((_x).l3_lo)
   18.27 +#define l4e_get_value(_x)         ((_x).l4_lo)
   18.28 +
   18.29 +/* read access */
   18.30 +#define l1e_get_pfn(_x)           (((_x).l1_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
   18.31 +#define l1e_get_phys(_x)          (((_x).l1_lo & (PADDR_MASK&PAGE_MASK)))
   18.32 +#define l1e_get_flags(_x)         ((_x).l1_lo  &  PAGE_FLAG_MASK)
   18.33 +
   18.34 +#define l2e_get_pfn(_x)           (((_x).l2_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
   18.35 +#define l2e_get_phys(_x)          (((_x).l2_lo & (PADDR_MASK&PAGE_MASK)))
   18.36 +#define l2e_get_flags(_x)         ((_x).l2_lo  &  PAGE_FLAG_MASK)
   18.37 +
   18.38 +#define l3e_get_pfn(_x)           (((_x).l3_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
   18.39 +#define l3e_get_phys(_x)          (((_x).l3_lo & (PADDR_MASK&PAGE_MASK)))
   18.40 +#define l3e_get_flags(_x)         ((_x).l3_lo  &  PAGE_FLAG_MASK)
   18.41 +
   18.42 +#define l4e_get_pfn(_x)           (((_x).l4_lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
   18.43 +#define l4e_get_phys(_x)          (((_x).l4_lo & (PADDR_MASK&PAGE_MASK)))
   18.44 +#define l4e_get_flags(_x)         ((_x).l4_lo  &  PAGE_FLAG_MASK)
   18.45 +
   18.46 +/* write access */
   18.47 +static inline l1_pgentry_t l1e_empty(void)
   18.48 +{
   18.49 +    l1_pgentry_t e = { .l1_lo = 0 };
   18.50 +    return e;
   18.51 +}
   18.52 +static inline l1_pgentry_t l1e_create_pfn(u64 pfn, u64 flags)
   18.53 +{
   18.54 +    l1_pgentry_t e = { .l1_lo = (pfn << PAGE_SHIFT) | flags };
   18.55 +    return e;
   18.56 +}
   18.57 +static inline l1_pgentry_t l1e_create_phys(u64 addr, u64 flags)
   18.58 +{
   18.59 +    l1_pgentry_t e = { .l1_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
   18.60 +    return e;
   18.61 +}
   18.62 +static inline void l1e_add_flags(l1_pgentry_t *e, u64 flags)
   18.63 +{
   18.64 +    e->l1_lo |= flags;
   18.65 +}
   18.66 +static inline void l1e_remove_flags(l1_pgentry_t *e, u64 flags)
   18.67 +{
   18.68 +    e->l1_lo &= ~flags;
   18.69 +}
   18.70 +
   18.71 +static inline l2_pgentry_t l2e_empty(void)
   18.72 +{
   18.73 +    l2_pgentry_t e = { .l2_lo = 0 };
   18.74 +    return e;
   18.75 +}
   18.76 +static inline l2_pgentry_t l2e_create_pfn(u64 pfn, u64 flags)
   18.77 +{
   18.78 +    l2_pgentry_t e = { .l2_lo = (pfn << PAGE_SHIFT) | flags };
   18.79 +    return e;
   18.80 +}
   18.81 +static inline l2_pgentry_t l2e_create_phys(u64 addr, u64 flags)
   18.82 +{
   18.83 +    l2_pgentry_t e = { .l2_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
   18.84 +    return e;
   18.85 +}
   18.86 +static inline void l2e_add_flags(l2_pgentry_t *e, u64 flags)
   18.87 +{
   18.88 +    e->l2_lo |= flags;
   18.89 +}
   18.90 +static inline void l2e_remove_flags(l2_pgentry_t *e, u64 flags)
   18.91 +{
   18.92 +    e->l2_lo &= ~flags;
   18.93 +}
   18.94 +
   18.95 +static inline l3_pgentry_t l3e_empty(void)
   18.96 +{
   18.97 +    l3_pgentry_t e = { .l3_lo = 0 };
   18.98 +    return e;
   18.99 +}
  18.100 +static inline l3_pgentry_t l3e_create_pfn(u64 pfn, u64 flags)
  18.101 +{
  18.102 +    l3_pgentry_t e = { .l3_lo = (pfn << PAGE_SHIFT) | flags };
  18.103 +    return e;
  18.104 +}
  18.105 +static inline l3_pgentry_t l3e_create_phys(u64 addr, u64 flags)
  18.106 +{
  18.107 +    l3_pgentry_t e = { .l3_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
  18.108 +    return e;
  18.109 +}
  18.110 +static inline void l3e_add_flags(l3_pgentry_t *e, u64 flags)
  18.111 +{
  18.112 +    e->l3_lo |= flags;
  18.113 +}
  18.114 +static inline void l3e_remove_flags(l3_pgentry_t *e, u64 flags)
  18.115 +{
  18.116 +    e->l3_lo &= ~flags;
  18.117 +}
  18.118 +
  18.119 +static inline l4_pgentry_t l4e_empty(void)
  18.120 +{
  18.121 +    l4_pgentry_t e = { .l4_lo = 0 };
  18.122 +    return e;
  18.123 +}
  18.124 +static inline l4_pgentry_t l4e_create_pfn(u64 pfn, u64 flags)
  18.125 +{
  18.126 +    l4_pgentry_t e = { .l4_lo = (pfn << PAGE_SHIFT) | flags };
  18.127 +    return e;
  18.128 +}
  18.129 +static inline l4_pgentry_t l4e_create_phys(u64 addr, u64 flags)
  18.130 +{
  18.131 +    l4_pgentry_t e = { .l4_lo = (addr & (PADDR_MASK&PAGE_MASK)) | flags };
  18.132 +    return e;
  18.133 +}
  18.134 +static inline void l4e_add_flags(l4_pgentry_t *e, u64 flags)
  18.135 +{
  18.136 +    e->l4_lo |= flags;
  18.137 +}
  18.138 +static inline void l4e_remove_flags(l4_pgentry_t *e, u64 flags)
  18.139 +{
  18.140 +    e->l4_lo &= ~flags;
  18.141 +}
  18.142 +
  18.143 +/* check entries */
  18.144 +static inline int l1e_has_changed(l1_pgentry_t *e1, l1_pgentry_t *e2, u32 flags)
  18.145 +{
  18.146 +    return ((e1->l1_lo ^ e2->l1_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
  18.147 +}
  18.148 +static inline int l2e_has_changed(l2_pgentry_t *e1, l2_pgentry_t *e2, u32 flags)
  18.149 +{
  18.150 +    return ((e1->l2_lo ^ e2->l2_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
  18.151 +}
  18.152 +static inline int l3e_has_changed(l3_pgentry_t *e1, l3_pgentry_t *e2, u32 flags)
  18.153 +{
  18.154 +    return ((e1->l3_lo ^ e2->l3_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
  18.155 +}
  18.156 +static inline int l4e_has_changed(l4_pgentry_t *e1, l4_pgentry_t *e2, u32 flags)
  18.157 +{
  18.158 +    return ((e1->l4_lo ^ e2->l4_lo) & ((PADDR_MASK&PAGE_MASK) | flags)) != 0;
  18.159 +}
  18.160 +
  18.161  #endif /* !__ASSEMBLY__ */
  18.162  
  18.163 -/* Strip type from a table entry. */
  18.164 -#define l1_pgentry_val(_x)   ((_x).l1_lo)
  18.165 -#define l2_pgentry_val(_x)   ((_x).l2_lo)
  18.166 -#define l3_pgentry_val(_x)   ((_x).l3_lo)
  18.167 -#define l4_pgentry_val(_x)   ((_x).l4_lo)
  18.168 -#define root_pgentry_val(_x) (l4_pgentry_val(_x))
  18.169 -
  18.170 -/* Add type to a table entry. */
  18.171 -#define mk_l1_pgentry(_x)   ( (l1_pgentry_t) { (_x) } )
  18.172 -#define mk_l2_pgentry(_x)   ( (l2_pgentry_t) { (_x) } )
  18.173 -#define mk_l3_pgentry(_x)   ( (l3_pgentry_t) { (_x) } )
  18.174 -#define mk_l4_pgentry(_x)   ( (l4_pgentry_t) { (_x) } )
  18.175 -#define mk_root_pgentry(_x) (mk_l4_pgentry(_x))
  18.176 -
  18.177 -/* Turn a typed table entry into a physical address. */
  18.178 -#define l1_pgentry_to_phys(_x)   (l1_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
  18.179 -#define l2_pgentry_to_phys(_x)   (l2_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
  18.180 -#define l3_pgentry_to_phys(_x)   (l3_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
  18.181 -#define l4_pgentry_to_phys(_x)   (l4_pgentry_val(_x) & (PADDR_MASK&PAGE_MASK))
  18.182 -#define root_pgentry_to_phys(_x) (l4_pgentry_to_phys(_x))
  18.183 -
  18.184 -/* Turn a typed table entry into a page index. */
  18.185 -#define l1_pgentry_to_pfn(_x)   (l1_pgentry_to_phys(_x) >> PAGE_SHIFT) 
  18.186 -#define l2_pgentry_to_pfn(_x)   (l2_pgentry_to_phys(_x) >> PAGE_SHIFT)
  18.187 -#define l3_pgentry_to_pfn(_x)   (l3_pgentry_to_phys(_x) >> PAGE_SHIFT)
  18.188 -#define l4_pgentry_to_pfn(_x)   (l4_pgentry_to_phys(_x) >> PAGE_SHIFT)
  18.189 -#define root_pgentry_to_pfn(_x) (l4_pgentry_to_pfn(_x))
  18.190 -
  18.191  /* Pagetable walking. */
  18.192 -#define l2_pgentry_to_l1(_x) \
  18.193 -  ((l1_pgentry_t *)__va(l2_pgentry_to_phys(_x)))
  18.194 -#define l3_pgentry_to_l2(_x) \
  18.195 -  ((l2_pgentry_t *)__va(l3_pgentry_to_phys(_x)))
  18.196 -#define l4_pgentry_to_l3(_x) \
  18.197 -  ((l3_pgentry_t *)__va(l4_pgentry_to_phys(_x)))
  18.198 +#define l2e_to_l1e(_x) \
  18.199 +  ((l1_pgentry_t *)__va(l2e_get_phys(_x)))
  18.200 +#define l3e_to_l2e(_x) \
  18.201 +  ((l2_pgentry_t *)__va(l3e_get_phys(_x)))
  18.202 +#define l4e_to_l3e(_x) \
  18.203 +  ((l3_pgentry_t *)__va(l4e_get_phys(_x)))
  18.204  
  18.205  /* Given a virtual address, get an entry offset into a page table. */
  18.206  #define l1_table_offset(_a) \
  18.207 @@ -89,10 +204,13 @@ typedef l4_pgentry_t root_pgentry_t;
  18.208      (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
  18.209       ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT))
  18.210  
  18.211 +#define root_get_pfn              l4e_get_pfn
  18.212 +#define root_get_flags            l4e_get_flags
  18.213 +#define root_get_value            l4e_get_value
  18.214 +#define root_empty                l4e_empty
  18.215 +#define root_create_phys          l4e_create_phys
  18.216  #define PGT_root_page_table PGT_l4_page_table
  18.217  
  18.218 -#define _PAGE_NX         (cpu_has_nx ? (1UL<<63) : 0UL)
  18.219 -
  18.220  #define L1_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (3UL << 7))
  18.221  #define L2_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
  18.222  #define L3_DISALLOW_MASK ((cpu_has_nx?0:(1UL<<63)) | (7UL << 7))
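
Unlike the x86_32 case, _PAGE_NX here is a runtime value: bit 63 on CPUs
with NX support, 0 otherwise. The L?_DISALLOW_MASKs are its mirror image:
on a non-NX CPU bit 63 is reserved and must be rejected in guest entries.
Since bit 63 lies outside PAGE_FLAG_MASK, such a check has to go through
the raw accessor rather than l1e_get_flags(); a sketch (illustrative only;
'bad_l1_entry' is a hypothetical helper, not part of the patch):

    static inline int bad_l1_entry(l1_pgentry_t e)
    {
        /* L1_DISALLOW_MASK includes bit 63 exactly when NX is absent. */
        return (l1e_get_value(e) & L1_DISALLOW_MASK) != 0;
    }
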