ia64/xen-unstable

changeset 3737:b8f9a4e1627b

bitkeeper revision 1.1159.1.550 (4208ff07n_vZmQRm3MGLW7fSFD_y4g)

Small naming cleanup: page-table 'entries' and 'shift' macros now share the
same naming style (e.g. ENTRIES_PER_L1_PAGETABLE becomes L1_PAGETABLE_ENTRIES).
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 08 18:03:51 2005 +0000 (2005-02-08)
parents 7406a28a87bc
children d633a3d0f36c
files xen/arch/x86/domain.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/include/asm-x86/page.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/page.h xen/include/asm-x86/x86_64/page.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Tue Feb 08 17:49:09 2005 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Tue Feb 08 18:03:51 2005 +0000
     1.3 @@ -329,7 +329,7 @@ static void monitor_mk_pagetable(struct 
     1.4      phys_table = (l2_pgentry_t *) map_domain_mem(pagetable_val(
     1.5                                          ed->arch.phys_table));
     1.6      memcpy(d->arch.mm_perdomain_pt, phys_table,
     1.7 -           ENTRIES_PER_L1_PAGETABLE * sizeof(l1_pgentry_t));
     1.8 +           L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
     1.9  
    1.10      unmap_domain_mem(phys_table);
    1.11      unmap_domain_mem(mpl2e);
     2.1 --- a/xen/arch/x86/mm.c	Tue Feb 08 17:49:09 2005 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Tue Feb 08 18:03:51 2005 +0000
     2.3 @@ -560,7 +560,7 @@ static int alloc_l1_table(struct pfn_inf
     2.4  
     2.5      pl1e = map_domain_mem(page_nr << PAGE_SHIFT);
     2.6  
     2.7 -    for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
     2.8 +    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     2.9          if ( unlikely(!get_page_from_l1e(pl1e[i], d)) )
    2.10              goto fail;
    2.11  
    2.12 @@ -600,7 +600,7 @@ static void free_l1_table(struct pfn_inf
    2.13  
    2.14      pl1e = map_domain_mem(page_nr << PAGE_SHIFT);
    2.15  
    2.16 -    for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    2.17 +    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    2.18          put_page_from_l1e(pl1e[i], d);
    2.19  
    2.20      unmap_domain_mem(pl1e);
    2.21 @@ -1917,7 +1917,7 @@ void ptwr_flush(const int which)
    2.22       */
    2.23  
    2.24      pl1e = ptwr_info[cpu].ptinfo[which].pl1e;
    2.25 -    for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    2.26 +    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    2.27      {
    2.28          ol1e = ptwr_info[cpu].ptinfo[which].page[i];
    2.29          nl1e = pl1e[i];
    2.30 @@ -1950,7 +1950,7 @@ void ptwr_flush(const int which)
    2.31               * reference counts are correct.
    2.32               */
    2.33              memcpy(&pl1e[i], &ptwr_info[cpu].ptinfo[which].page[i],
    2.34 -                   (ENTRIES_PER_L1_PAGETABLE - i) * sizeof(l1_pgentry_t));
    2.35 +                   (L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
    2.36              unmap_domain_mem(pl1e);
    2.37              ptwr_info[cpu].ptinfo[which].l1va = 0;
    2.38              UNLOCK_BIGLOCK(d);
    2.39 @@ -2092,7 +2092,7 @@ int ptwr_do_page_fault(unsigned long add
    2.40      ptwr_info[cpu].ptinfo[which].pl1e = map_domain_mem(pfn << PAGE_SHIFT);
    2.41      memcpy(ptwr_info[cpu].ptinfo[which].page,
    2.42             ptwr_info[cpu].ptinfo[which].pl1e,
    2.43 -           ENTRIES_PER_L1_PAGETABLE * sizeof(l1_pgentry_t));
    2.44 +           L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
    2.45      
    2.46      /* Finally, make the p.t. page writable by the guest OS. */
    2.47      pte |= _PAGE_RW;
    2.48 @@ -2238,7 +2238,7 @@ void audit_domain(struct domain *d)
    2.49              case PGT_l1_page_table:
    2.50              case PGT_l2_page_table:
    2.51                  pt = map_domain_mem(pfn<<PAGE_SHIFT);
    2.52 -                for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    2.53 +                for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    2.54                      if ( (pt[i] & _PAGE_PRESENT) &&
    2.55                           ((pt[i] >> PAGE_SHIFT) == xpfn) )
    2.56                          printk("     found dom=%d i=%x pfn=%lx t=%x c=%x\n",
    2.57 @@ -2399,7 +2399,7 @@ void audit_domain(struct domain *d)
    2.58  #endif
    2.59              pt = map_domain_mem( pfn<<PAGE_SHIFT );
    2.60  
    2.61 -            for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    2.62 +            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    2.63              {
    2.64                  if ( pt[i] & _PAGE_PRESENT )
    2.65                  {
    2.66 @@ -2540,7 +2540,7 @@ void audit_domain(struct domain *d)
    2.67  
    2.68              pt = map_domain_mem( pfn<<PAGE_SHIFT );
    2.69  
    2.70 -            for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    2.71 +            for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    2.72              {
    2.73                  if ( pt[i] & _PAGE_PRESENT )
    2.74                  {
     3.1 --- a/xen/arch/x86/shadow.c	Tue Feb 08 17:49:09 2005 +0000
     3.2 +++ b/xen/arch/x86/shadow.c	Tue Feb 08 18:03:51 2005 +0000
     3.3 @@ -121,7 +121,7 @@ static inline int clear_shadow_page(
     3.4      case PGT_l2_page_table:
     3.5          p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
     3.6          if ( shadow_mode(d) == SHM_full_32 )
     3.7 -            memset(p, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
     3.8 +            memset(p, 0, L2_PAGETABLE_ENTRIES * sizeof(*p));
     3.9          else 
    3.10              memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
    3.11          unmap_domain_mem(p);
    3.12 @@ -544,12 +544,12 @@ static void shadow_map_l1_into_current_l
    3.13          __shadow_set_l2e(ed, va, sl2e);
    3.14  
    3.15          gpl1e = (unsigned long *) &(linear_pg_table[
    3.16 -            (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
    3.17 +            (va>>L1_PAGETABLE_SHIFT) & ~(L1_PAGETABLE_ENTRIES-1)]);
    3.18  
    3.19          spl1e = (unsigned long *) &(shadow_linear_pg_table[
    3.20 -            (va>>L1_PAGETABLE_SHIFT) & ~(ENTRIES_PER_L1_PAGETABLE-1)]);
    3.21 +            (va>>L1_PAGETABLE_SHIFT) & ~(L1_PAGETABLE_ENTRIES-1)]);
    3.22  
    3.23 -        for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    3.24 +        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    3.25              l1pte_propagate_from_guest(d, &gpl1e[i], &spl1e[i]);
    3.26      }
    3.27      else
    3.28 @@ -847,7 +847,7 @@ static int check_l1_table(
    3.29      gpl1e = map_domain_mem(g2mfn << PAGE_SHIFT);
    3.30      spl1e = map_domain_mem(s2mfn << PAGE_SHIFT);
    3.31  
    3.32 -    for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    3.33 +    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    3.34          check_pte(d, &gpl1e[i], &spl1e[i], 1, i);
    3.35   
    3.36      unmap_domain_mem(spl1e);
     4.1 --- a/xen/arch/x86/x86_32/mm.c	Tue Feb 08 17:49:09 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_32/mm.c	Tue Feb 08 18:03:51 2005 +0000
     4.3 @@ -30,7 +30,7 @@
     4.4  
     4.5  /* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
     4.6  int map_pages(
     4.7 -    pagetable_t *pt,
     4.8 +    root_pgentry_t *pt,
     4.9      unsigned long v,
    4.10      unsigned long p,
    4.11      unsigned long s,
    4.12 @@ -327,7 +327,7 @@ void *memguard_init(void *heap_start)
    4.13      {
    4.14          l1 = (l1_pgentry_t *)heap_start;
    4.15          heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
    4.16 -        for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
    4.17 +        for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
    4.18              l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
    4.19                                     (j << L1_PAGETABLE_SHIFT) | 
    4.20                                    __PAGE_HYPERVISOR);
     5.1 --- a/xen/arch/x86/x86_64/mm.c	Tue Feb 08 17:49:09 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_64/mm.c	Tue Feb 08 18:03:51 2005 +0000
     5.3 @@ -38,7 +38,7 @@ void *safe_page_alloc(void)
     5.4  
     5.5  /* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
     5.6  int map_pages(
     5.7 -    pagetable_t *pt,
     5.8 +    root_pgentry_t *pt,
     5.9      unsigned long v,
    5.10      unsigned long p,
    5.11      unsigned long s,
    5.12 @@ -119,8 +119,8 @@ void __init paging_init(void)
    5.13      unsigned long i, p, max;
    5.14  
    5.15      /* Map all of physical memory. */
    5.16 -    max = ((max_page + ENTRIES_PER_L1_PAGETABLE - 1) & 
    5.17 -           ~(ENTRIES_PER_L1_PAGETABLE - 1)) << PAGE_SHIFT;
    5.18 +    max = ((max_page + L1_PAGETABLE_ENTRIES - 1) & 
    5.19 +           ~(L1_PAGETABLE_ENTRIES - 1)) << PAGE_SHIFT;
    5.20      map_pages(idle_pg_table, PAGE_OFFSET, 0, max, PAGE_HYPERVISOR);
    5.21  
    5.22      /*
    5.23 @@ -201,7 +201,7 @@ void subarch_init_memory(struct domain *
    5.24              continue;
    5.25          m2p_start_mfn = l2_pgentry_to_pfn(l2e);
    5.26  
    5.27 -        for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
    5.28 +        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    5.29          {
    5.30              frame_table[m2p_start_mfn+i].count_info = PGC_allocated | 1;
    5.31              /* gdt to make sure it's only mapped read-only by non-privileged
    5.32 @@ -303,7 +303,7 @@ void *memguard_init(void *heap_start)
    5.33      for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
    5.34      {
    5.35          ALLOC_PT(l1);
    5.36 -        for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
    5.37 +        for ( j = 0; j < L1_PAGETABLE_ENTRIES; j++ )
    5.38              l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
    5.39                                     (j << L1_PAGETABLE_SHIFT) | 
    5.40                                    __PAGE_HYPERVISOR);
     6.1 --- a/xen/include/asm-x86/page.h	Tue Feb 08 17:49:09 2005 +0000
     6.2 +++ b/xen/include/asm-x86/page.h	Tue Feb 08 18:03:51 2005 +0000
     6.3 @@ -9,6 +9,13 @@
     6.4  #include <asm/x86_64/page.h>
     6.5  #endif
     6.6  
     6.7 +/* Page-table type. */
     6.8 +#ifndef __ASSEMBLY__
     6.9 +typedef struct { unsigned long pt_lo; } pagetable_t;
    6.10 +#define pagetable_val(_x)  ((_x).pt_lo)
    6.11 +#define mk_pagetable(_x)   ( (pagetable_t) { (_x) } )
    6.12 +#endif
    6.13 +
    6.14  #ifndef __ASSEMBLY__
    6.15  #define PAGE_SIZE	         (1UL << PAGE_SHIFT)
    6.16  #else
    6.17 @@ -38,7 +45,7 @@
    6.18  #define DOMAIN_ENTRIES_PER_L2_PAGETABLE	    \
    6.19    (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT)
    6.20  #define HYPERVISOR_ENTRIES_PER_L2_PAGETABLE \
    6.21 -  (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
    6.22 +  (L2_PAGETABLE_ENTRIES - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
    6.23  
    6.24  #ifndef __ASSEMBLY__
    6.25  #include <asm/processor.h>
    6.26 @@ -51,7 +58,7 @@
    6.27  
    6.28  #define va_to_l1mfn(_va) (l2_pgentry_val(linear_l2_table[_va>>L2_PAGETABLE_SHIFT]) >> PAGE_SHIFT)
    6.29  
    6.30 -extern pagetable_t idle_pg_table[ENTRIES_PER_PAGETABLE];
    6.31 +extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
    6.32  
    6.33  extern void paging_init(void);
    6.34  
    6.35 @@ -126,7 +133,7 @@ extern void zap_low_mappings(void);
    6.36  /* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
    6.37  extern int
    6.38  map_pages(
    6.39 -    pagetable_t *pt,
    6.40 +    root_pgentry_t *pt,
    6.41      unsigned long v,
    6.42      unsigned long p,
    6.43      unsigned long s,
     7.1 --- a/xen/include/asm-x86/shadow.h	Tue Feb 08 17:49:09 2005 +0000
     7.2 +++ b/xen/include/asm-x86/shadow.h	Tue Feb 08 18:03:51 2005 +0000
     7.3 @@ -677,7 +677,7 @@ static inline void vmx_update_shadow_sta
     7.4  
     7.5      spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
     7.6      gpl2e = (l2_pgentry_t *)map_domain_mem(gpfn << PAGE_SHIFT);
     7.7 -    memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
     7.8 +    memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
     7.9  
    7.10      ed->arch.shadow_vtable = spl2e;
    7.11      ed->arch.vpagetable = gpl2e; /* expect the guest did clean this up */
     8.1 --- a/xen/include/asm-x86/x86_32/page.h	Tue Feb 08 17:49:09 2005 +0000
     8.2 +++ b/xen/include/asm-x86/x86_32/page.h	Tue Feb 08 18:03:51 2005 +0000
     8.3 @@ -3,19 +3,27 @@
     8.4  #ifndef __X86_32_PAGE_H__
     8.5  #define __X86_32_PAGE_H__
     8.6  
     8.7 -#define L1_PAGETABLE_SHIFT       12
     8.8 -#define L2_PAGETABLE_SHIFT       22
     8.9 -#define PAGE_SHIFT               L1_PAGETABLE_SHIFT
    8.10 +#define L1_PAGETABLE_SHIFT      12
    8.11 +#define L2_PAGETABLE_SHIFT      22
    8.12 +#define PAGE_SHIFT              L1_PAGETABLE_SHIFT
    8.13 +#define ROOT_PAGETABLE_SHIFT    L2_PAGETABLE_SHIFT
    8.14  
    8.15 -#define ENTRIES_PER_L1_PAGETABLE 1024
    8.16 -#define ENTRIES_PER_L2_PAGETABLE 1024
    8.17 +#define L1_PAGETABLE_ENTRIES    1024
    8.18 +#define L2_PAGETABLE_ENTRIES    1024
    8.19 +#define ROOT_PAGETABLE_ENTRIES  L2_PAGETABLE_ENTRIES
    8.20  
    8.21 -#define __PAGE_OFFSET		(0xFC400000)
    8.22 +#define __PAGE_OFFSET           (0xFC400000)
    8.23 +
    8.24 +#define PADDR_BITS              32
    8.25 +#define VADDR_BITS              32
    8.26 +#define PADDR_MASK              (~0UL)
    8.27 +#define VADDR_MASK              (~0UL)
    8.28  
    8.29  #ifndef __ASSEMBLY__
    8.30  #include <xen/config.h>
    8.31  typedef struct { unsigned long l1_lo; } l1_pgentry_t;
    8.32  typedef struct { unsigned long l2_lo; } l2_pgentry_t;
    8.33 +typedef l2_pgentry_t root_pgentry_t;
    8.34  #endif /* !__ASSEMBLY__ */
    8.35  
    8.36  /* Strip type from a table entry. */
    8.37 @@ -40,17 +48,11 @@ typedef struct { unsigned long l2_lo; } 
    8.38  
    8.39  /* Given a virtual address, get an entry offset into a page table. */
    8.40  #define l1_table_offset(_a) \
    8.41 -  (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
    8.42 +  (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
    8.43  #define l2_table_offset(_a) \
    8.44    ((_a) >> L2_PAGETABLE_SHIFT)
    8.45  
    8.46  /* Given a virtual address, get an entry offset into a linear page table. */
    8.47  #define l1_linear_offset(_a) ((_a) >> PAGE_SHIFT)
    8.48  
    8.49 -/* Root page-table definitions. */
    8.50 -#define pagetable_t l2_pgentry_t
    8.51 -#define pagetable_val(_x)  ((_x).l2_lo)
    8.52 -#define mk_pagetable(_x)   ( (l2_pgentry_t) { (_x) } )
    8.53 -#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L2_PAGETABLE
    8.54 -
    8.55  #endif /* __X86_32_PAGE_H__ */
     9.1 --- a/xen/include/asm-x86/x86_64/page.h	Tue Feb 08 17:49:09 2005 +0000
     9.2 +++ b/xen/include/asm-x86/x86_64/page.h	Tue Feb 08 18:03:51 2005 +0000
     9.3 @@ -3,18 +3,20 @@
     9.4  #ifndef __X86_64_PAGE_H__
     9.5  #define __X86_64_PAGE_H__
     9.6  
     9.7 -#define L1_PAGETABLE_SHIFT       12
     9.8 -#define L2_PAGETABLE_SHIFT       21
     9.9 -#define L3_PAGETABLE_SHIFT       30
    9.10 -#define L4_PAGETABLE_SHIFT       39
    9.11 -#define PAGE_SHIFT               L1_PAGETABLE_SHIFT
    9.12 +#define L1_PAGETABLE_SHIFT      12
    9.13 +#define L2_PAGETABLE_SHIFT      21
    9.14 +#define L3_PAGETABLE_SHIFT      30
    9.15 +#define L4_PAGETABLE_SHIFT      39
    9.16 +#define PAGE_SHIFT              L1_PAGETABLE_SHIFT
    9.17 +#define ROOT_PAGETABLE_SHIFT    L4_PAGETABLE_SHIFT
    9.18  
    9.19 -#define ENTRIES_PER_L1_PAGETABLE 512
    9.20 -#define ENTRIES_PER_L2_PAGETABLE 512
    9.21 -#define ENTRIES_PER_L3_PAGETABLE 512
    9.22 -#define ENTRIES_PER_L4_PAGETABLE 512
    9.23 +#define L1_PAGETABLE_ENTRIES    512
    9.24 +#define L2_PAGETABLE_ENTRIES    512
    9.25 +#define L3_PAGETABLE_ENTRIES    512
    9.26 +#define L4_PAGETABLE_ENTRIES    512
    9.27 +#define ROOT_PAGETABLE_ENTRIES  L4_PAGETABLE_ENTRIES
    9.28  
    9.29 -#define __PAGE_OFFSET		(0xFFFF830000000000)
    9.30 +#define __PAGE_OFFSET           (0xFFFF830000000000)
    9.31  
    9.32  /* These may increase in future (phys. bits in particular). */
    9.33  #define PADDR_BITS              40
    9.34 @@ -28,6 +30,7 @@ typedef struct { unsigned long l1_lo; } 
    9.35  typedef struct { unsigned long l2_lo; } l2_pgentry_t;
    9.36  typedef struct { unsigned long l3_lo; } l3_pgentry_t;
    9.37  typedef struct { unsigned long l4_lo; } l4_pgentry_t;
    9.38 +typedef l4_pgentry_t root_pgentry_t;
    9.39  #endif /* !__ASSEMBLY__ */
    9.40  
    9.41  /* Strip type from a table entry. */
    9.42 @@ -64,21 +67,15 @@ typedef struct { unsigned long l4_lo; } 
    9.43  
    9.44  /* Given a virtual address, get an entry offset into a page table. */
    9.45  #define l1_table_offset(_a) \
    9.46 -  (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1))
    9.47 +  (((_a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
    9.48  #define l2_table_offset(_a) \
    9.49 -  (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1))
    9.50 +  (((_a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
    9.51  #define l3_table_offset(_a) \
    9.52 -  (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1))
    9.53 +  (((_a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
    9.54  #define l4_table_offset(_a) \
    9.55 -  (((_a) >> L4_PAGETABLE_SHIFT) & (ENTRIES_PER_L4_PAGETABLE - 1))
    9.56 +  (((_a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
    9.57  
    9.58  /* Given a virtual address, get an entry offset into a linear page table. */
    9.59  #define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> PAGE_SHIFT)
    9.60  
    9.61 -/* Root page-table definitions. */
    9.62 -#define pagetable_t l4_pgentry_t
    9.63 -#define pagetable_val(_x)  ((_x).l4_lo)
    9.64 -#define mk_pagetable(_x)   ( (l4_pgentry_t) { (_x) } )
    9.65 -#define ENTRIES_PER_PAGETABLE ENTRIES_PER_L4_PAGETABLE
    9.66 -
    9.67  #endif /* __X86_64_PAGE_H__ */