ia64/xen-unstable

changeset 10289:9f937ecc4f54

Fix pagetable accessor macros in Xen to have better names and to
preserve top bits of PAE pgdirs situated above 4GB.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jun 01 22:21:39 2006 +0100 (2006-06-01)
parents 808430428622
children bd16e299db3d
files xen/arch/x86/audit.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/smpboot.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/page.h
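
The change is twofold. First, the anonymous mk_pagetable() constructor gives way to a family of explicitly named ones, pagetable_from_paddr(), pagetable_from_pfn(), pagetable_from_page() and pagetable_null(), plus a pagetable_is_null() predicate, so every call site states what it converts from. Second, constructing from a pfn fixes a real bug: several callers computed mfn << PAGE_SHIFT in 32-bit unsigned long arithmetic before handing the result to mk_pagetable(), which under PAE silently dropped the top bits of any page directory sitting above 4GB. A minimal standalone sketch of that truncation (not Xen code; u32 stands in for the 32-bit unsigned long of x86_32):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    typedef uint32_t u32;

    int main(void)
    {
        u32 mfn = 0x123456;                  /* frame whose address is above 4GB */

        u32 pa32 = mfn << PAGE_SHIFT;        /* old callers: 32-bit shift wraps */
        uint64_t pa64 = (uint64_t)mfn << PAGE_SHIFT;

        /* prints truncated 0x23456000, correct 0x123456000 */
        printf("truncated %#x, correct %#llx\n", pa32, (unsigned long long)pa64);
        return 0;
    }
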
line diff
     1.1 --- a/xen/arch/x86/audit.c	Thu Jun 01 21:49:25 2006 +0100
     1.2 +++ b/xen/arch/x86/audit.c	Thu Jun 01 22:21:39 2006 +0100
     1.3 @@ -432,10 +432,10 @@ int audit_adjust_pgtables(struct domain 
     1.4  
     1.5          for_each_vcpu(d, v)
     1.6          {
     1.7 -            if ( pagetable_get_paddr(v->arch.guest_table) )
     1.8 +            if ( !pagetable_is_null(v->arch.guest_table) )
     1.9                  adjust(mfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
    1.10                         !shadow_mode_refcounts(d));
    1.11 -            if ( pagetable_get_paddr(v->arch.shadow_table) )
    1.12 +            if ( !pagetable_is_null(v->arch.shadow_table) )
    1.13                  adjust(mfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
    1.14                         0);
    1.15              if ( v->arch.monitor_shadow_ref )
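
The audit.c hunk above replaces paddr comparisons with the new predicate. The old test widened the stored pfn into a paddr_t only to compare the result against zero; pagetable_is_null() inspects the pfn field directly and says what it means. A compilable sketch of the two forms, with types reproduced from the page.h hunk at the end of this diff:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    typedef uint64_t paddr_t;
    typedef struct { uint32_t pfn; } pagetable_t;

    #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
    #define pagetable_is_null(x)    ((x).pfn == 0)

    int main(void)
    {
        pagetable_t guest_table = { 0 };

        if ( !pagetable_get_paddr(guest_table) )  /* old style: derive, then test */
            printf("old test: null\n");
        if ( pagetable_is_null(guest_table) )     /* new style: states the intent */
            printf("new test: null\n");
        return 0;
    }
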
     2.1 --- a/xen/arch/x86/domain.c	Thu Jun 01 21:49:25 2006 +0100
     2.2 +++ b/xen/arch/x86/domain.c	Thu Jun 01 22:21:39 2006 +0100
     2.3 @@ -327,7 +327,7 @@ int arch_set_info_guest(
     2.4              (gmfn_to_mfn(d, phys_basetab >> PAGE_SHIFT) << PAGE_SHIFT) |
     2.5              (phys_basetab & ~PAGE_MASK);
     2.6  
     2.7 -        v->arch.guest_table = mk_pagetable(phys_basetab);
     2.8 +        v->arch.guest_table = pagetable_from_paddr(phys_basetab);
     2.9      }
    2.10  
    2.11      if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
    2.12 @@ -335,7 +335,7 @@ int arch_set_info_guest(
    2.13  
    2.14      if ( c->flags & VGCF_HVM_GUEST )
    2.15      {
    2.16 -        v->arch.guest_table = mk_pagetable(0);
    2.17 +        v->arch.guest_table = pagetable_null();
    2.18  
    2.19          if ( !hvm_initialize_guest_resources(v) )
    2.20              return -EINVAL;
    2.21 @@ -935,7 +935,7 @@ void domain_relinquish_resources(struct 
    2.22                  put_page_type(mfn_to_page(pfn));
    2.23              put_page(mfn_to_page(pfn));
    2.24  
    2.25 -            v->arch.guest_table = mk_pagetable(0);
    2.26 +            v->arch.guest_table = pagetable_null();
    2.27          }
    2.28  
    2.29          if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
    2.30 @@ -944,7 +944,7 @@ void domain_relinquish_resources(struct 
    2.31                  put_page_type(mfn_to_page(pfn));
    2.32              put_page(mfn_to_page(pfn));
    2.33  
    2.34 -            v->arch.guest_table_user = mk_pagetable(0);
    2.35 +            v->arch.guest_table_user = pagetable_null();
    2.36          }
    2.37      }
    2.38  
     3.1 --- a/xen/arch/x86/domain_build.c	Thu Jun 01 21:49:25 2006 +0100
     3.2 +++ b/xen/arch/x86/domain_build.c	Thu Jun 01 22:21:39 2006 +0100
     3.3 @@ -443,13 +443,13 @@ int construct_dom0(struct domain *d,
     3.4          l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
     3.5              l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
     3.6      }
     3.7 -    v->arch.guest_table = mk_pagetable((unsigned long)l3start);
     3.8 +    v->arch.guest_table = pagetable_from_paddr((unsigned long)l3start);
     3.9  #else
    3.10      l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
    3.11      memcpy(l2tab, idle_pg_table, PAGE_SIZE);
    3.12      l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    3.13          l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
    3.14 -    v->arch.guest_table = mk_pagetable((unsigned long)l2start);
    3.15 +    v->arch.guest_table = pagetable_from_paddr((unsigned long)l2start);
    3.16  #endif
    3.17  
    3.18      for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
    3.19 @@ -577,7 +577,7 @@ int construct_dom0(struct domain *d,
    3.20          l4e_from_paddr(__pa(l4start), __PAGE_HYPERVISOR);
    3.21      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
    3.22          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    3.23 -    v->arch.guest_table = mk_pagetable(__pa(l4start));
    3.24 +    v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
    3.25  
    3.26      l4tab += l4_table_offset(dsi.v_start);
    3.27      mfn = alloc_spfn;
     4.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Jun 01 21:49:25 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Jun 01 22:21:39 2006 +0100
     4.3 @@ -744,34 +744,34 @@ static void svm_ctxt_switch_to(struct vc
     4.4  
     4.5  void svm_final_setup_guest(struct vcpu *v)
     4.6  {
     4.7 +    struct domain *d = v->domain;
     4.8 +    struct vcpu *vc;
     4.9 +
    4.10      v->arch.schedule_tail    = arch_svm_do_launch;
    4.11      v->arch.ctxt_switch_from = svm_ctxt_switch_from;
    4.12      v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
    4.13  
    4.14 -    if (v == v->domain->vcpu[0]) 
    4.15 -    {
    4.16 -	struct domain *d = v->domain;
    4.17 -	struct vcpu *vc;
    4.18 -
    4.19 -	/* Initialize monitor page table */
    4.20 -	for_each_vcpu(d, vc)
    4.21 -	    vc->arch.monitor_table = mk_pagetable(0);
    4.22 -
    4.23 -        /* 
    4.24 -         * Required to do this once per domain
    4.25 -         * TODO: add a seperate function to do these.
    4.26 -         */
    4.27 -        memset(&d->shared_info->evtchn_mask[0], 0xff, 
    4.28 -               sizeof(d->shared_info->evtchn_mask));       
    4.29 -
    4.30 -        /* 
    4.31 -         * Put the domain in shadow mode even though we're going to be using
    4.32 -         * the shared 1:1 page table initially. It shouldn't hurt 
    4.33 -         */
    4.34 -        shadow_mode_enable(d, 
    4.35 -                SHM_enable|SHM_refcounts|
    4.36 -		SHM_translate|SHM_external|SHM_wr_pt_pte);
    4.37 -    }
    4.38 +    if ( v != d->vcpu[0] )
    4.39 +        return;
    4.40 +
    4.41 +    /* Initialize monitor page table */
    4.42 +    for_each_vcpu( d, vc )
    4.43 +        vc->arch.monitor_table = pagetable_null();
    4.44 +
    4.45 +    /* 
    4.46 +     * Required to do this once per domain
    4.47 +     * TODO: add a seperate function to do these.
    4.48 +     */
    4.49 +    memset(&d->shared_info->evtchn_mask[0], 0xff, 
    4.50 +           sizeof(d->shared_info->evtchn_mask));       
    4.51 +
    4.52 +    /* 
    4.53 +     * Put the domain in shadow mode even though we're going to be using
    4.54 +     * the shared 1:1 page table initially. It shouldn't hurt 
    4.55 +     */
    4.56 +    shadow_mode_enable(d,
    4.57 +                       SHM_enable|SHM_refcounts|
    4.58 +                       SHM_translate|SHM_external|SHM_wr_pt_pte);
    4.59  }
    4.60  
    4.61  
    4.62 @@ -868,7 +868,7 @@ static int svm_do_page_fault(unsigned lo
    4.63      /* Use 1:1 page table to identify MMIO address space */
    4.64      if (mmio_space(gpa))
    4.65      {
    4.66 -	/* No support for APIC */
    4.67 +        /* No support for APIC */
    4.68          if (!hvm_apic_support(v->domain) && gpa >= 0xFEC00000)
    4.69          { 
    4.70              int inst_len;
    4.71 @@ -1568,7 +1568,7 @@ static int svm_set_cr0(unsigned long val
    4.72          }
    4.73  
    4.74          /* Now arch.guest_table points to machine physical. */
    4.75 -        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    4.76 +        v->arch.guest_table = pagetable_from_pfn(mfn);
    4.77          update_pagetables(v);
    4.78  
    4.79          HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 
    4.80 @@ -1588,7 +1588,7 @@ static int svm_set_cr0(unsigned long val
    4.81          if ( v->arch.hvm_svm.cpu_cr3 ) {
    4.82              put_page(mfn_to_page(get_mfn_from_gpfn(
    4.83                        v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
    4.84 -            v->arch.guest_table = mk_pagetable(0);
    4.85 +            v->arch.guest_table = pagetable_null();
    4.86          }
    4.87  
    4.88      /*
    4.89 @@ -1597,7 +1597,7 @@ static int svm_set_cr0(unsigned long val
    4.90       * created.
    4.91       */
    4.92      if ((value & X86_CR0_PE) == 0) {
    4.93 -    	if (value & X86_CR0_PG) {
    4.94 +        if (value & X86_CR0_PG) {
    4.95              svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    4.96              return 0;
    4.97          }
    4.98 @@ -1738,7 +1738,7 @@ static int mov_to_cr(int gpreg, int cr, 
    4.99              }
   4.100  
   4.101              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   4.102 -            v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
   4.103 +            v->arch.guest_table = pagetable_from_pfn(mfn);
   4.104  
   4.105              if (old_base_mfn)
   4.106                  put_page(mfn_to_page(old_base_mfn));
   4.107 @@ -1795,7 +1795,7 @@ static int mov_to_cr(int gpreg, int cr, 
   4.108                   * Now arch.guest_table points to machine physical.
   4.109                   */
   4.110  
   4.111 -                v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
   4.112 +                v->arch.guest_table = pagetable_from_pfn(mfn);
   4.113                  update_pagetables(v);
   4.114  
   4.115                  HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Jun 01 21:49:25 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Jun 01 22:21:39 2006 +0100
     5.3 @@ -66,7 +66,7 @@ void vmx_final_setup_guest(struct vcpu *
     5.4  
     5.5          /* Initialize monitor page table */
     5.6          for_each_vcpu(d, vc)
     5.7 -            vc->arch.monitor_table = mk_pagetable(0);
     5.8 +            vc->arch.monitor_table = pagetable_null();
     5.9  
    5.10          /*
    5.11           * Required to do this once per domain
    5.12 @@ -1223,7 +1223,7 @@ vmx_world_restore(struct vcpu *v, struct
    5.13          if(!get_page(mfn_to_page(mfn), v->domain))
    5.14                  return 0;
    5.15          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    5.16 -        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    5.17 +        v->arch.guest_table = pagetable_from_pfn(mfn);
    5.18          if (old_base_mfn)
    5.19               put_page(mfn_to_page(old_base_mfn));
    5.20          /*
    5.21 @@ -1459,7 +1459,7 @@ static int vmx_set_cr0(unsigned long val
    5.22          /*
    5.23           * Now arch.guest_table points to machine physical.
    5.24           */
    5.25 -        v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    5.26 +        v->arch.guest_table = pagetable_from_pfn(mfn);
    5.27          update_pagetables(v);
    5.28  
    5.29          HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
    5.30 @@ -1477,7 +1477,7 @@ static int vmx_set_cr0(unsigned long val
    5.31          if ( v->arch.hvm_vmx.cpu_cr3 ) {
    5.32              put_page(mfn_to_page(get_mfn_from_gpfn(
    5.33                        v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
    5.34 -            v->arch.guest_table = mk_pagetable(0);
    5.35 +            v->arch.guest_table = pagetable_null();
    5.36          }
    5.37  
    5.38      /*
    5.39 @@ -1635,7 +1635,7 @@ static int mov_to_cr(int gp, int cr, str
    5.40                  domain_crash_synchronous(); /* need to take a clean path */
    5.41              }
    5.42              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    5.43 -            v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    5.44 +            v->arch.guest_table = pagetable_from_pfn(mfn);
    5.45              if (old_base_mfn)
    5.46                  put_page(mfn_to_page(old_base_mfn));
    5.47              /*
    5.48 @@ -1690,7 +1690,7 @@ static int mov_to_cr(int gp, int cr, str
    5.49                   * Now arch.guest_table points to machine physical.
    5.50                   */
    5.51  
    5.52 -                v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
    5.53 +                v->arch.guest_table = pagetable_from_pfn(mfn);
    5.54                  update_pagetables(v);
    5.55  
    5.56                  HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
     6.1 --- a/xen/arch/x86/mm.c	Thu Jun 01 21:49:25 2006 +0100
     6.2 +++ b/xen/arch/x86/mm.c	Thu Jun 01 22:21:39 2006 +0100
     6.3 @@ -1714,7 +1714,7 @@ int new_guest_cr3(unsigned long mfn)
     6.4          {
     6.5              /* Switch to idle pagetable: this VCPU has no active p.t. now. */
     6.6              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
     6.7 -            v->arch.guest_table = mk_pagetable(0);
     6.8 +            v->arch.guest_table = pagetable_null();
     6.9              update_pagetables(v);
    6.10              write_cr3(__pa(idle_pg_table));
    6.11              if ( old_base_mfn != 0 )
    6.12 @@ -1736,7 +1736,7 @@ int new_guest_cr3(unsigned long mfn)
    6.13      invalidate_shadow_ldt(v);
    6.14  
    6.15      old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    6.16 -    v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
    6.17 +    v->arch.guest_table = pagetable_from_pfn(mfn);
    6.18      update_pagetables(v); /* update shadow_table and monitor_table */
    6.19  
    6.20      write_ptbase(v);
    6.21 @@ -2003,7 +2003,7 @@ int do_mmuext_op(
    6.22              {
    6.23                  unsigned long old_mfn =
    6.24                      pagetable_get_pfn(v->arch.guest_table_user);
    6.25 -                v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
    6.26 +                v->arch.guest_table_user = pagetable_from_pfn(mfn);
    6.27                  if ( old_mfn != 0 )
    6.28                      put_page_and_type(mfn_to_page(old_mfn));
    6.29              }
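
The two mm.c sites above (and the matching ones in shadow32.c below) are where the 4GB truncation could actually bite: unlike the HVM code, they shifted a plain unsigned long mfn with no (u64) cast before mk_pagetable() divided it back down. A sketch reproducing both constructors from the page.h hunk shows which pfn survives the round trip (u32 again models x86_32's unsigned long; the old macro uses a GCC statement expression, as in the original):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    typedef uint32_t u32;
    typedef struct { u32 pfn; } pagetable_t;

    /* Old: the caller had to form a physical address itself. */
    #define mk_pagetable(pa) \
        ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })

    /* New: the caller passes the frame number; no intermediate address. */
    #define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })

    int main(void)
    {
        u32 mfn = 0x123456;                       /* frame above 4GB */

        pagetable_t old_pt = mk_pagetable(mfn << PAGE_SHIFT);
        pagetable_t new_pt = pagetable_from_pfn(mfn);

        /* prints old 0x23456, new 0x123456: the 32-bit shift lost a bit */
        printf("old %#x, new %#x\n", old_pt.pfn, new_pt.pfn);
        return 0;
    }
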
     7.1 --- a/xen/arch/x86/shadow.c	Thu Jun 01 21:49:25 2006 +0100
     7.2 +++ b/xen/arch/x86/shadow.c	Thu Jun 01 22:21:39 2006 +0100
     7.3 @@ -2472,7 +2472,7 @@ static void shadow_update_pagetables(str
     7.4      if ( !get_shadow_ref(smfn) )
     7.5          BUG();
     7.6      old_smfn = pagetable_get_pfn(v->arch.shadow_table);
     7.7 -    v->arch.shadow_table = mk_pagetable((u64)smfn << PAGE_SHIFT);
     7.8 +    v->arch.shadow_table = pagetable_from_pfn(smfn);
     7.9      if ( old_smfn )
    7.10          put_shadow_ref(old_smfn);
    7.11  
     8.1 --- a/xen/arch/x86/shadow32.c	Thu Jun 01 21:49:25 2006 +0100
     8.2 +++ b/xen/arch/x86/shadow32.c	Thu Jun 01 22:21:39 2006 +0100
     8.3 @@ -583,7 +583,7 @@ static void free_shadow_pages(struct dom
     8.4          if ( pagetable_get_paddr(v->arch.shadow_table) )
     8.5          {
     8.6              put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
     8.7 -            v->arch.shadow_table = mk_pagetable(0);
     8.8 +            v->arch.shadow_table = pagetable_null();
     8.9  
    8.10              if ( shadow_mode_external(d) )
    8.11              {
    8.12 @@ -765,7 +765,7 @@ static void alloc_monitor_pagetable(stru
    8.13      mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] = l2e_empty();
    8.14      mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = l2e_empty();
    8.15  
    8.16 -    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
    8.17 +    v->arch.monitor_table = pagetable_from_pfn(mmfn);
    8.18      v->arch.monitor_vtable = mpl2e;
    8.19  
    8.20      if ( v->vcpu_id == 0 )
    8.21 @@ -830,7 +830,7 @@ void free_monitor_pagetable(struct vcpu 
    8.22      unmap_domain_page_global(v->arch.monitor_vtable);
    8.23      free_domheap_page(mfn_to_page(mfn));
    8.24  
    8.25 -    v->arch.monitor_table = mk_pagetable(0);
    8.26 +    v->arch.monitor_table = pagetable_null();
    8.27      v->arch.monitor_vtable = 0;
    8.28  }
    8.29  
    8.30 @@ -992,7 +992,7 @@ alloc_p2m_table(struct domain *d)
    8.31  
    8.32          l1tab = map_domain_page(page_to_mfn(page));
    8.33          memset(l1tab, 0, PAGE_SIZE);
    8.34 -        d->arch.phys_table = mk_pagetable(page_to_maddr(page));
    8.35 +        d->arch.phys_table = pagetable_from_page(page);
    8.36      }
    8.37  
    8.38      list_ent = d->page_list.next;
    8.39 @@ -1126,7 +1126,7 @@ int shadow_direct_map_init(struct domain
    8.40      memset(root, 0, PAGE_SIZE);
    8.41      unmap_domain_page(root);
    8.42  
    8.43 -    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
    8.44 +    d->arch.phys_table = pagetable_from_page(page);
    8.45  
    8.46      return 1;
    8.47  }
    8.48 @@ -1156,7 +1156,7 @@ void shadow_direct_map_clean(struct doma
    8.49  
    8.50      unmap_domain_page(l2e);
    8.51  
    8.52 -    d->arch.phys_table = mk_pagetable(0);
    8.53 +    d->arch.phys_table = pagetable_null();
    8.54  }
    8.55  
    8.56  int __shadow_mode_enable(struct domain *d, unsigned int mode)
    8.57 @@ -3231,7 +3231,7 @@ void __update_pagetables(struct vcpu *v)
    8.58      if ( !get_shadow_ref(smfn) )
    8.59          BUG();
    8.60      old_smfn = pagetable_get_pfn(v->arch.shadow_table);
    8.61 -    v->arch.shadow_table = mk_pagetable(smfn << PAGE_SHIFT);
    8.62 +    v->arch.shadow_table = pagetable_from_pfn(smfn);
    8.63      if ( old_smfn )
    8.64          put_shadow_ref(old_smfn);
    8.65  
     9.1 --- a/xen/arch/x86/shadow_public.c	Thu Jun 01 21:49:25 2006 +0100
     9.2 +++ b/xen/arch/x86/shadow_public.c	Thu Jun 01 22:21:39 2006 +0100
     9.3 @@ -50,7 +50,7 @@ int shadow_direct_map_init(struct domain
     9.4      memset(root, 0, PAGE_SIZE);
     9.5      root[PAE_SHADOW_SELF_ENTRY] = l3e_from_page(page, __PAGE_HYPERVISOR);
     9.6  
     9.7 -    d->arch.phys_table = mk_pagetable(page_to_maddr(page));
     9.8 +    d->arch.phys_table = pagetable_from_page(page);
     9.9  
    9.10      unmap_domain_page(root);
    9.11      return 1;
    9.12 @@ -92,7 +92,7 @@ void shadow_direct_map_clean(struct doma
    9.13  
    9.14      unmap_domain_page(l3e);
    9.15  
    9.16 -    d->arch.phys_table = mk_pagetable(0);
    9.17 +    d->arch.phys_table = pagetable_null();
    9.18  }
    9.19  
    9.20  /****************************************************************************/
    9.21 @@ -338,7 +338,7 @@ static void alloc_monitor_pagetable(stru
    9.22  
    9.23      /* map the phys_to_machine map into the per domain Read-Only MPT space */
    9.24  
    9.25 -    v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT);
    9.26 +    v->arch.monitor_table = pagetable_from_pfn(mmfn);
    9.27      v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e;
    9.28      mpl4e[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
    9.29  
    9.30 @@ -380,7 +380,7 @@ void free_monitor_pagetable(struct vcpu 
    9.31      unmap_domain_page_global(v->arch.monitor_vtable);
    9.32      free_domheap_page(mfn_to_page(mfn));
    9.33  
    9.34 -    v->arch.monitor_table = mk_pagetable(0);
    9.35 +    v->arch.monitor_table = pagetable_null();
    9.36      v->arch.monitor_vtable = 0;
    9.37  }
    9.38  #elif CONFIG_PAGING_LEVELS == 3
    9.39 @@ -431,7 +431,7 @@ static void alloc_monitor_pagetable(stru
    9.40      for ( i = 0; i < (MACHPHYS_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
    9.41          mpl2e[l2_table_offset(RO_MPT_VIRT_START) + i] = l2e_empty();
    9.42  
    9.43 -    v->arch.monitor_table = mk_pagetable(m3mfn << PAGE_SHIFT); /* < 4GB */
    9.44 +    v->arch.monitor_table = pagetable_from_pfn(m3mfn);
    9.45      v->arch.monitor_vtable = (l2_pgentry_t *) mpl3e;
    9.46  
    9.47      if ( v->vcpu_id == 0 )
    9.48 @@ -492,7 +492,7 @@ void free_monitor_pagetable(struct vcpu 
    9.49      unmap_domain_page_global(v->arch.monitor_vtable);
    9.50      free_domheap_page(mfn_to_page(m3mfn));
    9.51  
    9.52 -    v->arch.monitor_table = mk_pagetable(0);
    9.53 +    v->arch.monitor_table = pagetable_null();
    9.54      v->arch.monitor_vtable = 0;
    9.55  }
    9.56  #endif
    9.57 @@ -924,7 +924,7 @@ void free_shadow_pages(struct domain *d)
    9.58          if ( pagetable_get_paddr(v->arch.shadow_table) )
    9.59          {
    9.60              put_shadow_ref(pagetable_get_pfn(v->arch.shadow_table));
    9.61 -            v->arch.shadow_table = mk_pagetable(0);
    9.62 +            v->arch.shadow_table = pagetable_null();
    9.63  
    9.64              if ( shadow_mode_external(d) )
    9.65              {
    10.1 --- a/xen/arch/x86/smpboot.c	Thu Jun 01 21:49:25 2006 +0100
    10.2 +++ b/xen/arch/x86/smpboot.c	Thu Jun 01 22:21:39 2006 +0100
    10.3 @@ -908,7 +908,7 @@ static int __devinit do_boot_cpu(int api
    10.4  	idle_vcpu[cpu] = v;
    10.5  	BUG_ON(v == NULL);
    10.6  
    10.7 -	v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    10.8 +	v->arch.monitor_table = pagetable_from_paddr(__pa(idle_pg_table));
    10.9  
   10.10  	/* start_eip had better be page-aligned! */
   10.11  	start_eip = setup_trampoline();
    11.1 --- a/xen/arch/x86/x86_32/mm.c	Thu Jun 01 21:49:25 2006 +0100
    11.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu Jun 01 22:21:39 2006 +0100
    11.3 @@ -75,7 +75,8 @@ void __init paging_init(void)
    11.4      printk("PAE disabled.\n");
    11.5  #endif
    11.6  
    11.7 -    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    11.8 +    idle_vcpu[0]->arch.monitor_table =
    11.9 +        pagetable_from_paddr(__pa(idle_pg_table));
   11.10  
   11.11      if ( cpu_has_pge )
   11.12      {
    12.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Jun 01 21:49:25 2006 +0100
    12.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu Jun 01 22:21:39 2006 +0100
    12.3 @@ -81,7 +81,8 @@ void __init paging_init(void)
    12.4      l2_pgentry_t *l2_ro_mpt;
    12.5      struct page_info *pg;
    12.6  
    12.7 -    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
    12.8 +    idle_vcpu[0]->arch.monitor_table =
    12.9 +        pagetable_from_paddr(__pa(idle_pg_table));
   12.10  
   12.11      /* Create user-accessible L2 directory to map the MPT for guests. */
   12.12      l3_ro_mpt = alloc_xenheap_page();
    13.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Jun 01 21:49:25 2006 +0100
    13.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Jun 01 22:21:39 2006 +0100
    13.3 @@ -195,7 +195,7 @@ unsigned long do_iret(void)
    13.4      /* Returning to user mode? */
    13.5      if ( (iret_saved.cs & 3) == 3 )
    13.6      {
    13.7 -        if ( unlikely(pagetable_get_paddr(v->arch.guest_table_user) == 0) )
    13.8 +        if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
    13.9          {
   13.10              DPRINTK("Guest switching to user mode with no user page tables\n");
   13.11              domain_crash_synchronous();
    14.1 --- a/xen/include/asm-x86/page.h	Thu Jun 01 21:49:25 2006 +0100
    14.2 +++ b/xen/include/asm-x86/page.h	Thu Jun 01 22:21:39 2006 +0100
    14.3 @@ -172,10 +172,13 @@ typedef struct { u32 pfn; } pagetable_t;
    14.4  /* x86_64 */
    14.5  typedef struct { u64 pfn; } pagetable_t;
    14.6  #endif
    14.7 -#define pagetable_get_paddr(x) ((paddr_t)(x).pfn << PAGE_SHIFT)
    14.8 -#define pagetable_get_pfn(x)   ((x).pfn)
    14.9 -#define mk_pagetable(pa)       \
   14.10 -    ({ pagetable_t __p; __p.pfn = (pa) >> PAGE_SHIFT; __p; })
   14.11 +#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
   14.12 +#define pagetable_get_pfn(x)    ((x).pfn)
   14.13 +#define pagetable_is_null(x)    ((x).pfn == 0)
   14.14 +#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
   14.15 +#define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
   14.16 +#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
   14.17 +#define pagetable_null()        pagetable_from_pfn(0)
   14.18  #endif
   14.19  
   14.20  #define clear_page(_p)      memset((void *)(_p), 0, PAGE_SIZE)
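
Taken together, the new page.h interface round-trips cleanly. A usage sketch with the macros copied from the hunk above and a stubbed page_to_mfn() (Xen's real one indexes the frame table; the stub is purely for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    typedef uint64_t u64;
    typedef u64 paddr_t;
    typedef struct { u64 pfn; } pagetable_t;      /* the x86_64 flavour */

    struct page_info { unsigned long mfn; };      /* stub, not Xen's */
    #define page_to_mfn(pg) ((pg)->mfn)           /* stub for illustration */

    #define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
    #define pagetable_get_pfn(x)    ((x).pfn)
    #define pagetable_is_null(x)    ((x).pfn == 0)
    #define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
    #define pagetable_from_page(pg) pagetable_from_pfn(page_to_mfn(pg))
    #define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
    #define pagetable_null()        pagetable_from_pfn(0)

    int main(void)
    {
        struct page_info pg = { .mfn = 0x123456 };

        pagetable_t a = pagetable_from_page(&pg);
        pagetable_t b = pagetable_from_paddr((paddr_t)0x123456 << PAGE_SHIFT);

        assert(pagetable_get_pfn(a) == pagetable_get_pfn(b));
        assert(pagetable_get_paddr(a) == 0x123456000ULL);
        assert(!pagetable_is_null(a));
        assert(pagetable_is_null(pagetable_null()));
        return 0;
    }
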