direct-io.hg

changeset 8396:3d1c7be170a7

Remove direct references to the frame_table array. Use
pfn_to_page() (or page_to_pfn()) instead.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Dec 20 12:46:56 2005 +0100 (2005-12-20)
parents 1283d309a603
children b92a36713192
files xen/arch/x86/audit.c xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/mm.c xen/arch/x86/shadow.c xen/arch/x86/shadow32.c xen/arch/x86/shadow_public.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/common/memory.c xen/include/asm-x86/shadow.h xen/include/asm-x86/shadow_public.h
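
As context for the mechanical rewrite below, here is a minimal sketch of what
these accessors are assumed to expand to on x86 Xen of this vintage (the real
definitions live in the asm-x86 headers and may differ in detail):

    /* Assumed definitions -- thin wrappers over the global frame_table
     * array, making the two rewrites in this changeset equivalent:
     *   &frame_table[mfn]   becomes  pfn_to_page(mfn)
     *   page - frame_table  becomes  page_to_pfn(page)
     */
    #define pfn_to_page(_pfn)   (frame_table + (_pfn))
    #define page_to_pfn(_page)  ((unsigned long)((_page) - frame_table))

Going through the accessors rather than indexing frame_table directly keeps
callers correct if the frame table is ever split or made sparse, since only
the accessor definitions would need to change.
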
line diff
     1.1 --- a/xen/arch/x86/audit.c	Sun Dec 18 20:29:43 2005 +0100
     1.2 +++ b/xen/arch/x86/audit.c	Tue Dec 20 12:46:56 2005 +0100
     1.3 @@ -61,7 +61,7 @@ int audit_adjust_pgtables(struct domain 
     1.4  #ifdef __i386__
     1.5  #ifdef CONFIG_X86_PAE
     1.6          /* 32b PAE */
     1.7 -        if ( (( frame_table[mfn].u.inuse.type_info & PGT_va_mask ) 
     1.8 +        if ( (( pfn_to_page(mfn)->u.inuse.type_info & PGT_va_mask ) 
     1.9  	    >> PGT_va_shift) == 3 )
    1.10              return l2_table_offset(HYPERVISOR_VIRT_START); 
    1.11          else
    1.12 @@ -364,7 +364,7 @@ int audit_adjust_pgtables(struct domain 
    1.13              {
    1.14                  gmfn = __gpfn_to_mfn(d, a->gpfn_and_flags & PGT_mfn_mask);
    1.15                  smfn = a->smfn;
    1.16 -                page = &frame_table[smfn];
    1.17 +                page = pfn_to_page(smfn);
    1.18  
    1.19                  switch ( a->gpfn_and_flags & PGT_type_mask ) {
    1.20                  case PGT_writable_pred:
    1.21 @@ -433,11 +433,13 @@ int audit_adjust_pgtables(struct domain 
    1.22          for_each_vcpu(d, v)
    1.23          {
    1.24              if ( pagetable_get_paddr(v->arch.guest_table) )
    1.25 -                adjust(&frame_table[pagetable_get_pfn(v->arch.guest_table)], !shadow_mode_refcounts(d));
    1.26 +                adjust(pfn_to_page(pagetable_get_pfn(v->arch.guest_table)),
    1.27 +                       !shadow_mode_refcounts(d));
    1.28              if ( pagetable_get_paddr(v->arch.shadow_table) )
    1.29 -                adjust(&frame_table[pagetable_get_pfn(v->arch.shadow_table)], 0);
    1.30 +                adjust(pfn_to_page(pagetable_get_pfn(v->arch.shadow_table)),
    1.31 +                       0);
    1.32              if ( v->arch.monitor_shadow_ref )
    1.33 -                adjust(&frame_table[v->arch.monitor_shadow_ref], 0);
    1.34 +                adjust(pfn_to_page(v->arch.monitor_shadow_ref), 0);
    1.35          }
    1.36      }
    1.37  
    1.38 @@ -617,7 +619,7 @@ void _audit_domain(struct domain *d, int
    1.39      void scan_for_pfn_in_mfn(struct domain *d, unsigned long xmfn,
    1.40                               unsigned long mfn)
    1.41      {
    1.42 -        struct pfn_info *page = &frame_table[mfn];
    1.43 +        struct pfn_info *page = pfn_to_page(mfn);
    1.44          l1_pgentry_t *pt = map_domain_page(mfn);
    1.45          int i;
    1.46  
     2.1 --- a/xen/arch/x86/dom0_ops.c	Sun Dec 18 20:29:43 2005 +0100
     2.2 +++ b/xen/arch/x86/dom0_ops.c	Tue Dec 20 12:46:56 2005 +0100
     2.3 @@ -210,7 +210,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
     2.4               unlikely((d = find_domain_by_id(dom)) == NULL) )
     2.5              break;
     2.6  
     2.7 -        page = &frame_table[pfn];
     2.8 +        page = pfn_to_page(pfn);
     2.9  
    2.10          if ( likely(get_page(page, d)) )
    2.11          {
    2.12 @@ -285,7 +285,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    2.13                  struct pfn_info *page;
    2.14                  unsigned long mfn = l_arr[j];
    2.15  
    2.16 -                page = &frame_table[mfn];
    2.17 +                page = pfn_to_page(mfn);
    2.18  
    2.19                  if ( likely(pfn_valid(mfn) && get_page(page, d)) ) 
    2.20                  {
    2.21 @@ -350,15 +350,14 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
    2.22              list_ent = d->page_list.next;
    2.23              for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
    2.24              {
    2.25 -                pfn = list_entry(list_ent, struct pfn_info, list) - 
    2.26 -                    frame_table;
    2.27 +                pfn = page_to_pfn(list_entry(list_ent, struct pfn_info, list));
    2.28                  if ( put_user(pfn, buffer) )
    2.29                  {
    2.30                      ret = -EFAULT;
    2.31                      break;
    2.32                  }
    2.33                  buffer++;
    2.34 -                list_ent = frame_table[pfn].list.next;
    2.35 +                list_ent = pfn_to_page(pfn)->list.next;
    2.36              }
    2.37              spin_unlock(&d->page_alloc_lock);
    2.38  
     3.1 --- a/xen/arch/x86/domain.c	Sun Dec 18 20:29:43 2005 +0100
     3.2 +++ b/xen/arch/x86/domain.c	Tue Dec 20 12:46:56 2005 +0100
     3.3 @@ -190,7 +190,7 @@ void dump_pageframe_info(struct domain *
     3.4          list_for_each_entry ( page, &d->page_list, list )
     3.5          {
     3.6              printk("Page %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
     3.7 -                   _p(page_to_phys(page)), _p(page - frame_table),
     3.8 +                   _p(page_to_phys(page)), _p(page_to_pfn(page)),
     3.9                     page->count_info, page->u.inuse.type_info);
    3.10          }
    3.11      }
    3.12 @@ -198,13 +198,13 @@ void dump_pageframe_info(struct domain *
    3.13      list_for_each_entry ( page, &d->xenpage_list, list )
    3.14      {
    3.15          printk("XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
    3.16 -               _p(page_to_phys(page)), _p(page - frame_table),
    3.17 +               _p(page_to_phys(page)), _p(page_to_pfn(page)),
    3.18                 page->count_info, page->u.inuse.type_info);
    3.19      }
    3.20  
    3.21      page = virt_to_page(d->shared_info);
    3.22      printk("Shared_info@%p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
    3.23 -           _p(page_to_phys(page)), _p(page - frame_table), page->count_info,
    3.24 +           _p(page_to_phys(page)), _p(page_to_pfn(page)), page->count_info,
    3.25             page->u.inuse.type_info);
    3.26  }
    3.27  
    3.28 @@ -391,19 +391,19 @@ int arch_set_info_guest(
    3.29  
    3.30      if ( shadow_mode_refcounts(d) )
    3.31      {
    3.32 -        if ( !get_page(&frame_table[phys_basetab>>PAGE_SHIFT], d) )
    3.33 +        if ( !get_page(pfn_to_page(phys_basetab>>PAGE_SHIFT), d) )
    3.34              return -EINVAL;
    3.35      }
    3.36      else if ( !(c->flags & VGCF_VMX_GUEST) )
    3.37      {
    3.38 -        if ( !get_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT], d,
    3.39 +        if ( !get_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT), d,
    3.40                                  PGT_base_page_table) )
    3.41              return -EINVAL;
    3.42      }
    3.43  
    3.44      if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
    3.45      {
    3.46 -        put_page_and_type(&frame_table[phys_basetab>>PAGE_SHIFT]);
    3.47 +        put_page_and_type(pfn_to_page(phys_basetab>>PAGE_SHIFT));
    3.48          return rc;
    3.49      }
    3.50  
     4.1 --- a/xen/arch/x86/domain_build.c	Sun Dec 18 20:29:43 2005 +0100
     4.2 +++ b/xen/arch/x86/domain_build.c	Tue Dec 20 12:46:56 2005 +0100
     4.3 @@ -405,7 +405,7 @@ int construct_dom0(struct domain *d,
     4.4          *l1tab = l1e_from_pfn(mfn, L1_PROT);
     4.5          l1tab++;
     4.6          
     4.7 -        page = &frame_table[mfn];
     4.8 +        page = pfn_to_page(mfn);
     4.9          if ( !get_page_and_type(page, d, PGT_writable_page) )
    4.10              BUG();
    4.11  
    4.12 @@ -418,7 +418,7 @@ int construct_dom0(struct domain *d,
    4.13      l1tab += l1_table_offset(vpt_start);
    4.14      for ( count = 0; count < nr_pt_pages; count++ ) 
    4.15      {
    4.16 -        page = &frame_table[l1e_get_pfn(*l1tab)];
    4.17 +        page = pfn_to_page(l1e_get_pfn(*l1tab));
    4.18          if ( !opt_dom0_shadow )
    4.19              l1e_remove_flags(*l1tab, _PAGE_RW);
    4.20          else
    4.21 @@ -548,7 +548,7 @@ int construct_dom0(struct domain *d,
    4.22          *l1tab = l1e_from_pfn(mfn, L1_PROT);
    4.23          l1tab++;
    4.24  
    4.25 -        page = &frame_table[mfn];
    4.26 +        page = pfn_to_page(mfn);
    4.27          if ( (page->u.inuse.type_info == 0) &&
    4.28               !get_page_and_type(page, d, PGT_writable_page) )
    4.29              BUG();
    4.30 @@ -567,7 +567,7 @@ int construct_dom0(struct domain *d,
    4.31      for ( count = 0; count < nr_pt_pages; count++ ) 
    4.32      {
    4.33          l1e_remove_flags(*l1tab, _PAGE_RW);
    4.34 -        page = &frame_table[l1e_get_pfn(*l1tab)];
    4.35 +        page = pfn_to_page(l1e_get_pfn(*l1tab));
    4.36  
    4.37          /* Read-only mapping + PGC_allocated + page-table page. */
    4.38          page->count_info         = PGC_allocated | 3;
     5.1 --- a/xen/arch/x86/mm.c	Sun Dec 18 20:29:43 2005 +0100
     5.2 +++ b/xen/arch/x86/mm.c	Tue Dec 20 12:46:56 2005 +0100
     5.3 @@ -202,7 +202,7 @@ void arch_init_memory(void)
     5.4      /* First 1MB of RAM is historically marked as I/O. */
     5.5      for ( i = 0; i < 0x100; i++ )
     5.6      {
     5.7 -        page = &frame_table[i];
     5.8 +        page = pfn_to_page(i);
     5.9          page->count_info        = PGC_allocated | 1;
    5.10          page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
    5.11          page_set_owner(page, dom_io);
    5.12 @@ -216,10 +216,10 @@ void arch_init_memory(void)
    5.13          /* Every page from cursor to start of next RAM region is I/O. */
    5.14          rstart_pfn = PFN_UP(e820.map[i].addr);
    5.15          rend_pfn   = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
    5.16 -        while ( pfn < rstart_pfn )
    5.17 +        for ( ; pfn < rstart_pfn; pfn++ )
    5.18          {
    5.19              BUG_ON(!pfn_valid(pfn));
    5.20 -            page = &frame_table[pfn++];
    5.21 +            page = pfn_to_page(pfn);
    5.22              page->count_info        = PGC_allocated | 1;
    5.23              page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
    5.24              page_set_owner(page, dom_io);
    5.25 @@ -253,7 +253,7 @@ void invalidate_shadow_ldt(struct vcpu *
    5.26          pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
    5.27          if ( pfn == 0 ) continue;
    5.28          v->arch.perdomain_ptes[i] = l1e_empty();
    5.29 -        page = &frame_table[pfn];
    5.30 +        page = pfn_to_page(pfn);
    5.31          ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
    5.32          ASSERT_PAGE_IS_DOMAIN(page, v->domain);
    5.33          put_page_and_type(page);
    5.34 @@ -320,13 +320,13 @@ int map_ldt_shadow_page(unsigned int off
    5.35      if ( unlikely(!VALID_MFN(gmfn)) )
    5.36          return 0;
    5.37  
    5.38 -    res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
    5.39 +    res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
    5.40  
    5.41      if ( !res && unlikely(shadow_mode_refcounts(d)) )
    5.42      {
    5.43          shadow_lock(d);
    5.44          shadow_remove_all_write_access(d, gpfn, gmfn);
    5.45 -        res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
    5.46 +        res = get_page_and_type(pfn_to_page(gmfn), d, PGT_ldt_page);
    5.47          shadow_unlock(d);
    5.48      }
    5.49  
    5.50 @@ -344,7 +344,7 @@ int map_ldt_shadow_page(unsigned int off
    5.51  
    5.52  static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
    5.53  {
    5.54 -    struct pfn_info *page = &frame_table[page_nr];
    5.55 +    struct pfn_info *page = pfn_to_page(page_nr);
    5.56  
    5.57      if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
    5.58      {
    5.59 @@ -360,7 +360,7 @@ static int get_page_and_type_from_pagenr
    5.60                                           unsigned long type,
    5.61                                           struct domain *d)
    5.62  {
    5.63 -    struct pfn_info *page = &frame_table[page_nr];
    5.64 +    struct pfn_info *page = pfn_to_page(page_nr);
    5.65  
    5.66      if ( unlikely(!get_page_from_pagenr(page_nr, d)) )
    5.67          return 0;
    5.68 @@ -412,7 +412,7 @@ get_linear_pagetable(
    5.69           * Make sure that the mapped frame is an already-validated L2 table. 
    5.70           * If so, atomically increment the count (checking for overflow).
    5.71           */
    5.72 -        page = &frame_table[pfn];
    5.73 +        page = pfn_to_page(pfn);
    5.74          y = page->u.inuse.type_info;
    5.75          do {
    5.76              x = y;
    5.77 @@ -435,7 +435,7 @@ get_page_from_l1e(
    5.78      l1_pgentry_t l1e, struct domain *d)
    5.79  {
    5.80      unsigned long mfn = l1e_get_pfn(l1e);
    5.81 -    struct pfn_info *page = &frame_table[mfn];
    5.82 +    struct pfn_info *page = pfn_to_page(mfn);
    5.83      int okay;
    5.84      extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
    5.85  
    5.86 @@ -587,7 +587,7 @@ get_page_from_l4e(
    5.87  void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
    5.88  {
    5.89      unsigned long    pfn  = l1e_get_pfn(l1e);
    5.90 -    struct pfn_info *page = &frame_table[pfn];
    5.91 +    struct pfn_info *page = pfn_to_page(pfn);
    5.92      struct domain   *e;
    5.93      struct vcpu     *v;
    5.94  
    5.95 @@ -645,7 +645,7 @@ static void put_page_from_l2e(l2_pgentry
    5.96  {
    5.97      if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) && 
    5.98           (l2e_get_pfn(l2e) != pfn) )
    5.99 -        put_page_and_type(&frame_table[l2e_get_pfn(l2e)]);
   5.100 +        put_page_and_type(pfn_to_page(l2e_get_pfn(l2e)));
   5.101  }
   5.102  
   5.103  
   5.104 @@ -655,7 +655,7 @@ static void put_page_from_l3e(l3_pgentry
   5.105  {
   5.106      if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) && 
   5.107           (l3e_get_pfn(l3e) != pfn) )
   5.108 -        put_page_and_type(&frame_table[l3e_get_pfn(l3e)]);
   5.109 +        put_page_and_type(pfn_to_page(l3e_get_pfn(l3e)));
   5.110  }
   5.111  
   5.112  #endif
   5.113 @@ -666,7 +666,7 @@ static void put_page_from_l4e(l4_pgentry
   5.114  {
   5.115      if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) && 
   5.116           (l4e_get_pfn(l4e) != pfn) )
   5.117 -        put_page_and_type(&frame_table[l4e_get_pfn(l4e)]);
   5.118 +        put_page_and_type(pfn_to_page(l4e_get_pfn(l4e)));
   5.119  }
   5.120  
   5.121  #endif
   5.122 @@ -1584,9 +1584,9 @@ int new_guest_cr3(unsigned long mfn)
   5.123          write_ptbase(v);
   5.124  
   5.125          if ( shadow_mode_refcounts(d) )
   5.126 -            put_page(&frame_table[old_base_mfn]);
   5.127 +            put_page(pfn_to_page(old_base_mfn));
   5.128          else
   5.129 -            put_page_and_type(&frame_table[old_base_mfn]);
   5.130 +            put_page_and_type(pfn_to_page(old_base_mfn));
   5.131  
   5.132          /* CR3 also holds a ref to its shadow... */
   5.133          if ( shadow_mode_enabled(d) )
   5.134 @@ -1595,7 +1595,7 @@ int new_guest_cr3(unsigned long mfn)
   5.135                  put_shadow_ref(v->arch.monitor_shadow_ref);
   5.136              v->arch.monitor_shadow_ref =
   5.137                  pagetable_get_pfn(v->arch.monitor_table);
   5.138 -            ASSERT(!page_get_owner(&frame_table[v->arch.monitor_shadow_ref]));
   5.139 +            ASSERT(!page_get_owner(pfn_to_page(v->arch.monitor_shadow_ref)));
   5.140              get_shadow_ref(v->arch.monitor_shadow_ref);
   5.141          }
   5.142      }
   5.143 @@ -1763,7 +1763,7 @@ int do_mmuext_op(
   5.144  
   5.145          okay = 1;
   5.146          mfn  = op.arg1.mfn;
   5.147 -        page = &frame_table[mfn];
   5.148 +        page = pfn_to_page(mfn);
   5.149  
   5.150          switch ( op.cmd )
   5.151          {
   5.152 @@ -1845,7 +1845,7 @@ int do_mmuext_op(
   5.153                      pagetable_get_pfn(v->arch.guest_table_user);
   5.154                  v->arch.guest_table_user = mk_pagetable(mfn << PAGE_SHIFT);
   5.155                  if ( old_mfn != 0 )
   5.156 -                    put_page_and_type(&frame_table[old_mfn]);
   5.157 +                    put_page_and_type(pfn_to_page(old_mfn));
   5.158              }
   5.159              break;
   5.160  #endif
   5.161 @@ -2145,7 +2145,7 @@ int do_mmu_update(
   5.162              va = map_domain_page_with_cache(mfn, &mapcache);
   5.163              va = (void *)((unsigned long)va +
   5.164                            (unsigned long)(req.ptr & ~PAGE_MASK));
   5.165 -            page = &frame_table[mfn];
   5.166 +            page = pfn_to_page(mfn);
   5.167  
   5.168              switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
   5.169              {
   5.170 @@ -2285,7 +2285,7 @@ int do_mmu_update(
   5.171  
   5.172              mark_dirty(FOREIGNDOM, mfn);
   5.173  
   5.174 -            put_page(&frame_table[mfn]);
   5.175 +            put_page(pfn_to_page(mfn));
   5.176              break;
   5.177  
   5.178          default:
   5.179 @@ -2728,7 +2728,7 @@ void destroy_gdt(struct vcpu *v)
   5.180      for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
   5.181      {
   5.182          if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
   5.183 -            put_page_and_type(&frame_table[pfn]);
   5.184 +            put_page_and_type(pfn_to_page(pfn));
   5.185          v->arch.perdomain_ptes[i] = l1e_empty();
   5.186          v->arch.guest_context.gdt_frames[i] = 0;
   5.187      }
   5.188 @@ -2753,7 +2753,7 @@ long set_gdt(struct vcpu *v,
   5.189      for ( i = 0; i < nr_pages; i++ ) {
   5.190          pfn = frames[i];
   5.191          if ((pfn >= max_page) ||
   5.192 -            !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
   5.193 +            !get_page_and_type(pfn_to_page(pfn), d, PGT_gdt_page) )
   5.194              goto fail;
   5.195      }
   5.196  
   5.197 @@ -2773,7 +2773,7 @@ long set_gdt(struct vcpu *v,
   5.198  
   5.199   fail:
   5.200      while ( i-- > 0 )
   5.201 -        put_page_and_type(&frame_table[frames[i]]);
   5.202 +        put_page_and_type(pfn_to_page(frames[i]));
   5.203      return -EINVAL;
   5.204  }
   5.205  
   5.206 @@ -2827,7 +2827,7 @@ long do_update_descriptor(u64 pa, u64 de
   5.207          return -EINVAL;
   5.208      }
   5.209  
   5.210 -    page = &frame_table[mfn];
   5.211 +    page = pfn_to_page(mfn);
   5.212      if ( unlikely(!get_page(page, dom)) )
   5.213      {
   5.214          UNLOCK_BIGLOCK(dom);
   5.215 @@ -3037,7 +3037,7 @@ int revalidate_l1(
   5.216          if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
   5.217          {
   5.218              if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
   5.219 -                put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
   5.220 +                put_page_type(pfn_to_page(l1e_get_pfn(nl1e)));
   5.221              continue;
   5.222          }
   5.223  
   5.224 @@ -3220,7 +3220,7 @@ static int ptwr_emulated_update(
   5.225      }
   5.226  
   5.227      pfn  = l1e_get_pfn(pte);
   5.228 -    page = &frame_table[pfn];
   5.229 +    page = pfn_to_page(pfn);
   5.230  
   5.231      /* We are looking only for read-only mappings of p.t. pages. */
   5.232      if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
   5.233 @@ -3331,7 +3331,7 @@ int ptwr_do_page_fault(struct domain *d,
   5.234      }
   5.235  
   5.236      pfn  = l1e_get_pfn(pte);
   5.237 -    page = &frame_table[pfn];
   5.238 +    page = pfn_to_page(pfn);
   5.239  
   5.240  #ifdef CONFIG_X86_64
   5.241  #define WRPT_PTE_FLAGS (_PAGE_RW | _PAGE_PRESENT | _PAGE_USER)
     6.1 --- a/xen/arch/x86/shadow.c	Sun Dec 18 20:29:43 2005 +0100
     6.2 +++ b/xen/arch/x86/shadow.c	Tue Dec 20 12:46:56 2005 +0100
     6.3 @@ -504,7 +504,7 @@ static unsigned long shadow_l2_table(
     6.4              l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
     6.5  
     6.6          spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
     6.7 -            l2e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
     6.8 +            l2e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt),
     6.9                              __PAGE_HYPERVISOR);
    6.10  
    6.11          if ( shadow_mode_translate(d) ) // NB: not external
    6.12 @@ -670,7 +670,7 @@ static void shadow_map_l1_into_current_l
    6.13              set_guest_back_ptr(d, sl1e, sl1mfn, i);
    6.14          }
    6.15  
    6.16 -        frame_table[sl1mfn].tlbflush_timestamp =
    6.17 +        pfn_to_page(sl1mfn)->tlbflush_timestamp =
    6.18              SHADOW_ENCODE_MIN_MAX(min, max);
    6.19  
    6.20          unmap_domain_page(gpl1e);
    6.21 @@ -907,7 +907,7 @@ shadow_make_snapshot(
    6.22      u32 min_max = 0;
    6.23      int min, max, length;
    6.24  
    6.25 -    if ( test_and_set_bit(_PGC_out_of_sync, &frame_table[gmfn].count_info) )
    6.26 +    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
    6.27      {
    6.28          ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
    6.29          return SHADOW_SNAPSHOT_ELSEWHERE;
    6.30 @@ -953,7 +953,7 @@ static struct out_of_sync_entry *
    6.31                               unsigned long mfn)
    6.32  {
    6.33      struct domain *d = v->domain;
    6.34 -    struct pfn_info *page = &frame_table[mfn];
    6.35 +    struct pfn_info *page = pfn_to_page(mfn);
    6.36      struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
    6.37  
    6.38      ASSERT(shadow_lock_is_acquired(d));
    6.39 @@ -1174,7 +1174,7 @@ static int is_out_of_sync(struct vcpu *v
    6.40                  && i == PAGING_L4)
    6.41                  continue;       /* skip the top-level for 3-level */
    6.42  
    6.43 -            if ( page_out_of_sync(&frame_table[gmfn]) &&
    6.44 +            if ( page_out_of_sync(pfn_to_page(gmfn)) &&
    6.45                   !snapshot_entry_matches(
    6.46                       d, guest_pt, gpfn, table_offset_64(va, i)) )
    6.47              {
    6.48 @@ -1200,7 +1200,7 @@ static int is_out_of_sync(struct vcpu *v
    6.49          }
    6.50  
    6.51          /* L2 */
    6.52 -        if ( page_out_of_sync(&frame_table[gmfn]) &&
    6.53 +        if ( page_out_of_sync(pfn_to_page(gmfn)) &&
    6.54               !snapshot_entry_matches(d, guest_pt, gpfn, l2_table_offset(va)) )
    6.55          {
    6.56              unmap_and_return (1);
    6.57 @@ -1214,7 +1214,7 @@ static int is_out_of_sync(struct vcpu *v
    6.58  #undef unmap_and_return
    6.59  #endif /* CONFIG_PAGING_LEVELS >= 3 */
    6.60      {
    6.61 -        if ( page_out_of_sync(&frame_table[l2mfn]) &&
    6.62 +        if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
    6.63               !snapshot_entry_matches(d, (guest_l1_pgentry_t *)v->arch.guest_vtable,
    6.64                                       l2pfn, guest_l2_table_offset(va)) )
    6.65              return 1;
    6.66 @@ -1234,7 +1234,7 @@ static int is_out_of_sync(struct vcpu *v
    6.67  
    6.68      guest_pt = (guest_l1_pgentry_t *) map_domain_page(l1mfn);
    6.69  
    6.70 -    if ( page_out_of_sync(&frame_table[l1mfn]) &&
    6.71 +    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
    6.72           !snapshot_entry_matches(
    6.73               d, guest_pt, l1pfn, guest_l1_table_offset(va)) ) 
    6.74      {
    6.75 @@ -1324,18 +1324,18 @@ static u32 remove_all_write_access_in_pt
    6.76      int i;
    6.77      u32 found = 0;
    6.78      int is_l1_shadow =
    6.79 -        ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
    6.80 +        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
    6.81           PGT_l1_shadow);
    6.82  #if CONFIG_PAGING_LEVELS == 4
    6.83      is_l1_shadow |=
    6.84 -      ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
    6.85 +      ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
    6.86                  PGT_fl1_shadow);
    6.87  #endif
    6.88  
    6.89      match = l1e_from_pfn(readonly_gmfn, flags);
    6.90  
    6.91      if ( shadow_mode_external(d) ) {
    6.92 -        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask)
    6.93 +        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask)
    6.94              >> PGT_va_shift;
    6.95  
    6.96          if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
    6.97 @@ -1373,7 +1373,7 @@ static int remove_all_write_access(
    6.98  
    6.99      // If it's not a writable page, then no writable refs can be outstanding.
   6.100      //
   6.101 -    if ( (frame_table[readonly_gmfn].u.inuse.type_info & PGT_type_mask) !=
   6.102 +    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
   6.103           PGT_writable_page )
   6.104      {
   6.105          perfc_incrc(remove_write_not_writable);
   6.106 @@ -1383,7 +1383,7 @@ static int remove_all_write_access(
   6.107      // How many outstanding writable PTEs for this page are there?
   6.108      //
   6.109      write_refs =
   6.110 -        (frame_table[readonly_gmfn].u.inuse.type_info & PGT_count_mask);
   6.111 +        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
   6.112      if ( write_refs && MFN_PINNED(readonly_gmfn) )
   6.113      {
   6.114          write_refs--;
   6.115 @@ -1401,7 +1401,7 @@ static int remove_all_write_access(
   6.116  
   6.117           // Use the back pointer to locate the shadow page that can contain
   6.118           // the PTE of interest
   6.119 -         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
   6.120 +         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
   6.121               found += remove_all_write_access_in_ptpage(
   6.122                   d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
   6.123               if ( found == write_refs )
   6.124 @@ -1670,7 +1670,7 @@ static int resync_all(struct domain *d, 
   6.125                      if ( !(entry_get_flags(guest_pt[i]) & _PAGE_PRESENT) &&
   6.126                           unlikely(entry_get_value(guest_pt[i]) != 0) &&
   6.127                           !unshadow &&
   6.128 -                         (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
   6.129 +                         (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
   6.130                          unshadow = 1;
   6.131                  }
   6.132  #endif
   6.133 @@ -1718,7 +1718,7 @@ static int resync_all(struct domain *d, 
   6.134                  if ( !(guest_root_get_flags(new_root_e) & _PAGE_PRESENT) &&
   6.135                       unlikely(guest_root_get_intpte(new_root_e) != 0) &&
   6.136                       !unshadow &&
   6.137 -                     (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
   6.138 +                     (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) )
   6.139                      unshadow = 1;
   6.140              }
   6.141              if ( max == -1 )
   6.142 @@ -2401,7 +2401,7 @@ static int check_pte(
   6.143      {
   6.144          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
   6.145                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   6.146 -               frame_table[eff_guest_mfn].u.inuse.type_info,
   6.147 +               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
   6.148                 page_table_page);
   6.149          FAIL("RW coherence");
   6.150      }
   6.151 @@ -2412,7 +2412,7 @@ static int check_pte(
   6.152      {
   6.153          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=0x%08lx page_table_page=%d\n",
   6.154                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   6.155 -               frame_table[eff_guest_mfn].u.inuse.type_info,
   6.156 +               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
   6.157                 page_table_page);
   6.158          FAIL("RW2 coherence");
   6.159      }
   6.160 @@ -2781,7 +2781,7 @@ static unsigned long shadow_l3_table(
   6.161           * When we free L2 pages, we need to tell if the page contains
   6.162           * Xen private mappings. Use the va_mask part.
   6.163           */
   6.164 -        frame_table[s2mfn].u.inuse.type_info |= 
   6.165 +        pfn_to_page(s2mfn)->u.inuse.type_info |= 
   6.166              (unsigned long) 3 << PGT_score_shift; 
   6.167  
   6.168          memset(spl2e, 0, 
   6.169 @@ -2794,7 +2794,7 @@ static unsigned long shadow_l3_table(
   6.170          for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
   6.171              spl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
   6.172                  l2e_from_page(
   6.173 -                    virt_to_page(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt) + i, 
   6.174 +                    virt_to_page(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt) + i, 
   6.175                      __PAGE_HYPERVISOR);
   6.176          for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
   6.177              spl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
   6.178 @@ -2896,7 +2896,7 @@ static unsigned long shadow_l4_table(
   6.179             ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
   6.180  
   6.181          spl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
   6.182 -            l4e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_l3),
   6.183 +            l4e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_l3),
   6.184                              __PAGE_HYPERVISOR);
   6.185  
   6.186          if ( shadow_mode_translate(d) ) // NB: not external
     7.1 --- a/xen/arch/x86/shadow32.c	Sun Dec 18 20:29:43 2005 +0100
     7.2 +++ b/xen/arch/x86/shadow32.c	Tue Dec 20 12:46:56 2005 +0100
     7.3 @@ -30,7 +30,7 @@
     7.4  #include <xen/sched.h>
     7.5  #include <xen/trace.h>
     7.6  
     7.7 -#define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
     7.8 +#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
     7.9  #define va_to_l1mfn(_ed, _va) \
    7.10      (l2e_get_pfn(linear_l2_table(_ed)[_va>>L2_PAGETABLE_SHIFT]))
    7.11  
    7.12 @@ -144,11 +144,11 @@ shadow_demote(struct domain *d, unsigned
    7.13      if ( !shadow_mode_refcounts(d) )
    7.14          return;
    7.15  
    7.16 -    ASSERT(frame_table[gmfn].count_info & PGC_page_table);
    7.17 +    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
    7.18  
    7.19      if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
    7.20      {
    7.21 -        clear_bit(_PGC_page_table, &frame_table[gmfn].count_info);
    7.22 +        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
    7.23  
    7.24          if ( page_out_of_sync(pfn_to_page(gmfn)) )
    7.25          {
    7.26 @@ -380,7 +380,7 @@ free_shadow_l2_table(struct domain *d, u
    7.27  
    7.28  void free_shadow_page(unsigned long smfn)
    7.29  {
    7.30 -    struct pfn_info *page = &frame_table[smfn];
    7.31 +    struct pfn_info *page = pfn_to_page(smfn);
    7.32      unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
    7.33      struct domain *d = page_get_owner(pfn_to_page(gmfn));
    7.34      unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
    7.35 @@ -465,8 +465,8 @@ release_out_of_sync_entry(struct domain 
    7.36  {
    7.37      struct pfn_info *page;
    7.38  
    7.39 -    page = &frame_table[entry->gmfn];
    7.40 -        
    7.41 +    page = pfn_to_page(entry->gmfn);
    7.42 +
    7.43      // Decrement ref count of guest & shadow pages
    7.44      //
    7.45      put_page(page);
    7.46 @@ -795,7 +795,7 @@ void free_monitor_pagetable(struct vcpu 
    7.47       */
    7.48      mfn = pagetable_get_pfn(v->arch.monitor_table);
    7.49      unmap_domain_page(v->arch.monitor_vtable);
    7.50 -    free_domheap_page(&frame_table[mfn]);
    7.51 +    free_domheap_page(pfn_to_page(mfn));
    7.52  
    7.53      v->arch.monitor_table = mk_pagetable(0);
    7.54      v->arch.monitor_vtable = 0;
    7.55 @@ -1018,8 +1018,8 @@ int __shadow_mode_enable(struct domain *
    7.56          {
    7.57              // external guests provide their own memory for their P2M maps.
    7.58              //
    7.59 -            ASSERT( d == page_get_owner(
    7.60 -                        &frame_table[pagetable_get_pfn(d->arch.phys_table)]) );
    7.61 +            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
    7.62 +                d->arch.phys_table))));
    7.63          }
    7.64      }
    7.65  
    7.66 @@ -1543,7 +1543,7 @@ static unsigned long shadow_l2_table(
    7.67              l2e_from_pfn(smfn, __PAGE_HYPERVISOR);
    7.68  
    7.69          spl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
    7.70 -            l2e_from_paddr(__pa(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt),
    7.71 +            l2e_from_paddr(__pa(page_get_owner(pfn_to_page(gmfn))->arch.mm_perdomain_pt),
    7.72                              __PAGE_HYPERVISOR);
    7.73  
    7.74          if ( shadow_mode_translate(d) ) // NB: not external
    7.75 @@ -1675,7 +1675,7 @@ void shadow_map_l1_into_current_l2(unsig
    7.76              set_guest_back_ptr(d, sl1e, sl1mfn, i);
    7.77          }
    7.78  
    7.79 -        frame_table[sl1mfn].tlbflush_timestamp =
    7.80 +        pfn_to_page(sl1mfn)->tlbflush_timestamp =
    7.81              SHADOW_ENCODE_MIN_MAX(min, max);
    7.82      }
    7.83  }
    7.84 @@ -1758,7 +1758,7 @@ shadow_make_snapshot(
    7.85      u32 min_max = 0;
    7.86      int min, max, length;
    7.87  
    7.88 -    if ( test_and_set_bit(_PGC_out_of_sync, &frame_table[gmfn].count_info) )
    7.89 +    if ( test_and_set_bit(_PGC_out_of_sync, &pfn_to_page(gmfn)->count_info) )
    7.90      {
    7.91          ASSERT(__shadow_status(d, gpfn, PGT_snapshot));
    7.92          return SHADOW_SNAPSHOT_ELSEWHERE;
    7.93 @@ -1809,7 +1809,7 @@ shadow_free_snapshot(struct domain *d, s
    7.94  
    7.95      // Clear the out_of_sync bit.
    7.96      //
    7.97 -    clear_bit(_PGC_out_of_sync, &frame_table[entry->gmfn].count_info);
    7.98 +    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
    7.99  
   7.100      // XXX Need to think about how to protect the domain's
   7.101      // information less expensively.
   7.102 @@ -1826,7 +1826,7 @@ struct out_of_sync_entry *
   7.103                               unsigned long mfn)
   7.104  {
   7.105      struct domain *d = v->domain;
   7.106 -    struct pfn_info *page = &frame_table[mfn];
   7.107 +    struct pfn_info *page = pfn_to_page(mfn);
   7.108      struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
   7.109  
   7.110      ASSERT(shadow_lock_is_acquired(d));
   7.111 @@ -1992,7 +1992,7 @@ int __shadow_out_of_sync(struct vcpu *v,
   7.112  
   7.113      perfc_incrc(shadow_out_of_sync_calls);
   7.114  
   7.115 -    if ( page_out_of_sync(&frame_table[l2mfn]) &&
   7.116 +    if ( page_out_of_sync(pfn_to_page(l2mfn)) &&
   7.117           !snapshot_entry_matches(d, (l1_pgentry_t *)v->arch.guest_vtable,
   7.118                                   l2pfn, l2_table_offset(va)) )
   7.119          return 1;
   7.120 @@ -2008,7 +2008,7 @@ int __shadow_out_of_sync(struct vcpu *v,
   7.121      if ( !VALID_MFN(l1mfn) )
   7.122          return 0;
   7.123  
   7.124 -    if ( page_out_of_sync(&frame_table[l1mfn]) &&
   7.125 +    if ( page_out_of_sync(pfn_to_page(l1mfn)) &&
   7.126           !snapshot_entry_matches(
   7.127               d, &linear_pg_table[l1_linear_offset(va) & ~(L1_PAGETABLE_ENTRIES-1)],
   7.128               l1pfn, l1_table_offset(va)) )
   7.129 @@ -2136,13 +2136,13 @@ static u32 remove_all_write_access_in_pt
   7.130      int i;
   7.131      u32 found = 0;
   7.132      int is_l1_shadow =
   7.133 -        ((frame_table[pt_mfn].u.inuse.type_info & PGT_type_mask) ==
   7.134 +        ((pfn_to_page(pt_mfn)->u.inuse.type_info & PGT_type_mask) ==
   7.135           PGT_l1_shadow);
   7.136  
   7.137      match = l1e_from_pfn(readonly_gmfn, flags);
   7.138  
   7.139      if ( shadow_mode_external(d) ) {
   7.140 -        i = (frame_table[readonly_gmfn].u.inuse.type_info & PGT_va_mask) 
   7.141 +        i = (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_va_mask) 
   7.142              >> PGT_va_shift;
   7.143  
   7.144          if ( (i >= 0 && i < L1_PAGETABLE_ENTRIES) &&
   7.145 @@ -2180,7 +2180,7 @@ int shadow_remove_all_write_access(
   7.146  
   7.147      // If it's not a writable page, then no writable refs can be outstanding.
   7.148      //
   7.149 -    if ( (frame_table[readonly_gmfn].u.inuse.type_info & PGT_type_mask) !=
   7.150 +    if ( (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_type_mask) !=
   7.151           PGT_writable_page )
   7.152      {
   7.153          perfc_incrc(remove_write_not_writable);
   7.154 @@ -2190,7 +2190,7 @@ int shadow_remove_all_write_access(
   7.155      // How many outstanding writable PTEs for this page are there?
   7.156      //
   7.157      write_refs =
   7.158 -        (frame_table[readonly_gmfn].u.inuse.type_info & PGT_count_mask);
   7.159 +        (pfn_to_page(readonly_gmfn)->u.inuse.type_info & PGT_count_mask);
   7.160      if ( write_refs && MFN_PINNED(readonly_gmfn) )
   7.161      {
   7.162          write_refs--;
   7.163 @@ -2208,7 +2208,7 @@ int shadow_remove_all_write_access(
   7.164  
   7.165           // Use the back pointer to locate the shadow page that can contain
   7.166           // the PTE of interest
   7.167 -         if ( (predicted_smfn = frame_table[readonly_gmfn].tlbflush_timestamp) ) {
   7.168 +         if ( (predicted_smfn = pfn_to_page(readonly_gmfn)->tlbflush_timestamp) ) {
   7.169               found += remove_all_write_access_in_ptpage(
   7.170                   d, predicted_smfn, predicted_smfn, readonly_gpfn, readonly_gmfn, write_refs, 0);
   7.171               if ( found == write_refs )
   7.172 @@ -2249,7 +2249,7 @@ static u32 remove_all_access_in_page(
   7.173      int i;
   7.174      u32 count = 0;
   7.175      int is_l1_shadow =
   7.176 -        ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
   7.177 +        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
   7.178           PGT_l1_shadow);
   7.179  
   7.180      match = l1e_from_pfn(forbidden_gmfn, flags);
   7.181 @@ -2266,7 +2266,7 @@ static u32 remove_all_access_in_page(
   7.182          if ( is_l1_shadow )
   7.183              shadow_put_page_from_l1e(ol2e, d);
   7.184          else /* must be an hl2 page */
   7.185 -            put_page(&frame_table[forbidden_gmfn]);
   7.186 +            put_page(pfn_to_page(forbidden_gmfn));
   7.187      }
   7.188  
   7.189      unmap_domain_page(pl1e);
   7.190 @@ -3156,7 +3156,7 @@ static int check_pte(
   7.191      {
   7.192          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
   7.193                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   7.194 -               frame_table[eff_guest_mfn].u.inuse.type_info,
   7.195 +               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
   7.196                 page_table_page);
   7.197          FAIL("RW coherence");
   7.198      }
   7.199 @@ -3167,7 +3167,7 @@ static int check_pte(
   7.200      {
   7.201          printk("eff_guest_pfn=%lx eff_guest_mfn=%lx shadow_mfn=%lx t=%lx page_table_page=%d\n",
   7.202                 eff_guest_pfn, eff_guest_mfn, shadow_mfn,
   7.203 -               frame_table[eff_guest_mfn].u.inuse.type_info,
   7.204 +               pfn_to_page(eff_guest_mfn)->u.inuse.type_info,
   7.205                 page_table_page);
   7.206          FAIL("RW2 coherence");
   7.207      }
     8.1 --- a/xen/arch/x86/shadow_public.c	Sun Dec 18 20:29:43 2005 +0100
     8.2 +++ b/xen/arch/x86/shadow_public.c	Tue Dec 20 12:46:56 2005 +0100
     8.3 @@ -168,14 +168,14 @@ free_shadow_tables(struct domain *d, uns
     8.4  #if CONFIG_PAGING_LEVELS >=3
     8.5      if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
     8.6      {
     8.7 -        struct pfn_info *page = &frame_table[smfn];
     8.8 +        struct pfn_info *page = pfn_to_page(smfn);
     8.9          for ( i = 0; i < PDP_ENTRIES; i++ )
    8.10          {
    8.11              if ( entry_get_flags(ple[i]) & _PAGE_PRESENT )
    8.12                  free_fake_shadow_l2(d,entry_get_pfn(ple[i]));
    8.13          }
    8.14  
    8.15 -        page = &frame_table[entry_get_pfn(ple[0])];
    8.16 +        page = pfn_to_page(entry_get_pfn(ple[0]));
    8.17          free_domheap_pages(page, SL2_ORDER);
    8.18          unmap_domain_page(ple);
    8.19      }
    8.20 @@ -208,7 +208,7 @@ free_shadow_tables(struct domain *d, uns
    8.21                      break;
    8.22                  if ( level == PAGING_L2 )
    8.23                  {
    8.24 -                    struct pfn_info *page = &frame_table[smfn]; 
    8.25 +                    struct pfn_info *page = pfn_to_page(smfn);
    8.26                      if ( is_xen_l2_slot(page->u.inuse.type_info, i) )
    8.27                          continue;
    8.28                  }
    8.29 @@ -299,7 +299,7 @@ void free_monitor_pagetable(struct vcpu 
    8.30       */
    8.31      mfn = pagetable_get_pfn(v->arch.monitor_table);
    8.32      unmap_domain_page(v->arch.monitor_vtable);
    8.33 -    free_domheap_page(&frame_table[mfn]);
    8.34 +    free_domheap_page(pfn_to_page(mfn));
    8.35  
    8.36      v->arch.monitor_table = mk_pagetable(0);
    8.37      v->arch.monitor_vtable = 0;
    8.38 @@ -394,7 +394,7 @@ void free_monitor_pagetable(struct vcpu 
    8.39       */
    8.40      mfn = pagetable_get_pfn(v->arch.monitor_table);
    8.41      unmap_domain_page(v->arch.monitor_vtable);
    8.42 -    free_domheap_page(&frame_table[mfn]);
    8.43 +    free_domheap_page(pfn_to_page(mfn));
    8.44  
    8.45      v->arch.monitor_table = mk_pagetable(0);
    8.46      v->arch.monitor_vtable = 0;
    8.47 @@ -411,7 +411,7 @@ shadow_free_snapshot(struct domain *d, s
    8.48  
    8.49      // Clear the out_of_sync bit.
    8.50      //
    8.51 -    clear_bit(_PGC_out_of_sync, &frame_table[entry->gmfn].count_info);
    8.52 +    clear_bit(_PGC_out_of_sync, &pfn_to_page(entry->gmfn)->count_info);
    8.53  
    8.54      // XXX Need to think about how to protect the domain's
    8.55      // information less expensively.
    8.56 @@ -428,7 +428,7 @@ release_out_of_sync_entry(struct domain 
    8.57  {
    8.58      struct pfn_info *page;
    8.59  
    8.60 -    page = &frame_table[entry->gmfn];
    8.61 +    page = pfn_to_page(entry->gmfn);
    8.62          
    8.63      // Decrement ref count of guest & shadow pages
    8.64      //
    8.65 @@ -501,11 +501,11 @@ shadow_demote(struct domain *d, unsigned
    8.66      if ( !shadow_mode_refcounts(d) )
    8.67          return;
    8.68  
    8.69 -    ASSERT(frame_table[gmfn].count_info & PGC_page_table);
    8.70 +    ASSERT(pfn_to_page(gmfn)->count_info & PGC_page_table);
    8.71  
    8.72      if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
    8.73      {
    8.74 -        clear_bit(_PGC_page_table, &frame_table[gmfn].count_info);
    8.75 +        clear_bit(_PGC_page_table, &pfn_to_page(gmfn)->count_info);
    8.76  
    8.77          if ( page_out_of_sync(pfn_to_page(gmfn)) )
    8.78          {
    8.79 @@ -600,7 +600,7 @@ void free_fake_shadow_l2(struct domain *
    8.80  
    8.81  void free_shadow_page(unsigned long smfn)
    8.82  {
    8.83 -    struct pfn_info *page = &frame_table[smfn];
    8.84 +    struct pfn_info *page = pfn_to_page(smfn);
    8.85  
    8.86      unsigned long gmfn = page->u.inuse.type_info & PGT_mfn_mask;
    8.87      struct domain *d = page_get_owner(pfn_to_page(gmfn));
    8.88 @@ -1067,8 +1067,8 @@ int __shadow_mode_enable(struct domain *
    8.89          {
    8.90              // external guests provide their own memory for their P2M maps.
    8.91              //
    8.92 -            ASSERT( d == page_get_owner(
    8.93 -                &frame_table[pagetable_get_pfn(d->arch.phys_table)]) );
    8.94 +            ASSERT(d == page_get_owner(pfn_to_page(pagetable_get_pfn(
    8.95 +                d->arch.phys_table))));
    8.96          }
    8.97      }
    8.98  
    8.99 @@ -1643,7 +1643,7 @@ static u32 remove_all_access_in_page(
   8.100      int i;
   8.101      u32 count = 0;
   8.102      int is_l1_shadow =
   8.103 -        ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) ==
   8.104 +        ((pfn_to_page(l1mfn)->u.inuse.type_info & PGT_type_mask) ==
   8.105           PGT_l1_shadow);
   8.106  
   8.107      match = l1e_from_pfn(forbidden_gmfn, flags);
   8.108 @@ -1660,7 +1660,7 @@ static u32 remove_all_access_in_page(
   8.109          if ( is_l1_shadow )
   8.110              shadow_put_page_from_l1e(ol2e, d);
   8.111          else /* must be an hl2 page */
   8.112 -            put_page(&frame_table[forbidden_gmfn]);
   8.113 +            put_page(pfn_to_page(forbidden_gmfn));
   8.114      }
   8.115  
   8.116      unmap_domain_page(pl1e);
     9.1 --- a/xen/arch/x86/x86_32/mm.c	Sun Dec 18 20:29:43 2005 +0100
     9.2 +++ b/xen/arch/x86/x86_32/mm.c	Tue Dec 20 12:46:56 2005 +0100
     9.3 @@ -177,10 +177,11 @@ void subarch_init_memory(struct domain *
     9.4              idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i]);
     9.5          for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
     9.6          {
     9.7 -            frame_table[m2p_start_mfn+j].count_info = PGC_allocated | 1;
     9.8 +            struct pfn_info *page = pfn_to_page(m2p_start_mfn + j);
     9.9 +            page->count_info = PGC_allocated | 1;
    9.10              /* Ensure it's only mapped read-only by domains. */
    9.11 -            frame_table[m2p_start_mfn+j].u.inuse.type_info = PGT_gdt_page | 1;
    9.12 -            page_set_owner(&frame_table[m2p_start_mfn+j], dom_xen);
    9.13 +            page->u.inuse.type_info = PGT_gdt_page | 1;
    9.14 +            page_set_owner(page, dom_xen);
    9.15          }
    9.16      }
    9.17  }
    10.1 --- a/xen/arch/x86/x86_64/mm.c	Sun Dec 18 20:29:43 2005 +0100
    10.2 +++ b/xen/arch/x86/x86_64/mm.c	Tue Dec 20 12:46:56 2005 +0100
    10.3 @@ -166,11 +166,12 @@ void subarch_init_memory(struct domain *
    10.4  
    10.5          for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    10.6          {
    10.7 -            frame_table[m2p_start_mfn+i].count_info = PGC_allocated | 1;
    10.8 +            struct pfn_info *page = pfn_to_page(m2p_start_mfn + i);
    10.9 +            page->count_info = PGC_allocated | 1;
   10.10              /* gdt to make sure it's only mapped read-only by non-privileged
   10.11                 domains. */
   10.12 -            frame_table[m2p_start_mfn+i].u.inuse.type_info = PGT_gdt_page | 1;
   10.13 -            page_set_owner(&frame_table[m2p_start_mfn+i], dom_xen);
   10.14 +            page->u.inuse.type_info = PGT_gdt_page | 1;
   10.15 +            page_set_owner(page, dom_xen);
   10.16          }
   10.17      }
   10.18  }
    11.1 --- a/xen/common/grant_table.c	Sun Dec 18 20:29:43 2005 +0100
    11.2 +++ b/xen/common/grant_table.c	Tue Dec 20 12:46:56 2005 +0100
    11.3 @@ -238,8 +238,8 @@ static int
    11.4  
    11.5          if ( unlikely(!pfn_valid(frame)) ||
    11.6               unlikely(!((dev_hst_ro_flags & GNTMAP_readonly) ?
    11.7 -                        get_page(&frame_table[frame], rd) :
    11.8 -                        get_page_and_type(&frame_table[frame], rd,
    11.9 +                        get_page(pfn_to_page(frame), rd) :
   11.10 +                        get_page_and_type(pfn_to_page(frame), rd,
   11.11                                            PGT_writable_page))) )
   11.12          {
   11.13              clear_bit(_GTF_writing, &sha->flags);
   11.14 @@ -301,7 +301,7 @@ static int
   11.15                  sflags = prev_sflags;
   11.16              }
   11.17  
   11.18 -            if ( unlikely(!get_page_type(&frame_table[frame],
   11.19 +            if ( unlikely(!get_page_type(pfn_to_page(frame),
   11.20                                           PGT_writable_page)) )
   11.21              {
   11.22                  clear_bit(_GTF_writing, &sha->flags);
   11.23 @@ -347,14 +347,14 @@ static int
   11.24                  if ( (act->pin & (GNTPIN_hstw_mask|GNTPIN_devw_mask)) == 0 )
   11.25                  {
   11.26                      clear_bit(_GTF_writing, &sha->flags);
   11.27 -                    put_page_type(&frame_table[frame]);
   11.28 +                    put_page_type(pfn_to_page(frame));
   11.29                  }
   11.30              }
   11.31  
   11.32              if ( act->pin == 0 )
   11.33              {
   11.34                  clear_bit(_GTF_reading, &sha->flags);
   11.35 -                put_page(&frame_table[frame]);
   11.36 +                put_page(pfn_to_page(frame));
   11.37              }
   11.38  
   11.39              spin_unlock(&rd->grant_table->lock);
   11.40 @@ -500,14 +500,14 @@ static int
   11.41           !(flags & GNTMAP_readonly) )
   11.42      {
   11.43          clear_bit(_GTF_writing, &sha->flags);
   11.44 -        put_page_type(&frame_table[frame]);
   11.45 +        put_page_type(pfn_to_page(frame));
   11.46      }
   11.47  
   11.48      if ( act->pin == 0 )
   11.49      {
   11.50          act->frame = 0xdeadbeef;
   11.51          clear_bit(_GTF_reading, &sha->flags);
   11.52 -        put_page(&frame_table[frame]);
   11.53 +        put_page(pfn_to_page(frame));
   11.54      }
   11.55  
   11.56   unmap_out:
   11.57 @@ -691,7 +691,7 @@ gnttab_transfer(
   11.58          }
   11.59  
   11.60          /* Check the passed page frame for basic validity. */
   11.61 -        page = &frame_table[gop.mfn];
   11.62 +        page = pfn_to_page(gop.mfn);
   11.63          if ( unlikely(!pfn_valid(gop.mfn) || IS_XEN_HEAP_FRAME(page)) )
   11.64          { 
   11.65              DPRINTK("gnttab_transfer: out-of-range or xen frame %lx\n",
   11.66 @@ -1016,14 +1016,14 @@ gnttab_release_mappings(
   11.67              if ( (act->pin & (GNTPIN_devw_mask|GNTPIN_hstw_mask)) == 0 )
   11.68              {
   11.69                  clear_bit(_GTF_writing, &sha->flags);
   11.70 -                put_page_type(&frame_table[act->frame]);
   11.71 +                put_page_type(pfn_to_page(act->frame));
   11.72              }
   11.73          }
   11.74  
   11.75          if ( act->pin == 0 )
   11.76          {
   11.77              clear_bit(_GTF_reading, &sha->flags);
   11.78 -            put_page(&frame_table[act->frame]);
   11.79 +            put_page(pfn_to_page(act->frame));
   11.80          }
   11.81  
   11.82          spin_unlock(&rd->grant_table->lock);
    12.1 --- a/xen/common/memory.c	Sun Dec 18 20:29:43 2005 +0100
    12.2 +++ b/xen/common/memory.c	Tue Dec 20 12:46:56 2005 +0100
    12.3 @@ -102,7 +102,7 @@ decrease_reservation(
    12.4                  return i;
    12.5              }
    12.6              
    12.7 -            page = &frame_table[mpfn + j];
    12.8 +            page = pfn_to_page(mpfn + j);
    12.9              if ( unlikely(!get_page(page, d)) )
   12.10              {
   12.11                  DPRINTK("Bad page free for domain %u\n", d->domain_id);
    13.1 --- a/xen/include/asm-x86/shadow.h	Sun Dec 18 20:29:43 2005 +0100
    13.2 +++ b/xen/include/asm-x86/shadow.h	Tue Dec 20 12:46:56 2005 +0100
    13.3 @@ -493,9 +493,9 @@ static inline void __mark_dirty(struct d
    13.4          SH_VLOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
    13.5                 mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
    13.6          SH_VLOG("dom=%p caf=%08x taf=%" PRtype_info, 
    13.7 -               page_get_owner(&frame_table[mfn]),
    13.8 -               frame_table[mfn].count_info, 
    13.9 -               frame_table[mfn].u.inuse.type_info );
   13.10 +                page_get_owner(pfn_to_page(mfn)),
   13.11 +                pfn_to_page(mfn)->count_info, 
   13.12 +                pfn_to_page(mfn)->u.inuse.type_info );
   13.13      }
   13.14  #endif
   13.15  }
   13.16 @@ -648,20 +648,20 @@ get_shadow_ref(unsigned long smfn)
   13.17  
   13.18      ASSERT(pfn_valid(smfn));
   13.19  
   13.20 -    x = frame_table[smfn].count_info;
   13.21 +    x = pfn_to_page(smfn)->count_info;
   13.22      nx = x + 1;
   13.23  
   13.24      if ( unlikely(nx == 0) )
   13.25      {
   13.26          printk("get_shadow_ref overflow, gmfn=%" PRtype_info  " smfn=%lx\n",
   13.27 -               frame_table[smfn].u.inuse.type_info & PGT_mfn_mask,
   13.28 +               pfn_to_page(smfn)->u.inuse.type_info & PGT_mfn_mask,
   13.29                 smfn);
   13.30          BUG();
   13.31      }
   13.32      
   13.33      // Guarded by the shadow lock...
   13.34      //
   13.35 -    frame_table[smfn].count_info = nx;
   13.36 +    pfn_to_page(smfn)->count_info = nx;
   13.37  
   13.38      return 1;
   13.39  }
   13.40 @@ -678,7 +678,7 @@ put_shadow_ref(unsigned long smfn)
   13.41  
   13.42      ASSERT(pfn_valid(smfn));
   13.43  
   13.44 -    x = frame_table[smfn].count_info;
   13.45 +    x = pfn_to_page(smfn)->count_info;
   13.46      nx = x - 1;
   13.47  
   13.48      if ( unlikely(x == 0) )
   13.49 @@ -686,14 +686,14 @@ put_shadow_ref(unsigned long smfn)
   13.50          printk("put_shadow_ref underflow, smfn=%lx oc=%08x t=%" 
   13.51                 PRtype_info "\n",
   13.52                 smfn,
   13.53 -               frame_table[smfn].count_info,
   13.54 -               frame_table[smfn].u.inuse.type_info);
   13.55 +               pfn_to_page(smfn)->count_info,
   13.56 +               pfn_to_page(smfn)->u.inuse.type_info);
   13.57          BUG();
   13.58      }
   13.59  
   13.60      // Guarded by the shadow lock...
   13.61      //
   13.62 -    frame_table[smfn].count_info = nx;
   13.63 +    pfn_to_page(smfn)->count_info = nx;
   13.64  
   13.65      if ( unlikely(nx == 0) )
   13.66      {
   13.67 @@ -704,9 +704,9 @@ put_shadow_ref(unsigned long smfn)
   13.68  static inline void
   13.69  shadow_pin(unsigned long smfn)
   13.70  {
   13.71 -    ASSERT( !(frame_table[smfn].u.inuse.type_info & PGT_pinned) );
   13.72 +    ASSERT( !(pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
   13.73  
   13.74 -    frame_table[smfn].u.inuse.type_info |= PGT_pinned;
   13.75 +    pfn_to_page(smfn)->u.inuse.type_info |= PGT_pinned;
   13.76      if ( unlikely(!get_shadow_ref(smfn)) )
   13.77          BUG();
   13.78  }
   13.79 @@ -714,9 +714,9 @@ shadow_pin(unsigned long smfn)
   13.80  static inline void
   13.81  shadow_unpin(unsigned long smfn)
   13.82  {
   13.83 -    ASSERT( (frame_table[smfn].u.inuse.type_info & PGT_pinned) );
   13.84 +    ASSERT( (pfn_to_page(smfn)->u.inuse.type_info & PGT_pinned) );
   13.85  
   13.86 -    frame_table[smfn].u.inuse.type_info &= ~PGT_pinned;
   13.87 +    pfn_to_page(smfn)->u.inuse.type_info &= ~PGT_pinned;
   13.88      put_shadow_ref(smfn);
   13.89  }
   13.90  
   13.91 @@ -732,9 +732,9 @@ static inline void set_guest_back_ptr(
   13.92  
   13.93          ASSERT(shadow_lock_is_acquired(d));
   13.94          gmfn = l1e_get_pfn(spte);
   13.95 -        frame_table[gmfn].tlbflush_timestamp = smfn;
   13.96 -        frame_table[gmfn].u.inuse.type_info &= ~PGT_va_mask;
   13.97 -        frame_table[gmfn].u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
   13.98 +        pfn_to_page(gmfn)->tlbflush_timestamp = smfn;
   13.99 +        pfn_to_page(gmfn)->u.inuse.type_info &= ~PGT_va_mask;
  13.100 +        pfn_to_page(gmfn)->u.inuse.type_info |= (unsigned long) index << PGT_va_shift;
  13.101      }
  13.102  }
  13.103  
  13.104 @@ -941,7 +941,7 @@ validate_pte_change(
  13.105              //
  13.106              perfc_incrc(validate_pte_changes2);
  13.107              if ( likely(l1e_get_flags(new_spte) & _PAGE_PRESENT) )
  13.108 -                shadow_put_page_type(d, &frame_table[l1e_get_pfn(new_spte)]);
  13.109 +                shadow_put_page_type(d, pfn_to_page(l1e_get_pfn(new_spte)));
  13.110          }
  13.111          else if ( ((l1e_get_flags(old_spte) | l1e_get_flags(new_spte)) &
  13.112                     _PAGE_PRESENT ) &&
  13.113 @@ -1216,8 +1216,8 @@ static inline unsigned long __shadow_sta
  13.114              printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%" PRtype_info " "
  13.115                     "mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
  13.116                     d->domain_id, gpfn, gmfn, stype,
  13.117 -                   frame_table[gmfn].count_info,
  13.118 -                   frame_table[gmfn].u.inuse.type_info,
  13.119 +                   pfn_to_page(gmfn)->count_info,
  13.120 +                   pfn_to_page(gmfn)->u.inuse.type_info,
  13.121                     mfn_out_of_sync(gmfn), mfn_is_page_table(gmfn));
  13.122              BUG();
  13.123          }
  13.124 @@ -1597,7 +1597,7 @@ shadow_mode_page_writable(unsigned long 
  13.125      struct vcpu *v = current;
  13.126      struct domain *d = v->domain;
  13.127      unsigned long mfn = __gpfn_to_mfn(d, gpfn);
  13.128 -    u32 type = frame_table[mfn].u.inuse.type_info & PGT_type_mask;
  13.129 +    u32 type = pfn_to_page(mfn)->u.inuse.type_info & PGT_type_mask;
  13.130  
  13.131      if ( shadow_mode_refcounts(d) &&
  13.132           (type == PGT_writable_page) )
    14.1 --- a/xen/include/asm-x86/shadow_public.h	Sun Dec 18 20:29:43 2005 +0100
    14.2 +++ b/xen/include/asm-x86/shadow_public.h	Tue Dec 20 12:46:56 2005 +0100
    14.3 @@ -22,7 +22,7 @@
    14.4  #ifndef _XEN_SHADOW_PUBLIC_H
    14.5  #define _XEN_SHADOW_PUBLIC_H
    14.6  #if CONFIG_PAGING_LEVELS >= 3
    14.7 -#define MFN_PINNED(_x) (frame_table[_x].u.inuse.type_info & PGT_pinned)
    14.8 +#define MFN_PINNED(_x) (pfn_to_page(_x)->u.inuse.type_info & PGT_pinned)
    14.9  
   14.10  extern int alloc_p2m_table(struct domain *d);
   14.11