ia64/xen-unstable

changeset 19134:5848b49b74fc

x86-64: use MFNs for linking together pages on lists

Unless more than 16TB of memory are ever going to be supported in Xen,
this allows reducing the linked list entry in struct page_info from 16
to 8 bytes.
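
The gist, sketched here for orientation only (the real definitions are in the
xen/include/xen/mm.h and xen/include/asm-x86/mm.h hunks below): list links are
stored as 32-bit MFNs rather than pointers, and 2^32 pages of 4KiB each is
exactly 16TB, which is where the limit above comes from.

    /* Illustrative sketch, not the verbatim patch. On x86-64 a
     * struct list_head is two 8-byte pointers (16 bytes); two
     * 32-bit MFNs need only 8 bytes. */
    struct page_list_entry {
        unsigned int next, prev;        /* MFNs of the neighbouring pages */
    };

    struct page_list_head {
        struct page_info *next, *tail;  /* the head still uses real pointers */
    };

Traversal converts an MFN back to a page pointer via mfn_to_page(); end of
list is signalled by ~0 entries and a comparison against the head's tail
pointer rather than by wrapping back to the head.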

This doesn't modify struct shadow_page_info yet, so to meet the
constraints of that 'mirror' structure the list entry is artificially
padded to 16 bytes. That workaround will be removed in a subsequent
patch.
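
Concretely, the x86-64 definition added in xen/include/asm-x86/mm.h (see the
hunk below) carries an extra padding field for exactly this reason:

    struct page_list_entry
    {
        unsigned int next, prev;
        unsigned long _pad_for_sh_; /* until struct shadow_page_info gets updated */
    };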

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jan 30 11:03:28 2009 +0000 (2009-01-30)
parents 686144ac1951
children deab3a069185
files xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/domctl.c xen/arch/x86/e820.c xen/arch/x86/mm.c xen/arch/x86/mm/hap/hap.c xen/arch/x86/mm/hap/p2m-ept.c xen/arch/x86/mm/p2m.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/numa.c xen/common/domain.c xen/common/grant_table.c xen/common/memory.c xen/common/page_alloc.c xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/amd/pci_amd_iommu.c xen/drivers/passthrough/iommu.c xen/include/asm-x86/domain.h xen/include/asm-x86/mm.h xen/include/asm-x86/p2m.h xen/include/asm-x86/perfc.h xen/include/xen/mm.h xen/include/xen/sched.h
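
Most of the diff below is a mechanical conversion of callers from the generic
list API to the new page_list one; the recurring pattern, condensed and
slightly simplified from the hunks that follow, is:

    /* before: links are struct list_head pointers inside struct page_info */
    list_for_each_entry ( page, &d->page_list, list )
        mfn = page_to_mfn(page);
    list_del(&page->list);

    /* after: links are MFNs, so removal (and moves) also need the list
     * head, which still holds real pointers */
    page_list_for_each ( page, &d->page_list )
        mfn = page_to_mfn(page);
    page_list_del(page, &d->page_list);
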
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Jan 30 10:51:01 2009 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Fri Jan 30 11:03:28 2009 +0000
     1.3 @@ -141,7 +141,7 @@ void dump_pageframe_info(struct domain *
     1.4      }
     1.5      else
     1.6      {
     1.7 -        list_for_each_entry ( page, &d->page_list, list )
     1.8 +        page_list_for_each ( page, &d->page_list )
     1.9          {
    1.10              printk("    DomPage %p: caf=%08lx, taf=%" PRtype_info "\n",
    1.11                     _p(page_to_mfn(page)),
    1.12 @@ -154,7 +154,7 @@ void dump_pageframe_info(struct domain *
    1.13          p2m_pod_dump_data(d);
    1.14      }
    1.15  
    1.16 -    list_for_each_entry ( page, &d->xenpage_list, list )
    1.17 +    page_list_for_each ( page, &d->xenpage_list )
    1.18      {
    1.19          printk("    XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
    1.20                 _p(page_to_mfn(page)),
    1.21 @@ -380,7 +380,7 @@ int arch_domain_create(struct domain *d,
    1.22      INIT_LIST_HEAD(&d->arch.pdev_list);
    1.23  
    1.24      d->arch.relmem = RELMEM_not_started;
    1.25 -    INIT_LIST_HEAD(&d->arch.relmem_list);
    1.26 +    INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
    1.27  
    1.28      pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
    1.29      d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order, 0);
    1.30 @@ -1655,27 +1655,25 @@ int hypercall_xlat_continuation(unsigned
    1.31  #endif
    1.32  
    1.33  static int relinquish_memory(
    1.34 -    struct domain *d, struct list_head *list, unsigned long type)
    1.35 +    struct domain *d, struct page_list_head *list, unsigned long type)
    1.36  {
    1.37 -    struct list_head *ent;
    1.38 -    struct page_info  *page;
    1.39 +    struct page_info  *page, *cur;
    1.40      unsigned long     x, y;
    1.41      int               ret = 0;
    1.42  
    1.43      /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    1.44      spin_lock_recursive(&d->page_alloc_lock);
    1.45  
    1.46 -    ent = list->next;
    1.47 -    while ( ent != list )
    1.48 +    page = page_list_first(list);
    1.49 +    while ( !page_list_is_eol(page, list) )
    1.50      {
    1.51 -        page = list_entry(ent, struct page_info, list);
    1.52 -
    1.53          /* Grab a reference to the page so it won't disappear from under us. */
    1.54          if ( unlikely(!get_page(page, d)) )
    1.55          {
    1.56              /* Couldn't get a reference -- someone is freeing this page. */
    1.57 -            ent = ent->next;
    1.58 -            list_move_tail(&page->list, &d->arch.relmem_list);
    1.59 +            cur = page;
    1.60 +            page = page_list_next(page, list);
    1.61 +            page_list_move_tail(cur, list, &d->arch.relmem_list);
    1.62              continue;
    1.63          }
    1.64  
    1.65 @@ -1747,9 +1745,10 @@ static int relinquish_memory(
    1.66          }
    1.67  
    1.68          /* Follow the list chain and /then/ potentially free the page. */
    1.69 -        ent = ent->next;
    1.70 -        list_move_tail(&page->list, &d->arch.relmem_list);
    1.71 -        put_page(page);
    1.72 +        cur = page;
    1.73 +        page = page_list_next(page, list);
    1.74 +        page_list_move_tail(cur, list, &d->arch.relmem_list);
    1.75 +        put_page(cur);
    1.76  
    1.77          if ( hypercall_preempt_check() )
    1.78          {
    1.79 @@ -1758,7 +1757,7 @@ static int relinquish_memory(
    1.80          }
    1.81      }
    1.82  
    1.83 -    list_splice_init(&d->arch.relmem_list, list);
    1.84 +    page_list_splice_init(&d->arch.relmem_list, list);
    1.85  
    1.86   out:
    1.87      spin_unlock_recursive(&d->page_alloc_lock);
     2.1 --- a/xen/arch/x86/domain_build.c	Fri Jan 30 10:51:01 2009 +0000
     2.2 +++ b/xen/arch/x86/domain_build.c	Fri Jan 30 11:03:28 2009 +0000
     2.3 @@ -880,7 +880,7 @@ int __init construct_dom0(
     2.4      }
     2.5      si->first_p2m_pfn = pfn;
     2.6      si->nr_p2m_frames = d->tot_pages - count;
     2.7 -    list_for_each_entry ( page, &d->page_list, list )
     2.8 +    page_list_for_each ( page, &d->page_list )
     2.9      {
    2.10          mfn = page_to_mfn(page);
    2.11          if ( get_gpfn_from_mfn(mfn) >= count )
     3.1 --- a/xen/arch/x86/domctl.c	Fri Jan 30 10:51:01 2009 +0000
     3.2 +++ b/xen/arch/x86/domctl.c	Fri Jan 30 11:03:28 2009 +0000
     3.3 @@ -240,7 +240,7 @@ long arch_do_domctl(
     3.4          struct domain *d = rcu_lock_domain_by_id(domctl->domain);
     3.5          unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
     3.6          uint64_t mfn;
     3.7 -        struct list_head *list_ent;
     3.8 +        struct page_info *page;
     3.9  
    3.10          ret = -EINVAL;
    3.11          if ( d != NULL )
    3.12 @@ -259,19 +259,19 @@ long arch_do_domctl(
    3.13                  goto getmemlist_out;
    3.14              }
    3.15  
    3.16 -            ret = 0;
    3.17 -            list_ent = d->page_list.next;
    3.18 -            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
    3.19 +            ret = i = 0;
    3.20 +            page_list_for_each(page, &d->page_list)
    3.21              {
    3.22 -                mfn = page_to_mfn(list_entry(
    3.23 -                    list_ent, struct page_info, list));
    3.24 +                if ( i >= max_pfns )
    3.25 +                    break;
    3.26 +                mfn = page_to_mfn(page);
    3.27                  if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
    3.28                                            i, &mfn, 1) )
    3.29                  {
    3.30                      ret = -EFAULT;
    3.31                      break;
    3.32                  }
    3.33 -                list_ent = mfn_to_page(mfn)->list.next;
    3.34 +                ++i;
    3.35              }
    3.36              
    3.37              spin_unlock(&d->page_alloc_lock);
     4.1 --- a/xen/arch/x86/e820.c	Fri Jan 30 10:51:01 2009 +0000
     4.2 +++ b/xen/arch/x86/e820.c	Fri Jan 30 11:03:28 2009 +0000
     4.3 @@ -1,10 +1,10 @@
     4.4  #include <xen/config.h>
     4.5  #include <xen/init.h>
     4.6  #include <xen/lib.h>
     4.7 +#include <xen/mm.h>
     4.8  #include <xen/compat.h>
     4.9  #include <xen/dmi.h>
    4.10  #include <asm/e820.h>
    4.11 -#include <asm/mm.h>
    4.12  #include <asm/page.h>
    4.13  
    4.14  /* opt_mem: Limit of physical RAM. Any RAM beyond this point is ignored. */
     5.1 --- a/xen/arch/x86/mm.c	Fri Jan 30 10:51:01 2009 +0000
     5.2 +++ b/xen/arch/x86/mm.c	Fri Jan 30 11:03:28 2009 +0000
     5.3 @@ -333,7 +333,7 @@ void share_xen_page_with_guest(
     5.4          page->count_info |= PGC_allocated | 1;
     5.5          if ( unlikely(d->xenheap_pages++ == 0) )
     5.6              get_knownalive_domain(d);
     5.7 -        list_add_tail(&page->list, &d->xenpage_list);
     5.8 +        page_list_add_tail(page, &d->xenpage_list);
     5.9      }
    5.10  
    5.11      spin_unlock(&d->page_alloc_lock);
    5.12 @@ -3508,7 +3508,7 @@ int steal_page(
    5.13      /* Unlink from original owner. */
    5.14      if ( !(memflags & MEMF_no_refcount) )
    5.15          d->tot_pages--;
    5.16 -    list_del(&page->list);
    5.17 +    page_list_del(page, &d->page_list);
    5.18  
    5.19      spin_unlock(&d->page_alloc_lock);
    5.20      return 0;
     6.1 --- a/xen/arch/x86/mm/hap/hap.c	Fri Jan 30 10:51:01 2009 +0000
     6.2 +++ b/xen/arch/x86/mm/hap/hap.c	Fri Jan 30 11:03:28 2009 +0000
     6.3 @@ -96,11 +96,10 @@ static struct page_info *hap_alloc(struc
     6.4  
     6.5      ASSERT(hap_locked_by_me(d));
     6.6  
     6.7 -    if ( unlikely(list_empty(&d->arch.paging.hap.freelist)) )
     6.8 +    pg = page_list_remove_head(&d->arch.paging.hap.freelist);
     6.9 +    if ( unlikely(!pg) )
    6.10          return NULL;
    6.11  
    6.12 -    pg = list_entry(d->arch.paging.hap.freelist.next, struct page_info, list);
    6.13 -    list_del(&pg->list);
    6.14      d->arch.paging.hap.free_pages--;
    6.15  
    6.16      p = hap_map_domain_page(page_to_mfn(pg));
    6.17 @@ -118,7 +117,7 @@ static void hap_free(struct domain *d, m
    6.18      ASSERT(hap_locked_by_me(d));
    6.19  
    6.20      d->arch.paging.hap.free_pages++;
    6.21 -    list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
    6.22 +    page_list_add_tail(pg, &d->arch.paging.hap.freelist);
    6.23  }
    6.24  
    6.25  static struct page_info *hap_alloc_p2m_page(struct domain *d)
    6.26 @@ -210,15 +209,13 @@ hap_set_allocation(struct domain *d, uns
    6.27              }
    6.28              d->arch.paging.hap.free_pages++;
    6.29              d->arch.paging.hap.total_pages++;
    6.30 -            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
    6.31 +            page_list_add_tail(pg, &d->arch.paging.hap.freelist);
    6.32          }
    6.33          else if ( d->arch.paging.hap.total_pages > pages )
    6.34          {
    6.35              /* Need to return memory to domheap */
    6.36 -            ASSERT(!list_empty(&d->arch.paging.hap.freelist));
    6.37 -            pg = list_entry(d->arch.paging.hap.freelist.next,
    6.38 -                            struct page_info, list);
    6.39 -            list_del(&pg->list);
    6.40 +            pg = page_list_remove_head(&d->arch.paging.hap.freelist);
    6.41 +            ASSERT(pg);
    6.42              d->arch.paging.hap.free_pages--;
    6.43              d->arch.paging.hap.total_pages--;
    6.44              pg->count_info = 0;
    6.45 @@ -393,7 +390,7 @@ static void hap_destroy_monitor_table(st
    6.46  void hap_domain_init(struct domain *d)
    6.47  {
    6.48      hap_lock_init(d);
    6.49 -    INIT_LIST_HEAD(&d->arch.paging.hap.freelist);
    6.50 +    INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
    6.51  
    6.52      /* This domain will use HAP for log-dirty mode */
    6.53      paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
     7.1 --- a/xen/arch/x86/mm/hap/p2m-ept.c	Fri Jan 30 10:51:01 2009 +0000
     7.2 +++ b/xen/arch/x86/mm/hap/p2m-ept.c	Fri Jan 30 11:03:28 2009 +0000
     7.3 @@ -63,7 +63,7 @@ static int ept_set_middle_entry(struct d
     7.4  
     7.5      pg->count_info = 1;
     7.6      pg->u.inuse.type_info = 1 | PGT_validated;
     7.7 -    list_add_tail(&pg->list, &d->arch.p2m->pages);
     7.8 +    page_list_add_tail(pg, &d->arch.p2m->pages);
     7.9  
    7.10      ept_entry->emt = 0;
    7.11      ept_entry->igmt = 0;
     8.1 --- a/xen/arch/x86/mm/p2m.c	Fri Jan 30 10:51:01 2009 +0000
     8.2 +++ b/xen/arch/x86/mm/p2m.c	Fri Jan 30 11:03:28 2009 +0000
     8.3 @@ -175,7 +175,7 @@ p2m_next_level(struct domain *d, mfn_t *
     8.4          struct page_info *pg = d->arch.p2m->alloc_page(d);
     8.5          if ( pg == NULL )
     8.6              return 0;
     8.7 -        list_add_tail(&pg->list, &d->arch.p2m->pages);
     8.8 +        page_list_add_tail(pg, &d->arch.p2m->pages);
     8.9          pg->u.inuse.type_info = type | 1 | PGT_validated;
    8.10          pg->count_info = 1;
    8.11  
    8.12 @@ -214,7 +214,7 @@ p2m_next_level(struct domain *d, mfn_t *
    8.13          struct page_info *pg = d->arch.p2m->alloc_page(d);
    8.14          if ( pg == NULL )
    8.15              return 0;
    8.16 -        list_add_tail(&pg->list, &d->arch.p2m->pages);
    8.17 +        page_list_add_tail(pg, &d->arch.p2m->pages);
    8.18          pg->u.inuse.type_info = PGT_l1_page_table | 1 | PGT_validated;
    8.19          pg->count_info = 1;
    8.20          
    8.21 @@ -300,18 +300,18 @@ p2m_pod_cache_add(struct domain *d,
    8.22      for(i=0; i < 1 << order ; i++)
    8.23      {
    8.24          p = page + i;
    8.25 -        list_del(&p->list);
    8.26 +        page_list_del(p, &d->page_list);
    8.27      }
    8.28  
    8.29      /* Then add the first one to the appropriate populate-on-demand list */
    8.30      switch(order)
    8.31      {
    8.32      case 9:
    8.33 -        list_add_tail(&page->list, &p2md->pod.super); /* lock: page_alloc */
    8.34 +        page_list_add_tail(page, &p2md->pod.super); /* lock: page_alloc */
    8.35          p2md->pod.count += 1 << order;
    8.36          break;
    8.37      case 0:
    8.38 -        list_add_tail(&page->list, &p2md->pod.single); /* lock: page_alloc */
    8.39 +        page_list_add_tail(page, &p2md->pod.single); /* lock: page_alloc */
    8.40          p2md->pod.count += 1 ;
    8.41          break;
    8.42      default:
    8.43 @@ -334,54 +334,51 @@ static struct page_info * p2m_pod_cache_
    8.44      struct page_info *p = NULL;
    8.45      int i;
    8.46  
    8.47 -    if ( order == 9 && list_empty(&p2md->pod.super) )
    8.48 +    if ( order == 9 && page_list_empty(&p2md->pod.super) )
    8.49      {
    8.50          return NULL;
    8.51      }
    8.52 -    else if ( order == 0 && list_empty(&p2md->pod.single) )
    8.53 +    else if ( order == 0 && page_list_empty(&p2md->pod.single) )
    8.54      {
    8.55          unsigned long mfn;
    8.56          struct page_info *q;
    8.57  
    8.58 -        BUG_ON( list_empty(&p2md->pod.super) );
    8.59 +        BUG_ON( page_list_empty(&p2md->pod.super) );
    8.60  
    8.61          /* Break up a superpage to make single pages. NB count doesn't
    8.62           * need to be adjusted. */
    8.63          printk("%s: Breaking up superpage.\n", __func__);
    8.64 -        p = list_entry(p2md->pod.super.next, struct page_info, list);
    8.65 -        list_del(&p->list);
    8.66 +        p = page_list_remove_head(&p2md->pod.super);
    8.67          mfn = mfn_x(page_to_mfn(p));
    8.68  
    8.69          for ( i=0; i<(1<<9); i++ )
    8.70          {
    8.71              q = mfn_to_page(_mfn(mfn+i));
    8.72 -            list_add_tail(&q->list, &p2md->pod.single);
    8.73 +            page_list_add_tail(q, &p2md->pod.single);
    8.74          }
    8.75      }
    8.76  
    8.77      switch ( order )
    8.78      {
    8.79      case 9:
    8.80 -        BUG_ON( list_empty(&p2md->pod.super) );
    8.81 -        p = list_entry(p2md->pod.super.next, struct page_info, list); 
    8.82 +        BUG_ON( page_list_empty(&p2md->pod.super) );
    8.83 +        p = page_list_remove_head(&p2md->pod.super);
    8.84          p2md->pod.count -= 1 << order; /* Lock: page_alloc */
    8.85          break;
    8.86      case 0:
    8.87 -        BUG_ON( list_empty(&p2md->pod.single) );
    8.88 -        p = list_entry(p2md->pod.single.next, struct page_info, list);
    8.89 +        BUG_ON( page_list_empty(&p2md->pod.single) );
    8.90 +        p = page_list_remove_head(&p2md->pod.single);
    8.91          p2md->pod.count -= 1;
    8.92          break;
    8.93      default:
    8.94          BUG();
    8.95      }
    8.96  
    8.97 -    list_del(&p->list);
    8.98 -
    8.99      /* Put the pages back on the domain page_list */
   8.100      for ( i = 0 ; i < (1 << order) ; i++ )
   8.101      {
   8.102          BUG_ON(page_get_owner(p + i) != d);
   8.103 -        list_add_tail(&p[i].list, &d->page_list);
   8.104 +        page_list_add_tail(p + i, &d->page_list);
   8.105      }
   8.106  
   8.107      return p;
   8.108 @@ -425,7 +422,7 @@ p2m_pod_set_cache_target(struct domain *
   8.109          spin_lock(&d->page_alloc_lock);
   8.110  
   8.111          if ( (p2md->pod.count - pod_target) > (1>>9)
   8.112 -             && !list_empty(&p2md->pod.super) )
   8.113 +             && !page_list_empty(&p2md->pod.super) )
   8.114              order = 9;
   8.115          else
   8.116              order = 0;
   8.117 @@ -535,38 +532,27 @@ void
   8.118  p2m_pod_empty_cache(struct domain *d)
   8.119  {
   8.120      struct p2m_domain *p2md = d->arch.p2m;
   8.121 -    struct list_head *q, *p;
   8.122 +    struct page_info *page;
   8.123  
   8.124      spin_lock(&d->page_alloc_lock);
   8.125  
   8.126 -    list_for_each_safe(p, q, &p2md->pod.super) /* lock: page_alloc */
   8.127 +    while ( (page = page_list_remove_head(&p2md->pod.super)) )
   8.128      {
   8.129          int i;
   8.130 -        struct page_info *page;
   8.131              
   8.132 -        list_del(p);
   8.133 -            
   8.134 -        page = list_entry(p, struct page_info, list);
   8.135 -
   8.136          for ( i = 0 ; i < (1 << 9) ; i++ )
   8.137          {
   8.138              BUG_ON(page_get_owner(page + i) != d);
   8.139 -            list_add_tail(&page[i].list, &d->page_list);
   8.140 +            page_list_add_tail(page + i, &d->page_list);
   8.141          }
   8.142  
   8.143          p2md->pod.count -= 1<<9;
   8.144      }
   8.145  
   8.146 -    list_for_each_safe(p, q, &p2md->pod.single)
   8.147 +    while ( (page = page_list_remove_head(&p2md->pod.single)) )
   8.148      {
   8.149 -        struct page_info *page;
   8.150 -            
   8.151 -        list_del(p);
   8.152 -            
   8.153 -        page = list_entry(p, struct page_info, list);
   8.154 -
   8.155          BUG_ON(page_get_owner(page) != d);
   8.156 -        list_add_tail(&page->list, &d->page_list);
   8.157 +        page_list_add_tail(page, &d->page_list);
   8.158  
   8.159          p2md->pod.count -= 1;
   8.160      }
   8.161 @@ -952,7 +938,7 @@ p2m_pod_emergency_sweep_super(struct dom
   8.162           * NB that this is a zero-sum game; we're increasing our cache size
   8.163           * by increasing our 'debt'.  Since we hold the p2m lock,
   8.164           * (entry_count - count) must remain the same. */
   8.165 -        if ( !list_empty(&p2md->pod.super) &&  i < limit )
   8.166 +        if ( !page_list_empty(&p2md->pod.super) &&  i < limit )
   8.167              break;
   8.168      }
   8.169  
   8.170 @@ -1035,12 +1021,12 @@ p2m_pod_demand_populate(struct domain *d
   8.171      }
   8.172  
   8.173      /* If we're low, start a sweep */
   8.174 -    if ( order == 9 && list_empty(&p2md->pod.super) )
   8.175 +    if ( order == 9 && page_list_empty(&p2md->pod.super) )
   8.176          p2m_pod_emergency_sweep_super(d);
   8.177  
   8.178 -    if ( list_empty(&p2md->pod.single) &&
   8.179 +    if ( page_list_empty(&p2md->pod.single) &&
   8.180           ( ( order == 0 )
   8.181 -           || (order == 9 && list_empty(&p2md->pod.super) ) ) )
   8.182 +           || (order == 9 && page_list_empty(&p2md->pod.super) ) ) )
   8.183          p2m_pod_emergency_sweep(d);
   8.184  
   8.185      /* Keep track of the highest gfn demand-populated by a guest fault */
   8.186 @@ -1477,9 +1463,9 @@ int p2m_init(struct domain *d)
   8.187  
   8.188      memset(p2m, 0, sizeof(*p2m));
   8.189      p2m_lock_init(p2m);
   8.190 -    INIT_LIST_HEAD(&p2m->pages);
   8.191 -    INIT_LIST_HEAD(&p2m->pod.super);
   8.192 -    INIT_LIST_HEAD(&p2m->pod.single);
   8.193 +    INIT_PAGE_LIST_HEAD(&p2m->pages);
   8.194 +    INIT_PAGE_LIST_HEAD(&p2m->pod.super);
   8.195 +    INIT_PAGE_LIST_HEAD(&p2m->pod.single);
   8.196  
   8.197      p2m->set_entry = p2m_set_entry;
   8.198      p2m->get_entry = p2m_gfn_to_mfn;
   8.199 @@ -1540,7 +1526,6 @@ int p2m_alloc_table(struct domain *d,
   8.200  
   8.201  {
   8.202      mfn_t mfn = _mfn(INVALID_MFN);
   8.203 -    struct list_head *entry;
   8.204      struct page_info *page, *p2m_top;
   8.205      unsigned int page_count = 0;
   8.206      unsigned long gfn = -1UL;
   8.207 @@ -1566,7 +1551,7 @@ int p2m_alloc_table(struct domain *d,
   8.208          p2m_unlock(p2m);
   8.209          return -ENOMEM;
   8.210      }
   8.211 -    list_add_tail(&p2m_top->list, &p2m->pages);
   8.212 +    page_list_add_tail(p2m_top, &p2m->pages);
   8.213  
   8.214      p2m_top->count_info = 1;
   8.215      p2m_top->u.inuse.type_info =
   8.216 @@ -1587,11 +1572,8 @@ int p2m_alloc_table(struct domain *d,
   8.217          goto error;
   8.218  
   8.219      /* Copy all existing mappings from the page list and m2p */
   8.220 -    for ( entry = d->page_list.next;
   8.221 -          entry != &d->page_list;
   8.222 -          entry = entry->next )
   8.223 +    page_list_for_each(page, &d->page_list)
   8.224      {
   8.225 -        page = list_entry(entry, struct page_info, list);
   8.226          mfn = page_to_mfn(page);
   8.227          gfn = get_gpfn_from_mfn(mfn_x(mfn));
   8.228          page_count++;
   8.229 @@ -1621,19 +1603,14 @@ void p2m_teardown(struct domain *d)
   8.230  /* Return all the p2m pages to Xen.
   8.231   * We know we don't have any extra mappings to these pages */
   8.232  {
   8.233 -    struct list_head *entry, *n;
   8.234      struct page_info *pg;
   8.235      struct p2m_domain *p2m = d->arch.p2m;
   8.236  
   8.237      p2m_lock(p2m);
   8.238      d->arch.phys_table = pagetable_null();
   8.239  
   8.240 -    list_for_each_safe(entry, n, &p2m->pages)
   8.241 -    {
   8.242 -        pg = list_entry(entry, struct page_info, list);
   8.243 -        list_del(entry);
   8.244 +    while ( (pg = page_list_remove_head(&p2m->pages)) )
   8.245          p2m->free_page(d, pg);
   8.246 -    }
   8.247      p2m_unlock(p2m);
   8.248  }
   8.249  
     9.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 10:51:01 2009 +0000
     9.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Jan 30 11:03:28 2009 +0000
     9.3 @@ -49,7 +49,7 @@ void shadow_domain_init(struct domain *d
     9.4      shadow_lock_init(d);
     9.5      for ( i = 0; i <= SHADOW_MAX_ORDER; i++ )
     9.6          INIT_LIST_HEAD(&d->arch.paging.shadow.freelists[i]);
     9.7 -    INIT_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
     9.8 +    INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.p2m_freelist);
     9.9      INIT_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
    9.10  
    9.11      /* Use shadow pagetables for log-dirty support */
    9.12 @@ -1672,7 +1672,7 @@ sh_alloc_p2m_pages(struct domain *d)
    9.13           */
    9.14          page_set_owner(&pg[i], d);
    9.15          pg[i].count_info = 1;
    9.16 -        list_add_tail(&pg[i].list, &d->arch.paging.shadow.p2m_freelist);
    9.17 +        page_list_add_tail(&pg[i], &d->arch.paging.shadow.p2m_freelist);
    9.18      }
    9.19      return 1;
    9.20  }
    9.21 @@ -1681,25 +1681,22 @@ sh_alloc_p2m_pages(struct domain *d)
    9.22  static struct page_info *
    9.23  shadow_alloc_p2m_page(struct domain *d)
    9.24  {
    9.25 -    struct list_head *entry;
    9.26      struct page_info *pg;
    9.27      mfn_t mfn;
    9.28      void *p;
    9.29      
    9.30      shadow_lock(d);
    9.31  
    9.32 -    if ( list_empty(&d->arch.paging.shadow.p2m_freelist) &&
    9.33 +    if ( page_list_empty(&d->arch.paging.shadow.p2m_freelist) &&
    9.34           !sh_alloc_p2m_pages(d) )
    9.35      {
    9.36          shadow_unlock(d);
    9.37          return NULL;
    9.38      }
    9.39 -    entry = d->arch.paging.shadow.p2m_freelist.next;
    9.40 -    list_del(entry);
    9.41 +    pg = page_list_remove_head(&d->arch.paging.shadow.p2m_freelist);
    9.42  
    9.43      shadow_unlock(d);
    9.44  
    9.45 -    pg = list_entry(entry, struct page_info, list);
    9.46      mfn = page_to_mfn(pg);
    9.47      p = sh_map_domain_page(mfn);
    9.48      clear_page(p);
    9.49 @@ -3156,7 +3153,6 @@ void shadow_teardown(struct domain *d)
    9.50  {
    9.51      struct vcpu *v;
    9.52      mfn_t mfn;
    9.53 -    struct list_head *entry, *n;
    9.54      struct page_info *pg;
    9.55  
    9.56      ASSERT(d->is_dying);
    9.57 @@ -3208,12 +3204,8 @@ void shadow_teardown(struct domain *d)
    9.58      }
    9.59  #endif /* (SHADOW_OPTIMIZATIONS & (SHOPT_VIRTUAL_TLB|SHOPT_OUT_OF_SYNC)) */
    9.60  
    9.61 -    list_for_each_safe(entry, n, &d->arch.paging.shadow.p2m_freelist)
    9.62 -    {
    9.63 -        list_del(entry);
    9.64 -        pg = list_entry(entry, struct page_info, list);
    9.65 +    while ( (pg = page_list_remove_head(&d->arch.paging.shadow.p2m_freelist)) )
    9.66          shadow_free_p2m_page(d, pg);
    9.67 -    }
    9.68  
    9.69      if ( d->arch.paging.shadow.total_pages != 0 )
    9.70      {
    10.1 --- a/xen/arch/x86/numa.c	Fri Jan 30 10:51:01 2009 +0000
    10.2 +++ b/xen/arch/x86/numa.c	Fri Jan 30 11:03:28 2009 +0000
    10.3 @@ -312,7 +312,7 @@ static void dump_numa(unsigned char key)
    10.4  		for_each_online_node(i)
    10.5  			page_num_node[i] = 0;
    10.6  
    10.7 -		list_for_each_entry(page, &d->page_list, list)
    10.8 +		page_list_for_each(page, &d->page_list)
    10.9  		{
   10.10  			i = phys_to_nid(page_to_mfn(page) << PAGE_SHIFT);
   10.11  			page_num_node[i]++;
    11.1 --- a/xen/common/domain.c	Fri Jan 30 10:51:01 2009 +0000
    11.2 +++ b/xen/common/domain.c	Fri Jan 30 11:03:28 2009 +0000
    11.3 @@ -233,8 +233,8 @@ struct domain *domain_create(
    11.4      spin_lock_init(&d->page_alloc_lock);
    11.5      spin_lock_init(&d->shutdown_lock);
    11.6      spin_lock_init(&d->hypercall_deadlock_mutex);
    11.7 -    INIT_LIST_HEAD(&d->page_list);
    11.8 -    INIT_LIST_HEAD(&d->xenpage_list);
    11.9 +    INIT_PAGE_LIST_HEAD(&d->page_list);
   11.10 +    INIT_PAGE_LIST_HEAD(&d->xenpage_list);
   11.11  
   11.12      if ( domcr_flags & DOMCRF_hvm )
   11.13          d->is_hvm = 1;
    12.1 --- a/xen/common/grant_table.c	Fri Jan 30 10:51:01 2009 +0000
    12.2 +++ b/xen/common/grant_table.c	Fri Jan 30 11:03:28 2009 +0000
    12.3 @@ -1192,7 +1192,7 @@ gnttab_transfer(
    12.4          /* Okay, add the page to 'e'. */
    12.5          if ( unlikely(e->tot_pages++ == 0) )
    12.6              get_knownalive_domain(e);
    12.7 -        list_add_tail(&page->list, &e->page_list);
    12.8 +        page_list_add_tail(page, &e->page_list);
    12.9          page_set_owner(page, e);
   12.10  
   12.11          spin_unlock(&e->page_alloc_lock);
    13.1 --- a/xen/common/memory.c	Fri Jan 30 10:51:01 2009 +0000
    13.2 +++ b/xen/common/memory.c	Fri Jan 30 11:03:28 2009 +0000
    13.3 @@ -218,8 +218,8 @@ static void decrease_reservation(struct 
    13.4  static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
    13.5  {
    13.6      struct xen_memory_exchange exch;
    13.7 -    LIST_HEAD(in_chunk_list);
    13.8 -    LIST_HEAD(out_chunk_list);
    13.9 +    PAGE_LIST_HEAD(in_chunk_list);
   13.10 +    PAGE_LIST_HEAD(out_chunk_list);
   13.11      unsigned long in_chunk_order, out_chunk_order;
   13.12      xen_pfn_t     gpfn, gmfn, mfn;
   13.13      unsigned long i, j, k;
   13.14 @@ -325,7 +325,7 @@ static long memory_exchange(XEN_GUEST_HA
   13.15                      goto fail;
   13.16                  }
   13.17  
   13.18 -                list_add(&page->list, &in_chunk_list);
   13.19 +                page_list_add(page, &in_chunk_list);
   13.20              }
   13.21          }
   13.22  
   13.23 @@ -339,7 +339,7 @@ static long memory_exchange(XEN_GUEST_HA
   13.24                  goto fail;
   13.25              }
   13.26  
   13.27 -            list_add(&page->list, &out_chunk_list);
   13.28 +            page_list_add(page, &out_chunk_list);
   13.29          }
   13.30  
   13.31          /*
   13.32 @@ -347,10 +347,8 @@ static long memory_exchange(XEN_GUEST_HA
   13.33           */
   13.34  
   13.35          /* Destroy final reference to each input page. */
   13.36 -        while ( !list_empty(&in_chunk_list) )
   13.37 +        while ( (page = page_list_remove_head(&in_chunk_list)) )
   13.38          {
   13.39 -            page = list_entry(in_chunk_list.next, struct page_info, list);
   13.40 -            list_del(&page->list);
   13.41              if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
   13.42                  BUG();
   13.43              mfn = page_to_mfn(page);
   13.44 @@ -360,10 +358,8 @@ static long memory_exchange(XEN_GUEST_HA
   13.45  
   13.46          /* Assign each output page to the domain. */
   13.47          j = 0;
   13.48 -        while ( !list_empty(&out_chunk_list) )
   13.49 +        while ( (page = page_list_remove_head(&out_chunk_list)) )
   13.50          {
   13.51 -            page = list_entry(out_chunk_list.next, struct page_info, list);
   13.52 -            list_del(&page->list);
   13.53              if ( assign_pages(d, page, exch.out.extent_order,
   13.54                                MEMF_no_refcount) )
   13.55                  BUG();
   13.56 @@ -399,21 +395,13 @@ static long memory_exchange(XEN_GUEST_HA
   13.57       */
   13.58   fail:
   13.59      /* Reassign any input pages we managed to steal. */
   13.60 -    while ( !list_empty(&in_chunk_list) )
   13.61 -    {
   13.62 -        page = list_entry(in_chunk_list.next, struct page_info, list);
   13.63 -        list_del(&page->list);
   13.64 +    while ( (page = page_list_remove_head(&in_chunk_list)) )
   13.65          if ( assign_pages(d, page, 0, MEMF_no_refcount) )
   13.66              BUG();
   13.67 -    }
   13.68  
   13.69      /* Free any output pages we managed to allocate. */
   13.70 -    while ( !list_empty(&out_chunk_list) )
   13.71 -    {
   13.72 -        page = list_entry(out_chunk_list.next, struct page_info, list);
   13.73 -        list_del(&page->list);
   13.74 +    while ( (page = page_list_remove_head(&out_chunk_list)) )
   13.75          free_domheap_pages(page, exch.out.extent_order);
   13.76 -    }
   13.77  
   13.78      exch.nr_exchanged = i << in_chunk_order;
   13.79  
    14.1 --- a/xen/common/page_alloc.c	Fri Jan 30 10:51:01 2009 +0000
    14.2 +++ b/xen/common/page_alloc.c	Fri Jan 30 11:03:28 2009 +0000
    14.3 @@ -71,7 +71,7 @@ integer_param("dma_bits", dma_bitsize);
    14.4  #endif
    14.5  
    14.6  static DEFINE_SPINLOCK(page_scrub_lock);
    14.7 -LIST_HEAD(page_scrub_list);
    14.8 +PAGE_LIST_HEAD(page_scrub_list);
    14.9  static unsigned long scrub_pages;
   14.10  
   14.11  /*********************
   14.12 @@ -264,7 +264,7 @@ unsigned long __init alloc_boot_pages(
   14.13  #define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN :  \
   14.14                            (fls(page_to_mfn(pg)) - 1))
   14.15  
   14.16 -typedef struct list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
   14.17 +typedef struct page_list_head heap_by_zone_and_order_t[NR_ZONES][MAX_ORDER+1];
   14.18  static heap_by_zone_and_order_t *_heap[MAX_NUMNODES];
   14.19  #define heap(node, zone, order) ((*_heap[node])[zone][order])
   14.20  
   14.21 @@ -298,7 +298,7 @@ static void init_node_heap(int node)
   14.22  
   14.23      for ( i = 0; i < NR_ZONES; i++ )
   14.24          for ( j = 0; j <= MAX_ORDER; j++ )
   14.25 -            INIT_LIST_HEAD(&(*_heap[node])[i][j]);
   14.26 +            INIT_PAGE_LIST_HEAD(&(*_heap[node])[i][j]);
   14.27  }
   14.28  
   14.29  /* Allocate 2^@order contiguous pages. */
   14.30 @@ -340,7 +340,7 @@ static struct page_info *alloc_heap_page
   14.31  
   14.32              /* Find smallest order which can satisfy the request. */
   14.33              for ( j = order; j <= MAX_ORDER; j++ )
   14.34 -                if ( !list_empty(&heap(node, zone, j)) )
   14.35 +                if ( (pg = page_list_remove_head(&heap(node, zone, j))) )
   14.36                      goto found;
   14.37          } while ( zone-- > zone_lo ); /* careful: unsigned zone may wrap */
   14.38  
   14.39 @@ -354,14 +354,11 @@ static struct page_info *alloc_heap_page
   14.40      return NULL;
   14.41  
   14.42   found: 
   14.43 -    pg = list_entry(heap(node, zone, j).next, struct page_info, list);
   14.44 -    list_del(&pg->list);
   14.45 -
   14.46      /* We may have to halve the chunk a number of times. */
   14.47      while ( j != order )
   14.48      {
   14.49          PFN_ORDER(pg) = --j;
   14.50 -        list_add_tail(&pg->list, &heap(node, zone, j));
   14.51 +        page_list_add_tail(pg, &heap(node, zone, j));
   14.52          pg += 1 << j;
   14.53      }
   14.54      
   14.55 @@ -452,8 +449,8 @@ static void free_heap_pages(
   14.56              if ( allocated_in_map(page_to_mfn(pg)-mask) ||
   14.57                   (PFN_ORDER(pg-mask) != order) )
   14.58                  break;
   14.59 -            list_del(&(pg-mask)->list);
   14.60              pg -= mask;
   14.61 +            page_list_del(pg, &heap(node, zone, order));
   14.62          }
   14.63          else
   14.64          {
   14.65 @@ -461,7 +458,7 @@ static void free_heap_pages(
   14.66              if ( allocated_in_map(page_to_mfn(pg)+mask) ||
   14.67                   (PFN_ORDER(pg+mask) != order) )
   14.68                  break;
   14.69 -            list_del(&(pg+mask)->list);
   14.70 +            page_list_del(pg + mask, &heap(node, zone, order));
   14.71          }
   14.72          
   14.73          order++;
   14.74 @@ -471,7 +468,7 @@ static void free_heap_pages(
   14.75      }
   14.76  
   14.77      PFN_ORDER(pg) = order;
   14.78 -    list_add_tail(&pg->list, &heap(node, zone, order));
   14.79 +    page_list_add_tail(pg, &heap(node, zone, order));
   14.80  
   14.81      spin_unlock(&heap_lock);
   14.82  }
   14.83 @@ -786,7 +783,7 @@ int assign_pages(
   14.84          page_set_owner(&pg[i], d);
   14.85          wmb(); /* Domain pointer must be visible before updating refcnt. */
   14.86          pg[i].count_info = PGC_allocated | 1;
   14.87 -        list_add_tail(&pg[i].list, &d->page_list);
   14.88 +        page_list_add_tail(&pg[i], &d->page_list);
   14.89      }
   14.90  
   14.91      spin_unlock(&d->page_alloc_lock);
   14.92 @@ -844,7 +841,7 @@ void free_domheap_pages(struct page_info
   14.93          spin_lock_recursive(&d->page_alloc_lock);
   14.94  
   14.95          for ( i = 0; i < (1 << order); i++ )
   14.96 -            list_del(&pg[i].list);
   14.97 +            page_list_del2(&pg[i], &d->xenpage_list, &d->arch.relmem_list);
   14.98  
   14.99          d->xenheap_pages -= 1 << order;
  14.100          drop_dom_ref = (d->xenheap_pages == 0);
  14.101 @@ -859,7 +856,7 @@ void free_domheap_pages(struct page_info
  14.102          for ( i = 0; i < (1 << order); i++ )
  14.103          {
  14.104              BUG_ON((pg[i].u.inuse.type_info & PGT_count_mask) != 0);
  14.105 -            list_del(&pg[i].list);
  14.106 +            page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
  14.107          }
  14.108  
  14.109          d->tot_pages -= 1 << order;
  14.110 @@ -882,7 +879,7 @@ void free_domheap_pages(struct page_info
  14.111              {
  14.112                  page_set_owner(&pg[i], NULL);
  14.113                  spin_lock(&page_scrub_lock);
  14.114 -                list_add(&pg[i].list, &page_scrub_list);
  14.115 +                page_list_add(&pg[i], &page_scrub_list);
  14.116                  scrub_pages++;
  14.117                  spin_unlock(&page_scrub_lock);
  14.118              }
  14.119 @@ -965,7 +962,7 @@ static DEFINE_PER_CPU(struct timer, page
  14.120  
  14.121  static void page_scrub_softirq(void)
  14.122  {
  14.123 -    struct list_head *ent;
  14.124 +    PAGE_LIST_HEAD(list);
  14.125      struct page_info  *pg;
  14.126      void             *p;
  14.127      int               i;
  14.128 @@ -983,32 +980,26 @@ static void page_scrub_softirq(void)
  14.129      do {
  14.130          spin_lock(&page_scrub_lock);
  14.131  
  14.132 -        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
  14.133 +        /* Peel up to 16 pages from the list. */
  14.134 +        for ( i = 0; i < 16; i++ )
  14.135 +        {
  14.136 +            if ( !(pg = page_list_remove_head(&page_scrub_list)) )
  14.137 +                break;
  14.138 +            page_list_add_tail(pg, &list);
  14.139 +        }
  14.140 +        
  14.141 +        if ( unlikely(i == 0) )
  14.142          {
  14.143              spin_unlock(&page_scrub_lock);
  14.144              goto out;
  14.145          }
  14.146 -        
  14.147 -        /* Peel up to 16 pages from the list. */
  14.148 -        for ( i = 0; i < 16; i++ )
  14.149 -        {
  14.150 -            if ( ent->next == &page_scrub_list )
  14.151 -                break;
  14.152 -            ent = ent->next;
  14.153 -        }
  14.154 -        
  14.155 -        /* Remove peeled pages from the list. */
  14.156 -        ent->next->prev = &page_scrub_list;
  14.157 -        page_scrub_list.next = ent->next;
  14.158 -        scrub_pages -= (i+1);
  14.159 +
  14.160 +        scrub_pages -= i;
  14.161  
  14.162          spin_unlock(&page_scrub_lock);
  14.163  
  14.164 -        /* Working backwards, scrub each page in turn. */
  14.165 -        while ( ent != &page_scrub_list )
  14.166 -        {
  14.167 -            pg = list_entry(ent, struct page_info, list);
  14.168 -            ent = ent->prev;
  14.169 +        /* Scrub each page in turn. */
  14.170 +        while ( (pg = page_list_remove_head(&list)) ) {
  14.171              p = map_domain_page(page_to_mfn(pg));
  14.172              scrub_page(p);
  14.173              unmap_domain_page(p);
    15.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Fri Jan 30 10:51:01 2009 +0000
    15.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Fri Jan 30 11:03:28 2009 +0000
    15.3 @@ -552,7 +552,6 @@ int amd_iommu_sync_p2m(struct domain *d)
    15.4  {
    15.5      unsigned long mfn, gfn, flags;
    15.6      u64 iommu_l2e;
    15.7 -    struct list_head *entry;
    15.8      struct page_info *page;
    15.9      struct hvm_iommu *hd;
   15.10      int iw = IOMMU_IO_WRITE_ENABLED;
   15.11 @@ -568,10 +567,8 @@ int amd_iommu_sync_p2m(struct domain *d)
   15.12      if ( hd->p2m_synchronized )
   15.13          goto out;
   15.14  
   15.15 -    for ( entry = d->page_list.next; entry != &d->page_list;
   15.16 -            entry = entry->next )
   15.17 +    page_list_for_each ( page, &d->page_list )
   15.18      {
   15.19 -        page = list_entry(entry, struct page_info, list);
   15.20          mfn = page_to_mfn(page);
   15.21          gfn = get_gpfn_from_mfn(mfn);
   15.22  
    16.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Jan 30 10:51:01 2009 +0000
    16.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Fri Jan 30 11:03:28 2009 +0000
    16.3 @@ -23,7 +23,6 @@
    16.4  #include <xen/pci_regs.h>
    16.5  #include <asm/amd-iommu.h>
    16.6  #include <asm/hvm/svm/amd-iommu-proto.h>
    16.7 -#include <asm/mm.h>
    16.8  
    16.9  extern unsigned short ivrs_bdf_entries;
   16.10  extern struct ivrs_mappings *ivrs_mappings;
    17.1 --- a/xen/drivers/passthrough/iommu.c	Fri Jan 30 10:51:01 2009 +0000
    17.2 +++ b/xen/drivers/passthrough/iommu.c	Fri Jan 30 11:03:28 2009 +0000
    17.3 @@ -141,7 +141,7 @@ static int iommu_populate_page_table(str
    17.4  
    17.5      spin_lock(&d->page_alloc_lock);
    17.6  
    17.7 -    list_for_each_entry ( page, &d->page_list, list )
    17.8 +    page_list_for_each ( page, &d->page_list )
    17.9      {
   17.10          if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
   17.11          {
    18.1 --- a/xen/include/asm-x86/domain.h	Fri Jan 30 10:51:01 2009 +0000
    18.2 +++ b/xen/include/asm-x86/domain.h	Fri Jan 30 11:03:28 2009 +0000
    18.3 @@ -83,7 +83,7 @@ struct shadow_domain {
    18.4  
    18.5      /* Memory allocation */
    18.6      struct list_head  freelists[SHADOW_MAX_ORDER + 1];
    18.7 -    struct list_head  p2m_freelist;
    18.8 +    struct page_list_head p2m_freelist;
    18.9      unsigned int      total_pages;  /* number of pages allocated */
   18.10      unsigned int      free_pages;   /* number of pages on freelists */
   18.11      unsigned int      p2m_pages;    /* number of pages allocates to p2m */
   18.12 @@ -143,7 +143,7 @@ struct hap_domain {
   18.13      int               locker;
   18.14      const char       *locker_function;
   18.15  
   18.16 -    struct list_head  freelist;
   18.17 +    struct page_list_head freelist;
   18.18      unsigned int      total_pages;  /* number of pages allocated */
   18.19      unsigned int      free_pages;   /* number of pages on freelists */
   18.20      unsigned int      p2m_pages;    /* number of pages allocates to p2m */
   18.21 @@ -265,7 +265,7 @@ struct arch_domain
   18.22          RELMEM_l2,
   18.23          RELMEM_done,
   18.24      } relmem;
   18.25 -    struct list_head relmem_list;
   18.26 +    struct page_list_head relmem_list;
   18.27  
   18.28      cpuid_input_t cpuids[MAX_CPUID_INPUT];
   18.29  } __cacheline_aligned;
    19.1 --- a/xen/include/asm-x86/mm.h	Fri Jan 30 10:51:01 2009 +0000
    19.2 +++ b/xen/include/asm-x86/mm.h	Fri Jan 30 11:03:28 2009 +0000
    19.3 @@ -12,15 +12,24 @@
    19.4   * Per-page-frame information.
    19.5   * 
    19.6   * Every architecture must ensure the following:
    19.7 - *  1. 'struct page_info' contains a 'struct list_head list'.
    19.8 + *  1. 'struct page_info' contains a 'struct page_list_entry list'.
    19.9   *  2. Provide a PFN_ORDER() macro for accessing the order of a free page.
   19.10   */
   19.11  #define PFN_ORDER(_pfn) ((_pfn)->u.free.order)
   19.12  
   19.13 +#ifndef __i386__
   19.14 +# undef page_list_entry
   19.15 +struct page_list_entry
   19.16 +{
   19.17 +    unsigned int next, prev;
   19.18 +    unsigned long _pad_for_sh_; /* until struct shadow_page_info gets updated */
   19.19 +};
   19.20 +#endif
   19.21 +
   19.22  struct page_info
   19.23  {
   19.24      /* Each frame can be threaded onto a doubly-linked list. */
   19.25 -    struct list_head list;
   19.26 +    struct page_list_entry list;
   19.27  
   19.28      /* Reference count and various PGC_xxx flags and fields. */
   19.29      unsigned long count_info;
    20.1 --- a/xen/include/asm-x86/p2m.h	Fri Jan 30 10:51:01 2009 +0000
    20.2 +++ b/xen/include/asm-x86/p2m.h	Fri Jan 30 11:03:28 2009 +0000
    20.3 @@ -110,7 +110,7 @@ struct p2m_domain {
    20.4      const char        *locker_function; /* Func that took it */
    20.5  
    20.6      /* Pages used to construct the p2m */
    20.7 -    struct list_head   pages;
    20.8 +    struct page_list_head pages;
    20.9  
   20.10      /* Functions to call to get or free pages for the p2m */
   20.11      struct page_info * (*alloc_page  )(struct domain *d);
   20.12 @@ -148,7 +148,7 @@ struct p2m_domain {
   20.13       *   protect moving stuff from the PoD cache to the domain page list.
   20.14       */
   20.15      struct {
   20.16 -        struct list_head super,        /* List of superpages                */
   20.17 +        struct page_list_head super,   /* List of superpages                */
   20.18                           single;       /* Non-super lists                   */
   20.19          int              count,        /* # of pages in cache lists         */
   20.20                           entry_count;  /* # of pages in p2m marked pod      */
    21.1 --- a/xen/include/asm-x86/perfc.h	Fri Jan 30 10:51:01 2009 +0000
    21.2 +++ b/xen/include/asm-x86/perfc.h	Fri Jan 30 11:03:28 2009 +0000
    21.3 @@ -1,6 +1,5 @@
    21.4  #ifndef __ASM_PERFC_H__
    21.5  #define __ASM_PERFC_H__
    21.6 -#include <asm/mm.h>
    21.7  
    21.8  static inline void arch_perfc_printall(void)
    21.9  {
    22.1 --- a/xen/include/xen/mm.h	Fri Jan 30 10:51:01 2009 +0000
    22.2 +++ b/xen/include/xen/mm.h	Fri Jan 30 11:03:28 2009 +0000
    22.3 @@ -85,22 +85,221 @@ int assign_pages(
    22.4  #define MAX_ORDER 20 /* 2^20 contiguous pages */
    22.5  #endif
    22.6  
    22.7 +#define page_list_entry list_head
    22.8 +
    22.9 +#include <asm/mm.h>
   22.10 +
   22.11 +#ifndef page_list_entry
   22.12 +struct page_list_head
   22.13 +{
   22.14 +    struct page_info *next, *tail;
   22.15 +};
   22.16 +/* These must only have instances in struct page_info. */
   22.17 +# define page_list_entry
   22.18 +
   22.19 +# define PAGE_LIST_HEAD_INIT(name) { NULL, NULL }
   22.20 +# define PAGE_LIST_HEAD(name) \
   22.21 +    struct page_list_head name = PAGE_LIST_HEAD_INIT(name)
   22.22 +# define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)
   22.23 +# define INIT_PAGE_LIST_ENTRY(ent) ((ent)->prev = (ent)->next = ~0)
   22.24 +
   22.25 +static inline int
   22.26 +page_list_empty(const struct page_list_head *head)
   22.27 +{
   22.28 +    return !head->next;
   22.29 +}
   22.30 +static inline struct page_info *
   22.31 +page_list_first(const struct page_list_head *head)
   22.32 +{
   22.33 +    return head->next;
   22.34 +}
   22.35 +static inline struct page_info *
   22.36 +page_list_next(const struct page_info *page,
   22.37 +               const struct page_list_head *head)
   22.38 +{
   22.39 +    return page != head->tail ? mfn_to_page(page->list.next) : NULL;
   22.40 +}
   22.41 +static inline struct page_info *
   22.42 +page_list_prev(const struct page_info *page,
   22.43 +               const struct page_list_head *head)
   22.44 +{
   22.45 +    return page != head->next ? mfn_to_page(page->list.prev) : NULL;
   22.46 +}
   22.47 +static inline int
   22.48 +page_list_is_eol(const struct page_info *page,
   22.49 +                 const struct page_list_head *head)
   22.50 +{
   22.51 +    return !page;
   22.52 +}
   22.53 +static inline void
   22.54 +page_list_add(struct page_info *page, struct page_list_head *head)
   22.55 +{
   22.56 +    if ( head->next )
   22.57 +    {
   22.58 +        page->list.next = page_to_mfn(head->next);
   22.59 +        head->next->list.prev = page_to_mfn(page);
   22.60 +    }
   22.61 +    else
   22.62 +    {
   22.63 +        head->tail = page;
   22.64 +        page->list.next = ~0;
   22.65 +    }
   22.66 +    page->list.prev = ~0;
   22.67 +    head->next = page;
   22.68 +}
   22.69 +static inline void
   22.70 +page_list_add_tail(struct page_info *page, struct page_list_head *head)
   22.71 +{
   22.72 +    page->list.next = ~0;
   22.73 +    if ( head->next )
   22.74 +    {
   22.75 +        page->list.prev = page_to_mfn(head->tail);
   22.76 +        head->tail->list.next = page_to_mfn(page);
   22.77 +    }
   22.78 +    else
   22.79 +    {
   22.80 +        page->list.prev = ~0;
   22.81 +        head->next = page;
   22.82 +    }
   22.83 +    head->tail = page;
   22.84 +}
   22.85 +static inline bool_t
   22.86 +__page_list_del_head(struct page_info *page, struct page_list_head *head,
   22.87 +                     struct page_info *next, struct page_info *prev)
   22.88 +{
   22.89 +    if ( head->next == page )
   22.90 +    {
   22.91 +        if ( head->tail != page )
   22.92 +        {
   22.93 +            next->list.prev = ~0;
   22.94 +            head->next = next;
   22.95 +        }
   22.96 +        else
   22.97 +            head->tail = head->next = NULL;
   22.98 +        return 1;
   22.99 +    }
  22.100 +
  22.101 +    if ( head->tail == page )
  22.102 +    {
  22.103 +        prev->list.next = ~0;
  22.104 +        head->tail = prev;
  22.105 +        return 1;
  22.106 +    }
  22.107 +
  22.108 +    return 0;
  22.109 +}
  22.110 +static inline void
  22.111 +page_list_del(struct page_info *page, struct page_list_head *head)
  22.112 +{
  22.113 +    struct page_info *next = mfn_to_page(page->list.next);
  22.114 +    struct page_info *prev = mfn_to_page(page->list.prev);
  22.115 +
  22.116 +    if ( !__page_list_del_head(page, head, next, prev) )
  22.117 +    {
  22.118 +        next->list.prev = page->list.prev;
  22.119 +        prev->list.next = page->list.next;
  22.120 +    }
  22.121 +}
  22.122 +static inline void
  22.123 +page_list_del2(struct page_info *page, struct page_list_head *head1,
  22.124 +               struct page_list_head *head2)
  22.125 +{
  22.126 +    struct page_info *next = mfn_to_page(page->list.next);
  22.127 +    struct page_info *prev = mfn_to_page(page->list.prev);
  22.128 +
  22.129 +    if ( !__page_list_del_head(page, head1, next, prev) &&
  22.130 +         !__page_list_del_head(page, head2, next, prev) )
  22.131 +    {
  22.132 +        next->list.prev = page->list.prev;
  22.133 +        prev->list.next = page->list.next;
  22.134 +    }
  22.135 +}
  22.136 +static inline void
  22.137 +page_list_move_tail(struct page_info *page, struct page_list_head *list,
  22.138 +                    struct page_list_head *head)
  22.139 +{
  22.140 +    page_list_del(page, list);
  22.141 +    page_list_add_tail(page, head);
  22.142 +}
  22.143 +static inline struct page_info *
  22.144 +page_list_remove_head(struct page_list_head *head)
  22.145 +{
  22.146 +    struct page_info *page = head->next;
  22.147 +
  22.148 +    if ( page )
  22.149 +        page_list_del(page, head);
  22.150 +
  22.151 +    return page;
  22.152 +}
  22.153 +static inline void
  22.154 +page_list_splice_init(struct page_list_head *list, struct page_list_head *head)
  22.155 +{
  22.156 +    if ( !page_list_empty(list) )
  22.157 +    {
  22.158 +        if ( head->next )
  22.159 +            head->tail->list.next = page_to_mfn(list->next);
  22.160 +        else
  22.161 +            head->next = list->next;
  22.162 +        head->tail = list->tail;
  22.163 +        INIT_PAGE_LIST_HEAD(list);
  22.164 +    }
  22.165 +}
  22.166 +
  22.167 +#define page_list_for_each(pos, head) \
  22.168 +    for ( pos = (head)->next; pos; pos = page_list_next(pos, head) )
  22.169 +#define page_list_for_each_safe(pos, tmp, head) \
  22.170 +    for ( pos = (head)->next; \
  22.171 +          pos ? (tmp = page_list_next(pos, head), 1) : 0; \
  22.172 +          pos = tmp )
  22.173 +#define page_list_for_each_safe_reverse(pos, tmp, head) \
  22.174 +    for ( pos = (head)->tail; \
  22.175 +          pos ? (tmp = page_list_prev(pos, head), 1) : 0; \
  22.176 +          pos = tmp )
  22.177 +#else
  22.178 +# define page_list_head                  list_head
  22.179 +# define PAGE_LIST_HEAD_INIT             LIST_HEAD_INIT
  22.180 +# define PAGE_LIST_HEAD                  LIST_HEAD
  22.181 +# define INIT_PAGE_LIST_HEAD             INIT_LIST_HEAD
  22.182 +# define INIT_PAGE_LIST_ENTRY            INIT_LIST_HEAD
  22.183 +# define page_list_empty                 list_empty
  22.184 +# define page_list_first(hd)             list_entry((hd)->next, \
  22.185 +                                                    struct page_info, list)
  22.186 +# define page_list_next(pg, hd)          list_entry((pg)->list.next, \
  22.187 +                                                    struct page_info, list)
  22.188 +# define page_list_is_eol(pg, hd)        (&(pg)->list == (hd))
  22.189 +# define page_list_add(pg, hd)           list_add(&(pg)->list, hd)
  22.190 +# define page_list_add_tail(pg, hd)      list_add_tail(&(pg)->list, hd)
  22.191 +# define page_list_del(pg, hd)           list_del(&(pg)->list)
  22.192 +# define page_list_del2(pg, hd1, hd2)    list_del(&(pg)->list)
  22.193 +# define page_list_move_tail(pg, o, n)   list_move_tail(&(pg)->list, n)
  22.194 +# define page_list_remove_head(hd)       (!page_list_empty(hd) ? \
  22.195 +    ({ \
  22.196 +        struct page_info *__pg = page_list_first(hd); \
  22.197 +        list_del(&__pg->list); \
  22.198 +        __pg; \
  22.199 +    }) : NULL)
  22.200 +# define page_list_splice_init           list_splice_init
  22.201 +# define page_list_for_each(pos, head)   list_for_each_entry(pos, head, list)
  22.202 +# define page_list_for_each_safe(pos, tmp, head) \
  22.203 +    list_for_each_entry_safe(pos, tmp, head, list)
  22.204 +# define page_list_for_each_safe_reverse(pos, tmp, head) \
  22.205 +    list_for_each_entry_safe_reverse(pos, tmp, head, list)
  22.206 +#endif
  22.207 +
  22.208  /* Automatic page scrubbing for dead domains. */
  22.209 -extern struct list_head page_scrub_list;
  22.210 -#define page_scrub_schedule_work()              \
  22.211 -    do {                                        \
  22.212 -        if ( !list_empty(&page_scrub_list) )    \
  22.213 -            raise_softirq(PAGE_SCRUB_SOFTIRQ);  \
  22.214 +extern struct page_list_head page_scrub_list;
  22.215 +#define page_scrub_schedule_work()                 \
  22.216 +    do {                                           \
  22.217 +        if ( !page_list_empty(&page_scrub_list) )  \
  22.218 +            raise_softirq(PAGE_SCRUB_SOFTIRQ);     \
  22.219      } while ( 0 )
  22.220  #define page_scrub_kick()                                               \
  22.221      do {                                                                \
  22.222 -        if ( !list_empty(&page_scrub_list) )                            \
  22.223 +        if ( !page_list_empty(&page_scrub_list) )                       \
  22.224              cpumask_raise_softirq(cpu_online_map, PAGE_SCRUB_SOFTIRQ);  \
  22.225      } while ( 0 )
  22.226  unsigned long avail_scrub_pages(void);
  22.227  
  22.228 -#include <asm/mm.h>
  22.229 -
  22.230  int guest_remove_page(struct domain *d, unsigned long gmfn);
  22.231  
  22.232  /* Returns TRUE if the whole page at @mfn is ordinary RAM. */
    23.1 --- a/xen/include/xen/sched.h	Fri Jan 30 10:51:01 2009 +0000
    23.2 +++ b/xen/include/xen/sched.h	Fri Jan 30 11:03:28 2009 +0000
    23.3 @@ -171,8 +171,8 @@ struct domain
    23.4      spinlock_t       domain_lock;
    23.5  
    23.6      spinlock_t       page_alloc_lock; /* protects all the following fields  */
    23.7 -    struct list_head page_list;       /* linked list, of size tot_pages     */
    23.8 -    struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
    23.9 +    struct page_list_head page_list;  /* linked list, of size tot_pages     */
   23.10 +    struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
   23.11      unsigned int     tot_pages;       /* number of pages currently possesed */
   23.12      unsigned int     max_pages;       /* maximum value for tot_pages        */
   23.13      unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */