ia64/xen-unstable

changeset 19202:af0da711bbdb

[IA64] use page_list_head and related accessors.

Switch to page_list_head, page_list_entry and the page_list_* accessors in place of struct list_head, for consistency with the x86 code.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Fri Feb 13 11:22:59 2009 +0900 (2009-02-13)
parents c7cba853583d
children 726b56720b16
files xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/mm.c xen/arch/ia64/xen/tlb_track.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/tlb_track.h
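
For reference, the hunks below replace direct struct list_head manipulation of page_info.list with the page_list_* interface already used on x86. A minimal sketch of the usage pattern, modelled on the tlb_track.c changes and assuming the declarations in xen/include/xen/mm.h; the pool/fill_pool/drain_pool names and the alloc_domheap_page() call are illustrative only, not part of this changeset:

    #include <xen/errno.h>   /* ENOMEM */
    #include <xen/mm.h>      /* struct page_list_head and page_list_* helpers */
    #include <xen/sched.h>   /* struct domain */

    /* Illustrative only: keep a private pool of domheap pages threaded on a
     * page_list_head, mirroring the tlb_track.c usage below.  The names
     * "pool", "fill_pool" and "drain_pool" are made up for this sketch. */
    static struct page_list_head pool;

    static int fill_pool(struct domain *d, unsigned int nr)
    {
        struct page_info *pg;

        INIT_PAGE_LIST_HEAD(&pool);
        while ( nr-- )
        {
            /* Assumes the usual two-argument domheap allocator. */
            pg = alloc_domheap_page(d, 0);
            if ( pg == NULL )
                return -ENOMEM;
            /* Link the frame through its embedded page_list_entry. */
            page_list_add(pg, &pool);
        }
        return 0;
    }

    static void drain_pool(void)
    {
        struct page_info *pg, *next;

        /* _safe variant: the current entry is unlinked and freed in the body. */
        page_list_for_each_safe ( pg, next, &pool )
        {
            page_list_del(pg, &pool);
            free_domheap_page(pg);
        }
    }

On x86 the same interface allows the page_info list entry to be compressed to 32-bit page indices; the #if 0 block added to asm-ia64/mm.h below records that ia64 keeps the uncompressed representation for now, given IA64_MAX_PHYS_BITS and the page size.
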
line diff
     1.1 --- a/xen/arch/ia64/xen/domain.c	Fri Feb 13 11:22:28 2009 +0900
     1.2 +++ b/xen/arch/ia64/xen/domain.c	Fri Feb 13 11:22:59 2009 +0900
     1.3 @@ -608,7 +608,7 @@ int arch_domain_create(struct domain *d,
     1.4  	memset(&d->arch.mm, 0, sizeof(d->arch.mm));
     1.5  	d->arch.relres = RELRES_not_started;
     1.6  	d->arch.mm_teardown_offset = 0;
     1.7 -	INIT_LIST_HEAD(&d->arch.relmem_list);
     1.8 +	INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
     1.9  
    1.10  	if ((d->arch.mm.pgd = pgd_alloc(&d->arch.mm)) == NULL)
    1.11  	    goto fail_nomem;
    1.12 @@ -1626,9 +1626,8 @@ int arch_set_info_guest(struct vcpu *v, 
    1.13  	return rc;
    1.14  }
    1.15  
    1.16 -static int relinquish_memory(struct domain *d, struct list_head *list)
    1.17 +static int relinquish_memory(struct domain *d, struct page_list_head *list)
    1.18  {
    1.19 -    struct list_head *ent;
    1.20      struct page_info *page;
    1.21  #ifndef __ia64__
    1.22      unsigned long     x, y;
    1.23 @@ -1637,16 +1636,14 @@ static int relinquish_memory(struct doma
    1.24  
    1.25      /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    1.26      spin_lock_recursive(&d->page_alloc_lock);
    1.27 -    ent = list->next;
    1.28 -    while ( ent != list )
    1.29 +
    1.30 +    while ( (page = page_list_remove_head(list)) )
    1.31      {
    1.32 -        page = list_entry(ent, struct page_info, list);
    1.33          /* Grab a reference to the page so it won't disappear from under us. */
    1.34          if ( unlikely(!get_page(page, d)) )
    1.35          {
    1.36              /* Couldn't get a reference -- someone is freeing this page. */
    1.37 -            ent = ent->next;
    1.38 -            list_move_tail(&page->list, &d->arch.relmem_list);
    1.39 +            page_list_add_tail(page, &d->arch.relmem_list);
    1.40              continue;
    1.41          }
    1.42  
    1.43 @@ -1681,9 +1678,8 @@ static int relinquish_memory(struct doma
    1.44  #endif
    1.45  
    1.46          /* Follow the list chain and /then/ potentially free the page. */
    1.47 -        ent = ent->next;
    1.48          BUG_ON(get_gpfn_from_mfn(page_to_mfn(page)) != INVALID_M2P_ENTRY);
    1.49 -        list_move_tail(&page->list, &d->arch.relmem_list);
    1.50 +        page_list_add_tail(page, &d->arch.relmem_list);
    1.51          put_page(page);
    1.52  
    1.53          if (hypercall_preempt_check()) {
    1.54 @@ -1692,7 +1688,13 @@ static int relinquish_memory(struct doma
    1.55          }
    1.56      }
    1.57  
    1.58 -    list_splice_init(&d->arch.relmem_list, list);
    1.59 +    /* list is empty at this point. */
    1.60 +    if ( !page_list_empty(&d->arch.relmem_list) )
    1.61 +    {
    1.62 +        *list = d->arch.relmem_list;
    1.63 +        INIT_PAGE_LIST_HEAD(&d->arch.relmem_list);
    1.64 +    }
    1.65 +
    1.66  
    1.67   out:
    1.68      spin_unlock_recursive(&d->page_alloc_lock);
     2.1 --- a/xen/arch/ia64/xen/mm.c	Fri Feb 13 11:22:28 2009 +0900
     2.2 +++ b/xen/arch/ia64/xen/mm.c	Fri Feb 13 11:22:59 2009 +0900
     2.3 @@ -474,7 +474,7 @@ share_xen_page_with_guest(struct page_in
     2.4          page->count_info |= PGC_allocated | 1;
     2.5          if ( unlikely(d->xenheap_pages++ == 0) )
     2.6              get_knownalive_domain(d);
     2.7 -        list_add_tail(&page->list, &d->xenpage_list);
     2.8 +        page_list_add_tail(page, &d->xenpage_list);
     2.9      }
    2.10  
    2.11      // grant_table_destroy() releases these pages.
    2.12 @@ -2856,7 +2856,7 @@ steal_page(struct domain *d, struct page
    2.13      /* Unlink from original owner. */
    2.14      if ( !(memflags & MEMF_no_refcount) )
    2.15          d->tot_pages--;
    2.16 -    list_del(&page->list);
    2.17 +    page_list_del(page, &d->page_list);
    2.18  
    2.19      spin_unlock(&d->page_alloc_lock);
    2.20      perfc_incr(steal_page);
     3.1 --- a/xen/arch/ia64/xen/tlb_track.c	Fri Feb 13 11:22:28 2009 +0900
     3.2 +++ b/xen/arch/ia64/xen/tlb_track.c	Fri Feb 13 11:22:59 2009 +0900
     3.3 @@ -56,7 +56,7 @@ tlb_track_allocate_entries(struct tlb_tr
     3.4          return -ENOMEM;
     3.5      }
     3.6  
     3.7 -    list_add(&entry_page->list, &tlb_track->page_list);
     3.8 +    page_list_add(entry_page, &tlb_track->page_list);
     3.9      track_entries = (struct tlb_track_entry*)page_to_virt(entry_page);
    3.10      allocated = PAGE_SIZE / sizeof(track_entries[0]);
    3.11      tlb_track->num_entries += allocated;
    3.12 @@ -93,7 +93,7 @@ tlb_track_create(struct domain* d)
    3.13      tlb_track->limit = TLB_TRACK_LIMIT_ENTRIES;
    3.14      tlb_track->num_entries = 0;
    3.15      tlb_track->num_free = 0;
    3.16 -    INIT_LIST_HEAD(&tlb_track->page_list);
    3.17 +    INIT_PAGE_LIST_HEAD(&tlb_track->page_list);
    3.18      if (tlb_track_allocate_entries(tlb_track) < 0)
    3.19          goto out;
    3.20  
    3.21 @@ -136,8 +136,8 @@ tlb_track_destroy(struct domain* d)
    3.22      spin_lock(&tlb_track->free_list_lock);
    3.23      BUG_ON(tlb_track->num_free != tlb_track->num_entries);
    3.24  
    3.25 -    list_for_each_entry_safe(page, next, &tlb_track->page_list, list) {
    3.26 -        list_del(&page->list);
    3.27 +    page_list_for_each_safe(page, next, &tlb_track->page_list) {
    3.28 +        page_list_del(page, &tlb_track->page_list);
    3.29          free_domheap_page(page);
    3.30      }
    3.31  
     4.1 --- a/xen/include/asm-ia64/domain.h	Fri Feb 13 11:22:28 2009 +0900
     4.2 +++ b/xen/include/asm-ia64/domain.h	Fri Feb 13 11:22:59 2009 +0900
     4.3 @@ -10,6 +10,7 @@
     4.4  #include <asm/vmx_platform.h>
     4.5  #include <xen/list.h>
     4.6  #include <xen/cpumask.h>
     4.7 +#include <xen/mm.h>
     4.8  #include <asm/fpswa.h>
     4.9  #include <xen/rangeset.h>
    4.10  
    4.11 @@ -224,7 +225,7 @@ struct arch_domain {
    4.12      /* Continuable mm_teardown() */
    4.13      unsigned long mm_teardown_offset;
    4.14      /* Continuable domain_relinquish_resources() */
    4.15 -    struct list_head relmem_list;
    4.16 +    struct page_list_head relmem_list;
    4.17  };
    4.18  #define INT_ENABLE_OFFSET(v) 		  \
    4.19      (sizeof(vcpu_info_t) * (v)->vcpu_id + \
     5.1 --- a/xen/include/asm-ia64/mm.h	Fri Feb 13 11:22:28 2009 +0900
     5.2 +++ b/xen/include/asm-ia64/mm.h	Fri Feb 13 11:22:59 2009 +0900
     5.3 @@ -39,10 +39,23 @@ typedef unsigned long page_flags_t;
     5.4  
     5.5  #define PRtype_info "016lx"
     5.6  
     5.7 +#if 0
     5.8 +/*
     5.9 + * See include/xen/mm.h.
    5.10 + * For now, abandon to compress struct page_info
    5.11 + * seeing IA64_MAX_PHYS_BITS and page size.
    5.12 + */
    5.13 +#undef page_list_entry
    5.14 +struct page_list_entry
    5.15 +{
    5.16 +    unsigned long next, prev;
    5.17 +};
    5.18 +#endif
    5.19 +
    5.20  struct page_info
    5.21  {
    5.22      /* Each frame can be threaded onto a doubly-linked list. */
    5.23 -    struct list_head list;
    5.24 +    struct page_list_entry list;
    5.25  
    5.26      /* Reference count and various PGC_xxx flags and fields. */
    5.27      unsigned long count_info;
     6.1 --- a/xen/include/asm-ia64/tlb_track.h	Fri Feb 13 11:22:28 2009 +0900
     6.2 +++ b/xen/include/asm-ia64/tlb_track.h	Fri Feb 13 11:22:59 2009 +0900
     6.3 @@ -72,7 +72,7 @@ struct tlb_track {
     6.4      unsigned int                limit;
     6.5      unsigned int                num_entries;
     6.6      unsigned int                num_free;
     6.7 -    struct list_head            page_list;
     6.8 +    struct page_list_head       page_list;
     6.9  
    6.10      /* XXX hash table size */
    6.11      spinlock_t                  hash_lock;