ia64/xen-unstable

changeset 11516:7b350fc692d5

[POWERPC][XEN] Track pages correctly

The following patch tracks and frees pages correctly. It solves the problem where a foreign mapping would turn a domain into a zombie because its reference counts were never dropped.
This involved:
- implement relinquish_memory() for PowerPC
- remove free_rma() since all pages get "relinquished" now.
- references to foreign pages are now correctly acquired (get_page) and released (put_page)

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Wed Sep 13 18:41:11 2006 -0400 (2006-09-13)
parents 990bd509a5f5
children 3ee3376a5eb3
files xen/arch/powerpc/domain.c xen/arch/powerpc/mm.c xen/arch/powerpc/papr/xlate.c xen/include/asm-powerpc/mm.h
line diff
     1.1 --- a/xen/arch/powerpc/domain.c	Tue Sep 12 14:28:16 2006 -0500
     1.2 +++ b/xen/arch/powerpc/domain.c	Wed Sep 13 18:41:11 2006 -0400
     1.3 @@ -242,10 +242,44 @@ void sync_vcpu_execstate(struct vcpu *v)
     1.4      return;
     1.5  }
     1.6  
     1.7 +static void relinquish_memory(struct domain *d, struct list_head *list)
     1.8 +{
     1.9 +    struct list_head *ent;
    1.10 +    struct page_info  *page;
    1.11 +
    1.12 +    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    1.13 +    spin_lock_recursive(&d->page_alloc_lock);
    1.14 +
    1.15 +    ent = list->next;
    1.16 +    while ( ent != list )
    1.17 +    {
    1.18 +        page = list_entry(ent, struct page_info, list);
    1.19 +
    1.20 +        /* Grab a reference to the page so it won't disappear from under us. */
    1.21 +        if ( unlikely(!get_page(page, d)) )
    1.22 +        {
    1.23 +            /* Couldn't get a reference -- someone is freeing this page. */
    1.24 +            ent = ent->next;
    1.25 +            continue;
    1.26 +        }
    1.27 +        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
    1.28 +            put_page_and_type(page);
    1.29 +
    1.30 +        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
    1.31 +            put_page(page);
    1.32 +
    1.33 +        /* Follow the list chain and /then/ potentially free the page. */
    1.34 +        ent = ent->next;
    1.35 +        put_page(page);
    1.36 +    }
    1.37 +    spin_unlock_recursive(&d->page_alloc_lock);
    1.38 +}
    1.39 +
    1.40  void domain_relinquish_resources(struct domain *d)
    1.41  {
    1.42 -    free_rma(d);
    1.43 +    relinquish_memory(d, &d->page_list);
    1.44      free_extents(d);
    1.45 +    return;
    1.46  }
    1.47  
    1.48  void arch_dump_domain_info(struct domain *d)
     2.1 --- a/xen/arch/powerpc/mm.c	Tue Sep 12 14:28:16 2006 -0500
     2.2 +++ b/xen/arch/powerpc/mm.c	Wed Sep 13 18:41:11 2006 -0400
     2.3 @@ -329,13 +329,6 @@ int allocate_rma(struct domain *d, unsig
     2.4      return 0;
     2.5  }
     2.6  
     2.7 -void free_rma(struct domain *d)
     2.8 -{
     2.9 -    if (d->arch.rma_page) {
    2.10 -        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
    2.11 -    }
    2.12 -}
    2.13 -
    2.14  ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
    2.15  {
    2.16      ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
     3.1 --- a/xen/arch/powerpc/papr/xlate.c	Tue Sep 12 14:28:16 2006 -0500
     3.2 +++ b/xen/arch/powerpc/papr/xlate.c	Wed Sep 13 18:41:11 2006 -0400
     3.3 @@ -263,6 +263,7 @@ static void h_enter(struct cpu_user_regs
     3.4  
     3.5                  BUG_ON(f == d);
     3.6                  get_domain(f);
     3.7 +                get_page(pg, f);
     3.8              }
     3.9                  break;
    3.10              case PFN_TYPE_RMA:
    3.11 @@ -510,8 +511,10 @@ static void h_remove(struct cpu_user_reg
    3.12              struct page_info *pg = mfn_to_page(mfn);
    3.13              struct domain *f = page_get_owner(pg);
    3.14  
    3.15 -            if (f != d)
    3.16 +            if (f != d) {
    3.17                  put_domain(f);
    3.18 +                put_page(pg);
    3.19 +            }
    3.20          }
    3.21      }
    3.22  
     4.1 --- a/xen/include/asm-powerpc/mm.h	Tue Sep 12 14:28:16 2006 -0500
     4.2 +++ b/xen/include/asm-powerpc/mm.h	Wed Sep 13 18:41:11 2006 -0400
     4.3 @@ -154,7 +154,6 @@ static inline void put_page(struct page_
     4.4      while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
     4.5  
     4.6      if ( unlikely((nx & PGC_count_mask) == 0) ) {
     4.7 -        panic("about to free page: 0x%lx\n", page_to_mfn(page));
     4.8          free_domheap_page(page);
     4.9      }
    4.10  }
    4.11 @@ -259,7 +258,6 @@ static inline unsigned long gmfn_to_mfn(
    4.12  #define mfn_to_gmfn(_d, mfn) (mfn)
    4.13  
    4.14  extern int allocate_rma(struct domain *d, unsigned int order_pages);
    4.15 -extern void free_rma(struct domain *d);
    4.16  extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
    4.17  extern void free_extents(struct domain *d);
    4.18