ia64/xen-unstable

changeset 18728:11c86c51a697

x86: fix domain cleanup

The preemptible page type handling changes modified free_page_type()
behavior without adjusting the call site in relinquish_memory(): Any
type reference left pending when leaving hypercall handlers is
associated with a page reference, and when successful free_page_type()
decrements the type refcount - hence relinquish_memory() must now also
drop the page reference.

Also, the recursion avoidance during domain shutdown somehow (probably
by me when I merged the patch up to a newer snapshot) got screwed up:
The avoidance logic in mm.c should short-circuit levels below the top
one currently being processed, rather than the top one itself.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 27 13:22:43 2008 +0000 (2008-10-27)
parents 101e50cffc78
children 15aed96c7b5c
files xen/arch/x86/domain.c xen/arch/x86/mm.c
line diff
     1.1 --- a/xen/arch/x86/domain.c	Mon Oct 27 13:20:52 2008 +0000
     1.2 +++ b/xen/arch/x86/domain.c	Mon Oct 27 13:22:43 2008 +0000
     1.3 @@ -1687,6 +1687,7 @@ static int relinquish_memory(
     1.4              {
     1.5                  if ( free_page_type(page, x, 0) != 0 )
     1.6                      BUG();
     1.7 +                put_page(page);
     1.8                  break;
     1.9              }
    1.10          }
     2.1 --- a/xen/arch/x86/mm.c	Mon Oct 27 13:20:52 2008 +0000
     2.2 +++ b/xen/arch/x86/mm.c	Mon Oct 27 13:22:43 2008 +0000
     2.3 @@ -1343,7 +1343,7 @@ static void free_l1_table(struct page_in
     2.4  
     2.5  static int free_l2_table(struct page_info *page, int preemptible)
     2.6  {
     2.7 -#ifdef CONFIG_COMPAT
     2.8 +#if defined(CONFIG_COMPAT) || defined(DOMAIN_DESTRUCT_AVOID_RECURSION)
     2.9      struct domain *d = page_get_owner(page);
    2.10  #endif
    2.11      unsigned long pfn = page_to_mfn(page);
    2.12 @@ -1351,6 +1351,11 @@ static int free_l2_table(struct page_inf
    2.13      unsigned int  i = page->nr_validated_ptes - 1;
    2.14      int err = 0;
    2.15  
    2.16 +#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
    2.17 +    if ( d->arch.relmem == RELMEM_l3 )
    2.18 +        return 0;
    2.19 +#endif
    2.20 +
    2.21      pl2e = map_domain_page(pfn);
    2.22  
    2.23      ASSERT(page->nr_validated_ptes);
    2.24 @@ -1381,7 +1386,7 @@ static int free_l3_table(struct page_inf
    2.25      int rc = 0;
    2.26  
    2.27  #ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
    2.28 -    if ( d->arch.relmem == RELMEM_l3 )
    2.29 +    if ( d->arch.relmem == RELMEM_l4 )
    2.30          return 0;
    2.31  #endif
    2.32  
    2.33 @@ -1424,11 +1429,6 @@ static int free_l4_table(struct page_inf
    2.34      unsigned int  i = page->nr_validated_ptes - !page->partial_pte;
    2.35      int rc = 0;
    2.36  
    2.37 -#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
    2.38 -    if ( d->arch.relmem == RELMEM_l4 )
    2.39 -        return 0;
    2.40 -#endif
    2.41 -
    2.42      do {
    2.43          if ( is_guest_l4_slot(d, i) )
    2.44              rc = put_page_from_l4e(pl4e[i], pfn, preemptible);