ia64/xen-unstable

changeset 19164:de853e901b5c

Remove cpumask from page_info struct.

This makes TLB flushing on page allocation more conservative, but the
flush clock should still save us most of the time (page freeing and
alloc'ing tends to happen in batches, and not necessarily close
together). We could add some optimisations to the flush filter if this
does turn out to be a significant overhead for some (useful)
workloads.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 04 15:29:51 2009 +0000 (2009-02-04)
parents 13a0272c8c02
children 3fc7d4115d6c
files xen/common/page_alloc.c xen/include/asm-ia64/mm.h xen/include/asm-x86/mm.h
line diff
     1.1 --- a/xen/common/page_alloc.c	Wed Feb 04 15:08:46 2009 +0000
     1.2 +++ b/xen/common/page_alloc.c	Wed Feb 04 15:29:51 2009 +0000
     1.3 @@ -400,7 +400,7 @@ static struct page_info *alloc_heap_page
     1.4          BUG_ON(pg[i].count_info != 0);
     1.5  
     1.6          /* Add in any extra CPUs that need flushing because of this page. */
     1.7 -        cpus_andnot(extra_cpus_mask, pg[i].u.free.cpumask, mask);
     1.8 +        cpus_andnot(extra_cpus_mask, cpu_online_map, mask);
     1.9          tlbflush_filter(extra_cpus_mask, pg[i].tlbflush_timestamp);
    1.10          cpus_or(mask, mask, extra_cpus_mask);
    1.11  
    1.12 @@ -425,7 +425,6 @@ static void free_heap_pages(
    1.13      unsigned long mask;
    1.14      unsigned int i, node = phys_to_nid(page_to_maddr(pg));
    1.15      unsigned int zone = page_to_zone(pg);
    1.16 -    struct domain *d;
    1.17  
    1.18      ASSERT(order <= MAX_ORDER);
    1.19      ASSERT(node >= 0);
    1.20 @@ -446,15 +445,9 @@ static void free_heap_pages(
    1.21           */
    1.22          pg[i].count_info = 0;
    1.23  
    1.24 -        if ( (d = page_get_owner(&pg[i])) != NULL )
    1.25 -        {
    1.26 -            pg[i].tlbflush_timestamp = tlbflush_current_time();
    1.27 -            pg[i].u.free.cpumask     = d->domain_dirty_cpumask;
    1.28 -        }
    1.29 -        else
    1.30 -        {
    1.31 -            cpus_clear(pg[i].u.free.cpumask);
    1.32 -        }
    1.33 +        /* If a page has no owner it will need no safety TLB flush. */
    1.34 +        pg[i].tlbflush_timestamp =
    1.35 +            page_get_owner(&pg[i]) ? tlbflush_current_time() : 0;
    1.36      }
    1.37  
    1.38      spin_lock(&heap_lock);
     2.1 --- a/xen/include/asm-ia64/mm.h	Wed Feb 04 15:08:46 2009 +0000
     2.2 +++ b/xen/include/asm-ia64/mm.h	Wed Feb 04 15:29:51 2009 +0000
     2.3 @@ -62,21 +62,12 @@ struct page_info
     2.4          struct {
     2.5              /* Order-size of the free chunk this page is the head of. */
     2.6              u32 order;
     2.7 -            /* Mask of possibly-tainted TLBs. */
     2.8 -            cpumask_t cpumask;
     2.9          } free;
    2.10  
    2.11      } u;
    2.12  
    2.13      /* Timestamp from 'TLB clock', used to reduce need for safety flushes. */
    2.14      u32 tlbflush_timestamp;
    2.15 -
    2.16 -#if 0
    2.17 -// following added for Linux compiling
    2.18 -    page_flags_t flags;
    2.19 -    atomic_t _count;
    2.20 -    struct list_head lru;	// is this the same as above "list"?
    2.21 -#endif
    2.22  };
    2.23  
    2.24  #define set_page_count(p,v) 	atomic_set(&(p)->_count, v - 1)
     3.1 --- a/xen/include/asm-x86/mm.h	Wed Feb 04 15:08:46 2009 +0000
     3.2 +++ b/xen/include/asm-x86/mm.h	Wed Feb 04 15:29:51 2009 +0000
     3.3 @@ -66,12 +66,6 @@ struct page_info
     3.4              unsigned long count:26; /* Reference count */
     3.5          } sh;
     3.6  
     3.7 -        /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
     3.8 -        struct {
     3.9 -            /* Mask of possibly-tainted TLBs. */
    3.10 -            cpumask_t cpumask;
    3.11 -        } free;
    3.12 -
    3.13      } u;
    3.14  
    3.15      union {