From bec85fb4f6bf9ed5443f08db4d068aa9f24e8b0a Mon Sep 17 00:00:00 2001
From: "kaf24@scramble.cl.cam.ac.uk" <kaf24@scramble.cl.cam.ac.uk>
Date: Wed, 19 Jan 2005 08:53:07 +0000
Subject: [PATCH] bitkeeper revision 1.1159.223.4 (41ee1ff3MuvZmbN8TXMWZl0WCI4yMg)

Clean up buddy allocator, as suggested by Rusty Russell.
---
 xen/common/page_alloc.c | 47 +++++++++++++++++++++++------------------
 xen/include/xen/mm.h    | 16 ++++++++------
 2 files changed, 35 insertions(+), 28 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 0127f52c4d..878948a121 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -203,10 +203,8 @@ unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
 
 #define NR_ZONES 2
 
 /* Up to 2^10 pages can be allocated at once. */
-#define MIN_ORDER 0
 #define MAX_ORDER 10
-#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
-static struct list_head heap[NR_ZONES][NR_ORDERS];
+static struct list_head heap[NR_ZONES][MAX_ORDER+1];
 
 static unsigned long avail[NR_ZONES];
 
@@ -220,7 +218,7 @@ void end_boot_allocator(void)
     memset(avail, 0, sizeof(avail));
 
     for ( i = 0; i < NR_ZONES; i++ )
-        for ( j = 0; j < NR_ORDERS; j++ )
+        for ( j = 0; j <= MAX_ORDER; j++ )
             INIT_LIST_HEAD(&heap[i][j]);
 
     /* Pages that are free now go to the domain sub-allocator. */
@@ -236,34 +234,41 @@
 }
 
 /* Hand the specified arbitrary page range to the specified heap zone. */
-void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
+void init_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages)
 {
     unsigned long i;
 
+    ASSERT(zone < NR_ZONES);
+
     for ( i = 0; i < nr_pages; i++ )
         free_heap_pages(zone, pg+i, 0);
 }
 
 
 /* Allocate 2^@order contiguous pages. */
-struct pfn_info *alloc_heap_pages(int zone, int order)
+struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order)
 {
     int i;
     struct pfn_info *pg;
 
-    if ( unlikely(order < MIN_ORDER) || unlikely(order > MAX_ORDER) )
+    ASSERT(zone < NR_ZONES);
+
+    if ( unlikely(order > MAX_ORDER) )
         return NULL;
 
     spin_lock(&heap_lock);
 
     /* Find smallest order which can satisfy the request. */
-    for ( i = order; i < NR_ORDERS; i++ )
+    for ( i = order; i <= MAX_ORDER; i++ )
         if ( !list_empty(&heap[zone][i]) )
-            break;
+            goto found;
 
-    if ( i == NR_ORDERS )
-        goto no_memory;
-
+    /* No suitable memory blocks. Fail the request. */
+    spin_unlock(&heap_lock);
+    return NULL;
+
+ found:
     pg = list_entry(heap[zone][i].next, struct pfn_info, list);
     list_del(&pg->list);
 
@@ -281,18 +286,18 @@ struct pfn_info *alloc_heap_pages(int zone, int order)
 
     spin_unlock(&heap_lock);
     return pg;
-
- no_memory:
-    spin_unlock(&heap_lock);
-    return NULL;
 }
 
 
 /* Free 2^@order set of pages. */
-void free_heap_pages(int zone, struct pfn_info *pg, int order)
+void free_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned int order)
 {
     unsigned long mask;
 
+    ASSERT(zone < NR_ZONES);
+    ASSERT(order <= MAX_ORDER);
+
     spin_lock(&heap_lock);
 
     map_free(page_to_pfn(pg), 1 << order);
@@ -393,7 +398,7 @@ void init_xenheap_pages(unsigned long ps, unsigned long pe)
 }
 
 
-unsigned long alloc_xenheap_pages(int order)
+unsigned long alloc_xenheap_pages(unsigned int order)
 {
     unsigned long flags;
     struct pfn_info *pg;
@@ -431,7 +436,7 @@ unsigned long alloc_xenheap_pages(int order)
 }
 
 
-void free_xenheap_pages(unsigned long p, int order)
+void free_xenheap_pages(unsigned long p, unsigned int order)
 {
     unsigned long flags;
 
@@ -459,7 +464,7 @@ void init_domheap_pages(unsigned long ps, unsigned long pe)
 }
 
 
-struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
 {
     struct pfn_info *pg;
     unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
@@ -535,7 +540,7 @@ struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
 }
 
 
-void free_domheap_pages(struct pfn_info *pg, int order)
+void free_domheap_pages(struct pfn_info *pg, unsigned int order)
 {
     int i, drop_dom_ref;
     struct domain *d = pg->u.inuse.domain;
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 21184b3a43..12242ca491 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -12,22 +12,24 @@ unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
 void end_boot_allocator(void);
 
 /* Generic allocator. These functions are *not* interrupt-safe. */
-void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
-struct pfn_info *alloc_heap_pages(int zone, int order);
-void free_heap_pages(int zone, struct pfn_info *pg, int order);
+void init_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
+struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
+void free_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned int order);
 void scrub_heap_pages(void);
 
 /* Xen suballocator. These functions are interrupt-safe. */
 void init_xenheap_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_xenheap_pages(int order);
-void free_xenheap_pages(unsigned long p, int order);
+unsigned long alloc_xenheap_pages(unsigned int order);
+void free_xenheap_pages(unsigned long p, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0))
 #define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
 
 /* Domain suballocator. These functions are *not* interrupt-safe.*/
 void init_domheap_pages(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
-void free_domheap_pages(struct pfn_info *pg, int order);
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
+void free_domheap_pages(struct pfn_info *pg, unsigned int order);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
-- 
2.39.5
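For readers unfamiliar with the pattern this patch cleans up, below is a minimal,
self-contained sketch (not Xen code) of the allocation path after the change:
scan the per-order free lists upward from the requested order and jump straight
to the split logic via "goto found", rather than breaking out of the loop and
re-testing the counter against a sentinel. All names here (toy_heap, toy_alloc,
struct block), the 4096-byte page size, and the malloc-backed seeding are
illustrative assumptions, not Xen's actual types or constants.

/* Toy buddy-allocation sketch; illustrates the restructured alloc path only. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER 10
#define PAGE_SIZE 4096UL

struct block {
    struct block *next;
};

/* One free list per order; toy_heap[i] holds free blocks of 2^i pages. */
static struct block *toy_heap[MAX_ORDER + 1];

static struct block *toy_alloc(unsigned int order)
{
    unsigned int i;
    struct block *b;

    /* Unsigned order cannot go below zero, so only the upper bound needs checking. */
    if (order > MAX_ORDER)
        return NULL;

    /* Find the smallest order that can satisfy the request. */
    for (i = order; i <= MAX_ORDER; i++)
        if (toy_heap[i] != NULL)
            goto found;

    /* No suitable memory blocks: fail the request. */
    return NULL;

 found:
    b = toy_heap[i];
    toy_heap[i] = b->next;

    /*
     * Split larger blocks in half until the requested order is reached,
     * returning each unused buddy half to its free list. The real
     * allocator works on pfn ranges; here the buddy address arithmetic
     * is faked with byte offsets into one malloc'd region.
     */
    while (i > order) {
        i--;
        struct block *half =
            (struct block *)((char *)b + (1UL << i) * PAGE_SIZE);
        half->next = toy_heap[i];
        toy_heap[i] = half;
    }
    return b;
}

int main(void)
{
    /* Seed the heap with one maximal block backed by malloc. */
    struct block *top = malloc((1UL << MAX_ORDER) * PAGE_SIZE);
    if (top == NULL)
        return 1;
    top->next = NULL;
    toy_heap[MAX_ORDER] = top;

    struct block *b = toy_alloc(3); /* request 2^3 "pages" */
    printf("allocated %p (heap seeded at %p)\n", (void *)b, (void *)top);
    free(top);
    return 0;
}

The point of the "goto found" shape, as in the patch, is that each outcome has
exactly one exit: the success path never re-tests the loop counter against a
sentinel value, and the failure path no longer needs a separate no_memory label
at the bottom of the function.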