ia64/xen-unstable
changeset 3461:04f50d583813
bitkeeper revision 1.1159.223.4 (41ee1ff3MuvZmbN8TXMWZl0WCI4yMg)
Clean up buddy allocator, as suggested by Rusty Russell.
author      kaf24@scramble.cl.cam.ac.uk
date        Wed Jan 19 08:53:07 2005 +0000 (2005-01-19)
parents     20b07fc026cd
children    1f6bfd28d0c6 af362b7301a0
files       xen/common/page_alloc.c xen/include/xen/mm.h
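
The cleanup is easier to follow with the new free-list layout in mind: free
blocks live on one list per (zone, order) pair, with orders running from 0 to
MAX_ORDER inclusive, so the array's second dimension is simply MAX_ORDER+1 and
the separate MIN_ORDER/NR_ORDERS constants can go. Below is a minimal
standalone sketch of that layout and its initialisation; the stub list type
and main() are illustrative, not Xen's code.

#include <stdio.h>

#define NR_ZONES  2
#define MAX_ORDER 10             /* up to 2^10 pages per allocation */

struct list_head { struct list_head *next, *prev; };

/* One free list per (zone, order); orders run 0..MAX_ORDER inclusive,
 * hence MAX_ORDER+1 entries and no separate NR_ORDERS constant. */
static struct list_head heap[NR_ZONES][MAX_ORDER + 1];

int main(void)
{
    unsigned int i, j;

    for ( i = 0; i < NR_ZONES; i++ )
        for ( j = 0; j <= MAX_ORDER; j++ )  /* <= MAX_ORDER replaces < NR_ORDERS */
            heap[i][j].next = heap[i][j].prev = &heap[i][j];

    printf("%d zones x %d free lists initialised\n", NR_ZONES, MAX_ORDER + 1);
    return 0;
}

Switching zone and order from int to unsigned int has the same flavour: a
negative order can no longer reach the allocator, so only the order > MAX_ORDER
bound needs a runtime check and the old order < MIN_ORDER test disappears.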
line diff
--- a/xen/common/page_alloc.c  Wed Jan 19 08:35:45 2005 +0000
+++ b/xen/common/page_alloc.c  Wed Jan 19 08:53:07 2005 +0000
@@ -203,10 +203,8 @@ unsigned long alloc_boot_pages(unsigned
 #define NR_ZONES 2
 
 /* Up to 2^10 pages can be allocated at once. */
-#define MIN_ORDER 0
 #define MAX_ORDER 10
-#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
-static struct list_head heap[NR_ZONES][NR_ORDERS];
+static struct list_head heap[NR_ZONES][MAX_ORDER+1];
 
 static unsigned long avail[NR_ZONES];
 
@@ -220,7 +218,7 @@ void end_boot_allocator(void)
     memset(avail, 0, sizeof(avail));
 
     for ( i = 0; i < NR_ZONES; i++ )
-        for ( j = 0; j < NR_ORDERS; j++ )
+        for ( j = 0; j <= MAX_ORDER; j++ )
             INIT_LIST_HEAD(&heap[i][j]);
 
     /* Pages that are free now go to the domain sub-allocator. */
@@ -236,34 +234,41 @@ void end_boot_allocator(void)
 }
 
 /* Hand the specified arbitrary page range to the specified heap zone. */
-void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
+void init_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages)
 {
     unsigned long i;
 
+    ASSERT(zone < NR_ZONES);
+
     for ( i = 0; i < nr_pages; i++ )
         free_heap_pages(zone, pg+i, 0);
 }
 
 
 /* Allocate 2^@order contiguous pages. */
-struct pfn_info *alloc_heap_pages(int zone, int order)
+struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order)
 {
     int i;
     struct pfn_info *pg;
 
-    if ( unlikely(order < MIN_ORDER) || unlikely(order > MAX_ORDER) )
+    ASSERT(zone < NR_ZONES);
+
+    if ( unlikely(order > MAX_ORDER) )
         return NULL;
 
     spin_lock(&heap_lock);
 
     /* Find smallest order which can satisfy the request. */
-    for ( i = order; i < NR_ORDERS; i++ )
+    for ( i = order; i <= MAX_ORDER; i++ )
         if ( !list_empty(&heap[zone][i]) )
-            break;
+            goto found;
 
-    if ( i == NR_ORDERS )
-        goto no_memory;
-
+    /* No suitable memory blocks. Fail the request. */
+    spin_unlock(&heap_lock);
+    return NULL;
+
+ found:
     pg = list_entry(heap[zone][i].next, struct pfn_info, list);
     list_del(&pg->list);
 
@@ -281,18 +286,18 @@ struct pfn_info *alloc_heap_pages(int zo
     spin_unlock(&heap_lock);
 
     return pg;
-
- no_memory:
-    spin_unlock(&heap_lock);
-    return NULL;
 }
 
 
 /* Free 2^@order set of pages. */
-void free_heap_pages(int zone, struct pfn_info *pg, int order)
+void free_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned int order)
 {
     unsigned long mask;
 
+    ASSERT(zone < NR_ZONES);
+    ASSERT(order <= MAX_ORDER);
+
     spin_lock(&heap_lock);
 
     map_free(page_to_pfn(pg), 1 << order);
@@ -393,7 +398,7 @@ void init_xenheap_pages(unsigned long ps
 }
 
 
-unsigned long alloc_xenheap_pages(int order)
+unsigned long alloc_xenheap_pages(unsigned int order)
 {
     unsigned long flags;
     struct pfn_info *pg;
@@ -431,7 +436,7 @@ unsigned long alloc_xenheap_pages(int or
 }
 
 
-void free_xenheap_pages(unsigned long p, int order)
+void free_xenheap_pages(unsigned long p, unsigned int order)
 {
     unsigned long flags;
 
@@ -459,7 +464,7 @@ void init_domheap_pages(unsigned long ps
 }
 
 
-struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
 {
     struct pfn_info *pg;
     unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
@@ -535,7 +540,7 @@ struct pfn_info *alloc_domheap_pages(str
 }
 
 
-void free_domheap_pages(struct pfn_info *pg, int order)
+void free_domheap_pages(struct pfn_info *pg, unsigned int order)
 {
     int i, drop_dom_ref;
     struct domain *d = pg->u.inuse.domain;
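
For reference, here is a self-contained toy version of the allocation path
the patched alloc_heap_pages() follows: scan the free lists from the requested
order upward, jump to found on the first non-empty list, then split the block
back down, returning the unused halves. The splitting step is elided context
in the diff above, so this is standard buddy-allocator behaviour rather than
the verbatim Xen code; the toy_* names, the single zone, the singly linked
lists, and the omission of locking and of coalescing on free are all
simplifications of mine.

#include <stdio.h>
#include <stddef.h>

#define MAX_ORDER 4                  /* toy heap: 2^4 = 16 pages */
#define NR_PAGES  (1u << MAX_ORDER)

struct page {
    struct page *next;               /* next free block of the same order */
    unsigned int order;              /* order of the free block starting here */
};

static struct page pages[NR_PAGES];
static struct page *freelist[MAX_ORDER + 1];   /* MAX_ORDER+1 entries */

static void push(unsigned int order, struct page *pg)
{
    pg->order = order;
    pg->next = freelist[order];
    freelist[order] = pg;
}

/* Allocate 2^order contiguous pages; returns NULL on failure. */
static struct page *toy_alloc(unsigned int order)
{
    unsigned int i;
    struct page *pg;

    if ( order > MAX_ORDER )         /* unsigned: no lower-bound check needed */
        return NULL;

    /* Find smallest order which can satisfy the request. */
    for ( i = order; i <= MAX_ORDER; i++ )
        if ( freelist[i] != NULL )
            goto found;

    return NULL;                     /* no suitable memory blocks */

 found:
    pg = freelist[i];
    freelist[i] = pg->next;

    /* Split the high-order block, returning each unused upper half
     * (post-decrement: free one buddy per order from i-1 down to order). */
    while ( i-- > order )
        push(i, pg + (1u << i));

    return pg;
}

int main(void)
{
    struct page *a, *b;

    push(MAX_ORDER, &pages[0]);      /* seed: one maximal free block */

    a = toy_alloc(1);                /* 2 pages */
    b = toy_alloc(2);                /* 4 pages */
    printf("a at page %td, b at page %td\n", a - pages, b - pages);
    return 0;
}

Allocating order 1 splits the seed block into halves of orders 3, 2 and 1, so
the follow-up order-2 request is served from a freed half at page 4 with no
further scanning, which is exactly the case the goto found early exit handles.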
--- a/xen/include/xen/mm.h  Wed Jan 19 08:35:45 2005 +0000
+++ b/xen/include/xen/mm.h  Wed Jan 19 08:53:07 2005 +0000
@@ -12,22 +12,24 @@ unsigned long alloc_boot_pages(unsigned
 void end_boot_allocator(void);
 
 /* Generic allocator. These functions are *not* interrupt-safe. */
-void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
-struct pfn_info *alloc_heap_pages(int zone, int order);
-void free_heap_pages(int zone, struct pfn_info *pg, int order);
+void init_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
+struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
+void free_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned int order);
 void scrub_heap_pages(void);
 
 /* Xen suballocator. These functions are interrupt-safe. */
 void init_xenheap_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_xenheap_pages(int order);
-void free_xenheap_pages(unsigned long p, int order);
+unsigned long alloc_xenheap_pages(unsigned int order);
+void free_xenheap_pages(unsigned long p, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0))
 #define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
 
 /* Domain suballocator. These functions are *not* interrupt-safe.*/
 void init_domheap_pages(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
-void free_domheap_pages(struct pfn_info *pg, int order);
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
+void free_domheap_pages(struct pfn_info *pg, unsigned int order);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
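
A hypothetical caller of the revised xenheap interface, assuming Xen's
<xen/mm.h>; the example() name is mine, and the use of 0 as the failure value
of alloc_xenheap_pages() is an assumption on my part, not something the patch
shows.

/* Hypothetical caller (not part of the patch), assuming Xen's headers. */
#include <xen/mm.h>

static int example(void)
{
    unsigned long v;

    /* Allocate 2^2 = 4 contiguous pages from the Xen heap. */
    v = alloc_xenheap_pages(2);
    if ( v == 0 )                    /* assumed failure value */
        return -1;

    /* ... use the mapping at v ... */

    free_xenheap_pages(v, 2);        /* order must match the allocation */

    /* The single-page helpers expand to order-0 calls. */
    v = alloc_xenheap_page();
    if ( v != 0 )
        free_xenheap_page(v);

    return 0;
}

With the prototypes now taking unsigned int, a caller that previously passed a
negative order would get an implicit conversion to a huge value and fail the
order > MAX_ORDER check, rather than needing a separate lower-bound test.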