ia64/xen-unstable
changeset 3462:1f6bfd28d0c6
bitkeeper revision 1.1159.212.22 (41ee20131dSkJwS4ElWa61syAlMc-g)
Merge scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author | kaf24@scramble.cl.cam.ac.uk
---|---
date | Wed Jan 19 08:53:39 2005 +0000 (2005-01-19)
parents | 7413468a8d01 04f50d583813
children | 3ced9b0f4dab 023c30e91254
files | linux-2.4.28-xen-sparse/arch/xen/mm/init.c xen/common/dom0_ops.c xen/common/page_alloc.c xen/include/xen/mm.h
line diff
```diff
--- a/linux-2.4.28-xen-sparse/arch/xen/mm/init.c  Tue Jan 18 11:04:43 2005 +0000
+++ b/linux-2.4.28-xen-sparse/arch/xen/mm/init.c  Wed Jan 19 08:53:39 2005 +0000
@@ -377,7 +377,7 @@ static int __init free_pages_init(void)
     }
 #ifdef CONFIG_HIGHMEM
     for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
-        one_highpage_init((struct page *) (mem_map + pfn), pfn,
+        one_highpage_init((struct page *) (mem_map + pfn),
                           (pfn < xen_start_info.nr_pages));
     totalram_pages += totalhigh_pages;
 #endif
```
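The init.c change drops a redundant `pfn` argument: `one_highpage_init()` can derive the frame number from the `struct page` pointer itself, so only the flag saying whether the page sits below `xen_start_info.nr_pages` (i.e. is backed by a real machine frame) survives at the call site. The matching definition is not part of this changeset; a hypothetical sketch of what it would look like, following the usual Linux 2.4 highmem init pattern (the body below is an assumption, not code from the tree):

```c
/* Hypothetical sketch only -- the real definition lives elsewhere in the
 * sparse tree and is not shown in this changeset.  'initialise' is the
 * (pfn < xen_start_info.nr_pages) flag from the call site: pages beyond
 * nr_pages have no machine frame behind them yet and stay reserved. */
static void __init one_highpage_init(struct page *page, int initialise)
{
    if (initialise) {
        ClearPageReserved(page);
        set_bit(PG_highmem, &page->flags);
        atomic_set(&page->count, 1);
        __free_page(page);
        totalhigh_pages++;
    } else {
        SetPageReserved(page);
    }
}
```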
```diff
--- a/xen/common/dom0_ops.c  Tue Jan 18 11:04:43 2005 +0000
+++ b/xen/common/dom0_ops.c  Wed Jan 19 08:53:39 2005 +0000
@@ -662,7 +662,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
     {
         struct domain *d;
         ret = -ESRCH;
-        d = find_domain_by_id( op->u.setdomainmaxmem.domain );
+        d = find_domain_by_id( op->u.setdomainvmassist.domain );
         if ( d != NULL )
         {
             vm_assist(d, op->u.setdomainvmassist.cmd,
```
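The dom0_ops.c change fixes a copy-and-paste slip in the vm-assist handler: the domain id was read through `op->u.setdomainmaxmem`, a different member of the request union, instead of `op->u.setdomainvmassist`. A self-contained illustration (not Xen code; the struct layouts below are simplified assumptions) of why the old code could still appear to work:

```c
#include <stdio.h>

/* Simplified stand-ins for two members of the dom0_op request union. */
struct setdomainmaxmem   { unsigned int domain; unsigned long max_memkb; };
struct setdomainvmassist { unsigned int domain; unsigned int cmd, type; };

union u {
    struct setdomainmaxmem   setdomainmaxmem;
    struct setdomainvmassist setdomainvmassist;
};

int main(void)
{
    union u op = { .setdomainvmassist = { .domain = 7, .cmd = 1, .type = 2 } };

    /* Both reads agree today only because 'domain' happens to sit at the
     * same offset in both members; reordering either struct would silently
     * break the wrong-member read, which is why the fix matters even
     * though no misbehaviour was visible. */
    printf("right member: %u, wrong member: %u\n",
           op.setdomainvmassist.domain, op.setdomainmaxmem.domain);
    return 0;
}
```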
```diff
--- a/xen/common/page_alloc.c  Tue Jan 18 11:04:43 2005 +0000
+++ b/xen/common/page_alloc.c  Wed Jan 19 08:53:39 2005 +0000
@@ -203,10 +203,8 @@ unsigned long alloc_boot_pages(unsigned
 #define NR_ZONES 2
 
 /* Up to 2^10 pages can be allocated at once. */
-#define MIN_ORDER 0
 #define MAX_ORDER 10
-#define NR_ORDERS (MAX_ORDER - MIN_ORDER + 1)
-static struct list_head heap[NR_ZONES][NR_ORDERS];
+static struct list_head heap[NR_ZONES][MAX_ORDER+1];
 
 static unsigned long avail[NR_ZONES];
 
@@ -220,7 +218,7 @@ void end_boot_allocator(void)
     memset(avail, 0, sizeof(avail));
 
     for ( i = 0; i < NR_ZONES; i++ )
-        for ( j = 0; j < NR_ORDERS; j++ )
+        for ( j = 0; j <= MAX_ORDER; j++ )
             INIT_LIST_HEAD(&heap[i][j]);
 
     /* Pages that are free now go to the domain sub-allocator. */
@@ -236,34 +234,41 @@ void end_boot_allocator(void)
 }
 
 /* Hand the specified arbitrary page range to the specified heap zone. */
-void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
+void init_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages)
 {
     unsigned long i;
 
+    ASSERT(zone < NR_ZONES);
+
     for ( i = 0; i < nr_pages; i++ )
         free_heap_pages(zone, pg+i, 0);
 }
 
 
 /* Allocate 2^@order contiguous pages. */
-struct pfn_info *alloc_heap_pages(int zone, int order)
+struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order)
 {
     int i;
     struct pfn_info *pg;
 
-    if ( unlikely(order < MIN_ORDER) || unlikely(order > MAX_ORDER) )
+    ASSERT(zone < NR_ZONES);
+
+    if ( unlikely(order > MAX_ORDER) )
         return NULL;
 
     spin_lock(&heap_lock);
 
     /* Find smallest order which can satisfy the request. */
-    for ( i = order; i < NR_ORDERS; i++ )
+    for ( i = order; i <= MAX_ORDER; i++ )
         if ( !list_empty(&heap[zone][i]) )
-            break;
+            goto found;
 
-    if ( i == NR_ORDERS )
-        goto no_memory;
-
+    /* No suitable memory blocks. Fail the request. */
+    spin_unlock(&heap_lock);
+    return NULL;
+
+ found:
     pg = list_entry(heap[zone][i].next, struct pfn_info, list);
     list_del(&pg->list);
 
@@ -281,18 +286,18 @@ struct pfn_info *alloc_heap_pages(int zo
     spin_unlock(&heap_lock);
 
     return pg;
-
- no_memory:
-    spin_unlock(&heap_lock);
-    return NULL;
 }
 
 
 /* Free 2^@order set of pages. */
-void free_heap_pages(int zone, struct pfn_info *pg, int order)
+void free_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned int order)
 {
     unsigned long mask;
 
+    ASSERT(zone < NR_ZONES);
+    ASSERT(order <= MAX_ORDER);
+
     spin_lock(&heap_lock);
 
     map_free(page_to_pfn(pg), 1 << order);
@@ -393,7 +398,7 @@ void init_xenheap_pages(unsigned long ps
 }
 
 
-unsigned long alloc_xenheap_pages(int order)
+unsigned long alloc_xenheap_pages(unsigned int order)
 {
     unsigned long flags;
     struct pfn_info *pg;
@@ -431,7 +436,7 @@ unsigned long alloc_xenheap_pages(int or
 }
 
 
-void free_xenheap_pages(unsigned long p, int order)
+void free_xenheap_pages(unsigned long p, unsigned int order)
 {
     unsigned long flags;
 
@@ -459,7 +464,7 @@ void init_domheap_pages(unsigned long ps
 }
 
 
-struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
 {
     struct pfn_info *pg;
     unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
@@ -535,7 +540,7 @@ struct pfn_info *alloc_domheap_pages(str
 }
 
 
-void free_domheap_pages(struct pfn_info *pg, int order)
+void free_domheap_pages(struct pfn_info *pg, unsigned int order)
 {
     int i, drop_dom_ref;
     struct domain *d = pg->u.inuse.domain;
```
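The page_alloc.c rework removes the always-zero `MIN_ORDER` (and with it `NR_ORDERS`), sizes the free-list array directly as `[NR_ZONES][MAX_ORDER+1]`, switches `zone` and `order` to `unsigned int` with `ASSERT`s replacing the now-impossible negative-order check, and restructures `alloc_heap_pages()` so the failure path falls out of the search loop instead of jumping to a trailing `no_memory` label. A minimal, self-contained sketch of that allocation pattern (not the Xen implementation; the page-descriptor and free-list types below are invented for illustration):

```c
#include <stddef.h>

#define MAX_ORDER 10
#define NR_PAGES  (1u << MAX_ORDER)

/* One descriptor per page; a free block of 2^order pages is linked into
 * free_list[order] through the descriptor of its first page. */
struct page { struct page *next; unsigned int order; };

static struct page pages[NR_PAGES];
static struct page *free_list[MAX_ORDER + 1];

static void push(unsigned int order, struct page *pg)
{
    pg->order = order;
    pg->next  = free_list[order];
    free_list[order] = pg;
}

/* Allocate 2^order contiguous pages, mirroring the reworked control flow:
 * scan upward for the smallest non-empty list, fail cleanly if none
 * exists, otherwise split the block back down to the target order. */
static struct page *alloc_order(unsigned int order)
{
    unsigned int i;
    struct page *pg;

    if (order > MAX_ORDER)           /* order is unsigned: no < 0 case */
        return NULL;

    /* Find smallest order which can satisfy the request. */
    for (i = order; i <= MAX_ORDER; i++)
        if (free_list[i] != NULL)
            goto found;

    /* No suitable memory blocks. Fail the request. */
    return NULL;

 found:
    pg = free_list[i];
    free_list[i] = pg->next;

    /* Split the block down to the requested order, returning the unused
     * half of each split to the appropriate lower free list. */
    while (i != order) {
        i--;
        push(i, pg + (1u << i));
    }
    return pg;
}

int main(void)
{
    push(MAX_ORDER, &pages[0]);      /* seed: one maximal free block */
    struct page *p = alloc_order(3); /* carves an 8-page block out of it */
    return (p == &pages[0]) ? 0 : 1;
}
```

The real allocator does the same scan under `heap_lock`, which is why its new failure path must drop the lock before returning, exactly as the added lines in the diff do.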
```diff
--- a/xen/include/xen/mm.h  Tue Jan 18 11:04:43 2005 +0000
+++ b/xen/include/xen/mm.h  Wed Jan 19 08:53:39 2005 +0000
@@ -12,22 +12,24 @@ unsigned long alloc_boot_pages(unsigned
 void end_boot_allocator(void);
 
 /* Generic allocator. These functions are *not* interrupt-safe. */
-void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
-struct pfn_info *alloc_heap_pages(int zone, int order);
-void free_heap_pages(int zone, struct pfn_info *pg, int order);
+void init_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned long nr_pages);
+struct pfn_info *alloc_heap_pages(unsigned int zone, unsigned int order);
+void free_heap_pages(
+    unsigned int zone, struct pfn_info *pg, unsigned int order);
 void scrub_heap_pages(void);
 
 /* Xen suballocator. These functions are interrupt-safe. */
 void init_xenheap_pages(unsigned long ps, unsigned long pe);
-unsigned long alloc_xenheap_pages(int order);
-void free_xenheap_pages(unsigned long p, int order);
+unsigned long alloc_xenheap_pages(unsigned int order);
+void free_xenheap_pages(unsigned long p, unsigned int order);
 #define alloc_xenheap_page() (alloc_xenheap_pages(0))
 #define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
 
 /* Domain suballocator. These functions are *not* interrupt-safe.*/
 void init_domheap_pages(unsigned long ps, unsigned long pe);
-struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
-void free_domheap_pages(struct pfn_info *pg, int order);
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
+void free_domheap_pages(struct pfn_info *pg, unsigned int order);
 unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
```
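The mm.h hunk brings the public prototypes in line with the new `unsigned int order` signatures; the single-page helpers remain plain order-0 wrappers. A hypothetical usage sketch (the include path is taken from the diff, but the assumption that `alloc_xenheap_pages()` returns 0 on failure is not confirmed by it):

```c
#include <xen/mm.h>

/* Hypothetical caller, for illustration only.  Assumes the allocator
 * returns 0 on failure, consistent with its unsigned long (virtual
 * address) return type. */
static int scratch_page_selftest(void)
{
    unsigned long va = alloc_xenheap_page();   /* == alloc_xenheap_pages(0) */

    if ( va == 0 )
        return -1;                             /* xenheap exhausted */

    *(unsigned long *)va = 0xdeadbeef;         /* page is mapped: use it */

    free_xenheap_page(va);                     /* == free_xenheap_pages(va, 0) */
    return 0;
}
```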