ia64/xen-unstable
changeset 14288:dad3d143c3b0
xen: Replace stupid page_alloc fix.
I broke the 'correct' fix when I copied it out of an email. The
actual correct version, with an extra +1 in the for-loop header is
rather abusive of for loops, so I've changed it now to a do-while loop
and an extra comment so I don't screw up this backwards loop ever
again.
This version does actually boot. :-)
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kfraser@localhost.localdomain |
---|---|
date | Wed Mar 07 11:17:03 2007 +0000 (2007-03-07) |
parents | e68ee3665cba |
children | fbbf1f07fefe |
files | xen/common/page_alloc.c |
line diff
1.1  --- a/xen/common/page_alloc.c  Wed Mar 07 10:53:34 2007 +0000
1.2  +++ b/xen/common/page_alloc.c  Wed Mar 07 11:17:03 2007 +0000
1.3  @@ -339,7 +339,7 @@ static void init_heap_block(heap_by_zone
1.4
1.5   /* Allocate 2^@order contiguous pages. */
1.6   static struct page_info *alloc_heap_pages(
1.7  -    unsigned int zone_lo, unsigned zone_hi,
1.8  +    unsigned int zone_lo, unsigned int zone_hi,
1.9       unsigned int cpu, unsigned int order)
1.10  {
1.11      unsigned int i, j, zone;
1.12 @@ -357,25 +357,24 @@ static struct page_info *alloc_heap_page
1.13
1.14     spin_lock(&heap_lock);
1.15
1.16 -    /* start with requested node, but exhaust all node memory
1.17 -     * in requested zone before failing, only calc new node
1.18 -     * value if we fail to find memory in target node, this avoids
1.19 -     * needless computation on fast-path */
1.20 +    /*
1.21 +     * Start with requested node, but exhaust all node memory in requested
1.22 +     * zone before failing, only calc new node value if we fail to find memory
1.23 +     * in target node, this avoids needless computation on fast-path.
1.24 +     */
1.25      for ( i = 0; i < num_nodes; i++ )
1.26      {
1.27 -        for ( zone = zone_hi; zone-- > zone_lo; )
1.28 -        {
1.29 -            /* check if target node can support the allocation */
1.30 -            if ( avail[node] && (avail[node][zone] >= request) )
1.31 -            {
1.32 -                /* Find smallest order which can satisfy the request. */
1.33 -                for ( j = order; j <= MAX_ORDER; j++ )
1.34 -                {
1.35 -                    if ( !list_empty(&heap(node, zone, j)) )
1.36 -                        goto found;
1.37 -                }
1.38 -            }
1.39 -        }
1.40 +        zone = zone_hi;
1.41 +        do {
1.42 +            /* Check if target node can support the allocation. */
1.43 +            if ( !avail[node] || (avail[node][zone] < request) )
1.44 +                continue;
1.45 +
1.46 +            /* Find smallest order which can satisfy the request. */
1.47 +            for ( j = order; j <= MAX_ORDER; j++ )
1.48 +                if ( !list_empty(&heap(node, zone, j)) )
1.49 +                    goto found;
1.50 +        } while ( zone-- > zone_lo ); /* careful: unsigned zone may wrap */
1.51
1.52     /* Pick next node, wrapping around if needed. */
1.53     if ( ++node == num_nodes )