direct-io.hg

changeset 1267:5864a35a6c29

bitkeeper revision 1.825.3.23 (406c044cT7cMBER-dmrtu4_WyL1Jjg)

page_alloc.c:
Fix nasty bug in Xen heap allocator.

author kaf24@scramble.cl.cam.ac.uk
date Thu Apr 01 12:00:12 2004 +0000 (2004-04-01)
parents 9c5521fa1195
children 369b77bc3884 cf5c08d3c03d
files xen/common/page_alloc.c
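
In brief, reading from the hunks below: the "nasty bug" is in __free_pages()'s buddy-coalescing loop, where merging with a predecessor chunk moved the virtual address p back by size bytes but left the page number (pagenr) untouched, so every subsequent bitmap probe in the loop examined the wrong pages. The fix renames pagenr to pfn and steps it in lockstep with p. The remaining hunks are supporting changes: per-page double-allocation/double-free assertions in map_alloc()/map_free(), Xen-native phys_to_virt()/virt_to_phys() in place of the Linux-style __va()/__pa(), and a boot-time printk of the managed range.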
line diff
--- a/xen/common/page_alloc.c	Thu Apr 01 10:22:12 2004 +0000
+++ b/xen/common/page_alloc.c	Thu Apr 01 12:00:12 2004 +0000
@@ -56,6 +56,13 @@ static void map_alloc(unsigned long firs
 {
     unsigned long start_off, end_off, curr_idx, end_idx;
 
+#ifndef NDEBUG
+    unsigned long i;
+    /* Check that the block isn't already allocated. */
+    for ( i = 0; i < nr_pages; i++ )
+        ASSERT(!allocated_in_map(first_page + i));
+#endif
+
     curr_idx  = first_page / PAGES_PER_MAPWORD;
     start_off = first_page & (PAGES_PER_MAPWORD-1);
     end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
@@ -78,6 +85,13 @@ static void map_free(unsigned long first
 {
     unsigned long start_off, end_off, curr_idx, end_idx;
 
+#ifndef NDEBUG
+    unsigned long i;
+    /* Check that the block isn't already freed. */
+    for ( i = 0; i < nr_pages; i++ )
+        ASSERT(allocated_in_map(first_page + i));
+#endif
+
     curr_idx = first_page / PAGES_PER_MAPWORD;
     start_off = first_page & (PAGES_PER_MAPWORD-1);
     end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
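
The two hunks above add matching NDEBUG-only checks to map_alloc() and map_free(): every page in the block is verified against the allocation bitmap, not just the first, which also subsumes the single-page double-free check that a later hunk removes from __free_pages(). allocated_in_map() itself is not part of the diff; a minimal sketch of the bitmap probe it performs, assuming one bit per page packed into machine words (names reconstructed, not copied from the patch):

    /* Sketch only: assumed reconstruction of the bitmap probe. */
    extern unsigned long *alloc_bitmap;   /* set up in init_page_allocator() */

    #define PAGES_PER_MAPWORD (sizeof(unsigned long) * 8)

    static inline int allocated_in_map_sketch(unsigned long pn)
    {
        return (alloc_bitmap[pn / PAGES_PER_MAPWORD] &
                (1UL << (pn & (PAGES_PER_MAPWORD - 1)))) != 0;
    }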
@@ -227,7 +241,7 @@ void __init init_page_allocator(unsigned
     /* Allocate space for the allocation bitmap. */
     bitmap_size  = (max+1) >> (PAGE_SHIFT+3);
     bitmap_size  = round_pgup(bitmap_size);
-    alloc_bitmap = (unsigned long *)__va(min);
+    alloc_bitmap = (unsigned long *)phys_to_virt(min);
     min         += bitmap_size;
     range        = max - min;
 
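The __va() to phys_to_virt() change appears to be a straight rename to Xen's native helper; the sizing arithmetic above is unchanged and allocates one bitmap bit per page: >> PAGE_SHIFT converts bytes to pages and the extra >> 3 divides by the eight bits per byte, before round_pgup() pads the result to a page boundary. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and 256 MiB of memory:

    #include <stdio.h>

    int main(void)
    {
        unsigned long max = 256UL << 20;                /* assumed: 256 MiB */
        unsigned long bitmap_size = (max + 1) >> (12 + 3);

        /* 256 MiB / 4096 bytes-per-page / 8 bits-per-byte = 8 KiB. */
        printf("bitmap before rounding: %lu bytes\n", bitmap_size);
        return 0;
    }
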
@@ -240,6 +254,8 @@ void __init init_page_allocator(unsigned
     min += PAGE_OFFSET;
     max += PAGE_OFFSET;
 
+    printk("Initialising Xen allocator with %luMB memory\n", range >> 20);
+
     p         = min;
     remaining = range;
     while ( remaining != 0 )
@@ -315,7 +331,7 @@ retry:
         GUARD(spare_ch, i);
     }
 
-    map_alloc(__pa(alloc_ch)>>PAGE_SHIFT, 1<<order);
+    map_alloc(virt_to_phys(alloc_ch)>>PAGE_SHIFT, 1<<order);
 
     spin_unlock_irqrestore(&alloc_lock, flags);
 
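Here virt_to_phys(alloc_ch) >> PAGE_SHIFT turns the chunk's virtual address into its first page frame number, and an order-n chunk marks 1<<n consecutive bitmap bits. Consistent with the min += PAGE_OFFSET / max += PAGE_OFFSET lines in the init hunk, the heap is direct-mapped at PAGE_OFFSET, so both conversions reduce to an offset. A sketch under that assumption (the PAGE_OFFSET value is a placeholder, not taken from the patch):

    #define PAGE_SHIFT_SKETCH   12           /* assumed 4 KiB pages  */
    #define PAGE_OFFSET_SKETCH  0xFC400000UL /* placeholder value only */

    static inline unsigned long virt_to_phys_sketch(void *va)
    {
        return (unsigned long)va - PAGE_OFFSET_SKETCH;
    }

    static inline void *phys_to_virt_sketch(unsigned long pa)
    {
        return (void *)(pa + PAGE_OFFSET_SKETCH);
    }

    /* First pfn covered by a chunk at virtual address ch: */
    static inline unsigned long chunk_first_pfn(void *ch)
    {
        return virt_to_phys_sketch(ch) >> PAGE_SHIFT_SKETCH;
    }
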
@@ -349,14 +365,11 @@ void __free_pages(unsigned long p, int o
     chunk_head_t *ch;
     chunk_tail_t *ct;
     unsigned long flags;
-    unsigned long pagenr = __pa(p) >> PAGE_SHIFT;
+    unsigned long pfn = virt_to_phys((void *)p) >> PAGE_SHIFT;
 
     spin_lock_irqsave(&alloc_lock, flags);
 
 #ifdef MEMORY_GUARD
-    /* Check that the block isn't already freed. */
-    if ( !allocated_in_map(pagenr) )
-        BUG();
     /* Check that the block isn't already guarded. */
     if ( __put_user(1, (int*)p) )
         BUG();
@@ -364,7 +377,7 @@ void __free_pages(unsigned long p, int o
     memset((void *)p, 0xaa, size);
 #endif
 
-    map_free(pagenr, 1<<order);
+    map_free(pfn, 1<<order);
 
     /* Merge chunks as far as possible. */
     for ( ; ; )
@@ -372,18 +385,19 @@ void __free_pages(unsigned long p, int o
         if ( (p & size) )
         {
             /* Merge with predecessor block? */
-            if ( allocated_in_map(pagenr-1) )
+            if ( allocated_in_map(pfn-1) )
                 break;
             ct = (chunk_tail_t *)p - 1;
             if ( TAIL_LEVEL(ct) != order )
                 break;
-            ch = (chunk_head_t *)(p - size);
-            p -= size;
+            p   -= size;
+            pfn -= 1<<order;
+            ch   = (chunk_head_t *)p;
         }
         else
         {
             /* Merge with successor block? */
-            if ( allocated_in_map(pagenr+(1<<order)) )
+            if ( allocated_in_map(pfn+(1<<order)) )
                 break;
             ch = (chunk_head_t *)(p + size);
             if ( HEAD_LEVEL(ch) != order )
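
The final hunk is the bug itself. Before the patch, merging with the predecessor buddy executed ch = (chunk_head_t *)(p - size); p -= size; while pagenr kept its old value, so from the next iteration onward allocated_in_map(pagenr-1) and allocated_in_map(pagenr+(1<<order)) probed bitmap bits in the middle of the enlarged chunk rather than at its boundaries, and coalescing decisions were made against the wrong pages. The fix steps pfn back by 1<<order whenever p steps back by size. A condensed, annotated sketch of the corrected loop (the diff is cut off above; the unlink-and-continue tail is paraphrased as comments):

    /* Types and macros (chunk_head_t, chunk_tail_t, HEAD_LEVEL, TAIL_LEVEL,
     * allocated_in_map) are as in page_alloc.c.  Loop invariant restored by
     * this patch, at the top of every iteration:
     *     pfn == virt_to_phys((void *)p) >> PAGE_SHIFT                     */
    for ( ; ; )
    {
        if ( p & size )
        {
            /* p is the high buddy: try to merge with its predecessor. */
            if ( allocated_in_map(pfn - 1) )
                break;                    /* predecessor is still in use   */
            ct = (chunk_tail_t *)p - 1;
            if ( TAIL_LEVEL(ct) != order )
                break;                    /* predecessor is the wrong size */
            p   -= size;                  /* step back to the merged start */
            pfn -= 1 << order;            /* THE FIX: step pfn back too    */
            ch   = (chunk_head_t *)p;
        }
        else
        {
            /* p is the low buddy: try to merge with its successor. */
            if ( allocated_in_map(pfn + (1 << order)) )
                break;
            ch = (chunk_head_t *)(p + size);
            if ( HEAD_LEVEL(ch) != order )
                break;
            /* p and pfn are unchanged: the merged chunk still starts here. */
        }
        /* ...unlink ch from its free list, then order++, size <<= 1... */
    }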