ia64/xen-unstable

changeset 3355:d1e0d9a8fde0

bitkeeper revision 1.1159.1.518 (41d448acXfjJK8iSoExMLCtViOvsoA)

Merge scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Thu Dec 30 18:27:56 2004 +0000 (2004-12-30)
parents b7f02043001c 7f2bf9fecd7e
children ec21acd5e1a1
files .rootkeys xen/arch/x86/e820.c xen/arch/x86/memory.c xen/arch/x86/nmi.c xen/arch/x86/setup.c xen/arch/x86/x86_32/mm.c xen/common/memory.c xen/common/page_alloc.c xen/include/asm-x86/config.h xen/include/asm-x86/e820.h xen/include/asm-x86/mm.h xen/include/asm-x86/page.h xen/include/xen/lib.h xen/include/xen/mm.h
line diff
     1.1 --- a/.rootkeys	Thu Dec 30 14:59:09 2004 +0000
     1.2 +++ b/.rootkeys	Thu Dec 30 18:27:56 2004 +0000
     1.3 @@ -724,7 +724,6 @@ 41262590gGIOn-1pvF5KpUu8Wb6_JA xen/commo
     1.4  3ddb79bd9drcFPVxd4w2GPOIjLlXpA xen/common/kernel.c
     1.5  3e4cd9d8LAAghUY0hNIK72uc2ch_Nw xen/common/keyhandler.c
     1.6  3ddb79bduhSEZI8xa7IbGQCpap5y2A xen/common/lib.c
     1.7 -3ddb79bdS39UXxUtZnaScie83-7VTQ xen/common/memory.c
     1.8  41a61536SZbR6cj1ukWTb0DYU-vz9w xen/common/multicall.c
     1.9  3ddb79bdD4SLmmdMD7yLW5HcUWucXw xen/common/page_alloc.c
    1.10  3e54c38dkHAev597bPr71-hGzTdocg xen/common/perfc.c
     2.1 --- a/xen/arch/x86/e820.c	Thu Dec 30 14:59:09 2004 +0000
     2.2 +++ b/xen/arch/x86/e820.c	Thu Dec 30 18:27:56 2004 +0000
     2.3 @@ -27,12 +27,12 @@ static void __init add_memory_region(uns
     2.4  
     2.5  #define E820_DEBUG	1
     2.6  
     2.7 -static void __init print_memory_map(char *who)
     2.8 +static void __init print_memory_map(void)
     2.9  {
    2.10      int i;
    2.11  
    2.12      for (i = 0; i < e820.nr_map; i++) {
    2.13 -        printk(" %s: %016Lx - %016Lx ", who,
    2.14 +        printk(" %016Lx - %016Lx ",
    2.15                 e820.map[i].addr,
    2.16                 e820.map[i].addr + e820.map[i].size);
    2.17          switch (e820.map[i].type) {
    2.18 @@ -305,19 +305,18 @@ static unsigned long __init find_max_pfn
    2.19      return max_pfn;
    2.20  }
    2.21  
    2.22 -static char * __init machine_specific_memory_setup(
    2.23 +static void __init machine_specific_memory_setup(
    2.24      struct e820entry *raw, int raw_nr)
    2.25  {
    2.26      char nr = (char)raw_nr;
    2.27 -    char *who = "Pseudo-e820";
    2.28      sanitize_e820_map(raw, &nr);
    2.29      (void)copy_e820_map(raw, nr);
    2.30 -    return who;
    2.31  }
    2.32  
    2.33  unsigned long init_e820(struct e820entry *raw, int raw_nr)
    2.34  {
    2.35 +    machine_specific_memory_setup(raw, raw_nr);
    2.36      printk(KERN_INFO "Physical RAM map:\n");
    2.37 -    print_memory_map(machine_specific_memory_setup(raw, raw_nr));
    2.38 +    print_memory_map();
    2.39      return find_max_pfn();
    2.40  }
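
The refactored init_e820() now calls machine_specific_memory_setup() purely for its side effects and prints the map without the redundant "Pseudo-e820" label. The find_max_pfn() it finishes with is not part of this hunk, so the standalone sketch below is only a plausible shape for that scan, with toy stand-ins for Xen's types; every name below the #defines is illustrative.

/* Illustrative sketch only: the post-patch init_e820() tail. The body of
 * find_max_pfn() is not in this changeset; this shape is an assumption. */
#include <stdio.h>
#include <stdint.h>

#define E820_RAM   1
#define PAGE_SHIFT 12

struct e820entry { uint64_t addr, size; uint32_t type; };
static struct { int nr_map; struct e820entry map[32]; } e820;

static unsigned long find_max_pfn(void)
{
    unsigned long max_pfn = 0;
    for (int i = 0; i < e820.nr_map; i++) {
        uint64_t end = e820.map[i].addr + e820.map[i].size;
        if (e820.map[i].type == E820_RAM && (end >> PAGE_SHIFT) > max_pfn)
            max_pfn = end >> PAGE_SHIFT;
    }
    return max_pfn;
}

int main(void)
{
    e820.nr_map = 2;
    e820.map[0] = (struct e820entry){ 0x0,      0x9f000,    E820_RAM };
    e820.map[1] = (struct e820entry){ 0x100000, 0x3ff00000, E820_RAM };
    printf("max_pfn = %#lx\n", find_max_pfn());   /* 0x40000 (1GB of RAM) */
    return 0;
}
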
     3.1 --- a/xen/arch/x86/memory.c	Thu Dec 30 14:59:09 2004 +0000
     3.2 +++ b/xen/arch/x86/memory.c	Thu Dec 30 18:27:56 2004 +0000
     3.3 @@ -86,6 +86,7 @@
     3.4  
     3.5  #include <xen/config.h>
     3.6  #include <xen/init.h>
     3.7 +#include <xen/kernel.h>
     3.8  #include <xen/lib.h>
     3.9  #include <xen/mm.h>
    3.10  #include <xen/sched.h>
    3.11 @@ -140,9 +141,34 @@ static struct {
    3.12  /* Private domain structs for DOMID_XEN and DOMID_IO. */
    3.13  static struct domain *dom_xen, *dom_io;
    3.14  
    3.15 +/* Frame table and its size in pages. */
    3.16 +struct pfn_info *frame_table;
    3.17 +unsigned long frame_table_size;
    3.18 +unsigned long max_page;
    3.19 +
    3.20 +void __init init_frametable(void)
    3.21 +{
    3.22 +    unsigned long i, p;
    3.23 +
    3.24 +    frame_table      = (struct pfn_info *)FRAMETABLE_VIRT_START;
    3.25 +    frame_table_size = max_page * sizeof(struct pfn_info);
    3.26 +    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
    3.27 +
    3.28 +    for ( i = 0; i < frame_table_size; i += (4UL << 20) )
    3.29 +    {
    3.30 +        p = alloc_boot_pages(min(frame_table_size - i, 4UL << 20), 4UL << 20);
    3.31 +        if ( p == 0 )
    3.32 +            panic("Not enough memory for frame table\n");
    3.33 +        idle_pg_table[(FRAMETABLE_VIRT_START + i) >> L2_PAGETABLE_SHIFT] =
    3.34 +            mk_l2_pgentry(p | __PAGE_HYPERVISOR | _PAGE_PSE);
    3.35 +    }
    3.36 +
    3.37 +    memset(frame_table, 0, frame_table_size);
    3.38 +}
    3.39 +
    3.40  void arch_init_memory(void)
    3.41  {
    3.42 -    unsigned long mfn;
    3.43 +    unsigned long mfn, i;
    3.44  
    3.45      /*
    3.46       * We are rather picky about the layout of 'struct pfn_info'. The
    3.47 @@ -185,13 +211,13 @@ void arch_init_memory(void)
    3.48      dom_io->id = DOMID_IO;
    3.49  
    3.50      /* M2P table is mappable read-only by privileged domains. */
    3.51 -    for ( mfn = virt_to_phys(&machine_to_phys_mapping[0<<20])>>PAGE_SHIFT;
    3.52 -          mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
    3.53 -          mfn++ )
    3.54 +    mfn = l2_pgentry_to_pagenr(
    3.55 +        idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]);
    3.56 +    for ( i = 0; i < 1024; i++ )
    3.57      {
    3.58 -        frame_table[mfn].count_info        = PGC_allocated | 1;
    3.59 -        frame_table[mfn].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
    3.60 -        frame_table[mfn].u.inuse.domain    = dom_xen;
    3.61 +        frame_table[mfn+i].count_info        = PGC_allocated | 1;
    3.62 +        frame_table[mfn+i].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
    3.63 +        frame_table[mfn+i].u.inuse.domain    = dom_xen;
    3.64      }
    3.65  }
    3.66  
    3.67 @@ -501,11 +527,10 @@ static int alloc_l2_table(struct pfn_inf
    3.68     
    3.69      pl2e = map_domain_mem(page_nr << PAGE_SHIFT);
    3.70  
    3.71 -    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ ) {
    3.72 +    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
    3.73          if ( unlikely(!get_page_from_l2e(pl2e[i], page_nr, d, i)) )
    3.74              goto fail;
    3.75 -    }
    3.76 -    
    3.77 +
    3.78  #if defined(__i386__)
    3.79      /* Now we add our private high mappings. */
    3.80      memcpy(&pl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
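
The relocated init_frametable() now sizes the table from max_page and maps it with 4MB _PAGE_PSE superpages pulled from the boot allocator, instead of assuming it fits inside the direct map. A rough standalone check of the sizing arithmetic, with sizeof(struct pfn_info) assumed to be 24 bytes purely for illustration (the real structure is in xen/include/asm-x86/mm.h):

/* Standalone sizing check for the new init_frametable(). */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define SUPERPAGE     (4UL << 20)           /* one _PAGE_PSE l2 mapping */
#define PFN_INFO_SIZE 24UL                  /* assumed, see above */

int main(void)
{
    unsigned long max_page = 1UL << 20;     /* 4GB of RAM in 4kB frames */
    unsigned long ft_size  = max_page * PFN_INFO_SIZE;

    ft_size = (ft_size + PAGE_SIZE - 1) & PAGE_MASK;  /* as in the hunk */

    printf("frame table: %luMB, %lu superpage mappings\n",
           ft_size >> 20, (ft_size + SUPERPAGE - 1) / SUPERPAGE);
    return 0;                               /* 24MB => 6 x 4MB mappings */
}

At that assumed entry size, a full 4GB machine needs 24MB of frame table, which matches the fixed FRAMETABLE window carved out in the config.h hunk below.
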
     4.1 --- a/xen/arch/x86/nmi.c	Thu Dec 30 14:59:09 2004 +0000
     4.2 +++ b/xen/arch/x86/nmi.c	Thu Dec 30 18:27:56 2004 +0000
     4.3 @@ -286,6 +286,7 @@ void nmi_watchdog_tick (struct xen_regs 
     4.4          if ( alert_counter[cpu] == 5*nmi_hz )
     4.5          {
     4.6              console_force_unlock();
     4.7 +            printk("Watchdog timer detects that CPU%d is stuck!\n", cpu);
     4.8              fatal_trap(TRAP_nmi, regs);
     4.9          }
    4.10      } 
     5.1 --- a/xen/arch/x86/setup.c	Thu Dec 30 14:59:09 2004 +0000
     5.2 +++ b/xen/arch/x86/setup.c	Thu Dec 30 18:27:56 2004 +0000
     5.3 @@ -30,7 +30,9 @@ integer_param("dom0_mem", opt_dom0_mem);
     5.4   * pfn_info table and allocation bitmap.
     5.5   */
     5.6  static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
     5.7 +#if defined(__x86_64__)
     5.8  integer_param("xenheap_megabytes", opt_xenheap_megabytes);
     5.9 +#endif
    5.10  
    5.11  /* opt_noht: If true, Hyperthreading is ignored. */
    5.12  int opt_noht = 0;
    5.13 @@ -466,15 +468,14 @@ static void __init start_of_day(void)
    5.14  
    5.15  void __init __start_xen(multiboot_info_t *mbi)
    5.16  {
    5.17 -    unsigned long max_page;
    5.18      unsigned char *cmdline;
    5.19      module_t *mod = (module_t *)__va(mbi->mods_addr);
    5.20      void *heap_start;
    5.21 -    unsigned long max_mem;
    5.22 +    unsigned long firsthole_start, nr_pages;
    5.23      unsigned long dom0_memory_start, dom0_memory_end;
    5.24      unsigned long initial_images_start, initial_images_end;
    5.25      struct e820entry e820_raw[E820MAX];
    5.26 -    int e820_raw_nr = 0, bytes = 0;
    5.27 +    int i, e820_raw_nr = 0, bytes = 0;
    5.28  
    5.29      /* Parse the command-line options. */
    5.30      if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
    5.31 @@ -495,12 +496,6 @@ void __init __start_xen(multiboot_info_t
    5.32          for ( ; ; ) ;
    5.33      }
    5.34  
    5.35 -    if ( opt_xenheap_megabytes < 4 )
    5.36 -    {
    5.37 -        printk("FATAL ERROR: Xen heap is too small to safely continue!\n");
    5.38 -        for ( ; ; ) ;
    5.39 -    }
    5.40 -
    5.41      xenheap_phys_end = opt_xenheap_megabytes << 20;
    5.42  
    5.43      if ( mbi->flags & MBI_MEMMAP )
    5.44 @@ -523,9 +518,9 @@ void __init __start_xen(multiboot_info_t
    5.45          e820_raw[0].addr = 0;
    5.46          e820_raw[0].size = mbi->mem_lower << 10;
    5.47          e820_raw[0].type = E820_RAM;
    5.48 -        e820_raw[0].addr = 0x100000;
    5.49 -        e820_raw[0].size = mbi->mem_upper << 10;
    5.50 -        e820_raw[0].type = E820_RAM;
    5.51 +        e820_raw[1].addr = 0x100000;
    5.52 +        e820_raw[1].size = mbi->mem_upper << 10;
    5.53 +        e820_raw[1].type = E820_RAM;
    5.54          e820_raw_nr = 2;
    5.55      }
    5.56      else
    5.57 @@ -534,76 +529,70 @@ void __init __start_xen(multiboot_info_t
    5.58          for ( ; ; ) ;
    5.59      }
    5.60  
    5.61 -    max_mem = max_page = init_e820(e820_raw, e820_raw_nr);
    5.62 -    max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
    5.63 +    max_page = init_e820(e820_raw, e820_raw_nr);
    5.64  
    5.65 -#if defined(__i386__)
    5.66 +    /* Find the first high-memory RAM hole. */
    5.67 +    for ( i = 0; i < e820.nr_map; i++ )
    5.68 +        if ( (e820.map[i].type == E820_RAM) &&
    5.69 +             (e820.map[i].addr >= 0x100000) )
    5.70 +            break;
    5.71 +    firsthole_start = e820.map[i].addr + e820.map[i].size;
    5.72  
    5.73 -    initial_images_start = DIRECTMAP_PHYS_END;
    5.74 +    /* Relocate the Multiboot modules. */
    5.75 +    initial_images_start = xenheap_phys_end;
    5.76      initial_images_end   = initial_images_start + 
    5.77          (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    5.78 -    if ( initial_images_end > (max_page << PAGE_SHIFT) )
    5.79 +    if ( initial_images_end > firsthole_start )
    5.80      {
    5.81          printk("Not enough memory to stash the DOM0 kernel image.\n");
    5.82          for ( ; ; ) ;
    5.83      }
    5.84 +#if defined(__i386__)
    5.85      memmove((void *)initial_images_start,  /* use low mapping */
    5.86              (void *)mod[0].mod_start,      /* use low mapping */
    5.87              mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    5.88 -
    5.89 -    if ( opt_xenheap_megabytes > XENHEAP_DEFAULT_MB )
    5.90 -    {
    5.91 -        printk("Xen heap size is limited to %dMB - you specified %dMB.\n",
    5.92 -               XENHEAP_DEFAULT_MB, opt_xenheap_megabytes);
    5.93 -        for ( ; ; ) ;
    5.94 -    }
    5.95 -
    5.96 -    ASSERT((sizeof(struct pfn_info) << 20) <=
    5.97 -           (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
    5.98 -
    5.99 -    init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
   5.100 -
   5.101  #elif defined(__x86_64__)
   5.102 -
   5.103 -    init_frametable(__va(xenheap_phys_end), max_page);
   5.104 -
   5.105 -    initial_images_start = __pa(frame_table) + frame_table_size;
   5.106 -    initial_images_end   = initial_images_start + 
   5.107 -        (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
   5.108 -    if ( initial_images_end > (max_page << PAGE_SHIFT) )
   5.109 -    {
   5.110 -        printk("Not enough memory to stash the DOM0 kernel image.\n");
   5.111 -        for ( ; ; ) ;
   5.112 -    }
   5.113      memmove(__va(initial_images_start),
   5.114              __va(mod[0].mod_start),
   5.115              mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
   5.116 -
   5.117  #endif
   5.118  
   5.119 -    dom0_memory_start    = (initial_images_end + ((4<<20)-1)) & ~((4<<20)-1);
   5.120 -    dom0_memory_end      = dom0_memory_start + (opt_dom0_mem << 10);
   5.121 -    dom0_memory_end      = (dom0_memory_end + PAGE_SIZE - 1) & PAGE_MASK;
   5.122 -    
   5.123 -    /* Cheesy sanity check: enough memory for DOM0 allocation + some slack? */
   5.124 -    if ( (dom0_memory_end + (8<<20)) > (max_page << PAGE_SHIFT) )
   5.125 +    /* Initialise boot-time allocator with all RAM situated after modules. */
   5.126 +    heap_start = memguard_init(&_end);
   5.127 +    heap_start = __va(init_boot_allocator(__pa(heap_start)));
   5.128 +    nr_pages   = 0;
   5.129 +    for ( i = 0; i < e820.nr_map; i++ )
   5.130 +    {
   5.131 +        if ( e820.map[i].type != E820_RAM )
   5.132 +            continue;
   5.133 +        nr_pages += e820.map[i].size >> PAGE_SHIFT;
   5.134 +        if ( (e820.map[i].addr + e820.map[i].size) >= initial_images_end )
   5.135 +            init_boot_pages((e820.map[i].addr < initial_images_end) ?
   5.136 +                            initial_images_end : e820.map[i].addr,
   5.137 +                            e820.map[i].addr + e820.map[i].size);
   5.138 +    }
   5.139 +
   5.140 +    printk("System RAM: %luMB (%lukB)\n", 
   5.141 +           nr_pages >> (20 - PAGE_SHIFT),
   5.142 +           nr_pages << (PAGE_SHIFT - 10));
   5.143 +
   5.144 +    /* Allocate an aligned chunk of RAM for DOM0. */
   5.145 +    dom0_memory_start = alloc_boot_pages(opt_dom0_mem << 10, 4UL << 20);
   5.146 +    dom0_memory_end   = dom0_memory_start + (opt_dom0_mem << 10);
   5.147 +    if ( dom0_memory_start == 0 )
   5.148      {
   5.149          printk("Not enough memory for DOM0 memory reservation.\n");
   5.150          for ( ; ; ) ;
   5.151      }
   5.152  
   5.153 -    printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
   5.154 -           max_page >> (20-PAGE_SHIFT), max_page,
   5.155 -	   max_mem  >> (20-PAGE_SHIFT));
   5.156 +    init_frametable();
   5.157 +
   5.158 +    end_boot_allocator();
   5.159  
   5.160 -    heap_start = memguard_init(&_end);
   5.161 -    heap_start = __va(init_heap_allocator(__pa(heap_start), max_page));
   5.162 - 
   5.163      init_xenheap_pages(__pa(heap_start), xenheap_phys_end);
   5.164 -    printk("Xen heap size is %luKB\n", 
   5.165 -	   (xenheap_phys_end-__pa(heap_start))/1024 );
   5.166 -
   5.167 -    init_domheap_pages(dom0_memory_end, max_page << PAGE_SHIFT);
   5.168 +    printk("Xen heap: %luMB (%lukB)\n",
   5.169 +	   (xenheap_phys_end-__pa(heap_start)) >> 20,
   5.170 +	   (xenheap_phys_end-__pa(heap_start)) >> 10);
   5.171  
   5.172      /* Initialise the slab allocator. */
   5.173      xmem_cache_init();
   5.174 @@ -649,8 +638,7 @@ void __init __start_xen(multiboot_info_t
   5.175          panic("Could not set up DOM0 guest OS\n");
   5.176  
   5.177      /* The stash space for the initial kernel image can now be freed up. */
   5.178 -    init_domheap_pages(__pa(frame_table) + frame_table_size,
   5.179 -                       dom0_memory_start);
   5.180 +    init_domheap_pages(initial_images_start, initial_images_end);
   5.181  
   5.182      scrub_heap_pages();
   5.183  
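
The rewritten __start_xen() now derives everything from the e820 map: it seeds the boot allocator with all RAM above the relocated modules, then pulls DOM0's 4MB-aligned reservation back out of it. The new banner printks convert page counts by shifting; a standalone check of those unit conversions, with invented values:

/* Unit conversions used by the new banner printks and DOM0 reservation. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long nr_pages = 131072;        /* 512MB in 4kB pages */

    /* pages -> MB: >> (20 - PAGE_SHIFT); pages -> kB: << (PAGE_SHIFT - 10) */
    printf("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),   /* 131072 >> 8 = 512    */
           nr_pages << (PAGE_SHIFT - 10));  /* 131072 << 2 = 524288 */

    /* opt_dom0_mem is in kB, so << 10 yields bytes; the second argument
     * to alloc_boot_pages() requests 4MB alignment. */
    unsigned long opt_dom0_mem = 65536;     /* 64MB expressed in kB */
    printf("dom0: %lu bytes, 4MB-aligned\n", opt_dom0_mem << 10);
    return 0;
}
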
     6.1 --- a/xen/arch/x86/x86_32/mm.c	Thu Dec 30 14:59:09 2004 +0000
     6.2 +++ b/xen/arch/x86/x86_32/mm.c	Thu Dec 30 18:27:56 2004 +0000
     6.3 @@ -57,14 +57,25 @@ void __set_fixmap(enum fixed_addresses i
     6.4  void __init paging_init(void)
     6.5  {
     6.6      void *ioremap_pt;
     6.7 -    int i;
     6.8 +    unsigned long v, l2e;
     6.9 +    struct pfn_info *pg;
    6.10  
    6.11 -    /* Xen heap mappings can be GLOBAL. */
    6.12 +    /* Allocate and map the machine-to-phys table. */
    6.13 +    if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
    6.14 +        panic("Not enough memory to bootstrap Xen.\n");
    6.15 +    idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
    6.16 +        mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
    6.17 +
    6.18 +    /* Xen 4MB mappings can all be GLOBAL. */
    6.19      if ( cpu_has_pge )
    6.20      {
    6.21 -        for ( i = 0; i < DIRECTMAP_PHYS_END; i += (1 << L2_PAGETABLE_SHIFT) )
    6.22 -            ((unsigned long *)idle_pg_table)
    6.23 -                [(i + PAGE_OFFSET) >> L2_PAGETABLE_SHIFT] |= _PAGE_GLOBAL;
    6.24 +        for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
    6.25 +        {
    6.26 +             l2e = l2_pgentry_val(idle_pg_table[v >> L2_PAGETABLE_SHIFT]);
    6.27 +             if ( l2e & _PAGE_PSE )
    6.28 +                 l2e |= _PAGE_GLOBAL;
    6.29 +             idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
    6.30 +        }
    6.31      }
    6.32  
    6.33      /* Create page table for ioremap(). */
    6.34 @@ -422,7 +433,7 @@ void *memguard_init(void *heap_start)
    6.35              l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
    6.36                                     (j << L1_PAGETABLE_SHIFT) | 
    6.37                                    __PAGE_HYPERVISOR);
    6.38 -        idle_pg_table[i] = idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
    6.39 +        idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
    6.40              mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
    6.41      }
    6.42  
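
The new PGE loop's bare "v" condition relies on unsigned wraparound: starting at HYPERVISOR_VIRT_START and stepping one 4MB l2 slot at a time, a 32-bit counter passes the top of the address space and wraps to 0, ending the loop. A minimal standalone demonstration with an explicit uint32_t so the wraparound is portable:

#include <stdio.h>
#include <stdint.h>

#define HYPERVISOR_VIRT_START 0xFC000000u
#define L2_PAGETABLE_SHIFT    22            /* 4MB per l2 entry */

int main(void)
{
    int slots = 0;
    for (uint32_t v = HYPERVISOR_VIRT_START; v; v += 1u << L2_PAGETABLE_SHIFT)
        slots++;                            /* body runs while v != 0 */
    printf("%d l2 slots\n", slots);         /* 16 slots = 64MB hole */
    return 0;
}
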
     7.1 --- a/xen/common/memory.c	Thu Dec 30 14:59:09 2004 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,51 +0,0 @@
     7.4 -/******************************************************************************
     7.5 - * memory.c
     7.6 - * 
     7.7 - * Copyright (c) 2002-2004 K A Fraser
     7.8 - * 
     7.9 - * This program is free software; you can redistribute it and/or modify
    7.10 - * it under the terms of the GNU General Public License as published by
    7.11 - * the Free Software Foundation; either version 2 of the License, or
    7.12 - * (at your option) any later version.
    7.13 - * 
    7.14 - * This program is distributed in the hope that it will be useful,
    7.15 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
    7.16 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    7.17 - * GNU General Public License for more details.
    7.18 - * 
    7.19 - * You should have received a copy of the GNU General Public License
    7.20 - * along with this program; if not, write to the Free Software
    7.21 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    7.22 - */
    7.23 -
    7.24 -#include <xen/config.h>
    7.25 -#include <xen/init.h>
    7.26 -#include <xen/lib.h>
    7.27 -#include <xen/mm.h>
    7.28 -#include <xen/sched.h>
    7.29 -#include <xen/errno.h>
    7.30 -#include <xen/perfc.h>
    7.31 -#include <xen/irq.h>
    7.32 -#include <asm/page.h>
    7.33 -#include <asm/flushtlb.h>
    7.34 -#include <asm/io.h>
    7.35 -#include <asm/uaccess.h>
    7.36 -#include <asm/domain_page.h>
    7.37 -
    7.38 -/* Frame table and its size in pages. */
    7.39 -struct pfn_info *frame_table;
    7.40 -unsigned long frame_table_size;
    7.41 -unsigned long max_page;
    7.42 -
    7.43 -void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
    7.44 -{
    7.45 -    max_page = nr_pages;
    7.46 -    frame_table_size = nr_pages * sizeof(struct pfn_info);
    7.47 -    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
    7.48 -    frame_table = frametable_vstart;
    7.49 -
    7.50 -    if ( (__pa(frame_table) + frame_table_size) > (max_page << PAGE_SHIFT) )
    7.51 -        panic("Not enough memory for frame table - reduce Xen heap size?\n");
    7.52 -
    7.53 -    memset(frame_table, 0, frame_table_size);
    7.54 -}
     8.1 --- a/xen/common/page_alloc.c	Thu Dec 30 14:59:09 2004 +0000
     8.2 +++ b/xen/common/page_alloc.c	Thu Dec 30 18:27:56 2004 +0000
     8.3 @@ -37,6 +37,9 @@
     8.4  static char opt_badpage[100] = "";
     8.5  string_param("badpage", opt_badpage);
     8.6  
     8.7 +#define round_pgdown(_p)  ((_p)&PAGE_MASK)
     8.8 +#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
     8.9 +
    8.10  /*********************
    8.11   * ALLOCATION BITMAP
    8.12   *  One bit per page of memory. Bit set => page is allocated.
    8.13 @@ -98,7 +101,7 @@ static void map_free(unsigned long first
    8.14          ASSERT(allocated_in_map(first_page + i));
    8.15  #endif
    8.16  
    8.17 -    curr_idx = first_page / PAGES_PER_MAPWORD;
    8.18 +    curr_idx  = first_page / PAGES_PER_MAPWORD;
    8.19      start_off = first_page & (PAGES_PER_MAPWORD-1);
    8.20      end_idx   = (first_page + nr_pages) / PAGES_PER_MAPWORD;
    8.21      end_off   = (first_page + nr_pages) & (PAGES_PER_MAPWORD-1);
    8.22 @@ -118,6 +121,80 @@ static void map_free(unsigned long first
    8.23  
    8.24  
    8.25  /*************************
    8.26 + * BOOT-TIME ALLOCATOR
    8.27 + */
    8.28 +
    8.29 +/* Initialise allocator to handle up to @max_page pages. */
    8.30 +unsigned long init_boot_allocator(unsigned long bitmap_start)
    8.31 +{
    8.32 +    bitmap_start = round_pgup(bitmap_start);
    8.33 +
    8.34 +    /* Allocate space for the allocation bitmap. */
    8.35 +    bitmap_size  = max_page / 8;
    8.36 +    bitmap_size  = round_pgup(bitmap_size);
    8.37 +    alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
    8.38 +
    8.39 +    /* All allocated by default. */
    8.40 +    memset(alloc_bitmap, ~0, bitmap_size);
    8.41 +
    8.42 +    return bitmap_start + bitmap_size;
    8.43 +}
    8.44 +
    8.45 +void init_boot_pages(unsigned long ps, unsigned long pe)
    8.46 +{
    8.47 +    unsigned long bad_pfn;
    8.48 +    char *p;
    8.49 +
    8.50 +    ps = round_pgup(ps);
    8.51 +    pe = round_pgdown(pe);
    8.52 +
    8.53 +    map_free(ps >> PAGE_SHIFT, (pe - ps) >> PAGE_SHIFT);
    8.54 +
    8.55 +    /* Check new pages against the bad-page list. */
    8.56 +    p = opt_badpage;
    8.57 +    while ( *p != '\0' )
    8.58 +    {
    8.59 +        bad_pfn = simple_strtoul(p, &p, 0);
    8.60 +
    8.61 +        if ( *p == ',' )
    8.62 +            p++;
    8.63 +        else if ( *p != '\0' )
    8.64 +            break;
    8.65 +
    8.66 +        if ( (bad_pfn < (bitmap_size*8)) && !allocated_in_map(bad_pfn) )
    8.67 +        {
    8.68 +            printk("Marking page %08lx as bad\n", bad_pfn);
    8.69 +            map_alloc(bad_pfn, 1);
    8.70 +        }
    8.71 +    }
    8.72 +}
    8.73 +
    8.74 +unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
    8.75 +{
    8.76 +    unsigned long pg, i;
    8.77 +
    8.78 +    size  = round_pgup(size) >> PAGE_SHIFT;
    8.79 +    align = round_pgup(align) >> PAGE_SHIFT;
    8.80 +
    8.81 +    for ( pg = 0; (pg + size) < (bitmap_size*PAGES_PER_MAPWORD); pg += align )
    8.82 +    {
    8.83 +        for ( i = 0; i < size; i++ )
    8.84 +            if ( allocated_in_map(pg + i) )
    8.85 +                 break;
    8.86 +
    8.87 +        if ( i == size )
    8.88 +        {
    8.89 +            map_alloc(pg, size);
    8.90 +            return pg << PAGE_SHIFT;
    8.91 +        }
    8.92 +    }
    8.93 +
    8.94 +    return 0;
    8.95 +}
    8.96 +
    8.97 +
    8.98 +
    8.99 +/*************************
   8.100   * BINARY BUDDY ALLOCATOR
   8.101   */
   8.102  
   8.103 @@ -133,18 +210,12 @@ static struct list_head heap[NR_ZONES][N
   8.104  
   8.105  static unsigned long avail[NR_ZONES];
   8.106  
   8.107 -#define round_pgdown(_p)  ((_p)&PAGE_MASK)
   8.108 -#define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
   8.109 -
   8.110  static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
   8.111  
   8.112 -/* Initialise allocator to handle up to @max_pages. */
   8.113 -unsigned long init_heap_allocator(
   8.114 -    unsigned long bitmap_start, unsigned long max_pages)
   8.115 +void end_boot_allocator(void)
   8.116  {
   8.117 -    int i, j;
   8.118 -    unsigned long bad_pfn;
   8.119 -    char *p;
   8.120 +    unsigned long i, j;
   8.121 +    int curr_free = 0, next_free = 0;
   8.122  
   8.123      memset(avail, 0, sizeof(avail));
   8.124  
   8.125 @@ -152,53 +223,25 @@ unsigned long init_heap_allocator(
   8.126          for ( j = 0; j < NR_ORDERS; j++ )
   8.127              INIT_LIST_HEAD(&heap[i][j]);
   8.128  
   8.129 -    bitmap_start = round_pgup(bitmap_start);
   8.130 -
   8.131 -    /* Allocate space for the allocation bitmap. */
   8.132 -    bitmap_size  = max_pages / 8;
   8.133 -    bitmap_size  = round_pgup(bitmap_size);
   8.134 -    alloc_bitmap = (unsigned long *)phys_to_virt(bitmap_start);
   8.135 -
   8.136 -    /* All allocated by default. */
   8.137 -    memset(alloc_bitmap, ~0, bitmap_size);
   8.138 -
   8.139 -    /*
   8.140 -     * Process the bad-page list. Marking the page free in the bitmap will
   8.141 -     * indicate to init_heap_pages() that it should not be placed on the 
   8.142 -     * buddy lists.
   8.143 -     */
   8.144 -    p = opt_badpage;
   8.145 -    while ( *p != '\0' )
   8.146 +    /* Pages that are free now go to the domain sub-allocator. */
   8.147 +    for ( i = 0; i < max_page; i++ )
   8.148      {
   8.149 -        bad_pfn = simple_strtoul(p, &p, 0);
   8.150 -
   8.151 -        if ( *p == ',' )
   8.152 -            p++;
   8.153 -        else if ( *p != '\0' )
   8.154 -            break;
   8.155 -
   8.156 -        if ( (bad_pfn < max_pages) && allocated_in_map(bad_pfn) )
   8.157 -        {
   8.158 -            printk("Marking page %08lx as bad\n", bad_pfn);
   8.159 -            map_free(bad_pfn, 1);
   8.160 -        }
   8.161 +        curr_free = next_free;
   8.162 +        next_free = !allocated_in_map(i+1);
   8.163 +        if ( next_free )
   8.164 +            map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
   8.165 +        if ( curr_free )
   8.166 +            free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0);
   8.167      }
   8.168 -
   8.169 -    return bitmap_start + bitmap_size;
   8.170  }
   8.171  
   8.172 -
   8.173  /* Hand the specified arbitrary page range to the specified heap zone. */
   8.174  void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
   8.175  {
   8.176 -    unsigned long i, pfn = page_to_pfn(pg);
   8.177 +    unsigned long i;
   8.178  
   8.179 -    /* Process each page in turn, skipping bad pages. */
   8.180      for ( i = 0; i < nr_pages; i++ )
   8.181 -    {
   8.182 -        if ( likely(allocated_in_map(pfn+i)) ) /* bad page? */
   8.183 -            free_heap_pages(zone, pg+i, 0);
   8.184 -    }
   8.185 +        free_heap_pages(zone, pg+i, 0);
   8.186  }
   8.187  
   8.188  
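
alloc_boot_pages() is an aligned first-fit scan over the one-bit-per-page allocation bitmap, with every page marked allocated until init_boot_pages() frees the usable RAM ranges. A toy standalone model of the same search follows; two deliberate simplifications are that sizes are in pages rather than bytes, and failure returns -1 (the real function returns 0, which is unambiguous because this changeset's __start_xen() never frees the low megabyte into the boot allocator):

#include <stdio.h>
#include <string.h>

#define NR_PAGES 1024
static unsigned char bitmap[NR_PAGES / 8];  /* bit set => allocated */

static int allocated(unsigned long pg)
{
    return (bitmap[pg / 8] >> (pg % 8)) & 1;
}

static void set_range(unsigned long pg, unsigned long n, int alloc)
{
    for ( ; n--; pg++ )
    {
        if ( alloc ) bitmap[pg / 8] |=  1 << (pg % 8);
        else         bitmap[pg / 8] &= ~(1 << (pg % 8));
    }
}

static long boot_alloc(unsigned long size, unsigned long align)
{
    unsigned long pg, i;
    for ( pg = 0; (pg + size) < NR_PAGES; pg += align )
    {
        for ( i = 0; i < size; i++ )
            if ( allocated(pg + i) )
                break;
        if ( i == size )                    /* whole run free: claim it */
        {
            set_range(pg, size, 1);
            return pg;
        }
    }
    return -1;
}

int main(void)
{
    memset(bitmap, 0xff, sizeof(bitmap));   /* all allocated by default */
    set_range(100, 600, 0);                 /* "init_boot_pages" [100,700) */
    printf("128 pages @ 128-page alignment -> page %ld\n",
           boot_alloc(128, 128));           /* first aligned fit: page 128 */
    return 0;
}
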
     9.1 --- a/xen/include/asm-x86/config.h	Thu Dec 30 14:59:09 2004 +0000
     9.2 +++ b/xen/include/asm-x86/config.h	Thu Dec 30 18:27:56 2004 +0000
     9.3 @@ -165,7 +165,7 @@ extern void __out_of_line_bug(int line) 
     9.4  #elif defined(__i386__)
     9.5  
     9.6  #define XENHEAP_DEFAULT_MB (12)
     9.7 -#define DIRECTMAP_PHYS_END (40*1024*1024)
     9.8 +#define DIRECTMAP_PHYS_END (12*1024*1024)
     9.9  
    9.10  /* Hypervisor owns top 64MB of virtual address space. */
    9.11  #define __HYPERVISOR_VIRT_START  0xFC000000
    9.12 @@ -177,17 +177,19 @@ extern void __out_of_line_bug(int line) 
    9.13   */
    9.14  #define RO_MPT_VIRT_START     (HYPERVISOR_VIRT_START)
    9.15  #define RO_MPT_VIRT_END       (RO_MPT_VIRT_START + (4*1024*1024))
    9.16 -/* The virtual addresses for the 40MB direct-map region. */
    9.17 +/* Xen heap extends to end of 1:1 direct-mapped memory region. */
    9.18  #define DIRECTMAP_VIRT_START  (RO_MPT_VIRT_END)
    9.19  #define DIRECTMAP_VIRT_END    (DIRECTMAP_VIRT_START + DIRECTMAP_PHYS_END)
    9.20  #define XENHEAP_VIRT_START    (DIRECTMAP_VIRT_START)
    9.21 -#define XENHEAP_VIRT_END      (XENHEAP_VIRT_START + (XENHEAP_DEFAULT_MB<<20))
    9.22 +#define XENHEAP_VIRT_END      (DIRECTMAP_VIRT_END)
    9.23 +/* Machine-to-phys conversion table. */
    9.24  #define RDWR_MPT_VIRT_START   (XENHEAP_VIRT_END)
    9.25  #define RDWR_MPT_VIRT_END     (RDWR_MPT_VIRT_START + (4*1024*1024))
    9.26 +/* Variable-length page-frame information array. */
    9.27  #define FRAMETABLE_VIRT_START (RDWR_MPT_VIRT_END)
    9.28 -#define FRAMETABLE_VIRT_END   (DIRECTMAP_VIRT_END)
    9.29 +#define FRAMETABLE_VIRT_END   (FRAMETABLE_VIRT_START + (24*1024*1024))
    9.30  /* Next 4MB of virtual address space is used as a linear p.t. mapping. */
    9.31 -#define LINEAR_PT_VIRT_START  (DIRECTMAP_VIRT_END)
    9.32 +#define LINEAR_PT_VIRT_START  (FRAMETABLE_VIRT_END)
    9.33  #define LINEAR_PT_VIRT_END    (LINEAR_PT_VIRT_START + (4*1024*1024))
    9.34  /* Next 4MB of virtual address space is used as a shadow linear p.t. map. */
    9.35  #define SH_LINEAR_PT_VIRT_START (LINEAR_PT_VIRT_END)
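
With DIRECTMAP_PHYS_END cut from 40MB to 12MB and the frame table given its own fixed 24MB window, the revised top-64MB i386 layout can be worked through numerically. The standalone program below mirrors the constants in this hunk and prints the resulting ranges; regions beyond LINEAR_PT are untouched by this diff and omitted.

#include <stdio.h>

#define MB(n) ((n) * 1024UL * 1024UL)

#define RO_MPT_VIRT_START     0xFC000000UL  /* HYPERVISOR_VIRT_START */
#define RO_MPT_VIRT_END       (RO_MPT_VIRT_START + MB(4))
#define DIRECTMAP_VIRT_START  RO_MPT_VIRT_END
#define XENHEAP_VIRT_END      (DIRECTMAP_VIRT_START + MB(12)) /* new 12MB cap */
#define RDWR_MPT_VIRT_START   XENHEAP_VIRT_END
#define RDWR_MPT_VIRT_END     (RDWR_MPT_VIRT_START + MB(4))
#define FRAMETABLE_VIRT_START RDWR_MPT_VIRT_END
#define FRAMETABLE_VIRT_END   (FRAMETABLE_VIRT_START + MB(24))
#define LINEAR_PT_VIRT_START  FRAMETABLE_VIRT_END

int main(void)
{
    printf("RO_MPT     %08lx - %08lx\n", RO_MPT_VIRT_START, RO_MPT_VIRT_END);
    printf("XENHEAP    %08lx - %08lx\n", DIRECTMAP_VIRT_START, XENHEAP_VIRT_END);
    printf("RDWR_MPT   %08lx - %08lx\n", RDWR_MPT_VIRT_START, RDWR_MPT_VIRT_END);
    printf("FRAMETABLE %08lx - %08lx\n", FRAMETABLE_VIRT_START, FRAMETABLE_VIRT_END);
    printf("LINEAR_PT  starts at %08lx\n", LINEAR_PT_VIRT_START);
    return 0;
}

A fixed 24MB FRAMETABLE window covers 2^20 entries at an assumed 24 bytes each, i.e. frame metadata for a full 4GB physical address space; the removed common/memory.c version instead had to panic with "reduce Xen heap size?" when the table did not fit in the direct map.
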
    10.1 --- a/xen/include/asm-x86/e820.h	Thu Dec 30 14:59:09 2004 +0000
    10.2 +++ b/xen/include/asm-x86/e820.h	Thu Dec 30 18:27:56 2004 +0000
    10.3 @@ -29,7 +29,7 @@ extern struct e820map e820;
    10.4  
    10.5  #endif /*!__ASSEMBLY__*/
    10.6  
    10.7 -#define PFN_DOWN(_p)  ((_p)&PAGE_MASK)
    10.8 -#define PFN_UP(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
    10.9 +#define PFN_DOWN(x)   ((x) >> PAGE_SHIFT)
   10.10 +#define PFN_UP(x)     (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
   10.11  
   10.12  #endif /*__E820_HEADER*/
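
This hunk fixes genuinely wrong macros: the old PFN_DOWN/PFN_UP rounded a byte address to a page boundary but still returned bytes, while the new versions return frame numbers. A tiny standalone comparison:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

#define OLD_PFN_UP(_p) (((_p) + (PAGE_SIZE-1)) & PAGE_MASK)  /* bytes  */
#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)   /* frames */

int main(void)
{
    unsigned long addr = 0x1234;
    printf("old PFN_UP(%#lx) = %#lx  (still a byte address)\n",
           addr, OLD_PFN_UP(addr));                          /* 0x2000 */
    printf("new PFN_UP(%#lx) = %#lx  (a frame number)\n",
           addr, PFN_UP(addr));                              /* 0x2    */
    return 0;
}
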
    11.1 --- a/xen/include/asm-x86/mm.h	Thu Dec 30 14:59:09 2004 +0000
    11.2 +++ b/xen/include/asm-x86/mm.h	Thu Dec 30 18:27:56 2004 +0000
    11.3 @@ -118,7 +118,7 @@ struct pfn_info
    11.4  extern struct pfn_info *frame_table;
    11.5  extern unsigned long frame_table_size;
    11.6  extern unsigned long max_page;
    11.7 -void init_frametable(void *frametable_vstart, unsigned long nr_pages);
    11.8 +void init_frametable(void);
    11.9  
   11.10  int alloc_page_type(struct pfn_info *page, unsigned int type);
   11.11  void free_page_type(struct pfn_info *page, unsigned int type);
    12.1 --- a/xen/include/asm-x86/page.h	Thu Dec 30 14:59:09 2004 +0000
    12.2 +++ b/xen/include/asm-x86/page.h	Thu Dec 30 18:27:56 2004 +0000
    12.3 @@ -102,6 +102,7 @@ typedef struct { unsigned long pt_lo; } 
    12.4  #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
    12.5  #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
    12.6  #define page_address(_p)        (__va(((_p) - frame_table) << PAGE_SHIFT))
    12.7 +#define pfn_to_page(_pfn)       (frame_table + (_pfn))
    12.8  #define phys_to_page(kaddr)     (frame_table + ((kaddr) >> PAGE_SHIFT))
    12.9  #define virt_to_page(kaddr)	(frame_table + (__pa(kaddr) >> PAGE_SHIFT))
   12.10  #define VALID_PAGE(page)	((page - frame_table) < max_mapnr)
    13.1 --- a/xen/include/xen/lib.h	Thu Dec 30 14:59:09 2004 +0000
    13.2 +++ b/xen/include/xen/lib.h	Thu Dec 30 18:27:56 2004 +0000
    13.3 @@ -14,8 +14,7 @@
    13.4  #define SWAP(_a, _b) \
    13.5     do { typeof(_a) _t = (_a); (_a) = (_b); (_b) = _t; } while ( 0 )
    13.6  
    13.7 -#define reserve_bootmem(_p,_l) \
    13.8 -printk("Memory Reservation 0x%lx, %lu bytes\n", (_p), (_l))
    13.9 +#define reserve_bootmem(_p,_l) ((void)0)
   13.10  
   13.11  struct domain;
   13.12  
    14.1 --- a/xen/include/xen/mm.h	Thu Dec 30 14:59:09 2004 +0000
    14.2 +++ b/xen/include/xen/mm.h	Thu Dec 30 18:27:56 2004 +0000
    14.3 @@ -5,9 +5,13 @@
    14.4  struct domain;
    14.5  struct pfn_info;
    14.6  
    14.7 +/* Boot-time allocator. Turns into generic allocator after bootstrap. */
    14.8 +unsigned long init_boot_allocator(unsigned long bitmap_start);
    14.9 +void init_boot_pages(unsigned long ps, unsigned long pe);
   14.10 +unsigned long alloc_boot_pages(unsigned long size, unsigned long align);
   14.11 +void end_boot_allocator(void);
   14.12 +
   14.13  /* Generic allocator. These functions are *not* interrupt-safe. */
   14.14 -unsigned long init_heap_allocator(
   14.15 -    unsigned long bitmap_start, unsigned long max_pages);
   14.16  void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
   14.17  struct pfn_info *alloc_heap_pages(int zone, int order);
   14.18  void free_heap_pages(int zone, struct pfn_info *pg, int order);
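
Taken together, the new declarations define the boot allocator's lifecycle in the order the reworked __start_xen() drives it. The sketch below stubs the four functions only so the ordering compiles and runs standalone; the real implementations are in xen/common/page_alloc.c above, and every address and size here is invented.

#include <stdio.h>

static unsigned long init_boot_allocator(unsigned long bitmap_start)
{
    printf("bitmap at %#lx\n", bitmap_start);
    return bitmap_start + 0x20000;          /* heap resumes past the bitmap */
}
static void init_boot_pages(unsigned long ps, unsigned long pe)
{
    printf("RAM [%#lx, %#lx) now allocatable\n", ps, pe);
}
static unsigned long alloc_boot_pages(unsigned long size, unsigned long align)
{
    printf("claimed %#lx bytes at %#lx alignment\n", size, align);
    return 4UL << 20;
}
static void end_boot_allocator(void)
{
    printf("remaining free pages handed to the buddy allocator\n");
}

int main(void)
{
    unsigned long heap = init_boot_allocator(0x200000); /* after Xen image  */
    init_boot_pages(heap, 0x40000000);      /* once per e820 RAM region     */
    alloc_boot_pages(64UL << 20, 4UL << 20);/* e.g. DOM0's contiguous chunk */
    end_boot_allocator();                   /* switch to the buddy lists    */
    return 0;
}
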