ia64/xen-unstable

changeset 4950:cdb951900d9d

bitkeeper revision 1.1428 (4289c9f6GdUyHEH-FCu9rRuWtOXnXQ)

Initialise the 1:1 mapping of the physical memory map early during x86/64 boot.
This mapping should include all ACPI tables, so simplify the mapping
check in the ACPI code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue May 17 10:39:50 2005 +0000 (2005-05-17)
parents 386956408063
children 3b25e1de5efb
files xen/arch/x86/acpi/boot.c xen/arch/x86/e820.c xen/arch/x86/setup.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/include/asm-x86/e820.h
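
The idea behind the change, as a minimal standalone sketch (the PAGE_OFFSET
value and the va() helper below are stand-ins for illustration, not taken from
this tree): once every raw e820 range is mapped 1:1 at PAGE_OFFSET during early
boot, translating an ACPI table's physical address is just the offset addition
performed by __va(), so the old end_pfn_map bound check in __acpi_map_table()
becomes unnecessary.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in direct-map base for illustration only; the real constant
     * lives in Xen's x86/64 headers. */
    #define PAGE_OFFSET 0xffff830000000000UL

    /* With all e820 areas mapped at PAGE_OFFSET + phys, __va() reduces to
     * a plain offset addition -- no per-table range check is needed. */
    static void *va(uint64_t phys)
    {
        return (void *)(PAGE_OFFSET + phys);
    }

    int main(void)
    {
        uint64_t acpi_table = 0x7fee0000ULL; /* hypothetical RSDT address */
        printf("phys %#llx -> virt %p\n",
               (unsigned long long)acpi_table, va(acpi_table));
        return 0;
    }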
line diff
     1.1 --- a/xen/arch/x86/acpi/boot.c	Tue May 17 09:14:58 2005 +0000
     1.2 +++ b/xen/arch/x86/acpi/boot.c	Tue May 17 10:39:50 2005 +0000
     1.3 @@ -40,7 +40,6 @@
     1.4  #include <mach_mpparse.h>
     1.5  
     1.6  int sbf_port;
     1.7 -#define end_pfn_map max_page
     1.8  #define CONFIG_ACPI_PCI
     1.9  
    1.10  #define BAD_MADT_ENTRY(entry, end) (					    \
    1.11 @@ -96,12 +95,9 @@ enum acpi_irq_model_id		acpi_irq_model =
    1.12  char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
    1.13  {
    1.14  	if (!phys_addr || !size)
    1.15 -	return NULL;
    1.16 -
    1.17 -	if (phys_addr < (end_pfn_map << PAGE_SHIFT))
    1.18 -		return __va(phys_addr);
    1.19 -
    1.20 -	return NULL;
    1.21 +		return NULL;
    1.22 +	/* XEN: We map all e820 areas which should include every ACPI table. */
    1.23 +	return __va(phys_addr);
    1.24  }
    1.25  
    1.26  #else
     2.1 --- a/xen/arch/x86/e820.c	Tue May 17 09:14:58 2005 +0000
     2.2 +++ b/xen/arch/x86/e820.c	Tue May 17 10:39:50 2005 +0000
     2.3 @@ -372,16 +372,17 @@ static void __init clip_mem(void)
     2.4  }
     2.5  
     2.6  static void __init machine_specific_memory_setup(
     2.7 -    struct e820entry *raw, int raw_nr)
     2.8 +    struct e820entry *raw, int *raw_nr)
     2.9  {
    2.10 -    char nr = (char)raw_nr;
    2.11 +    char nr = (char)*raw_nr;
    2.12      sanitize_e820_map(raw, &nr);
    2.13 +    *raw_nr = nr;
    2.14      (void)copy_e820_map(raw, nr);
    2.15      clip_4gb();
    2.16      clip_mem();
    2.17  }
    2.18  
    2.19 -unsigned long __init init_e820(struct e820entry *raw, int raw_nr)
    2.20 +unsigned long __init init_e820(struct e820entry *raw, int *raw_nr)
    2.21  {
    2.22      machine_specific_memory_setup(raw, raw_nr);
    2.23      printk(KERN_INFO "Physical RAM map:\n");
     3.1 --- a/xen/arch/x86/setup.c	Tue May 17 09:14:58 2005 +0000
     3.2 +++ b/xen/arch/x86/setup.c	Tue May 17 10:39:50 2005 +0000
     3.3 @@ -25,7 +25,7 @@
     3.4   * pfn_info table and allocation bitmap.
     3.5   */
     3.6  static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
     3.7 -#if defined(__x86_64__)
     3.8 +#if defined(CONFIG_X86_64)
     3.9  integer_param("xenheap_megabytes", opt_xenheap_megabytes);
    3.10  #endif
    3.11  
    3.12 @@ -70,7 +70,7 @@ extern int do_timer_lists_from_pit;
    3.13  
    3.14  struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
    3.15  
    3.16 -#if defined(__x86_64__)
    3.17 +#if defined(CONFIG_X86_64)
    3.18  unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
    3.19  #else
    3.20  unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
    3.21 @@ -312,10 +312,10 @@ void __init cpu_init(void)
    3.22  
    3.23      /* Set up and load the per-CPU TSS and LDT. */
    3.24      t->bitmap = IOBMP_INVALID_OFFSET;
    3.25 -#if defined(__i386__)
    3.26 +#if defined(CONFIG_X86_32)
    3.27      t->ss0  = __HYPERVISOR_DS;
    3.28      t->esp0 = get_stack_bottom();
    3.29 -#elif defined(__x86_64__)
    3.30 +#elif defined(CONFIG_X86_64)
    3.31      t->rsp0 = get_stack_bottom();
    3.32  #endif
    3.33      set_tss_desc(nr,t);
    3.34 @@ -452,7 +452,7 @@ static void __init start_of_day(void)
    3.35  
    3.36      watchdog_enable();
    3.37  
    3.38 -#ifdef __x86_64__ /* x86_32 uses low mappings when building DOM0. */
    3.39 +#ifdef CONFIG_X86_64 /* x86_32 uses low mappings when building DOM0. */
    3.40      zap_low_mappings();
    3.41  #endif
    3.42  }
    3.43 @@ -519,7 +519,7 @@ void __init __start_xen(multiboot_info_t
    3.44          for ( ; ; ) ;
    3.45      }
    3.46  
    3.47 -    max_page = init_e820(e820_raw, e820_raw_nr);
    3.48 +    max_page = init_e820(e820_raw, &e820_raw_nr);
    3.49  
    3.50      /* Find the first high-memory RAM hole. */
    3.51      for ( i = 0; i < e820.nr_map; i++ )
    3.52 @@ -537,11 +537,11 @@ void __init __start_xen(multiboot_info_t
    3.53          printk("Not enough memory to stash the DOM0 kernel image.\n");
    3.54          for ( ; ; ) ;
    3.55      }
    3.56 -#if defined(__i386__)
    3.57 +#if defined(CONFIG_X86_32)
    3.58      memmove((void *)initial_images_start,  /* use low mapping */
    3.59              (void *)mod[0].mod_start,      /* use low mapping */
    3.60              mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    3.61 -#elif defined(__x86_64__)
    3.62 +#elif defined(CONFIG_X86_64)
    3.63      memmove(__va(initial_images_start),
    3.64              __va(mod[0].mod_start),
    3.65              mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    3.66 @@ -562,6 +562,21 @@ void __init __start_xen(multiboot_info_t
    3.67                              e820.map[i].addr + e820.map[i].size);
    3.68      }
    3.69  
    3.70 +#if defined (CONFIG_X86_64)
    3.71 +    /* On x86/64 we can 1:1 map every registered memory area. */
    3.72 +    /* We use the raw_e820 map because we sometimes truncate the cooked map. */
    3.73 +    for ( i = 0; i < e820_raw_nr; i++ )
    3.74 +    {
    3.75 +        unsigned long min, sz;
    3.76 +        min = (unsigned long)e820_raw[i].addr &
    3.77 +            ~(((unsigned long)L1_PAGETABLE_ENTRIES << PAGE_SHIFT) - 1);
    3.78 +        sz  = ((unsigned long)e820_raw[i].size +
    3.79 +               ((unsigned long)L1_PAGETABLE_ENTRIES << PAGE_SHIFT) - 1) &
    3.80 +            ~(((unsigned long)L1_PAGETABLE_ENTRIES << PAGE_SHIFT) - 1);
    3.81 +        map_pages(idle_pg_table, PAGE_OFFSET + min, min, sz, PAGE_HYPERVISOR);
    3.82 +    }
    3.83 +#endif
    3.84 +
    3.85      printk("System RAM: %luMB (%lukB)\n", 
    3.86             nr_pages >> (20 - PAGE_SHIFT),
    3.87             nr_pages << (PAGE_SHIFT - 10));
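
The new loop in setup.c rounds each raw e820 range out to the granularity
covered by one full L1 page table (L1_PAGETABLE_ENTRIES 4K pages, i.e. 2MB)
before handing it to map_pages(). A standalone sketch of that rounding,
assuming the usual x86/64 values PAGE_SHIFT = 12 and L1_PAGETABLE_ENTRIES = 512
and a hypothetical e820 entry:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT           12   /* 4K pages (assumed, as on x86/64) */
    #define L1_PAGETABLE_ENTRIES 512  /* PTEs per L1 table (assumed) */
    #define CHUNK ((uint64_t)L1_PAGETABLE_ENTRIES << PAGE_SHIFT)  /* 2MB */

    int main(void)
    {
        /* Hypothetical raw e820 entry; neither field is 2MB-aligned. */
        uint64_t addr = 0x00100000ULL;  /* 1MB */
        uint64_t size = 0x3fef0000ULL;  /* just under 1GB, not a 2MB multiple */

        /* Same arithmetic as the hunk above: round the start down and the
         * size up to whole 2MB chunks before calling map_pages(). */
        uint64_t min = addr & ~(CHUNK - 1);
        uint64_t sz  = (size + CHUNK - 1) & ~(CHUNK - 1);

        printf("e820 [%#llx,%#llx) -> map [%#llx,%#llx)\n",
               (unsigned long long)addr, (unsigned long long)(addr + size),
               (unsigned long long)min,  (unsigned long long)(min + sz));
        return 0;
    }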
     4.1 --- a/xen/arch/x86/x86_64/mm.c	Tue May 17 09:14:58 2005 +0000
     4.2 +++ b/xen/arch/x86/x86_64/mm.c	Tue May 17 10:39:50 2005 +0000
     4.3 @@ -128,18 +128,12 @@ void __set_fixmap(
     4.4      map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags);
     4.5  }
     4.6  
     4.7 -
     4.8  void __init paging_init(void)
     4.9  {
    4.10 -    unsigned long i, p, max;
    4.11 +    unsigned long i, p;
    4.12      l3_pgentry_t *l3rw, *l3ro;
    4.13      struct pfn_info *pg;
    4.14  
    4.15 -    /* Map all of physical memory. */
    4.16 -    max = ((max_page + L1_PAGETABLE_ENTRIES - 1) & 
    4.17 -           ~(L1_PAGETABLE_ENTRIES - 1)) << PAGE_SHIFT;
    4.18 -    map_pages(idle_pg_table, PAGE_OFFSET, 0, max, PAGE_HYPERVISOR);
    4.19 -
    4.20      /*
    4.21       * Allocate and map the machine-to-phys table.
    4.22       * This also ensures L3 is present for ioremap().
     5.1 --- a/xen/common/grant_table.c	Tue May 17 09:14:58 2005 +0000
     5.2 +++ b/xen/common/grant_table.c	Tue May 17 10:39:50 2005 +0000
     5.3 @@ -1227,7 +1227,6 @@ grant_table_init(
     5.4      void)
     5.5  {
     5.6      /* Nothing. */
     5.7 -    DPRINTK("Grant table init\n");
     5.8  }
     5.9  
    5.10  /*
     6.1 --- a/xen/include/asm-x86/e820.h	Tue May 17 09:14:58 2005 +0000
     6.2 +++ b/xen/include/asm-x86/e820.h	Tue May 17 10:39:50 2005 +0000
     6.3 @@ -24,7 +24,7 @@ struct e820map {
     6.4      struct e820entry map[E820MAX];
     6.5  };
     6.6  
     6.7 -extern unsigned long init_e820(struct e820entry *, int);
     6.8 +extern unsigned long init_e820(struct e820entry *, int *);
     6.9  extern struct e820map e820;
    6.10  
    6.11  #ifndef NDEBUG