direct-io.hg

changeset 10247:2fd7f4fb7d14

[LINUX][X86_64] Fix initial memory mapping code.

The temporary mappings needed to set up the 1:1 mappings must be torn down after use; otherwise they may trigger the
WARN_ON() in vmap_pte_range() (namely if the chunk allocated to hold kernel and initial page tables gets close to or
exceeds 128MB, or if a sufficiently high mem= argument causes the static allocations to grow beyond 128MB, which in
either case means these mappings extend into the modules area).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Jun 01 19:07:40 2006 +0100 (2006-06-01)
parents 61e2ea81bd65
children 7f219d68e684
files linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c	Thu Jun 01 18:32:04 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/mm/init-xen.c	Thu Jun 01 19:07:40 2006 +0100
     1.3 @@ -56,6 +56,8 @@
     1.4  struct dma_mapping_ops* dma_ops;
     1.5  EXPORT_SYMBOL(dma_ops);
     1.6  
     1.7 +int after_bootmem;
     1.8 +
     1.9  extern unsigned long *contiguous_bitmap;
    1.10  
    1.11  static unsigned long dma_reserve __initdata;
    1.12 @@ -74,7 +76,7 @@ extern unsigned long start_pfn;
    1.13  	(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) +	\
    1.14  	__START_KERNEL_map)))
    1.15  
    1.16 -static void early_make_page_readonly(void *va, unsigned int feature)
    1.17 +static void __meminit early_make_page_readonly(void *va, unsigned int feature)
    1.18  {
    1.19  	unsigned long addr, _va = (unsigned long)va;
    1.20  	pte_t pte, *ptep;
    1.21 @@ -83,6 +85,11 @@ static void early_make_page_readonly(voi
    1.22  	if (xen_feature(feature))
    1.23  		return;
    1.24  
    1.25 +	if (after_bootmem) {
    1.26 +		make_page_readonly(va, feature);
    1.27 +		return;
    1.28 +	}
    1.29 +
    1.30  	addr = (unsigned long) page[pgd_index(_va)];
    1.31  	addr_to_page(addr, page);
    1.32  
    1.33 @@ -198,10 +205,6 @@ void show_mem(void)
    1.34  	printk(KERN_INFO "%lu pages swap cached\n",cached);
    1.35  }
    1.36  
    1.37 -/* References to section boundaries */
    1.38 -
    1.39 -int after_bootmem;
    1.40 -
    1.41  static void *spp_getpage(void)
    1.42  { 
    1.43  	void *ptr;
    1.44 @@ -448,9 +451,9 @@ phys_pmd_init(pmd_t *pmd, unsigned long 
    1.45  		pte = alloc_static_page(&pte_phys);
    1.46  		pte_save = pte;
    1.47  		for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
    1.48 -			if ((address >= end) ||
    1.49 -			    ((address >> PAGE_SHIFT) >=
    1.50 -			     xen_start_info->nr_pages)) { 
    1.51 +			if (address >= (after_bootmem
    1.52 +			                ? end
    1.53 +			                : xen_start_info->nr_pages << PAGE_SHIFT)) {
    1.54  				__set_pte(pte, __pte(0)); 
    1.55  				continue;
    1.56  			}
    1.57 @@ -550,7 +553,7 @@ void __init xen_init_pt(void)
    1.58  		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
    1.59  }
    1.60  
    1.61 -void __init extend_init_mapping(unsigned long tables_space)
    1.62 +static void __init extend_init_mapping(unsigned long tables_space)
    1.63  {
    1.64  	unsigned long va = __START_KERNEL_map;
    1.65  	unsigned long phys, addr, *pte_page;
    1.66 @@ -666,7 +669,18 @@ void __meminit init_memory_mapping(unsig
    1.67  			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
    1.68  	}
    1.69  
    1.70 -	BUG_ON(!after_bootmem && start_pfn != table_end);
    1.71 +	if (!after_bootmem) {
    1.72 +		BUG_ON(start_pfn != table_end);
    1.73 +
    1.74 +		/* Destroy the temporary mappings created above. */
    1.75 +		start = __START_KERNEL_map + (table_start << PAGE_SHIFT);
    1.76 +		end = start + tables_space;
    1.77 +		for (; start < end; start += PAGE_SIZE) {
    1.78 +			/* Should also clear out and reclaim any page table
    1.79 +			   pages no longer needed... */
    1.80 +			WARN_ON(HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0));
    1.81 +		}
    1.82 +	}
    1.83  
    1.84  	__flush_tlb_all();
    1.85  }