direct-io.hg

changeset 7793:090e44133d40

Use make_lowmem_page_readonly/writable() in preference to the
generic functions where appropriate. This prevents us from using the
generic functions early during boot, when pte_pfn() does not work
(because max_mapnr is not initialised).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Nov 14 18:13:38 2005 +0100 (2005-11-14)
parents bdf1a8039d13
children 4f03592bc7f5
files linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c linux-2.6-xen-sparse/arch/xen/i386/mm/init.c linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c xen/arch/x86/mm.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Mon Nov 14 15:21:16 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/cpu/common.c	Mon Nov 14 18:13:38 2005 +0100
     1.3 @@ -572,7 +572,7 @@ void __cpuinit cpu_gdt_init(struct Xgt_d
     1.4  	     va < gdt_descr->address + gdt_descr->size;
     1.5  	     va += PAGE_SIZE, f++) {
     1.6  		frames[f] = virt_to_mfn(va);
     1.7 -		make_page_readonly((void *)va);
     1.8 +		make_lowmem_page_readonly((void *)va);
     1.9  	}
    1.10  	if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
    1.11  		BUG();
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Mon Nov 14 15:21:16 2005 +0100
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Mon Nov 14 18:13:38 2005 +0100
     2.3 @@ -68,7 +68,7 @@ static pmd_t * __init one_md_table_init(
     2.4  
     2.5  #ifdef CONFIG_X86_PAE
     2.6  	pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
     2.7 -	make_page_readonly(pmd_table);
     2.8 +	make_lowmem_page_readonly(pmd_table);
     2.9  	set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
    2.10  	pud = pud_offset(pgd, 0);
    2.11  	if (pmd_table != pmd_offset(pud, 0)) 
    2.12 @@ -89,7 +89,7 @@ static pte_t * __init one_page_table_ini
    2.13  {
    2.14  	if (pmd_none(*pmd)) {
    2.15  		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
    2.16 -		make_page_readonly(page_table);
    2.17 +		make_lowmem_page_readonly(page_table);
    2.18  		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
    2.19  		if (page_table != pte_offset_kernel(pmd, 0))
    2.20  			BUG();	
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c	Mon Nov 14 15:21:16 2005 +0100
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c	Mon Nov 14 18:13:38 2005 +0100
     3.3 @@ -199,7 +199,7 @@ pte_t *pte_alloc_one_kernel(struct mm_st
     3.4  {
     3.5  	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
     3.6  	if (pte)
     3.7 -		make_page_readonly(pte);
     3.8 +		make_lowmem_page_readonly(pte);
     3.9  	return pte;
    3.10  }
    3.11  
    3.12 @@ -336,7 +336,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    3.13  		spin_lock_irqsave(&pgd_lock, flags);
    3.14  		memcpy(pmd, copy_pmd, PAGE_SIZE);
    3.15  		spin_unlock_irqrestore(&pgd_lock, flags);
    3.16 -		make_page_readonly(pmd);
    3.17 +		make_lowmem_page_readonly(pmd);
    3.18  		set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    3.19  	}
    3.20  
    3.21 @@ -367,12 +367,12 @@ void pgd_free(pgd_t *pgd)
    3.22  	if (PTRS_PER_PMD > 1) {
    3.23  		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
    3.24  			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    3.25 -			make_page_writable(pmd);
    3.26 +			make_lowmem_page_writable(pmd);
    3.27  			kmem_cache_free(pmd_cache, pmd);
    3.28  		}
    3.29  		if (!HAVE_SHARED_KERNEL_PMD) {
    3.30  			pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
    3.31 -			make_page_writable(pmd);
    3.32 +			make_lowmem_page_writable(pmd);
    3.33  			memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
    3.34  			kmem_cache_free(pmd_cache, pmd);
    3.35  		}
    3.36 @@ -382,6 +382,7 @@ void pgd_free(pgd_t *pgd)
    3.37  }
    3.38  
    3.39  #ifndef CONFIG_XEN_SHADOW_MODE
    3.40 +asmlinkage int xprintk(const char *fmt, ...);
    3.41  void make_lowmem_page_readonly(void *va)
    3.42  {
    3.43  	pte_t *pte = virt_to_ptep(va);
    3.44 @@ -399,8 +400,7 @@ void make_page_readonly(void *va)
    3.45  	pte_t *pte = virt_to_ptep(va);
    3.46  	set_pte(pte, pte_wrprotect(*pte));
    3.47  	if ((unsigned long)va >= (unsigned long)high_memory) {
    3.48 -		unsigned long pfn; 
    3.49 -		pfn = pte_pfn(*pte); 
    3.50 +		unsigned long pfn = pte_pfn(*pte);
    3.51  #ifdef CONFIG_HIGHMEM
    3.52  		if (pfn < highstart_pfn)
    3.53  #endif
    3.54 @@ -414,8 +414,7 @@ void make_page_writable(void *va)
    3.55  	pte_t *pte = virt_to_ptep(va);
    3.56  	set_pte(pte, pte_mkwrite(*pte));
    3.57  	if ((unsigned long)va >= (unsigned long)high_memory) {
    3.58 -		unsigned long pfn; 
    3.59 -		pfn = pte_pfn(*pte); 
    3.60 +		unsigned long pfn = pte_pfn(*pte); 
    3.61  #ifdef CONFIG_HIGHMEM
    3.62  		if (pfn < highstart_pfn)
    3.63  #endif
     4.1 --- a/xen/arch/x86/mm.c	Mon Nov 14 15:21:16 2005 +0100
     4.2 +++ b/xen/arch/x86/mm.c	Mon Nov 14 18:13:38 2005 +0100
     4.3 @@ -3125,7 +3125,10 @@ static int ptwr_emulated_update(
     4.4      /* Check the new PTE. */
     4.5      nl1e = l1e_from_intpte(val);
     4.6      if ( unlikely(!get_page_from_l1e(nl1e, d)) )
     4.7 +    {
     4.8 +        MEM_LOG("ptwr_emulate: could not get_page_from_l1e()");
     4.9          return X86EMUL_UNHANDLEABLE;
    4.10 +    }
    4.11  
    4.12      /* Checked successfully: do the update (write or cmpxchg). */
    4.13      pl1e = map_domain_page(page_to_pfn(page));
    4.14 @@ -3248,6 +3251,9 @@ int ptwr_do_page_fault(struct domain *d,
    4.15      goto emulate; 
    4.16  #endif
    4.17  
    4.18 +    PTWR_PRINTK("ptwr_page_fault on l1 pt at va %lx, pfn %lx, eip %lx\n",
    4.19 +                addr, pfn, (unsigned long)regs->eip);
    4.20 +    
    4.21      /* Get the L2 index at which this L1 p.t. is always mapped. */
    4.22      l2_idx = page->u.inuse.type_info & PGT_va_mask;
    4.23      if ( unlikely(l2_idx >= PGT_va_unknown) )
    4.24 @@ -3292,10 +3298,6 @@ int ptwr_do_page_fault(struct domain *d,
    4.25          goto emulate;
    4.26      }
    4.27  
    4.28 -    PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08lx, "
    4.29 -                "pfn %lx\n", PTWR_PRINT_WHICH,
    4.30 -                addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
    4.31 -    
    4.32      /*
    4.33       * We only allow one ACTIVE and one INACTIVE p.t. to be updated at at 
    4.34       * time. If there is already one, we must flush it out.
    4.35 @@ -3314,6 +3316,10 @@ int ptwr_do_page_fault(struct domain *d,
    4.36          goto emulate;
    4.37      }
    4.38  
    4.39 +    PTWR_PRINTK("[%c] batched ptwr_page_fault at va %lx, pt for %08lx, "
    4.40 +                "pfn %lx\n", PTWR_PRINT_WHICH, addr,
    4.41 +                l2_idx << L2_PAGETABLE_SHIFT, pfn);
    4.42 +
    4.43      d->arch.ptwr[which].l1va   = addr | 1;
    4.44      d->arch.ptwr[which].l2_idx = l2_idx;
    4.45      d->arch.ptwr[which].vcpu   = current;