direct-io.hg

changeset 13344:7c5c72a0283f

[LINUX] x86/64: Sync pagetable management with i386 Xen code.

PUDs, PMDs, and PTEs are all marked as ForeignPage so that they can be
grabbed from tlb_remove_page() at the appropriate time and freed in a
special way.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Fri Jan 12 10:22:36 2007 +0000 (2007-01-12)
parents ded167dc4dc9
children 648e58e31c26
files linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c	Fri Jan 12 10:13:25 2007 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/mm/pageattr-xen.c	Fri Jan 12 10:22:36 2007 +0000
     1.3 @@ -164,6 +164,18 @@ void _arch_exit_mmap(struct mm_struct *m
     1.4          mm_unpin(mm);
     1.5  }
     1.6  
     1.7 +struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
     1.8 +{
     1.9 +	struct page *pte;
    1.10 +
    1.11 +	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
    1.12 +	if (pte) {
    1.13 +		SetPageForeign(pte, pte_free);
    1.14 +		set_page_count(pte, 1);
    1.15 +	}
    1.16 +	return pte;
    1.17 +}
    1.18 +
    1.19  void pte_free(struct page *pte)
    1.20  {
    1.21  	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
    1.22 @@ -171,6 +183,10 @@ void pte_free(struct page *pte)
    1.23  	if (!pte_write(*virt_to_ptep(va)))
    1.24  		BUG_ON(HYPERVISOR_update_va_mapping(
    1.25  			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
    1.26 +
    1.27 +	ClearPageForeign(pte);
    1.28 +	set_page_count(pte, 1);
    1.29 +
    1.30  	__free_page(pte);
    1.31  }
    1.32  #endif	/* CONFIG_XEN */
     2.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h	Fri Jan 12 10:13:25 2007 +0000
     2.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/pgalloc.h	Fri Jan 12 10:22:36 2007 +0000
     2.3 @@ -64,50 +64,43 @@ static inline void pgd_populate(struct m
     2.4  	}
     2.5  }
     2.6  
     2.7 -static inline void pmd_free(pmd_t *pmd)
     2.8 -{
     2.9 -	pte_t *ptep = virt_to_ptep(pmd);
    2.10 -
    2.11 -	if (!pte_write(*ptep)) {
    2.12 -		BUG_ON(HYPERVISOR_update_va_mapping(
    2.13 -			(unsigned long)pmd,
    2.14 -			pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT, PAGE_KERNEL),
    2.15 -			0));
    2.16 -	}
    2.17 -	free_page((unsigned long)pmd);
    2.18 -}
    2.19 +extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
    2.20 +extern void pte_free(struct page *pte);
    2.21  
    2.22  static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
    2.23  {
    2.24 -        pmd_t *pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
    2.25 -        return pmd;
    2.26 +	struct page *pg;
    2.27 +
    2.28 +	pg = pte_alloc_one(mm, addr);
    2.29 +	return pg ? page_address(pg) : NULL;
    2.30 +}
    2.31 +
    2.32 +static inline void pmd_free(pmd_t *pmd)
    2.33 +{
    2.34 +	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
    2.35 +	pte_free(virt_to_page(pmd));
    2.36  }
    2.37  
    2.38  static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
    2.39  {
    2.40 -        pud_t *pud = (pud_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
    2.41 -        return pud;
    2.42 +	struct page *pg;
    2.43 +
    2.44 +	pg = pte_alloc_one(mm, addr);
    2.45 +	return pg ? page_address(pg) : NULL;
    2.46  }
    2.47  
    2.48  static inline void pud_free(pud_t *pud)
    2.49  {
    2.50 -	pte_t *ptep = virt_to_ptep(pud);
    2.51 -
    2.52 -	if (!pte_write(*ptep)) {
    2.53 -		BUG_ON(HYPERVISOR_update_va_mapping(
    2.54 -			(unsigned long)pud,
    2.55 -			pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT, PAGE_KERNEL),
    2.56 -			0));
    2.57 -	}
    2.58 -	free_page((unsigned long)pud);
    2.59 +	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
    2.60 +	pte_free(virt_to_page(pud));
    2.61  }
    2.62  
    2.63  static inline pgd_t *pgd_alloc(struct mm_struct *mm)
    2.64  {
    2.65 -        /*
    2.66 -         * We allocate two contiguous pages for kernel and user.
    2.67 -         */
    2.68 -        unsigned boundary;
    2.69 +	/*
    2.70 +	 * We allocate two contiguous pages for kernel and user.
    2.71 +	 */
    2.72 +	unsigned boundary;
    2.73  	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
    2.74  
    2.75  	if (!pgd)
    2.76 @@ -124,11 +117,11 @@ static inline pgd_t *pgd_alloc(struct mm
    2.77  	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
    2.78  
    2.79  	memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
    2.80 -        /*
    2.81 -         * Set level3_user_pgt for vsyscall area
    2.82 -         */
    2.83 +	/*
    2.84 +	 * Set level3_user_pgt for vsyscall area
    2.85 +	 */
    2.86  	set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START), 
    2.87 -                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
    2.88 +		mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
    2.89  	return pgd;
    2.90  }
    2.91  
    2.92 @@ -160,39 +153,25 @@ static inline void pgd_free(pgd_t *pgd)
    2.93  
    2.94  static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
    2.95  {
    2.96 -        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
    2.97 -        if (pte)
    2.98 +	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
    2.99 +	if (pte)
   2.100  		make_page_readonly(pte, XENFEAT_writable_page_tables);
   2.101  
   2.102  	return pte;
   2.103  }
   2.104  
   2.105 -static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
   2.106 -{
   2.107 -	struct page *pte;
   2.108 -
   2.109 -	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
   2.110 -	return pte;
   2.111 -}
   2.112 -
   2.113  /* Should really implement gc for free page table pages. This could be
   2.114     done with a reference count in struct page. */
   2.115  
   2.116  static inline void pte_free_kernel(pte_t *pte)
   2.117  {
   2.118  	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
   2.119 -        make_page_writable(pte, XENFEAT_writable_page_tables);
   2.120 +	make_page_writable(pte, XENFEAT_writable_page_tables);
   2.121  	free_page((unsigned long)pte); 
   2.122  }
   2.123  
   2.124 -extern void pte_free(struct page *pte);
   2.125 -
   2.126 -//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) 
   2.127 -//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
   2.128 -//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
   2.129 -
   2.130 -#define __pte_free_tlb(tlb,x)   pte_free((x))
   2.131 -#define __pmd_free_tlb(tlb,x)   pmd_free((x))
   2.132 -#define __pud_free_tlb(tlb,x)   pud_free((x))
   2.133 +#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
   2.134 +#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
   2.135 +#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
   2.136  
   2.137  #endif /* _X86_64_PGALLOC_H */