ia64/xen-unstable
changeset 5736:0d415d73f8f2
This adds a patch for the vanilla kernel, to be pushed upstream some
day. It adds a #define which is 1 or 0 depending on whether the pmd
for the kernel address space is shared or not. Xen can't use a
shared pmd due to linear mappings in the Xen private area.
Also includes patches for modified files in the sparse tree.
Signed-off-by: Gerd Knorr <kraxel@suse.de>
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Mon Jul 11 15:46:46 2005 +0000 (2005-07-11) |
parents | de2e58cdec37 |
children | e6f48ae99035 |
files | linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h patches/linux-2.6.12/pmd-shared.patch |
line diff
1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Mon Jul 11 15:43:12 2005 +0000 1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c Mon Jul 11 15:46:46 2005 +0000 1.3 @@ -274,14 +274,14 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 1.4 { 1.5 unsigned long flags; 1.6 1.7 - if (PTRS_PER_PMD == 1) 1.8 + if (!HAVE_SHARED_KERNEL_PMD) 1.9 spin_lock_irqsave(&pgd_lock, flags); 1.10 1.11 memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 1.12 swapper_pg_dir + USER_PTRS_PER_PGD, 1.13 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 1.14 1.15 - if (PTRS_PER_PMD > 1) 1.16 + if (HAVE_SHARED_KERNEL_PMD) 1.17 return; 1.18 1.19 pgd_list_add(pgd); 1.20 @@ -289,12 +289,11 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 1.21 memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 1.22 } 1.23 1.24 -/* never called when PTRS_PER_PMD > 1 */ 1.25 void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) 1.26 { 1.27 unsigned long flags; /* can be called from interrupt context */ 1.28 1.29 - if (PTRS_PER_PMD > 1) 1.30 + if (HAVE_SHARED_KERNEL_PMD) 1.31 return; 1.32 1.33 spin_lock_irqsave(&pgd_lock, flags); 1.34 @@ -304,12 +303,30 @@ void pgd_dtor(void *pgd, kmem_cache_t *c 1.35 1.36 pgd_t *pgd_alloc(struct mm_struct *mm) 1.37 { 1.38 - int i; 1.39 + int i = 0; 1.40 pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); 1.41 1.42 if (PTRS_PER_PMD == 1 || !pgd) 1.43 return pgd; 1.44 1.45 + if (!HAVE_SHARED_KERNEL_PMD) { 1.46 + /* alloc and copy kernel pmd */ 1.47 + unsigned long flags; 1.48 + pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET); 1.49 + pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET); 1.50 + pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET); 1.51 + pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 1.52 + if (0 == pmd) 1.53 + goto out_oom; 1.54 + 1.55 + spin_lock_irqsave(&pgd_lock, flags); 1.56 + memcpy(pmd, copy_pmd, PAGE_SIZE); 1.57 + spin_unlock_irqrestore(&pgd_lock, flags); 1.58 + make_page_readonly(pmd); 1.59 + set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd))); 
1.60 + } 1.61 + 1.62 + /* alloc user pmds */ 1.63 for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 1.64 pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 1.65 if (!pmd) 1.66 @@ -339,9 +356,17 @@ void pgd_free(pgd_t *pgd) 1.67 } 1.68 1.69 /* in the PAE case user pgd entries are overwritten before usage */ 1.70 - if (PTRS_PER_PMD > 1) 1.71 - for (i = 0; i < USER_PTRS_PER_PGD; ++i) 1.72 - kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1)); 1.73 + if (PTRS_PER_PMD > 1) { 1.74 + for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 1.75 + pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1); 1.76 + kmem_cache_free(pmd_cache, pmd); 1.77 + } 1.78 + if (!HAVE_SHARED_KERNEL_PMD) { 1.79 + pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1); 1.80 + make_page_writable(pmd); 1.81 + kmem_cache_free(pmd_cache, pmd); 1.82 + } 1.83 + } 1.84 /* in the non-PAE case, free_pgtables() clears user pgd entries */ 1.85 kmem_cache_free(pgd_cache, pgd); 1.86 }
2.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h Mon Jul 11 15:43:12 2005 +0000 2.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h Mon Jul 11 15:46:46 2005 +0000 2.3 @@ -1,6 +1,8 @@ 2.4 #ifndef _I386_PGTABLE_2LEVEL_DEFS_H 2.5 #define _I386_PGTABLE_2LEVEL_DEFS_H 2.6 2.7 +#define HAVE_SHARED_KERNEL_PMD 0 2.8 + 2.9 /* 2.10 * traditional i386 two-level paging structure: 2.11 */
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/patches/linux-2.6.12/pmd-shared.patch Mon Jul 11 15:46:46 2005 +0000 3.3 @@ -0,0 +1,134 @@ 3.4 +diff -urNpP linux-2.6.12/arch/i386/mm/init.c linux-2.6.12.new/arch/i386/mm/init.c 3.5 +--- linux-2.6.12/arch/i386/mm/init.c 2005-06-17 20:48:29.000000000 +0100 3.6 ++++ linux-2.6.12.new/arch/i386/mm/init.c 2005-07-11 16:28:09.778165582 +0100 3.7 +@@ -634,7 +634,7 @@ void __init pgtable_cache_init(void) 3.8 + PTRS_PER_PGD*sizeof(pgd_t), 3.9 + 0, 3.10 + pgd_ctor, 3.11 +- PTRS_PER_PMD == 1 ? pgd_dtor : NULL); 3.12 ++ pgd_dtor); 3.13 + if (!pgd_cache) 3.14 + panic("pgtable_cache_init(): Cannot create pgd cache"); 3.15 + } 3.16 +diff -urNpP linux-2.6.12/arch/i386/mm/pageattr.c linux-2.6.12.new/arch/i386/mm/pageattr.c 3.17 +--- linux-2.6.12/arch/i386/mm/pageattr.c 2005-06-17 20:48:29.000000000 +0100 3.18 ++++ linux-2.6.12.new/arch/i386/mm/pageattr.c 2005-07-11 16:28:09.775165494 +0100 3.19 +@@ -75,7 +75,7 @@ static void set_pmd_pte(pte_t *kpte, uns 3.20 + unsigned long flags; 3.21 + 3.22 + set_pte_atomic(kpte, pte); /* change init_mm */ 3.23 +- if (PTRS_PER_PMD > 1) 3.24 ++ if (HAVE_SHARED_KERNEL_PMD) 3.25 + return; 3.26 + 3.27 + spin_lock_irqsave(&pgd_lock, flags); 3.28 +diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12.new/arch/i386/mm/pgtable.c 3.29 +--- linux-2.6.12/arch/i386/mm/pgtable.c 2005-06-17 20:48:29.000000000 +0100 3.30 ++++ linux-2.6.12.new/arch/i386/mm/pgtable.c 2005-07-11 16:32:01.478023726 +0100 3.31 +@@ -199,14 +199,14 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 3.32 + { 3.33 + unsigned long flags; 3.34 + 3.35 +- if (PTRS_PER_PMD == 1) 3.36 ++ if (!HAVE_SHARED_KERNEL_PMD) 3.37 + spin_lock_irqsave(&pgd_lock, flags); 3.38 + 3.39 + memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD, 3.40 + swapper_pg_dir + USER_PTRS_PER_PGD, 3.41 + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); 3.42 + 3.43 +- if (PTRS_PER_PMD > 1) 3.44 ++ if (HAVE_SHARED_KERNEL_PMD) 3.45 + return; 3.46 + 3.47 + pgd_list_add(pgd); 
3.48 +@@ -214,11 +214,13 @@ void pgd_ctor(void *pgd, kmem_cache_t *c 3.49 + memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t)); 3.50 + } 3.51 + 3.52 +-/* never called when PTRS_PER_PMD > 1 */ 3.53 + void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) 3.54 + { 3.55 + unsigned long flags; /* can be called from interrupt context */ 3.56 + 3.57 ++ if (HAVE_SHARED_KERNEL_PMD) 3.58 ++ return; 3.59 ++ 3.60 + spin_lock_irqsave(&pgd_lock, flags); 3.61 + pgd_list_del(pgd); 3.62 + spin_unlock_irqrestore(&pgd_lock, flags); 3.63 +@@ -226,12 +228,29 @@ void pgd_dtor(void *pgd, kmem_cache_t *c 3.64 + 3.65 + pgd_t *pgd_alloc(struct mm_struct *mm) 3.66 + { 3.67 +- int i; 3.68 ++ int i = 0; 3.69 + pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); 3.70 + 3.71 + if (PTRS_PER_PMD == 1 || !pgd) 3.72 + return pgd; 3.73 + 3.74 ++ if (!HAVE_SHARED_KERNEL_PMD) { 3.75 ++ /* alloc and copy kernel pmd */ 3.76 ++ unsigned long flags; 3.77 ++ pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET); 3.78 ++ pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET); 3.79 ++ pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET); 3.80 ++ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 3.81 ++ if (0 == pmd) 3.82 ++ goto out_oom; 3.83 ++ 3.84 ++ spin_lock_irqsave(&pgd_lock, flags); 3.85 ++ memcpy(pmd, copy_pmd, PAGE_SIZE); 3.86 ++ spin_unlock_irqrestore(&pgd_lock, flags); 3.87 ++ set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd))); 3.88 ++ } 3.89 ++ 3.90 ++ /* alloc user pmds */ 3.91 + for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 3.92 + pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL); 3.93 + if (!pmd) 3.94 +@@ -252,9 +271,16 @@ void pgd_free(pgd_t *pgd) 3.95 + int i; 3.96 + 3.97 + /* in the PAE case user pgd entries are overwritten before usage */ 3.98 +- if (PTRS_PER_PMD > 1) 3.99 +- for (i = 0; i < USER_PTRS_PER_PGD; ++i) 3.100 +- kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1)); 3.101 ++ if (PTRS_PER_PMD > 1) { 3.102 ++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) { 3.103 ++ pmd_t 
*pmd = (void *)__va(pgd_val(pgd[i])-1); 3.104 ++ kmem_cache_free(pmd_cache, pmd); 3.105 ++ } 3.106 ++ if (!HAVE_SHARED_KERNEL_PMD) { 3.107 ++ pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1); 3.108 ++ kmem_cache_free(pmd_cache, pmd); 3.109 ++ } 3.110 ++ } 3.111 + /* in the non-PAE case, free_pgtables() clears user pgd entries */ 3.112 + kmem_cache_free(pgd_cache, pgd); 3.113 + } 3.114 +diff -urNpP linux-2.6.12/include/asm-i386/pgtable-2level-defs.h linux-2.6.12.new/include/asm-i386/pgtable-2level-defs.h 3.115 +--- linux-2.6.12/include/asm-i386/pgtable-2level-defs.h 2005-06-17 20:48:29.000000000 +0100 3.116 ++++ linux-2.6.12.new/include/asm-i386/pgtable-2level-defs.h 2005-07-11 16:28:09.733164251 +0100 3.117 +@@ -1,6 +1,8 @@ 3.118 + #ifndef _I386_PGTABLE_2LEVEL_DEFS_H 3.119 + #define _I386_PGTABLE_2LEVEL_DEFS_H 3.120 + 3.121 ++#define HAVE_SHARED_KERNEL_PMD 0 3.122 ++ 3.123 + /* 3.124 + * traditional i386 two-level paging structure: 3.125 + */ 3.126 +diff -urNpP linux-2.6.12/include/asm-i386/pgtable-3level-defs.h linux-2.6.12.new/include/asm-i386/pgtable-3level-defs.h 3.127 +--- linux-2.6.12/include/asm-i386/pgtable-3level-defs.h 2005-06-17 20:48:29.000000000 +0100 3.128 ++++ linux-2.6.12.new/include/asm-i386/pgtable-3level-defs.h 2005-07-11 16:28:09.755164902 +0100 3.129 +@@ -1,6 +1,8 @@ 3.130 + #ifndef _I386_PGTABLE_3LEVEL_DEFS_H 3.131 + #define _I386_PGTABLE_3LEVEL_DEFS_H 3.132 + 3.133 ++#define HAVE_SHARED_KERNEL_PMD 1 3.134 ++ 3.135 + /* 3.136 + * PGDIR_SHIFT determines what a top-level page table entry can map 3.137 + */