ia64/xen-unstable

view patches/linux-2.6.12/pmd-shared.patch @ 6128:57b3fdca5dae

Support VCPU migration for VMX guests.

Add a hook to support CPU migration for VMX domains.

Reorganize the low-level asm code to support relaunching a VMCS on a
different logical CPU.

Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Aug 11 21:38:58 2005 +0000 (2005-08-11)
parents 0d415d73f8f2
children b0338759544e
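
In brief, the patch below replaces compile-time tests on PTRS_PER_PMD, which
can only distinguish two-level from three-level (PAE) paging, with an explicit
HAVE_SHARED_KERNEL_PMD switch, so that whether all page directories share one
set of kernel pmd pages becomes a property a port can turn off. This is also
why init.c can now register pgd_dtor unconditionally: the destructor checks
the switch itself. A minimal standalone sketch of the rule the patch
establishes (the puts() stand-ins and main() are illustrative, not kernel
code):

	#include <stdio.h>

	#define HAVE_SHARED_KERNEL_PMD 1	/* 1: all pgds share one kernel pmd */

	/* What a change to a kernel mapping must do under each setting. */
	static void kernel_mapping_changed(void)
	{
		puts("update init_mm's pmd");	/* the set_pte_atomic() below */
		if (HAVE_SHARED_KERNEL_PMD)
			return;		/* shared: every pgd sees it already */
		puts("propagate the change to every pgd's private kernel pmd");
	}

	int main(void)
	{
		kernel_mapping_changed();
		return 0;
	}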
diff -urNpP linux-2.6.12/arch/i386/mm/init.c linux-2.6.12.new/arch/i386/mm/init.c
--- linux-2.6.12/arch/i386/mm/init.c 2005-06-17 20:48:29.000000000 +0100
+++ linux-2.6.12.new/arch/i386/mm/init.c 2005-07-11 16:28:09.778165582 +0100
@@ -634,7 +634,7 @@ void __init pgtable_cache_init(void)
 				PTRS_PER_PGD*sizeof(pgd_t),
 				0,
 				pgd_ctor,
-				PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
+				pgd_dtor);
 	if (!pgd_cache)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }
diff -urNpP linux-2.6.12/arch/i386/mm/pageattr.c linux-2.6.12.new/arch/i386/mm/pageattr.c
--- linux-2.6.12/arch/i386/mm/pageattr.c 2005-06-17 20:48:29.000000000 +0100
+++ linux-2.6.12.new/arch/i386/mm/pageattr.c 2005-07-11 16:28:09.775165494 +0100
@@ -75,7 +75,7 @@ static void set_pmd_pte(pte_t *kpte, uns
 	unsigned long flags;

 	set_pte_atomic(kpte, pte);	/* change init_mm */
-	if (PTRS_PER_PMD > 1)
+	if (HAVE_SHARED_KERNEL_PMD)
 		return;

 	spin_lock_irqsave(&pgd_lock, flags);
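
The early return above is the shared-pmd fast path: updating init_mm's kernel
pmd is enough, because every pgd points at the same pmd pages. When the kernel
pmd is private per pgd, set_pmd_pte() must fall through and mirror the change
into each page directory. For context (the hunk elides it), the remainder of
the function in stock 2.6.12 walks pgd_list roughly as follows — quoted from
memory, so treat it as a sketch rather than the exact source:

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);	/* mirror the change */
	}
	spin_unlock_irqrestore(&pgd_lock, flags);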
diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12.new/arch/i386/mm/pgtable.c
--- linux-2.6.12/arch/i386/mm/pgtable.c 2005-06-17 20:48:29.000000000 +0100
+++ linux-2.6.12.new/arch/i386/mm/pgtable.c 2005-07-11 16:32:01.478023726 +0100
@@ -199,14 +199,14 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
 {
 	unsigned long flags;

-	if (PTRS_PER_PMD == 1)
+	if (!HAVE_SHARED_KERNEL_PMD)
 		spin_lock_irqsave(&pgd_lock, flags);

 	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
 	       swapper_pg_dir + USER_PTRS_PER_PGD,
 	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

-	if (PTRS_PER_PMD > 1)
+	if (HAVE_SHARED_KERNEL_PMD)
 		return;

 	pgd_list_add(pgd);
@@ -214,11 +214,13 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
 	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 }

-/* never called when PTRS_PER_PMD > 1 */
 void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 {
 	unsigned long flags; /* can be called from interrupt context */

+	if (HAVE_SHARED_KERNEL_PMD)
+		return;
+
 	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_del(pgd);
 	spin_unlock_irqrestore(&pgd_lock, flags);
@@ -226,12 +228,29 @@ void pgd_dtor(void *pgd, kmem_cache_t *c

 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	int i;
+	int i = 0;
 	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

 	if (PTRS_PER_PMD == 1 || !pgd)
 		return pgd;

+	if (!HAVE_SHARED_KERNEL_PMD) {
+		/* alloc and copy kernel pmd */
+		unsigned long flags;
+		pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
+		pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
+		pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
+		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+		if (0 == pmd)
+			goto out_oom;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		memcpy(pmd, copy_pmd, PAGE_SIZE);
+		spin_unlock_irqrestore(&pgd_lock, flags);
+		set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
+	}
+
+	/* alloc user pmds */
 	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
 		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
 		if (!pmd)
@@ -252,9 +271,16 @@ void pgd_free(pgd_t *pgd)
 	int i;

 	/* in the PAE case user pgd entries are overwritten before usage */
-	if (PTRS_PER_PMD > 1)
-		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
-			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	if (PTRS_PER_PMD > 1) {
+		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
+			kmem_cache_free(pmd_cache, pmd);
+		}
+		if (!HAVE_SHARED_KERNEL_PMD) {
+			pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
+			kmem_cache_free(pmd_cache, pmd);
+		}
+	}
 	/* in the non-PAE case, free_pgtables() clears user pgd entries */
 	kmem_cache_free(pgd_cache, pgd);
 }
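
Two details of the pgtable.c changes are worth spelling out. The int i = 0
initialisation keeps pgd_alloc()'s out_oom unwind path (not shown in the hunk)
correct when the kernel-pmd allocation fails before the user-pmd loop has run.
And the entry written by set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)))
stores the pmd page's physical address with the low bit set as the Present
flag, which is exactly what pgd_free() undoes with __va(pgd_val(...) - 1). A
standalone sketch of that round trip, with __pa()/__va() modelled on the usual
i386 linear mapping (illustrative values, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_OFFSET 0xC0000000UL                 /* default i386 split */
	#define __pa(v) ((uintptr_t)(v) - PAGE_OFFSET)   /* virtual -> physical */
	#define __va(p) ((uintptr_t)(p) + PAGE_OFFSET)   /* physical -> virtual */

	int main(void)
	{
		uintptr_t pmd = PAGE_OFFSET + 0x1000;    /* a pretend pmd page */
		uintptr_t entry = 1 + __pa(pmd);         /* as in set_pgd() above */
		assert(__va(entry - 1) == pmd);          /* as in pgd_free() above */
		return 0;
	}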
diff -urNpP linux-2.6.12/include/asm-i386/pgtable-2level-defs.h linux-2.6.12.new/include/asm-i386/pgtable-2level-defs.h
--- linux-2.6.12/include/asm-i386/pgtable-2level-defs.h 2005-06-17 20:48:29.000000000 +0100
+++ linux-2.6.12.new/include/asm-i386/pgtable-2level-defs.h 2005-07-11 16:28:09.733164251 +0100
@@ -1,6 +1,8 @@
 #ifndef _I386_PGTABLE_2LEVEL_DEFS_H
 #define _I386_PGTABLE_2LEVEL_DEFS_H

+#define HAVE_SHARED_KERNEL_PMD 0
+
 /*
  * traditional i386 two-level paging structure:
  */
diff -urNpP linux-2.6.12/include/asm-i386/pgtable-3level-defs.h linux-2.6.12.new/include/asm-i386/pgtable-3level-defs.h
--- linux-2.6.12/include/asm-i386/pgtable-3level-defs.h 2005-06-17 20:48:29.000000000 +0100
+++ linux-2.6.12.new/include/asm-i386/pgtable-3level-defs.h 2005-07-11 16:28:09.755164902 +0100
@@ -1,6 +1,8 @@
 #ifndef _I386_PGTABLE_3LEVEL_DEFS_H
 #define _I386_PGTABLE_3LEVEL_DEFS_H

+#define HAVE_SHARED_KERNEL_PMD 1
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
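
The two -defs.h hunks give the switch its stock defaults, so mainline
behaviour is unchanged: two-level paging folds the pmd into the pgd (the 0
there is effectively moot, since pgd_alloc() returns early when PTRS_PER_PMD
== 1), and vanilla PAE kernels keep sharing the kernel pmd. The point of
routing the decision through a header is that a modified tree can flip it; a
hypothetical 3-level defs header for such a port (illustrative, not part of
this patch) would simply define:

	#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
	#define _I386_PGTABLE_3LEVEL_DEFS_H

	/* Each pgd gets a private copy of the kernel pmd, so pgd_alloc(),
	 * pgd_free() and set_pmd_pte() above take the unshared paths.
	 * The rest of the header would be unchanged. */
	#define HAVE_SHARED_KERNEL_PMD 0

	#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */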