linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c @ 4702:75a775c40caf (ia64/xen-unstable)

bitkeeper revision 1.1389.1.10 (42715992Ch3pU44CxrgkzMqOnsRiOA)

merge

author    kaf24@firebug.cl.cam.ac.uk
date      Thu Apr 28 21:45:54 2005 +0000 (2005-04-28)
parents   98d5be103415 5ca208755ef2
children  65b28c74cec2
/******************************************************************************
 * mm/hypervisor.c
 *
 * Update page tables via the hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/config.h>
#include <linux/version.h>   /* LINUX_VERSION_CODE, KERNEL_VERSION() */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/balloon.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define pte_offset_kernel pte_offset
#define pud_t pgd_t
#define pud_offset(d, va) d
#else
#define pmd_val_ma(v) ((v).pud.pgd.pgd)
#endif
#ifndef CONFIG_XEN_SHADOW_MODE
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
    mmu_update_t u;
    u.ptr = virt_to_machine(ptr);
    u.val = val;
    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}

void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
    mmu_update_t u;
    u.ptr = virt_to_machine(ptr);
    u.val = pmd_val_ma(val);
    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}
#endif
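
/*
 * Usage sketch (illustrative, not part of the original file): each helper
 * above issues one hypercall per page-table update.  A caller that must
 * change several PTEs at once could batch them into a single
 * HYPERVISOR_mmu_update call, along these lines (ptep, pteval and pteval2
 * are placeholder names, not symbols from this file):
 *
 *     mmu_update_t req[2];
 *     req[0].ptr = virt_to_machine(ptep);     req[0].val = pteval;
 *     req[1].ptr = virt_to_machine(ptep + 1); req[1].val = pteval2;
 *     BUG_ON(HYPERVISOR_mmu_update(req, 2, NULL, DOMID_SELF) < 0);
 */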
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
    mmu_update_t u;
    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    u.val = pfn;
    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
}

void xen_pt_switch(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_NEW_BASEPTR;
    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_tlb_flush(void)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_invlpg(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_INVLPG_LOCAL;
    op.linear_addr = ptr & PAGE_MASK;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
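
/*
 * Note (illustrative, not part of the original file): the _LOCAL operations
 * above affect only the calling CPU.  The _ALL/_MULTI counterparts below,
 * built only with CONFIG_SMP, ask the hypervisor to apply the same flush to
 * other CPUs, in place of the cross-CPU IPIs a native kernel would send.
 */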
#ifdef CONFIG_SMP

void xen_tlb_flush_all(void)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_TLB_FLUSH_ALL;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_tlb_flush_mask(cpumask_t *mask)
{
    struct mmuext_op op;
    if ( cpus_empty(*mask) )
        return;
    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
    op.cpuset = mask->bits;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_invlpg_all(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_INVLPG_ALL;
    op.linear_addr = ptr & PAGE_MASK;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
{
    struct mmuext_op op;
    if ( cpus_empty(*mask) )
        return;
    op.cmd = MMUEXT_INVLPG_MULTI;
    op.cpuset = mask->bits;
    op.linear_addr = ptr & PAGE_MASK;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

#endif /* CONFIG_SMP */
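
/*
 * Usage sketch (illustrative, not part of the original file): the *_mask
 * variants take the CPU set by pointer and return early for an empty mask,
 * so a caller can flush remote CPUs and the local CPU separately, e.g.
 * (mm is a placeholder struct mm_struct pointer):
 *
 *     cpumask_t mask = mm->cpu_vm_mask;
 *     cpu_clear(smp_processor_id(), mask);
 *     xen_tlb_flush_mask(&mask);      (remote CPUs)
 *     xen_tlb_flush();                (local CPU)
 */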
#ifndef CONFIG_XEN_SHADOW_MODE
void xen_pgd_pin(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_PIN_L2_TABLE;
    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_pgd_unpin(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_UNPIN_TABLE;
    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_pte_pin(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_PIN_L1_TABLE;
    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}

void xen_pte_unpin(unsigned long ptr)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_UNPIN_TABLE;
    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
#endif
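
/*
 * Note (illustrative, not part of the original file): pinning asks Xen to
 * validate the page as an L1/L2 page table and to hold that type reference,
 * so later updates through the helpers above need only per-entry checks.
 * A page directory is typically pinned when its mm is set up and unpinned
 * when it is torn down; the helpers take a physical address, e.g.
 * (pgd is a placeholder kernel-virtual pointer to the directory):
 *
 *     xen_pgd_pin(__pa(pgd));
 *     ...
 *     xen_pgd_unpin(__pa(pgd));
 */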
void xen_set_ldt(unsigned long ptr, unsigned long len)
{
    struct mmuext_op op;
    op.cmd = MMUEXT_SET_LDT;
    op.linear_addr = ptr;
    op.nr_ents = len;
    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
}
#ifdef CONFIG_XEN_PHYSDEV_ACCESS

unsigned long allocate_empty_lowmem_region(unsigned long pages)
{
    pgd_t         *pgd;
    pud_t         *pud;
    pmd_t         *pmd;
    pte_t         *pte;
    unsigned long *pfn_array;
    unsigned long  vstart;
    unsigned long  i;
    unsigned int   order = get_order(pages*PAGE_SIZE);

    vstart = __get_free_pages(GFP_KERNEL, order);
    if ( vstart == 0 )
        return 0UL;

    scrub_pages(vstart, 1 << order);

    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
    if ( pfn_array == NULL )
        BUG();

    for ( i = 0; i < (1<<order); i++ )
    {
        /* Walk to the PTE, record the backing machine frame, then zap the
           mapping and invalidate the P2M entry for this page. */
        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
        pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
        HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
            INVALID_P2M_ENTRY;
    }

    /* Ensure no stale mappings of the zapped range remain in any TLB. */
    flush_tlb_all();

    /* Hand the underlying machine frames back to the balloon driver. */
    balloon_put_pages(pfn_array, 1 << order);

    vfree(pfn_array);

    return vstart;
}

#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
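
/*
 * Usage sketch (illustrative, not part of the original file): a backend
 * driver needing an empty, physically contiguous low-memory window might
 * call:
 *
 *     unsigned long va = allocate_empty_lowmem_region(16);
 *     if ( va == 0 )
 *         return -ENOMEM;
 *
 * On return the region's machine frames have been given back to the balloon
 * driver and its mappings zapped, so the caller must map new frames into
 * the range before touching it.
 */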