ia64/xen-unstable
linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c @ 4132:634970ed570c

bitkeeper revision 1.1236.25.18 (4234e6faTHNQkziFhjGrL_0FVPFZ3A)

Fix SMP build.
Signed-off-by: Christian Limpach <chris@xensource.com>

author    cl349@firebug.cl.cam.ac.uk
date      Mon Mar 14 01:20:58 2005 +0000 (2005-03-14)
parents   db5a30a327e6
children  8ba8ae4eba9b
/******************************************************************************
 * mm/hypervisor.c
 *
 * Update page tables via the hypervisor.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/multicall.h>
#include <asm-xen/balloon.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux/percpu.h>
#endif
/*
 * This suffices to protect us if we ever move to SMP domains.
 * Further, it protects us against interrupts. At the very least, this is
 * required for the network driver which flushes the update queue before
 * pushing new receive buffers.
 */
static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
/* Linux 2.6 isn't using the traditional batched interface. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define QUEUE_SIZE 2048
#define pte_offset_kernel pte_offset
#define pmd_val_ma(v) (v).pmd
#define pud_t pgd_t
#define pud_offset(d, va) d
#else
#ifdef CONFIG_SMP
#define QUEUE_SIZE 1
#else
#define QUEUE_SIZE 128
#endif
#define pmd_val_ma(v) (v).pud.pgd.pgd
#endif
DEFINE_PER_CPU(mmu_update_t, update_queue[QUEUE_SIZE]);
DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx);
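
/*
 * Typical usage (illustrative sketch; ptep and new_val stand for a
 * caller-supplied kernel-virtual PTE pointer and the new PTE value):
 * queue one or more requests into this CPU's queue, then push them to Xen
 * with a single mmu_update hypercall:
 *
 *     queue_l1_entry_update(ptep, new_val);
 *     queue_l1_entry_update(ptep2, new_val2);
 *     _flush_page_update_queue();
 *
 * The xen_*() variants further down queue and flush in one step, for
 * callers that need an update to take effect immediately.
 */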
/*
 * MULTICALL_flush_page_update_queue:
 * This is a version of the flush which queues as part of a multicall.
 */
void MULTICALL_flush_page_update_queue(void)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    unsigned int _idx;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    if ( (_idx = idx) != 0 )
    {
        per_cpu(mmu_update_queue_idx, cpu) = 0;
        wmb(); /* Make sure index is cleared first to avoid double updates. */
        queue_multicall3(__HYPERVISOR_mmu_update,
                         (unsigned long)&per_cpu(update_queue[0], cpu),
                         (unsigned long)_idx,
                         (unsigned long)NULL);
    }
    spin_unlock_irqrestore(&update_lock, flags);
}
static inline void __flush_page_update_queue(void)
{
    int cpu = smp_processor_id();
    unsigned int _idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(mmu_update_queue_idx, cpu) = 0;
    wmb(); /* Make sure index is cleared first to avoid double updates. */
    if ( unlikely(HYPERVISOR_mmu_update(&per_cpu(update_queue[0], cpu),
                                        _idx, NULL) < 0) )
    {
        printk(KERN_ALERT "Failed to execute MMU updates.\n");
        BUG();
    }
}
void _flush_page_update_queue(void)
{
    int cpu = smp_processor_id();
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    if ( per_cpu(mmu_update_queue_idx, cpu) != 0 )
        __flush_page_update_queue();
    spin_unlock_irqrestore(&update_lock, flags);
}
static inline void increment_index(void)
{
    int cpu = smp_processor_id();
    per_cpu(mmu_update_queue_idx, cpu)++;
    if ( unlikely(per_cpu(mmu_update_queue_idx, cpu) == QUEUE_SIZE) )
        __flush_page_update_queue();
}

static inline void increment_index_and_flush(void)
{
    int cpu = smp_processor_id();
    per_cpu(mmu_update_queue_idx, cpu)++;
    __flush_page_update_queue();
}
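
/*
 * For example, with the UP Linux 2.6 setting above (QUEUE_SIZE == 128),
 * the 128th queued request makes increment_index() reach QUEUE_SIZE and
 * flush the whole batch; with the SMP setting (QUEUE_SIZE == 1) every
 * request is flushed as soon as it is queued, so batching is effectively
 * disabled on SMP.
 */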
void queue_l1_entry_update(pte_t *ptr, unsigned long val)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).val = val;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_pt_switch(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_NEW_BASEPTR;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_tlb_flush(void)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_invlpg(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_INVLPG;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_pgd_pin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L2_TABLE;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_pgd_unpin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_pte_pin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L1_TABLE;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_pte_unpin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_set_ldt(unsigned long ptr, unsigned long len)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
    per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}

void queue_machphys_update(unsigned long mfn, unsigned long pfn)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    per_cpu(update_queue[idx], cpu).val = pfn;
    increment_index();
    spin_unlock_irqrestore(&update_lock, flags);
}
/* Queue-and-flush versions of the above. */
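
/*
 * Illustrative contrast (sketch; ptep and new_val are caller-supplied):
 * queue_l1_entry_update(ptep, new_val) only appends to the per-CPU queue,
 * so the hypervisor may not see the new PTE until the next flush, whereas
 *
 *     xen_l1_entry_update(ptep, new_val);
 *
 * queues the same request and flushes immediately, so the update has taken
 * effect by the time it returns.
 */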
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).val = val;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_pt_switch(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_NEW_BASEPTR;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_tlb_flush(void)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_invlpg(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_INVLPG;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_pgd_pin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L2_TABLE;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_pgd_unpin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_pte_pin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_PIN_L1_TABLE;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_pte_unpin(unsigned long ptr)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr  = phys_to_machine(ptr);
    per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
    per_cpu(update_queue[idx], cpu).val  = MMUEXT_UNPIN_TABLE;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_set_ldt(unsigned long ptr, unsigned long len)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
    per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}

void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
    int cpu = smp_processor_id();
    int idx;
    unsigned long flags;
    spin_lock_irqsave(&update_lock, flags);
    idx = per_cpu(mmu_update_queue_idx, cpu);
    per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
    per_cpu(update_queue[idx], cpu).val = pfn;
    increment_index_and_flush();
    spin_unlock_irqrestore(&update_lock, flags);
}
#ifdef CONFIG_XEN_PHYSDEV_ACCESS

unsigned long allocate_empty_lowmem_region(unsigned long pages)
{
    pgd_t         *pgd;
    pud_t         *pud;
    pmd_t         *pmd;
    pte_t         *pte;
    unsigned long *pfn_array;
    unsigned long  vstart;
    unsigned long  i;
    unsigned int   order = get_order(pages*PAGE_SIZE);

    vstart = __get_free_pages(GFP_KERNEL, order);
    if ( vstart == 0 )
        return 0UL;

    scrub_pages(vstart, 1 << order);

    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
    if ( pfn_array == NULL )
        BUG();

    for ( i = 0; i < (1<<order); i++ )
    {
        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
        pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
        queue_l1_entry_update(pte, 0);
        phys_to_machine_mapping[(__pa(vstart) >> PAGE_SHIFT) + i] =
            INVALID_P2M_ENTRY;
    }

    /* Flush updates through and flush the TLB. */
    xen_tlb_flush();

    balloon_put_pages(pfn_array, 1 << order);

    vfree(pfn_array);

    return vstart;
}
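
/*
 * Illustrative caller (sketch; the page count and error handling are
 * hypothetical): a driver that wants a virtually contiguous lowmem region
 * with no machine memory behind it might do
 *
 *     unsigned long vstart = allocate_empty_lowmem_region(16);
 *     if ( vstart == 0 )
 *         return -ENOMEM;
 *
 * On return the underlying frames have been handed back via
 * balloon_put_pages() and the corresponding phys_to_machine_mapping
 * entries are INVALID_P2M_ENTRY, leaving the range free to be remapped.
 */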
#endif /* CONFIG_XEN_PHYSDEV_ACCESS */