ia64/xen-unstable

linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c @ 9981:874661fc2d42

[IA64] compilation fix of ia64 hypervisor.c

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author  awilliam@xenbuild.aw
date    Tue May 09 12:42:44 2006 -0600
parents  873a3451a81a
children d86236cb824a
/******************************************************************************
 * arch/ia64/xen/hypervisor.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
//#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
#include <asm/hypercall.h>

#define XEN_IA64_BALLOON_IS_NOT_YET
#ifndef XEN_IA64_BALLOON_IS_NOT_YET
#include <xen/balloon.h>
#else
#define balloon_lock(flags)     ((void)flags)
#define balloon_unlock(flags)   ((void)flags)
#endif
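
/*
 * Note: in the x86 tree balloon_lock()/balloon_unlock() take the balloon
 * driver's spinlock with interrupts disabled.  Until the ia64 balloon
 * driver is hooked up, the stubs above only consume the flags variable so
 * that callers compile unchanged; there is no actual mutual exclusion.
 */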
//XXX xen/ia64 copy_from_guest() is broken.
//    This is a temporary workaround until it is fixed.
//    Used by balloon.c and netfront.c.

// get_xen_guest_handle is defined only when __XEN_TOOLS__ is defined.
// If the definition in arch-ia64.h is changed, this must be updated.
#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
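
/*
 * Issue a XENMEM_* reservation hypercall directly, repeating it on the
 * not-yet-processed tail of the frame list.  A short positive return
 * simply advances the cursor; a negative return is recorded once (if no
 * extents were processed yet) and aborts the loop.  On success the
 * accumulated number of processed extents is returned.
 */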
int
ia64_xenmem_reservation_op(unsigned long op,
                           struct xen_memory_reservation* reservation__)
{
        struct xen_memory_reservation reservation = *reservation__;
        unsigned long* frame_list;
        unsigned long nr_extents = reservation__->nr_extents;
        int ret = 0;
        get_xen_guest_handle(frame_list, reservation__->extent_start);

        BUG_ON(op != XENMEM_increase_reservation &&
               op != XENMEM_decrease_reservation &&
               op != XENMEM_populate_physmap);

        while (nr_extents > 0) {
                int tmp_ret;
                volatile unsigned long dummy;

                set_xen_guest_handle(reservation.extent_start, frame_list);
                reservation.nr_extents = nr_extents;

                dummy = frame_list[0];  // re-install tlb entry before hypercall
                tmp_ret = ____HYPERVISOR_memory_op(op, &reservation);
                if (tmp_ret < 0) {
                        if (ret == 0) {
                                ret = tmp_ret;
                        }
                        break;
                }
                frame_list += tmp_ret;
                nr_extents -= tmp_ret;
                ret += tmp_ret;
        }
        return ret;
}
//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
//    Move those to lib/contiguous_bitmap?
//XXX discontigmem/sparsemem

/*
 * Bitmap is indexed by page number. If bit is set, the page is part of a
 * xen_create_contiguous_region() area of memory.
 */
unsigned long *contiguous_bitmap;

void
contiguous_bitmap_init(unsigned long end_pfn)
{
        unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
        contiguous_bitmap = alloc_bootmem_low_pages(size);
        BUG_ON(!contiguous_bitmap);
        memset(contiguous_bitmap, 0, size);
}
#if 0
int
contiguous_bitmap_test(void* p)
{
        return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
}
#endif
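
/*
 * Mask arithmetic used below: for a word-relative bit offset n,
 * -(1UL << n) has bits n..BITS_PER_LONG-1 set and (1UL << n) - 1 has
 * bits 0..n-1 set.  Within one word, setting or clearing the range
 * [start_off, end_off) is therefore a single OR/AND; a range spanning
 * several words handles the partial first and last words with these
 * masks and fills the whole words in between.  For example, start_off=2
 * and end_off=5 yield the combined mask 0b11100 (bits 2, 3 and 4).
 */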
static void contiguous_bitmap_set(
        unsigned long first_page, unsigned long nr_pages)
{
        unsigned long start_off, end_off, curr_idx, end_idx;

        curr_idx  = first_page / BITS_PER_LONG;
        start_off = first_page & (BITS_PER_LONG-1);
        end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
        end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);

        if (curr_idx == end_idx) {
                contiguous_bitmap[curr_idx] |=
                        ((1UL<<end_off)-1) & -(1UL<<start_off);
        } else {
                contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
                while ( ++curr_idx < end_idx )
                        contiguous_bitmap[curr_idx] = ~0UL;
                contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
        }
}
static void contiguous_bitmap_clear(
        unsigned long first_page, unsigned long nr_pages)
{
        unsigned long start_off, end_off, curr_idx, end_idx;

        curr_idx  = first_page / BITS_PER_LONG;
        start_off = first_page & (BITS_PER_LONG-1);
        end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
        end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);

        if (curr_idx == end_idx) {
                contiguous_bitmap[curr_idx] &=
                        -(1UL<<end_off) | ((1UL<<start_off)-1);
        } else {
                contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
                while ( ++curr_idx != end_idx )
                        contiguous_bitmap[curr_idx] = 0;
                contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
        }
}
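
/*
 * On ia64 the guest physical address space is managed by Xen, so making
 * a region machine-contiguous goes entirely through the physmap: each
 * guest pfn is first zapped (its backing machine frame returned to Xen),
 * then the whole range is repopulated with a single populate_physmap
 * call of the requested order, which Xen backs with one machine extent
 * allocated below address_bits.
 */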
/* Ensure multi-page extents are contiguous in machine memory. */
int
__xen_create_contiguous_region(unsigned long vstart,
                               unsigned int order, unsigned int address_bits)
{
        unsigned long error = 0;
        unsigned long gphys = __pa(vstart);
        unsigned long start_gpfn = gphys >> PAGE_SHIFT;
        unsigned long num_pfn = 1 << order;
        unsigned long i;
        unsigned long flags;

        scrub_pages(vstart, 1 << order);

        balloon_lock(flags);

        //XXX order
        for (i = 0; i < num_pfn; i++) {
                error = HYPERVISOR_zap_physmap(start_gpfn + i, 0);
                if (error) {
                        goto out;
                }
        }

        error = HYPERVISOR_populate_physmap(start_gpfn, order, address_bits);
        contiguous_bitmap_set(start_gpfn, 1UL << order);

#if 0
        {
                unsigned long mfn;
                unsigned long mfn_prev = ~0UL;
                for (i = 0; i < 1 << order; i++) {
                        mfn = pfn_to_mfn_for_dma(start_gpfn + i);
                        if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
                                xprintk("\n");
                                xprintk("%s:%d order %d "
                                        "start 0x%lx bus 0x%lx machine 0x%lx\n",
                                        __func__, __LINE__, order,
                                        vstart, virt_to_bus((void*)vstart),
                                        phys_to_machine_for_dma(gphys));
                                xprintk("mfn: ");
                                for (i = 0; i < 1 << order; i++) {
                                        mfn = pfn_to_mfn_for_dma(start_gpfn + i);
                                        xprintk("0x%lx ", mfn);
                                }
                                xprintk("\n");
                                goto out;
                        }
                        mfn_prev = mfn;
                }
        }
#endif

out:
        balloon_unlock(flags);
        return error;
}
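
/*
 * Undo __xen_create_contiguous_region(): clear the bitmap, zap every
 * guest pfn in the range, then repopulate them one page (order 0) at a
 * time so the region no longer requires a contiguous machine extent.
 */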
void
__xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
        unsigned long error = 0;
        unsigned long gphys = __pa(vstart);
        unsigned long start_gpfn = gphys >> PAGE_SHIFT;
        unsigned long num_pfn = 1 << order;
        unsigned long i;
        unsigned long flags;

        scrub_pages(vstart, 1 << order);

        balloon_lock(flags);

        contiguous_bitmap_clear(start_gpfn, 1UL << order);

        //XXX order
        for (i = 0; i < num_pfn; i++) {
                error = HYPERVISOR_zap_physmap(start_gpfn + i, 0);
                if (error) {
                        goto out;
                }
        }

        for (i = 0; i < num_pfn; i++) {
                error = HYPERVISOR_populate_physmap(start_gpfn + i, 0, 0);
                if (error) {
                        goto out;
                }
        }

out:
        balloon_unlock(flags);
        if (error) {
                //XXX
        }
}
///////////////////////////////////////////////////////////////////////////
// grant table hack
// cmd: GNTTABOP_xxx

#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>
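
/*
 * Sanity-check a map_grant_ref request before it is handed to Xen.
 * xen/ia64 at this point only supports plain GNTMAP_host_map mappings:
 * GNTMAP_readonly is masked out of the local copy before validation
 * (the caller's flags are not modified), and the application_map,
 * contains_pte and device_map variants BUG() out as unimplemented.
 */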
static void
gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
{
        uint32_t flags;

        flags = uop->flags;
        if (flags & GNTMAP_readonly) {
#if 0
                xprintd("GNTMAP_readonly is not supported yet\n");
#endif
                flags &= ~GNTMAP_readonly;
        }

        if (flags & GNTMAP_host_map) {
                if (flags & GNTMAP_application_map) {
                        xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
                        BUG();
                }
                if (flags & GNTMAP_contains_pte) {
                        xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
                        BUG();
                }
        } else if (flags & GNTMAP_device_map) {
                xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
                BUG();//XXX not yet. actually this flag is not used.
        } else {
                BUG();
        }
}
int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
        if (cmd == GNTTABOP_map_grant_ref) {
                unsigned int i;
                for (i = 0; i < count; i++) {
                        gnttab_map_grant_ref_pre(
                                (struct gnttab_map_grant_ref*)uop + i);
                }
        }

        return ____HYPERVISOR_grant_table_op(cmd, uop, count);
}
///////////////////////////////////////////////////////////////////////////
//XXX taken from balloon.c
//    temporary hack until the balloon driver is supported.
#include <linux/module.h>
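
/*
 * Minimal stand-ins for the balloon driver entry points that the Xen
 * frontend drivers link against.  These just grab and release ordinary
 * kernel pages; balloon_update_driver_allowance() is deliberately a
 * no-op since there is no balloon accounting to adjust yet.
 */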
struct page *balloon_alloc_empty_page_range(unsigned long nr_pages)
{
        unsigned long vstart;
        unsigned int order = get_order(nr_pages * PAGE_SIZE);

        vstart = __get_free_pages(GFP_KERNEL, order);
        if (vstart == 0)
                return NULL;

        return virt_to_page(vstart);
}

void balloon_dealloc_empty_page_range(
        struct page *page, unsigned long nr_pages)
{
        __free_pages(page, get_order(nr_pages * PAGE_SIZE));
}

void balloon_update_driver_allowance(long delta)
{
}

EXPORT_SYMBOL(balloon_alloc_empty_page_range);
EXPORT_SYMBOL(balloon_dealloc_empty_page_range);
EXPORT_SYMBOL(balloon_update_driver_allowance);
///////////////////////////////////////////////////////////////////////////
// PageForeign(), SetPageForeign(), ClearPageForeign()

struct address_space xen_ia64_foreign_dummy_mapping;

///////////////////////////////////////////////////////////////////////////
// foreign mapping
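
/*
 * Foreign (privcmd) mappings work by double indirection: for every page
 * of the user mapping a local page is allocated, its gpfn is re-pointed
 * at the foreign domain's mfn via HYPERVISOR_add_physmap(), and that
 * gpfn is then mapped into the vma with remap_pfn_range().  map_count
 * tracks how many vmas reference an entry, since the kernel may split a
 * vma and each fragment opens/closes the shared entries.
 */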
struct xen_ia64_privcmd_entry {
        atomic_t map_count;
        struct page* page;
        unsigned long mfn;
};
static void
xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
{
        atomic_set(&entry->map_count, 0);
        entry->page = NULL;
        entry->mfn = INVALID_MFN;
}
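
/*
 * Map one foreign mfn at addr.  On remap_pfn_range() failure the physmap
 * change is rolled back: the gpfn is zapped and repopulated with a fresh
 * machine frame before the backing page is freed, so the page never
 * re-enters the allocator while it still points at the foreign frame.
 */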
static int
xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
                            unsigned long addr,
                            struct xen_ia64_privcmd_entry* entry,
                            unsigned long mfn,
                            pgprot_t prot,
                            domid_t domid)
{
        int error = 0;
        struct page* page;
        unsigned long gpfn;

        BUG_ON((addr & ~PAGE_MASK) != 0);
        BUG_ON(mfn == INVALID_MFN);

        if (entry->page != NULL) {
                error = -EBUSY;
                goto out;
        }
        page = alloc_page(GFP_KERNEL);
        if (page == NULL) {
                error = -ENOMEM;
                goto out;
        }
        gpfn = page_to_pfn(page);

        error = HYPERVISOR_add_physmap(gpfn, mfn, 0/* prot:XXX */,
                                       domid);
        if (error != 0) {
                __free_page(page);      // don't leak the backing page
                goto out;
        }

        prot = vma->vm_page_prot;
        error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
        if (error != 0) {
                (void)HYPERVISOR_zap_physmap(gpfn, 0);
                error = HYPERVISOR_populate_physmap(gpfn, 0, 0);
                if (error) {
                        BUG();//XXX
                }
                __free_page(page);
        } else {
                atomic_inc(&entry->map_count);
                entry->page = page;
                entry->mfn = mfn;
        }

out:
        return error;
}
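
/*
 * Tear down one entry: zap the gpfn (detaching the foreign mfn) and
 * repopulate it with a normal machine frame so the backing page can be
 * returned to the page allocator safely.
 */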
static void
xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_entry* entry)
{
        struct page* page = entry->page;
        unsigned long gpfn = page_to_pfn(page);
        int error;

        error = HYPERVISOR_zap_physmap(gpfn, 0);
        if (error) {
                BUG();//XXX
        }

        error = HYPERVISOR_populate_physmap(gpfn, 0, 0);
        if (error) {
                BUG();//XXX
        }

        entry->page = NULL;
        entry->mfn = INVALID_MFN;
        __free_page(page);
}
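
/*
 * Per-entry reference counting: opening a vma fragment bumps map_count
 * on every covered entry that is actually mapped; closing drops the
 * count and unmaps the entry when the last reference goes away.
 */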
static void
xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_entry* entry)
{
        if (entry->page != NULL) {
                atomic_inc(&entry->map_count);
        } else {
                BUG_ON(atomic_read(&entry->map_count) != 0);
        }
}

static void
xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_entry* entry)
{
        if (entry->page != NULL && atomic_dec_and_test(&entry->map_count)) {
                xen_ia64_privcmd_entry_munmap(entry);
        }
}
struct xen_ia64_privcmd_file {
        struct file* file;
        atomic_t map_count;
        unsigned long pgoff; // in PAGE_SIZE

        unsigned long num_entries;
        struct xen_ia64_privcmd_entry entries[0];
};

struct xen_ia64_privcmd_vma {
        struct xen_ia64_privcmd_file* file;
        unsigned long num_entries;
        struct xen_ia64_privcmd_entry* entries;
};

static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);

struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
        .open = &xen_ia64_privcmd_vma_open,
        .close = &xen_ia64_privcmd_vma_close,
};
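
/*
 * Attach a (possibly split) vma to the per-file entry array.  The vma's
 * slice of the array is located via vm_pgoff relative to the pgoff the
 * file was originally mmapped at, and every covered entry is opened.
 */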
static void
__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
                            struct xen_ia64_privcmd_vma* privcmd_vma)
{
        struct xen_ia64_privcmd_file* privcmd_file =
                (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_file->pgoff;
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;

        BUG_ON(entry_offset + num_entries > privcmd_file->num_entries);

        privcmd_vma->file = privcmd_file;
        privcmd_vma->num_entries = num_entries;
        privcmd_vma->entries = &privcmd_file->entries[entry_offset];
        for (i = 0; i < privcmd_vma->num_entries; i++) {
                xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
        }

        vma->vm_private_data = privcmd_vma;
        vma->vm_ops = &xen_ia64_privcmd_vm_ops;
}
static void
xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
{
        struct xen_ia64_privcmd_file* privcmd_file =
                (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
        struct xen_ia64_privcmd_vma* privcmd_vma;

        atomic_inc(&privcmd_file->map_count);
        // vm_op->open() can't fail.
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);

        __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
}
static void
xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
{
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        struct xen_ia64_privcmd_file* privcmd_file = privcmd_vma->file;
        unsigned long i;

        for (i = 0; i < privcmd_vma->num_entries; i++) {
                xen_ia64_privcmd_entry_close(&privcmd_vma->entries[i]);
        }
        vma->vm_private_data = NULL;
        kfree(privcmd_vma);

        if (atomic_dec_and_test(&privcmd_file->map_count)) {
#if 1
                for (i = 0; i < privcmd_file->num_entries; i++) {
                        struct xen_ia64_privcmd_entry* entry =
                                &privcmd_file->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->page != NULL);
                }
#endif
                privcmd_file->file->private_data = NULL;
                kfree(privcmd_file);
        }
}
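
/*
 * First mmap of the privcmd device on this file: allocate one
 * xen_ia64_privcmd_entry per page of the vma (the flexible entries
 * array hangs off file->private_data) plus the initial per-vma
 * descriptor, then open the vma against them.
 */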
int
privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        struct xen_ia64_privcmd_file* privcmd_file;
        struct xen_ia64_privcmd_vma* privcmd_vma;
        unsigned long i;
        BUG_ON(!running_on_xen);

        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;

        if (file->private_data != NULL) {
                return -EBUSY;
        }

        privcmd_file = kmalloc(sizeof(*privcmd_file) +
                               sizeof(privcmd_file->entries[0]) * num_entries,
                               GFP_KERNEL);
        if (privcmd_file == NULL) {
                goto out_enomem0;
        }
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
        if (privcmd_vma == NULL) {
                goto out_enomem1;
        }

        atomic_set(&privcmd_file->map_count, 1);
        privcmd_file->num_entries = num_entries;
        for (i = 0; i < privcmd_file->num_entries; i++) {
                xen_ia64_privcmd_init_entry(&privcmd_file->entries[i]);
        }
        file->private_data = privcmd_file;
        privcmd_file->file = file;
        privcmd_file->pgoff = vma->vm_pgoff;

        __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
        return 0;

out_enomem1:
        kfree(privcmd_vma);
out_enomem0:
        kfree(privcmd_file);
        return -ENOMEM;
}
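
/*
 * Back-end for privcmd's foreign-mapping ioctls: walk the request one
 * page at a time and map successive mfns into successive pages of the
 * vma via the per-page entries set up in privcmd_mmap().
 */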
int
direct_remap_pfn_range(struct vm_area_struct *vma,
                       unsigned long address,   // process virtual address
                       unsigned long mfn,       // mfn, mfn + 1, ... mfn + size/PAGE_SIZE
                       unsigned long size,
                       pgprot_t prot,
                       domid_t domid)           // target domain
{
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        unsigned long i;
        unsigned long offset;
        int error = 0;
        BUG_ON(!running_on_xen);

#if 0
        if (pgprot_val(prot) != pgprot_val(vma->vm_page_prot)) {
                return -EINVAL;
        }
#endif

        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
                struct xen_ia64_privcmd_entry* entry =
                        &privcmd_vma->file->entries[i];
                error = xen_ia64_privcmd_entry_mmap(vma,
                                (address + offset) & PAGE_MASK,
                                entry, mfn, prot, domid);
                if (error != 0) {
                        break;
                }

                i++;
                mfn++;
        }

        return error;
}