ia64/xen-unstable

view linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c @ 10692:306d7857928c

[IA64] Save & restore.

xc_ia64_linux_save.c and xc_ia64_linux_restore.c added.
The vcpu context now carries more registers and state (e.g. the tr registers).
Per-CPU irqs are deallocated when a CPU is switched off.
#if/#endif added in reboot.c for ia64.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Jul 11 12:51:18 2006 -0600 (2006-07-11)
parents 8dc4af3f192c
children 571022d5afa2
line source

/******************************************************************************
 * arch/ia64/xen/hypervisor.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

//#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
#include <asm/hypercall.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>

shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

start_info_t *xen_start_info;

int running_on_xen;
EXPORT_SYMBOL(running_on_xen);

//XXX xen/ia64 copy_from_guest() is broken.
//    This is a temporary workaround until it is fixed.
//    Used by balloon.c and netfront.c.

// get_xen_guest_handle() is defined only when __XEN_TOOLS__ is defined;
// if the definition in arch-ia64.h is changed, this must be updated.
#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)

int
ia64_xenmem_reservation_op(unsigned long op,
                           struct xen_memory_reservation* reservation__)
{
        struct xen_memory_reservation reservation = *reservation__;
        unsigned long* frame_list;
        unsigned long nr_extents = reservation__->nr_extents;
        int ret = 0;
        get_xen_guest_handle(frame_list, reservation__->extent_start);

        BUG_ON(op != XENMEM_increase_reservation &&
               op != XENMEM_decrease_reservation &&
               op != XENMEM_populate_physmap);

        while (nr_extents > 0) {
                int tmp_ret;
                volatile unsigned long dummy;

                set_xen_guest_handle(reservation.extent_start, frame_list);
                reservation.nr_extents = nr_extents;

                dummy = frame_list[0]; // re-install tlb entry before hypercall
                tmp_ret = ____HYPERVISOR_memory_op(op, &reservation);
                if (tmp_ret < 0) {
                        if (ret == 0) {
                                ret = tmp_ret;
                        }
                        break;
                }
                if (tmp_ret == 0) {
                        //XXX dirty workaround for skbuff_ctor()
                        //    of a non-privileged domain
                        if ((op == XENMEM_increase_reservation ||
                             op == XENMEM_populate_physmap) &&
                            !(xen_start_info->flags & SIF_PRIVILEGED) &&
                            reservation.extent_order > 0)
                                return ret;
                }
                frame_list += tmp_ret;
                nr_extents -= tmp_ret;
                ret += tmp_ret;
        }
        return ret;
}
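
/*
 * Usage sketch (illustration only, not a caller in this file): callers
 * such as balloon.c build a frame list and go through this wrapper
 * instead of calling HYPERVISOR_memory_op() directly, e.g.
 *
 *      struct xen_memory_reservation reservation = {
 *              .nr_extents   = nr_pages,
 *              .extent_order = 0,
 *              .domid        = DOMID_SELF
 *      };
 *      set_xen_guest_handle(reservation.extent_start, frame_list);
 *      rc = ia64_xenmem_reservation_op(XENMEM_increase_reservation,
 *                                      &reservation);
 *
 * The loop above touches frame_list[0] before each hypercall so that the
 * TLB entry covering the frame list is present when Xen copies it in.
 */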

//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
//    move those to lib/contiguous_bitmap?
//XXX discontigmem/sparsemem

/*
 * Bitmap is indexed by page number. If bit is set, the page is part of a
 * xen_create_contiguous_region() area of memory.
 */
unsigned long *contiguous_bitmap;

void
contiguous_bitmap_init(unsigned long end_pfn)
{
        unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
        contiguous_bitmap = alloc_bootmem_low_pages(size);
        BUG_ON(!contiguous_bitmap);
        memset(contiguous_bitmap, 0, size);
}
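
/*
 * Note on the size calculation above: (end_pfn + 2 * BITS_PER_LONG) >> 3
 * reserves one bit per pfn plus two longs' worth of slack, expressed in
 * bytes, so the set/clear helpers below can safely touch the whole word
 * that contains the last pfn.
 */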

#if 0
int
contiguous_bitmap_test(void* p)
{
        return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
}
#endif

static void contiguous_bitmap_set(
        unsigned long first_page, unsigned long nr_pages)
{
        unsigned long start_off, end_off, curr_idx, end_idx;

        curr_idx = first_page / BITS_PER_LONG;
        start_off = first_page & (BITS_PER_LONG-1);
        end_idx = (first_page + nr_pages) / BITS_PER_LONG;
        end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);

        if (curr_idx == end_idx) {
                contiguous_bitmap[curr_idx] |=
                        ((1UL<<end_off)-1) & -(1UL<<start_off);
        } else {
                contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
                while ( ++curr_idx < end_idx )
                        contiguous_bitmap[curr_idx] = ~0UL;
                contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
        }
}
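
/*
 * Worked example for the single-word case above (illustration only):
 * with BITS_PER_LONG == 64, first_page == 3 and nr_pages == 5 give
 * start_off == 3 and end_off == 8, so the mask is
 *
 *      ((1UL << 8) - 1) & -(1UL << 3) == 0xf8
 *
 * i.e. bits 3..7 are set, marking exactly pages 3..7.
 */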

static void contiguous_bitmap_clear(
        unsigned long first_page, unsigned long nr_pages)
{
        unsigned long start_off, end_off, curr_idx, end_idx;

        curr_idx = first_page / BITS_PER_LONG;
        start_off = first_page & (BITS_PER_LONG-1);
        end_idx = (first_page + nr_pages) / BITS_PER_LONG;
        end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);

        if (curr_idx == end_idx) {
                contiguous_bitmap[curr_idx] &=
                        -(1UL<<end_off) | ((1UL<<start_off)-1);
        } else {
                contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
                while ( ++curr_idx != end_idx )
                        contiguous_bitmap[curr_idx] = 0;
                contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
        }
}

// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
// are based on i386 xen_create_contiguous_region(),
// xen_destroy_contiguous_region()

/* Protected by balloon_lock. */
#define MAX_CONTIG_ORDER 7
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

/* Ensure multi-page extents are contiguous in machine memory. */
int
__xen_create_contiguous_region(unsigned long vstart,
                               unsigned int order, unsigned int address_bits)
{
        unsigned long error = 0;
        unsigned long gphys = __pa(vstart);
        unsigned long start_gpfn = gphys >> PAGE_SHIFT;
        unsigned long num_gpfn = 1 << order;
        unsigned long i;
        unsigned long flags;

        unsigned long *in_frames = discontig_frames, out_frame;
        int success;
        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = num_gpfn,
                        .extent_order = 0,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = 1,
                        .extent_order = order,
                        .address_bits = address_bits,
                        .domid = DOMID_SELF
                },
                .nr_exchanged = 0
        };

        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;

        set_xen_guest_handle(exchange.in.extent_start, in_frames);
        set_xen_guest_handle(exchange.out.extent_start, &out_frame);

        scrub_pages(vstart, num_gpfn);

        balloon_lock(flags);

        /* Get a new contiguous memory extent. */
        for (i = 0; i < num_gpfn; i++) {
                in_frames[i] = start_gpfn + i;
        }
        out_frame = start_gpfn;
        error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == num_gpfn);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
        BUG_ON(success && (error != 0));
        if (unlikely(error == -ENOSYS)) {
                /* Compatibility when XENMEM_exchange is unsupported. */
                error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                             &exchange.in);
                BUG_ON(error != num_gpfn);
                error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                             &exchange.out);
                if (error != 1) {
                        /* Couldn't get special memory: fall back to normal. */
                        for (i = 0; i < num_gpfn; i++) {
                                in_frames[i] = start_gpfn + i;
                        }
                        error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                                     &exchange.in);
                        BUG_ON(error != num_gpfn);
                        success = 0;
                } else
                        success = 1;
        }
        if (success)
                contiguous_bitmap_set(start_gpfn, num_gpfn);
#if 0
        if (success) {
                unsigned long mfn;
                unsigned long mfn_prev = ~0UL;
                for (i = 0; i < num_gpfn; i++) {
                        mfn = pfn_to_mfn_for_dma(start_gpfn + i);
                        if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
                                xprintk("\n");
                                xprintk("%s:%d order %d "
                                        "start 0x%lx bus 0x%lx "
                                        "machine 0x%lx\n",
                                        __func__, __LINE__, order,
                                        vstart, virt_to_bus((void*)vstart),
                                        phys_to_machine_for_dma(gphys));
                                xprintk("mfn: ");
                                for (i = 0; i < num_gpfn; i++) {
                                        mfn = pfn_to_mfn_for_dma(
                                                start_gpfn + i);
                                        xprintk("0x%lx ", mfn);
                                }
                                xprintk("\n");
                                break;
                        }
                        mfn_prev = mfn;
                }
        }
#endif
        balloon_unlock(flags);
        return success ? 0 : -ENOMEM;
}
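
/*
 * Usage sketch (assumption, not a caller in this file): a DMA-coherent
 * allocator would typically pair these helpers with the normal page
 * allocator, e.g.
 *
 *      unsigned long vaddr = __get_free_pages(GFP_KERNEL, order);
 *      if (vaddr != 0 &&
 *          __xen_create_contiguous_region(vaddr, order, address_bits) != 0) {
 *              free_pages(vaddr, order);       // fall back or fail
 *              vaddr = 0;
 *      }
 *      ...
 *      __xen_destroy_contiguous_region(vaddr, order);
 *      free_pages(vaddr, order);
 *
 * On success the guest frames [start_gpfn, start_gpfn + num_gpfn) are
 * backed by a single machine-contiguous extent, below 1 << address_bits
 * when address_bits is non-zero.
 */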

void
__xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
        unsigned long flags;
        unsigned long error = 0;
        unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
        unsigned long num_gpfn = 1UL << order;
        unsigned long i;

        unsigned long *out_frames = discontig_frames, in_frame;
        int success;
        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = 1,
                        .extent_order = order,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = num_gpfn,
                        .extent_order = 0,
                        .address_bits = 0,
                        .domid = DOMID_SELF
                },
                .nr_exchanged = 0
        };

        if (!test_bit(start_gpfn, contiguous_bitmap))
                return;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return;

        set_xen_guest_handle(exchange.in.extent_start, &in_frame);
        set_xen_guest_handle(exchange.out.extent_start, out_frames);

        scrub_pages(vstart, num_gpfn);

        balloon_lock(flags);

        contiguous_bitmap_clear(start_gpfn, num_gpfn);

        /* Do the exchange for non-contiguous MFNs. */
        in_frame = start_gpfn;
        for (i = 0; i < num_gpfn; i++) {
                out_frames[i] = start_gpfn + i;
        }
        error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == 1);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
        BUG_ON(success && (error != 0));
        if (unlikely(error == -ENOSYS)) {
                /* Compatibility when XENMEM_exchange is unsupported. */
                error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                             &exchange.in);
                BUG_ON(error != 1);

                error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
                                             &exchange.out);
                BUG_ON(error != num_gpfn);
        }
        balloon_unlock(flags);
}

///////////////////////////////////////////////////////////////////////////
// grant table hack
// cmd: GNTTABOP_xxx

#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>

static void
gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
{
        uint32_t flags;

        flags = uop->flags;

        if (flags & GNTMAP_host_map) {
                if (flags & GNTMAP_application_map) {
                        xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
                        BUG();
                }
                if (flags & GNTMAP_contains_pte) {
                        xprintd("GNTMAP_contains_pte is not supported yet: flags 0x%x\n", flags);
                        BUG();
                }
        } else if (flags & GNTMAP_device_map) {
                xprintd("GNTMAP_device_map is not supported yet: flags 0x%x\n", flags);
                BUG(); //XXX not yet; this flag is not actually used.
        } else {
                BUG();
        }
}

int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
        if (cmd == GNTTABOP_map_grant_ref) {
                unsigned int i;
                for (i = 0; i < count; i++) {
                        gnttab_map_grant_ref_pre(
                                (struct gnttab_map_grant_ref*)uop + i);
                }
        }

        return ____HYPERVISOR_grant_table_op(cmd, uop, count);
}
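
/*
 * Example of a request this wrapper screens (sketch only; vaddr, gref and
 * otherend_id are placeholders): a backend driver mapping a grant from
 * another domain issues something like
 *
 *      struct gnttab_map_grant_ref op = {
 *              .host_addr = vaddr,
 *              .flags     = GNTMAP_host_map,
 *              .ref       = gref,
 *              .dom       = otherend_id
 *      };
 *      HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
 *
 * Combinations xen/ia64 cannot handle yet (GNTMAP_application_map,
 * GNTMAP_contains_pte, GNTMAP_device_map) hit the BUG()s in
 * gnttab_map_grant_ref_pre() above.
 */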

///////////////////////////////////////////////////////////////////////////
// PageForeign(), SetPageForeign(), ClearPageForeign()

struct address_space xen_ia64_foreign_dummy_mapping;

///////////////////////////////////////////////////////////////////////////
// foreign mapping
#include <linux/efi.h>
#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()

static unsigned long privcmd_resource_min = 0;
// Xen/ia64 currently can handle pseudo-physical address bits up to
// (PAGE_SHIFT * 3)
static unsigned long privcmd_resource_max =
        GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;

static unsigned long
md_end_addr(const efi_memory_desc_t *md)
{
        return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
}

#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE (1024 * 1024 * 1024UL)
static int
xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
{
        return (start < end &&
                (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
}

static int __init
xen_ia64_privcmd_init(void)
{
        void *efi_map_start, *efi_map_end, *p;
        u64 efi_desc_size;
        efi_memory_desc_t *md;
        unsigned long tmp_min;
        unsigned long tmp_max;
        unsigned long gap_size;
        unsigned long prev_end;

        if (!is_running_on_xen())
                return -1;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        // First, check the highest address in use.
        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                // nothing
        }
        md = p - efi_desc_size;
        privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
        if (xen_ia64_privcmd_check_size(privcmd_resource_min,
                                        privcmd_resource_max)) {
                goto out;
        }

        // The highest address in use is too large; try to find the largest gap.
        tmp_min = privcmd_resource_max;
        tmp_max = 0;
        gap_size = 0;
        prev_end = 0;
        for (p = efi_map_start;
             p < efi_map_end - efi_desc_size;
             p += efi_desc_size) {
                unsigned long end;
                efi_memory_desc_t* next;
                unsigned long next_start;

                md = p;
                end = md_end_addr(md);
                if (end > privcmd_resource_max) {
                        break;
                }
                if (end < prev_end) {
                        // Workaround:
                        // Xen may pass incompletely sorted memory
                        // descriptors like
                        //   [x, x + length]
                        //   [x, x]
                        // in which case the order should be reversed.
                        continue;
                }
                next = p + efi_desc_size;
                next_start = next->phys_addr;
                if (next_start > privcmd_resource_max) {
                        next_start = privcmd_resource_max;
                }
                if (end < next_start && gap_size < (next_start - end)) {
                        tmp_min = end;
                        tmp_max = next_start;
                        gap_size = tmp_max - tmp_min;
                }
                prev_end = end;
        }

        privcmd_resource_min = GRANULEROUNDUP(tmp_min);
        if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
                privcmd_resource_max = tmp_max;
                goto out;
        }

        privcmd_resource_min = tmp_min;
        privcmd_resource_max = tmp_max;
        if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
                                         privcmd_resource_max)) {
                // No large enough gap was found.
                // Go ahead anyway with a warning, hoping that a large
                // region won't be requested.
                printk(KERN_WARNING "xen privcmd: no large enough region for privcmd mmap was found.\n");
        }

out:
        printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
               privcmd_resource_min, privcmd_resource_max,
               (privcmd_resource_max - privcmd_resource_min) >> 20);
        BUG_ON(privcmd_resource_min >= privcmd_resource_max);
        return 0;
}
late_initcall(xen_ia64_privcmd_init);

struct xen_ia64_privcmd_entry {
        atomic_t map_count;
#define INVALID_GPFN (~0UL)
        unsigned long gpfn;
};

struct xen_ia64_privcmd_range {
        atomic_t ref_count;
        unsigned long pgoff; // in PAGE_SIZE units
        struct resource* res;

        unsigned long num_entries;
        struct xen_ia64_privcmd_entry entries[0];
};

struct xen_ia64_privcmd_vma {
        struct xen_ia64_privcmd_range* range;

        unsigned long num_entries;
        struct xen_ia64_privcmd_entry* entries;
};
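
/*
 * How the structures above relate (descriptive sketch):
 *
 *      privcmd_mmap()  -> one xen_ia64_privcmd_range
 *                           |-- res: pseudo-physical resource backing it
 *                           `-- entries[0..num_entries): one per page
 *
 *      each VMA        -> one xen_ia64_privcmd_vma, whose 'entries'
 *                         pointer is a window into the range's entries,
 *                         starting at (vm_pgoff - range->pgoff)
 *
 * The range is shared by all VMAs split or copied from the original
 * mapping and is freed when its ref_count drops to zero.
 */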

static void
xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
{
        atomic_set(&entry->map_count, 0);
        entry->gpfn = INVALID_GPFN;
}

static int
xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
                            unsigned long addr,
                            struct xen_ia64_privcmd_range* privcmd_range,
                            int i,
                            unsigned long mfn,
                            pgprot_t prot,
                            domid_t domid)
{
        int error = 0;
        struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
        unsigned long gpfn;
        unsigned long flags;

        if ((addr & ~PAGE_MASK) != 0 || mfn == INVALID_MFN) {
                error = -EINVAL;
                goto out;
        }

        if (entry->gpfn != INVALID_GPFN) {
                error = -EBUSY;
                goto out;
        }
        gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;

        flags = ASSIGN_writable;
        if (pgprot_val(prot) == PROT_READ) {
                flags = ASSIGN_readonly;
        }
        error = HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
        if (error != 0) {
                goto out;
        }

        prot = vma->vm_page_prot;
        error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
        if (error != 0) {
                error = HYPERVISOR_zap_physmap(gpfn, 0);
                if (error) {
                        BUG(); //XXX
                }
        } else {
                atomic_inc(&entry->map_count);
                entry->gpfn = gpfn;
        }

out:
        return error;
}

static void
xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
                              int i)
{
        struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
        unsigned long gpfn = entry->gpfn;
        //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
        //       (vma->vm_pgoff - privcmd_range->pgoff);
        int error;

        error = HYPERVISOR_zap_physmap(gpfn, 0);
        if (error) {
                BUG(); //XXX
        }
        entry->gpfn = INVALID_GPFN;
}

static void
xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
                            int i)
{
        struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
        if (entry->gpfn != INVALID_GPFN) {
                atomic_inc(&entry->map_count);
        } else {
                BUG_ON(atomic_read(&entry->map_count) != 0);
        }
}

static void
xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
                             int i)
{
        struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
        if (entry->gpfn != INVALID_GPFN &&
            atomic_dec_and_test(&entry->map_count)) {
                xen_ia64_privcmd_entry_munmap(privcmd_range, i);
        }
}

static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);

struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
        .open = &xen_ia64_privcmd_vma_open,
        .close = &xen_ia64_privcmd_vma_close,
};

static void
__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
                            struct xen_ia64_privcmd_vma* privcmd_vma,
                            struct xen_ia64_privcmd_range* privcmd_range)
{
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;

        BUG_ON(entry_offset < 0);
        BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);

        privcmd_vma->range = privcmd_range;
        privcmd_vma->num_entries = num_entries;
        privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        for (i = 0; i < privcmd_vma->num_entries; i++) {
                xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
        }

        vma->vm_private_data = privcmd_vma;
        vma->vm_ops = &xen_ia64_privcmd_vm_ops;
}

static void
xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
{
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;

        atomic_inc(&privcmd_range->ref_count);
        // vm_op->open() can't fail.
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);

        __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
}

static void
xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
{
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long i;

        for (i = 0; i < privcmd_vma->num_entries; i++) {
                xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
        }
        vma->vm_private_data = NULL;
        kfree(privcmd_vma);

        if (atomic_dec_and_test(&privcmd_range->ref_count)) {
#if 1
                for (i = 0; i < privcmd_range->num_entries; i++) {
                        struct xen_ia64_privcmd_entry* entry =
                                &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->gpfn != INVALID_GPFN);
                }
#endif
                release_resource(privcmd_range->res);
                kfree(privcmd_range->res);
                vfree(privcmd_range);
        }
}

int
privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
        int error;
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long num_entries = size >> PAGE_SHIFT;
        struct xen_ia64_privcmd_range* privcmd_range = NULL;
        struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
        struct resource* res = NULL;
        unsigned long i;
        BUG_ON(!is_running_on_xen());

        BUG_ON(file->private_data != NULL);

        error = -ENOMEM;
        privcmd_range =
                vmalloc(sizeof(*privcmd_range) +
                        sizeof(privcmd_range->entries[0]) * num_entries);
        if (privcmd_range == NULL) {
                goto out_enomem0;
        }
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
        if (privcmd_vma == NULL) {
                goto out_enomem1;
        }
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (res == NULL) {
                goto out_enomem1;
        }
        res->name = "Xen privcmd mmap";
        error = allocate_resource(&iomem_resource, res, size,
                                  privcmd_resource_min, privcmd_resource_max,
                                  privcmd_resource_align, NULL, NULL);
        if (error) {
                goto out_enomem1;
        }
        privcmd_range->res = res;

        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;

        atomic_set(&privcmd_range->ref_count, 1);
        privcmd_range->pgoff = vma->vm_pgoff;
        privcmd_range->num_entries = num_entries;
        for (i = 0; i < privcmd_range->num_entries; i++) {
                xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
        }

        __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
        return 0;

out_enomem1:
        kfree(res);
        kfree(privcmd_vma);
out_enomem0:
        vfree(privcmd_range);
        return error;
}

int
direct_remap_pfn_range(struct vm_area_struct *vma,
                       unsigned long address,   // process virtual address
                       unsigned long mfn,       // mfn, mfn + 1, ... mfn + size/PAGE_SIZE
                       unsigned long size,
                       pgprot_t prot,
                       domid_t domid)           // target domain
{
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
        struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
        unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;

        unsigned long i;
        unsigned long offset;
        int error = 0;
        BUG_ON(!is_running_on_xen());

#if 0
        if (prot != vma->vm_page_prot) {
                return -EINVAL;
        }
#endif

        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
                error = xen_ia64_privcmd_entry_mmap(vma,
                                (address + offset) & PAGE_MASK,
                                privcmd_range, entry_offset + i,
                                mfn, prot, domid);
                if (error != 0) {
                        break;
                }

                i++;
                mfn++;
        }

        return error;
}
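
/*
 * Call-flow sketch (assumption; the privcmd driver itself lives elsewhere
 * in the sparse tree, not in this file): a dom0 toolstack client mmap()s
 * the privcmd device node, which lands in privcmd_mmap() above, and its
 * subsequent IOCTL_PRIVCMD_MMAP / IOCTL_PRIVCMD_MMAPBATCH requests are
 * turned by the generic privcmd code into direct_remap_pfn_range() calls,
 * one contiguous mfn run at a time.
 */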

/* Called after suspend, to resume time. */
void
time_resume(void)
{
        extern void ia64_cpu_local_tick(void);

        /* Just trigger a tick. */
        ia64_cpu_local_tick();
}