ia64/xen-unstable

view xen/arch/x86/hvm/hvm.c @ 13144:ed815cbdc90e

[XEN] Fix cset 13107:04c5f7b71ff4
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Wed Dec 20 13:54:04 2006 +0000 (2006-12-20)
parents 04c5f7b71ff4
children 93667f6c5cc8

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;

void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
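
/*
 * The guest's TSC is modelled as a fixed offset from the host TSC:
 * hvm_set_guest_time() records cache_tsc_offset = gtime - host_tsc, and
 * hvm_get_guest_time() reconstructs the guest value as
 * host_tsc + cache_tsc_offset.  For example, hvm_vcpu_initialise() below
 * calls hvm_set_guest_time(v, 0), so the guest TSC counts from zero
 * regardless of the current host TSC value.
 */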

void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    pmtimer_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}

void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}
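
/*
 * Lifecycle of an ioreq, as handled above and in hvm_send_assist_req():
 *
 *   STATE_IOREQ_NONE      -> STATE_IOREQ_READY      (vcpu posts a request)
 *   STATE_IOREQ_READY     -> STATE_IOREQ_INPROCESS  (device model picks it up)
 *   STATE_IOREQ_INPROCESS -> STATE_IORESP_READY     (device model responds)
 *   STATE_IORESP_READY    -> STATE_IOREQ_NONE       (hvm_io_assist() completes)
 *
 * The middle two transitions are driven by the external device model; the
 * vcpu simply waits on its event channel until the response is ready.
 */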

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
                 "on a non-VT/AMDV platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq.lock);

    rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    pit_deinit(d);
    rtc_deinit(d);
    pmtimer_deinit(d);

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}

int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    rtc_init(v, RTC_PORT(0), RTC_IRQ);
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}

void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}
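
/*
 * Illustrative flow (field names are those of ioreq_t in public/hvm/ioreq.h;
 * exactly which fields matter depends on the access being emulated): an I/O
 * intercept handler fills in the vcpu's vp_ioreq -- e.g. type, addr, size
 * and dir for a port access -- and then calls hvm_send_assist_req(v).  Once
 * the device model has written its response, the vcpu resumes and
 * hvm_do_resume()/hvm_io_assist() complete the emulated instruction.
 */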

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            mfn = get_mfn_from_gpfn(shadow_gva_to_gfn(current, addr));
        else
            mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
            memcpy(p, buf, count); /* dir == TRUE:  *to* guest   */
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        addr += count;
        buf += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}
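
/*
 * Example (illustrative, not part of this file): since the return value is
 * the number of bytes *not* copied, callers treat non-zero as failure:
 *
 *     uint32_t val;
 *     if ( hvm_copy_from_guest_virt(&val, vaddr, sizeof(val)) != 0 )
 *         ... fail the emulated access ...
 *
 * The *_phys variants behave identically but take a guest physical address
 * rather than a guest virtual one.
 */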

/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
            {
                clear_bit(X86_FEATURE_PAE & 31, edx);
                clear_bit(X86_FEATURE_CX8 & 31, edx);
            }
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32
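
/*
 * HYPERCALL(memory_op), for example, expands to the designated initialiser
 *
 *     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *) do_memory_op
 *
 * so the tables below are indexed directly by hypercall number, with
 * unimplemented entries left NULL and rejected by the dispatchers.
 */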

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */

static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};
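
/*
 * The dispatcher below distinguishes guests by paging mode: a 64-bit
 * (4-level) guest passes its five hypercall arguments in rdi, rsi, rdx,
 * r10 and r8 and uses hvm_hypercall64_table, while a 32-bit guest passes
 * them in ebx, ecx, edx, esi and edi and uses hvm_hypercall32_table,
 * whose HYPERCALL_COMPAT32 entries (e.g. do_memory_op_compat32 above)
 * translate 32-bit argument layouts where they differ.
 */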

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.shadow.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

/*
 * Only called in HVM domain BSP context when booting; at that point
 * vcpuid is always equal to the APIC ID.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( bsp->vcpu_id != 0 )
    {
        gdprintk(XENLOG_ERR, "hvm_bringup_ap called from non-BSP context.\n");
        domain_crash(bsp->domain);
        return -EINVAL;
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu: rc = %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
        vcpu_wake(d->vcpu[vcpuid]);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}

static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = find_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = find_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = find_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    put_domain(d);
    return rc;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( d == NULL )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_gsi(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        put_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */