ia64/xen-unstable: xen/arch/x86/hvm/hvm.c @ 14090:cdc765772f69

hvm: Clean up initialisation of hvm_funcs.

Signed-off-by: Keir Fraser <keir@xensource.com>
author    kfraser@localhost.localdomain
date      Fri Feb 23 11:32:25 2007 +0000 (2007-02-23)
parents   6daa91dc9247
children  720afbf74001

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/paging.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled __read_mostly;

unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs __read_mostly;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];
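
/*
 * Each bit in hvm_io_bitmap covers one I/O port; a set bit causes guest
 * accesses to that port to be intercepted, a clear bit allows direct
 * access.  hvm_enable() below sets every bit and then clears only the
 * one for the PC debug port.
 */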

void hvm_enable(struct hvm_function_table *fns)
{
    if ( hvm_enabled )
        return;

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs = *fns;
    hvm_enabled = 1;
}

void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
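
/*
 * Guest time is maintained as (host TSC + cache_tsc_offset):
 * hvm_set_guest_time() derives the offset from the requested guest time
 * and the current host TSC, and hvm_get_guest_time() applies the same
 * offset on the read side.
 */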

void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}

void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
                 "on a non-VT/AMDV platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    rc = paging_enable(d, PG_SH_enable|PG_refcounts|PG_translate|PG_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    pit_deinit(d);
    rtc_deinit(d);
    hpet_deinit(d);

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}

static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    for_each_vcpu(d, v)
    {
        /* We don't need to save state for a vcpu that is down; the restore
         * code will leave it down if there is nothing saved. */
        if ( test_bit(_VCPUF_down, &v->vcpu_flags) )
            continue;

        hvm_funcs.save_cpu_ctxt(v, &ctxt);
        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    /* Which vcpu is this? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
        return -EINVAL;

    /* Auxiliary processors should be woken immediately. */
    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                          1, HVMSR_PER_VCPU);
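
/*
 * The registration above ties the CPU save record to the handlers defined
 * in this file: hvm_save_cpu_ctxt() emits one record per online vcpu and
 * hvm_load_cpu_ctxt() consumes them, waking any vcpu it restores.
 * HVMSR_PER_VCPU marks the record as per-vcpu rather than per-domain.
 */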

int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    pit_init(v, cpu_khz);
    rtc_init(v, RTC_PORT(0));
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
    hpet_init(v);

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}


void hvm_vcpu_reset(struct vcpu *v)
{
    vcpu_pause(v);

    vlapic_reset(vcpu_vlapic(v));

    hvm_funcs.vcpu_initialise(v);

    set_bit(_VCPUF_down, &v->vcpu_flags);
    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
    clear_bit(_VCPUF_blocked, &v->vcpu_flags);

    vcpu_unpause(v);
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}

void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}
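
/*
 * Taken together with hvm_do_resume() above, this implements the ioreq
 * round trip: the vcpu fills in vp_ioreq, marks it IOREQ_READY and
 * notifies the device model over the per-vcpu event channel; the device
 * model services the request and sets IORESP_READY; when the vcpu is
 * rescheduled, hvm_do_resume() picks up the response via hvm_io_assist()
 * and the slot returns to IOREQ_NONE.
 */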

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

void hvm_triple_fault(void)
{
    struct vcpu *v = current;
    gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
             "invoking HVM system reset.\n", v->vcpu_id);
    domain_shutdown(v->domain, SHUTDOWN_reboot);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            mfn = get_mfn_from_gpfn(paging_gva_to_gfn(current, addr));
        else
            mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
            memcpy(p, buf, count); /* dir == TRUE: *to* guest */
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        addr += count;
        buf += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}
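
/*
 * Typical use of the helpers above (illustrative sketch, not from this
 * file): copy a hypervisor-side buffer into guest-physical memory and
 * treat a non-zero return as a partial copy, e.g.
 *
 *     if ( hvm_copy_to_guest_phys(gpa, &info, sizeof(info)) != 0 )
 *         gdprintk(XENLOG_WARNING, "partial copy to guest\n");
 *
 * where 'gpa' and 'info' are a caller-supplied guest-physical address and
 * structure; the return value is the number of bytes left uncopied.
 */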

/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_PAE & 31, edx);
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32
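
/*
 * For example, HYPERCALL(memory_op) expands to the designated initialiser
 *     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *) do_memory_op
 * and HYPERCALL_COMPAT32(memory_op) substitutes do_memory_op_compat32,
 * so each table below maps a hypercall number to its handler.
 */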

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */

static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}
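
/*
 * The shim above handles a 32-bit guest issuing XENMEM_add_to_physmap on a
 * 64-bit hypervisor: it reads the guest's 32-bit argument layout, repacks
 * it into the native struct xen_add_to_physmap, and forwards it to
 * do_memory_op() with guest_handles_in_xen_space set so the handle is
 * interpreted as a Xen-internal pointer rather than a guest address.
 */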

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.paging.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}


/*
 * only called in HVM domain BSP context
 * when booting, vcpuid is always equal to apic_id
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}

static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    put_domain(d);
    return rc;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = get_domain_by_id(a.domid);
            if ( d == NULL )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        put_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */