ia64/xen-unstable: xen/arch/x86/hvm/hvm.c @ changeset 14137:720afbf74001

[XEN] Allow log-dirty mode to be enabled on already-shadowed domains,
and catch a few missing mark_dirty() calls.

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
Author:   Tim Deegan <Tim.Deegan@xensource.com>
Date:     Mon Feb 26 13:56:01 2007 +0000
Parents:  cdc765772f69
Children: d39dcdb9cca3

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/paging.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled __read_mostly;

unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs __read_mostly;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];

void hvm_enable(struct hvm_function_table *fns)
{
    if ( hvm_enabled )
        return;

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs = *fns;
    hvm_enabled = 1;
}

void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}
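
/*
 * Guest time is kept as a per-vcpu offset from the host TSC
 * (cache_tsc_offset): setting the guest time records
 * offset = gtime - host_tsc and pushes it to the vendor code via
 * hvm_funcs.set_tsc_offset(), while reading it simply adds the cached
 * offset back onto the current host TSC.
 */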
void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}

void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}
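
/*
 * An in-flight I/O request to the device model moves through
 * STATE_IOREQ_NONE -> STATE_IOREQ_READY -> STATE_IOREQ_INPROCESS ->
 * STATE_IORESP_READY and back to STATE_IOREQ_NONE once the response has
 * been consumed.  hvm_do_resume() waits for any request issued on this
 * vcpu's ioreq slot to reach IORESP_READY, folds the result back in via
 * hvm_io_assist(), and only then lets the vcpu re-enter the guest.
 */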
void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
                 "on a non-VT/AMDV platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    rc = paging_enable(d, PG_SH_enable|PG_refcounts|PG_translate|PG_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    pit_deinit(d);
    rtc_deinit(d);
    hpet_deinit(d);

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}

static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    for_each_vcpu(d, v)
    {
        /* We don't need to save state for a vcpu that is down; the restore
         * code will leave it down if there is nothing saved. */
        if ( test_bit(_VCPUF_down, &v->vcpu_flags) )
            continue;

        hvm_funcs.save_cpu_ctxt(v, &ctxt);
        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    /* Which vcpu is this? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
        return -EINVAL;

    /* Auxiliary processors should be woken immediately. */
    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);

    return 0;
}
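
/*
 * Hook the two handlers above into the HVM save/restore machinery under
 * the CPU record type.  HVMSR_PER_VCPU indicates that the record is kept
 * per virtual CPU, matching the for_each_vcpu() loop in the save path and
 * the hvm_load_instance() lookup in the load path.
 */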
HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                          1, HVMSR_PER_VCPU);

int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    pit_init(v, cpu_khz);
    rtc_init(v, RTC_PORT(0));
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
    hpet_init(v);

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}

void hvm_vcpu_reset(struct vcpu *v)
{
    vcpu_pause(v);

    vlapic_reset(vcpu_vlapic(v));

    hvm_funcs.vcpu_initialise(v);

    set_bit(_VCPUF_down, &v->vcpu_flags);
    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
    clear_bit(_VCPUF_blocked, &v->vcpu_flags);

    vcpu_unpause(v);
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}

void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

void hvm_triple_fault(void)
{
    struct vcpu *v = current;
    gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
             "invoking HVM system reset.\n", v->vcpu_id);
    domain_shutdown(v->domain, SHUTDOWN_reboot);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            mfn = get_mfn_from_gpfn(paging_gva_to_gfn(current, addr));
        else
            mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
            memcpy(p, buf, count); /* dir == TRUE:  *to* guest */
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        mark_dirty(current->domain, mfn);

        addr += count;
        buf  += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}
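
/*
 * Illustrative usage of the wrappers above (hypothetical caller; 'gpa' is
 * assumed to be a guest-physical address the caller already holds):
 *
 *     struct hvm_hw_cpu c;
 *     if ( hvm_copy_to_guest_phys(gpa, &c, sizeof(c)) != 0 )
 *         ... some bytes could not be copied (bad gfn) ...
 *     if ( hvm_copy_from_guest_phys(&c, gpa, sizeof(c)) != 0 )
 *         ... likewise for the read direction ...
 *
 * Note the return value is the number of bytes left uncopied, not an
 * errno: zero means complete success.
 */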

/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}
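
/*
 * Note on the feature masking below: X86_FEATURE_* constants encode
 * (32 * word_index + bit), so masking with 31 recovers the bit position
 * within the single 32-bit register (ecx or edx) being edited.
 */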
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_PAE & 31, edx);
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32
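
/*
 * For example, HYPERCALL(memory_op) expands to the designated initialiser
 *
 *     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *) do_memory_op
 *
 * so the tables below can be indexed directly by hypercall number.
 */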

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */
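
/*
 * A 32-bit guest lays out xen_add_to_physmap with 32-bit 'idx' and 'gpfn'
 * fields, whereas the native 64-bit structure uses wider fields, so the
 * compat handler below copies the guest's layout into a local shadow and
 * repacks it into a native structure before calling do_memory_op().
 */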
static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};
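
/*
 * 64-bit guests pass hypercall arguments in rdi/rsi/rdx/r10/r8; 32-bit
 * guests running on a 64-bit hypervisor use ebx/ecx/edx/esi/edi and go
 * through hvm_hypercall32_table (including the compat32 memory_op shim
 * above).  hvm_do_hypercall() picks the table based on the guest's paging
 * mode: guest_levels == 4 means a 64-bit guest.
 */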
void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.paging.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

/*
 * Only called in HVM domain BSP context.  When booting, vcpuid is always
 * equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}

static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = get_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    put_domain(d);
    return rc;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = get_domain_by_id(a.domid);
            if ( d == NULL )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        put_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */