ia64/xen-unstable: xen/arch/x86/hvm/hvm.c @ 14196:9d36026b1b43

xen: Cleanups and bug fixes after the rcu_lock_domain patch.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Thu Mar 01 11:38:55 2007 +0000
parents   09a9b6d6c356
children  a7f6392ea850

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/paging.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled __read_mostly;

unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs __read_mostly;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];
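
/*
 * Sizing note (inferred from the VMX/SVM architecture specs rather than
 * stated here): VMX uses two 4K I/O bitmap pages (A and B) and SVM's IOPM
 * is 12K, so a single three-page buffer can back either vendor's format.
 */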

void hvm_enable(struct hvm_function_table *fns)
{
    if ( hvm_enabled )
        return;

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs = *fns;
    hvm_enabled = 1;
}
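
/*
 * hvm_enable() is called once at boot by the vendor start-up code with its
 * filled-in function table, e.g. (sketch; vmx_function_table is the assumed
 * VMX-side name, not defined in this file):
 *
 *     static struct hvm_function_table vmx_function_table = { ... };
 *     ...
 *     hvm_enable(&vmx_function_table);
 */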

void hvm_disable(void)
{
    if ( hvm_enabled )
        hvm_funcs.disable();
}

void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
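
/*
 * Invariant maintained by the two helpers above:
 *     guest_tsc = host_tsc + cache_tsc_offset
 * hvm_set_guest_time() records the offset (gtime - host_tsc) and pushes it
 * to hardware via hvm_funcs.set_tsc_offset(); hvm_get_guest_time() applies
 * the same offset to a fresh host TSC read.
 */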

void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}

void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}
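
/*
 * ioreq handshake driven above (state lives in the shared ioreq page):
 *     IOREQ_NONE  -> IOREQ_READY      (Xen: hvm_send_assist_req)
 *     IOREQ_READY -> IOREQ_INPROCESS  (device model picks the request up)
 *     IOREQ_INPROCESS -> IORESP_READY (device model completes it)
 *     IORESP_READY -> IOREQ_NONE      (Xen: hvm_io_assist, on resume above)
 */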

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
                 "on a non-VT/AMDV platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    rc = paging_enable(d, PG_SH_enable|PG_refcounts|PG_translate|PG_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    pit_deinit(d);
    rtc_deinit(d);
    hpet_deinit(d);

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}

static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    for_each_vcpu(d, v)
    {
        /* We don't need to save state for a vcpu that is down; the restore
         * code will leave it down if there is nothing saved. */
        if ( test_bit(_VCPUF_down, &v->vcpu_flags) )
            continue;

        hvm_funcs.save_cpu_ctxt(v, &ctxt);
        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    /* Which vcpu is this? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
        return -EINVAL;

    /* Auxiliary processors should be woken immediately. */
    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                          1, HVMSR_PER_VCPU);
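
/*
 * The registration above hooks the two handlers into the generic HVM
 * save/restore framework: they emit/consume one CPU record per VCPU
 * (HVMSR_PER_VCPU).
 */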

int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    pit_init(v, cpu_khz);
    rtc_init(v, RTC_PORT(0));
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
    hpet_init(v);

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}


void hvm_vcpu_reset(struct vcpu *v)
{
    vcpu_pause(v);

    vlapic_reset(vcpu_vlapic(v));

    hvm_funcs.vcpu_initialise(v);

    set_bit(_VCPUF_down, &v->vcpu_flags);
    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
    clear_bit(_VCPUF_blocked, &v->vcpu_flags);

    vcpu_unpause(v);
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}

void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

void hvm_triple_fault(void)
{
    struct vcpu *v = current;
    gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
             "invoking HVM system reset.\n", v->vcpu_id);
    domain_shutdown(v->domain, SHUTDOWN_reboot);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            mfn = get_mfn_from_gpfn(paging_gva_to_gfn(current, addr));
        else
            mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
            memcpy(p, buf, count); /* dir == TRUE:  *to* guest */
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        mark_dirty(current->domain, mfn);

        addr += count;
        buf  += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}
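
/*
 * Typical use of the wrappers above (sketch; 'gpa' and 'info' are
 * illustrative names, not taken from this file):
 *
 *     struct hvm_info_table info;
 *     ...
 *     if ( hvm_copy_to_guest_phys(gpa, &info, sizeof(info)) != 0 )
 *         return -EFAULT;    (some bytes could not be copied: bad gfn)
 *
 * A zero return means complete success; otherwise the return value is the
 * number of bytes left uncopied.
 */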

/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}
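
/*
 * hvm_print_line() buffers guest debug output one character at a time and
 * flushes on '\n' or when the buffer is nearly full; the I/O port intercept
 * that feeds it is registered outside this file.
 */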

void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_PAE & 31, edx);
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32
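
/*
 * For reference, HYPERCALL(memory_op) expands to the designated initializer
 *     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *) do_memory_op
 * so the tables below are sparse arrays indexed by hypercall number, with
 * NULL entries for unimplemented calls.
 */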

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}
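
/*
 * 32-bit HVM hypercall convention, as dispatched above: the hypercall
 * number arrives in %eax, up to five arguments in %ebx, %ecx, %edx, %esi,
 * %edi, and the return value is written back to %eax.
 */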

#else /* defined(__x86_64__) */

static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}
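
/*
 * The compat32 thunk above is needed because 'unsigned long' and xen_pfn_t
 * fields in xen_add_to_physmap are 32 bits wide for a 32-bit guest: the
 * guest's layout is read first and widened into the native structure, which
 * is then passed via a handle in Xen's own address space
 * (guest_handles_in_xen_space).
 */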

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.paging.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}
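
/*
 * Dispatch summary for the 64-bit build above: a 64-bit guest (four paging
 * levels) passes arguments in %rdi, %rsi, %rdx, %r10, %r8; a 32-bit guest
 * goes through the compat table with arguments in %ebx, %ecx, %edx, %esi,
 * %edi, truncated to 32 bits.
 */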

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}


/*
 * Only called in HVM domain BSP context when booting;
 * vcpuid is always equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}

static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    rcu_unlock_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    rcu_unlock_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    rcu_unlock_domain(d);
    return rc;
}
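
/*
 * The three HVMOP helpers above share the same shape: copy_from_guest,
 * privilege and range checks, rcu_lock_domain_by_id, act on the HVM domain,
 * rcu_unlock_domain -- the rcu_lock_domain pattern referred to in the
 * changeset description.
 */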

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
            d = rcu_lock_current_domain();
        else if ( IS_PRIV(current->domain) )
            d = rcu_lock_domain_by_id(a.domid);
        else
            return -EPERM;

        if ( d == NULL )
            return -ESRCH;

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        rcu_unlock_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */