ia64/xen-unstable: xen/arch/x86/hvm/hvm.c @ 14330:c9dba7b35393

[HVM] Revert thinko from 14291
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author    Tim Deegan <Tim.Deegan@xensource.com>
date      Fri Mar 09 11:11:23 2007 +0000 (2007-03-09)
parents   eedbddf55e51
children  3afefd64e392
/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/paging.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled __read_mostly;

unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs __read_mostly;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];
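
/* Called by the VMX or SVM start-of-day code to register its function
 * table; the I/O bitmap and the hvm_enabled flag are set up on first call. */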
void hvm_enable(struct hvm_function_table *fns)
{
    if ( hvm_enabled )
        return;

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs = *fns;
    hvm_enabled = 1;
}

void hvm_disable(void)
{
    if ( hvm_enabled )
        hvm_funcs.disable();
}
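
/* If the vcpu's FPU state is not already live, have the VMX/SVM layer set
 * CR0.TS so the next FPU use traps and the state can be restored lazily. */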
void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}
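
/* Guest time is maintained as an offset from the host TSC; the offset is
 * cached here and programmed into the VMX/SVM hardware for the guest. */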
void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
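
/* Move this vcpu's virtual platform timers onto the physical CPU it now
 * runs on, so that their callbacks fire locally. */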
void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}
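
/* Called on the path back into the guest: re-arm lazy FPU handling and
 * periodic timers, then wait for the device model to complete any
 * outstanding ioreq before resuming guest execution. */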
void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}

/* Called from the tools when saving a domain to make sure the io
 * request-response ring is entirely empty. */
static int hvmop_drain_io(
    XEN_GUEST_HANDLE(xen_hvm_drain_io_t) uop)
{
    struct xen_hvm_drain_io op;
    struct domain *d;
    struct vcpu *v;
    ioreq_t *p;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    /* Can't do this to yourself, or to a domain without an ioreq ring */
    if ( d == current->domain || !is_hvm_domain(d) || get_sp(d) == NULL )
        goto out;

    rc = 0;

    domain_pause(d); /* It's not safe to do this to running vcpus */
    for_each_vcpu(d, v)
    {
        p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
        if ( p->state == STATE_IORESP_READY )
            hvm_io_assist(v);
    }
    domain_unpause(d);

 out:
    rcu_unlock_domain(d);
    return rc;
}

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
                 "on a non-VT/AMDV platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    /* paging support will be determined inside paging.c */
    rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    pit_deinit(d);
    rtc_deinit(d);
    hpet_deinit(d);

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}
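
/* Per-vcpu architectural state is saved and restored through the handlers
 * registered below with HVM_REGISTER_SAVE_RESTORE(CPU, ...). */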
static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    for_each_vcpu(d, v)
    {
        /* We don't need to save state for a vcpu that is down; the restore
         * code will leave it down if there is nothing saved. */
        if ( test_bit(_VCPUF_down, &v->vcpu_flags) )
            continue;

        hvm_funcs.save_cpu_ctxt(v, &ctxt);
        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid;
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;

    /* Which vcpu is this? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
        return -EINVAL;

    if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
        return -EINVAL;

    /* Auxiliary processors should be woken immediately. */
    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
                          1, HVMSR_PER_VCPU);

int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    pit_init(v, cpu_khz);
    rtc_init(v, RTC_PORT(0));
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
    hpet_init(v);

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}
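
/* Return a vcpu to its power-on state: reset the local APIC and the
 * VMX/SVM register state, and mark the vcpu offline until it is
 * explicitly brought back up. */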
void hvm_vcpu_reset(struct vcpu *v)
{
    vcpu_pause(v);

    vlapic_reset(vcpu_vlapic(v));

    hvm_funcs.vcpu_initialise(v);

    set_bit(_VCPUF_down, &v->vcpu_flags);
    clear_bit(_VCPUF_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
    clear_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags);
    clear_bit(_VCPUF_blocked, &v->vcpu_flags);

    vcpu_unpause(v);
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}
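
/* Post the current vcpu's pending ioreq to the device model and prepare to
 * block until the response arrives. */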
void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

void hvm_triple_fault(void)
{
    struct vcpu *v = current;
    gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
             "invoking HVM system reset.\n", v->vcpu_id);
    domain_shutdown(v->domain, SHUTDOWN_reboot);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long gfn, mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            gfn = paging_gva_to_gfn(current, addr);
        else
            gfn = addr >> PAGE_SHIFT;

        mfn = get_mfn_from_gpfn(gfn);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
        {
            memcpy(p, buf, count); /* dir == TRUE: *to* guest */
            mark_dirty(current->domain, mfn);
        }
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        addr += count;
        buf += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}

/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}
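
/* CPUID as seen by the guest: Xen's own leaves are handled by
 * cpuid_hypervisor_leaves(); everything else is the host's CPUID with
 * features the guest cannot use (MWAIT, APIC when disabled, PAE, NX,
 * long mode, ...) masked out. */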
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_PAE & 31, edx);
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x) \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x) \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};
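
/* Dispatch a hypercall issued from HVM guest context. Calls made from guest
 * ring 3 are rejected; only hypercalls listed in the table are handled. */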
void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */
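
/* A 32-bit guest on a 64-bit hypervisor passes a differently laid out
 * xen_add_to_physmap structure; repack it before calling do_memory_op(). */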
static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.paging.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for an HVM domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

/*
 * Only called in HVM domain BSP context. When booting, vcpuid is always
 * equal to the APIC ID.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}
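
/* HVMOP_set_pci_intx_level: the privileged device model asserts or
 * deasserts a virtual PCI INTx line on behalf of an emulated device. */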
static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    rcu_unlock_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    rcu_unlock_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    rcu_unlock_domain(d);
    return rc;
}
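
/* Top-level dispatcher for the HVMOP_* hypercalls. */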
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
            d = rcu_lock_current_domain();
        else if ( IS_PRIV(current->domain) )
            d = rcu_lock_domain_by_id(a.domid);
        else
            return -EPERM;

        if ( d == NULL )
            return -ESRCH;

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        rcu_unlock_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    case HVMOP_drain_io:
        rc = hvmop_drain_io(
            guest_handle_cast(arg, xen_hvm_drain_io_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */