ia64/xen-unstable

xen/arch/x86/hvm/hvm.c @ 11081:323eb29083e6

[HVM] Remove unused apic_enabled field from hvm_info_table.
Signed-off-by: Keir Fraser <keir@xensource.com>

author:   kfraser@localhost.localdomain
date:     Thu Aug 10 15:45:47 2006 +0100
parents:  dc7b56b8cfb5
children: befab551b0e1

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/shadow.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;
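
/*
 * Mark a range of guest page frames as invalid (INVALID_MFN) in the
 * physmap so that the corresponding MMIO addresses are not backed by
 * guest RAM.  The loop stops once it reaches PFN 0xfffff.
 */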
static void hvm_zap_mmio_range(
    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
{
    unsigned long i, val = INVALID_MFN;

    ASSERT(d == current->domain);

    for ( i = 0; i < nr_pfn; i++ )
    {
        if ( pfn + i >= 0xfffff )
            break;

        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
    }
}

static void e820_zap_iommu_callback(struct domain *d,
                                    struct e820entry *e,
                                    void *ign)
{
    if ( e->type == E820_IO )
        hvm_zap_mmio_range(d, e->addr >> PAGE_SHIFT, e->size >> PAGE_SHIFT);
}
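
/*
 * Iterate over the guest's E820 memory map (read from the guest page at
 * E820_MAP_PAGE) and invoke the supplied callback on each entry.
 */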
static void e820_foreach(struct domain *d,
                         void (*cb)(struct domain *d,
                                    struct e820entry *e,
                                    void *data),
                         void *data)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, E820_MAP_PAGE >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL )
    {
        printk("Cannot map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    for ( i = 0; i < e820_map_nr; i++ )
        cb(d, e820entry + i, data);

    unmap_domain_page(p);
}

static void hvm_zap_iommu_pages(struct domain *d)
{
    e820_foreach(d, e820_zap_iommu_callback, NULL);
}

static void e820_map_io_shared_callback(struct domain *d,
                                        struct e820entry *e,
                                        void *data)
{
    unsigned long *mfn = data;
    if ( e->type == E820_SHARED_PAGE )
    {
        ASSERT(*mfn == INVALID_MFN);
        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    }
}

static void e820_map_buffered_io_callback(struct domain *d,
                                          struct e820entry *e,
                                          void *data)
{
    unsigned long *mfn = data;
    if ( e->type == E820_BUFFERED_IO ) {
        ASSERT(*mfn == INVALID_MFN);
        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    }
}
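
/*
 * Locate the ioreq shared page (E820_SHARED_PAGE) and, if present, the
 * buffered-I/O page (E820_BUFFERED_IO) advertised in the guest E820 map,
 * and cache their hypervisor mappings in the hvm_domain structure.
 */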
void hvm_map_io_shared_pages(struct vcpu *v)
{
    unsigned long mfn;
    void *p;
    struct domain *d = v->domain;

    if ( d->arch.hvm_domain.shared_page_va ||
         d->arch.hvm_domain.buffered_io_va )
        return;

    mfn = INVALID_MFN;
    e820_foreach(d, e820_map_io_shared_callback, &mfn);

    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page_global(mfn);
    if ( p == NULL )
    {
        printk("Cannot map io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    d->arch.hvm_domain.shared_page_va = (unsigned long)p;

    mfn = INVALID_MFN;
    e820_foreach(d, e820_map_buffered_io_callback, &mfn);
    if ( mfn != INVALID_MFN ) {
        p = map_domain_page_global(mfn);
        if ( p )
            d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
    }
}
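
/*
 * Allocate the per-vcpu event channels used by the device model for ioreq
 * notification.  All channels are created when vcpu 0 starts up.
 */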
void hvm_create_event_channels(struct vcpu *v)
{
    vcpu_iodata_t *p;
    struct vcpu *o;

    if ( v->vcpu_id == 0 ) {
        /* Ugly: create event channels for every vcpu when vcpu 0
           starts, so that they're available for ioemu to bind to. */
        for_each_vcpu(v->domain, o) {
            p = get_vio(v->domain, o->vcpu_id);
            o->arch.hvm_vcpu.xen_port = p->vp_eport =
                alloc_unbound_xen_event_channel(o, 0);
            DPRINTK("Allocated port %d for hvm.\n", o->arch.hvm_vcpu.xen_port);
        }
    }
}

void hvm_release_assist_channel(struct vcpu *v)
{
    free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
}
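
/*
 * One-time platform setup, performed when the HVM guest's BSP (vcpu 0)
 * first runs: shadow direct map, MMIO zapping, virtual PIC/IOAPIC,
 * buffered-I/O lock, and the platform timers (periodic timer and PIT).
 */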
void hvm_setup_platform(struct domain* d)
{
    struct hvm_domain *platform;
    struct vcpu *v = current;

    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
        return;

    if ( shadow_direct_map_init(d) == 0 )
    {
        printk("Cannot allocate shadow direct map for HVM domain.\n");
        domain_crash_synchronous();
    }

    hvm_zap_iommu_pages(d);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }

    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);

    init_timer(&platform->pl_time.periodic_tm.timer,
               pt_timer_fn, v, v->processor);
    pit_init(v, cpu_khz);
}

void pic_irq_request(void *data, int level)
{
    int *interrupt_request = data;
    *interrupt_request = level;
}
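
/*
 * Pick up PIC IRR set/clear requests posted by the device model in the
 * shared-page global area and apply them to the virtual PIC.  Each word
 * is snapshotted and cleared atomically with cmpxchg().
 */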
void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16*)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }
    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16*)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}
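
/* Guest time is the host TSC adjusted by the vcpu's cached TSC offset. */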
u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
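
/*
 * Return the next pending interrupt vector for this vcpu, preferring the
 * local APIC over the virtual PIC (PIC interrupts are delivered to vcpu 0
 * only), or -1 if nothing is pending.
 */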
int cpu_get_interrupt(struct vcpu *v, int *type)
{
    int intno;
    struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
    unsigned long flags;

    if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 ) {
        /* set irq request if a PIC irq is still pending */
        /* XXX: improve that */
        spin_lock_irqsave(&s->lock, flags);
        pic_update_irq(s);
        spin_unlock_irqrestore(&s->lock, flags);
        return intno;
    }
    /* read the irq from the PIC */
    if ( v->vcpu_id == 0 && (intno = cpu_get_pic_interrupt(v, type)) != -1 )
        return intno;

    return -1;
}

/*
 * Copy from/to guest virtual.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long mfn;
    char *addr;
    int count;

    while (size > 0) {
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (hvm_paging_enabled(current))
            mfn = gva_to_mfn(vaddr);
        else
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if (dir == HVM_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        vaddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}

/*
 * HVM specific printbuf. Mostly used for hvmloader chit-chat.
 */
void hvm_print_line(struct vcpu *v, const char c)
{
    int *index = &v->domain->arch.hvm_domain.pbuf_index;
    char *pbuf = v->domain->arch.hvm_domain.pbuf;

    if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
        if (*index == HVM_PBUF_SIZE-2)
            pbuf[(*index)++] = c;
        pbuf[*index] = '\0';
        printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
        *index = 0;
    } else
        pbuf[(*index)++] = c;
}
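
/*
 * Hypercall dispatch for HVM guests.  Only a small subset of hypercalls is
 * exposed; the tables below map hypercall numbers to handlers for the
 * 32-bit and (on x86_64) 64-bit guest ABIs.
 */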
typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x) \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x) \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %d.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */
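
/*
 * 32-on-64 compatibility wrapper for do_memory_op(): translate the 32-bit
 * xen_add_to_physmap layout passed by a compat guest into the native
 * structure before calling the real handler.
 */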
static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        DPRINTK("memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};
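
/*
 * x86_64 hypercall entry: reject ring-3 callers, then dispatch via the
 * 64-bit table for long-mode (PAGING_L4) guests or the compat 32-bit
 * table otherwise.
 */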
void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %ld.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->domain->arch.ops->guest_paging_levels == PAGING_L4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

/*
 * Only called in HVM domain BSP context.
 * When booting, vcpuid is always equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    /* current must be HVM domain BSP */
    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
        printk("Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash_synchronous();
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) {
        printk("Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
        printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
    else {
        if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
            vcpu_wake(d->vcpu[vcpuid]);
        printk("AP %d bringup succeeded.\n", vcpuid);
    }

    xfree(ctxt);

    return rc;
}
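
/*
 * HVMOP hypercall handler: get or set the per-domain HVM parameters,
 * either for the calling domain (DOMID_SELF) or, for privileged callers,
 * any domain.
 */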
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( !d )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        if ( op == HVMOP_set_param )
        {
            rc = 0;
            d->arch.hvm_domain.params[a.index] = a.value;
        }
        else
        {
            rc = d->arch.hvm_domain.params[a.index];
        }

        put_domain(d);
        return rc;
    }

    default:
    {
        DPRINTK("Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */