ia64/xen-unstable

view xen/arch/x86/hvm/hvm.c @ 10892:0d2ba35c0cf2

[XEN] Add hypercall support for HVM guests. This is
fairly useless at the moment, since all of the hypercalls
fail: copy_from_user doesn't work correctly in HVM
domains.

Signed-off-by: Steven Smith <ssmith@xensource.com>

Add a CPUID hypervisor platform interface at leaf
0x40000000. Allow hypercall transfer page to be filled
in via MSR 0x40000000.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Tue Aug 01 17:18:05 2006 +0100 (2006-08-01)
parents da7fe04d8e80
children c8ee670ac87e
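
For orientation, here is a rough guest-side sketch of the interface these two changes describe: probe the CPUID platform leaf at 0x40000000, then ask Xen to fill in a hypercall transfer page by writing its address to MSR 0x40000000, and finally call into the page's per-hypercall stubs. The leaf and MSR numbers come from the description above; the "XenVMMXenVMM" signature, the 32-byte stub stride, and the use of a guest-physical page address are assumptions, not spelled out in this changeset.

#include <stdint.h>
#include <string.h>

/* Illustrative guest-side probe/setup only; not part of this changeset. */

static inline void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                         uint32_t *c, uint32_t *d)
{
    asm volatile ("cpuid"
                  : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
                  : "0" (leaf));
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile ("wrmsr" :: "c" (msr), "a" ((uint32_t)val),
                             "d" ((uint32_t)(val >> 32)));
}

static char hypercall_page[4096] __attribute__((aligned(4096)));

int xen_hvm_init(void)
{
    uint32_t eax, ebx, ecx, edx;
    char sig[13];

    /* CPUID hypervisor platform leaf added by this changeset. */
    cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
    memcpy(sig + 0, &ebx, 4);
    memcpy(sig + 4, &ecx, 4);
    memcpy(sig + 8, &edx, 4);
    sig[12] = '\0';
    if ( strcmp(sig, "XenVMMXenVMM") != 0 )   /* assumed signature */
        return -1;

    /* Ask Xen to write hypercall stubs into our page.  The MSR expects a
     * guest-physical address; this sketch assumes an identity-mapped
     * setup, so the linear address is used directly. */
    wrmsr(0x40000000, (uint64_t)(unsigned long)hypercall_page);

    /* Hypercall i would then be invoked by calling into the page at
     * hypercall_page + i * 32 with arguments in EBX, ECX, EDX, ESI and
     * EDI (matching hvm_do_hypercall() below); the 32-byte stride is an
     * assumption. */
    return 0;
}
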
line source
/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/shadow.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/hvm_info_table.h>
int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;
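
/*
 * Invalidate the P2M translation for a range of guest page frames by
 * writing INVALID_MFN into phys_to_machine_mapping, so that guest
 * accesses to the range are treated as MMIO rather than RAM.
 */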
static void hvm_zap_mmio_range(
    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
{
    unsigned long i, val = INVALID_MFN;

    for ( i = 0; i < nr_pfn; i++ )
    {
        if ( pfn + i >= 0xfffff )
            break;

        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
    }
}
static void hvm_map_io_shared_page(struct domain *d)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;
    unsigned long gpfn = 0;

    local_flush_tlb_pge();

    mfn = get_mfn_from_gpfn(E820_MAP_PAGE >> PAGE_SHIFT);
    if (mfn == INVALID_MFN) {
        printk("Can not find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if (p == NULL) {
        printk("Can not map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    for ( i = 0; i < e820_map_nr; i++ )
    {
        if ( e820entry[i].type == E820_SHARED_PAGE )
            gpfn = (e820entry[i].addr >> PAGE_SHIFT);
        if ( e820entry[i].type == E820_IO )
            hvm_zap_mmio_range(
                d,
                e820entry[i].addr >> PAGE_SHIFT,
                e820entry[i].size >> PAGE_SHIFT);
    }

    if ( gpfn == 0 ) {
        printk("Can not get io request shared page"
               " from E820 memory map for HVM domain.\n");
        unmap_domain_page(p);
        domain_crash_synchronous();
    }
    unmap_domain_page(p);

    /* Initialise shared page */
    mfn = get_mfn_from_gpfn(gpfn);
    if (mfn == INVALID_MFN) {
        printk("Can not find io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page_global(mfn);
    if (p == NULL) {
        printk("Can not map io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
}
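
/*
 * The hvm_info_table is accepted only when its signature matches
 * "HVM INFO" and the bytes of the table sum to zero modulo 256
 * (an ACPI-style checksum).
 */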
static int validate_hvm_info(struct hvm_info_table *t)
{
    char signature[] = "HVM INFO";
    uint8_t *ptr = (uint8_t *)t;
    uint8_t sum = 0;
    int i;

    /* strncmp(t->signature, "HVM INFO", 8) */
    for ( i = 0; i < 8; i++ ) {
        if ( signature[i] != t->signature[i] ) {
            printk("Bad hvm info signature\n");
            return 0;
        }
    }

    for ( i = 0; i < t->length; i++ )
        sum += ptr[i];

    return (sum == 0);
}
static void hvm_get_info(struct domain *d)
{
    unsigned char *p;
    unsigned long mfn;
    struct hvm_info_table *t;

    mfn = get_mfn_from_gpfn(HVM_INFO_PFN);
    if ( mfn == INVALID_MFN ) {
        printk("Can not get info page mfn for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL ) {
        printk("Can not map info page for HVM domain.\n");
        domain_crash_synchronous();
    }

    t = (struct hvm_info_table *)(p + HVM_INFO_OFFSET);

    if ( validate_hvm_info(t) ) {
        d->arch.hvm_domain.nr_vcpus = t->nr_vcpus;
        d->arch.hvm_domain.apic_enabled = t->apic_enabled;
        d->arch.hvm_domain.pae_enabled = t->pae_enabled;
    } else {
        printk("Bad hvm info table\n");
        d->arch.hvm_domain.nr_vcpus = 1;
        d->arch.hvm_domain.apic_enabled = 0;
        d->arch.hvm_domain.pae_enabled = 0;
    }

    unmap_domain_page(p);
}
void hvm_setup_platform(struct domain* d)
{
    struct hvm_domain *platform;
    struct vcpu *v = current;

    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
        return;

    if ( shadow_direct_map_init(d) == 0 )
    {
        printk("Can not allocate shadow direct map for HVM domain.\n");
        domain_crash_synchronous();
    }

    hvm_map_io_shared_page(d);
    hvm_get_info(d);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }

    init_timer(&platform->pl_time.periodic_tm.timer, pt_timer_fn, v, v->processor);
    pit_init(v, cpu_khz);
}
void pic_irq_request(void *data, int level)
{
    int *interrupt_request = data;
    *interrupt_request = level;
}
void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16*)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }
    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16*)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}
u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
int cpu_get_interrupt(struct vcpu *v, int *type)
{
    int intno;
    struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
    unsigned long flags;

    if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 ) {
        /* set irq request if a PIC irq is still pending */
        /* XXX: improve that */
        spin_lock_irqsave(&s->lock, flags);
        pic_update_irq(s);
        spin_unlock_irqrestore(&s->lock, flags);
        return intno;
    }
    /* read the irq from the PIC */
    if ( v->vcpu_id == 0 && (intno = cpu_get_pic_interrupt(v, type)) != -1 )
        return intno;

    return -1;
}
/*
 * Copy from/to guest virtual.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long gpa, mfn;
    char *addr;
    int count;

    while (size > 0) {
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (hvm_paging_enabled(current)) {
            gpa = gva_to_gpa(vaddr);
            mfn = get_mfn_from_gpfn(gpa >> PAGE_SHIFT);
        } else
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if (dir == HVM_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        vaddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}
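
/*
 * hvm_copy() walks guest-virtual addresses page by page, translating via
 * gva_to_gpa() when guest paging is enabled, and is the HVM-aware
 * counterpart to copy_from_user()/copy_to_user(), which the changeset
 * description above notes do not yet work for HVM guests.
 */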
/*
 * HVM specific printbuf. Mostly used for hvmloader chit-chat.
 */
void hvm_print_line(struct vcpu *v, const char c)
{
    int *index = &v->domain->arch.hvm_domain.pbuf_index;
    char *pbuf = v->domain->arch.hvm_domain.pbuf;

    if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
        if (*index == HVM_PBUF_SIZE-2)
            pbuf[(*index)++] = c;
        pbuf[*index] = '\0';
        printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
        *index = 0;
    } else
        pbuf[(*index)++] = c;
}
#if defined(__i386__)

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x) [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
static hvm_hypercall_t *hvm_hypercall_table[] = {
    HYPERCALL(mmu_update),
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(update_va_mapping),
    HYPERCALL(event_channel_op_compat),
    HYPERCALL(xen_version),
    HYPERCALL(grant_table_op),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};
#undef HYPERCALL
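
/*
 * Dispatch a hypercall issued by an HVM guest: the hypercall number is
 * taken from EAX and up to five arguments from EBX, ECX, EDX, ESI and
 * EDI.  Calls from guest ring 3 are rejected with -EPERM; unknown or
 * unimplemented hypercall numbers return -ENOSYS.
 */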
void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( ring_3(pregs) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( pregs->eax >= ARRAY_SIZE(hvm_hypercall_table) ||
         !hvm_hypercall_table[pregs->eax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %d.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->eax);
        pregs->eax = -ENOSYS;
    }
    else
    {
        pregs->eax = hvm_hypercall_table[pregs->eax](
            pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
    }
}
#else /* __x86_64__ */

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    printk("not supported yet!\n");
}

#endif
/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}
/*
 * only called in HVM domain BSP context
 * when booting, vcpuid is always equal to apic_id
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    /* current must be HVM domain BSP */
    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
        printk("Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash_synchronous();
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) {
        printk("Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
        printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
    else {
        if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
            vcpu_wake(d->vcpu[vcpuid]);
        printk("AP %d bringup succeeded.\n", vcpuid);
    }

    xfree(ctxt);

    return rc;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */