ia64/xen-unstable

view xen/arch/x86/hvm/hvm.c @ 10946:7ff6020e4758

[HVM] Tidy up e820 parsing in Xen.
Signed-off-by: Steven Smith <ssmith@xensource.com>
author kfraser@localhost.localdomain
date Thu Aug 03 15:02:34 2006 +0100 (2006-08-03)
parents b33c08de3d98
children bfe12b4d45d3
line source
/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/shadow.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/hvm_info_table.h>
#include <xen/guest_access.h>

int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;

static void hvm_zap_mmio_range(
    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
{
    unsigned long i, val = INVALID_MFN;

    ASSERT(d == current->domain);

    for ( i = 0; i < nr_pfn; i++ )
    {
        if ( pfn + i >= 0xfffff )
            break;

        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
    }
}

static void e820_zap_iommu_callback(struct domain *d,
                                    struct e820entry *e,
                                    void *ign)
{
    if ( e->type == E820_IO )
        hvm_zap_mmio_range(d, e->addr >> PAGE_SHIFT, e->size >> PAGE_SHIFT);
}

static void e820_foreach(struct domain *d,
                         void (*cb)(struct domain *d,
                                    struct e820entry *e,
                                    void *data),
                         void *data)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, E820_MAP_PAGE >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN )
    {
        printk("Can not find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL )
    {
        printk("Can not map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    for ( i = 0; i < e820_map_nr; i++ )
        cb(d, e820entry + i, data);

    unmap_domain_page(p);
}

static void hvm_zap_iommu_pages(struct domain *d)
{
    e820_foreach(d, e820_zap_iommu_callback, NULL);
}

static void e820_map_io_shared_callback(struct domain *d,
                                        struct e820entry *e,
                                        void *data)
{
    unsigned long *mfn = data;
    if ( e->type == E820_SHARED_PAGE )
    {
        ASSERT(*mfn == INVALID_MFN);
        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    }
}

void hvm_map_io_shared_page(struct vcpu *v)
{
    unsigned long mfn = INVALID_MFN;
    void *p;
    struct domain *d = v->domain;

    if ( d->arch.hvm_domain.shared_page_va )
        return;

    e820_foreach(d, e820_map_io_shared_callback, &mfn);

    if ( mfn == INVALID_MFN )
    {
        printk("Can not find io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page_global(mfn);
    if ( p == NULL )
    {
        printk("Can not map io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
}

void hvm_setup_platform(struct domain *d)
{
    struct hvm_domain *platform;
    struct vcpu *v = current;

    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
        return;

    if ( shadow_direct_map_init(d) == 0 )
    {
        printk("Can not allocate shadow direct map for HVM domain.\n");
        domain_crash_synchronous();
    }

    hvm_zap_iommu_pages(d);
    hvm_map_io_shared_page(v);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }

    init_timer(&platform->pl_time.periodic_tm.timer,
               pt_timer_fn, v, v->processor);
    pit_init(v, cpu_khz);
}

void pic_irq_request(void *data, int level)
{
    int *interrupt_request = data;
    *interrupt_request = level;
}

void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }
    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}

int cpu_get_interrupt(struct vcpu *v, int *type)
{
    int intno;
    struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
    unsigned long flags;

    if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 ) {
        /* set irq request if a PIC irq is still pending */
        /* XXX: improve that */
        spin_lock_irqsave(&s->lock, flags);
        pic_update_irq(s);
        spin_unlock_irqrestore(&s->lock, flags);
        return intno;
    }
    /* read the irq from the PIC */
    if ( v->vcpu_id == 0 && (intno = cpu_get_pic_interrupt(v, type)) != -1 )
        return intno;

    return -1;
}

/*
 * Copy from/to guest virtual.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long gpa, mfn;
    char *addr;
    int count;

    while (size > 0) {
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (hvm_paging_enabled(current)) {
            gpa = gva_to_gpa(vaddr);
            mfn = get_mfn_from_gpfn(gpa >> PAGE_SHIFT);
        } else
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if (dir == HVM_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        vaddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}

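/*
 * Illustrative sketch, not part of the original file: one plausible caller of
 * hvm_copy(), fetching a few instruction bytes at the guest's current
 * instruction pointer.  The wrapper name, buffer size and use of regs->eip
 * are assumptions for illustration only; HVM_COPY_IN comes from
 * asm/hvm/support.h.
 */
#if 0
static int example_fetch_guest_insn(struct cpu_user_regs *regs)
{
    unsigned char insn[16];

    /* hvm_copy() returns 0 if the guest address cannot be translated/mapped. */
    if ( !hvm_copy(insn, regs->eip, sizeof(insn), HVM_COPY_IN) )
        return 0;

    /* ... decode insn[] here ... */
    return 1;
}
#endif
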
/*
 * HVM specific printbuf. Mostly used for hvmloader chit-chat.
 */
void hvm_print_line(struct vcpu *v, const char c)
{
    int *index = &v->domain->arch.hvm_domain.pbuf_index;
    char *pbuf = v->domain->arch.hvm_domain.pbuf;

    if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
        if (*index == HVM_PBUF_SIZE-2)
            pbuf[(*index)++] = c;
        pbuf[*index] = '\0';
        printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
        *index = 0;
    } else
        pbuf[(*index)++] = c;
}

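/*
 * Illustrative sketch, not part of the original file: hvm_print_line() takes
 * one character per call, so guest-side logging from hvmloader amounts to a
 * loop of single-byte I/O writes to whichever debug port the platform I/O
 * handler registers for this function.  The port value and helper below are
 * placeholder assumptions, not taken from this file.
 */
#if 0
#define EXAMPLE_DEBUG_PORT 0xe9   /* placeholder; the real port is chosen by the registered handler */

static void example_guest_puts(const char *s)
{
    while ( *s )
        outb(*s++, EXAMPLE_DEBUG_PORT);
}
#endif
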
#if defined(__i386__)

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#define HYPERCALL(x) [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
static hvm_hypercall_t *hvm_hypercall_table[] = {
    HYPERCALL(mmu_update),
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(update_va_mapping),
    HYPERCALL(event_channel_op_compat),
    HYPERCALL(xen_version),
    HYPERCALL(grant_table_op),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};
#undef HYPERCALL

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( ring_3(pregs) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( pregs->eax >= ARRAY_SIZE(hvm_hypercall_table) ||
         !hvm_hypercall_table[pregs->eax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %d.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->eax);
        pregs->eax = -ENOSYS;
    }
    else
    {
        pregs->eax = hvm_hypercall_table[pregs->eax](
            pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
    }
}

#else /* __x86_64__ */

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    printk("not supported yet!\n");
}

#endif

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

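/*
 * Illustrative sketch, not part of the original file: from the guest side,
 * each hypercall occupies a 32-byte slot in the page initialised above, so a
 * paravirtualised driver calls into offset (nr * 32) of its own mapping of
 * that page (the hypercall_page symbol below is assumed to be that guest
 * mapping).  The macro is an assumption modelled on typical PV-on-HVM guest
 * glue, not code from this file.
 */
#if 0
#define example_hypercall2(nr, a1, a2)                                  \
({                                                                      \
    long __res;                                                         \
    asm volatile ( "call hypercall_page + %c[offset]"                   \
                   : "=a" (__res)                                       \
                   : [offset] "i" ((nr) * 32),                          \
                     "b" ((long)(a1)), "c" ((long)(a2))                 \
                   : "memory" );                                        \
    __res;                                                              \
})
#endif
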
/*
 * only called in HVM domain BSP context
 * when booting, vcpuid is always equal to apic_id
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    /* current must be HVM domain BSP */
    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
        printk("Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash_synchronous();
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) {
        printk("Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
        printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
    else {
        if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
            vcpu_wake(d->vcpu[vcpuid]);
        printk("AP %d bringup succeeded.\n", vcpuid);
    }

    xfree(ctxt);

    return rc;
}

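/*
 * Illustrative sketch, not part of the original file: the intended caller is
 * the BSP's local APIC emulation when it delivers a Startup IPI, using the
 * SIPI vector as the trampoline vector.  The handler name and its arguments
 * are assumptions for illustration only.
 */
#if 0
static void example_deliver_sipi(int target_apic_id, int sipi_vector)
{
    /* At boot, vcpu_id == apic_id (see the comment above hvm_bringup_ap). */
    if ( hvm_bringup_ap(target_apic_id, sipi_vector) != 0 )
        printk("Failed to bring up AP %d.\n", target_apic_id);
}
#endif
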
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( !d )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        if ( op == HVMOP_set_param )
        {
            rc = 0;
            d->arch.hvm_domain.params[a.index] = a.value;
        }
        else
        {
            rc = d->arch.hvm_domain.params[a.index];
        }

        put_domain(d);
        return rc;
    }

    default:
    {
        DPRINTK("Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }
    }

    return rc;
}

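/*
 * Illustrative sketch, not part of the original file: the HVMOP_set_param /
 * HVMOP_get_param interface is driven by the toolstack (or by the guest
 * itself for DOMID_SELF) with a struct xen_hvm_param, whose field names
 * match the accesses above.  The index and value below are assumptions, and
 * a real caller passes the structure through a guest handle rather than
 * directly.
 */
#if 0
    struct xen_hvm_param a = {
        .domid = DOMID_SELF,
        .index = 0,          /* assumed example index; must be < HVM_NR_PARAMS */
        .value = 0xfeed,
    };
    /* e.g. a Linux-style PV-on-HVM guest would then issue
       HYPERVISOR_hvm_op(HVMOP_set_param, &a); */
#endif
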
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */