ia64/xen-unstable

view xen/arch/x86/hvm/hvm.c @ 9016:cf1c1bb9f6d2

Bring up AP of VMX domain.
1) add INIT-SIPI-SIPI IPI sequence handling code to HVM virtual lapic
code.
2) add a new interface init_ap_context to hvm_funcs, and implement the
VMX side.
3) add a hvm generic function hvm_bringup_ap, which in turn calls
init_ap_context.

Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Feb 24 17:32:58 2006 +0100 (2006-02-24)
parents b5bb9920bf48
children 94b10faa7577
line source
1 /*
2 * hvm.c: Common hardware virtual machine abstractions.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/lib.h>
24 #include <xen/trace.h>
25 #include <xen/sched.h>
26 #include <xen/irq.h>
27 #include <xen/softirq.h>
28 #include <xen/domain.h>
29 #include <xen/domain_page.h>
30 #include <asm/current.h>
31 #include <asm/io.h>
32 #include <asm/shadow.h>
33 #include <asm/regs.h>
34 #include <asm/cpufeature.h>
35 #include <asm/processor.h>
36 #include <asm/types.h>
37 #include <asm/msr.h>
38 #include <asm/spinlock.h>
39 #include <asm/hvm/hvm.h>
40 #include <asm/hvm/support.h>
41 #include <asm/shadow.h>
42 #if CONFIG_PAGING_LEVELS >= 3
43 #include <asm/shadow_64.h>
44 #endif
45 #include <public/sched.h>
46 #include <public/hvm/ioreq.h>
47 #include <public/hvm/hvm_info_table.h>
/* Non-zero once an HVM implementation (VMX/SVM) has enabled itself. */
int hvm_enabled = 0;

/* Debug verbosity for HVM code; set on the Xen command line via "hvm_debug=". */
unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

/* Dispatch table of HVM hooks, filled in by the VMX/SVM start-of-day code. */
struct hvm_function_table hvm_funcs;
56 static void hvm_zap_mmio_range(
57 struct domain *d, unsigned long pfn, unsigned long nr_pfn)
58 {
59 unsigned long i, val = INVALID_MFN;
61 for ( i = 0; i < nr_pfn; i++ )
62 {
63 if ( pfn + i >= 0xfffff )
64 break;
66 __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
67 }
68 }
/*
 * Locate the I/O-request shared page via the guest's E820 map and map it
 * globally, stashing its VA in d->arch.hvm_domain.shared_page_va.
 * Also zaps the p2m entries of all E820_IO regions.  Any failure crashes
 * the domain synchronously.
 */
static void hvm_map_io_shared_page(struct domain *d)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;
    unsigned long gpfn = 0;

    local_flush_tlb_pge();

    /* Find the machine frame holding the guest's E820 memory map page. */
    mfn = get_mfn_from_gpfn(E820_MAP_PAGE >> PAGE_SHIFT);
    if (mfn == INVALID_MFN) {
        printk("Can not find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if (p == NULL) {
        printk("Can not map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    /* Entry count and entry array live at fixed offsets within the page. */
    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    /*
     * Scan the map: remember the gpfn of the shared page, and invalidate
     * the p2m entries of I/O regions (presumably so accesses to them trap
     * — confirm with hvm_zap_mmio_range's users).
     */
    for ( i = 0; i < e820_map_nr; i++ )
    {
        if ( e820entry[i].type == E820_SHARED_PAGE )
            gpfn = (e820entry[i].addr >> PAGE_SHIFT);
        if ( e820entry[i].type == E820_IO )
            hvm_zap_mmio_range(
                d,
                e820entry[i].addr >> PAGE_SHIFT,
                e820entry[i].size >> PAGE_SHIFT);
    }

    /* gpfn == 0 means no E820_SHARED_PAGE entry was found. */
    if ( gpfn == 0 ) {
        printk("Can not get io request shared page"
               " from E820 memory map for HVM domain.\n");
        unmap_domain_page(p);
        domain_crash_synchronous();
    }
    unmap_domain_page(p);

    /* Initialise shared page */
    mfn = get_mfn_from_gpfn(gpfn);
    if (mfn == INVALID_MFN) {
        printk("Can not find io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    /* Global mapping: the page is kept mapped, not a transient per-CPU map. */
    p = map_domain_page_global(mfn);
    if (p == NULL) {
        printk("Can not map io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }
    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
}
130 static int validate_hvm_info(struct hvm_info_table *t)
131 {
132 char signature[] = "HVM INFO";
133 uint8_t *ptr = (uint8_t *)t;
134 uint8_t sum = 0;
135 int i;
137 /* strncmp(t->signature, "HVM INFO", 8) */
138 for ( i = 0; i < 8; i++ ) {
139 if ( signature[i] != t->signature[i] ) {
140 printk("Bad hvm info signature\n");
141 return 0;
142 }
143 }
145 for ( i = 0; i < t->length; i++ )
146 sum += ptr[i];
148 return (sum == 0);
149 }
/*
 * Read the HVM info table from the guest's info page and cache
 * nr_vcpus / apic_enabled / pae_enabled in the domain structure.
 * Falls back to conservative defaults if the table fails validation;
 * crashes the domain if the page cannot be found or mapped.
 */
static void hvm_get_info(struct domain *d)
{
    unsigned char *p;
    unsigned long mfn;
    struct hvm_info_table *t;

    mfn = get_mfn_from_gpfn(HVM_INFO_PFN);
    if ( mfn == INVALID_MFN ) {
        printk("Can not get info page mfn for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL ) {
        printk("Can not map info page for HVM domain.\n");
        domain_crash_synchronous();
    }

    /* The table sits at a fixed offset within the info page. */
    t = (struct hvm_info_table *)(p + HVM_INFO_OFFSET);

    if ( validate_hvm_info(t) ) {
        d->arch.hvm_domain.nr_vcpus = t->nr_vcpus;
        d->arch.hvm_domain.apic_enabled = t->apic_enabled;
        d->arch.hvm_domain.pae_enabled = t->pae_enabled;
    } else {
        printk("Bad hvm info table\n");
        /* Safe defaults: uniprocessor, no virtual APIC, no PAE. */
        d->arch.hvm_domain.nr_vcpus = 1;
        d->arch.hvm_domain.apic_enabled = 0;
        d->arch.hvm_domain.pae_enabled = 0;
    }

    unmap_domain_page(p);
}
/*
 * One-time platform setup for an HVM domain: shadow direct map, the
 * I/O-request shared page, the HVM info table, the virtual PIC, and
 * (if the guest uses an APIC) the virtual IOAPIC.
 * Runs only on the BSP (vcpu 0) of an HVM domain; a no-op otherwise.
 */
void hvm_setup_platform(struct domain* d)
{
    struct hvm_domain *platform;

    /* Only the HVM BSP performs platform setup. */
    if ( !HVM_DOMAIN(current) || (current->vcpu_id != 0) )
        return;

    shadow_direct_map_init(d);

    /* Shared page must be mapped before the info table is consulted. */
    hvm_map_io_shared_page(d);
    hvm_get_info(d);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }
}
/*
 * PIC output callback: drive the platform interrupt-request flag.
 * Any non-zero level asserts the line (flag = 1); zero deasserts it.
 */
void pic_irq_request(int *interrupt_request, int level)
{
    *interrupt_request = (level != 0);
}
/*
 * Transfer pending PIC irq-line updates from the shared page into the
 * virtual PIC.  Bits are presumably posted by the device model — confirm
 * against the shared-page writers.  Each word is drained atomically with
 * a cmpxchg retry loop so no concurrently-posted bits are lost.
 */
void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;

    /* First drain the "clear these irq lines" word. */
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            /* volatile re-read each attempt: a writer may race us */
            irqs = *(volatile u16*)virq_line;
        } while ( (u16)cmpxchg(virq_line,irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }

    /* Then the "raise these irq lines" word. */
    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16*)virq_line;
        } while ( (u16)cmpxchg(virq_line,irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}
239 int cpu_get_interrupt(struct vcpu *v, int *type)
240 {
241 int intno;
242 struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
244 if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 ) {
245 /* set irq request if a PIC irq is still pending */
246 /* XXX: improve that */
247 pic_update_irq(s);
248 return intno;
249 }
250 /* read the irq from the PIC */
251 if ( (intno = cpu_get_pic_interrupt(v, type)) != -1 )
252 return intno;
254 return -1;
255 }
/*
 * Copy from/to guest virtual.
 *
 * Copies size bytes between the hypervisor buffer buf and guest virtual
 * address vaddr, one guest page at a time.  dir selects the direction
 * (HVM_COPY_IN: guest -> buf; otherwise buf -> guest).
 * Returns 1 on success, 0 if any page in the range has no backing mfn.
 * On failure, bytes already copied are NOT undone.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long gpa, mfn;
    char *addr;
    int count;

    /* Walk page by page: the range may straddle page boundaries. */
    while (size > 0) {
        /* Bytes remaining in the current guest page. */
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (hvm_paging_enabled(current)) {
            /* Guest paging on: translate virtual -> guest-physical first. */
            gpa = gva_to_gpa(vaddr);
            mfn = get_mfn_from_gpfn(gpa >> PAGE_SHIFT);
        } else
            /* Paging off: guest virtual == guest physical. */
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if (dir == HVM_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        /* NOTE(review): addr points into the page, not at its base;
         * assumes unmap_domain_page() masks down to the page — confirm. */
        unmap_domain_page(addr);

        vaddr += count;
        buf += count; /* GCC extension: arithmetic on void* (as on char*) */
        size -= count;
    }

    return 1;
}
297 /*
298 * HVM specific printbuf. Mostly used for hvmloader chit-chat.
299 */
300 void hvm_print_line(struct vcpu *v, const char c)
301 {
302 int *index = &v->domain->arch.hvm_domain.pbuf_index;
303 char *pbuf = v->domain->arch.hvm_domain.pbuf;
305 if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
306 if (*index == HVM_PBUF_SIZE-2)
307 pbuf[(*index)++] = c;
308 pbuf[*index] = '\0';
309 printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
310 *index = 0;
311 } else
312 pbuf[(*index)++] = c;
313 }
315 /*
316 * only called in HVM domain BSP context
317 * when booting, vcpuid is always equal to apic_id
318 */
319 int hvm_bringup_ap(int vcpuid, int trampoline_vector)
320 {
321 struct vcpu *bsp = current, *v;
322 struct domain *d = bsp->domain;
323 struct vcpu_guest_context *ctxt;
324 int rc = 0;
326 /* current must be HVM domain BSP */
327 if ( !(HVM_DOMAIN(bsp) && bsp->vcpu_id == 0) ) {
328 printk("Not calling hvm_bringup_ap from BSP context.\n");
329 domain_crash_synchronous();
330 }
332 if ( (v = d->vcpu[vcpuid]) == NULL )
333 return -ENOENT;
335 if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) {
336 printk("Failed to allocate memory in hvm_bringup_ap.\n");
337 return -ENOMEM;
338 }
340 hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);
342 LOCK_BIGLOCK(d);
343 rc = -EEXIST;
344 if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
345 rc = boot_vcpu(d, vcpuid, ctxt);
346 UNLOCK_BIGLOCK(d);
348 if ( rc != 0 )
349 printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
350 else {
351 if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
352 vcpu_wake(d->vcpu[vcpuid]);
353 printk("AP %d bringup suceeded.\n", vcpuid);
354 }
356 xfree(ctxt);
358 return rc;
359 }
361 /*
362 * Local variables:
363 * mode: C
364 * c-set-style: "BSD"
365 * c-basic-offset: 4
366 * tab-width: 4
367 * indent-tabs-mode: nil
368 * End:
369 */