
view xen/arch/ia64/vmx/vmx_init.c @ 10115:b487f4e1b09f

[IA64] Fix VTI boot

fix cset 10003: the variable end of
vmx_build_physmap_table() must also be adjusted.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Sun May 14 09:16:21 2006 -0600 (2006-05-14)
parents 8b2295822e0d
children 608ac00f4cfc
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_init.c: initialization work for vt specific domain
 * Copyright (c) 2005, Intel Corporation.
 *	Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *	Fred Yang <fred.yang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Disable double mapping
 *
 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Simplified design in first step:
 *	- One virtual environment
 *	- Domain is bound to one LP
 * Later to support guest SMP:
 *	- Need interface to handle VP scheduled to different LP
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/vmx_vcpu.h>
#include <xen/lib.h>
#include <asm/vmmu.h>
#include <public/arch-ia64.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmx.h>
#include <xen/mm.h>
#include <asm/hvm/vioapic.h>
#include <public/event_channel.h>
#include <xen/event.h>
#include <asm/vlsapic.h>

/* Global flag to identify whether Intel vmx feature is on */
u32 vmx_enabled = 0;
unsigned int opt_vmx_debug_level = 0;
static u32 vm_order;
static u64 buffer_size;
static u64 vp_env_info;
static u64 vm_buffer = 0; /* Buffer required to bring up VMX feature */
u64 __vsa_base = 0;       /* Run-time service base of VMX */

/* Check whether the vt feature is enabled or not. */
void
identify_vmx_feature(void)
{
    pal_status_t ret;
    u64 avail = 1, status = 1, control = 1;

    vmx_enabled = 0;
    /* Check VT-i feature */
    ret = ia64_pal_proc_get_features(&avail, &status, &control);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get proc features failed.\n");
        goto no_vti;
    }

    /* FIXME: do we need to check the status field, to see whether
     * PSR.vm is actually enabled? If yes, another call to
     * ia64_pal_proc_set_features may be required then.
     */
    printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n",
           avail, status, control, avail & PAL_PROC_VM_BIT);
    if (!(avail & PAL_PROC_VM_BIT)) {
        printk("No VT feature supported.\n");
        goto no_vti;
    }

    ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get vp environment info failed.\n");
        goto no_vti;
    }

    /* Does Xen have to decode the guest opcode itself? */
    if (!(vp_env_info & VP_OPCODE))
        printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
    vm_order = get_order(buffer_size);
    printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);

    vmx_enabled = 1;
no_vti:
    return;
}

/*
 * Init the virtual environment on the current LP.
 * __vsa_base indicates whether this is the first LP to be initialized
 * for the current domain.
 */
void
vmx_init_env(void)
{
    u64 status, tmp_base;

    if (!vm_buffer) {
        vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
        ASSERT(vm_buffer);
        printk("vm_buffer: 0x%lx\n", vm_buffer);
    }

    status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                                  __pa(vm_buffer),
                                  vm_buffer,
                                  &tmp_base);

    if (status != PAL_STATUS_SUCCESS) {
        printk("ia64_pal_vp_init_env failed.\n");
        return;
    }

    if (!__vsa_base)
        __vsa_base = tmp_base;
    else
        ASSERT(tmp_base == __vsa_base);
}
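
/*
 * cpuid3_t mirrors the layout of IA-64 CPUID register 3 (version
 * information): 'number' is the index of the largest implemented CPUID
 * register, followed by revision, model, family and archrev fields.
 * alloc_vpd() below uses it to cap 'number' so the guest sees only
 * CPUID registers 0-4.
 */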
typedef union {
    u64 value;
    struct {
        u64 number   : 8;
        u64 revision : 8;
        u64 model    : 8;
        u64 family   : 8;
        u64 archrev  : 8;
        u64 rv       : 24;
    };
} cpuid3_t;

/* Allocate vpd from xenheap */
static vpd_t *alloc_vpd(void)
{
    int i;
    cpuid3_t cpuid3;
    vpd_t *vpd;

    vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
    if (!vpd) {
        printk("VPD allocation failed.\n");
        return NULL;
    }

    printk("vpd base: 0x%p, vpd size:%ld\n", vpd, sizeof(vpd_t));
    memset(vpd, 0, VPD_SIZE);
    /* CPUID init */
    for (i = 0; i < 5; i++)
        vpd->vcpuid[i] = ia64_get_cpuid(i);

    /* Limit the CPUID number to 5 */
    cpuid3.value = vpd->vcpuid[3];
    cpuid3.number = 4; /* 5 - 1 */
    vpd->vcpuid[3] = cpuid3.value;

    vpd->vac.a_from_int_cr = 1;
    vpd->vac.a_to_int_cr = 1;
    vpd->vac.a_from_psr = 1;
    vpd->vac.a_from_cpuid = 1;
    vpd->vac.a_cover = 1;
    vpd->vac.a_bsw = 1;

    vpd->vdc.d_vmsw = 1;

    return vpd;
}

/* Free vpd to xenheap */
static void
free_vpd(struct vcpu *v)
{
    if (v->arch.privregs)
        free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
}

/*
 * Create a VP on an initialized VMX environment.
 */
static void
vmx_create_vp(struct vcpu *v)
{
    u64 ret;
    vpd_t *vpd = v->arch.privregs;
    u64 ivt_base;
    extern char vmx_ia64_ivt;
    /* vmx_ia64_ivt is a function pointer, so this translation is needed */
    ivt_base = (u64)&vmx_ia64_ivt;
    printk("ivt_base: 0x%lx\n", ivt_base);
    ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
    if (ret != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "ia64_pal_vp_create failed.\n");
    }
}
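
/*
 * Presumably ia64_pal_vp_create() registers this vcpu's VPD together with
 * the host VMX interruption vector table (vmx_ia64_ivt) with the
 * processor's virtualization firmware; both addresses are passed to PAL
 * in the call above.
 */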

/* Other non-context related tasks can be done in context switch */
void
vmx_save_state(struct vcpu *v)
{
    u64 status;

    /* FIXME: about setting of pal_proc_vector... time consuming */
    status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "Save vp status failed\n");
    }

    /* Need to save the KRs on domain switch, though the HV itself doesn't
     * use them.
     */
    v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
    v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
    v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
    v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
    v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
    v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
    v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
    v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}

/* Even if the guest is in physical mode, we still need such double mapping */
void
vmx_load_state(struct vcpu *v)
{
    u64 status;

    status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "Restore vp status failed\n");
    }

    ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
    ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
    ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
    ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
    ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
    /* The guest vTLB does not need to be switched explicitly, since it
     * is anchored in the vcpu */
}

/*
 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
 * is registered here.
 */
void
vmx_final_setup_guest(struct vcpu *v)
{
    vpd_t *vpd;

    free_xenheap_pages(v->arch.privregs, get_order(sizeof(mapped_regs_t)));

    vpd = alloc_vpd();
    ASSERT(vpd);

    v->arch.privregs = vpd;
    vpd->virt_env_vaddr = vm_buffer;

    /* Per-domain vTLB and VHPT implementation. For now a vmx domain sticks
     * to this solution. Maybe it can be deferred until we know the created
     * domain is a vmx one. */
#ifndef HASH_VHPT
    init_domain_tlb(v);
#endif
    /* v->arch.schedule_tail = arch_vmx_do_launch; */
    vmx_create_vp(v);

    /* Mark this vcpu as vmx */
    set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);

    /* Physical mode emulation initialization, including
     * emulation ID allocation and related memory request
     */
    physical_mode_init(v);

    vlsapic_reset(v);
    vtm_init(v);

    /* One more step to enable interrupt assist */
    set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
}

void
vmx_relinquish_vcpu_resources(struct vcpu *v)
{
    vtime_t *vtm = &(v->arch.arch_vmx.vtm);

    kill_timer(&vtm->vtm_timer);

    free_domain_tlb(v);
    free_vpd(v);
}

typedef struct io_range {
    unsigned long start;
    unsigned long size;
    unsigned long type;
} io_range_t;
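
/*
 * Guest-physical I/O ranges and the GPFN type tags used to mark them in
 * the physmap (frame buffer, low MMIO, legacy I/O, IOSAPIC, PIB).
 * vmx_build_physmap_table() tags these ranges with __assign_domain_page()
 * below, presumably so accesses to them are treated as emulated I/O
 * rather than ordinary RAM.
 */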
io_range_t io_ranges[] = {
    {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
    {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
    {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
    {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
    {PIB_START, PIB_SIZE, GPFN_PIB},
};

#define VMX_SYS_PAGES (2 + (GFW_SIZE >> PAGE_SHIFT))
#define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
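/*
 * The two "system" pages are the shared I/O request page and the xenstore
 * page mapped at IO_PAGE_START and STORE_PAGE_START at the end of
 * vmx_build_physmap_table(); the rest of VMX_SYS_PAGES covers the guest
 * firmware image (GFW_SIZE). Everything remaining in max_pages is
 * configured guest RAM (VMX_CONFIG_PAGES).
 */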

int vmx_build_physmap_table(struct domain *d)
{
    unsigned long i, j, start, tmp, end, mfn;
    struct vcpu *v = d->vcpu[0];
    struct list_head *list_ent = d->page_list.next;

    ASSERT(!d->arch.physmap_built);
    ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
    ASSERT(d->max_pages == d->tot_pages);

    /* Mark I/O ranges */
    for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
        for (j = io_ranges[i].start;
             j < io_ranges[i].start + io_ranges[i].size;
             j += PAGE_SIZE)
            __assign_domain_page(d, j, io_ranges[i].type);
    }

    /* Map normal memory below 3G */
    end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
    if (end > VGA_IO_START)
        end += VGA_IO_SIZE;
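    /*
     * The copy loop below skips the VGA I/O hole, so once the configured
     * memory reaches VGA_IO_START the upper bound has to grow by
     * VGA_IO_SIZE to keep mapping the full VMX_CONFIG_PAGES(d) worth of
     * RAM; this is the adjustment of 'end' described in this changeset.
     */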
    tmp = end < MMIO_START ? end : MMIO_START;
    for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
        if (VGA_IO_START <= i && i < VGA_IO_START + VGA_IO_SIZE)
            continue;

        mfn = page_to_mfn(list_entry(
            list_ent, struct page_info, list));
        assign_domain_page(d, i, mfn << PAGE_SHIFT);
        list_ent = mfn_to_page(mfn)->list.next;
    }
    ASSERT(list_ent != &d->page_list);
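
    /*
     * Whatever does not fit below the MMIO hole at MMIO_START (3G, per the
     * arithmetic below) is relocated above 4G: the guest-physical range
     * restarts at 4*MEM_G and extends by however much spilled past 3G.
     */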
    /* Map normal memory beyond 4G */
    if (unlikely(end > MMIO_START)) {
        start = 4 * MEM_G;
        end = start + (end - 3 * MEM_G);
        for (i = start; (i < end) &&
             (list_ent != &d->page_list); i += PAGE_SIZE) {
            mfn = page_to_mfn(list_entry(
                list_ent, struct page_info, list));
            assign_domain_page(d, i, mfn << PAGE_SHIFT);
            list_ent = mfn_to_page(mfn)->list.next;
        }
        ASSERT(list_ent != &d->page_list);
    }

    /* Map guest firmware */
    for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
         (list_ent != &d->page_list); i += PAGE_SIZE) {
        mfn = page_to_mfn(list_entry(
            list_ent, struct page_info, list));
        assign_domain_page(d, i, mfn << PAGE_SHIFT);
        list_ent = mfn_to_page(mfn)->list.next;
    }
    ASSERT(list_ent != &d->page_list);

    /* Map for shared I/O page and xenstore */
    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    assign_domain_page(d, IO_PAGE_START, mfn << PAGE_SHIFT);
    list_ent = mfn_to_page(mfn)->list.next;
    ASSERT(list_ent != &d->page_list);

    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
    list_ent = mfn_to_page(mfn)->list.next;
    ASSERT(list_ent == &d->page_list);

    d->arch.max_pfn = end >> PAGE_SHIFT;
    d->arch.physmap_built = 1;
    set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
    return 0;
}

void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
{
    ASSERT(d != dom0); /* only for non-privileged vti domain */

    if (!d->arch.physmap_built)
        vmx_build_physmap_table(d);

    d->arch.vmx_platform.shared_page_va =
        (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
    /* TEMP */
    d->arch.vmx_platform.pib_base = 0xfee00000UL;

    /* Only open one port for I/O and interrupt emulation */
    memset(&d->shared_info->evtchn_mask[0], 0xff,
           sizeof(d->shared_info->evtchn_mask));
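
    /*
     * Every event channel starts out masked here; vmx_do_launch() below
     * clears only the bit for iopacket_port(v), leaving that single port
     * open for I/O and interrupt emulation.
     */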

    /* Initialize the virtual interrupt lines */
    vmx_virq_line_init(d);

    /* Initialize iosapic model within hypervisor */
    hvm_vioapic_init(d);
}

void vmx_do_launch(struct vcpu *v)
{
    if (evtchn_bind_vcpu(iopacket_port(v), v->vcpu_id) < 0) {
        printk("VMX domain bind port %d to vcpu %d failed!\n",
               iopacket_port(v), v->vcpu_id);
        domain_crash_synchronous();
    }

    clear_bit(iopacket_port(v),
              &v->domain->shared_info->evtchn_mask[0]);

    vmx_load_all_rr(v);
}