ia64/xen-unstable

xen/arch/ia64/vmx/vmx_init.c @ 8370:2d5c57be196d

Remove some unused VTI code segments
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author  djm@kirby.fc.hp.com
date    Thu Dec 15 16:10:22 2005 -0600

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_init.c: initialization work for vt specific domain
 * Copyright (c) 2005, Intel Corporation.
 *  Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Fred Yang <fred.yang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Disable double mapping
 *
 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Simplified design in first step:
 *  - One virtual environment
 *  - Domain is bound to one LP
 * Later to support guest SMP:
 *  - Need interface to handle VP scheduled to different LP
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/vmx_vcpu.h>
#include <xen/lib.h>
#include <asm/vmmu.h>
#include <public/arch-ia64.h>
#include <public/io/ioreq.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmx.h>
#include <xen/mm.h>
#include <asm/vmx_vioapic.h>

/* Global flag to identify whether Intel vmx feature is on */
u32 vmx_enabled = 0;
unsigned int opt_vmx_debug_level = 0;
static u32 vm_order;
static u64 buffer_size;
static u64 vp_env_info;
static u64 vm_buffer = 0;   /* Buffer required to bring up VMX feature */
u64 __vsa_base = 0;         /* Run-time service base of VMX */

/* Check whether vt feature is enabled or not. */
void
identify_vmx_feature(void)
{
    pal_status_t ret;
    u64 avail = 1, status = 1, control = 1;

    vmx_enabled = 0;
    /* Check VT-i feature */
    ret = ia64_pal_proc_get_features(&avail, &status, &control);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get proc features failed.\n");
        goto no_vti;
    }

    /* FIXME: do we need to check the status field, to see whether
     * PSR.vm is actually enabled? If yes, another call to
     * ia64_pal_proc_set_features may be required then.
     */
    printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n",
           avail, status, control, avail & PAL_PROC_VM_BIT);
    if (!(avail & PAL_PROC_VM_BIT)) {
        printk("No VT feature supported.\n");
        goto no_vti;
    }

    ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get vp environment info failed.\n");
        goto no_vti;
    }

    /* Does Xen have the ability to decode itself? */
    if (!(vp_env_info & VP_OPCODE))
        printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
    vm_order = get_order(buffer_size);
    printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);

    vmx_enabled = 1;
no_vti:
    return;
}
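
/*
 * Rough bring-up order, inferred from this file rather than from its
 * callers: identify_vmx_feature() runs once at boot, vmx_init_env() once
 * per logical processor, then vmx_final_setup_guest() for the first vcpu
 * of a VTi domain, with vmx_alloc_contig_pages() and vmx_setup_platform()
 * invoked from the domain building path.
 */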

/*
 * Init virtual environment on current LP
 * vsa_base is the indicator whether it's the first LP to be initialized
 * for the current domain.
 */
void
vmx_init_env(void)
{
    u64 status, tmp_base;

    if (!vm_buffer) {
        vm_buffer = (u64)alloc_xenheap_pages(vm_order);
        ASSERT(vm_buffer);
        printk("vm_buffer: 0x%lx\n", vm_buffer);
    }

    status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                                  __pa(vm_buffer),
                                  vm_buffer,
                                  &tmp_base);

    if (status != PAL_STATUS_SUCCESS) {
        printk("ia64_pal_vp_init_env failed.\n");
        return;
    }

    if (!__vsa_base)
        __vsa_base = tmp_base;
    else
        ASSERT(tmp_base == __vsa_base);  /* every LP should report the same VSA base */
}
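
/*
 * Layout of CPUID register 3 (processor version information): number,
 * revision, model, family and archrev are 8 bits each, the rest reserved.
 * alloc_vpd() below uses it to report at most five CPUID registers
 * (indices 0-4) to the guest.
 */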
typedef union {
    u64 value;
    struct {
        u64 number   : 8;
        u64 revision : 8;
        u64 model    : 8;
        u64 family   : 8;
        u64 archrev  : 8;
        u64 rv       : 24;
    };
} cpuid3_t;

/* Allocate vpd from xenheap */
static vpd_t *alloc_vpd(void)
{
    int i;
    cpuid3_t cpuid3;
    vpd_t *vpd;

    vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
    if (!vpd) {
        printk("VPD allocation failed.\n");
        return NULL;
    }

    printk("vpd base: 0x%lx, vpd size:%ld\n", (u64)vpd, sizeof(vpd_t));
    memset(vpd, 0, VPD_SIZE);
    /* CPUID init */
    for (i = 0; i < 5; i++)
        vpd->vcpuid[i] = ia64_get_cpuid(i);

    /* Limit the CPUID number to 5 */
    cpuid3.value = vpd->vcpuid[3];
    cpuid3.number = 4;  /* 5 - 1 */
    vpd->vcpuid[3] = cpuid3.value;

    vpd->vdc.d_vmsw = 1;
    return vpd;
}

/*
 * Create a VP on an initialized VMX environment.
 */
static void
vmx_create_vp(struct vcpu *v)
{
    u64 ret;
    vpd_t *vpd = v->arch.privregs;
    u64 ivt_base;
    extern char vmx_ia64_ivt;
    /* vmx_ia64_ivt is a code entry point, so we need this translation */
    ivt_base = (u64) &vmx_ia64_ivt;
    printk("ivt_base: 0x%lx\n", ivt_base);
    ret = ia64_pal_vp_create(vpd, ivt_base, 0);
    if (ret != PAL_STATUS_SUCCESS)
        panic("ia64_pal_vp_create failed.\n");
}

/* Other non-context-related tasks can be done during context switch */
void
vmx_save_state(struct vcpu *v)
{
    u64 status;

    /* FIXME: about setting of pal_proc_vector... time consuming */
    status = ia64_pal_vp_save(v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS)
        panic("Save vp status failed\n");

    /* Need to save KRs on domain switch, though the HV itself doesn't
     * use them.
     */
    v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
    v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
    v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
    v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
    v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
    v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
    v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
    v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}

/* Even if the guest is in physical mode, we still need such a double mapping */
void
vmx_load_state(struct vcpu *v)
{
    u64 status;

    status = ia64_pal_vp_restore(v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS)
        panic("Restore vp status failed\n");

    ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
    ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
    ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
    ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
    ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
    /* Guest vTLB is not required to be switched explicitly, since
     * it is anchored in the vcpu */
}

/*
 * Initialize VMX environment for guest. Only the 1st vp/vcpu
 * is registered here.
 */
void
vmx_final_setup_guest(struct vcpu *v)
{
    vpd_t *vpd;

    /* Allocate resources for vcpu 0 */
    //memset(&v->arch.arch_vmx, 0, sizeof(struct arch_vmx_struct));

    vpd = alloc_vpd();
    ASSERT(vpd);

    v->arch.privregs = vpd;
    vpd->virt_env_vaddr = vm_buffer;

    /* Per-domain vTLB and vhpt implementation. For now a vmx domain sticks
     * to this solution. Maybe it can be deferred until we know it was
     * created as a vmx domain */
    v->arch.vtlb = init_domain_tlb(v);

    /* v->arch.schedule_tail = arch_vmx_do_launch; */
    vmx_create_vp(v);

    /* Set this vcpu to be vmx */
    set_bit(ARCH_VMX_VMCS_LOADED, &v->arch.arch_vmx.flags);

    /* Physical mode emulation initialization, including
     * emulation ID allocation and related memory request
     */
    physical_mode_init(v);

    vlsapic_reset(v);
    vtm_init(v);

    /* One more step to enable interrupt assist */
    set_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags);
}
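
/*
 * Guest physical ranges that are emulated rather than backed by RAM.
 * vmx_alloc_contig_pages() below tags each range with its GPFN type so
 * that accesses can later be recognized as emulated I/O; the actual
 * emulation is handled elsewhere.
 */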
typedef struct io_range {
    unsigned long start;
    unsigned long size;
    unsigned long type;
} io_range_t;

io_range_t io_ranges[] = {
    {VGA_IO_START,    VGA_IO_SIZE,    GPFN_FRAME_BUFFER},
    {MMIO_START,      MMIO_SIZE,      GPFN_LOW_MMIO},
    {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
    {IO_SAPIC_START,  IO_SAPIC_SIZE,  GPFN_IOSAPIC},
    {PIB_START,       PIB_SIZE,       GPFN_PIB},
};
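
/*
 * Pages reserved on top of the configured guest memory: two system pages
 * (the shared I/O page and the xenstore page, mapped at IO_PAGE_START and
 * STORE_PAGE_START below) plus the guest firmware image of GFW_SIZE.
 * VMX_CONFIG_PAGES() is what remains of max_pages for normal guest RAM.
 */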
#define VMX_SYS_PAGES       (2 + (GFW_SIZE >> PAGE_SHIFT))
#define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)

int vmx_alloc_contig_pages(struct domain *d)
{
    unsigned int order;
    unsigned long i, j, start, end, pgnr, conf_nr;
    struct pfn_info *page;
    struct vcpu *v = d->vcpu[0];

    ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));

    /* Mark I/O ranges */
    for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
        for (j = io_ranges[i].start;
             j < io_ranges[i].start + io_ranges[i].size;
             j += PAGE_SIZE)
            map_domain_page(d, j, io_ranges[i].type);
    }

    conf_nr = VMX_CONFIG_PAGES(d);
    order = get_order_from_pages(conf_nr);
    if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
        printk("Could not allocate order=%d pages for vmx contig alloc\n",
               order);
        return -1;
    }

    /* Map normal memory below 3G */
    pgnr = page_to_pfn(page);
    end = conf_nr << PAGE_SHIFT;
    for (i = 0;
         i < (end < MMIO_START ? end : MMIO_START);
         i += PAGE_SIZE, pgnr++)
        map_domain_page(d, i, pgnr << PAGE_SHIFT);

    /* Map normal memory beyond 4G */
    if (unlikely(end > MMIO_START)) {
        start = 4 * MEM_G;
        end = start + (end - 3 * MEM_G);
        for (i = start; i < end; i += PAGE_SIZE, pgnr++)
            map_domain_page(d, i, pgnr << PAGE_SHIFT);
    }

    d->arch.max_pfn = end >> PAGE_SHIFT;

    order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
    if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
        printk("Could not allocate order=%d pages for vmx contig alloc\n",
               order);
        return -1;
    }

    /* Map guest firmware */
    pgnr = page_to_pfn(page);
    for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++)
        map_domain_page(d, i, pgnr << PAGE_SHIFT);

    if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
        printk("Could not allocate order=1 pages for vmx contig alloc\n");
        return -1;
    }

    /* Map for shared I/O page and xenstore */
    pgnr = page_to_pfn(page);
    map_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
    pgnr++;
    map_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);

    set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
    return 0;
}
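
/*
 * Set up the device-model facing pieces of a VTi domain: the shared I/O
 * page, the event channel port used for I/O request notification, the
 * virtual interrupt lines and the vIOSAPIC model inside the hypervisor.
 */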
void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
{
    shared_iopage_t *sp;

    ASSERT(d != dom0); /* only for non-privileged vti domain */
    d->arch.vmx_platform.shared_page_va =
        __va(__gpa_to_mpa(d, IO_PAGE_START));
    sp = get_sp(d);
    //memset((char *)sp, 0, PAGE_SIZE);
    /* TEMP */
    d->arch.vmx_platform.pib_base = 0xfee00000UL;

    /* Only open one port for I/O and interrupt emulation */
    memset(&d->shared_info->evtchn_mask[0], 0xff,
           sizeof(d->shared_info->evtchn_mask));
    clear_bit(iopacket_port(d), &d->shared_info->evtchn_mask[0]);

    /* Initialize the virtual interrupt lines */
    vmx_virq_line_init(d);

    /* Initialize iosapic model within hypervisor */
    vmx_vioapic_init(d);
}