direct-io.hg

view xen/arch/ia64/vmx/vmx_init.c @ 14153:a9d246105752

Accelerate IDE PIO on HVM/IA64 [3/3]

Add a buffering mechanism for IDE PIO in the hypervisor.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author kfraser@localhost.localdomain
date Tue Feb 27 15:34:55 2007 +0000 (2007-02-27)
parents 779d21cf58e7
children 93e11f6d6791
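[Note, roughly how the mechanism fits this file (a summary, not part of the patch text): a dedicated guest page is mapped into the hypervisor -- see buffered_pio_va / BUFFER_PIO_PAGE_START in vmx_setup_platform() below -- where consecutive IDE PIO transfers are accumulated for ioemu to drain, rather than exiting to the device model on every port access. The record layout is defined in the earlier patches of this series; an illustrative consumer sketch follows vmx_setup_platform() in the source.]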
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_init.c: initialization work for vt specific domain
 * Copyright (c) 2005, Intel Corporation.
 *     Kun Tian (Kevin Tian) <kevin.tian@intel.com>
 *     Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *     Fred Yang <fred.yang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * 05/08/16 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Disable double mapping
 *
 * 05/03/23 Kun Tian (Kevin Tian) <kevin.tian@intel.com>:
 * Simplified design in the first step:
 *  - One virtual environment
 *  - Domain is bound to one LP
 * Later, to support guest SMP:
 *  - Need an interface to handle a VP scheduled onto a different LP
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/pal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/vmx_vcpu.h>
#include <xen/lib.h>
#include <asm/vmmu.h>
#include <public/xen.h>
#include <public/hvm/ioreq.h>
#include <public/event_channel.h>
#include <asm/vmx_phy_mode.h>
#include <asm/vmx.h>
#include <xen/mm.h>
#include <asm/viosapic.h>
#include <xen/event.h>
#include <asm/vlsapic.h>

/* Global flag to identify whether Intel vmx feature is on */
u32 vmx_enabled = 0;
static u32 vm_order;
static u64 buffer_size;
static u64 vp_env_info;
static u64 vm_buffer = 0;    /* Buffer required to bring up VMX feature */
u64 __vsa_base = 0;          /* Run-time service base of VMX */

/* Check whether the vt feature is enabled or not. */
void
identify_vmx_feature(void)
{
    pal_status_t ret;
    u64 avail = 1, status = 1, control = 1;

    vmx_enabled = 0;
    /* Check VT-i feature */
    ret = ia64_pal_proc_get_features(&avail, &status, &control);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get proc features failed.\n");
        goto no_vti;
    }

    /* FIXME: do we need to check the status field to see whether
     * PSR.vm is actually enabled? If yes, another call to
     * ia64_pal_proc_set_features may be required then.
     */
    printk("avail:0x%lx, status:0x%lx, control:0x%lx, vm?0x%lx\n",
           avail, status, control, avail & PAL_PROC_VM_BIT);
    if (!(avail & PAL_PROC_VM_BIT)) {
        printk("No VT feature supported.\n");
        goto no_vti;
    }

    ret = ia64_pal_vp_env_info(&buffer_size, &vp_env_info);
    if (ret != PAL_STATUS_SUCCESS) {
        printk("Get vp environment info failed.\n");
        goto no_vti;
    }

    /* Does Xen have the ability to decode itself? */
    if (!(vp_env_info & VP_OPCODE))
        printk("WARNING: no opcode provided from hardware(%lx)!!!\n",
               vp_env_info);
    vm_order = get_order(buffer_size);
    printk("vm buffer size: %ld, order: %d\n", buffer_size, vm_order);

    vmx_enabled = 1;
no_vti:
    return;
}
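
/*
 * A minimal sketch of the expected boot-time flow (illustrative only;
 * the real call sites live elsewhere in the arch startup code):
 *
 *     identify_vmx_feature();      // probe PAL once, sets vmx_enabled
 *     if (vmx_enabled)
 *         vmx_init_env();          // then per-LP environment setup below
 */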

/*
 * Init the virtual environment on the current LP.
 * vsa_base indicates whether this is the first LP to be initialized
 * for the current domain.
 */
void
vmx_init_env(void)
{
    u64 status, tmp_base;

    if (!vm_buffer) {
        vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
        ASSERT(vm_buffer);
        printk("vm_buffer: 0x%lx\n", vm_buffer);
    }

    status = ia64_pal_vp_init_env(__vsa_base ? VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                                  __pa(vm_buffer),
                                  vm_buffer,
                                  &tmp_base);

    if (status != PAL_STATUS_SUCCESS) {
        printk("ia64_pal_vp_init_env failed.\n");
        return;
    }

    if (!__vsa_base)
        __vsa_base = tmp_base;
    else
        /* Every LP must report the same run-time service base. */
        ASSERT(tmp_base == __vsa_base);
}
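
/*
 * Note: the first LP to get here (__vsa_base still 0) passes
 * VP_INIT_ENV_INITALIZE (sic -- the constant is spelled this way) so that
 * PAL initializes the global environment; subsequent LPs pass VP_INIT_ENV
 * to merely register with the already-initialized environment.
 */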

typedef union {
    u64 value;
    struct {
        u64 number   : 8;
        u64 revision : 8;
        u64 model    : 8;
        u64 family   : 8;
        u64 archrev  : 8;
        u64 rv       : 24;
    };
} cpuid3_t;
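
/*
 * cpuid3_t mirrors the architected layout of CPUID register 3; its
 * 'number' field reports the index of the largest implemented CPUID
 * register. alloc_vpd() below rewrites that field to 4 so a guest never
 * asks for a CPUID register beyond the five cached in vcpuid[].
 */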

/* Allocate vpd from the xenheap */
static vpd_t *alloc_vpd(void)
{
    int i;
    cpuid3_t cpuid3;
    vpd_t *vpd;
    mapped_regs_t *mregs;

    vpd = alloc_xenheap_pages(get_order(VPD_SIZE));
    if (!vpd) {
        printk("VPD allocation failed.\n");
        return NULL;
    }

    printk(XENLOG_DEBUG "vpd base: 0x%p, vpd size:%ld\n",
           vpd, sizeof(vpd_t));
    memset(vpd, 0, VPD_SIZE);
    mregs = &vpd->vpd_low;

    /* CPUID init */
    for (i = 0; i < 5; i++)
        mregs->vcpuid[i] = ia64_get_cpuid(i);

    /* Limit the CPUID number to 5 */
    cpuid3.value = mregs->vcpuid[3];
    cpuid3.number = 4; /* 5 - 1 */
    mregs->vcpuid[3] = cpuid3.value;

    mregs->vac.a_from_int_cr = 1;
    mregs->vac.a_to_int_cr = 1;
    mregs->vac.a_from_psr = 1;
    mregs->vac.a_from_cpuid = 1;
    mregs->vac.a_cover = 1;
    mregs->vac.a_bsw = 1;
    mregs->vac.a_int = 1;
    mregs->vdc.d_vmsw = 1;

    return vpd;
}

/* Free the vpd back to the xenheap */
static void
free_vpd(struct vcpu *v)
{
    if ( v->arch.privregs )
        free_xenheap_pages(v->arch.privregs, get_order(VPD_SIZE));
}

/*
 * Create a VP on the initialized VMX environment.
 */
static void
vmx_create_vp(struct vcpu *v)
{
    u64 ret;
    vpd_t *vpd = (vpd_t *)v->arch.privregs;
    u64 ivt_base;
    extern char vmx_ia64_ivt;
    /* vmx_ia64_ivt is a function pointer, so this translation is needed */
    ivt_base = (u64)&vmx_ia64_ivt;
    printk(XENLOG_DEBUG "ivt_base: 0x%lx\n", ivt_base);
    ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
    if (ret != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "ia64_pal_vp_create failed.\n");
    }
}

/* Other non-context related tasks can be done in context switch */
void
vmx_save_state(struct vcpu *v)
{
    u64 status;

    /* FIXME: about setting of pal_proc_vector... time consuming */
    status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "Save vp status failed\n");
    }

    /* The KRs need to be saved on a domain switch, even though the HV
     * itself doesn't use them.
     */
    v->arch.arch_vmx.vkr[0] = ia64_get_kr(0);
    v->arch.arch_vmx.vkr[1] = ia64_get_kr(1);
    v->arch.arch_vmx.vkr[2] = ia64_get_kr(2);
    v->arch.arch_vmx.vkr[3] = ia64_get_kr(3);
    v->arch.arch_vmx.vkr[4] = ia64_get_kr(4);
    v->arch.arch_vmx.vkr[5] = ia64_get_kr(5);
    v->arch.arch_vmx.vkr[6] = ia64_get_kr(6);
    v->arch.arch_vmx.vkr[7] = ia64_get_kr(7);
}

/* Even if the guest is in physical mode, we still need this double mapping */
void
vmx_load_state(struct vcpu *v)
{
    u64 status;

    status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
    if (status != PAL_STATUS_SUCCESS) {
        panic_domain(vcpu_regs(v), "Restore vp status failed\n");
    }

    ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
    ia64_set_kr(2, v->arch.arch_vmx.vkr[2]);
    ia64_set_kr(3, v->arch.arch_vmx.vkr[3]);
    ia64_set_kr(4, v->arch.arch_vmx.vkr[4]);
    ia64_set_kr(5, v->arch.arch_vmx.vkr[5]);
    ia64_set_kr(6, v->arch.arch_vmx.vkr[6]);
    ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
    /* The guest vTLB need not be switched explicitly, since it is
     * anchored in the vcpu */
}
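
/*
 * vmx_save_state()/vmx_load_state() are meant to run as a pair from the
 * context-switch path, e.g. (illustrative only):
 *
 *     vmx_save_state(prev);    // PAL vp_save, stash prev's KRs
 *     vmx_load_state(next);    // PAL vp_restore, reload next's KRs
 */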

static void vmx_create_event_channels(struct vcpu *v)
{
    vcpu_iodata_t *p;
    struct vcpu *o;

    if (v->vcpu_id == 0) {
        /* Ugly: create event channels for every vcpu when vcpu 0
           starts, so that they're available for ioemu to bind to. */
        for_each_vcpu(v->domain, o) {
            p = get_vio(v->domain, o->vcpu_id);
            o->arch.arch_vmx.xen_port = p->vp_eport =
                alloc_unbound_xen_event_channel(o, 0);
            gdprintk(XENLOG_INFO, "Allocated port %ld for hvm.\n",
                     o->arch.arch_vmx.xen_port);
        }
    }
}
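
/* The ports allocated above are left unbound on the Xen side; the device
 * model in dom0 is expected to bind to them (the usual interdomain bind)
 * to receive notifications for this domain's I/O requests. */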

static void vmx_release_assist_channel(struct vcpu *v)
{
    free_xen_event_channel(v, v->arch.arch_vmx.xen_port);
}

/*
 * Initialize the VMX environment for the guest. Only the 1st vp/vcpu
 * is registered here.
 */
int
vmx_final_setup_guest(struct vcpu *v)
{
    vpd_t *vpd;

    vpd = alloc_vpd();
    ASSERT(vpd);

    v->arch.privregs = (mapped_regs_t *)vpd;
    vpd->vpd_low.virt_env_vaddr = vm_buffer;

    /* Per-domain vTLB and VHPT implementation. For now a vmx domain
     * sticks to this solution. Maybe it can be deferred until we know
     * the created domain is a vmx one. */
#ifndef HASH_VHPT
    if (init_domain_tlb(v) != 0)
        return -1;
#endif
    vmx_create_event_channels(v);

    /* v->arch.schedule_tail = arch_vmx_do_launch; */
    vmx_create_vp(v);

    /* Physical mode emulation initialization, including
     * emulation ID allocation and related memory requests
     */
    physical_mode_init(v);

    vlsapic_reset(v);
    vtm_init(v);

    /* Set up the guest's indicator for a VTi domain */
    set_bit(ARCH_VMX_DOMAIN, &v->arch.arch_vmx.flags);

    return 0;
}

void
vmx_relinquish_guest_resources(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu(d, v)
        vmx_release_assist_channel(v);
}

void
vmx_relinquish_vcpu_resources(struct vcpu *v)
{
    vtime_t *vtm = &(v->arch.arch_vmx.vtm);

    kill_timer(&vtm->vtm_timer);

    free_domain_tlb(v);
    free_vpd(v);
}

typedef struct io_range {
    unsigned long start;
    unsigned long size;
    unsigned long type;
} io_range_t;

static const io_range_t io_ranges[] = {
    {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
    {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
    {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
    {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
    {PIB_START, PIB_SIZE, GPFN_PIB},
};
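
/* Example: with the table above, a guest access falling inside
 * [VGA_IO_START, VGA_IO_START + VGA_IO_SIZE) hits a P2M entry tagged
 * GPFN_FRAME_BUFFER, so the fault path can classify it as frame-buffer
 * I/O without a range lookup. */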

// The P2M table is built in libxc/ia64/xc_ia64_hvm_build.c @ setup_guest(),
// so only mark the I/O memory space here.
static void vmx_build_io_physmap_table(struct domain *d)
{
    unsigned long i, j;

    /* Mark I/O ranges */
    for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
        for (j = io_ranges[i].start;
             j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE)
            (void)__assign_domain_page(d, j, io_ranges[i].type,
                                       ASSIGN_writable);
    }
}

void vmx_setup_platform(struct domain *d)
{
    ASSERT(d != dom0); /* only for a non-privileged vti domain */

    vmx_build_io_physmap_table(d);

    d->arch.vmx_platform.shared_page_va =
        (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
    /* For buffered IO requests. */
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    d->arch.hvm_domain.buffered_io_va =
        (unsigned long)__va(__gpa_to_mpa(d, BUFFER_IO_PAGE_START));
    d->arch.hvm_domain.buffered_pio_va =
        (unsigned long)__va(__gpa_to_mpa(d, BUFFER_PIO_PAGE_START));
    /* TEMP */
    d->arch.vmx_platform.pib_base = 0xfee00000UL;

    d->arch.sal_data = xmalloc(struct xen_sal_data);

    /* Only open one port for I/O and interrupt emulation */
    memset(&d->shared_info->evtchn_mask[0], 0xff,
           sizeof(d->shared_info->evtchn_mask));

    /* Initialize the iosapic model within the hypervisor */
    viosapic_init(d);
}
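
/*
 * buffered_pio_va above is the hypervisor-side mapping of the page this
 * changeset uses to batch IDE PIO. A hypothetical consumer loop (names
 * and layout are illustrative only; the real structures are defined in
 * the earlier patches of this series):
 *
 *     struct pio_buf { uint32_t head, tail; uint8_t data[]; };
 *     struct pio_buf *b = (void *)d->arch.hvm_domain.buffered_pio_va;
 *     while (b->tail != b->head)
 *         consume(b->data[b->tail++]);
 */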

void vmx_do_launch(struct vcpu *v)
{
    vmx_load_all_rr(v);
}