ia64/xen-unstable

view xen/arch/ia64/vmx_process.c @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents 649cd37aa1ab
children a83ac0806d6b
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_process.c: handling VMX architecture-related VM exits
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
//#include <asm/ldt.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/ia64_int.h>
#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
#include <asm/vmx_vcpu.h>
#include <asm/kregs.h>
#include <asm/vmx.h>
#include <asm/vmx_mm_def.h>
#include <xen/mm.h>
/* Reset all PSR fields to 0, except up, mfl, mfh, pk, dt, rt, mc, it. */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034

extern struct ia64_sal_retval pal_emulator_static(UINT64);
extern struct ia64_sal_retval sal_emulator(UINT64, UINT64, UINT64, UINT64,
                                           UINT64, UINT64, UINT64, UINT64);
extern void rnat_consumption(VCPU *vcpu);
#define DOMN_PAL_REQUEST 0x110000
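
/*
 * Break-instruction handler for VMX domains.  A break with the domain's
 * breakimm value is a firmware hypercall (PAL/SAL/EFI emulation), a break
 * with DOMN_PAL_REQUEST requests PAL emulation directly, and any other
 * break is reflected back to the guest as a break fault (vector 11,
 * IVT offset 0x2c00).
 */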
IA64FAULT
vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    static int first_time = 1;
    struct domain *d = (struct domain *) current->domain;
    struct vcpu *v = (struct vcpu *) current;
    extern unsigned long running_on_sim;
    unsigned long i, sal_param[8];

#if 0
    if (first_time) {
        if (platform_is_hp_ski()) running_on_sim = 1;
        else running_on_sim = 0;
        first_time = 0;
    }
    if (iim == 0x80001 || iim == 0x80002) {  //FIXME: don't hardcode constant
        if (running_on_sim) do_ssc(vcpu_get_gr(current,36), regs);
        else do_ssc(vcpu_get_gr(current,36), regs);
    }
#endif
    if (iim == d->arch.breakimm) {
        struct ia64_sal_retval x;
        switch (regs->r2) {
        case FW_HYPERCALL_PAL_CALL:
            //printf("*** PAL hypercall: index=%d\n", regs->r28);
            //FIXME: This should call a C routine
            x = pal_emulator_static(VMX_VPD(v, vgr[12]));
            regs->r8 = x.status; regs->r9 = x.v0;
            regs->r10 = x.v1; regs->r11 = x.v2;
#if 0
            if (regs->r8)
                printk("Failed vpal emulation, with index:0x%lx\n",
                       VMX_VPD(v, vgr[12]));
#endif
            break;
        case FW_HYPERCALL_SAL_CALL:
            for (i = 0; i < 8; i++)
                vmx_vcpu_get_gr(v, 32+i, &sal_param[i]);
            x = sal_emulator(sal_param[0], sal_param[1],
                             sal_param[2], sal_param[3],
                             sal_param[4], sal_param[5],
                             sal_param[6], sal_param[7]);
            regs->r8 = x.status; regs->r9 = x.v0;
            regs->r10 = x.v1; regs->r11 = x.v2;
#if 0
            if (regs->r8)
                printk("Failed vsal emulation, with index:0x%lx\n",
                       sal_param[0]);
#endif
            break;
        case FW_HYPERCALL_EFI_RESET_SYSTEM:
            printf("efi.reset_system called ");
            if (current->domain == dom0) {
                printf("(by dom0)\n ");
                (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
            }
            printf("(not supported for non-0 domain)\n");
            regs->r8 = EFI_UNSUPPORTED;
            break;
        case FW_HYPERCALL_EFI_GET_TIME:
            {
                unsigned long *tv, *tc;
                vmx_vcpu_get_gr(v, 32, &tv);
                vmx_vcpu_get_gr(v, 33, &tc);
                printf("efi_get_time(%p,%p) called...", tv, tc);
                tv = __va(translate_domain_mpaddr(tv));
                if (tc) tc = __va(translate_domain_mpaddr(tc));
                regs->r8 = (*efi.get_time)(tv, tc);
                printf("and returns %lx\n", regs->r8);
            }
            break;
        case FW_HYPERCALL_EFI_SET_TIME:
        case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
        case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
            // FIXME: need fixes in efi.h from 2.6.9
        case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
            // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
            // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
            // POINTER ARGUMENTS WILL BE VIRTUAL!!
        case FW_HYPERCALL_EFI_GET_VARIABLE:
            // FIXME: need fixes in efi.h from 2.6.9
        case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
        case FW_HYPERCALL_EFI_SET_VARIABLE:
        case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
            // FIXME: need fixes in efi.h from 2.6.9
            regs->r8 = EFI_UNSUPPORTED;
            break;
        }
#if 0
        if (regs->r8)
            printk("Failed vgfw emulation, with index:0x%lx\n",
                   regs->r2);
#endif
        vmx_vcpu_increment_iip(current);
    } else if (iim == DOMN_PAL_REQUEST) {
        pal_emul(current);
        vmx_vcpu_increment_iip(current);
    } else
        vmx_reflect_interruption(ifa, isr, iim, 11);
    return IA64_NO_FAULT;
}
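
/*
 * Offset of each interruption vector within the guest IVT, indexed by
 * vector number: the first 21 entries (0x0000-0x5000) are 0x400 bytes
 * apart, the remaining 47 are 0x100 bytes apart.
 */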
static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    0x7f00,
};
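
/*
 * Deliver an interruption to the guest: record isr and iipa in the virtual
 * control registers, plus iim for break/speculation vectors or ifa/itir/iha
 * for the others, then inject the interruption at the IVT offset given by
 * vec2off[].  A fault taken with guest PSR.ic clear is a nested fault and
 * panics, except for the data nested TLB vector (5).
 */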
void vmx_reflect_interruption(UINT64 ifa, UINT64 isr, UINT64 iim,
                              UINT64 vector)
{
    VCPU *vcpu = current;
    REGS *regs = vcpu_regs(vcpu);
    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
    if (!(vpsr & IA64_PSR_IC) && (vector != 5)) {
        panic("Guest nested fault!");
    }
    VPD_CR(vcpu, isr) = isr;
    VPD_CR(vcpu, iipa) = regs->cr_iip;
    vector = vec2off[vector];
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        VPD_CR(vcpu, iim) = iim;
    else {
        set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
    }
    inject_guest_interruption(vcpu, vector);
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
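// Runs any pending softirqs, calls vmx_intr_assist() when the
// ARCH_VMX_INTR_ASSIST flag is set, and turns a pending event-channel
// notification into a guest interrupt (currently hardwired to vector
// 0x10) before returning to the guest.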
void leave_hypervisor_tail(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_task(d)) {  // always comes from guest
        extern void vmx_dorfirfi(void);
        struct pt_regs *user_regs = vcpu_regs(current);

        if (local_softirq_pending())
            do_softirq();
        local_irq_disable();

        if (user_regs != regs)
            printk("WARNING: checking pending interrupt in nested interrupt!!!\n");

        /* A VMX domain has other interrupt sources as well, notably the
         * device model (DM). */
        if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
            vmx_intr_assist(v);

        /* FIXME: Check event pending indicator, and set
         * pending bit if necessary to inject back to guest.
         * Should be careful about window between this check
         * and above assist, since IOPACKET_PORT shouldn't be
         * injected into vmx domain.
         *
         * Now hardcode the vector as 0x10 temporarily
         */
        if (event_pending(v) && (!((v->arch.arch_vmx.in_service[0]) & (1UL << 0x10)))) {
            VPD_CR(v, irr[0]) |= 1UL << 0x10;
            v->arch.irq_new_pending = 1;
        }

        if (v->arch.irq_new_pending) {
            v->arch.irq_new_pending = 0;
            vmx_check_pending_irq(v);
        }
    }
}

extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr);

/* We came here because the H/W VHPT walker failed to find an entry. */
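/*
 * Resolution order: a miss taken in physical addressing mode (PSR.it or
 * PSR.dt clear) is satisfied directly or turned into I/O emulation;
 * otherwise the guest VTLB is searched, and on a VTLB miss the guest's
 * VHPT is consulted to decide which fault (alternate TLB, TLB, VHPT
 * translation or nested TLB) to deliver back to the guest.
 */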
IA64FAULT vmx_hpw_miss(VCPU *vcpu, u64 vec, u64 vadr)
{
    IA64_PSR vpsr;
    CACHE_LINE_TYPE type;
    u64 vhpt_adr;
    ISR misr;
    ia64_rr vrr;
    REGS *regs;
    thash_cb_t *vtlb, *vhpt;
    thash_data_t *data, me;
    vtlb = vmx_vcpu_get_vtlb(vcpu);
#ifdef VTLB_DEBUG
    check_vtlb_sanity(vtlb);
    dump_vtlb(vtlb);
#endif
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    regs = vcpu_regs(vcpu);
    misr.val = regs->cr_isr;
    /* TODO
    if (vcpu->domain->id && vec == 2 &&
        vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))) {
        emulate_ins(&v);
        return;
    }
    */

    if ((vec == 1) && (!vpsr.it)) {
        /* I-side miss taken in physical addressing mode. */
        physical_itlb_miss(vcpu, vadr);
        return IA64_FAULT;
    }
    if ((vec == 2) && (!vpsr.dt)) {
        /* D-side miss taken in physical addressing mode. */
        if (vcpu->domain != dom0 &&
            __gpfn_is_io(vcpu->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
            emulate_io_inst(vcpu, ((vadr << 1) >> 1), 4);  // UC
        } else {
            physical_dtlb_miss(vcpu, vadr);
        }
        return IA64_FAULT;
    }
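
    /* Virtual addressing mode: consult the guest VTLB first. */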
    vrr = vmx_vcpu_rr(vcpu, vadr);
    if (vec == 1) type = ISIDE_TLB;
    else if (vec == 2) type = DSIDE_TLB;
    else panic("wrong vec\n");

//    prepare_if_physical_mode(vcpu);

    if ((data = vtlb_lookup_ex(vtlb, vrr.rid, vadr, type)) != NULL) {
        if (vcpu->domain != dom0 && type == DSIDE_TLB &&
            __gpfn_is_io(vcpu->domain, data->ppn >> (PAGE_SHIFT - 12))) {
            vadr = (vadr & ((1UL << data->ps) - 1)) +
                   (data->ppn >> (data->ps - 12) << data->ps);
            emulate_io_inst(vcpu, vadr, data->ma);
            return IA64_FAULT;
        }
        if (data->ps != vrr.ps) {
            machine_tlb_insert(vcpu, data);
        }
        else {
            thash_insert(vtlb->ts->vhpt, data, vadr);
        }
    } else if (type == DSIDE_TLB) {
        if (!vhpt_enabled(vcpu, vadr, misr.rs ? RSE_REF : DATA_REF)) {
            if (vpsr.ic) {
                vmx_vcpu_set_isr(vcpu, misr.val);
                alt_dtlb(vcpu, vadr);
                return IA64_FAULT;
            } else {
                if (misr.sp) {
                    //TODO lds emulation
                    panic("Don't support speculation load");
                } else {
                    nested_dtlb(vcpu);
                    return IA64_FAULT;
                }
            }
        } else {
            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
            vrr = vmx_vcpu_rr(vcpu, vhpt_adr);
            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
            if (data) {
                if (vpsr.ic) {
                    vmx_vcpu_set_isr(vcpu, misr.val);
                    dtlb_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    if (misr.sp) {
                        //TODO lds emulation
                        panic("Don't support speculation load");
                    } else {
                        nested_dtlb(vcpu);
                        return IA64_FAULT;
                    }
                }
            } else {
                if (vpsr.ic) {
                    vmx_vcpu_set_isr(vcpu, misr.val);
                    dvhpt_fault(vcpu, vadr);
                    return IA64_FAULT;
                } else {
                    if (misr.sp) {
                        //TODO lds emulation
                        panic("Don't support speculation load");
                    } else {
                        nested_dtlb(vcpu);
                        return IA64_FAULT;
                    }
                }
            }
        }
    } else if (type == ISIDE_TLB) {
        if (!vhpt_enabled(vcpu, vadr, misr.rs ? RSE_REF : DATA_REF)) {
            if (!vpsr.ic) {
                misr.ni = 1;
            }
            vmx_vcpu_set_isr(vcpu, misr.val);
            alt_itlb(vcpu, vadr);
            return IA64_FAULT;
        } else {
            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
            vrr = vmx_vcpu_rr(vcpu, vhpt_adr);
            data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
            if (data) {
                if (!vpsr.ic) {
                    misr.ni = 1;
                }
                vmx_vcpu_set_isr(vcpu, misr.val);
                itlb_fault(vcpu, vadr);
                return IA64_FAULT;
            } else {
                if (!vpsr.ic) {
                    misr.ni = 1;
                }
                vmx_vcpu_set_isr(vcpu, misr.val);
                ivhpt_fault(vcpu, vadr);
                return IA64_FAULT;
            }
        }
    }
    /* The miss was satisfied by inserting a translation above. */
    return IA64_NO_FAULT;
}