ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_process.c @ 8918:c18c63f87b7d

[IA64] VTI: Fix two bugs

1. Vmx_check_pending_irq should not be called in vmx_vcpu_set_eoi,
because vmx_vcpu_increment_iip is called after vmx_vcpu_set_eoi. That
is, the first instruction of the guest interrupt handler would be skipped.
2. Remove the code segment that was used to send events to the VTI domain
when dom0 was a VTI domain. This is no longer needed, and would cause a
VTI domain on an SMP host to complain "Unexpected interrupt ..." .

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Fri Feb 24 13:38:26 2006 -0700 (2006-02-24)
parents 0f59ace5442c
children cfe20f41f043
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_process.c: handling VMX architecture-related VM exits
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
20 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21 */
23 #include <xen/config.h>
24 #include <xen/lib.h>
25 #include <xen/errno.h>
26 #include <xen/sched.h>
27 #include <xen/smp.h>
28 #include <asm/ptrace.h>
29 #include <xen/delay.h>
31 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
32 #include <asm/sal.h> /* FOR struct ia64_sal_retval */
34 #include <asm/system.h>
35 #include <asm/io.h>
36 #include <asm/processor.h>
37 #include <asm/desc.h>
38 //#include <asm/ldt.h>
39 #include <xen/irq.h>
40 #include <xen/event.h>
41 #include <asm/regionreg.h>
42 #include <asm/privop.h>
43 #include <asm/ia64_int.h>
44 #include <asm/debugger.h>
45 //#include <asm/hpsim_ssc.h>
46 #include <asm/dom_fw.h>
47 #include <asm/vmx_vcpu.h>
48 #include <asm/kregs.h>
49 #include <asm/vmx.h>
50 #include <asm/vmx_mm_def.h>
51 #include <asm/vmx_phy_mode.h>
52 #include <xen/mm.h>
53 #include <asm/vmx_pal.h>
54 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
55 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
58 extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
59 extern void rnat_consumption (VCPU *vcpu);
60 #define DOMN_PAL_REQUEST 0x110000
/*
 * Map from interruption vector index (0..67) to the byte offset of that
 * vector within the interruption vector table: entries 0..20 step by
 * 0x400, the remainder by 0x100 (values below).  Used by
 * vmx_reflect_interruption() to convert a vector number into the offset
 * handed to inject_guest_interruption().
 */
62 static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
63 0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
64 0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
65 0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
66 0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
67 0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
68 0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
69 0x7f00
70 };
/*
 * Reflect a fault/trap taken while the guest was running back into the
 * guest, by recording the fault state in the VPD and injecting the
 * corresponding interruption.
 *
 * ifa/isr/iim: fault state captured at the VM exit.
 * vector:      index into vec2off[] (0..67), NOT a byte offset.
 * regs:        interrupted machine context (source of cr_iip).
 *
 * Panics on a nested guest fault: guest PSR.ic clear while reflecting
 * any vector other than 5 (NOTE(review): 5 appears to be the data
 * nested TLB vector, which is legal with ic off -- confirm against the
 * ia64 IVT layout).
 */
74 void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
75 UINT64 vector,REGS *regs)
76 {
77 VCPU *vcpu = current;
78 UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
79 if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
80 panic("Guest nested fault!");
81 }
/* Record the fault state where the guest's handler will look for it. */
82 VCPU(vcpu,isr)=isr;
83 VCPU(vcpu,iipa) = regs->cr_iip;
/* Translate vector index to IVT offset before injection. */
84 vector=vec2off[vector];
/* Break and speculation vectors deliver the immediate via cr.iim; all
 * other vectors get ifa/itir/iha set from the faulting address instead. */
85 if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
86 VCPU(vcpu,iim) = iim;
87 else {
88 set_ifa_itir_iha(vcpu,ifa,1,1,1);
89 }
90 inject_guest_interruption(vcpu, vector);
91 }
/*
 * Handle a `break' instruction taken from a VMX guest.
 *
 * Three cases, keyed on the break immediate `iim':
 *   - d->arch.breakimm:   firmware hypercall; dispatch on regs->r2
 *                         (PAL call, SAL call, or one of the EFI
 *                         runtime-service stubs) and step over the break.
 *   - DOMN_PAL_REQUEST:   guest PAL request; emulate and step over.
 *   - anything else:      reflect the break back into the guest as
 *                         vector 11 (break fault); iim == 0 inside the
 *                         kernel is treated as a bug check first.
 *
 * Returns IA64_NO_FAULT in all cases.
 */
93 IA64FAULT
94 vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
95 {
96 struct domain *d = (struct domain *) current->domain;
97 struct vcpu *v = (struct vcpu *) current;
98 unsigned long i, sal_param[8];
100 #if 0
101 if (first_time) {
102 if (platform_is_hp_ski()) running_on_sim = 1;
103 else running_on_sim = 0;
104 first_time = 0;
105 }
106 if (iim == 0x80001 || iim == 0x80002) { //FIXME: don't hardcode constant
107 if (running_on_sim) do_ssc(vcpu_get_gr_nat(current,36), regs);
108 else do_ssc(vcpu_get_gr_nat(current,36), regs);
109 }
110 #endif
111 #ifdef CRASH_DEBUG
/* Hypervisor-side breakpoints (iim 0 or CDB_BREAK_NUM in VMM code)
 * drop into the crash debugger rather than being treated as guest
 * hypercalls. */
112 if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
113 IS_VMM_ADDRESS(regs->cr_iip)) {
114 if (iim == 0)
115 show_registers(regs);
116 debugger_trap_fatal(0 /* don't care */, regs);
117 } else
118 #endif
/* Firmware hypercall: function selector is in r2, arguments in the
 * guest's stacked/virtual registers; results go back in r8..r11. */
119 if (iim == d->arch.breakimm) {
120 struct ia64_pal_retval y;
121 struct sal_ret_values x;
122 switch (regs->r2) {
123 case FW_HYPERCALL_PAL_CALL:
124 //printf("*** PAL hypercall: index=%d\n",regs->r28);
125 //FIXME: This should call a C routine
126 y = pal_emulator_static(VCPU(v, vgr[12]));
127 regs->r8 = y.status; regs->r9 = y.v0;
128 regs->r10 = y.v1; regs->r11 = y.v2;
129 #if 0
130 if (regs->r8)
131 printk("Failed vpal emulation, with index:0x%lx\n",
132 VCPU(v, vgr[12]));
133 #endif
134 break;
135 case FW_HYPERCALL_SAL_CALL:
/* SAL takes 8 arguments, passed in guest r32..r39. */
136 for (i = 0; i < 8; i++)
137 vcpu_get_gr_nat(v, 32+i, &sal_param[i]);
138 x = sal_emulator(sal_param[0], sal_param[1],
139 sal_param[2], sal_param[3],
140 sal_param[4], sal_param[5],
141 sal_param[6], sal_param[7]);
142 regs->r8 = x.r8; regs->r9 = x.r9;
143 regs->r10 = x.r10; regs->r11 = x.r11;
144 #if 0
145 if (regs->r8)
146 printk("Failed vsal emulation, with index:0x%lx\n",
147 sal_param[0]);
148 #endif
149 break;
150 case FW_HYPERCALL_EFI_RESET_SYSTEM:
/* Only dom0 may actually reset the machine; other domains get
 * EFI_UNSUPPORTED back. */
151 printf("efi.reset_system called ");
152 if (current->domain == dom0) {
153 printf("(by dom0)\n ");
154 (*efi.reset_system)(EFI_RESET_WARM,0,0,NULL);
155 }
156 printf("(not supported for non-0 domain)\n");
157 regs->r8 = EFI_UNSUPPORTED;
158 break;
159 case FW_HYPERCALL_EFI_GET_TIME:
160 {
161 unsigned long *tv, *tc;
/* Guest passes machine-physical pointers in r32/r33; translate
 * them to hypervisor-virtual before calling the real EFI. */
162 vcpu_get_gr_nat(v, 32, (u64 *)&tv);
163 vcpu_get_gr_nat(v, 33, (u64 *)&tc);
164 printf("efi_get_time(%p,%p) called...",tv,tc);
165 tv = __va(translate_domain_mpaddr((unsigned long)tv));
166 if (tc) tc = __va(translate_domain_mpaddr((unsigned long)tc));
167 regs->r8 = (*efi.get_time)((efi_time_t *)tv,(efi_time_cap_t *)tc);
168 printf("and returns %lx\n",regs->r8);
169 }
170 break;
/* Remaining EFI runtime services are unimplemented stubs. */
171 case FW_HYPERCALL_EFI_SET_TIME:
172 case FW_HYPERCALL_EFI_GET_WAKEUP_TIME:
173 case FW_HYPERCALL_EFI_SET_WAKEUP_TIME:
174 // FIXME: need fixes in efi.h from 2.6.9
175 case FW_HYPERCALL_EFI_SET_VIRTUAL_ADDRESS_MAP:
176 // FIXME: WARNING!! IF THIS EVER GETS IMPLEMENTED
177 // SOME OF THE OTHER EFI EMULATIONS WILL CHANGE AS
178 // POINTER ARGUMENTS WILL BE VIRTUAL!!
179 case FW_HYPERCALL_EFI_GET_VARIABLE:
180 // FIXME: need fixes in efi.h from 2.6.9
181 case FW_HYPERCALL_EFI_GET_NEXT_VARIABLE:
182 case FW_HYPERCALL_EFI_SET_VARIABLE:
183 case FW_HYPERCALL_EFI_GET_NEXT_HIGH_MONO_COUNT:
184 // FIXME: need fixes in efi.h from 2.6.9
185 regs->r8 = EFI_UNSUPPORTED;
186 break;
187 }
188 #if 0
189 if (regs->r8)
190 printk("Failed vgfw emulation, with index:0x%lx\n",
191 regs->r2);
192 #endif
/* Step the guest past the break instruction. */
193 vmx_vcpu_increment_iip(current);
194 }else if(iim == DOMN_PAL_REQUEST){
195 pal_emul(current);
196 vmx_vcpu_increment_iip(current);
197 } else {
/* Not a hypercall: reflect the break fault (vector index 11 ->
 * IA64_BREAK_VECTOR via vec2off) back into the guest. */
198 if (iim == 0)
199 die_if_kernel("bug check", regs, iim);
200 vmx_reflect_interruption(ifa,isr,iim,11,regs);
201 }
202 return IA64_NO_FAULT;
203 }
/*
 * Copy the interrupted context's banked registers r16-r31 into the VPD:
 * into vgr[] when the guest's PSR.bn says bank 1 is active, otherwise
 * into vbgr[].  For the bank-1 case the inline asm also extracts the 16
 * NaT bits for r16-r31 from regs->eml_unat (starting at
 * IA64_PT_REGS_R16_SLOT) and deposits them into the low 16 bits of the
 * VPD vnat word.  NOTE(review): the equivalent NaT propagation for the
 * bank-0 (vbgr/vbnat) path is commented out below -- confirm whether
 * that is intentional.
 */
206 void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
207 {
208 unsigned long i=0UL, * src,* dst, *sunat, *dunat;
209 IA64_PSR vpsr;
210 src=&regs->r16;
211 sunat=&regs->eml_unat;
212 vpsr.val = vmx_vcpu_get_psr(v);
213 if(vpsr.bn){
214 dst = &VCPU(v, vgr[0]);
215 dunat =&VCPU(v, vnat);
/* extr.u pulls the 16 NaT bits out of eml_unat; dep merges them into
 * bits 0..15 of *dunat; st8 writes the result back. */
216 __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
217 dep %2 = %0, %2, 0, 16;; \
218 st8 [%3] = %2;;"
219 ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
221 }else{
222 dst = &VCPU(v, vbgr[0]);
223 // dunat =&VCPU(v, vbnat);
224 // __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
225 // dep %2 = %0, %2, 16, 16;;
226 // st8 [%3] = %2;;"
227 // ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
229 }
/* Copy the 16 banked GRs themselves. */
230 for(i=0; i<16; i++)
231 *dst++ = *src++;
232 }
235 // ONLY gets called from ia64_leave_kernel
236 // ONLY call with interrupts disabled?? (else might miss one?)
237 // NEVER successful if already reflecting a trap/fault because psr.i==0
/*
 * Last hypervisor work before resuming a guest: drain softirqs, run the
 * device-model interrupt assist if requested, and inject any newly
 * pending virtual interrupt.  Interrupts are disabled after the softirq
 * drain so no new pending state can slip in between the check and the
 * return to the guest.
 */
238 void leave_hypervisor_tail(struct pt_regs *regs)
239 {
240 struct domain *d = current->domain;
241 struct vcpu *v = current;
242 // FIXME: Will this work properly if doing an RFI???
243 if (!is_idle_domain(d) ) { // always comes from guest
244 extern void vmx_dorfirfi(void);
245 struct pt_regs *user_regs = vcpu_regs(current);
246 if (local_softirq_pending())
247 do_softirq();
/* Close the window: no interrupts between the checks below and the
 * actual return to guest context. */
248 local_irq_disable();
250 if (user_regs != regs)
251 printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
253 /* VMX Domain N has other interrupt source, saying DM */
254 if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
255 vmx_intr_assist(v);
257 /* FIXME: Check event pending indicator, and set
258 * pending bit if necessary to inject back to guest.
259 * Should be careful about window between this check
260 * and above assist, since IOPACKET_PORT shouldn't be
261 * injected into vmx domain.
262 *
263 * Now hardcode the vector as 0x10 temporarily
264 */
265 // if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
266 // VCPU(v, irr[0]) |= 1UL << 0x10;
267 // v->arch.irq_new_pending = 1;
268 // }
/* A virtual interrupt became pending since we entered: evaluate and
 * inject it now, before resuming the guest. */
270 if ( v->arch.irq_new_pending ) {
271 v->arch.irq_new_pending = 0;
272 vmx_check_pending_irq(v);
273 }
274 // if (VCPU(v,vac).a_bsw){
275 // save_banked_regs_to_vpd(v,regs);
276 // }
278 }
279 }
281 extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
/*
 * Fixup for a faulting speculative load (ld.s): set psr.ed in the
 * interrupted context so the fault is deferred to the guest instead of
 * being serviced here (NOTE(review): with ed set the load presumably
 * completes with a NaT result -- confirm against the ia64
 * exception-deferral rules).  Returns IA64_FAULT.
 */
283 static int vmx_handle_lds(REGS* regs)
284 {
285 regs->cr_ipsr |=IA64_PSR_ED;
286 return IA64_FAULT;
287 }
289 /* We came here because the H/W VHPT walker failed to find an entry */
/*
 * Resolve a TLB miss for a VMX guest after the hardware VHPT walker
 * failed.  vadr is the faulting address, vec selects the miss type
 * (1 = instruction side, 2 = data side), regs is the interrupted
 * context.
 *
 * Outline:
 *   1. Guest in physical mode and address in the identity region:
 *      service directly (with I/O emulation for non-dom0 I/O space).
 *   2. Hit in the software vTLB: insert into the machine TLB (or the
 *      machine VHPT when the page sizes match), again emulating I/O
 *      accesses for non-dom0 domains.
 *   3. Miss in the vTLB: reflect the appropriate fault to the guest
 *      (alt-tlb, tlb, or vhpt fault depending on whether the guest's
 *      VHPT covers the address and whether its walker data is present),
 *      with the nested-dtlb / speculative-load (psr.ic clear) special
 *      cases on the data side.
 *
 * Returns IA64_FAULT when something was injected/emulated and
 * IA64_NO_FAULT when nothing needed doing.
 */
290 IA64FAULT
291 vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
292 {
293 IA64_PSR vpsr;
294 CACHE_LINE_TYPE type=ISIDE_TLB;
295 u64 vhpt_adr, gppa;
296 ISR misr;
297 ia64_rr vrr;
298 // REGS *regs;
299 thash_cb_t *vtlb;
300 thash_data_t *data;
301 VCPU *v = current;
302 vtlb=vmx_vcpu_get_vtlb(v);
303 #ifdef VTLB_DEBUG
304 check_vtlb_sanity(vtlb);
305 dump_vtlb(vtlb);
306 #endif
307 vpsr.val = vmx_vcpu_get_psr(v);
308 misr.val=VMX(v,cr_isr);
310 /* TODO
311 if(v->domain->id && vec == 2 &&
312 vpsr.dt == 0 && is_gpa_io(MASK_PMA(vaddr))){
313 emulate_ins(&v);
314 return;
315 }
316 */
/* Case 1: guest physical mode.  The vadr<<1>>62 test checks that the
 * top (region) bits of the address are zero. */
317 if(is_physical_mode(v)&&(!(vadr<<1>>62))){
318 if(vec==1){
319 physical_itlb_miss(v, vadr);
320 return IA64_FAULT;
321 }
322 if(vec==2){
/* Non-dom0 access to guest I/O space is emulated, not mapped. */
323 if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
324 emulate_io_inst(v,((vadr<<1)>>1),4); // UC
325 }else{
326 physical_dtlb_miss(v, vadr);
327 }
328 return IA64_FAULT;
329 }
330 }
331 vrr = vmx_vcpu_rr(v, vadr);
332 if(vec == 1) type = ISIDE_TLB;
333 else if(vec == 2) type = DSIDE_TLB;
334 else panic("wrong vec\n");
336 // prepare_if_physical_mode(v);
/* Case 2: the guest mapping exists in the software vTLB. */
338 if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
/* Reconstruct the guest-physical address from the vTLB entry. */
339 gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
340 if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
341 emulate_io_inst(v, gppa, data->ma);
342 return IA64_FAULT;
343 }
/* Page-size mismatch with the region register forces a machine-TLB
 * insert; otherwise the machine VHPT can hold the entry. */
345 if ( data->ps != vrr.ps ) {
346 machine_tlb_insert(v, data);
347 }
348 else {
349 thash_insert(vtlb->ts->vhpt,data,vadr);
350 }
/* Case 3a: data-side miss not in the vTLB. */
351 }else if(type == DSIDE_TLB){
352 if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
/* Guest VHPT does not cover this reference: alternate dtlb fault,
 * unless psr.ic is off (speculative load -> defer; otherwise
 * nested dtlb fault). */
353 if(vpsr.ic){
354 vcpu_set_isr(v, misr.val);
355 alt_dtlb(v, vadr);
356 return IA64_FAULT;
357 } else{
358 if(misr.sp){
359 //TODO lds emulation
360 //panic("Don't support speculation load");
361 return vmx_handle_lds(regs);
362 }else{
363 nested_dtlb(v);
364 return IA64_FAULT;
365 }
366 }
367 } else{
/* Guest VHPT covers the address: does the walker's VHPT line
 * itself have a translation? */
368 vmx_vcpu_thash(v, vadr, &vhpt_adr);
369 vrr=vmx_vcpu_rr(v,vhpt_adr);
370 data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
371 if(data){
/* VHPT line mapped -> plain dtlb fault to the guest. */
372 if(vpsr.ic){
373 vcpu_set_isr(v, misr.val);
374 dtlb_fault(v, vadr);
375 return IA64_FAULT;
376 }else{
377 if(misr.sp){
378 //TODO lds emulation
379 //panic("Don't support speculation load");
380 return vmx_handle_lds(regs);
381 }else{
382 nested_dtlb(v);
383 return IA64_FAULT;
384 }
385 }
386 }else{
/* VHPT line unmapped -> data VHPT fault to the guest. */
387 if(vpsr.ic){
388 vcpu_set_isr(v, misr.val);
389 dvhpt_fault(v, vadr);
390 return IA64_FAULT;
391 }else{
392 if(misr.sp){
393 //TODO lds emulation
394 //panic("Don't support speculation load");
395 return vmx_handle_lds(regs);
396 }else{
397 nested_dtlb(v);
398 return IA64_FAULT;
399 }
400 }
401 }
402 }
/* Case 3b: instruction-side miss not in the vTLB.  misr.ni is set
 * when psr.ic is off so the guest sees the nested-interruption bit. */
403 }else if(type == ISIDE_TLB){
404 if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
405 if(!vpsr.ic){
406 misr.ni=1;
407 }
408 vcpu_set_isr(v, misr.val);
409 alt_itlb(v, vadr);
410 return IA64_FAULT;
411 } else{
412 vmx_vcpu_thash(v, vadr, &vhpt_adr);
413 vrr=vmx_vcpu_rr(v,vhpt_adr);
414 data = vtlb_lookup_ex(vtlb, vrr.rid, vhpt_adr, DSIDE_TLB);
415 if(data){
416 if(!vpsr.ic){
417 misr.ni=1;
418 }
419 vcpu_set_isr(v, misr.val);
420 itlb_fault(v, vadr);
421 return IA64_FAULT;
422 }else{
423 if(!vpsr.ic){
424 misr.ni=1;
425 }
426 vcpu_set_isr(v, misr.val);
427 ivhpt_fault(v, vadr);
428 return IA64_FAULT;
429 }
430 }
431 }
432 return IA64_NO_FAULT;
433 }