ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_process.c @ 10695:6703fed8870f

[IA64] enable acceleration of external interrupt

This patch enables acceleration of external interrupts,
as described in the VT-i specification.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Jul 12 13:20:15 2006 -0600 (2006-07-12)
parents f24993f27cc4
children 4834d1e8f26e
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_process.c: handling VMX architecture-related VM exits
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
20 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21 */
23 #include <xen/config.h>
24 #include <xen/lib.h>
25 #include <xen/errno.h>
26 #include <xen/sched.h>
27 #include <xen/smp.h>
28 #include <asm/ptrace.h>
29 #include <xen/delay.h>
31 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
32 #include <asm/sal.h> /* FOR struct ia64_sal_retval */
34 #include <asm/system.h>
35 #include <asm/io.h>
36 #include <asm/processor.h>
37 #include <asm/desc.h>
38 #include <asm/vlsapic.h>
39 #include <xen/irq.h>
40 #include <xen/event.h>
41 #include <asm/regionreg.h>
42 #include <asm/privop.h>
43 #include <asm/ia64_int.h>
44 #include <asm/debugger.h>
45 //#include <asm/hpsim_ssc.h>
46 #include <asm/dom_fw.h>
47 #include <asm/vmx_vcpu.h>
48 #include <asm/kregs.h>
49 #include <asm/vmx.h>
50 #include <asm/vmmu.h>
51 #include <asm/vmx_mm_def.h>
52 #include <asm/vmx_phy_mode.h>
53 #include <xen/mm.h>
54 #include <asm/vmx_pal.h>
/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034

/* Fault helpers and the FP software-assist entry point, defined elsewhere. */
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
extern void rnat_consumption (VCPU *vcpu);
extern void alt_itlb (VCPU *vcpu, u64 vadr);
extern void itlb_fault (VCPU *vcpu, u64 vadr);
extern void ivhpt_fault (VCPU *vcpu, u64 vadr);
extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr);

/* Magic break immediates used by the guest firmware to request
 * PAL/SAL procedure emulation (see vmx_ia64_handle_break). */
#define DOMN_PAL_REQUEST 0x110000
#define DOMN_SAL_REQUEST 0x110001

/* Map an interruption vector number (0..67) to its byte offset inside
 * the guest IVT; used by vmx_reflect_interruption when injecting. */
static UINT64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000, 0x1400,0x1800,
    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    0x7f00
};
/* Reflect an interruption into the current guest: translate the vector
 * number to the guest IVT offset, record ISR/IIPA (and IIM or the
 * IFA/ITIR/IHA group) in the VPD, then inject the interruption.
 * FP-SWA faults/traps are first offered to handle_fpu_swa emulation
 * and swallowed on success. */
void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
     UINT64 vector,REGS *regs)
{
    VCPU *vcpu = current;
    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
    vector=vec2off[vector];
    /* With guest PSR.ic off, only the data-nested-TLB vector may occur;
     * anything else is an unrecoverable nested fault in the guest. */
    if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
        panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
    }
    else{ // handle fpswa emulation
        // fp fault: emulation succeeded, so skip the faulting instruction
        if(vector == IA64_FP_FAULT_VECTOR && !handle_fpu_swa(1, regs, isr)){
            vmx_vcpu_increment_iip(vcpu);
            return;
        }
        //fp trap: IIP already points past the instruction, just resume
        else if(vector == IA64_FP_TRAP_VECTOR && !handle_fpu_swa(0, regs, isr)){
            return;
        }
    }
    VCPU(vcpu,isr)=isr;
    VCPU(vcpu,iipa) = regs->cr_iip;
    /* Break and speculation vectors deliver the immediate in cr.iim;
     * every other vector latches the faulting-address state instead. */
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        VCPU(vcpu,iim) = iim;
    else {
        set_ifa_itir_iha(vcpu,ifa,1,1,1);
    }
    inject_guest_interruption(vcpu, vector);
}
/* Handle a break-instruction fault taken while a VMX domain was running.
 * Kernel-mode (cpl 0) breaks with recognized immediates are hypervisor
 * services: the domain's hypercall immediate, or the PAL/SAL firmware
 * emulation requests.  Everything else is reflected back to the guest
 * as a break fault (vector 11).  Always returns IA64_NO_FAULT. */
IA64FAULT
vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

#ifdef CRASH_DEBUG
    /* Break 0 / debugger break from VMM text drops into the debugger. */
    if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
        IS_VMM_ADDRESS(regs->cr_iip)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    } else
#endif
    {
        if (iim == 0)
            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);

        if (!user_mode(regs)) {
            /* Allow hypercalls only when cpl = 0. */
            if (iim == d->arch.breakimm) {
                ia64_hypercall(regs);
                vmx_vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            }
            else if(iim == DOMN_PAL_REQUEST){
                pal_emul(v);
                vmx_vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            }else if(iim == DOMN_SAL_REQUEST){
                sal_emul(v);
                vmx_vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            }
        }
        /* Not a hypervisor service: reflect the break to the guest. */
        vmx_reflect_interruption(ifa,isr,iim,11,regs);
    }
    return IA64_NO_FAULT;
}
/* Copy the interrupted context's banked registers r16-r31 into the VPD:
 * into vgr[] when the guest had bank 1 selected (psr.bn), else vbgr[].
 * For bank 1 the matching 16 UNAT bits are also merged into VPD vnat. */
void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
{
    unsigned long i=0UL, * src,* dst, *sunat, *dunat;
    IA64_PSR vpsr;
    src=&regs->r16;
    sunat=&regs->eml_unat;
    vpsr.val = vmx_vcpu_get_psr(v);
    if(vpsr.bn){
        dst = &VCPU(v, vgr[0]);
        dunat =&VCPU(v, vnat);
        /* Extract the 16 NaT bits for r16-r31 from eml_unat (starting at
         * slot IA64_PT_REGS_R16_SLOT) and deposit them into the low 16
         * bits of VPD vnat, storing the result back through dunat. */
        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
                            dep %2 = %0, %2, 0, 16;; \
                            st8 [%3] = %2;;"
            ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
    }else{
        dst = &VCPU(v, vbgr[0]);
        /* NOTE(review): NaT-bit save for bank 0 is disabled below —
         * vbnat is not updated on this path; confirm this is intended. */
//        dunat =&VCPU(v, vbnat);
//        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
//                            dep %2 = %0, %2, 16, 16;;
//                            st8 [%3] = %2;;"
//            ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
    }
    /* Copy the 16 banked GRs themselves. */
    for(i=0; i<16; i++)
        *dst++ = *src++;
}
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
/* Final work before returning from the hypervisor to a VMX guest:
 * run softirqs (with irqs briefly enabled), let the device model's
 * pending interrupts be folded in, and evaluate whether a guest
 * interrupt must be injected (vmx_check_pending_irq / vhpi_detection). */
void leave_hypervisor_tail(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;
    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d) ) { // always comes from guest
//        struct pt_regs *user_regs = vcpu_regs(current);
        /* Open an interrupt window just long enough to drain softirqs. */
        local_irq_enable();
        do_softirq();
        local_irq_disable();

//        if (user_regs != regs)
//            printk("WARNING: checking pending interrupt in nested interrupt!!!\n");

        /* VMX Domain N has other interrupt source, saying DM  */
        if (test_bit(ARCH_VMX_INTR_ASSIST, &v->arch.arch_vmx.flags))
            vmx_intr_assist(v);

        /* FIXME: Check event pending indicator, and set
         * pending bit if necessary to inject back to guest.
         * Should be careful about window between this check
         * and above assist, since IOPACKET_PORT shouldn't be
         * injected into vmx domain.
         *
         * Now hardcode the vector as 0x10 temporarily
         */
//        if (event_pending(v)&&(!(VLSAPIC_INSVC(v,0)&(1UL<<0x10)))) {
//            VCPU(v, irr[0]) |= 1UL << 0x10;
//            v->arch.irq_new_pending = 1;
//        }

        /* Newly pended interrupt takes priority: recompute injection. */
        if ( v->arch.irq_new_pending ) {
            v->arch.irq_new_pending = 0;
            v->arch.irq_new_condition = 0;
            vmx_check_pending_irq(v);
            return;
        }
        /* VAC asks for interrupt acceleration: run VHPI detection. */
        if (VCPU(v, vac).a_int) {
            vhpi_detection(v);
            return;
        }
        if (v->arch.irq_new_condition) {
            v->arch.irq_new_condition = 0;
            vhpi_detection(v);
        }
    }
}
233 extern ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr);
235 static int vmx_handle_lds(REGS* regs)
236 {
237 regs->cr_ipsr |=IA64_PSR_ED;
238 return IA64_FAULT;
239 }
/* We came here because the H/W VHPT walker failed to find an entry */
/* Resolve a hardware VHPT-walker miss for the current VMX vcpu.
 * vec is 1 for an I-side miss, 2 for a D-side miss.  Order of resolution:
 *   1. guest physical mode: pass through (or emulate I/O access);
 *   2. hit in the virtual TLB: insert into the machine VHPT
 *      (or emulate if it maps an I/O range);
 *   3. otherwise walk the *guest* VHPT and either insert the found pte
 *      or deliver the appropriate TLB/VHPT fault to the guest. */
IA64FAULT
vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
{
    IA64_PSR vpsr;
    int type=ISIDE_TLB;
    u64 vhpt_adr, gppa, pteval, rr, itir;
    ISR misr;
//    REGS *regs;
    thash_data_t *data;
    VCPU *v = current;
#ifdef  VTLB_DEBUG
    check_vtlb_sanity(vtlb);
    dump_vtlb(vtlb);
#endif
    vpsr.val = vmx_vcpu_get_psr(v);
    misr.val=VMX(v,cr_isr);

    /* Guest physical mode and the address is in the low region:
     * no translation needed, but guest I/O pages must be emulated. */
    if(is_physical_mode(v)&&(!(vadr<<1>>62))){
        if(vec==2){
            if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
                emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
                return IA64_FAULT;
            }
        }
        physical_tlb_miss(v, vadr);
        return IA64_FAULT;
    }
    if(vec == 1) type = ISIDE_TLB;
    else if(vec == 2) type = DSIDE_TLB;
    else panic_domain(regs,"wrong vec:%lx\n",vec);

//    prepare_if_physical_mode(v);

    if((data=vtlb_lookup(v, vadr,type))!=0){
        /* Hit in the virtual TLB. */
//        gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
//        if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
        if(v->domain!=dom0 && data->io && type==DSIDE_TLB ){
            /* D-side access to an I/O mapping: emulate if privilege
             * allows, otherwise raise a data access-rights fault. */
            if(data->pl >= ((regs->cr_ipsr>>IA64_PSR_CPL0_BIT)&3)){
                gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
                emulate_io_inst(v, gppa, data->ma);
            }else{
                vcpu_set_isr(v,misr.val);
                data_access_rights(v, vadr);
            }
            return IA64_FAULT;
        }

        thash_vhpt_insert(v,data->page_flags, data->itir ,vadr);

    }else if(type == DSIDE_TLB){
        /* D-side miss not in the vTLB. */
        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
            /* Guest VHPT not enabled for this reference: deliver an
             * alternate DTLB fault (or nested/lds handling if psr.ic=0). */
            if(vpsr.ic){
                vcpu_set_isr(v, misr.val);
                alt_dtlb(v, vadr);
                return IA64_FAULT;
            } else{
                if(misr.sp){
                    //TODO  lds emulation
                    //panic("Don't support speculation load");
                    return vmx_handle_lds(regs);
                }else{
                    nested_dtlb(v);
                    return IA64_FAULT;
                }
            }
        } else{
            /* Walk the guest VHPT for a candidate pte. */
            vmx_vcpu_thash(v, vadr, &vhpt_adr);
            if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
                /* Guest VHPT entry readable: insert if present, else
                 * deliver a DTLB fault to the guest. */
                if (pteval & _PAGE_P){
                    vcpu_get_rr(v, vadr, &rr);
                    itir = rr&(RR_RID_MASK | RR_PS_MASK);
                    thash_purge_and_insert(v, pteval, itir , vadr);
                    return IA64_NO_FAULT;
                }
                if(vpsr.ic){
                    vcpu_set_isr(v, misr.val);
                    dtlb_fault(v, vadr);
                    return IA64_FAULT;
                }else{
                    if(misr.sp){
                        //TODO  lds emulation
                        //panic("Don't support speculation load");
                        return vmx_handle_lds(regs);
                    }else{
                        nested_dtlb(v);
                        return IA64_FAULT;
                    }
                }
            }else{
                /* Guest VHPT itself not mapped: VHPT data fault. */
                if(vpsr.ic){
                    vcpu_set_isr(v, misr.val);
                    dvhpt_fault(v, vadr);
                    return IA64_FAULT;
                }else{
                    if(misr.sp){
                        //TODO  lds emulation
                        //panic("Don't support speculation load");
                        return vmx_handle_lds(regs);
                    }else{
                        nested_dtlb(v);
                        return IA64_FAULT;
                    }
                }
            }
        }
    }else if(type == ISIDE_TLB){
        /* I-side miss not in the vTLB.
         * NOTE(review): the reference type passed here is the same
         * RSE_REF/DATA_REF choice as the D-side path; an instruction
         * reference type would seem more natural — confirm. */
        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
            /* Guest VHPT disabled: alternate ITLB fault.  psr.ic=0 is
             * flagged via isr.ni (nested interruption). */
            if(!vpsr.ic){
                misr.ni=1;
            }
            vcpu_set_isr(v, misr.val);
            alt_itlb(v, vadr);
            return IA64_FAULT;
        } else{
            vmx_vcpu_thash(v, vadr, &vhpt_adr);
            if(!guest_vhpt_lookup(vhpt_adr, &pteval)){
                if (pteval & _PAGE_P){
                    vcpu_get_rr(v, vadr, &rr);
                    itir = rr&(RR_RID_MASK | RR_PS_MASK);
                    thash_purge_and_insert(v, pteval, itir , vadr);
                    return IA64_NO_FAULT;
                }
                /* Entry not present: ITLB fault. */
                if(!vpsr.ic){
                    misr.ni=1;
                }
                vcpu_set_isr(v, misr.val);
                itlb_fault(v, vadr);
                return IA64_FAULT;
            }else{
                /* Guest VHPT itself not mapped: VHPT instruction fault. */
                if(!vpsr.ic){
                    misr.ni=1;
                }
                vcpu_set_isr(v, misr.val);
                ivhpt_fault(v, vadr);
                return IA64_FAULT;
            }
        }
    }
    return IA64_NO_FAULT;
}