direct-io.hg: xen/arch/ia64/vmx/vmx_process.c @ 13449:f69a329a4778

[IA64] Rename RID marker HVM_PARAM_CALLBACK_IRQ_RID to IA64_CALLBACK_IRQ_RID

Signed-off-by: Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>

author    awilliam@xenbuild2.aw
date      Tue Jan 02 16:39:42 2007 -0700
parents   46c44b5e6a1b
children  db72b85b81bb
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_process.c: handling VMX architecture-related VM exits
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 * Xuefei Xu (Anthony Xu) <Anthony.xu@intel.com>
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* for EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* for struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/vlsapic.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/ia64_int.h>
#include <asm/debugger.h>
//#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
#include <asm/vmx_vcpu.h>
#include <asm/kregs.h>
#include <asm/vmx.h>
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx_phy_mode.h>
#include <xen/mm.h>
#include <asm/vmx_pal.h>
/* Reset all PSR fields to 0, except up, mfl, mfh, pk, dt, rt, mc and it. */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
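/* Decode of the mask above, per the IA-64 PSR bit layout: up=bit 2, mfl=4,
 * mfh=5, pk=15, dt=17, rt=27, mc=35, it=36 -- 0x0000001808028034 sets
 * exactly these bit positions. */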
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
extern void rnat_consumption(VCPU *vcpu);
extern void alt_itlb(VCPU *vcpu, u64 vadr);
extern void itlb_fault(VCPU *vcpu, u64 vadr);
extern void ivhpt_fault(VCPU *vcpu, u64 vadr);
extern unsigned long handle_fpu_swa(int fp_fault, struct pt_regs *regs,
                                    unsigned long isr);
#define DOMN_PAL_REQUEST    0x110000
#define DOMN_SAL_REQUEST    0x110001
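/* Break immediates used by the guest firmware to request PAL and SAL
 * procedure emulation; dispatched in vmx_ia64_handle_break() below. */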
static u64 vec2off[68] = {
    0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,0x1c00,0x2000,0x2400,
    0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,0x4400,0x4800,
    0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,0x5700,
    0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,
    0x6a00,0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,
    0x7300,0x7400,0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,
    0x7c00,0x7d00,0x7e00,0x7f00
};
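/* vec2off maps an interruption vector number to its IVT offset: the first
 * 21 vectors are spaced 0x400 apart (0x0000-0x5000), the remaining 47 are
 * packed 0x100 apart (0x5100-0x7f00), matching the IA-64 interruption
 * vector table layout. */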
void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                              u64 vector, REGS *regs)
{
    u64 status;
    VCPU *vcpu = current;
    u64 vpsr = VCPU(vcpu, vpsr);

    vector = vec2off[vector];
    if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
        panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
    } else { // handle fpswa emulation
        // fp fault
        if (vector == IA64_FP_FAULT_VECTOR) {
            status = handle_fpu_swa(1, regs, isr);
            if (!status) {
                vmx_vcpu_increment_iip(vcpu);
                return;
            } else if (IA64_RETRY == status)
                return;
        }
        // fp trap
        else if (vector == IA64_FP_TRAP_VECTOR) {
            status = handle_fpu_swa(0, regs, isr);
            if (!status)
                return;
            else if (IA64_RETRY == status) {
                vmx_vcpu_decrement_iip(vcpu);
                return;
            }
        }
    }
    VCPU(vcpu, isr) = isr;
    VCPU(vcpu, iipa) = regs->cr_iip;
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        VCPU(vcpu, iim) = iim;
    else
        set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
    inject_guest_interruption(vcpu, vector);
}
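/* Break-instruction faults land here: the break immediate selects a
 * hypercall (d->arch.breakimm) or a PAL/SAL emulation request from the
 * guest firmware; anything else is reflected to the guest as vector 11
 * (IVT offset 0x2c00, the Break Instruction vector). */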
IA64FAULT
vmx_ia64_handle_break(unsigned long ifa, struct pt_regs *regs,
                      unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    perfc_incrc(vmx_ia64_handle_break);
#ifdef CRASH_DEBUG
    if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs) &&
        IS_VMM_ADDRESS(regs->cr_iip)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    } else
#endif
    {
        if (iim == 0)
            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);

        if (!user_mode(regs)) {
            /* Allow hypercalls only when cpl == 0. */
            if (iim == d->arch.breakimm) {
                ia64_hypercall(regs);
                vmx_vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            } else if (iim == DOMN_PAL_REQUEST) {
                pal_emul(v);
                vmx_vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            } else if (iim == DOMN_SAL_REQUEST) {
                sal_emul(v);
                vmx_vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            }
        }
        vmx_reflect_interruption(ifa, isr, iim, 11, regs);
    }
    return IA64_NO_FAULT;
}
void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
{
    unsigned long i = 0UL, *src, *dst, *sunat, *dunat;
    IA64_PSR vpsr;

    src = &regs->r16;
    sunat = &regs->eml_unat;
    vpsr.val = VCPU(v, vpsr);
    if (vpsr.bn) {
        dst = &VCPU(v, vgr[0]);
        dunat = &VCPU(v, vnat);
        /* Copy the NaT bits of r16-r31 (16 bits starting at
           IA64_PT_REGS_R16_SLOT in eml_unat) into the low 16 bits
           of the vpd's vnat. */
        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
                            dep %2 = %0, %2, 0, 16;; \
                            st8 [%3] = %2;;"
             ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),
               "i"(IA64_PT_REGS_R16_SLOT):"memory");
    } else {
        dst = &VCPU(v, vbgr[0]);
        // dunat = &VCPU(v, vbnat);
        // __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
        //                     dep %2 = %0, %2, 16, 16;;
        //                     st8 [%3] = %2;;"
        //      ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),
        //        "i"(IA64_PT_REGS_R16_SLOT):"memory");
    }
    for (i = 0; i < 16; i++)
        *dst++ = *src++;
}
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void leave_hypervisor_tail(struct pt_regs *regs)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d)) { // always comes from guest
        // struct pt_regs *user_regs = vcpu_regs(current);
        local_irq_enable();
        do_softirq();
        local_irq_disable();

        if (v->vcpu_id == 0) {
            int callback_irq =
                d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
            if (callback_irq != 0 && local_events_need_delivery()) {
                /* change level for para-device callback irq */
                /* use level irq to send discrete event */
                if (callback_irq & IA64_CALLBACK_IRQ_RID) {
                    /* case of using Requester-ID as callback irq */
                    /* RID: '<#bus(8)><#dev(5)><#func(3)>' */
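                    /* Per the RID layout above, the full decode would be
                     * (only the device number is needed below):
                     *   bus  = (callback_irq >> 8) & 0xff;
                     *   dev  = (callback_irq >> 3) & 0x1f;
                     *   func =  callback_irq       & 0x07;
                     */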
                    int dev = (callback_irq >> 3) & 0x1f;
                    viosapic_set_pci_irq(d, dev, 0, 1);
                    viosapic_set_pci_irq(d, dev, 0, 0);
                } else {
                    /* case of using GSI as callback irq */
                    viosapic_set_irq(d, callback_irq, 1);
                    viosapic_set_irq(d, callback_irq, 0);
                }
            }
        }

        rmb();
        if (xchg(&v->arch.irq_new_pending, 0)) {
            v->arch.irq_new_condition = 0;
            vmx_check_pending_irq(v);
            return;
        }

        if (v->arch.irq_new_condition) {
            v->arch.irq_new_condition = 0;
            vhpi_detection(v);
        }
    }
}
extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, u64 vadr);

/* A faulting speculative load (ld.s) is not emulated; setting psr.ed
   defers the fault, so the load's target register is simply flagged NaT. */
static int vmx_handle_lds(REGS *regs)
{
    regs->cr_ipsr |= IA64_PSR_ED;
    return IA64_FAULT;
}
/* We came here because the H/W VHPT walker failed to find an entry. */
IA64FAULT
vmx_hpw_miss(u64 vadr, u64 vec, REGS *regs)
{
    IA64_PSR vpsr;
    int type;
    u64 vhpt_adr, gppa, pteval, rr, itir;
    ISR misr;
    PTA vpta;
    thash_data_t *data;
    VCPU *v = current;

    vpsr.val = VCPU(v, vpsr);
    misr.val = VMX(v, cr_isr);

    if (vec == 1)
        type = ISIDE_TLB;
    else if (vec == 2)
        type = DSIDE_TLB;
    else
        panic_domain(regs, "wrong vec:%lx\n", vec);

    if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
        if (vec == 2) {
            if (v->domain != dom0 &&
                __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
                emulate_io_inst(v, ((vadr << 1) >> 1), 4);  // UC
                return IA64_FAULT;
            }
        }
        physical_tlb_miss(v, vadr, type);
        return IA64_FAULT;
    }
    if ((data = vtlb_lookup(v, vadr, type)) != 0) {
        if (v->domain != dom0 && type == DSIDE_TLB) {
            gppa = (vadr & ((1UL << data->ps) - 1)) +
                   (data->ppn >> (data->ps - 12) << data->ps);
            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
                if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
                    emulate_io_inst(v, gppa, data->ma);
                else {
                    vcpu_set_isr(v, misr.val);
                    data_access_rights(v, vadr);
                }
                return IA64_FAULT;
            }
        }
        thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
    } else if (type == DSIDE_TLB) {
        if (misr.sp)
            return vmx_handle_lds(regs);

        if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                alt_dtlb(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }

        vmx_vcpu_get_pta(v, &vpta.val);
        if (vpta.vf) {
            /* Long format is not yet supported. */
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dtlb_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }

        vmx_vcpu_thash(v, vadr, &vhpt_adr);
        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
            /* VHPT successfully read. */
            if (!(pteval & _PAGE_P)) {
                if (vpsr.ic) {
                    vcpu_set_isr(v, misr.val);
                    dtlb_fault(v, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(v);
                    return IA64_FAULT;
                }
            } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
                vcpu_get_rr(v, vadr, &rr);
                itir = rr & (RR_RID_MASK | RR_PS_MASK);
                thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
                return IA64_NO_FAULT;
            } else if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dtlb_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        } else {
            /* Can't read VHPT. */
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dvhpt_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }
    } else if (type == ISIDE_TLB) {
        if (!vpsr.ic)
            misr.ni = 1;
        if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
            vcpu_set_isr(v, misr.val);
            alt_itlb(v, vadr);
            return IA64_FAULT;
        }

        vmx_vcpu_get_pta(v, &vpta.val);
        if (vpta.vf) {
            /* Long format is not yet supported. */
            vcpu_set_isr(v, misr.val);
            itlb_fault(v, vadr);
            return IA64_FAULT;
        }

        vmx_vcpu_thash(v, vadr, &vhpt_adr);
        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
            /* VHPT successfully read. */
            if (pteval & _PAGE_P) {
                vcpu_get_rr(v, vadr, &rr);
                itir = rr & (RR_RID_MASK | RR_PS_MASK);
                thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB);
                return IA64_NO_FAULT;
            } else {
                vcpu_set_isr(v, misr.val);
                inst_page_not_present(v, vadr);
                return IA64_FAULT;
            }
        } else {
            vcpu_set_isr(v, misr.val);
            ivhpt_fault(v, vadr);
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}