ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_fault.c @ 16682:7515dc56c124

[IA64] Sort out the XEN_IA64_OPTF_IDENT_MAP_REG[457] constants confusion

Currently the constants are used for two different purposes:
one is the OPTF hypercall sub-command,
the other is a bit flag in struct opt_feature::mask.
The two are separate spaces, so split them apart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Sun Dec 30 13:02:16 2007 -0700 (2007-12-30)
parents 85613b8c4176
children 2d0193702170
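
A minimal illustrative sketch of the kind of split described above (the values and exact definitions are hypothetical; only the *_FLG names also appear in the code below, and the real definitions live in the public ia64 headers):

    /* OPTF hypercall sub-command numbers -- one namespace. */
    #define XEN_IA64_OPTF_IDENT_MAP_REG4        4   /* illustrative value */
    #define XEN_IA64_OPTF_IDENT_MAP_REG5        5   /* illustrative value */
    #define XEN_IA64_OPTF_IDENT_MAP_REG7        7   /* illustrative value */

    /* Bit flags for struct opt_feature::mask -- a separate namespace, so a
     * sub-command number can no longer be confused with a mask bit. */
    #define XEN_IA64_OPTF_IDENT_MAP_REG4_FLG    (1UL << 0)  /* illustrative */
    #define XEN_IA64_OPTF_IDENT_MAP_REG5_FLG    (1UL << 1)  /* illustrative */
    #define XEN_IA64_OPTF_IDENT_MAP_REG7_FLG    (1UL << 2)  /* illustrative */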
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_fault.c: handling VMX architecture-related VM exits
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/vlsapic.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/ia64_int.h>
#include <asm/debugger.h>
//#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
#include <asm/vmx_vcpu.h>
#include <asm/kregs.h>
#include <asm/vmx.h>
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx_phy_mode.h>
#include <xen/mm.h>
#include <asm/vmx_pal.h>
/* Reset all PSR fields to 0, except up, mfl, mfh, pk, dt, rt, mc, it. */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
extern void rnat_consumption(VCPU *vcpu);
extern void alt_itlb(VCPU *vcpu, u64 vadr);
extern void itlb_fault(VCPU *vcpu, u64 vadr);
extern void ivhpt_fault(VCPU *vcpu, u64 vadr);
extern unsigned long handle_fpu_swa(int fp_fault, struct pt_regs *regs,
                                    unsigned long isr);

#define DOMN_PAL_REQUEST 0x110000
#define DOMN_SAL_REQUEST 0x110001
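
/*
 * IVT offset of each interruption vector: vectors 0-19 are spaced 0x400
 * apart, vectors 20-67 are spaced 0x100 apart.
 */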
static const u16 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    0x7f00
};
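
/*
 * Reflect an interruption back into the guest: record ISR and IIPA (plus
 * IIM for break/speculation vectors, or IFA/ITIR/IHA otherwise) in the VPD
 * and inject the corresponding guest vector.  Most faults taken while the
 * guest had PSR.ic clear are treated as nested faults and panic the domain.
 */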
void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                              u64 vec, REGS *regs)
{
    u64 status, vector;
    VCPU *vcpu = current;
    u64 vpsr = VCPU(vcpu, vpsr);

    vector = vec2off[vec];

    switch (vec) {
    case 5:  // IA64_DATA_NESTED_TLB_VECTOR
        break;
    case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        if (vhpt_access_rights_fixup(vcpu, ifa, 0))
            return;
        break;

    case 25: // IA64_DISABLED_FPREG_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        if (FP_PSR(vcpu) & IA64_PSR_DFH) {
            FP_PSR(vcpu) = IA64_PSR_MFH;
            if (__ia64_per_cpu_var(fp_owner) != vcpu)
                __ia64_load_fpu(vcpu->arch._thread.fph);
        }
        if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
            regs->cr_ipsr &= ~IA64_PSR_DFH;
            return;
        }
        break;

    case 32: // IA64_FP_FAULT_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        // handle fpswa emulation
        // fp fault
        status = handle_fpu_swa(1, regs, isr);
        if (!status) {
            vcpu_increment_iip(vcpu);
            return;
        } else if (IA64_RETRY == status)
            return;
        break;

    case 33: // IA64_FP_TRAP_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        // fp trap
        status = handle_fpu_swa(0, regs, isr);
        if (!status)
            return;
        else if (IA64_RETRY == status) {
            vcpu_decrement_iip(vcpu);
            return;
        }
        break;

    case 29: // IA64_DEBUG_VECTOR
    case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
    case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
        if (vmx_guest_kernel_mode(regs)
            && current->domain->debugger_attached) {
            domain_pause_for_debugger();
            return;
        }
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        break;

    default:
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        break;
    }
    VCPU(vcpu, isr) = isr;
    VCPU(vcpu, iipa) = regs->cr_iip;
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        VCPU(vcpu, iim) = iim;
    else
        set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
    inject_guest_interruption(vcpu, vector);
    return;

 nested_fault:
    panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
}
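
/*
 * Break-instruction fault handler: dispatch hypercalls (break immediate ==
 * d->arch.breakimm) and the PAL/SAL emulation requests made with the
 * DOMN_PAL_REQUEST/DOMN_SAL_REQUEST immediates; anything else is reflected
 * to the guest as a break fault (vector 11).
 */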
IA64FAULT
vmx_ia64_handle_break(unsigned long ifa, struct pt_regs *regs,
                      unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    perfc_incr(vmx_ia64_handle_break);
#ifdef CRASH_DEBUG
    if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs) &&
        IS_VMM_ADDRESS(regs->cr_iip)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    } else
#endif
    {
        if (iim == 0)
            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);

        if (ia64_psr(regs)->cpl == 0) {
            /* Allow hypercalls only when cpl = 0. */
            if (iim == d->arch.breakimm) {
                ia64_hypercall(regs);
                vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            } else if (iim == DOMN_PAL_REQUEST) {
                pal_emul(v);
                vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            } else if (iim == DOMN_SAL_REQUEST) {
                sal_emul(v);
                vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            }
        }
        vmx_reflect_interruption(ifa, isr, iim, 11, regs);
    }
    return IA64_NO_FAULT;
}
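
/*
 * Copy the banked registers r16-r31 from the trap frame into the VPD
 * (vgr[] when the guest is in bank 1, vbgr[] when in bank 0) and, for
 * bank 1, propagate their NaT bits from eml_unat into the VPD as well.
 */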
void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
{
    unsigned long i = 0UL, *src, *dst, *sunat, *dunat;
    IA64_PSR vpsr;

    src = &regs->r16;
    sunat = &regs->eml_unat;
    vpsr.val = VCPU(v, vpsr);
    if (vpsr.bn) {
        dst = &VCPU(v, vgr[0]);
        dunat = &VCPU(v, vnat);
        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
                               dep %2 = %0, %2, 0, 16;; \
                               st8 [%3] = %2;;"
            ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
    } else {
        dst = &VCPU(v, vbgr[0]);
//      dunat = &VCPU(v, vbnat);
//      __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
//                             dep %2 = %0, %2, 16, 16;;
//                             st8 [%3] = %2;;"
//          ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
    }
    for (i = 0; i < 16; i++)
        *dst++ = *src++;
}
// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void leave_hypervisor_tail(void)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d)) { // always comes from guest
        // struct pt_regs *user_regs = vcpu_regs(current);
        local_irq_enable();
        do_softirq();
        local_irq_disable();

        if (v->vcpu_id == 0) {
            unsigned long callback_irq =
                d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];

            if (v->arch.arch_vmx.pal_init_pending) {
                /* inject INIT interruption to guest pal */
                v->arch.arch_vmx.pal_init_pending = 0;
                deliver_pal_init(v);
                return;
            }

            /*
             * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
             *                  Domain = val[47:32], Bus = val[31:16],
             *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
             * val[63:56] == 0: val[55:0] is a delivery as GSI
             */
            if (callback_irq != 0 && local_events_need_delivery()) {
                /* change level for para-device callback irq */
                /* use level irq to send discrete event */
                if ((uint8_t)(callback_irq >> 56) == 1) {
                    /* case of using PCI INTx line as callback irq */
                    int pdev = (callback_irq >> 11) & 0x1f;
                    int pintx = callback_irq & 3;
                    viosapic_set_pci_irq(d, pdev, pintx, 1);
                    viosapic_set_pci_irq(d, pdev, pintx, 0);
                } else {
                    /* case of using GSI as callback irq */
                    viosapic_set_irq(d, callback_irq, 1);
                    viosapic_set_irq(d, callback_irq, 0);
                }
            }
        }

        rmb();
        if (xchg(&v->arch.irq_new_pending, 0)) {
            v->arch.irq_new_condition = 0;
            vmx_check_pending_irq(v);
            return;
        }

        if (v->arch.irq_new_condition) {
            v->arch.irq_new_condition = 0;
            vhpi_detection(v);
        }
    }
}
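
/* ld.s (speculative load) miss handler: set IPSR.ed so that, on return to
   the guest, the speculative load completes with a deferred exception (NaT)
   instead of retrying the translation. */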
static int vmx_handle_lds(REGS *regs)
{
    regs->cr_ipsr |= IA64_PSR_ED;
    return IA64_FAULT;
}
/* We came here because the H/W VHPT walker failed to find an entry */
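/* vec: 1 = ITLB miss, 2 = DTLB miss, 3 = alternate ITLB miss,
   4 = alternate DTLB miss; 1/3 are handled as instruction-side and
   2/4 as data-side misses below. */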
IA64FAULT
vmx_hpw_miss(u64 vadr, u64 vec, REGS *regs)
{
    IA64_PSR vpsr;
    int type;
    unsigned int mmu_mode;
    u64 vhpt_adr, gppa, pteval, rr, itir;
    ISR misr;
    PTA vpta;
    thash_data_t *data;
    VCPU *v = current;

    vpsr.val = VCPU(v, vpsr);
    misr.val = VMX(v, cr_isr);

    if (vec == 1 || vec == 3)
        type = ISIDE_TLB;
    else if (vec == 2 || vec == 4)
        type = DSIDE_TLB;
    else
        panic_domain(regs, "wrong vec:%lx\n", vec);

    /* Physical mode and region is 0 or 4. */
    mmu_mode = VMX_MMU_MODE(v);
    if ((mmu_mode == VMX_MMU_PHY_DT
         || (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))
        && !((vadr << 1) >> 62)) {
        if (type == DSIDE_TLB) {
            /* DTLB miss. */
            if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
                return vmx_handle_lds(regs);
            /* Clear UC bit in vadr with the shifts. */
            if (v->domain != dom0
                && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
                emulate_io_inst(v, ((vadr << 1) >> 1), 4);
                return IA64_FAULT;
            }
        }
        physical_tlb_miss(v, vadr, type);
        return IA64_FAULT;
    }

 try_again:
    /* Search in VTLB. */
    data = vtlb_lookup(v, vadr, type);
    if (data != 0) {
        /* Found. */
        if (v->domain != dom0 && type == DSIDE_TLB) {
            if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
                if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
                    return vmx_handle_lds(regs);
            }
            gppa = (vadr & ((1UL << data->ps) - 1)) +
                   (data->ppn >> (data->ps - 12) << data->ps);
            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
                if (misr.sp)
                    panic_domain(NULL, "ld.s on I/O page not with UC attr."
                                 " pte=0x%lx\n", data->page_flags);
                if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
                    emulate_io_inst(v, gppa, data->ma);
                else {
                    vcpu_set_isr(v, misr.val);
                    data_access_rights(v, vadr);
                }
                return IA64_FAULT;
            }
        }
        thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
        return IA64_NO_FAULT;
    }

    if (type == DSIDE_TLB) {
        struct opt_feature *optf = &(v->domain->arch.opt_feature);

        if (misr.sp)
            return vmx_handle_lds(regs);

        vcpu_get_rr(v, vadr, &rr);
        itir = rr & (RR_RID_MASK | RR_PS_MASK);

        if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
            /* Windows uses regions 4 and 5 for identity mapping. */
            if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG) &&
                REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
                REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {

                pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
                         optf->im_reg4.pgprot;
                if (thash_purge_and_insert(v, pteval, itir, vadr, type))
                    goto try_again;
                return IA64_NO_FAULT;
            }
            if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG) &&
                REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
                REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {

                pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
                         optf->im_reg5.pgprot;
                if (thash_purge_and_insert(v, pteval, itir, vadr, type))
                    goto try_again;
                return IA64_NO_FAULT;
            }
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                alt_dtlb(v, vadr);
            } else {
                nested_dtlb(v);
            }
            return IA64_FAULT;
        }

        vpta.val = vmx_vcpu_get_pta(v);
        if (vpta.vf) {
            /* Long format is not yet supported. */
            goto inject_dtlb_fault;
        }

        /* avoid recursively walking (short format) VHPT */
        if (!(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4_FLG) &&
            !(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5_FLG) &&
            (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
            goto inject_dtlb_fault;
        }

        vhpt_adr = vmx_vcpu_thash(v, vadr);
        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
            /* VHPT successfully read. */
            if (!(pteval & _PAGE_P)) {
                goto inject_dtlb_fault;
            } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
                thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
                return IA64_NO_FAULT;
            }
            goto inject_dtlb_fault;
        } else {
            /* Can't read VHPT. */
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dvhpt_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }
    } else if (type == ISIDE_TLB) {

        if (!vpsr.ic)
            misr.ni = 1;

        /* Don't bother with PHY_D mode (will require rr0+rr4 switches,
           and certainly used only within nested TLB handler (hence TR mapped
           and ic=0). */
        if (mmu_mode == VMX_MMU_PHY_D)
            goto inject_itlb_fault;

        if (!vhpt_enabled(v, vadr, INST_REF)) {
            vcpu_set_isr(v, misr.val);
            alt_itlb(v, vadr);
            return IA64_FAULT;
        }

        vpta.val = vmx_vcpu_get_pta(v);
        if (vpta.vf) {
            /* Long format is not yet supported. */
            goto inject_itlb_fault;
        }

        vhpt_adr = vmx_vcpu_thash(v, vadr);
        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
            /* VHPT successfully read. */
            if (pteval & _PAGE_P) {
                if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
                    goto inject_itlb_fault;
                }
                vcpu_get_rr(v, vadr, &rr);
                itir = rr & (RR_RID_MASK | RR_PS_MASK);
                thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB);
                return IA64_NO_FAULT;
            } else {
                vcpu_set_isr(v, misr.val);
                inst_page_not_present(v, vadr);
                return IA64_FAULT;
            }
        } else {
            vcpu_set_isr(v, misr.val);
            ivhpt_fault(v, vadr);
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;

 inject_dtlb_fault:
    if (vpsr.ic) {
        vcpu_set_isr(v, misr.val);
        dtlb_fault(v, vadr);
    } else
        nested_dtlb(v);

    return IA64_FAULT;

 inject_itlb_fault:
    vcpu_set_isr(v, misr.val);
    itlb_fault(v, vadr);
    return IA64_FAULT;
}