ia64/xen-unstable

xen/arch/ia64/vmx/vmx_process.c @ 15419:962f22223817

[IA64] Domain debugger for VTi: virtualize ibr and dbr.

Misc cleanup.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Mon Jul 02 10:10:32 2007 -0600
parents 466f71b1e831

/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_process.c: handling VMX architecture-related VM exits
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */

#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/vlsapic.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/regionreg.h>
#include <asm/privop.h>
#include <asm/ia64_int.h>
#include <asm/debugger.h>
//#include <asm/hpsim_ssc.h>
#include <asm/dom_fw.h>
#include <asm/vmx_vcpu.h>
#include <asm/kregs.h>
#include <asm/vmx.h>
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx_phy_mode.h>
#include <xen/mm.h>
#include <asm/vmx_pal.h>
/* Reset all PSR fields to 0, except up, mfl, mfh, pk, dt, rt, mc, it. */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
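/*
 * Sanity note: with the IA-64 PSR bit layout (up=2, mfl=4, mfh=5,
 * pk=15, dt=17, rt=27, mc=35, it=36), those eight bits are exactly
 * the set bits of 0x0000001808028034.
 */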

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
extern void rnat_consumption (VCPU *vcpu);
extern void alt_itlb (VCPU *vcpu, u64 vadr);
extern void itlb_fault (VCPU *vcpu, u64 vadr);
extern void ivhpt_fault (VCPU *vcpu, u64 vadr);
extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr);
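
/*
 * Break immediates used by the guest firmware to request PAL and SAL
 * procedure emulation (dispatched in vmx_ia64_handle_break() below).
 */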
#define DOMN_PAL_REQUEST 0x110000
#define DOMN_SAL_REQUEST 0x110001

static u64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    0x7f00
};
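/*
 * vec2off[] maps an interruption vector number to its offset in the
 * IA-64 Interruption Vector Table: e.g. vec 11 (break) -> 0x2c00 and
 * vec 25 (disabled FP-register) -> 0x5500.  vmx_reflect_interruption()
 * relies on this when comparing the offset against IA64_BREAK_VECTOR.
 */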

void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
                              u64 vec, REGS *regs)
{
    u64 status, vector;
    VCPU *vcpu = current;
    u64 vpsr = VCPU(vcpu, vpsr);

    vector = vec2off[vec];

    switch (vec) {
    case 5:  // IA64_DATA_NESTED_TLB_VECTOR
        break;
    case 22: // IA64_INST_ACCESS_RIGHTS_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        if (vhpt_access_rights_fixup(vcpu, ifa, 0))
            return;
        break;

    case 25: // IA64_DISABLED_FPREG_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        if (FP_PSR(vcpu) & IA64_PSR_DFH) {
            FP_PSR(vcpu) = IA64_PSR_MFH;
            if (__ia64_per_cpu_var(fp_owner) != vcpu)
                __ia64_load_fpu(vcpu->arch._thread.fph);
        }
        if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
            regs->cr_ipsr &= ~IA64_PSR_DFH;
            return;
        }
        break;

    case 32: // IA64_FP_FAULT_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        // Handle an FP fault via fpswa emulation.
        status = handle_fpu_swa(1, regs, isr);
        if (!status) {
            vcpu_increment_iip(vcpu);
            return;
        } else if (IA64_RETRY == status)
            return;
        break;

    case 33: // IA64_FP_TRAP_VECTOR
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        // Handle an FP trap via fpswa emulation.
        status = handle_fpu_swa(0, regs, isr);
        if (!status)
            return;
        else if (IA64_RETRY == status) {
            vcpu_decrement_iip(vcpu);
            return;
        }
        break;

    case 29: // IA64_DEBUG_VECTOR
    case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
    case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
        if (vmx_guest_kernel_mode(regs)
            && current->domain->debugger_attached) {
            domain_pause_for_debugger();
            return;
        }
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        break;

    default:
        if (!(vpsr & IA64_PSR_IC))
            goto nested_fault;
        break;
    }
    VCPU(vcpu, isr) = isr;
    VCPU(vcpu, iipa) = regs->cr_iip;
    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
        VCPU(vcpu, iim) = iim;
    else
        set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
    inject_guest_interruption(vcpu, vector);
    return;
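
    /*
     * vpsr.ic == 0 means the guest had interruption collection
     * disabled, i.e. it was already inside an interruption handler;
     * a second fault there cannot be reflected, so it is fatal.
     */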
nested_fault:
    panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
}

IA64FAULT
vmx_ia64_handle_break(unsigned long ifa, struct pt_regs *regs,
                      unsigned long isr, unsigned long iim)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    perfc_incr(vmx_ia64_handle_break);
#ifdef CRASH_DEBUG
    if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs) &&
        IS_VMM_ADDRESS(regs->cr_iip)) {
        if (iim == 0)
            show_registers(regs);
        debugger_trap_fatal(0 /* don't care */, regs);
    } else
#endif
    {
        if (iim == 0)
            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);

        if (ia64_psr(regs)->cpl == 0) {
            /* Allow hypercalls only when cpl == 0. */
            if (iim == d->arch.breakimm) {
                ia64_hypercall(regs);
                vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            } else if (iim == DOMN_PAL_REQUEST) {
                pal_emul(v);
                vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            } else if (iim == DOMN_SAL_REQUEST) {
                sal_emul(v);
                vcpu_increment_iip(v);
                return IA64_NO_FAULT;
            }
        }
        vmx_reflect_interruption(ifa, isr, iim, 11, regs);
    }
    return IA64_NO_FAULT;
}
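
/*
 * Dispatch summary for the function above: a guest at cpl 0 executing
 * "break" with the per-domain hypercall immediate (d->arch.breakimm)
 * reaches ia64_hypercall() and is resumed past the break; the
 * DOMN_PAL_REQUEST/DOMN_SAL_REQUEST immediates get PAL/SAL emulation;
 * any other immediate (or a break at cpl > 0) is reflected back into
 * the guest as vector 11 (break, IVT offset 0x2c00).
 */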

void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
{
    unsigned long i = 0UL, *src, *dst, *sunat, *dunat;
    IA64_PSR vpsr;

    src = &regs->r16;
    sunat = &regs->eml_unat;
    vpsr.val = VCPU(v, vpsr);
    if (vpsr.bn) {
        dst = &VCPU(v, vgr[0]);
        dunat = &VCPU(v, vnat);
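        /*
         * Copy the 16 NaT bits covering r16-r31 from the stashed
         * eml_unat (starting at bit position IA64_PT_REGS_R16_SLOT)
         * into the low 16 bits of the VPD vnat word.
         */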
        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
                                dep %2 = %0, %2, 0, 16;; \
                                st8 [%3] = %2;;"
            ::"r"(i), "r"(*sunat), "r"(*dunat), "r"(dunat),
              "i"(IA64_PT_REGS_R16_SLOT) : "memory");
    } else {
        dst = &VCPU(v, vbgr[0]);
        // dunat = &VCPU(v, vbnat);
        // __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
        //                         dep %2 = %0, %2, 16, 16;;
        //                         st8 [%3] = %2;;"
        //     ::"r"(i), "r"(*sunat), "r"(*dunat), "r"(dunat),
        //       "i"(IA64_PT_REGS_R16_SLOT) : "memory");
    }
    for (i = 0; i < 16; i++)
        *dst++ = *src++;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void leave_hypervisor_tail(void)
{
    struct domain *d = current->domain;
    struct vcpu *v = current;

    // FIXME: Will this work properly if doing an RFI???
    if (!is_idle_domain(d)) { // always comes from the guest
        // struct pt_regs *user_regs = vcpu_regs(current);
        local_irq_enable();
        do_softirq();
        local_irq_disable();

        if (v->vcpu_id == 0) {
            unsigned long callback_irq =
                d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];

            if (v->arch.arch_vmx.pal_init_pending) {
                /* Inject INIT interruption to guest PAL. */
                v->arch.arch_vmx.pal_init_pending = 0;
                deliver_pal_init(v);
                return;
            }

            /*
             * val[63:56] == 1: val[55:0] is a PCI INTx line to deliver:
             *                  Domain = val[47:32], Bus = val[31:16],
             *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
             * val[63:56] == 0: val[55:0] is a GSI to deliver.
             */
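            /*
             * Example with illustrative (hypothetical) values:
             * callback_irq == 0x0100000000002a01 has val[63:56] == 1,
             * so it names a PCI INTx line with DevFn == 0x2a and
             * IntX == 1; the code below extracts device 5
             * ((val >> 11) & 0x1f) and pin INTB (val & 3) and pulses
             * that line.  A bare GSI value such as 0x21 would instead
             * be pulsed through viosapic_set_irq().
             */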
            if (callback_irq != 0 && local_events_need_delivery()) {
                /* Change level for the para-device callback irq:
                 * use a level irq to send a discrete event. */
                if ((uint8_t)(callback_irq >> 56) == 1) {
                    /* Case of using a PCI INTx line as callback irq. */
                    int pdev = (callback_irq >> 11) & 0x1f;
                    int pintx = callback_irq & 3;
                    viosapic_set_pci_irq(d, pdev, pintx, 1);
                    viosapic_set_pci_irq(d, pdev, pintx, 0);
                } else {
                    /* Case of using a GSI as callback irq. */
                    viosapic_set_irq(d, callback_irq, 1);
                    viosapic_set_irq(d, callback_irq, 0);
                }
            }
        }

        rmb();
        if (xchg(&v->arch.irq_new_pending, 0)) {
            v->arch.irq_new_condition = 0;
            vmx_check_pending_irq(v);
            return;
        }

        if (v->arch.irq_new_condition) {
            v->arch.irq_new_condition = 0;
            vhpi_detection(v);
        }
    }
}

extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, u64 vadr);
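
/*
 * For a faulting speculative load (ld.s), setting ipsr.ed defers the
 * exception: the load completes with a NaT result, leaving recovery
 * to the guest's chk.s code.
 */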
static int vmx_handle_lds(REGS *regs)
{
    regs->cr_ipsr |= IA64_PSR_ED;
    return IA64_FAULT;
}

/* We came here because the H/W VHPT walker failed to find an entry. */
IA64FAULT
vmx_hpw_miss(u64 vadr, u64 vec, REGS *regs)
{
    IA64_PSR vpsr;
    int type;
    u64 vhpt_adr, gppa, pteval, rr, itir;
    ISR misr;
    PTA vpta;
    thash_data_t *data;
    VCPU *v = current;

    vpsr.val = VCPU(v, vpsr);
    misr.val = VMX(v, cr_isr);

    if (vec == 1)
        type = ISIDE_TLB;
    else if (vec == 2)
        type = DSIDE_TLB;
    else
        panic_domain(regs, "wrong vec:%lx\n", vec);

    if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
        if (vec == 2) {
            if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
                return vmx_handle_lds(regs);
            if (v->domain != dom0
                && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
                emulate_io_inst(v, ((vadr << 1) >> 1), 4); // UC
                return IA64_FAULT;
            }
        }
        physical_tlb_miss(v, vadr, type);
        return IA64_FAULT;
    }

try_again:
    if ((data = vtlb_lookup(v, vadr, type)) != 0) {
        if (v->domain != dom0 && type == DSIDE_TLB) {
            if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
                if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
                    return vmx_handle_lds(regs);
            }
            gppa = (vadr & ((1UL << data->ps) - 1)) +
                   (data->ppn >> (data->ps - 12) << data->ps);
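            /*
             * gppa = page offset + page base.  Worked example with
             * assumed values: for ps == 14 (16KB pages) and
             * ppn == 0x8000 (ppn counts 4KB units), the base is
             * (0x8000 >> 2) << 14 == 0x8000000, plus the low 14 bits
             * of vadr.
             */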
            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
                if (misr.sp)
                    panic_domain(NULL, "ld.s on I/O page not with UC attr."
                                 " pte=0x%lx\n", data->page_flags);
                if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
                    emulate_io_inst(v, gppa, data->ma);
                else {
                    vcpu_set_isr(v, misr.val);
                    data_access_rights(v, vadr);
                }
                return IA64_FAULT;
            }
        }
        thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);

    } else if (type == DSIDE_TLB) {

        if (misr.sp)
            return vmx_handle_lds(regs);

        vcpu_get_rr(v, vadr, &rr);
        itir = rr & (RR_RID_MASK | RR_PS_MASK);

        if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
            if (GOS_WINDOWS(v)) {
                /* Windows uses regions 4 and 5 for identity mappings. */
                if (REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL)
                    && (REGION_OFFSET(vadr) <= _PAGE_PPN_MASK)) {

                    pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
                             (_PAGE_P | _PAGE_A | _PAGE_D |
                              _PAGE_MA_WB | _PAGE_AR_RW);

                    if (thash_purge_and_insert(v, pteval, itir, vadr, type))
                        goto try_again;

                    return IA64_NO_FAULT;
                }

                if (REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL)
                    && (REGION_OFFSET(vadr) <= _PAGE_PPN_MASK)) {

                    pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
                             (_PAGE_P | _PAGE_A | _PAGE_D |
                              _PAGE_MA_UC | _PAGE_AR_RW);

                    if (thash_purge_and_insert(v, pteval, itir, vadr, type))
                        goto try_again;

                    return IA64_NO_FAULT;
                }
            }

            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                alt_dtlb(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }

        vpta.val = vmx_vcpu_get_pta(v);
        if (vpta.vf) {
            /* Long format is not yet supported. */
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dtlb_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }

        /* Avoid recursively walking the (short format) VHPT. */
        if (!GOS_WINDOWS(v) &&
            (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {

            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dtlb_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }
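
        /*
         * The test above compares vadr against the VHPT base
         * (vpta.val) on bits 60..vpta.size, discarding the three
         * region bits via the << 3: zero means the missing address
         * itself falls inside the guest VHPT, which must not be
         * resolved by walking that same VHPT.
         */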

        vhpt_adr = vmx_vcpu_thash(v, vadr);
        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
            /* VHPT successfully read. */
            if (!(pteval & _PAGE_P)) {
                if (vpsr.ic) {
                    vcpu_set_isr(v, misr.val);
                    dtlb_fault(v, vadr);
                    return IA64_FAULT;
                } else {
                    nested_dtlb(v);
                    return IA64_FAULT;
                }
            } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
                thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
                return IA64_NO_FAULT;
            } else if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dtlb_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        } else {
            /* Can't read the VHPT. */
            if (vpsr.ic) {
                vcpu_set_isr(v, misr.val);
                dvhpt_fault(v, vadr);
                return IA64_FAULT;
            } else {
                nested_dtlb(v);
                return IA64_FAULT;
            }
        }
    } else if (type == ISIDE_TLB) {

        if (!vpsr.ic)
            misr.ni = 1;
        if (!vhpt_enabled(v, vadr, INST_REF)) {
            vcpu_set_isr(v, misr.val);
            alt_itlb(v, vadr);
            return IA64_FAULT;
        }

        vpta.val = vmx_vcpu_get_pta(v);
        if (vpta.vf) {
            /* Long format is not yet supported. */
            vcpu_set_isr(v, misr.val);
            itlb_fault(v, vadr);
            return IA64_FAULT;
        }

        vhpt_adr = vmx_vcpu_thash(v, vadr);
        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
            /* VHPT successfully read. */
            if (pteval & _PAGE_P) {
                if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
                    vcpu_set_isr(v, misr.val);
                    itlb_fault(v, vadr);
                    return IA64_FAULT;
                }
                vcpu_get_rr(v, vadr, &rr);
                itir = rr & (RR_RID_MASK | RR_PS_MASK);
                thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB);
                return IA64_NO_FAULT;
            } else {
                vcpu_set_isr(v, misr.val);
                inst_page_not_present(v, vadr);
                return IA64_FAULT;
            }
        } else {
            vcpu_set_isr(v, misr.val);
            ivhpt_fault(v, vadr);
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}