ia64/xen-unstable
changeset 16175:85613b8c4176
[IA64] Handle phy_d mode in vmx_hpw_miss
Handle PHY_D mmu mode.
Indentation.
Avoid code duplication in vmx_hpw_miss.
Signed-off-by: Tristan Gingold <tgingold@free.fr>
author      Alex Williamson <alex.williamson@hp.com>
date        Sun Oct 21 15:55:10 2007 -0600 (2007-10-21)
parents     1e27eb0c9f22
children    da8e527d20bd
files       xen/arch/ia64/vmx/vmx_fault.c
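
Note on the change: the new condition in vmx_hpw_miss (shown in the diff below) distinguishes the two physical MMU modes. In VMX_MMU_PHY_DT every miss in region 0 or 4 takes the physical-mode path, while in VMX_MMU_PHY_D only data-side misses do, since instruction fetches still go through the virtual path. The sketch below restates that test in isolation; it is illustrative only and assumes the VMX_MMU_*, ISIDE_TLB/DSIDE_TLB and u64 definitions used elsewhere in xen/arch/ia64/vmx (the helper phys_mode_miss is hypothetical, not part of the patch).

/* Illustrative only: restates the mode/region test added to vmx_hpw_miss.
 * Region 0 or 4 means bits 62:61 of the address are zero (bit 63 may be
 * either, selecting region 0 or region 4). */
static int phys_mode_miss(unsigned int mmu_mode, int type, u64 vadr)
{
    if ((vadr << 1) >> 62)          /* not region 0 or 4 */
        return 0;
    return mmu_mode == VMX_MMU_PHY_DT
           || (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB);
}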
line diff
--- a/xen/arch/ia64/vmx/vmx_fault.c	Sun Oct 21 15:52:25 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c	Sun Oct 21 15:55:10 2007 -0600
@@ -66,7 +66,7 @@ extern unsigned long handle_fpu_swa (int
 #define DOMN_PAL_REQUEST    0x110000
 #define DOMN_SAL_REQUEST    0x110001
 
-static u64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
+static const u16 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
     0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
     0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
     0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
@@ -155,13 +155,12 @@ void vmx_reflect_interruption(u64 ifa, u
         goto nested_fault;
         break;
     }
-    VCPU(vcpu,isr)=isr;
+    VCPU(vcpu,isr) = isr;
     VCPU(vcpu,iipa) = regs->cr_iip;
     if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
         VCPU(vcpu,iim) = iim;
-    else {
-        set_ifa_itir_iha(vcpu,ifa,1,1,1);
-    }
+    else
+        set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
     inject_guest_interruption(vcpu, vector);
     return;
 
@@ -196,17 +195,17 @@ vmx_ia64_handle_break (unsigned long ifa
                 vcpu_increment_iip(v);
                 return IA64_NO_FAULT;
             }
-            else if(iim == DOMN_PAL_REQUEST){
+            else if (iim == DOMN_PAL_REQUEST) {
                 pal_emul(v);
                 vcpu_increment_iip(v);
                 return IA64_NO_FAULT;
-            }else if(iim == DOMN_SAL_REQUEST){
+            } else if (iim == DOMN_SAL_REQUEST) {
                 sal_emul(v);
                 vcpu_increment_iip(v);
                 return IA64_NO_FAULT;
             }
         }
-        vmx_reflect_interruption(ifa,isr,iim,11,regs);
+        vmx_reflect_interruption(ifa, isr, iim, 11, regs);
     }
     return IA64_NO_FAULT;
 }
@@ -306,7 +305,7 @@ void leave_hypervisor_tail(void)
 
 static int vmx_handle_lds(REGS* regs)
 {
-    regs->cr_ipsr |=IA64_PSR_ED;
+    regs->cr_ipsr |= IA64_PSR_ED;
     return IA64_FAULT;
 }
 
@@ -316,6 +315,7 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
 {
     IA64_PSR vpsr;
     int type;
+    unsigned int mmu_mode;
     u64 vhpt_adr, gppa, pteval, rr, itir;
     ISR misr;
     PTA vpta;
@@ -325,22 +325,26 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
     vpsr.val = VCPU(v, vpsr);
     misr.val = VMX(v,cr_isr);
 
-    if (vec == 1)
+    if (vec == 1 || vec == 3)
         type = ISIDE_TLB;
-    else if (vec == 2)
+    else if (vec == 2 || vec == 4)
         type = DSIDE_TLB;
     else
         panic_domain(regs, "wrong vec:%lx\n", vec);
 
     /* Physical mode and region is 0 or 4. */
-    if (!is_virtual_mode(v) && (!((vadr << 1) >> 62))) {
-        if (vec == 2) {
+    mmu_mode = VMX_MMU_MODE(v);
+    if ((mmu_mode == VMX_MMU_PHY_DT
+         || (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))
+        && !((vadr<<1)>>62)) {
+        if (type == DSIDE_TLB) {
             /* DTLB miss. */
             if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
                 return vmx_handle_lds(regs);
+            /* Clear UC bit in vadr with the shifts.  */
             if (v->domain != dom0
                 && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
-                emulate_io_inst(v, ((vadr<<1)>>1),4); // UC
+                emulate_io_inst(v, ((vadr << 1) >> 1), 4);
                 return IA64_FAULT;
             }
         }
@@ -411,64 +415,35 @@ try_again:
             if (vpsr.ic) {
                 vcpu_set_isr(v, misr.val);
                 alt_dtlb(v, vadr);
-                return IA64_FAULT;
             } else {
                 nested_dtlb(v);
-                return IA64_FAULT;
             }
+            return IA64_FAULT;
         }
 
         vpta.val = vmx_vcpu_get_pta(v);
         if (vpta.vf) {
             /* Long format is not yet supported. */
-            if (vpsr.ic) {
-                vcpu_set_isr(v, misr.val);
-                dtlb_fault(v, vadr);
-                return IA64_FAULT;
-            } else {
-                nested_dtlb(v);
-                return IA64_FAULT;
-            }
+            goto inject_dtlb_fault;
         }
 
         /* avoid recursively walking (short format) VHPT */
         if (!(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4) &&
             !(optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5) &&
             (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
-
-            if (vpsr.ic) {
-                vcpu_set_isr(v, misr.val);
-                dtlb_fault(v, vadr);
-                return IA64_FAULT;
-            } else {
-                nested_dtlb(v);
-                return IA64_FAULT;
-            }
+            goto inject_dtlb_fault;
         }
 
         vhpt_adr = vmx_vcpu_thash(v, vadr);
         if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
             /* VHPT successfully read. */
             if (!(pteval & _PAGE_P)) {
-                if (vpsr.ic) {
-                    vcpu_set_isr(v, misr.val);
-                    dtlb_fault(v, vadr);
-                    return IA64_FAULT;
-                } else {
-                    nested_dtlb(v);
-                    return IA64_FAULT;
-                }
+                goto inject_dtlb_fault;
             } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
                 thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
-                return IA64_NO_FAULT;
-            } else if (vpsr.ic) {
-                vcpu_set_isr(v, misr.val);
-                dtlb_fault(v, vadr);
-                return IA64_FAULT;
-            } else {
-                nested_dtlb(v);
-                return IA64_FAULT;
+                return IA64_NO_FAULT;
             }
+            goto inject_dtlb_fault;
         } else {
             /* Can't read VHPT. */
             if (vpsr.ic) {
@@ -484,6 +459,13 @@ try_again:
 
         if (!vpsr.ic)
             misr.ni = 1;
+
+        /* Don't bother with PHY_D mode (will require rr0+rr4 switches,
+           and certainly used only within nested TLB handler (hence TR mapped
+           and ic=0). */
+        if (mmu_mode == VMX_MMU_PHY_D)
+            goto inject_itlb_fault;
+
         if (!vhpt_enabled(v, vadr, INST_REF)) {
             vcpu_set_isr(v, misr.val);
             alt_itlb(v, vadr);
@@ -493,9 +475,7 @@ try_again:
         vpta.val = vmx_vcpu_get_pta(v);
         if (vpta.vf) {
             /* Long format is not yet supported. */
-            vcpu_set_isr(v, misr.val);
-            itlb_fault(v, vadr);
-            return IA64_FAULT;
+            goto inject_itlb_fault;
         }
 
 
@@ -504,9 +484,7 @@ try_again:
             /* VHPT successfully read.  */
             if (pteval & _PAGE_P) {
                 if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
-                    vcpu_set_isr(v, misr.val);
-                    itlb_fault(v, vadr);
-                    return IA64_FAULT;
+                    goto inject_itlb_fault;
                 }
                 vcpu_get_rr(v, vadr, &rr);
                 itir = rr & (RR_RID_MASK | RR_PS_MASK);
@@ -524,4 +502,18 @@ try_again:
         }
     }
     return IA64_NO_FAULT;
+
+ inject_dtlb_fault:
+    if (vpsr.ic) {
+        vcpu_set_isr(v, misr.val);
+        dtlb_fault(v, vadr);
+    } else
+        nested_dtlb(v);
+
+    return IA64_FAULT;
+
+ inject_itlb_fault:
+    vcpu_set_isr(v, misr.val);
+    itlb_fault(v, vadr);
+    return IA64_FAULT;
 }
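
Two remarks on the rewritten vmx_hpw_miss, offered as a reading of the code rather than as part of the changeset. First, most of the shrinkage (the main hunk drops from 64 lines to 35) comes from funnelling the repeated "if vpsr.ic, set the ISR and raise dtlb_fault, else nested_dtlb" sequences through the new inject_dtlb_fault and inject_itlb_fault labels. Second, the shift idioms in the d-side physical path drop bit 63, which selects the uncacheable (UC) attribute when translation is off, before the address is used as a guest frame number or handed to the I/O emulator. Below is a minimal sketch of those idioms; it assumes 16KB pages (PAGE_SHIFT = 14) and the helper names are hypothetical.

#include <stdint.h>

#define PAGE_SHIFT 14   /* assumption: default ia64 page size of 16KB */

/* Drop bit 63, the UC attribute bit of a physical-mode address. */
static inline uint64_t strip_uc_bit(uint64_t vadr)
{
    return (vadr << 1) >> 1;
}

/* Guest page frame number of the access, with the UC bit dropped. */
static inline uint64_t gpfn_of(uint64_t vadr)
{
    return (vadr << 1) >> (PAGE_SHIFT + 1);
}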