ia64/xen-unstable
changeset 17209:8c921adf4833
[IA64] Raise a fault with unimplemented physical address
An unimplemented data address fault or an unimplemented instruction
address trap should be raised when a guest references an unimplemented
physical address.
Also some cleanups.
Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author    Alex Williamson <alex.williamson@hp.com>
date      Fri Mar 14 15:07:45 2008 -0600 (2008-03-14)
parents   82fa2e6cb592
children  42f6c206c951
files     xen/arch/ia64/vmx/vmx_fault.c
          xen/arch/ia64/vmx/vmx_virt.c
          xen/include/asm-ia64/vmx_vcpu.h
line diff
--- a/xen/arch/ia64/vmx/vmx_fault.c	Fri Mar 14 15:02:12 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c	Fri Mar 14 15:07:45 2008 -0600
@@ -328,6 +328,11 @@ static int vmx_handle_lds(REGS* regs)
     return IA64_FAULT;
 }
 
+static inline int unimpl_phys_addr (u64 paddr)
+{
+    return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
+}
+
 /* We came here because the H/W VHPT walker failed to find an entry */
 IA64FAULT
 vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
@@ -361,12 +366,20 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
         /* DTLB miss. */
         if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
             return vmx_handle_lds(regs);
+        if (unlikely(unimpl_phys_addr(vadr))) {
+            unimpl_daddr(v);
+            return IA64_FAULT;
+        }
         pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
-        /* Clear UC bit in vadr with the shifts. */
         if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
             emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
             return IA64_FAULT;
         }
+    } else {
+        if (unlikely(unimpl_phys_addr(vadr))) {
+            unimpl_iaddr_trap(v, vadr);
+            return IA64_FAULT;
+        }
     }
     physical_tlb_miss(v, vadr, type);
     return IA64_FAULT;
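The new helper hinges on a single predicate: after masking off the
uncacheable (UC) attribute bit, any bits set at or above the processor's
implemented physical address width mark the address as unimplemented. The
standalone sketch below restates that check outside of Xen. The 44-bit
width and the bit-63 UC position are illustrative stand-ins for the values
Xen takes from the processor, and pa_clear_uc() is re-created locally
rather than taken from the Xen headers.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical values for illustration; the real constants live in
 * Xen's ia64 headers and depend on the processor's implemented
 * physical address bits. */
#define MAX_PHYS_ADDR_BITS 44
#define UC_BIT 63  /* uncacheable attribute bit in physical mode */

/* Strip the UC attribute bit so it is not mistaken for an address bit. */
static inline uint64_t pa_clear_uc(uint64_t paddr)
{
    return paddr & ~(1ULL << UC_BIT);
}

/* Non-zero when any bit at or above the implemented range is set. */
static inline int unimpl_phys_addr(uint64_t paddr)
{
    return (pa_clear_uc(paddr) >> MAX_PHYS_ADDR_BITS) != 0;
}

int main(void)
{
    printf("%d\n", unimpl_phys_addr(0x0000000012345678ULL)); /* 0: implemented */
    printf("%d\n", unimpl_phys_addr(0x0001000000000000ULL)); /* 1: bit 48 set */
    printf("%d\n", unimpl_phys_addr(1ULL << 63));            /* 0: UC bit only */
    return 0;
}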
--- a/xen/arch/ia64/vmx/vmx_virt.c	Fri Mar 14 15:02:12 2008 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c	Fri Mar 14 15:07:45 2008 -0600
@@ -277,9 +277,6 @@ static IA64FAULT vmx_emul_ptc_l(VCPU *vc
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -338,9 +335,6 @@ static IA64FAULT vmx_emul_ptc_g(VCPU *vc
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -374,9 +368,6 @@ static IA64FAULT vmx_emul_ptc_ga(VCPU *v
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -411,9 +402,6 @@ static IA64FAULT ptr_fault_check(VCPU *v
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu,r3) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -635,9 +623,6 @@ static IA64FAULT vmx_emul_itr_d(VCPU *vc
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu, ifa)) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -703,9 +688,6 @@ static IA64FAULT vmx_emul_itr_i(VCPU *vc
         return IA64_FAULT;
     }
     if (unimplemented_gva(vcpu, ifa)) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
@@ -764,9 +746,6 @@ static IA64FAULT itc_fault_check(VCPU *v
     }
 #ifdef VMAL_NO_FAULT_CHECK
     if (unimplemented_gva(vcpu,ifa) ) {
-        isr.val = set_isr_ei_ni(vcpu);
-        isr.code = IA64_RESERVED_REG_FAULT;
-        vcpu_set_isr(vcpu, isr.val);
         unimpl_daddr(vcpu);
         return IA64_FAULT;
     }
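These vmx_virt.c hunks are the cleanup half of the patch: all seven call
sites had hand-rolled the same three lines of ISR setup before calling
unimpl_daddr(), and each stamped the ISR with IA64_RESERVED_REG_FAULT,
which is not the right code for an unimplemented data address. A schematic
before/after of the pattern, with the surrounding Xen declarations elided:

/* Before: every caller repeats the ISR setup, with the wrong code. */
if (unimplemented_gva(vcpu, r3)) {
    isr.val  = set_isr_ei_ni(vcpu);
    isr.code = IA64_RESERVED_REG_FAULT;  /* copied to seven call sites */
    vcpu_set_isr(vcpu, isr.val);
    unimpl_daddr(vcpu);
    return IA64_FAULT;
}

/* After: the helper owns the setup (see the vmx_vcpu.h hunk below)
 * and uses the correct IA64_UNIMPL_DADDR_FAULT code. */
if (unimplemented_gva(vcpu, r3)) {
    unimpl_daddr(vcpu);
    return IA64_FAULT;
}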
--- a/xen/include/asm-ia64/vmx_vcpu.h	Fri Mar 14 15:02:12 2008 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h	Fri Mar 14 15:07:45 2008 -0600
@@ -582,6 +582,11 @@ privilege_op (VCPU *vcpu)
 static inline void
 unimpl_daddr (VCPU *vcpu)
 {
+    ISR isr;
+
+    isr.val = set_isr_ei_ni(vcpu);
+    isr.code = IA64_UNIMPL_DADDR_FAULT;
+    vcpu_set_isr(vcpu, isr.val);
     _general_exception(vcpu);
 }
 
@@ -695,4 +700,21 @@ data_access_rights(VCPU *vcpu, u64 vadr)
     set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
     inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
 }
+
+/*
+ * Unimplement Instruction Address Trap
+ * @ Lower-Privilege Transfer Trap Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+static inline void
+unimpl_iaddr_trap (VCPU *vcpu, u64 vadr)
+{
+    ISR isr;
+
+    isr.val = set_isr_ei_ni(vcpu);
+    isr.code = IA64_UNIMPL_IADDR_TRAP;
+    vcpu_set_isr(vcpu, isr.val);
+    vcpu_set_ifa(vcpu, vadr);
+    inject_guest_interruption(vcpu, IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR);
+}
 #endif
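Taken together, the two helpers give vmx_hpw_miss() distinct delivery
paths for the two architectural cases: a data-side reference to an
unimplemented physical address raises an Unimplemented Data Address fault
through the General Exception vector, while an instruction-side reference
raises an Unimplemented Instruction Address trap through the
Lower-Privilege Transfer Trap vector, with IFA set to the offending
address. A condensed, illustrative restatement of the dispatch added in
the vmx_fault.c hunk (the DSIDE_TLB test stands in for the surrounding
if/else structure):

/* Illustrative condensation of the checks added to vmx_hpw_miss(). */
if (unlikely(unimpl_phys_addr(vadr))) {
    if (type == DSIDE_TLB)
        unimpl_daddr(v);             /* fault: General Exception vector,
                                        ISR.code = IA64_UNIMPL_DADDR_FAULT */
    else
        unimpl_iaddr_trap(v, vadr);  /* trap: Lower-Privilege Transfer Trap
                                        vector, ISR.code = IA64_UNIMPL_IADDR_TRAP,
                                        IFA = faulting address */
    return IA64_FAULT;
}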