ia64/xen-unstable

changeset 11610:9da2d9b48ff8

[IA64] Complete fpswa handler retry mechanism

When handling an fpswa fault, Xen needs to fetch the opcode of the faulting instruction, and that fetch may fail.
This patch completes the retry mechanism for that case.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Sep 26 16:15:45 2006 -0600 (2006-09-26)
parents 7b250cf49e50
children f34e37d0742d
files xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/include/asm-ia64/ia64_int.h xen/include/asm-ia64/vmx_vcpu.h
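Why the fault and trap paths in the patch differ: an IA64 floating-point fault is delivered before the instruction completes, so cr.iip still points at the faulting slot and simply returning retries it, whereas a floating-point trap is delivered after the instruction has completed, so cr.iip has already advanced and must be stepped back one slot before the retry (the new vmx_vcpu_decrement_iip below). A minimal standalone sketch of that slot arithmetic follows; the struct and function names here are illustrative only, not the Xen REGS/IA64_PSR types. An IA64 bundle is 16 bytes and holds three instruction slots selected by psr.ri (0..2):

    #include <stdint.h>

    /* Illustrative stand-in for the iip/psr.ri pair, not a Xen structure. */
    struct slot_pos {
        uint64_t iip;   /* bundle address, 16-byte aligned */
        unsigned ri;    /* restart instruction slot within the bundle, 0..2 */
    };

    /* Step back one slot, wrapping into the last slot of the previous
     * bundle when ri is already 0 (the fp-trap retry case). */
    static void slot_decrement(struct slot_pos *p)
    {
        if (p->ri == 0) {
            p->ri = 2;
            p->iip -= 16;
        } else {
            p->ri--;
        }
    }

    /* Step forward one slot, wrapping into the next bundle after slot 2
     * (the fp-fault "emulation succeeded, skip the instruction" case). */
    static void slot_increment(struct slot_pos *p)
    {
        if (p->ri == 2) {
            p->ri = 0;
            p->iip += 16;
        } else {
            p->ri++;
        }
    }

    int main(void)
    {
        struct slot_pos p = { .iip = 0x1000, .ri = 0 };
        slot_decrement(&p);   /* -> iip 0x0ff0, ri 2 */
        slot_increment(&p);   /* -> iip 0x1000, ri 0 */
        return 0;
    }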
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Sun Sep 24 14:55:57 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Tue Sep 26 16:15:45 2006 -0600
     1.3 @@ -81,6 +81,7 @@ static UINT64 vec2off[68] = {0x0,0x400,0
     1.4  void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
     1.5       UINT64 vector,REGS *regs)
     1.6  {
     1.7 +    UINT64 status;
     1.8      VCPU *vcpu = current;
     1.9      UINT64 vpsr = VCPU(vcpu, vpsr);
    1.10      vector=vec2off[vector];
    1.11 @@ -89,13 +90,23 @@ void vmx_reflect_interruption(UINT64 ifa
    1.12      }
    1.13      else{ // handle fpswa emulation
    1.14          // fp fault
    1.15 -        if(vector == IA64_FP_FAULT_VECTOR && !handle_fpu_swa(1, regs, isr)){
    1.16 -            vmx_vcpu_increment_iip(vcpu);
    1.17 -            return;
    1.18 +        if (vector == IA64_FP_FAULT_VECTOR) {
    1.19 +            status = handle_fpu_swa(1, regs, isr);
    1.20 +            if (!status) {
    1.21 +                vmx_vcpu_increment_iip(vcpu);
    1.22 +                return;
    1.23 +            } else if (IA64_RETRY == status)
    1.24 +                return;
    1.25          }
    1.26          //fp trap
    1.27 -        else if(vector == IA64_FP_TRAP_VECTOR && !handle_fpu_swa(0, regs, isr)){
    1.28 -            return; 
    1.29 +        else if (vector == IA64_FP_TRAP_VECTOR) {
    1.30 +            status = handle_fpu_swa(0, regs, isr);
    1.31 +            if (!status)
    1.32 +                return;
    1.33 +            else if (IA64_RETRY == status) {
    1.34 +                vmx_vcpu_decrement_iip(vcpu);
    1.35 +                return;
    1.36 +            }
    1.37          }
    1.38      }
    1.39      VCPU(vcpu,isr)=isr;
     2.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Sun Sep 24 14:55:57 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Sep 26 16:15:45 2006 -0600
     2.3 @@ -172,6 +172,21 @@ IA64FAULT vmx_vcpu_increment_iip(VCPU *v
     2.4  }
     2.5  
     2.6  
     2.7 +IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu)
     2.8 +{
     2.9 +    REGS *regs = vcpu_regs(vcpu);
    2.10 +    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
    2.11 +    
    2.12 +    if (ipsr->ri == 0) {
    2.13 +        ipsr->ri = 2;
    2.14 +        regs->cr_iip -= 16;
    2.15 +    } else {
    2.16 +        ipsr->ri--;
    2.17 +    }
    2.18 +    return (IA64_NO_FAULT);
    2.19 +}
    2.20 +
    2.21 +
    2.22  IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
    2.23  {
    2.24      REGS *regs = vcpu_regs(vcpu);
     3.1 --- a/xen/include/asm-ia64/ia64_int.h	Sun Sep 24 14:55:57 2006 -0600
     3.2 +++ b/xen/include/asm-ia64/ia64_int.h	Tue Sep 26 16:15:45 2006 -0600
     3.3 @@ -36,7 +36,9 @@
     3.4  #define	IA64_NO_FAULT		0x0000
     3.5  #define IA64_FAULT		        0x0001
     3.6  #define	IA64_RFI_IN_PROGRESS	0x0002
     3.7 -#define IA64_RETRY              0x0003
     3.8 +// To avoid conflicting with return value of handle_fpu_swa()
     3.9 +// set IA64_RETRY to -0x000f 
    3.10 +#define IA64_RETRY		(-0x000f)
    3.11  #define IA64_FORCED_IFA         0x0004
    3.12  #define IA64_USE_TLB		0x0005
    3.13  #define	IA64_ILLOP_FAULT	(IA64_GENEX_VECTOR | 0x00)
     4.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Sun Sep 24 14:55:57 2006 -0600
     4.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue Sep 26 16:15:45 2006 -0600
     4.3 @@ -114,6 +114,7 @@ extern void memwrite_v(VCPU *vcpu, thash
     4.4  extern void memwrite_p(VCPU *vcpu, u64 *src, u64 *dest, size_t s);
     4.5  extern void vcpu_load_kernel_regs(VCPU *vcpu);
     4.6  extern IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu);
     4.7 +extern IA64FAULT vmx_vcpu_decrement_iip(VCPU *vcpu);
     4.8  extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);
     4.9  
    4.10  extern void dtlb_fault (VCPU *vcpu, u64 vadr);