ia64/xen-unstable

changeset 10697:4834d1e8f26e

[IA64] optimize vpsr

vpsr can't keep track of the following bits of the guest psr:
be, up, ac, mfl, mfh, cpl, ri.
Previously, every time Xen got control it synced vpsr with
cr.ipsr, which is not necessary.
Xen now syncs with cr.ipsr only when needed.
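
Concretely, callers that don't need those untracked bits read the shadow
value directly, while the new vmx_vcpu_get_psr() merges the live bits in
from cr.ipsr on demand. A minimal illustrative sketch of the two read
paths (not part of the patch; the real definitions are in the diff below,
and full_psr is only a placeholder name):

    /* cheap path: tracked bits only, e.g. testing vpsr.i or vpsr.ic */
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    /* full path: pull be/up/ac/mfl/mfh/cpl/ri from the live cr.ipsr,
     * as the new vmx_vcpu_get_psr() in vmx_vcpu.c does */
    UINT64 mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
                  IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    UINT64 full_psr = (VCPU(vcpu, vpsr) & ~mask)
                    | (vcpu_regs(vcpu)->cr_ipsr & mask);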

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Fri Jul 14 11:05:40 2006 -0600 (2006-07-14)
parents 000789c36d28
children c4af6e854010
files xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_interrupt.c xen/arch/ia64/vmx/vmx_minstate.h xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_utility.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c
line diff
     1.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Wed Jul 12 13:26:09 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Fri Jul 14 11:05:40 2006 -0600
     1.3 @@ -570,7 +570,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
     1.4      }
     1.5      h_inservice = highest_inservice_irq(vcpu);
     1.6  
     1.7 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
     1.8 +    vpsr.val = VCPU(vcpu, vpsr);
     1.9      mask = irq_masked(vcpu, h_pending, h_inservice);
    1.10      if (  vpsr.i && IRQ_NO_MASKED == mask ) {
    1.11          isr = vpsr.val & IA64_PSR_RI;
    1.12 @@ -654,7 +654,7 @@ static void generate_exirq(VCPU *vcpu)
    1.13      IA64_PSR    vpsr;
    1.14      uint64_t    isr;
    1.15      REGS *regs=vcpu_regs(vcpu);
    1.16 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    1.17 +    vpsr.val = VCPU(vcpu, vpsr);
    1.18      update_vhpi(vcpu, NULL_VECTOR);
    1.19      isr = vpsr.val & IA64_PSR_RI;
    1.20      if ( !vpsr.ic )
    1.21 @@ -668,7 +668,7 @@ void vhpi_detection(VCPU *vcpu)
    1.22      tpr_t       vtpr;
    1.23      IA64_PSR    vpsr;
    1.24      
    1.25 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    1.26 +    vpsr.val = VCPU(vcpu, vpsr);
    1.27      vtpr.val = VCPU(vcpu, tpr);
    1.28  
    1.29      threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
     2.1 --- a/xen/arch/ia64/vmx/vmmu.c	Wed Jul 12 13:26:09 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Fri Jul 14 11:05:40 2006 -0600
     2.3 @@ -268,7 +268,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
     2.4      PTA   vpta;
     2.5      IA64_PSR  vpsr; 
     2.6  
     2.7 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
     2.8 +    vpsr.val = VCPU(vcpu, vpsr);
     2.9      vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    2.10      vmx_vcpu_get_pta(vcpu,&vpta.val);
    2.11  
    2.12 @@ -622,7 +622,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, UINT6
    2.13      visr.val=0;
    2.14      visr.ei=pt_isr.ei;
    2.15      visr.ir=pt_isr.ir;
    2.16 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    2.17 +    vpsr.val = VCPU(vcpu, vpsr);
    2.18      if(vpsr.ic==0){
    2.19          visr.ni=1;
    2.20      }
     3.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Wed Jul 12 13:26:09 2006 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Fri Jul 14 11:05:40 2006 -0600
     3.3 @@ -117,7 +117,7 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
     3.4  {
     3.5      IA64_PSR vpsr;
     3.6      u64 value;
     3.7 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
     3.8 +    vpsr.val = VCPU(vcpu, vpsr);
     3.9      /* Vol2, Table 8-1 */
    3.10      if ( vpsr.ic ) {
    3.11          if ( set_ifa){
     4.1 --- a/xen/arch/ia64/vmx/vmx_minstate.h	Wed Jul 12 13:26:09 2006 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_minstate.h	Fri Jul 14 11:05:40 2006 -0600
     4.3 @@ -57,8 +57,8 @@
     4.4      ;;
     4.5  
     4.6  
     4.7 -#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
     4.8 -    /* begin to call pal vps sync_read and cleanup psr.pl */     \
     4.9 +#define PAL_VSA_SYNC_READ           \
    4.10 +    /* begin to call pal vps sync_read */     \
    4.11      add r25=IA64_VPD_BASE_OFFSET, r21;       \
    4.12      movl r20=__vsa_base;     \
    4.13      ;;          \
    4.14 @@ -68,31 +68,17 @@
    4.15      add r20=PAL_VPS_SYNC_READ,r20;  \
    4.16      ;;  \
    4.17  { .mii;  \
    4.18 -    add r22=VPD(VPSR),r25;   \
    4.19 +    nop 0x0;   \
    4.20      mov r24=ip;        \
    4.21      mov b0=r20;     \
    4.22      ;;      \
    4.23  };           \
    4.24  { .mmb;      \
    4.25      add r24 = 0x20, r24;    \
    4.26 -    mov r16 = cr.ipsr;  /* Temp workaround since psr.ic is off */ \
    4.27 +    nop 0x0;   	 \
    4.28      br.cond.sptk b0;        /*  call the service */ \
    4.29      ;;              \
    4.30  };           \
    4.31 -    ld8 r17=[r22];   \
    4.32 -    /* deposite ipsr bit cpl into vpd.vpsr, since epc will change */    \
    4.33 -    extr.u r30=r16, IA64_PSR_CPL0_BIT, 2;   \
    4.34 -    ;;      \
    4.35 -    dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2;   \
    4.36 -    extr.u r30=r16, IA64_PSR_BE_BIT, 5;   \
    4.37 -    ;;      \
    4.38 -    dep r17=r30, r17, IA64_PSR_BE_BIT, 5;   \
    4.39 -    extr.u r30=r16, IA64_PSR_RI_BIT, 2;   \
    4.40 -    ;;      \
    4.41 -    dep r17=r30, r17, IA64_PSR_RI_BIT, 2;   \
    4.42 -    ;;      \
    4.43 -    st8 [r22]=r17;      \
    4.44 -    ;;
    4.45  
    4.46  
    4.47  
    4.48 @@ -219,7 +205,7 @@
    4.49      movl r11=FPSR_DEFAULT;   /* L-unit */                           \
    4.50      movl r1=__gp;       /* establish kernel global pointer */               \
    4.51      ;;                                          \
    4.52 -    PAL_VSA_SYNC_READ_CLEANUP_PSR_PL           \
    4.53 +    PAL_VSA_SYNC_READ           \
    4.54      VMX_MINSTATE_END_SAVE_MIN
    4.55  
    4.56  /*
     5.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Wed Jul 12 13:26:09 2006 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Jul 14 11:05:40 2006 -0600
     5.3 @@ -110,10 +110,8 @@ void
     5.4  physical_tlb_miss(VCPU *vcpu, u64 vadr)
     5.5  {
     5.6      u64 pte;
     5.7 -    IA64_PSR vpsr;
     5.8 -    vpsr.val=vmx_vcpu_get_psr(vcpu);
     5.9      pte =  vadr& _PAGE_PPN_MASK;
    5.10 -    pte = pte|(vpsr.cpl<<7)|PHY_PAGE_WB;
    5.11 +    pte = pte | PHY_PAGE_WB;
    5.12      thash_purge_and_insert(vcpu, pte, (PAGE_SHIFT<<2), vadr);
    5.13      return;
    5.14  }
     6.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Wed Jul 12 13:26:09 2006 -0600
     6.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Fri Jul 14 11:05:40 2006 -0600
     6.3 @@ -82,7 +82,7 @@ void vmx_reflect_interruption(UINT64 ifa
     6.4       UINT64 vector,REGS *regs)
     6.5  {
     6.6      VCPU *vcpu = current;
     6.7 -    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
     6.8 +    UINT64 vpsr = VCPU(vcpu, vpsr);
     6.9      vector=vec2off[vector];
    6.10      if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
    6.11          panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
    6.12 @@ -156,7 +156,7 @@ void save_banked_regs_to_vpd(VCPU *v, RE
    6.13      IA64_PSR vpsr;
    6.14      src=&regs->r16;
    6.15      sunat=&regs->eml_unat;
    6.16 -    vpsr.val = vmx_vcpu_get_psr(v);
    6.17 +    vpsr.val = VCPU(v, vpsr);
    6.18      if(vpsr.bn){
    6.19          dst = &VCPU(v, vgr[0]);
    6.20          dunat =&VCPU(v, vnat);
    6.21 @@ -253,7 +253,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    6.22      check_vtlb_sanity(vtlb);
    6.23      dump_vtlb(vtlb);
    6.24  #endif
    6.25 -    vpsr.val = vmx_vcpu_get_psr(v);
    6.26 +    vpsr.val = VCPU(v, vpsr);
    6.27      misr.val=VMX(v,cr_isr);
    6.28  
    6.29      if(is_physical_mode(v)&&(!(vadr<<1>>62))){
     7.1 --- a/xen/arch/ia64/vmx/vmx_utility.c	Wed Jul 12 13:26:09 2006 -0600
     7.2 +++ b/xen/arch/ia64/vmx/vmx_utility.c	Fri Jul 14 11:05:40 2006 -0600
     7.3 @@ -381,7 +381,7 @@ set_isr_ei_ni (VCPU *vcpu)
     7.4  
     7.5      visr.val = 0;
     7.6  
     7.7 -    vpsr.val = vmx_vcpu_get_psr (vcpu);
     7.8 +    vpsr.val = VCPU(vcpu, vpsr);
     7.9  
    7.10      if (!vpsr.ic == 1 ) {
    7.11          /* Set ISR.ni */
     8.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Wed Jul 12 13:26:09 2006 -0600
     8.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Jul 14 11:05:40 2006 -0600
     8.3 @@ -67,6 +67,8 @@
     8.4  #include <asm/vmx_pal_vsa.h>
     8.5  #include <asm/kregs.h>
     8.6  //unsigned long last_guest_rsm = 0x0;
     8.7 +
     8.8 +#ifdef	VTI_DEBUG
     8.9  struct guest_psr_bundle{
    8.10      unsigned long ip;
    8.11      unsigned long psr;
    8.12 @@ -74,6 +76,7 @@ struct guest_psr_bundle{
    8.13  
    8.14  struct guest_psr_bundle guest_psr_buf[100];
    8.15  unsigned long guest_psr_index = 0;
    8.16 +#endif
    8.17  
    8.18  void
    8.19  vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
    8.20 @@ -82,7 +85,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
    8.21      UINT64 mask;
    8.22      REGS *regs;
    8.23      IA64_PSR old_psr, new_psr;
    8.24 -    old_psr.val=vmx_vcpu_get_psr(vcpu);
    8.25 +    old_psr.val=VCPU(vcpu, vpsr);
    8.26  
    8.27      regs=vcpu_regs(vcpu);
    8.28      /* We only support guest as:
    8.29 @@ -108,7 +111,8 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
    8.30          // vpsr.i 0->1
    8.31          vcpu->arch.irq_new_condition = 1;
    8.32      }
    8.33 -    new_psr.val=vmx_vcpu_get_psr(vcpu);
    8.34 +    new_psr.val=VCPU(vcpu, vpsr);
    8.35 +#ifdef	VTI_DEBUG    
    8.36      {
    8.37      struct pt_regs *regs = vcpu_regs(vcpu);
    8.38      guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    8.39 @@ -116,6 +120,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
    8.40      if (++guest_psr_index >= 100)
    8.41          guest_psr_index = 0;
    8.42      }
    8.43 +#endif    
    8.44  #if 0
    8.45      if (old_psr.i != new_psr.i) {
    8.46      if (old_psr.i)
    8.47 @@ -149,25 +154,15 @@ IA64FAULT vmx_vcpu_increment_iip(VCPU *v
    8.48  {
    8.49      // TODO: trap_bounce?? Eddie
    8.50      REGS *regs = vcpu_regs(vcpu);
    8.51 -    IA64_PSR vpsr;
    8.52      IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;
    8.53  
    8.54 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    8.55 -    if (vpsr.ri == 2) {
    8.56 -    vpsr.ri = 0;
    8.57 -    regs->cr_iip += 16;
    8.58 +    if (ipsr->ri == 2) {
    8.59 +        ipsr->ri = 0;
    8.60 +        regs->cr_iip += 16;
    8.61      } else {
    8.62 -    vpsr.ri++;
    8.63 +        ipsr->ri++;
    8.64      }
    8.65  
    8.66 -    ipsr->ri = vpsr.ri;
    8.67 -    vpsr.val &=
    8.68 -            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
    8.69 -                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
    8.70 -            ));
    8.71 -
    8.72 -    VCPU(vcpu, vpsr) = vpsr.val;
    8.73 -
    8.74      ipsr->val &=
    8.75              (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
    8.76                  IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
    8.77 @@ -181,7 +176,7 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
    8.78  {
    8.79      REGS *regs = vcpu_regs(vcpu);
    8.80      IA64_PSR vpsr;
    8.81 -    vpsr.val = vmx_vcpu_get_psr(vcpu);
    8.82 +    vpsr.val = VCPU(vcpu, vpsr);
    8.83  
    8.84      if(!vpsr.ic)
    8.85          VCPU(vcpu,ifs) = regs->cr_ifs;
    8.86 @@ -287,12 +282,6 @@ IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
    8.87  }
    8.88  
    8.89  
    8.90 -UINT64
    8.91 -vmx_vcpu_get_psr(VCPU *vcpu)
    8.92 -{
    8.93 -    return VCPU(vcpu,vpsr);
    8.94 -}
    8.95 -
    8.96  #if 0
    8.97  IA64FAULT
    8.98  vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
    8.99 @@ -390,6 +379,20 @@ vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg
   8.100  
   8.101  #endif
   8.102  
   8.103 +/*
   8.104 +    VPSR can't keep track of below bits of guest PSR
   8.105 +    This function gets guest PSR
   8.106 + */
   8.107 +
   8.108 +UINT64 vmx_vcpu_get_psr(VCPU *vcpu)
   8.109 +{
   8.110 +    UINT64 mask;
   8.111 +    REGS *regs = vcpu_regs(vcpu);
   8.112 +    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
   8.113 +           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
   8.114 +    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
   8.115 +}
   8.116 +
   8.117  IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
   8.118  {
   8.119      UINT64 vpsr;
   8.120 @@ -412,6 +415,7 @@ IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu
   8.121  
   8.122  IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
   8.123  {
   8.124 +    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
   8.125      vmx_vcpu_set_psr(vcpu, val);
   8.126      return IA64_NO_FAULT;
   8.127  }
     9.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Wed Jul 12 13:26:09 2006 -0600
     9.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Fri Jul 14 11:05:40 2006 -0600
     9.3 @@ -154,7 +154,6 @@ IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST6
     9.4      return vmx_vcpu_set_psr_sm(vcpu,imm24);
     9.5  }
     9.6  
     9.7 -unsigned long last_guest_psr = 0x0;
     9.8  IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
     9.9  {
    9.10      UINT64 tgt = inst.M33.r1;
    9.11 @@ -167,7 +166,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
    9.12      */
    9.13      val = vmx_vcpu_get_psr(vcpu);
    9.14      val = (val & MASK(0, 32)) | (val & MASK(35, 2));
    9.15 -    last_guest_psr = val;
    9.16      return vcpu_set_gr(vcpu, tgt, val, 0);
    9.17  }
    9.18  
    9.19 @@ -181,14 +179,7 @@ IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu
    9.20      if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
    9.21  	panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
    9.22  
    9.23 -	val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
    9.24 -#if 0
    9.25 -	if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
    9.26 -		while(1);
    9.27 -	else
    9.28 -		last_mov_from_psr = 0;
    9.29 -#endif
    9.30 -        return vmx_vcpu_set_psr_l(vcpu,val);
    9.31 +    return vmx_vcpu_set_psr_l(vcpu, val);
    9.32  }
    9.33  
    9.34  
    9.35 @@ -256,6 +247,7 @@ IA64FAULT vmx_emul_cover(VCPU *vcpu, INS
    9.36  IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
    9.37  {
    9.38      u64 r2,r3;
    9.39 +#ifdef  VMAL_NO_FAULT_CHECK
    9.40      IA64_PSR  vpsr;
    9.41  
    9.42      vpsr.val=vmx_vcpu_get_psr(vcpu);
    9.43 @@ -265,6 +257,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
    9.44          privilege_op (vcpu);
    9.45          return IA64_FAULT;
    9.46      }
    9.47 +#endif // VMAL_NO_FAULT_CHECK
    9.48      if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
    9.49  #ifdef  VMAL_NO_FAULT_CHECK
    9.50          ISR isr;
    9.51 @@ -288,10 +281,10 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
    9.52  IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
    9.53  {
    9.54      u64 r3;
    9.55 +#ifdef  VMAL_NO_FAULT_CHECK
    9.56      IA64_PSR  vpsr;
    9.57  
    9.58      vpsr.val=vmx_vcpu_get_psr(vcpu);
    9.59 -#ifdef  VMAL_NO_FAULT_CHECK
    9.60      ISR isr;
    9.61      if ( vpsr.cpl != 0) {
    9.62          /* Inject Privileged Operation fault into guest */
    9.63 @@ -574,6 +567,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6
    9.64  IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
    9.65  {
    9.66      UINT64 itir, ifa, pte, slot;
    9.67 +#ifdef  VMAL_NO_FAULT_CHECK
    9.68      IA64_PSR  vpsr;
    9.69      vpsr.val=vmx_vcpu_get_psr(vcpu);
    9.70      if ( vpsr.ic ) {
    9.71 @@ -581,7 +575,6 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
    9.72          illegal_op(vcpu);
    9.73          return IA64_FAULT;
    9.74      }
    9.75 -#ifdef  VMAL_NO_FAULT_CHECK
    9.76      ISR isr;
    9.77      if ( vpsr.cpl != 0) {
    9.78          /* Inject Privileged Operation fault into guest */
    9.79 @@ -633,7 +626,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
    9.80      UINT64 itir, ifa, pte, slot;
    9.81  #ifdef  VMAL_NO_FAULT_CHECK
    9.82      ISR isr;
    9.83 -#endif
    9.84      IA64_PSR  vpsr;
    9.85      vpsr.val=vmx_vcpu_get_psr(vcpu);
    9.86      if ( vpsr.ic ) {
    9.87 @@ -641,7 +633,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
    9.88          illegal_op(vcpu);
    9.89          return IA64_FAULT;
    9.90      }
    9.91 -#ifdef  VMAL_NO_FAULT_CHECK
    9.92      if ( vpsr.cpl != 0) {
    9.93          /* Inject Privileged Operation fault into guest */
    9.94          set_privileged_operation_isr (vcpu, 0);
    9.95 @@ -689,9 +680,10 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS
    9.96  
    9.97  IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
    9.98  {
    9.99 -    IA64_PSR  vpsr;
   9.100      IA64FAULT	ret1;
   9.101  
   9.102 +#ifdef  VMAL_NO_FAULT_CHECK
   9.103 +    IA64_PSR  vpsr;
   9.104      vpsr.val=vmx_vcpu_get_psr(vcpu);
   9.105      if ( vpsr.ic ) {
   9.106          set_illegal_op_isr(vcpu);
   9.107 @@ -699,7 +691,6 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
   9.108          return IA64_FAULT;
   9.109      }
   9.110  
   9.111 -#ifdef  VMAL_NO_FAULT_CHECK
   9.112      UINT64 fault;
   9.113      ISR isr;
   9.114      if ( vpsr.cpl != 0) {