ia64/xen-unstable

changeset 15009:6cf6f49f26ab

[IA64] Optimize some functions

Optimize some functions by changing their parameter passing mode from pointer to value:
results previously written through a pointer out-parameter are now returned directly.
This eliminates redundant memory accesses.
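
In effect each converted accessor drops its pointer out-parameter along with the
IA64_NO_FAULT status code it unconditionally returned. A minimal C sketch of the
pattern, using a hypothetical register field "reg" to stand in for the real
vmx_vcpu_get_* accessors changed below:

    /* Before: the value is stored through *pval and a status code is
       returned, even though the status is always IA64_NO_FAULT. */
    static inline IA64FAULT vmx_vcpu_get_reg(VCPU *vcpu, u64 *pval)
    {
        *pval = VCPU(vcpu, reg);   /* forces a store through the pointer */
        return IA64_NO_FAULT;
    }

    /* After: the value is returned directly, so it can stay in a
       register instead of bouncing through a memory slot in the caller. */
    static inline u64 vmx_vcpu_get_reg(VCPU *vcpu)
    {
        return (u64)VCPU(vcpu, reg);
    }

    /* Call sites change accordingly:
     *   before:  vmx_vcpu_get_reg(vcpu, &val);
     *   after:   val = vmx_vcpu_get_reg(vcpu);
     */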

Signed-off-by: Xu, Anthony <Anthony.xu@intel.com>
Signed-off-by: Zhang Xin <xing.z.zhang@intel.com>
author Alex Williamson <alex.williamson@hp.com>
date Thu May 03 13:55:28 2007 -0600 (2007-05-03)
parents 2b653a785fb8
children 8924215a5f95
files xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_interrupt.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmmu.c	Thu May 03 13:36:06 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Thu May 03 13:55:28 2007 -0600
     1.3 @@ -295,7 +295,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
     1.4  
     1.5      vpsr.val = VCPU(vcpu, vpsr);
     1.6      vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     1.7 -    vmx_vcpu_get_pta(vcpu,&vpta.val);
     1.8 +    vpta.val = vmx_vcpu_get_pta(vcpu);
     1.9  
    1.10      if ( vrr.ve & vpta.ve ) {
    1.11          switch ( ref ) {
    1.12 @@ -629,38 +629,41 @@ again: /* Try again if VCPU has migrated
    1.13  }
    1.14  
    1.15  
    1.16 -IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
    1.17 +u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
    1.18  {
    1.19      PTA vpta;
    1.20      ia64_rr vrr;
    1.21 +    u64 pval;
    1.22      u64 vhpt_offset;
    1.23 -    vmx_vcpu_get_pta(vcpu, &vpta.val);
    1.24 +    vpta.val = vmx_vcpu_get_pta(vcpu);
    1.25      vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    1.26      if(vpta.vf){
    1.27 -        *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
    1.28 -        *pval = vpta.val & ~0xffff;
    1.29 +        pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
    1.30 +                             vpta.val, 0, 0, 0, 0);
    1.31 +        pval = vpta.val & ~0xffff;
    1.32      }else{
    1.33          vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
    1.34 -        *pval = (vadr&VRN_MASK)|
    1.35 +        pval = (vadr & VRN_MASK) |
    1.36              (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
    1.37              vhpt_offset;
    1.38      }
    1.39 -    return  IA64_NO_FAULT;
    1.40 +    return  pval;
    1.41  }
    1.42  
    1.43  
    1.44 -IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
    1.45 +u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
    1.46  {
    1.47      ia64_rr vrr;
    1.48      PTA vpta;
    1.49 -    vmx_vcpu_get_pta(vcpu, &vpta.val);
    1.50 +    u64 pval;
    1.51 +    vpta.val = vmx_vcpu_get_pta(vcpu);
    1.52      vcpu_get_rr(vcpu, vadr, &vrr.rrval);
    1.53      if(vpta.vf){
    1.54 -        *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
    1.55 +        pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
    1.56      }else{
    1.57 -        *pval = 1;
    1.58 +        pval = 1;
    1.59      }
    1.60 -    return  IA64_NO_FAULT;
    1.61 +    return  pval;
    1.62  }
    1.63  
    1.64  
    1.65 @@ -725,7 +728,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
    1.66              }
    1.67          }
    1.68          else{
    1.69 -            vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
    1.70 +            vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
    1.71              data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
    1.72              if(data){
    1.73                  if(vpsr.ic){
    1.74 @@ -753,20 +756,21 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
    1.75      }
    1.76  }
    1.77  
    1.78 -IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
    1.79 +u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
    1.80  {
    1.81      thash_data_t *data;
    1.82      PTA vpta;
    1.83 -    vmx_vcpu_get_pta(vcpu, &vpta.val);
    1.84 +    u64 key;
    1.85 +    vpta.val = vmx_vcpu_get_pta(vcpu);
    1.86      if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
    1.87 -        *key=1;
    1.88 -        return IA64_NO_FAULT;
    1.89 +        key=1;
    1.90 +        return key;
    1.91      }
    1.92      data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
    1.93      if(!data||!data->p){
    1.94 -        *key=1;
    1.95 +        key = 1;
    1.96      }else{
    1.97 -        *key=data->key;
    1.98 +        key = data->key;
    1.99      }
   1.100 -    return IA64_NO_FAULT;
   1.101 +    return key;
   1.102  }
     2.1 --- a/xen/arch/ia64/vmx/vmx_interrupt.c	Thu May 03 13:36:06 2007 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Thu May 03 13:55:28 2007 -0600
     2.3 @@ -105,7 +105,7 @@ inject_guest_interruption(VCPU *vcpu, u6
     2.4      collect_interruption(vcpu);
     2.5      vmx_ia64_set_dcr(vcpu);
     2.6  
     2.7 -    vmx_vcpu_get_iva(vcpu,&viva);
     2.8 +    viva = vmx_vcpu_get_iva(vcpu);
     2.9      regs->cr_iip = viva + vec;
    2.10  }
    2.11  
    2.12 @@ -135,7 +135,7 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
    2.13          }
    2.14  
    2.15          if ( set_iha) {
    2.16 -            vmx_vcpu_thash(vcpu, vadr, &value);
    2.17 +            value = vmx_vcpu_thash(vcpu, vadr);
    2.18              vcpu_set_iha(vcpu, value);
    2.19          }
    2.20      }
     3.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Thu May 03 13:36:06 2007 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Thu May 03 13:55:28 2007 -0600
     3.3 @@ -353,7 +353,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     3.4              }
     3.5          }
     3.6  
     3.7 -        vmx_vcpu_get_pta(v, &vpta.val);
     3.8 +        vpta.val = vmx_vcpu_get_pta(v);
     3.9          if (vpta.vf) {
    3.10              /* Long format is not yet supported.  */
    3.11              if (vpsr.ic) {
    3.12 @@ -378,7 +378,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    3.13              }
    3.14          }
    3.15              
    3.16 -        vmx_vcpu_thash(v, vadr, &vhpt_adr);
    3.17 +        vhpt_adr = vmx_vcpu_thash(v, vadr);
    3.18          if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
    3.19              /* VHPT successfully read.  */
    3.20              if (!(pteval & _PAGE_P)) {
    3.21 @@ -424,7 +424,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    3.22              return IA64_FAULT;
    3.23          }
    3.24  
    3.25 -        vmx_vcpu_get_pta(v, &vpta.val);
    3.26 +        vpta.val = vmx_vcpu_get_pta(v);
    3.27          if (vpta.vf) {
    3.28              /* Long format is not yet supported.  */
    3.29              vcpu_set_isr(v, misr.val);
    3.30 @@ -433,7 +433,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
    3.31          }
    3.32  
    3.33  
    3.34 -        vmx_vcpu_thash(v, vadr, &vhpt_adr);
    3.35 +        vhpt_adr = vmx_vcpu_thash(v, vadr);
    3.36          if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
    3.37              /* VHPT successfully read.  */
    3.38              if (pteval & _PAGE_P) {
     4.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu May 03 13:36:06 2007 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Thu May 03 13:55:28 2007 -0600
     4.3 @@ -202,11 +202,9 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
     4.4   VCPU protection key register access routines
     4.5  **************************************************************************/
     4.6  
     4.7 -IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg, u64 *pval)
     4.8 +u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
     4.9  {
    4.10 -    u64 val = (u64)ia64_get_pkr(reg);
    4.11 -    *pval = val;
    4.12 -    return (IA64_NO_FAULT);
    4.13 +    return ((u64)ia64_get_pkr(reg));
    4.14  }
    4.15  
    4.16  IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
     5.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Thu May 03 13:36:06 2007 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Thu May 03 13:55:28 2007 -0600
     5.3 @@ -446,7 +446,7 @@ static IA64FAULT vmx_emul_thash(VCPU *vc
     5.4          return IA64_NO_FAULT;
     5.5      }
     5.6  #endif  //CHECK_FAULT
     5.7 -    vmx_vcpu_thash(vcpu, r3, &r1);
     5.8 +    r1 = vmx_vcpu_thash(vcpu, r3);
     5.9      vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    5.10      return(IA64_NO_FAULT);
    5.11  }
    5.12 @@ -478,7 +478,7 @@ static IA64FAULT vmx_emul_ttag(VCPU *vcp
    5.13          return IA64_NO_FAULT;
    5.14      }
    5.15  #endif  //CHECK_FAULT
    5.16 -    vmx_vcpu_ttag(vcpu, r3, &r1);
    5.17 +    r1 = vmx_vcpu_ttag(vcpu, r3);
    5.18      vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    5.19      return(IA64_NO_FAULT);
    5.20  }
    5.21 @@ -554,9 +554,7 @@ static IA64FAULT vmx_emul_tak(VCPU *vcpu
    5.22          return IA64_FAULT;
    5.23  #endif
    5.24      }
    5.25 -    if(vmx_vcpu_tak(vcpu, r3, &r1)){
    5.26 -        return IA64_FAULT;
    5.27 -    }
    5.28 +    r1 = vmx_vcpu_tak(vcpu, r3);
    5.29      vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    5.30      return(IA64_NO_FAULT);
    5.31  }
    5.32 @@ -833,7 +831,7 @@ static IA64FAULT vmx_emul_mov_from_ar_re
    5.33          return IA64_FAULT;
    5.34      }
    5.35  #endif // CHECK_FAULT
    5.36 -    vmx_vcpu_get_itc(vcpu,&r1);
    5.37 +    r1 = vmx_vcpu_get_itc(vcpu);
    5.38      vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
    5.39      return IA64_NO_FAULT;
    5.40  }
    5.41 @@ -1057,7 +1055,7 @@ static IA64FAULT vmx_emul_mov_from_pkr(V
    5.42          return IA64_FAULT;
    5.43      }
    5.44  #endif  //CHECK_FAULT
    5.45 -    vmx_vcpu_get_pkr(vcpu,r3,&r1);
    5.46 +    r1 = vmx_vcpu_get_pkr(vcpu, r3);
    5.47      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
    5.48  }
    5.49  
    5.50 @@ -1094,7 +1092,7 @@ static IA64FAULT vmx_emul_mov_from_dbr(V
    5.51          return IA64_FAULT;
    5.52      }
    5.53  #endif  //CHECK_FAULT
    5.54 -    vmx_vcpu_get_dbr(vcpu,r3,&r1);
    5.55 +    r1 = vmx_vcpu_get_dbr(vcpu, r3);
    5.56      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
    5.57  }
    5.58  
    5.59 @@ -1131,7 +1129,7 @@ static IA64FAULT vmx_emul_mov_from_ibr(V
    5.60          return IA64_FAULT;
    5.61      }
    5.62  #endif  //CHECK_FAULT
    5.63 -    vmx_vcpu_get_ibr(vcpu,r3,&r1);
    5.64 +    r1 = vmx_vcpu_get_ibr(vcpu, r3);
    5.65      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
    5.66  }
    5.67  
    5.68 @@ -1168,7 +1166,7 @@ static IA64FAULT vmx_emul_mov_from_pmc(V
    5.69          return IA64_FAULT;
    5.70      }
    5.71  #endif  //CHECK_FAULT
    5.72 -    vmx_vcpu_get_pmc(vcpu,r3,&r1);
    5.73 +    r1 = vmx_vcpu_get_pmc(vcpu, r3);
    5.74      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
    5.75  }
    5.76  
    5.77 @@ -1196,7 +1194,7 @@ static IA64FAULT vmx_emul_mov_from_cpuid
    5.78          return IA64_FAULT;
    5.79      }
    5.80  #endif  //CHECK_FAULT
    5.81 -    vmx_vcpu_get_cpuid(vcpu,r3,&r1);
    5.82 +    r1 = vmx_vcpu_get_cpuid(vcpu, r3);
    5.83      return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
    5.84  }
    5.85  
    5.86 @@ -1274,9 +1272,15 @@ static IA64FAULT vmx_emul_mov_to_cr(VCPU
    5.87      ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
    5.88          vcpu_set_gr(vcpu, tgt, val,0):fault;
    5.89  
    5.90 +//#define cr_get(cr) (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)
    5.91 +
    5.92 +/*
    5.93  #define vmx_cr_get(cr) \
    5.94      ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
    5.95          vcpu_set_gr(vcpu, tgt, val,0):fault;
    5.96 +*/
    5.97 +
    5.98 +#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
    5.99  
   5.100  static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
   5.101  {
   5.102 @@ -1317,7 +1321,7 @@ static IA64FAULT vmx_emul_mov_from_cr(VC
   5.103          case 25:return cr_get(iha);
   5.104          case 64:return vmx_cr_get(lid);
   5.105          case 65:
   5.106 -                vmx_vcpu_get_ivr(vcpu,&val);
   5.107 +                val = vmx_vcpu_get_ivr(vcpu);
   5.108                  return vcpu_set_gr(vcpu,tgt,val,0);
   5.109          case 66:return vmx_cr_get(tpr);
   5.110          case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
     6.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Thu May 03 13:36:06 2007 -0600
     6.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Thu May 03 13:55:28 2007 -0600
     6.3 @@ -61,7 +61,7 @@ extern u64 vmx_vcpu_sync_mpsr(u64 mipsr,
     6.4  extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, u64 value);
     6.5  extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
     6.6  extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
     6.7 -extern IA64FAULT vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
     6.8 +extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
     6.9  IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
    6.10  extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
    6.11  extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
    6.12 @@ -75,11 +75,11 @@ extern IA64FAULT vmx_vcpu_ptc_l(VCPU * v
    6.13  extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
    6.14  extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
    6.15  extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
    6.16 -extern IA64FAULT vmx_vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
    6.17 +extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
    6.18  extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
    6.19 -extern IA64FAULT vmx_vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * pval);
    6.20 +extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
    6.21  extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
    6.22 -extern IA64FAULT vmx_vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
    6.23 +extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
    6.24  extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
    6.25  extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
    6.26  extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
    6.27 @@ -132,100 +132,84 @@ extern void vmx_ia64_set_dcr(VCPU * v);
    6.28   VCPU control register access routines
    6.29  **************************************************************************/
    6.30  
    6.31 -static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
    6.32 -{
    6.33 -	*pval = VCPU(vcpu, itm);
    6.34 -	return IA64_NO_FAULT;
    6.35 -}
    6.36 -
    6.37 -static inline IA64FAULT vmx_vcpu_get_iva(VCPU * vcpu, u64 * pval)
    6.38 -{
    6.39 -	*pval = VCPU(vcpu, iva);
    6.40 -	return IA64_NO_FAULT;
    6.41 -}
    6.42 -
    6.43 -static inline IA64FAULT vmx_vcpu_get_pta(VCPU * vcpu, u64 * pval)
    6.44 -{
    6.45 -	*pval = VCPU(vcpu, pta);
    6.46 -	return IA64_NO_FAULT;
    6.47 -}
    6.48 -
    6.49 -static inline IA64FAULT vmx_vcpu_get_lid(VCPU * vcpu, u64 * pval)
    6.50 +static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
    6.51  {
    6.52 -	*pval = VCPU(vcpu, lid);
    6.53 -	return IA64_NO_FAULT;
    6.54 -}
    6.55 -
    6.56 -static inline IA64FAULT vmx_vcpu_get_ivr(VCPU * vcpu, u64 * pval)
    6.57 -{
    6.58 -	*pval = guest_read_vivr(vcpu);
    6.59 -	return IA64_NO_FAULT;
    6.60 -}
    6.61 -
    6.62 -static inline IA64FAULT vmx_vcpu_get_tpr(VCPU * vcpu, u64 * pval)
    6.63 -{
    6.64 -	*pval = VCPU(vcpu, tpr);
    6.65 -	return IA64_NO_FAULT;
    6.66 -}
    6.67 -
    6.68 -static inline IA64FAULT vmx_vcpu_get_eoi(VCPU * vcpu, u64 * pval)
    6.69 -{
    6.70 -	*pval = 0L;		// reads of eoi always return 0
    6.71 -	return IA64_NO_FAULT;
    6.72 +	return ((u64)VCPU(vcpu, itm));
    6.73  }
    6.74  
    6.75 -static inline IA64FAULT vmx_vcpu_get_irr0(VCPU * vcpu, u64 * pval)
    6.76 -{
    6.77 -	*pval = VCPU(vcpu, irr[0]);
    6.78 -	return IA64_NO_FAULT;
    6.79 -}
    6.80 -
    6.81 -static inline IA64FAULT vmx_vcpu_get_irr1(VCPU * vcpu, u64 * pval)
    6.82 +static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
    6.83  {
    6.84 -	*pval = VCPU(vcpu, irr[1]);
    6.85 -	return IA64_NO_FAULT;
    6.86 -}
    6.87 -
    6.88 -static inline IA64FAULT vmx_vcpu_get_irr2(VCPU * vcpu, u64 * pval)
    6.89 -{
    6.90 -	*pval = VCPU(vcpu, irr[2]);
    6.91 -	return IA64_NO_FAULT;
    6.92 -}
    6.93 -
    6.94 -static inline IA64FAULT vmx_vcpu_get_irr3(VCPU * vcpu, u64 * pval)
    6.95 -{
    6.96 -	*pval = VCPU(vcpu, irr[3]);
    6.97 -	return IA64_NO_FAULT;
    6.98 +	return ((u64)VCPU(vcpu, iva));
    6.99  }
   6.100  
   6.101 -static inline IA64FAULT vmx_vcpu_get_itv(VCPU * vcpu, u64 * pval)
   6.102 +static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
   6.103  {
   6.104 -	*pval = VCPU(vcpu, itv);
   6.105 -	return IA64_NO_FAULT;
   6.106 -}
   6.107 -
   6.108 -static inline IA64FAULT vmx_vcpu_get_pmv(VCPU * vcpu, u64 * pval)
   6.109 -{
   6.110 -	*pval = VCPU(vcpu, pmv);
   6.111 -	return IA64_NO_FAULT;
   6.112 +	return ((u64)VCPU(vcpu, pta));
   6.113  }
   6.114  
   6.115 -static inline IA64FAULT vmx_vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
   6.116 +static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
   6.117  {
   6.118 -	*pval = VCPU(vcpu, cmcv);
   6.119 -	return IA64_NO_FAULT;
   6.120 +	return ((u64)VCPU(vcpu, lid));
   6.121  }
   6.122  
   6.123 -static inline IA64FAULT vmx_vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
   6.124 +static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
   6.125  {
   6.126 -	*pval = VCPU(vcpu, lrr0);
   6.127 -	return IA64_NO_FAULT;
   6.128 +	return ((u64)guest_read_vivr(vcpu));
   6.129  }
   6.130  
   6.131 -static inline IA64FAULT vmx_vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
   6.132 +static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
   6.133  {
   6.134 -	*pval = VCPU(vcpu, lrr1);
   6.135 -	return IA64_NO_FAULT;
   6.136 +	return ((u64)VCPU(vcpu, tpr));
   6.137 +}
   6.138 +
   6.139 +static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
   6.140 +{
   6.141 +	return (0UL);		// reads of eoi always return 0
   6.142 +}
   6.143 +
   6.144 +static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
   6.145 +{
   6.146 +	return ((u64)VCPU(vcpu, irr[0]));
   6.147 +}
   6.148 +
   6.149 +static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
   6.150 +{
   6.151 +	return ((u64)VCPU(vcpu, irr[1]));
   6.152 +}
   6.153 +
   6.154 +static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
   6.155 +{
   6.156 +	return ((u64)VCPU(vcpu, irr[2]));
   6.157 +}
   6.158 +
   6.159 +static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
   6.160 +{
   6.161 +	return ((u64)VCPU(vcpu, irr[3]));
   6.162 +}
   6.163 +
   6.164 +static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
   6.165 +{
   6.166 +	return ((u64)VCPU(vcpu, itv));
   6.167 +}
   6.168 +
   6.169 +static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
   6.170 +{
   6.171 +	return ((u64)VCPU(vcpu, pmv));
   6.172 +}
   6.173 +
   6.174 +static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
   6.175 +{
   6.176 +	return ((u64)VCPU(vcpu, cmcv));
   6.177 +}
   6.178 +
   6.179 +static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
   6.180 +{
   6.181 +	return ((u64)VCPU(vcpu, lrr0));
   6.182 +}
   6.183 +
   6.184 +static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
   6.185 +{
   6.186 +	return ((u64)VCPU(vcpu, lrr1));
   6.187  }
   6.188  
   6.189  static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
   6.190 @@ -299,10 +283,9 @@ static inline IA64FAULT vmx_vcpu_set_itc
   6.191  	return IA64_NO_FAULT;
   6.192  }
   6.193  
   6.194 -static inline IA64FAULT vmx_vcpu_get_itc(VCPU * vcpu, u64 * val)
   6.195 +static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
   6.196  {
   6.197 -	*val = vtm_get_itc(vcpu);
   6.198 -	return IA64_NO_FAULT;
   6.199 +	return ((u64)vtm_get_itc(vcpu));
   6.200  }
   6.201  
   6.202  /*
   6.203 @@ -317,7 +300,7 @@ IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u6
   6.204   VCPU debug breakpoint register access routines
   6.205  **************************************************************************/
   6.206  
   6.207 -static inline IA64FAULT vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
   6.208 +static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
   6.209  {
   6.210  	// TODO: unimplemented DBRs return a reserved register fault
   6.211  	// TODO: Should set Logical CPU state, not just physical
   6.212 @@ -325,8 +308,7 @@ static inline IA64FAULT vmx_vcpu_get_cpu
   6.213  		panic_domain(vcpu_regs(vcpu),
   6.214  			     "there are only five cpuid registers");
   6.215  	}
   6.216 -	*pval = VCPU(vcpu, vcpuid[reg]);
   6.217 -	return IA64_NO_FAULT;
   6.218 +	return ((u64)VCPU(vcpu, vcpuid[reg]));
   6.219  }
   6.220  
   6.221  static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
   6.222 @@ -345,20 +327,16 @@ static inline IA64FAULT vmx_vcpu_set_ibr
   6.223  	return IA64_NO_FAULT;
   6.224  }
   6.225  
   6.226 -static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
   6.227 +static inline u64 vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg)
   6.228  {
   6.229  	// TODO: unimplemented DBRs return a reserved register fault
   6.230 -	u64 val = ia64_get_dbr(reg);
   6.231 -	*pval = val;
   6.232 -	return IA64_NO_FAULT;
   6.233 +	return ((u64)ia64_get_dbr(reg));
   6.234  }
   6.235  
   6.236 -static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
   6.237 +static inline u64 vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg)
   6.238  {
   6.239  	// TODO: unimplemented IBRs return a reserved register fault
   6.240 -	u64 val = ia64_get_ibr(reg);
   6.241 -	*pval = val;
   6.242 -	return IA64_NO_FAULT;
   6.243 +	return ((u64)ia64_get_ibr(reg));
   6.244  }
   6.245  
   6.246  /**************************************************************************
   6.247 @@ -380,20 +358,16 @@ static inline IA64FAULT vmx_vcpu_set_pmd
   6.248  	return IA64_NO_FAULT;
   6.249  }
   6.250  
   6.251 -static inline IA64FAULT vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
   6.252 +static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
   6.253  {
   6.254  	// NOTE: Reads from unimplemented PMC registers return zero
   6.255 -	u64 val = (u64) ia64_get_pmc(reg);
   6.256 -	*pval = val;
   6.257 -	return IA64_NO_FAULT;
   6.258 +	return ((u64)ia64_get_pmc(reg));
   6.259  }
   6.260  
   6.261 -static inline IA64FAULT vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
   6.262 +static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
   6.263  {
   6.264  	// NOTE: Reads from unimplemented PMD registers return zero
   6.265 -	u64 val = (u64) ia64_get_pmd(reg);
   6.266 -	*pval = val;
   6.267 -	return IA64_NO_FAULT;
   6.268 +	return ((u64)ia64_get_pmd(reg));
   6.269  }
   6.270  
   6.271  /**************************************************************************