ia64/xen-unstable
changeset 15009:6cf6f49f26ab
[IA64] Optimize some functions
Optimize some functions by changing their parameter passing mode from pointer to value.
This reduces redundant memory accesses.
Signed-off-by: Xu, Anthony <Anthony.xu@intel.com>
Signed-off-by: Zhang Xin <xing.z.zhang@intel.com>
author    Alex Williamson <alex.williamson@hp.com>
date      Thu May 03 13:55:28 2007 -0600 (2007-05-03)
parents   2b653a785fb8
children  8924215a5f95
files     xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_interrupt.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c xen/include/asm-ia64/vmx_vcpu.h
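
The change is mechanical across all six files: getters that can never fail stop writing their result through a `u64 *` out-parameter (while returning an always-`IA64_NO_FAULT` status) and instead return the value directly, so it comes back in a register. A minimal sketch of the before/after shape (the names here are hypothetical, not functions from the patch):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Before: result passed back through an out-pointer; the fault status
 * is always "no fault", so it carries no information, and the caller
 * pays for a store through the pointer plus a reload of the result. */
static int get_reg_old(const u64 *state, u64 *pval)
{
    *pval = *state;
    return 0;               /* always IA64_NO_FAULT */
}

/* After: result passed back by value; on ia64 an integer return value
 * comes back in a register (r8), so the store/reload disappears. */
static u64 get_reg_new(const u64 *state)
{
    return *state;
}

int main(void)
{
    u64 state = 0x1234, val;

    get_reg_old(&state, &val);
    printf("old style: %#llx\n", (unsigned long long)val);
    printf("new style: %#llx\n", (unsigned long long)get_reg_new(&state));
    return 0;
}
```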
line diff
--- a/xen/arch/ia64/vmx/vmmu.c	Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmmu.c	Thu May 03 13:55:28 2007 -0600
@@ -295,7 +295,7 @@ int vhpt_enabled(VCPU *vcpu, uint64_t va
 
     vpsr.val = VCPU(vcpu, vpsr);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
-    vmx_vcpu_get_pta(vcpu,&vpta.val);
+    vpta.val = vmx_vcpu_get_pta(vcpu);
 
     if ( vrr.ve & vpta.ve ) {
         switch ( ref ) {
@@ -629,38 +629,41 @@ again: /* Try again if VCPU has migrated
 }
 
 
-IA64FAULT vmx_vcpu_thash(VCPU *vcpu, u64 vadr, u64 *pval)
+u64 vmx_vcpu_thash(VCPU *vcpu, u64 vadr)
 {
     PTA vpta;
     ia64_rr vrr;
+    u64 pval;
     u64 vhpt_offset;
-    vmx_vcpu_get_pta(vcpu, &vpta.val);
+    vpta.val = vmx_vcpu_get_pta(vcpu);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
-        *pval = ia64_call_vsa(PAL_VPS_THASH,vadr,vrr.rrval,vpta.val,0,0,0,0);
-        *pval = vpta.val & ~0xffff;
+        pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.rrval,
+                             vpta.val, 0, 0, 0, 0);
+        pval = vpta.val & ~0xffff;
     }else{
         vhpt_offset=((vadr>>vrr.ps)<<3)&((1UL<<(vpta.size))-1);
-        *pval = (vadr&VRN_MASK)|
+        pval = (vadr & VRN_MASK) |
             (vpta.val<<3>>(vpta.size+3)<<(vpta.size))|
             vhpt_offset;
     }
-    return  IA64_NO_FAULT;
+    return  pval;
 }
 
 
-IA64FAULT vmx_vcpu_ttag(VCPU *vcpu, u64 vadr, u64 *pval)
+u64 vmx_vcpu_ttag(VCPU *vcpu, u64 vadr)
 {
     ia64_rr vrr;
     PTA vpta;
-    vmx_vcpu_get_pta(vcpu, &vpta.val);
+    u64 pval;
+    vpta.val = vmx_vcpu_get_pta(vcpu);
     vcpu_get_rr(vcpu, vadr, &vrr.rrval);
     if(vpta.vf){
-        *pval = ia64_call_vsa(PAL_VPS_TTAG,vadr,vrr.rrval,0,0,0,0,0);
+        pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.rrval, 0, 0, 0, 0, 0);
     }else{
-        *pval = 1;
+        pval = 1;
     }
-    return IA64_NO_FAULT;
+    return pval;
 }
 
 
@@ -725,7 +728,7 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
         }
     }
     else{
-        vmx_vcpu_thash(vcpu, vadr, &vhpt_adr);
+        vhpt_adr = vmx_vcpu_thash(vcpu, vadr);
         data = vtlb_lookup(vcpu, vhpt_adr, DSIDE_TLB);
         if(data){
             if(vpsr.ic){
@@ -753,20 +756,21 @@ IA64FAULT vmx_vcpu_tpa(VCPU *vcpu, u64 v
     }
 }
 
-IA64FAULT vmx_vcpu_tak(VCPU *vcpu, u64 vadr, u64 *key)
+u64 vmx_vcpu_tak(VCPU *vcpu, u64 vadr)
 {
     thash_data_t *data;
     PTA vpta;
-    vmx_vcpu_get_pta(vcpu, &vpta.val);
+    u64 key;
+    vpta.val = vmx_vcpu_get_pta(vcpu);
     if(vpta.vf==0 || unimplemented_gva(vcpu, vadr)){
-        *key=1;
-        return IA64_NO_FAULT;
+        key=1;
+        return key;
     }
     data = vtlb_lookup(vcpu, vadr, DSIDE_TLB);
     if(!data||!data->p){
-        *key=1;
+        key = 1;
     }else{
-        *key=data->key;
+        key = data->key;
     }
-    return IA64_NO_FAULT;
+    return key;
 }
--- a/xen/arch/ia64/vmx/vmx_interrupt.c	Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c	Thu May 03 13:55:28 2007 -0600
@@ -105,7 +105,7 @@ inject_guest_interruption(VCPU *vcpu, u6
     collect_interruption(vcpu);
     vmx_ia64_set_dcr(vcpu);
 
-    vmx_vcpu_get_iva(vcpu,&viva);
+    viva = vmx_vcpu_get_iva(vcpu);
     regs->cr_iip = viva + vec;
 }
 
@@ -135,7 +135,7 @@ set_ifa_itir_iha (VCPU *vcpu, u64 vadr,
     }
 
     if ( set_iha) {
-        vmx_vcpu_thash(vcpu, vadr, &value);
+        value = vmx_vcpu_thash(vcpu, vadr);
         vcpu_set_iha(vcpu, value);
     }
 }
--- a/xen/arch/ia64/vmx/vmx_process.c	Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c	Thu May 03 13:55:28 2007 -0600
@@ -353,7 +353,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
         }
     }
 
-    vmx_vcpu_get_pta(v, &vpta.val);
+    vpta.val = vmx_vcpu_get_pta(v);
     if (vpta.vf) {
         /* Long format is not yet supported. */
         if (vpsr.ic) {
@@ -378,7 +378,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
         }
     }
 
-    vmx_vcpu_thash(v, vadr, &vhpt_adr);
+    vhpt_adr = vmx_vcpu_thash(v, vadr);
     if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
         /* VHPT successfully read.  */
         if (!(pteval & _PAGE_P)) {
@@ -424,7 +424,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
             return IA64_FAULT;
         }
 
-    vmx_vcpu_get_pta(v, &vpta.val);
+    vpta.val = vmx_vcpu_get_pta(v);
     if (vpta.vf) {
         /* Long format is not yet supported. */
         vcpu_set_isr(v, misr.val);
@@ -433,7 +433,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     }
 
 
-    vmx_vcpu_thash(v, vadr, &vhpt_adr);
+    vhpt_adr = vmx_vcpu_thash(v, vadr);
     if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
         /* VHPT successfully read.  */
         if (pteval & _PAGE_P) {
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Thu May 03 13:55:28 2007 -0600
@@ -202,11 +202,9 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
 VCPU protection key register access routines
 **************************************************************************/
 
-IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg, u64 *pval)
+u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
 {
-    u64 val = (u64)ia64_get_pkr(reg);
-    *pval = val;
-    return (IA64_NO_FAULT);
+    return ((u64)ia64_get_pkr(reg));
 }
 
 IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
--- a/xen/arch/ia64/vmx/vmx_virt.c	Thu May 03 13:36:06 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_virt.c	Thu May 03 13:55:28 2007 -0600
@@ -446,7 +446,7 @@ static IA64FAULT vmx_emul_thash(VCPU *vc
         return IA64_NO_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_thash(vcpu, r3, &r1);
+    r1 = vmx_vcpu_thash(vcpu, r3);
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
     return(IA64_NO_FAULT);
 }
@@ -478,7 +478,7 @@ static IA64FAULT vmx_emul_ttag(VCPU *vcp
         return IA64_NO_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_ttag(vcpu, r3, &r1);
+    r1 = vmx_vcpu_ttag(vcpu, r3);
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
     return(IA64_NO_FAULT);
 }
@@ -554,9 +554,7 @@ static IA64FAULT vmx_emul_tak(VCPU *vcpu
         return IA64_FAULT;
 #endif
     }
-    if(vmx_vcpu_tak(vcpu, r3, &r1)){
-        return IA64_FAULT;
-    }
+    r1 = vmx_vcpu_tak(vcpu, r3);
     vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
     return(IA64_NO_FAULT);
 }
@@ -833,7 +831,7 @@ static IA64FAULT vmx_emul_mov_from_ar_re
         return IA64_FAULT;
     }
 #endif // CHECK_FAULT
-    vmx_vcpu_get_itc(vcpu,&r1);
+    r1 = vmx_vcpu_get_itc(vcpu);
     vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
     return IA64_NO_FAULT;
 }
@@ -1057,7 +1055,7 @@ static IA64FAULT vmx_emul_mov_from_pkr(V
         return IA64_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_get_pkr(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_pkr(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
@@ -1094,7 +1092,7 @@ static IA64FAULT vmx_emul_mov_from_dbr(V
         return IA64_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_get_dbr(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_dbr(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
@@ -1131,7 +1129,7 @@ static IA64FAULT vmx_emul_mov_from_ibr(V
         return IA64_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_get_ibr(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_ibr(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
@@ -1168,7 +1166,7 @@ static IA64FAULT vmx_emul_mov_from_pmc(V
         return IA64_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_get_pmc(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_pmc(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
@@ -1196,7 +1194,7 @@ static IA64FAULT vmx_emul_mov_from_cpuid
         return IA64_FAULT;
     }
 #endif //CHECK_FAULT
-    vmx_vcpu_get_cpuid(vcpu,r3,&r1);
+    r1 = vmx_vcpu_get_cpuid(vcpu, r3);
     return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
 }
 
@@ -1274,9 +1272,15 @@ static IA64FAULT vmx_emul_mov_to_cr(VCPU
     ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
         vcpu_set_gr(vcpu, tgt, val,0):fault;
 
+//#define cr_get(cr)  (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)
+
+/*
 #define vmx_cr_get(cr) \
     ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
         vcpu_set_gr(vcpu, tgt, val,0):fault;
+*/
+
+#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
 
 static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
 {
@@ -1317,7 +1321,7 @@ static IA64FAULT vmx_emul_mov_from_cr(VC
     case 25:return cr_get(iha);
     case 64:return vmx_cr_get(lid);
     case 65:
-        vmx_vcpu_get_ivr(vcpu,&val);
+        val = vmx_vcpu_get_ivr(vcpu);
         return vcpu_set_gr(vcpu,tgt,val,0);
     case 66:return vmx_cr_get(tpr);
    case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
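
The vmx_virt.c hunk above also rewrites the `vmx_cr_get()` macro: since the control-register getters can no longer fail, the fault-threading ternary collapses to a single expression. A compilable sketch of the new macro's shape, with stub types and helpers standing in for the real Xen definitions:

```c
#include <stdint.h>

typedef uint64_t u64;
typedef struct { u64 lid; } VCPU;   /* stub; the real VCPU is far larger */

/* Stubs standing in for Xen's vcpu_set_gr() and vmx_vcpu_get_lid(). */
static int vcpu_set_gr(VCPU *vcpu, unsigned int tgt, u64 val, int nat)
{
    (void)vcpu; (void)tgt; (void)val; (void)nat;
    return 0;                       /* IA64_NO_FAULT */
}
static u64 vmx_vcpu_get_lid(VCPU *vcpu) { return vcpu->lid; }

/* New form from the patch: one expression, no fault plumbing. */
#define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))

int read_cr64(VCPU *vcpu, unsigned int tgt)
{
    /* Expands to: vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_lid(vcpu), 0) */
    return vmx_cr_get(lid);
}
```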
--- a/xen/include/asm-ia64/vmx_vcpu.h	Thu May 03 13:36:06 2007 -0600
+++ b/xen/include/asm-ia64/vmx_vcpu.h	Thu May 03 13:55:28 2007 -0600
@@ -61,7 +61,7 @@ extern u64 vmx_vcpu_sync_mpsr(u64 mipsr,
 extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, u64 value);
 extern IA64FAULT vmx_vcpu_cover(VCPU * vcpu);
 extern IA64FAULT vmx_vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val);
-extern IA64FAULT vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg, u64 * pval);
+extern u64 vmx_vcpu_get_pkr(VCPU * vcpu, u64 reg);
 IA64FAULT vmx_vcpu_set_pkr(VCPU * vcpu, u64 reg, u64 val);
 extern IA64FAULT vmx_vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
 extern IA64FAULT vmx_vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa);
@@ -75,11 +75,11 @@ extern IA64FAULT vmx_vcpu_ptc_l(VCPU * v
 extern IA64FAULT vmx_vcpu_ptc_e(VCPU * vcpu, u64 vadr);
 extern IA64FAULT vmx_vcpu_ptc_g(VCPU * vcpu, u64 vadr, u64 ps);
 extern IA64FAULT vmx_vcpu_ptc_ga(VCPU * vcpu, u64 vadr, u64 ps);
-extern IA64FAULT vmx_vcpu_thash(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_thash(VCPU * vcpu, u64 vadr);
 extern u64 vmx_vcpu_get_itir_on_fault(VCPU * vcpu, u64 ifa);
-extern IA64FAULT vmx_vcpu_ttag(VCPU * vcpu, u64 vadr, u64 * pval);
+extern u64 vmx_vcpu_ttag(VCPU * vcpu, u64 vadr);
 extern IA64FAULT vmx_vcpu_tpa(VCPU * vcpu, u64 vadr, u64 * padr);
-extern IA64FAULT vmx_vcpu_tak(VCPU * vcpu, u64 vadr, u64 * key);
+extern u64 vmx_vcpu_tak(VCPU * vcpu, u64 vadr);
 extern IA64FAULT vmx_vcpu_rfi(VCPU * vcpu);
 extern u64 vmx_vcpu_get_psr(VCPU * vcpu);
 extern IA64FAULT vmx_vcpu_get_bgr(VCPU * vcpu, unsigned int reg, u64 * val);
@@ -132,100 +132,84 @@ extern void vmx_ia64_set_dcr(VCPU * v);
 VCPU control register access routines
 **************************************************************************/
 
-static inline IA64FAULT vmx_vcpu_get_itm(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_itm(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, itm);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, itm));
 }
 
-static inline IA64FAULT vmx_vcpu_get_iva(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_iva(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, iva);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, iva));
 }
 
-static inline IA64FAULT vmx_vcpu_get_pta(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_pta(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, pta);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, pta));
 }
 
-static inline IA64FAULT vmx_vcpu_get_lid(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_lid(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, lid);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, lid));
 }
 
-static inline IA64FAULT vmx_vcpu_get_ivr(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_ivr(VCPU * vcpu)
 {
-    *pval = guest_read_vivr(vcpu);
-    return IA64_NO_FAULT;
+    return ((u64)guest_read_vivr(vcpu));
 }
 
-static inline IA64FAULT vmx_vcpu_get_tpr(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_tpr(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, tpr);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, tpr));
 }
 
-static inline IA64FAULT vmx_vcpu_get_eoi(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_eoi(VCPU * vcpu)
 {
-    *pval = 0L;  // reads of eoi always return 0
-    return IA64_NO_FAULT;
+    return (0UL);  // reads of eoi always return 0
 }
 
-static inline IA64FAULT vmx_vcpu_get_irr0(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr0(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, irr[0]);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, irr[0]));
 }
 
-static inline IA64FAULT vmx_vcpu_get_irr1(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr1(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, irr[1]);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, irr[1]));
 }
 
-static inline IA64FAULT vmx_vcpu_get_irr2(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr2(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, irr[2]);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, irr[2]));
 }
 
-static inline IA64FAULT vmx_vcpu_get_irr3(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_irr3(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, irr[3]);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, irr[3]));
 }
 
-static inline IA64FAULT vmx_vcpu_get_itv(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_itv(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, itv);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, itv));
 }
 
-static inline IA64FAULT vmx_vcpu_get_pmv(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_pmv(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, pmv);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, pmv));
 }
 
-static inline IA64FAULT vmx_vcpu_get_cmcv(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_cmcv(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, cmcv);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, cmcv));
 }
 
-static inline IA64FAULT vmx_vcpu_get_lrr0(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_lrr0(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, lrr0);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, lrr0));
 }
 
-static inline IA64FAULT vmx_vcpu_get_lrr1(VCPU * vcpu, u64 * pval)
+static inline u64 vmx_vcpu_get_lrr1(VCPU * vcpu)
 {
-    *pval = VCPU(vcpu, lrr1);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, lrr1));
 }
 
 static inline IA64FAULT vmx_vcpu_set_itm(VCPU * vcpu, u64 val)
@@ -299,10 +283,9 @@ static inline IA64FAULT vmx_vcpu_set_itc
     return IA64_NO_FAULT;
 }
 
-static inline IA64FAULT vmx_vcpu_get_itc(VCPU * vcpu, u64 * val)
+static inline u64 vmx_vcpu_get_itc(VCPU * vcpu)
 {
-    *val = vtm_get_itc(vcpu);
-    return IA64_NO_FAULT;
+    return ((u64)vtm_get_itc(vcpu));
 }
 
 /*
@@ -317,7 +300,7 @@ IA64FAULT vmx_vcpu_get_rr(VCPU *vcpu, u6
 VCPU debug breakpoint register access routines
 **************************************************************************/
 
-static inline IA64FAULT vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_cpuid(VCPU * vcpu, u64 reg)
 {
     // TODO: unimplemented DBRs return a reserved register fault
     // TODO: Should set Logical CPU state, not just physical
@@ -325,8 +308,7 @@ static inline IA64FAULT vmx_vcpu_get_cpu
         panic_domain(vcpu_regs(vcpu),
                      "there are only five cpuid registers");
     }
-    *pval = VCPU(vcpu, vcpuid[reg]);
-    return IA64_NO_FAULT;
+    return ((u64)VCPU(vcpu, vcpuid[reg]));
 }
 
 static inline IA64FAULT vmx_vcpu_set_dbr(VCPU * vcpu, u64 reg, u64 val)
@@ -345,20 +327,16 @@ static inline IA64FAULT vmx_vcpu_set_ibr
     return IA64_NO_FAULT;
 }
 
-static inline IA64FAULT vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_dbr(VCPU * vcpu, u64 reg)
 {
     // TODO: unimplemented DBRs return a reserved register fault
-    u64 val = ia64_get_dbr(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
+    return ((u64)ia64_get_dbr(reg));
 }
 
-static inline IA64FAULT vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_ibr(VCPU * vcpu, u64 reg)
 {
     // TODO: unimplemented IBRs return a reserved register fault
-    u64 val = ia64_get_ibr(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
+    return ((u64)ia64_get_ibr(reg));
 }
 
 /**************************************************************************
@@ -380,20 +358,16 @@ static inline IA64FAULT vmx_vcpu_set_pmd
     return IA64_NO_FAULT;
 }
 
-static inline IA64FAULT vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_pmc(VCPU * vcpu, u64 reg)
 {
     // NOTE: Reads from unimplemented PMC registers return zero
-    u64 val = (u64) ia64_get_pmc(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
+    return ((u64)ia64_get_pmc(reg));
 }
 
-static inline IA64FAULT vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg, u64 * pval)
+static inline u64 vmx_vcpu_get_pmd(VCPU * vcpu, u64 reg)
 {
     // NOTE: Reads from unimplemented PMD registers return zero
-    u64 val = (u64) ia64_get_pmd(reg);
-    *pval = val;
-    return IA64_NO_FAULT;
+    return ((u64)ia64_get_pmd(reg));
 }
 
 /**************************************************************************