ia64/xen-unstable
changeset 13477:6745b7274eff
[IA64] Optimize vmx_vcpu_thash()
Implement in assembly
Signed-off-by: Zhang Xin <xing.z.zhang@intel.com>
author      awilliam@xenbuild2.aw
date        Thu Jan 25 14:58:41 2007 -0700 (2007-01-25)
parents     730e24a1594a
children    b741b300a0f2
files       xen/arch/ia64/asm-offsets.c xen/arch/ia64/vmx/optvfault.S xen/arch/ia64/vmx/vmx_ivt.S xen/include/asm-ia64/vmx_mm_def.h
--- a/xen/arch/ia64/asm-offsets.c	Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/arch/ia64/asm-offsets.c	Thu Jan 25 14:58:41 2007 -0700
@@ -200,6 +200,7 @@ void foo(void)
     DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
     DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
     DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
+    DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
     DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
     DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
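This hunk exports the offset of the pta field inside the VPD (struct mapped_regs) so the new assembly code can load the guest PTA directly. For context, asm-offsets.c is compiled but never linked; its DEFINE() markers are scraped out of the generated assembly to produce constants usable from .S files. A minimal sketch of the mechanism, simplified from the real file (struct mapped_regs_sketch is a stand-in, not the Xen type):

    /* asm-offsets sketch: the compiler evaluates each offset as an
     * immediate and embeds it as a marker in the .s output, which the
     * build then extracts into a generated header of #define constants. */
    #include <stddef.h>

    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct mapped_regs_sketch {     /* stand-in for Xen's mapped_regs */
        unsigned long ifs;
        unsigned long pta;
    };

    void foo(void)
    {
        DEFINE(IA64_VPD_VPTA_OFFSET,
               offsetof(struct mapped_regs_sketch, pta));
    }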
--- a/xen/arch/ia64/vmx/optvfault.S	Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/arch/ia64/vmx/optvfault.S	Thu Jan 25 14:58:41 2007 -0700
@@ -15,6 +15,7 @@
 #include <asm/vmx_vpd.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/asm-offsets.h>
+#include <asm-ia64/vmx_mm_def.h>
 
 #define ACCE_MOV_FROM_AR
 #define ACCE_MOV_FROM_RR
@@ -22,6 +23,7 @@
 #define ACCE_RSM
 #define ACCE_SSM
 #define ACCE_MOV_TO_PSR
+#define ACCE_THASH
 
 //mov r1=ar3
 GLOBAL_ENTRY(vmx_asm_mov_from_ar)
@@ -418,6 +420,64 @@ ENTRY(vmx_asm_dispatch_vexirq)
     br.many vmx_dispatch_vexirq
 END(vmx_asm_dispatch_vexirq)
 
+// thash
+// TODO: add support when pta.vf = 1
+GLOBAL_ENTRY(vmx_asm_thash)
+#ifndef ACCE_THASH
+    br.many vmx_virtualization_fault_back
+#endif
+    extr.u r17=r25,20,7             // get r3 from opcode in r25
+    extr.u r18=r25,6,7              // get r1 from opcode in r25
+    movl r20=asm_mov_from_reg
+    ;;
+    adds r30=vmx_asm_thash_back1-asm_mov_from_reg,r20
+    shladd r17=r17,4,r20            // get addr of MOV_FROM_REG(r17)
+    adds r16=IA64_VPD_BASE_OFFSET,r21 // get vcpu.arch.privregs
+    ;;
+    mov r24=b0
+    ;;
+    ld8 r16=[r16]                   // get VPD addr
+    mov b0=r17
+    br.many b0                      // r19 return value
+    ;;
+vmx_asm_thash_back1:
+    shr.u r23=r19,61                // get RR number
+    adds r25=VCPU_VRR0_OFS,r21      // get vcpu->arch.arch_vmx.vrr[0]'s addr
+    adds r16=IA64_VPD_VPTA_OFFSET,r16 // get vpta
+    ;;
+    shladd r27=r23,3,r25            // get vcpu->arch.arch_vmx.vrr[r23]'s addr
+    ld8 r17=[r16]                   // get PTA
+    mov r26=1
+    ;;
+    extr.u r29=r17,2,6              // get pta.size
+    ld8 r25=[r27]                   // get vcpu->arch.arch_vmx.vrr[r23]'s value
+    ;;
+    extr.u r25=r25,2,6              // get rr.ps
+    shl r22=r26,r29                 // 1UL << pta.size
+    ;;
+    shr.u r23=r19,r25               // vaddr >> rr.ps
+    adds r26=3,r29                  // pta.size + 3
+    shl r27=r17,3                   // pta << 3
+    ;;
+    shl r23=r23,3                   // (vaddr >> rr.ps) << 3
+    shr.u r27=r27,r26               // (pta << 3) >> (pta.size+3)
+    movl r16=VRN_MASK
+    ;;
+    adds r22=-1,r22                 // (1UL << pta.size) - 1
+    shl r27=r27,r29                 // ((pta<<3)>>(pta.size+3))<<pta.size
+    and r19=r19,r16                 // vaddr & VRN_MASK
+    ;;
+    and r22=r22,r23                 // vhpt_offset
+    or r19=r19,r27                  // (vaddr&VRN_MASK)|(((pta<<3)>>(pta.size+3))<<pta.size)
+    adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
+    ;;
+    or r19=r19,r22                  // calc pval
+    shladd r17=r18,4,r26
+    adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
+    ;;
+    mov b0=r17
+    br.many b0
+END(vmx_asm_thash)
 
 #define MOV_TO_REG0 \
 {; \
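To follow the register arithmetic above: vmx_asm_thash reproduces the long-format VHPT hash that the C-level thash emulation computes — the region bits of the address, the aligned VHPT base taken from pta.base, and an offset derived from the virtual page number. A hedged C sketch of the same calculation (names and signature are illustrative, not the actual vmx_vcpu_thash() interface):

    #include <stdint.h>

    #define VRN_MASK 0xe000000000000000UL

    /* Illustrative thash: compute the guest VHPT entry address for vaddr.
     * pta is the guest's PTA register value; rr_ps is the page size field
     * (rr.ps) of the region register covering vaddr. Mirrors the assembly
     * above; not the real Xen signature. */
    static uint64_t thash_sketch(uint64_t vaddr, uint64_t pta, uint64_t rr_ps)
    {
        uint64_t pta_size = (pta >> 2) & 0x3f;            /* pta.size: bits 2..7 */
        uint64_t vhpt_offset = ((vaddr >> rr_ps) << 3)    /* VPN index, 8-byte entries */
                               & ((1UL << pta_size) - 1); /* wrap inside the table */
        uint64_t vhpt_base = ((pta << 3) >> (pta_size + 3)) /* pta.base, aligned */
                             << pta_size;                   /* to the VHPT size  */
        return (vaddr & VRN_MASK) | vhpt_base | vhpt_offset;
    }

Masking the offset with (1UL << pta.size) - 1 keeps the entry index inside the table, which is why the assembly builds that mask in r22 before the final or.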
--- a/xen/arch/ia64/vmx/vmx_ivt.S	Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_ivt.S	Thu Jan 25 14:58:41 2007 -0700
@@ -795,12 +795,14 @@ ENTRY(vmx_virtualization_fault)
     cmp.eq p9,p0=EVENT_RSM,r24
     cmp.eq p10,p0=EVENT_SSM,r24
     cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
+    cmp.eq p12,p0=EVENT_THASH,r24
     (p6) br.dptk.many vmx_asm_mov_from_ar
     (p7) br.dptk.many vmx_asm_mov_from_rr
     (p8) br.dptk.many vmx_asm_mov_to_rr
     (p9) br.dptk.many vmx_asm_rsm
     (p10) br.dptk.many vmx_asm_ssm
     (p11) br.dptk.many vmx_asm_mov_to_psr
+    (p12) br.dptk.many vmx_asm_thash
     ;;
 vmx_virtualization_fault_back:
     mov r19=37
--- a/xen/include/asm-ia64/vmx_mm_def.h	Thu Jan 25 14:40:45 2007 -0700
+++ b/xen/include/asm-ia64/vmx_mm_def.h	Thu Jan 25 14:58:41 2007 -0700
@@ -103,7 +103,7 @@
 #define VA_MATTR_WC 0x6
 #define VA_MATTR_NATPAGE 0x7
 
-#define VRN_MASK 0xe000000000000000L
+#define VRN_MASK 0xe000000000000000
 #define PTA_BASE_MASK 0x3fffffffffffL
 #define PTA_BASE_SHIFT 15
 #define VHPT_OFFSET_MASK 0x7fff
@@ -114,6 +114,7 @@
 #define HPA_MAPPING_ATTRIBUTE 0x61 //ED:0;AR:0;PL:0;D:1;A:1;P:1
 #define VPN_2_VRN(vpn) ((vpn << PPN_SHIFT) >> IA64_VRN_SHIFT)
 
+#ifndef __ASSEMBLY__
 typedef enum { INSTRUCTION, DATA, REGISTER } miss_type;
 
 //typedef enum { MVHPT, STLB } vtlb_loc_type_t;
@@ -169,5 +170,6 @@ bits_v(uint64_t v, uint32_t bs, uint32_t
     "M" ((len))); \
     ret; \
 })
+#endif
 
 #endif
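These header hunks exist because optvfault.S now includes vmx_mm_def.h: the assembler cannot parse either the C-only 'L' suffix on the VRN_MASK constant or the typedefs and statement-expression macros further down, so the suffix is dropped and the C-only material is fenced behind #ifndef __ASSEMBLY__. A minimal sketch of the pattern (file and names hypothetical):

    /* shared_defs.h -- safe to include from both C and assembly */
    #define EXAMPLE_MASK 0xe000000000000000 /* no L suffix: the assembler
                                               would reject it */

    #ifndef __ASSEMBLY__
    /* C-only material, invisible when preprocessing a .S file
     * (the build defines __ASSEMBLY__ for assembly sources) */
    typedef unsigned long example_addr_t;
    static inline example_addr_t example_vrn_bits(example_addr_t va)
    {
        return va & EXAMPLE_MASK;
    }
    #endif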