ia64/xen-unstable
changeset 10699:e61bb865ec74
[IA64] optimize entry and exit path
The VMM saves/restores r4~r7 and ar.unat in order to handle virtualization
faults and MMIO accesses, but other faults do not need these registers
saved and restored. This patch saves/restores these registers only when
a dtlb miss or a virtualization fault occurs.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
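In effect the change splits the exit path in two: dispatchers that may need r4~r7 and ar.unat (the virtualization fault dispatcher and the new dtlb-miss dispatcher) spill them with the new VMX_SAVE_EXTRA macro and return through ia64_leave_hypervisor_prepare, which fills them back before falling through to ia64_leave_hypervisor; every other fault keeps the shorter path. A rough sketch of the two dispatch patterns, condensed from the diff below (illustrative only, not a complete dispatcher):

    // faults that touch r4~r7/ar.unat (virtualization fault, dtlb miss):
    VMX_SAVE_REST                           // spill the common register state
    VMX_SAVE_EXTRA                          // additionally spill r4~r7 and ar.unat
    movl r14=ia64_leave_hypervisor_prepare  // exit path that also restores them
    mov rp=r14

    // all other faults: no extra spills, shorter exit path
    VMX_SAVE_REST
    movl r14=ia64_leave_hypervisor
    mov rp=r14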
author   | awilliam@xenbuild.aw
date     | Fri Jul 14 11:18:36 2006 -0600 (2006-07-14)
parents  | c4af6e854010
children | b2abc70be89e
files    | xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/vmx/vmx_ivt.S xen/arch/ia64/vmx/vmx_minstate.h
line diff
--- a/xen/arch/ia64/vmx/vmx_entry.S  Fri Jul 14 11:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_entry.S  Fri Jul 14 11:18:36 2006 -0600
@@ -163,24 +163,39 @@ END(ia64_leave_nested)
 
 
 
-GLOBAL_ENTRY(ia64_leave_hypervisor)
+GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
     PT_REGS_UNWIND_INFO(0)
     /*
      * work.need_resched etc. mustn't get changed by this CPU before it returns to
     ;;
      * user- or fsys-mode, hence we disable interrupts early on:
      */
+    adds r2 = PT(R4)+16,r12
+    adds r3 = PT(R5)+16,r12
+    adds r8 = PT(EML_UNAT)+16,r12
+    ;;
+    ld8 r8 = [r8]
+    ;;
+    mov ar.unat=r8
+    ;;
+    ld8.fill r4=[r2],16 //load r4
+    ld8.fill r5=[r3],16 //load r5
+    ;;
+    ld8.fill r6=[r2] //load r6
+    ld8.fill r7=[r3] //load r7
+    ;;
+END(ia64_leave_hypervisor_prepare)
+//fall through
+GLOBAL_ENTRY(ia64_leave_hypervisor)
+    PT_REGS_UNWIND_INFO(0)
     rsm psr.i
     ;;
     alloc loc0=ar.pfs,0,1,1,0
+    ;;
     adds out0=16,r12
-    adds r7 = PT(EML_UNAT)+16,r12
-    ;;
-    ld8 r7 = [r7]
     br.call.sptk.many b0=leave_hypervisor_tail
     ;;
     mov ar.pfs=loc0
-    mov ar.unat=r7
     adds r20=PT(PR)+16,r12
     ;;
     lfetch [r20],PT(CR_IPSR)-PT(PR)
@@ -245,12 +260,6 @@ GLOBAL_ENTRY(ia64_leave_hypervisor)
     ldf.fill f10=[r2],32
     ldf.fill f11=[r3],24
     ;;
-    ld8.fill r4=[r2],16 //load r4
-    ld8.fill r5=[r3],16 //load r5
-    ;;
-    ld8.fill r6=[r2] //load r6
-    ld8.fill r7=[r3] //load r7
-    ;;
     srlz.i // ensure interruption collection is off
     ;;
     bsw.0
--- a/xen/arch/ia64/vmx/vmx_ivt.S  Fri Jul 14 11:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_ivt.S  Fri Jul 14 11:18:36 2006 -0600
@@ -201,7 +201,7 @@ vmx_itlb_loop:
     ;;
 vmx_itlb_out:
     mov r19 = 1
-    br.sptk vmx_dispatch_tlb_miss
+    br.sptk vmx_dispatch_itlb_miss
     VMX_FAULT(1);
 END(vmx_itlb_miss)
 
@@ -275,7 +275,7 @@ vmx_dtlb_loop:
     ;;
 vmx_dtlb_out:
     mov r19 = 2
-    br.sptk vmx_dispatch_tlb_miss
+    br.sptk vmx_dispatch_dtlb_miss
     VMX_FAULT(2);
 END(vmx_dtlb_miss)
 
@@ -1041,9 +1041,10 @@ ENTRY(vmx_dispatch_virtualization_fault)
     srlz.i // guarantee that interruption collection is on
     ;;
     (p15) ssm psr.i // restore psr.i
-    movl r14=ia64_leave_hypervisor
+    movl r14=ia64_leave_hypervisor_prepare
     ;;
     VMX_SAVE_REST
+    VMX_SAVE_EXTRA
     mov rp=r14
     ;;
     adds out1=16,sp //regs
@@ -1070,7 +1071,7 @@ ENTRY(vmx_dispatch_vexirq)
     br.call.sptk.many b6=vmx_vexirq
 END(vmx_dispatch_vexirq)
 
-ENTRY(vmx_dispatch_tlb_miss)
+ENTRY(vmx_dispatch_itlb_miss)
     VMX_SAVE_MIN_WITH_COVER_R19
     alloc r14=ar.pfs,0,0,3,0
     mov out0=cr.ifa
@@ -1089,8 +1090,29 @@ ENTRY(vmx_dispatch_tlb_miss)
     ;;
     adds out2=16,r12
     br.call.sptk.many b6=vmx_hpw_miss
-END(vmx_dispatch_tlb_miss)
+END(vmx_dispatch_itlb_miss)
 
+ENTRY(vmx_dispatch_dtlb_miss)
+    VMX_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,3,0
+    mov out0=cr.ifa
+    mov out1=r15
+    adds r3=8,r2 // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i // guarantee that interruption collection is on
+    ;;
+    (p15) ssm psr.i // restore psr.i
+    movl r14=ia64_leave_hypervisor_prepare
+    ;;
+    VMX_SAVE_REST
+    VMX_SAVE_EXTRA
+    mov rp=r14
+    ;;
+    adds out2=16,r12
+    br.call.sptk.many b6=vmx_hpw_miss
+END(vmx_dispatch_dtlb_miss)
 
 ENTRY(vmx_dispatch_break_fault)
     VMX_SAVE_MIN_WITH_COVER_R19
--- a/xen/arch/ia64/vmx/vmx_minstate.h  Fri Jul 14 11:06:38 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_minstate.h  Fri Jul 14 11:18:36 2006 -0600
@@ -260,24 +260,27 @@
     stf.spill [r3]=f9,32; \
     ;; \
     stf.spill [r2]=f10,32; \
-    stf.spill [r3]=f11,24; \
+    stf.spill [r3]=f11; \
+    adds r25=PT(B7)-PT(F11),r3; \
     ;; \
+    st8 [r24]=r18,16; /* b6 */ \
+    st8 [r25]=r19,16; /* b7 */ \
+    adds r3=PT(R5)-PT(F11),r3; \
+    ;; \
+    st8 [r24]=r9; /* ar.csd */ \
+    st8 [r25]=r10; /* ar.ssd */ \
+    ;;
+
+#define VMX_SAVE_EXTRA \
     .mem.offset 0,0; st8.spill [r2]=r4,16; \
     .mem.offset 8,0; st8.spill [r3]=r5,16; \
     ;; \
     .mem.offset 0,0; st8.spill [r2]=r6,16; \
     .mem.offset 8,0; st8.spill [r3]=r7; \
-    adds r25=PT(B7)-PT(R7),r3; \
-    ;; \
-    st8 [r24]=r18,16; /* b6 */ \
-    st8 [r25]=r19,16; /* b7 */ \
-    ;; \
-    st8 [r24]=r9; /* ar.csd */ \
-    mov r26=ar.unat; \
-    ;; \
-    st8 [r25]=r10; /* ar.ssd */ \
+    ;; \
+    mov r26=ar.unat; \
+    ;; \
     st8 [r2]=r26; /* eml_unat */ \
-    ;;
 
 #define VMX_SAVE_MIN_WITH_COVER VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs,)
 #define VMX_SAVE_MIN_WITH_COVER_R19 VMX_DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)