ia64/xen-unstable
changeset 16006:5c56ce7b9892
[IA64] Replace mode_flags by mmu_mode
Replace mode_flags by mmu_mode and put it into arch_vmx structure.
Cleanup in vmx_phy_mode.c to prepare for half-physical mode (dt=0,it=1)
Signed-off-by: Tristan Gingold <tgingold@free.fr>
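For reference, the change collapses the old GUEST_IN_PHY / GUEST_PHY_EMUL bit flags into a single byte-sized mmu_mode field in arch_vmx. Below is a minimal sketch of the new encoding, using the constant values and the is_virtual_mode() test introduced by this changeset; the psr_to_mmu_mode() helper is purely illustrative and is not part of the patch:

```c
/* Mode values introduced below in vmx_phy_mode.h. */
#define VMX_MMU_VIRTUAL 0   /* psr.it=1, psr.dt=1: fully virtual  */
#define VMX_MMU_PHY_D   1   /* psr.it=1, psr.dt=0: half physical  */
#define VMX_MMU_PHY_DT  3   /* psr.it=0, psr.dt=0: fully physical */

/* Illustrative helper (not in the patch): derive the mode from the guest
 * psr.it/psr.dt bits.  The it=0,dt=1 combination is not a supported steady
 * state and is folded into full physical mode here. */
static inline unsigned char psr_to_mmu_mode(int it, int dt)
{
    if (it && dt)
        return VMX_MMU_VIRTUAL;
    if (it && !dt)
        return VMX_MMU_PHY_D;   /* the new half-physical case */
    return VMX_MMU_PHY_DT;
}
```

Keeping the mode in a single byte is what lets the optvfault.S fast paths below use ld1/st1 and one cmp against VMX_MMU_VIRTUAL instead of tbit tests on GUEST_IN_PHY_BIT.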
author | Alex Williamson <alex.williamson@hp.com> |
---|---|
date | Mon Oct 01 09:52:14 2007 -0600 (2007-10-01) |
parents | 83239b289072 |
children | 3874bdc78204 |
files | xen/arch/ia64/asm-offsets.c xen/arch/ia64/vmx/optvfault.S xen/arch/ia64/vmx/vmx_fault.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_vcpu.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/vmx_phy_mode.h xen/include/asm-ia64/vmx_vpd.h |
line diff
--- a/xen/arch/ia64/asm-offsets.c	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/asm-offsets.c	Mon Oct 01 09:52:14 2007 -0600
@@ -59,6 +59,7 @@ void foo(void)
     DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
     DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
     DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
+    DEFINE(IA64_VCPU_META_RID_D_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_d));
     DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
     DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
     DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
@@ -149,7 +150,7 @@ void foo(void)
     DEFINE(SWITCH_MPTA_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.mpta));
     DEFINE(IA64_PT_REGS_R16_SLOT, (((offsetof(struct pt_regs, r16)-sizeof(struct pt_regs))>>3)&0x3f));
     DEFINE(IA64_VCPU_FLAGS_OFFSET,offsetof(struct vcpu ,arch.arch_vmx.flags));
-    DEFINE(IA64_VCPU_MODE_FLAGS_OFFSET,offsetof(struct vcpu, arch.mode_flags));
+    DEFINE(IA64_VCPU_MMU_MODE_OFFSET,offsetof(struct vcpu, arch.arch_vmx.mmu_mode));
 
     BLANK();
 
@@ -202,7 +203,6 @@ void foo(void)
     DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
     DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
     DEFINE(IA64_VPD_VPTA_OFFSET, offsetof (struct mapped_regs, pta));
-    DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
     DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
 
     BLANK();
--- a/xen/arch/ia64/vmx/optvfault.S	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/optvfault.S	Mon Oct 01 09:52:14 2007 -0600
@@ -149,15 +149,15 @@ vmx_asm_mov_to_rr_back_2:
     ;;
     cmp.eq.or p6,p0=4,r23
     ;;
-    adds r16=IA64_VCPU_MODE_FLAGS_OFFSET,r21
+    adds r16=IA64_VCPU_MMU_MODE_OFFSET,r21
     (p6) adds r17=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
     ;;
-    ld4 r16=[r16]
+    ld1 r16=[r16]
     cmp.eq p7,p0=r0,r0
     (p6) shladd r17=r23,1,r17
     ;;
     (p6) st8 [r17]=r19
-    (p6) tbit.nz p6,p7=r16,GUEST_IN_PHY_BIT // Set physical rr if in virt mode
+    (p6) cmp.eq p7,p0=VMX_MMU_VIRTUAL,r16 // Set physical rr if in virt mode
     ;;
     (p7) mov rr[r28]=r19
     mov r24=r22
@@ -179,13 +179,13 @@ GLOBAL_ENTRY(vmx_asm_rsm)
     dep r26=r27,r26,21,2
     ;;
     add r17=VPD_VPSR_START_OFFSET,r16
-    add r22=IA64_VCPU_MODE_FLAGS_OFFSET,r21
+    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
     //r26 is imm24
     dep r26=r28,r26,23,1
     ;;
     ld8 r18=[r17]
     movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
-    ld4 r23=[r22]
+    ld1 r23=[r22]
     sub r27=-1,r26 // ~r26
     mov r24=b0
     ;;
@@ -199,22 +199,22 @@ GLOBAL_ENTRY(vmx_asm_rsm)
     ;;
     ld8 r27=[r27]
     ;;
-    tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
+    tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
     ;;
     (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 // Keep dfh
     ;;
     mov cr.ipsr=r20
-    tbit.nz p6,p0=r23,GUEST_IN_PHY_BIT
+    cmp.ne p6,p0=VMX_MMU_VIRTUAL,r23
     ;;
     tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-    (p6) br.dptk vmx_resume_to_guest // (DT set or already in phy mode)
+    (p6) br.dptk vmx_resume_to_guest // DT not cleared or already in phy mode
     ;;
     // Switch to meta physical mode.
     add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
-    dep r23=-1,r23,GUEST_IN_PHY_BIT,1 // Set GUEST_IN_PHY
+    mov r23=VMX_MMU_PHY_DT
     ;;
     ld8 r26=[r26]
-    st4 [r22]=r23
+    st1 [r22]=r23
     dep.z r28=4,61,3
     ;;
     mov rr[r0]=r26
@@ -245,30 +245,30 @@ GLOBAL_ENTRY(vmx_asm_ssm)
     ld8 r29=[r27]
     mov r24=b0
     ;;
-    add r22=IA64_VCPU_MODE_FLAGS_OFFSET,r21
+    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
     mov r20=cr.ipsr
     or r19=r29,r26
     ;;
-    ld4 r23=[r22]
-    st8 [r27]=r19
+    ld1 r23=[r22] // mmu_mode
+    st8 [r27]=r19 // vpsr
     or r20=r20,r26
     ;;
     mov cr.ipsr=r20
     movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
     ;;
     and r19=r28,r19
-    tbit.z p6,p0=r23,GUEST_IN_PHY_BIT
+    cmp.eq p6,p0=VMX_MMU_VIRTUAL,r23
     ;;
-    cmp.ne.or p6,p0=r28,r19
+    cmp.ne.or p6,p0=r28,r19 // (vpsr & (it+dt+rt)) /= (it+dt+rt) ie stay in phy
     (p6) br.dptk vmx_asm_ssm_1
     ;;
     add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
     add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
-    dep r23=0,r23,GUEST_IN_PHY_BIT,1 // Clear GUEST_IN_PHY
+    mov r23=VMX_MMU_VIRTUAL
     ;;
     ld8 r26=[r26]
     ld8 r27=[r27]
-    st4 [r22]=r23
+    st1 [r22]=r23
     dep.z r28=4,61,3
     ;;
     mov rr[r0]=r26
@@ -320,37 +320,37 @@ GLOBAL_ENTRY(vmx_asm_mov_to_psr)
     br.many b0
     ;;
 vmx_asm_mov_to_psr_back:
-    ld8 r17=[r27]
-    add r22=IA64_VCPU_MODE_FLAGS_OFFSET,r21
-    dep r19=0,r19,32,32
+    ld8 r17=[r27] // vpsr
+    add r22=IA64_VCPU_MMU_MODE_OFFSET,r21
+    dep r19=0,r19,32,32 // Clear bits 32-63
     ;;
-    ld4 r23=[r22]
+    ld1 r23=[r22] // mmu_mode
     dep r18=0,r17,0,32
     ;;
-    add r30=r18,r19
+    or r30=r18,r19
     movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
     ;;
-    st8 [r27]=r30
+    st8 [r27]=r30 // set vpsr
     and r27=r28,r30
     and r29=r28,r17
     ;;
-    cmp.eq p5,p0=r29,r27
-    cmp.eq p6,p7=r28,r27
-    (p5) br.many vmx_asm_mov_to_psr_1
+    cmp.eq p5,p0=r29,r27 // (old_vpsr & (dt+rt+it)) == (new_vpsr & (dt+rt+it))
+    cmp.eq p6,p7=r28,r27 // (new_vpsr & (dt+rt+it)) == (dt+rt+it)
+    (p5) br.many vmx_asm_mov_to_psr_1 // no change
     ;;
     //virtual to physical
     (p7) add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
     (p7) add r27=IA64_VCPU_META_RID_DT_OFFSET,r21
-    (p7) dep r23=-1,r23,GUEST_IN_PHY_BIT,1
+    (p7) mov r23=VMX_MMU_PHY_DT
     ;;
     //physical to virtual
     (p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
     (p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
-    (p6) dep r23=0,r23,GUEST_IN_PHY_BIT,1
+    (p6) mov r23=VMX_MMU_VIRTUAL
     ;;
     ld8 r26=[r26]
     ld8 r27=[r27]
-    st4 [r22]=r23
+    st1 [r22]=r23
     dep.z r28=4,61,3
     ;;
     mov rr[r0]=r26
@@ -443,10 +443,10 @@ GLOBAL_ENTRY(vmx_asm_thash)
 vmx_asm_thash_back1:
     shr.u r23=r19,61 // get RR number
     adds r25=VCPU_VRR0_OFS,r21 // get vcpu->arch.arch_vmx.vrr[0]'s addr
-    adds r16=IA64_VPD_VPTA_OFFSET,r16 // get vpta
+    adds r16=IA64_VPD_VPTA_OFFSET,r16 // get virtual pta
     ;;
     shladd r27=r23,3,r25 // get vcpu->arch.arch_vmx.vrr[r23]'s addr
-    ld8 r17=[r16] // get PTA
+    ld8 r17=[r16] // get virtual PTA
     mov r26=1
     ;;
     extr.u r29=r17,2,6 // get pta.size
--- a/xen/arch/ia64/vmx/vmx_fault.c	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_fault.c	Mon Oct 01 09:52:14 2007 -0600
@@ -335,7 +335,7 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
         panic_domain(regs, "wrong vec:%lx\n", vec);
 
     /* Physical mode and region is 0 or 4. */
-    if (is_physical_mode(v) && (!((vadr<<1)>>62))) {
+    if (!is_virtual_mode(v) && (!((vadr << 1) >> 62))) {
         if (vec == 2) {
             /* DTLB miss. */
             if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
@@ -351,7 +351,10 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
     }
 
 try_again:
-    if ((data=vtlb_lookup(v, vadr,type)) != 0) {
+    /* Search in VTLB. */
+    data = vtlb_lookup(v, vadr, type);
+    if (data != 0) {
+        /* Found. */
         if (v->domain != dom0 && type == DSIDE_TLB) {
             if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
                 if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
@@ -373,8 +376,10 @@ try_again:
             }
         }
         thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
+        return IA64_NO_FAULT;
+    }
 
-    } else if (type == DSIDE_TLB) {
+    if (type == DSIDE_TLB) {
         struct opt_feature* optf = &(v->domain->arch.opt_feature);
 
         if (misr.sp)
@@ -385,7 +390,7 @@ try_again:
 
         if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
             /* windows use region 4 and 5 for identity mapping */
-            if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4 &&
+            if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG4) &&
                 REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
                 REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
 
@@ -395,7 +400,7 @@ try_again:
                 goto try_again;
             return IA64_NO_FAULT;
         }
-        if (optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5 &&
+        if ((optf->mask & XEN_IA64_OPTF_IDENT_MAP_REG5) &&
             REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL) &&
             REGION_OFFSET(vadr) <= _PAGE_PPN_MASK) {
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Mon Oct 01 09:52:14 2007 -0600
@@ -25,7 +25,6 @@
 #include <asm/processor.h>
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_phy_mode.h>
-#include <xen/sched.h>
 #include <asm/pgtable.h>
 #include <asm/vmmu.h>
 #include <asm/debugger.h>
@@ -44,11 +43,10 @@
  * Special notes:
  * - Index by it/dt/rt sequence
  * - Only existing mode transitions are allowed in this table
- * - RSE is placed at lazy mode when emulating guest partial mode
  * - If gva happens to be rr0 and rr4, only allowed case is identity
  *   mapping (gva=gpa), or panic! (How?)
  */
-static const int mm_switch_table[8][8] = {
+static const unsigned char mm_switch_table[8][8] = {
 /* 2004/09/12(Kevin): Allow switch to self */
 /*
  *  (it,dt,rt): (0,0,0) -> (1,1,1)
@@ -94,41 +92,36 @@ static const int mm_switch_table[8][8] =
  *  (see "arch/ia64/kernel/head.S")
  *  (1,1,1)->(1,0,0)
  */
-
     {SW_V2P_DT, 0, 0, 0, SW_V2P_D, SW_V2P_D, 0, SW_SELF},
 };
 
 void
 physical_mode_init(VCPU *vcpu)
 {
-    vcpu->arch.mode_flags = GUEST_IN_PHY;
+    vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
 }
 
 void
 physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
 {
     u64 pte;
-    ia64_rr rr;
-    rr.rrval = ia64_get_rr(vadr);
-    pte = vadr & _PAGE_PPN_MASK;
-    pte = pte | PHY_PAGE_WB;
-    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
-    return;
+
+    pte = (vadr & _PAGE_PPN_MASK) | PHY_PAGE_WB;
+    thash_vhpt_insert(vcpu, pte, (PAGE_SHIFT << 2), vadr, type);
 }
 
 void
 vmx_init_all_rr(VCPU *vcpu)
 {
-    VMX(vcpu, vrr[VRN0]) = 0x38;
     // enable vhpt in guest physical mode
     vcpu->arch.metaphysical_rid_dt |= 1;
+
+    VMX(vcpu, vrr[VRN0]) = 0x38;
     vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
     VMX(vcpu, vrr[VRN1]) = 0x38;
     VMX(vcpu, vrr[VRN2]) = 0x38;
     VMX(vcpu, vrr[VRN3]) = 0x38;
     VMX(vcpu, vrr[VRN4]) = 0x38;
-    // enable vhpt in guest physical mode
-    vcpu->arch.metaphysical_rid_d |= 0; /* VHPT not enabled! */
     vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
     VMX(vcpu, vrr[VRN5]) = 0x38;
     VMX(vcpu, vrr[VRN6]) = 0x38;
@@ -141,31 +134,31 @@ void
 vmx_load_all_rr(VCPU *vcpu)
 {
     unsigned long psr;
-
-    local_irq_save(psr);
+    unsigned long rr0, rr4;
 
-    /* WARNING: not allow co-exist of both virtual mode and physical
-     * mode in same region
-     */
-    if (is_physical_mode(vcpu)) {
-        if (vcpu->arch.mode_flags & GUEST_PHY_EMUL){
-            panic_domain(vcpu_regs(vcpu),
-                         "Unexpected domain switch in phy emul\n");
-        }
-        ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
-        ia64_dv_serialize_data();
-        ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
-        ia64_dv_serialize_data();
-    } else {
-        ia64_set_rr((VRN0 << VRN_SHIFT),
-                    vcpu->arch.metaphysical_saved_rr0);
-        ia64_dv_serialize_data();
-        ia64_set_rr((VRN4 << VRN_SHIFT),
-                    vcpu->arch.metaphysical_saved_rr4);
-        ia64_dv_serialize_data();
+    switch (vcpu->arch.arch_vmx.mmu_mode) {
+    case VMX_MMU_VIRTUAL:
+        rr0 = vcpu->arch.metaphysical_saved_rr0;
+        rr4 = vcpu->arch.metaphysical_saved_rr4;
+        break;
+    case VMX_MMU_PHY_DT:
+        rr0 = vcpu->arch.metaphysical_rid_dt;
+        rr4 = vcpu->arch.metaphysical_rid_dt;
+        break;
+    case VMX_MMU_PHY_D:
+        rr0 = vcpu->arch.metaphysical_rid_d;
+        rr4 = vcpu->arch.metaphysical_rid_d;
+        break;
+    default:
+        panic_domain(NULL, "bad mmu mode value");
     }
 
-    /* rr567 will be postponed to last point when resuming back to guest */
+    psr = ia64_clear_ic();
+
+    ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
+    ia64_dv_serialize_data();
+    ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
+    ia64_dv_serialize_data();
     ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
     ia64_dv_serialize_data();
     ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
@@ -190,13 +183,25 @@ void
 switch_to_physical_rid(VCPU *vcpu)
 {
     u64 psr;
+    u64 rr;
 
+    switch (vcpu->arch.arch_vmx.mmu_mode) {
+    case VMX_MMU_PHY_DT:
+        rr = vcpu->arch.metaphysical_rid_dt;
+        break;
+    case VMX_MMU_PHY_D:
+        rr = vcpu->arch.metaphysical_rid_d;
+        break;
+    default:
+        panic_domain(NULL, "bad mmu mode value");
+    }
+
     psr = ia64_clear_ic();
-    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
+    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
+    ia64_dv_serialize_data();
+    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
     ia64_srlz_d();
-    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
-    ia64_srlz_d();
-
+
     ia64_set_psr(psr);
     ia64_srlz_i();
     return;
@@ -206,9 +211,10 @@ void
 switch_to_virtual_rid(VCPU *vcpu)
 {
     u64 psr;
-    psr=ia64_clear_ic();
+
+    psr = ia64_clear_ic();
     ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
-    ia64_srlz_d();
+    ia64_dv_serialize_data();
     ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
     ia64_srlz_d();
     ia64_set_psr(psr);
@@ -232,22 +238,14 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
     case SW_V2P_D:
 //        printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
 //               old_psr.val, new_psr.val);
+        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
         switch_to_physical_rid(vcpu);
-        /*
-         * Set rse to enforced lazy, to prevent active rse save/restor when
-         * guest physical mode.
-         */
-        vcpu->arch.mode_flags |= GUEST_IN_PHY;
         break;
     case SW_P2V:
 //        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
 //               old_psr.val, new_psr.val);
+        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
         switch_to_virtual_rid(vcpu);
-        /*
-         * recover old mode which is saved when entering
-         * guest physical mode
-         */
-        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
         break;
     case SW_SELF:
         printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
@@ -259,7 +257,9 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
         break;
     default:
         /* Sanity check */
-        panic_domain(vcpu_regs(vcpu),"Unexpected virtual <--> physical mode transition,old:%lx,new:%lx\n",old_psr.val,new_psr.val);
+        panic_domain(vcpu_regs(vcpu),
+                     "Unexpected virtual <--> physical mode transition, "
+                     "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
         break;
     }
     return;
@@ -268,16 +268,12 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
 void
 check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
 {
-
     if (old_psr.dt != new_psr.dt ||
         old_psr.it != new_psr.it ||
         old_psr.rt != new_psr.rt) {
-
         switch_mm_mode(vcpu, old_psr, new_psr);
         debugger_event(XEN_IA64_DEBUG_ON_MMU);
     }
-
-    return;
 }
 
 
@@ -300,10 +296,8 @@ check_mm_mode_switch (VCPU *vcpu, IA64_
 void
 prepare_if_physical_mode(VCPU *vcpu)
 {
-    if (is_physical_mode(vcpu)) {
-        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
+    if (!is_virtual_mode(vcpu))
         switch_to_virtual_rid(vcpu);
-    }
     return;
 }
 
@@ -311,9 +305,8 @@ prepare_if_physical_mode(VCPU *vcpu)
 void
 recover_if_physical_mode(VCPU *vcpu)
 {
-    if (is_physical_mode(vcpu))
+    if (!is_virtual_mode(vcpu))
         switch_to_physical_rid(vcpu);
-    vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
     return;
 }
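The mode-transition logic above is driven by mm_switch_table, which the file's comments describe as indexed by the (it,dt,rt) bit sequence. A hedged sketch of how such a lookup reads in C (the table and the SW_* action values come from vmx_phy_mode.c above; the helper itself and its name are illustrative only, not code from the patch):

```c
/* Illustrative only: index the 8x8 table by the (it,dt,rt) bits of the old
 * and new guest PSR, per the "Index by it/dt/rt sequence" comment.
 * Assumes mm_switch_table from vmx_phy_mode.c is in scope. */
static inline int mm_switch_action_sketch(IA64_PSR old_psr, IA64_PSR new_psr)
{
    int old_idx = (old_psr.it << 2) | (old_psr.dt << 1) | old_psr.rt;
    int new_idx = (new_psr.it << 2) | (new_psr.dt << 1) | new_psr.rt;

    /* 0 means the transition is not handled; otherwise SW_SELF, SW_V2P_DT,
     * SW_V2P_D, SW_P2V, ... */
    return mm_switch_table[old_idx][new_idx];
}
```

switch_mm_mode() then acts on the returned value, e.g. SW_V2P_DT sets mmu_mode to VMX_MMU_PHY_DT before calling switch_to_physical_rid().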
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Mon Oct 01 09:52:14 2007 -0600
@@ -178,13 +178,13 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u6
     case VRN4:
         rrval = vrrtomrr(vcpu,val);
         vcpu->arch.metaphysical_saved_rr4 = rrval;
-        if (!is_physical_mode(vcpu))
+        if (is_virtual_mode(vcpu))
             ia64_set_rr(reg,rrval);
         break;
     case VRN0:
         rrval = vrrtomrr(vcpu,val);
         vcpu->arch.metaphysical_saved_rr0 = rrval;
-        if (!is_physical_mode(vcpu))
+        if (is_virtual_mode(vcpu))
             ia64_set_rr(reg,rrval);
         break;
     default:
--- a/xen/include/asm-ia64/domain.h	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/include/asm-ia64/domain.h	Mon Oct 01 09:52:14 2007 -0600
@@ -254,8 +254,6 @@ struct arch_vcpu {
     char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
     char hypercall_continuation;
 
-    // for physical emulation
-    int mode_flags;
     fpswa_ret_t fpswa_ret;  /* save return values of FPSWA emulation */
     struct timer hlt_timer;
     struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
--- a/xen/include/asm-ia64/vmx_phy_mode.h	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/include/asm-ia64/vmx_phy_mode.h	Mon Oct 01 09:52:14 2007 -0600
@@ -85,22 +85,13 @@ extern void recover_if_physical_mode(VCP
 extern void vmx_init_all_rr(VCPU *vcpu);
 extern void vmx_load_all_rr(VCPU *vcpu);
 extern void physical_tlb_miss(VCPU *vcpu, u64 vadr, int type);
-/*
- * No sanity check here, since all psr changes have been
- * checked in switch_mm_mode().
- */
-#define is_physical_mode(v) \
-    ((v->arch.mode_flags) & GUEST_IN_PHY)
 
-#define is_virtual_mode(v) \
-    (!is_physical_mode(v))
+#define is_virtual_mode(v)  ((v)->arch.arch_vmx.mmu_mode == VMX_MMU_VIRTUAL)
 
 #endif /* __ASSEMBLY__ */
 
-#define GUEST_IN_PHY_BIT 0
-#define GUEST_PHY_EMUL_BIT 1
-
-#define GUEST_IN_PHY (1 << GUEST_IN_PHY_BIT)
-#define GUEST_PHY_EMUL (1 << GUEST_PHY_EMUL_BIT)
+#define VMX_MMU_VIRTUAL 0   /* Full virtual mode: it=dt=1  */
+#define VMX_MMU_PHY_D   1   /* Half physical: it=1,dt=0  */
+#define VMX_MMU_PHY_DT  3   /* Full physical mode: it=0,dt=0  */
 
 #endif /* _PHY_MODE_H_ */
--- a/xen/include/asm-ia64/vmx_vpd.h	Thu Sep 27 16:29:43 2007 -0600
+++ b/xen/include/asm-ia64/vmx_vpd.h	Mon Oct 01 09:52:14 2007 -0600
@@ -32,38 +32,6 @@
 #define VPD_SHIFT 16
 #define VPD_SIZE (1 << VPD_SHIFT)
 
-typedef struct {
-    unsigned long dcr;     // CR0
-    unsigned long itm;
-    unsigned long iva;
-    unsigned long rsv1[5];
-    unsigned long pta;     // CR8
-    unsigned long rsv2[7];
-    unsigned long ipsr;    // CR16
-    unsigned long isr;
-    unsigned long rsv3;
-    unsigned long iip;
-    unsigned long ifa;
-    unsigned long itir;
-    unsigned long iipa;
-    unsigned long ifs;
-    unsigned long iim;     // CR24
-    unsigned long iha;
-    unsigned long rsv4[38];
-    unsigned long lid;     // CR64
-    unsigned long ivr;
-    unsigned long tpr;
-    unsigned long eoi;
-    unsigned long irr[4];
-    unsigned long itv;     // CR72
-    unsigned long pmv;
-    unsigned long cmcv;
-    unsigned long rsv5[5];
-    unsigned long lrr0;    // CR80
-    unsigned long lrr1;
-    unsigned long rsv6[46];
-} cr_t;
-
 #ifdef VTI_DEBUG
 struct ivt_debug{
     unsigned long iip;
@@ -91,6 +59,7 @@ struct arch_vmx_struct {
     unsigned long xen_port;
     unsigned char xtp;
     unsigned char pal_init_pending;
+    unsigned char mmu_mode; /* Current mmu mode.  See vmx_phy_mode.h */
 #ifdef VTI_DEBUG
     unsigned long ivt_current;
     struct ivt_debug ivt_debug[IVT_DEBUG_MAX];