#include <asm/vmx_pal_vsa.h>
#include <asm/asm-offsets.h>
#include <asm-ia64/vmx_mm_def.h>
+#include <asm-ia64/vmx_phy_mode.h>
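+// for GUEST_IN_PHY_BIT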
#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_PSR
#define ACCE_THASH
-//mov r1=ar3
+//mov r1=ar3 (only itc is virtualized)
GLOBAL_ENTRY(vmx_asm_mov_from_ar)
#ifndef ACCE_MOV_FROM_AR
br.many vmx_virtualization_fault_back
;; //mangling rid 1 and 3
extr.u r16=r19,8,8
extr.u r17=r19,24,8
- extr.u r18=r19,2,6
+ extr.u r18=r19,2,6 // page size
;;
dep r19=r16,r19,24,8
;;
(p6) shladd r17=r23,1,r17
;;
(p6) st8 [r17]=r19
- (p6) tbit.nz p6,p7=r16,0
+ (p6) tbit.nz p6,p7=r16,GUEST_IN_PHY_BIT // Set physical rr if in virt mode
;;
(p7) mov rr[r28]=r19
mov r24=r22
br.many vmx_virtualization_fault_back
#endif
add r16=IA64_VPD_BASE_OFFSET,r21
- extr.u r26=r25,6,21
- extr.u r27=r25,31,2
+ extr.u r26=r25,6,21 // Imm21
+ extr.u r27=r25,31,2 // I2d
;;
ld8 r16=[r16]
- extr.u r28=r25,36,1
+ extr.u r28=r25,36,1 // I
dep r26=r27,r26,21,2
;;
add r17=VPD_VPSR_START_OFFSET,r16
ld8 r18=[r17]
movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
ld4 r23=[r22]
- sub r27=-1,r26
+ sub r27=-1,r26 // ~r26
mov r24=b0
;;
mov r20=cr.ipsr
- or r28=r27,r28
- and r19=r18,r27
+ or r28=r27,r28 // Keep IC,I,DT,SI
+ and r19=r18,r27 // Update vpsr
;;
st8 [r17]=r19
- and r20=r20,r28
+ and r20=r20,r28 // Update ipsr
adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
;;
ld8 r27=[r27]
;;
tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
;;
- (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+ (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1 // Keep dfh
;;
mov cr.ipsr=r20
- tbit.nz p6,p0=r23,0
+ tbit.nz p6,p0=r23,GUEST_IN_PHY_BIT
;;
tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
- (p6) br.dptk vmx_resume_to_guest
+ (p6) br.dptk vmx_resume_to_guest // (DT set or already in phy mode)
;;
- add r26=IA64_VCPU_META_RR0_OFFSET,r21
- add r27=IA64_VCPU_META_RR0_OFFSET+8,r21
- dep r23=-1,r23,0,1
+ // Switch to meta physical mode.
+ add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
+ dep r23=-1,r23,GUEST_IN_PHY_BIT,1 // Set GUEST_IN_PHY
;;
ld8 r26=[r26]
- ld8 r27=[r27]
- st4 [r22]=r23
+ st4 [r22]=r23
dep.z r28=4,61,3
;;
mov rr[r0]=r26
;;
- mov rr[r28]=r27
+ mov rr[r28]=r26 // rr4 (r28 = 4<<61) gets the same metaphysical rid as rr0
;;
srlz.d
br.many vmx_resume_to_guest
movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
;;
and r19=r28,r19
- tbit.z p6,p0=r23,0
+ tbit.z p6,p0=r23,GUEST_IN_PHY_BIT
;;
cmp.ne.or p6,p0=r28,r19
(p6) br.dptk vmx_asm_ssm_1
;;
add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
- dep r23=0,r23,0,1
+ dep r23=0,r23,GUEST_IN_PHY_BIT,1 // Clear GUEST_IN_PHY
;;
ld8 r26=[r26]
ld8 r27=[r27]
(p5) br.many vmx_asm_mov_to_psr_1
;;
//virtual to physical
- (p7) add r26=IA64_VCPU_META_RR0_OFFSET,r21
- (p7) add r27=IA64_VCPU_META_RR0_OFFSET+8,r21
- (p7) dep r23=-1,r23,0,1
+ (p7) add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
+ (p7) add r27=IA64_VCPU_META_RID_DT_OFFSET,r21 // rr0 and rr4 now share one metaphysical rid
+ (p7) dep r23=-1,r23,GUEST_IN_PHY_BIT,1
;;
//physical to virtual
(p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
(p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
- (p6) dep r23=0,r23,0,1
+ (p6) dep r23=0,r23,GUEST_IN_PHY_BIT,1
;;
ld8 r26=[r26]
ld8 r27=[r27]
// mov from reg table
+// r19: value, r30: return address
ENTRY(asm_mov_from_reg)
MOV_FROM_REG(0)
MOV_FROM_REG(1)
// mov to reg table
+// r19: value, r30: return address
ENTRY(asm_mov_to_reg)
MOV_TO_REG0
MOV_TO_REG(1)
#include <asm/vmmu.h>
#include <asm/debugger.h>
-static const int valid_mm_mode[8] = {
- GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
- INV_MODE,
- INV_MODE,
- GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
- INV_MODE,
- GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
- INV_MODE,
- GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1).*/
-};
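+/* Build the 3-bit (it,dt,rt) index used by mm_switch_table, e.g. (1,0,1) -> 5. */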
+#define MODE_IND(psr) \
+ (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+
+#define SW_BAD 0 /* Bad mode transition */
+#define SW_V2P_DT 1 /* Physical emulation is activated */
+#define SW_V2P_D 2 /* Physical emulation is activated (only for data) */
+#define SW_P2V 3 /* Exit physical mode emulation */
+#define SW_SELF 4 /* No mode transition */
+#define SW_NOP 5 /* Mode transition, but without action required */
/*
* Special notes:
*/
static const int mm_switch_table[8][8] = {
/* 2004/09/12(Kevin): Allow switch to self */
- /*
- * (it,dt,rt): (0,0,0) -> (1,1,1)
- * This kind of transition usually occurs in the very early
+ /*
+ * (it,dt,rt): (0,0,0) -> (1,1,1)
+ * This kind of transition usually occurs in the very early
* stage of Linux boot up procedure. Another case is in efi
* and pal calls. (see "arch/ia64/kernel/head.S")
*
* service. Due to gva = gpa in this case (Same region),
* data access can be satisfied though itlb entry for physical
* emulation is hit.
- */
+ */
{SW_SELF,0, 0, SW_NOP, 0, 0, 0, SW_P2V},
{0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0},
/* (1,0,0)->(1,1,1) */
{0, 0, 0, 0, 0, 0, 0, SW_P2V},
/*
- * (it,dt,rt): (1,0,1) -> (1,1,1)
- * This kind of transition usually occurs when Linux returns
+ * (it,dt,rt): (1,0,1) -> (1,1,1)
+ * This kind of transition usually occurs when Linux returns
* from the low level TLB miss handlers.
- * (see "arch/ia64/kernel/ivt.S")
- */
+ * (see "arch/ia64/kernel/ivt.S")
+ */
{0, 0, 0, 0, 0, SW_SELF,0, SW_P2V},
{0, 0, 0, 0, 0, 0, 0, 0},
/*
- * (it,dt,rt): (1,1,1) -> (1,0,1)
- * This kind of transition usually occurs in Linux low level
+ * (it,dt,rt): (1,1,1) -> (1,0,1)
+ * This kind of transition usually occurs in Linux low level
* TLB miss handler. (see "arch/ia64/kernel/ivt.S")
*
* (it,dt,rt): (1,1,1) -> (0,0,0)
* (1,1,1)->(1,0,0)
*/
- {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
+ {SW_V2P_DT, 0, 0, 0, SW_V2P_D, SW_V2P_D, 0, SW_SELF},
};
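+/* Rows are MODE_IND(old_psr), columns MODE_IND(new_psr); see mm_switch_action() below. */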
void
u64 pte;
ia64_rr rr;
rr.rrval = ia64_get_rr(vadr);
- pte = vadr& _PAGE_PPN_MASK;
+ pte = vadr & _PAGE_PPN_MASK;
pte = pte | PHY_PAGE_WB;
thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
return;
}
-
void
vmx_init_all_rr(VCPU *vcpu)
{
VMX(vcpu, vrr[VRN0]) = 0x38;
// enable vhpt in guest physical mode
- vcpu->arch.metaphysical_rr0 |= 1;
+ vcpu->arch.metaphysical_rid_dt |= 1;
vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
VMX(vcpu, vrr[VRN1]) = 0x38;
VMX(vcpu, vrr[VRN2]) = 0x38;
VMX(vcpu, vrr[VRN3]) = 0x38;
VMX(vcpu, vrr[VRN4]) = 0x38;
- // enable vhpt in guest physical mode
+ // vhpt walker is not enabled for the data-only metaphysical rid
- vcpu->arch.metaphysical_rr4 |= 1;
+ vcpu->arch.metaphysical_rid_d |= 0; /* VHPT not enabled! */
vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
VMX(vcpu, vrr[VRN5]) = 0x38;
VMX(vcpu, vrr[VRN6]) = 0x38;
panic_domain(vcpu_regs(vcpu),
"Unexpected domain switch in phy emul\n");
}
- ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
+ ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
ia64_dv_serialize_data();
- ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
+ ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
ia64_dv_serialize_data();
} else {
ia64_set_rr((VRN0 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr0);
+ vcpu->arch.metaphysical_saved_rr0);
ia64_dv_serialize_data();
ia64_set_rr((VRN4 << VRN_SHIFT),
- vcpu->arch.metaphysical_saved_rr4);
+ vcpu->arch.metaphysical_saved_rr4);
ia64_dv_serialize_data();
}
/* rr567 will be postponed to last point when resuming back to guest */
- ia64_set_rr((VRN1 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
+ ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
ia64_dv_serialize_data();
- ia64_set_rr((VRN2 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
+ ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
ia64_dv_serialize_data();
- ia64_set_rr((VRN3 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
+ ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
ia64_dv_serialize_data();
- ia64_set_rr((VRN5 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
+ ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
ia64_dv_serialize_data();
- ia64_set_rr((VRN6 << VRN_SHIFT),
- vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
+ ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
ia64_dv_serialize_data();
vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
- (void *)vcpu->arch.vhpt.hash, pal_vaddr );
+ (void *)vcpu->arch.vhpt.hash, pal_vaddr);
ia64_set_pta(VMX(vcpu, mpta));
vmx_ia64_set_dcr(vcpu);
ia64_srlz_i();
}
-
-
void
switch_to_physical_rid(VCPU *vcpu)
{
u64 psr;
- /* Save original virtual mode rr[0] and rr[4] */
- psr=ia64_clear_ic();
- ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
+
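+	/* Point rr0 and rr4 at the metaphysical rid; psr.ic is kept off across the rr updates. */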
+ psr = ia64_clear_ic();
+ ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
ia64_srlz_d();
- ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
+ ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
ia64_srlz_d();
ia64_set_psr(psr);
return;
}
-
void
switch_to_virtual_rid(VCPU *vcpu)
{
act = mm_switch_action(old_psr, new_psr);
perfc_incra(vmx_switch_mm_mode, act);
switch (act) {
- case SW_V2P:
+ case SW_V2P_DT:
+ case SW_V2P_D:
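+		/* Both transitions are handled alike here: switch_to_physical_rid() loads the metaphysical rid into rr0/rr4. */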
// printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
// old_psr.val, new_psr.val);
switch_to_physical_rid(vcpu);
return;
}
-
-
-/*
- * In physical mode, insert tc/tr for region 0 and 4 uses
- * RID[0] and RID[4] which is for physical mode emulation.
- * However what those inserted tc/tr wants is rid for
- * virtual mode. So original virtual rid needs to be restored
- * before insert.
- *
- * Operations which required such switch include:
- * - insertions (itc.*, itr.*)
- * - purges (ptc.* and ptr.*)
- * - tpa
- * - tak
- * - thash?, ttag?
- * All above needs actual virtual rid for destination entry.
- */
-
void
check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
- if ( (old_psr.dt != new_psr.dt ) ||
- (old_psr.it != new_psr.it ) ||
- (old_psr.rt != new_psr.rt )
- ) {
- switch_mm_mode (vcpu, old_psr, new_psr);
+ if (old_psr.dt != new_psr.dt ||
+ old_psr.it != new_psr.it ||
+ old_psr.rt != new_psr.rt) {
+
+ switch_mm_mode(vcpu, old_psr, new_psr);
debugger_event(XEN_IA64_DEBUG_ON_MMU);
}