ia64/xen-unstable

changeset 15898:788c39a0b905

[IA64] Use same RID for rr0 and rr4 in metaphysical mode.

Renames metaphysical_rr0 to metaphysical_rid_dt
Renames metaphysical_rr4 to metaphysical_rid_d
Adds comments in optvfault.S
Cleans up and updates vmx_phy_mode.[ch]
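
In short, after this change entering metaphysical mode loads the same
metaphysical_rid_dt value into both rr0 and rr4. A minimal C sketch of the
resulting switch_to_physical_rid() path (names and calls as in the patched
vmx_phy_mode.c), for illustration:

    /* Sketch: both region registers 0 and 4 are loaded with the same
     * metaphysical RID while the guest runs in physical-emulation mode. */
    u64 psr = ia64_clear_ic();
    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
    ia64_srlz_d();
    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();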

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Wed Sep 26 13:22:17 2007 -0600 (2007-09-26)
parents c7140b8f56ac
children 65e336ea451f
files xen/arch/ia64/asm-offsets.c xen/arch/ia64/vmx/optvfault.S xen/arch/ia64/vmx/vmx_ivt.S xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/regionreg.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/vmx_phy_mode.h
line diff
     1.1 --- a/xen/arch/ia64/asm-offsets.c	Wed Sep 26 12:43:41 2007 -0600
     1.2 +++ b/xen/arch/ia64/asm-offsets.c	Wed Sep 26 13:22:17 2007 -0600
     1.3 @@ -58,7 +58,7 @@ void foo(void)
     1.4  	DEFINE(IA64_VCPU_DOMAIN_OFFSET, offsetof (struct vcpu, domain));
     1.5  	DEFINE(IA64_VCPU_HYPERCALL_CONTINUATION_OFS, offsetof (struct vcpu, arch.hypercall_continuation));
     1.6  	DEFINE(IA64_VCPU_FP_PSR_OFFSET, offsetof (struct vcpu, arch.fp_psr));
     1.7 -	DEFINE(IA64_VCPU_META_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_rr0));
     1.8 +	DEFINE(IA64_VCPU_META_RID_DT_OFFSET, offsetof (struct vcpu, arch.metaphysical_rid_dt));
     1.9  	DEFINE(IA64_VCPU_META_SAVED_RR0_OFFSET, offsetof (struct vcpu, arch.metaphysical_saved_rr0));
    1.10  	DEFINE(IA64_VCPU_BREAKIMM_OFFSET, offsetof (struct vcpu, arch.breakimm));
    1.11  	DEFINE(IA64_VCPU_IVA_OFFSET, offsetof (struct vcpu, arch.iva));
     2.1 --- a/xen/arch/ia64/vmx/optvfault.S	Wed Sep 26 12:43:41 2007 -0600
     2.2 +++ b/xen/arch/ia64/vmx/optvfault.S	Wed Sep 26 13:22:17 2007 -0600
     2.3 @@ -16,6 +16,7 @@
     2.4  #include <asm/vmx_pal_vsa.h>
     2.5  #include <asm/asm-offsets.h>
     2.6  #include <asm-ia64/vmx_mm_def.h>
     2.7 +#include <asm-ia64/vmx_phy_mode.h>
     2.8  
     2.9  #define ACCE_MOV_FROM_AR
    2.10  #define ACCE_MOV_FROM_RR
    2.11 @@ -25,7 +26,7 @@
    2.12  #define ACCE_MOV_TO_PSR
    2.13  #define ACCE_THASH
    2.14  
    2.15 -//mov r1=ar3
    2.16 +//mov r1=ar3 (only itc is virtualized)
    2.17  GLOBAL_ENTRY(vmx_asm_mov_from_ar)
    2.18  #ifndef ACCE_MOV_FROM_AR
    2.19      br.many vmx_virtualization_fault_back
    2.20 @@ -131,7 +132,7 @@ vmx_asm_mov_to_rr_back_2:
    2.21      ;; //mangling rid 1 and 3
    2.22      extr.u r16=r19,8,8
    2.23      extr.u r17=r19,24,8
    2.24 -    extr.u r18=r19,2,6
    2.25 +    extr.u r18=r19,2,6 // page size
    2.26      ;;
    2.27      dep r19=r16,r19,24,8
    2.28      ;;
    2.29 @@ -156,7 +157,7 @@ vmx_asm_mov_to_rr_back_2:
    2.30      (p6) shladd r17=r23,1,r17
    2.31      ;;
    2.32      (p6) st8 [r17]=r19
    2.33 -    (p6) tbit.nz p6,p7=r16,0
    2.34 +    (p6) tbit.nz p6,p7=r16,GUEST_IN_PHY_BIT // Set physical rr if in virt mode
    2.35      ;;
    2.36      (p7) mov rr[r28]=r19
    2.37      mov r24=r22
    2.38 @@ -170,11 +171,11 @@ GLOBAL_ENTRY(vmx_asm_rsm)
    2.39      br.many vmx_virtualization_fault_back
    2.40  #endif
    2.41      add r16=IA64_VPD_BASE_OFFSET,r21
    2.42 -    extr.u r26=r25,6,21
    2.43 -    extr.u r27=r25,31,2
    2.44 +    extr.u r26=r25,6,21 // Imm21
    2.45 +    extr.u r27=r25,31,2 // I2d
    2.46      ;;
    2.47      ld8 r16=[r16]
    2.48 -    extr.u r28=r25,36,1
    2.49 +    extr.u r28=r25,36,1 // I
    2.50      dep r26=r27,r26,21,2
    2.51      ;;
    2.52      add r17=VPD_VPSR_START_OFFSET,r16
    2.53 @@ -185,41 +186,40 @@ GLOBAL_ENTRY(vmx_asm_rsm)
    2.54      ld8 r18=[r17]
    2.55      movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
    2.56      ld4 r23=[r22]
    2.57 -    sub r27=-1,r26
    2.58 +    sub r27=-1,r26 // ~r26
    2.59      mov r24=b0
    2.60      ;;
    2.61      mov r20=cr.ipsr
    2.62 -    or r28=r27,r28
    2.63 -    and r19=r18,r27
    2.64 +    or r28=r27,r28 // Keep IC,I,DT,SI
    2.65 +    and r19=r18,r27 // Update vpsr
    2.66      ;;   
    2.67      st8 [r17]=r19
    2.68 -    and r20=r20,r28
    2.69 +    and r20=r20,r28 // Update ipsr
    2.70      adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
    2.71      ;;
    2.72      ld8 r27=[r27]
    2.73      ;;
    2.74      tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
    2.75      ;;
    2.76 -    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
    2.77 +    (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1  // Keep dfh
    2.78      ;;
    2.79      mov cr.ipsr=r20
    2.80 -    tbit.nz p6,p0=r23,0
    2.81 +    tbit.nz p6,p0=r23,GUEST_IN_PHY_BIT
    2.82      ;;
    2.83      tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
    2.84 -    (p6) br.dptk vmx_resume_to_guest
    2.85 +    (p6) br.dptk vmx_resume_to_guest  // (DT set or already in phy mode)
    2.86      ;;
    2.87 -    add r26=IA64_VCPU_META_RR0_OFFSET,r21
    2.88 -    add r27=IA64_VCPU_META_RR0_OFFSET+8,r21
    2.89 -    dep r23=-1,r23,0,1
    2.90 +    // Switch to meta physical mode.
    2.91 +    add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
    2.92 +    dep r23=-1,r23,GUEST_IN_PHY_BIT,1 // Set GUEST_IN_PHY
    2.93      ;;
    2.94      ld8 r26=[r26]
    2.95 -    ld8 r27=[r27]
    2.96 -    st4 [r22]=r23
    2.97 +    st4 [r22]=r23 
    2.98      dep.z r28=4,61,3
    2.99      ;;
   2.100      mov rr[r0]=r26
   2.101      ;;
   2.102 -    mov rr[r28]=r27
   2.103 +    mov rr[r28]=r26
   2.104      ;;
   2.105      srlz.d
   2.106      br.many vmx_resume_to_guest
   2.107 @@ -257,14 +257,14 @@ GLOBAL_ENTRY(vmx_asm_ssm)
   2.108      movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
   2.109      ;;
   2.110      and r19=r28,r19
   2.111 -    tbit.z p6,p0=r23,0
   2.112 +    tbit.z p6,p0=r23,GUEST_IN_PHY_BIT
   2.113      ;;
   2.114      cmp.ne.or p6,p0=r28,r19
   2.115      (p6) br.dptk vmx_asm_ssm_1
   2.116      ;;
   2.117      add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
   2.118      add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
   2.119 -    dep r23=0,r23,0,1
   2.120 +    dep r23=0,r23,GUEST_IN_PHY_BIT,1  // Clear GUEST_IN_PHY
   2.121      ;;
   2.122      ld8 r26=[r26]
   2.123      ld8 r27=[r27]
   2.124 @@ -339,14 +339,14 @@ vmx_asm_mov_to_psr_back:
   2.125      (p5) br.many vmx_asm_mov_to_psr_1
   2.126      ;;
   2.127      //virtual to physical
   2.128 -    (p7) add r26=IA64_VCPU_META_RR0_OFFSET,r21
   2.129 -    (p7) add r27=IA64_VCPU_META_RR0_OFFSET+8,r21
   2.130 -    (p7) dep r23=-1,r23,0,1
   2.131 +    (p7) add r26=IA64_VCPU_META_RID_DT_OFFSET,r21
   2.132 +    (p7) add r27=IA64_VCPU_META_RID_DT_OFFSET,r21
   2.133 +    (p7) dep r23=-1,r23,GUEST_IN_PHY_BIT,1
   2.134      ;;
   2.135      //physical to virtual
   2.136      (p6) add r26=IA64_VCPU_META_SAVED_RR0_OFFSET,r21
   2.137      (p6) add r27=IA64_VCPU_META_SAVED_RR0_OFFSET+8,r21
   2.138 -    (p6) dep r23=0,r23,0,1
   2.139 +    (p6) dep r23=0,r23,GUEST_IN_PHY_BIT,1
   2.140      ;;
   2.141      ld8 r26=[r26]
   2.142      ld8 r27=[r27]
   2.143 @@ -594,6 +594,7 @@ MOV_FROM_BANK0_REG(31)
   2.144  
   2.145  
   2.146  // mov from reg table
   2.147 +// r19:	value, r30: return address
   2.148  ENTRY(asm_mov_from_reg)
   2.149      MOV_FROM_REG(0)
   2.150      MOV_FROM_REG(1)
   2.151 @@ -789,6 +790,7 @@ MOV_TO_BANK0_REG(31)
   2.152  
   2.153  
   2.154  // mov to reg table
   2.155 +// r19:	value, r30: return address
   2.156  ENTRY(asm_mov_to_reg)
   2.157      MOV_TO_REG0
   2.158      MOV_TO_REG(1)
     3.1 --- a/xen/arch/ia64/vmx/vmx_ivt.S	Wed Sep 26 12:43:41 2007 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_ivt.S	Wed Sep 26 13:22:17 2007 -0600
     3.3 @@ -308,9 +308,9 @@ END(vmx_dtlb_miss)
     3.4  ENTRY(vmx_alt_itlb_miss)
     3.5      VMX_DBG_FAULT(3)
     3.6      mov r31 = pr
     3.7 -    mov r29=cr.ipsr;
     3.8 +    mov r29=cr.ipsr
     3.9      ;;
    3.10 -    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    3.11 +    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
    3.12  (p7)br.spnt vmx_fault_3
    3.13  vmx_alt_itlb_miss_1:
    3.14      mov r16=cr.ifa    // get address that caused the TLB miss
     4.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Wed Sep 26 12:43:41 2007 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Wed Sep 26 13:22:17 2007 -0600
     4.3 @@ -30,16 +30,15 @@
     4.4  #include <asm/vmmu.h>
     4.5  #include <asm/debugger.h>
     4.6  
     4.7 -static const int valid_mm_mode[8] = {
     4.8 -    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
     4.9 -    INV_MODE,
    4.10 -    INV_MODE,
    4.11 -    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
    4.12 -    INV_MODE,
    4.13 -    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
    4.14 -    INV_MODE,
    4.15 -    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1).*/
    4.16 -};
    4.17 +#define MODE_IND(psr)   \
    4.18 +    (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
    4.19 +
    4.20 +#define SW_BAD    0   /* Bad mode transitition */
    4.21 +#define SW_V2P_DT 1   /* Physical emulation is activated */
    4.22 +#define SW_V2P_D  2   /* Physical emulation is activated (only for data) */
    4.23 +#define SW_P2V    3   /* Exit physical mode emulation */
    4.24 +#define SW_SELF   4   /* No mode transition */
    4.25 +#define SW_NOP    5   /* Mode transition, but without action required */
    4.26  
    4.27  /*
    4.28   * Special notes:
    4.29 @@ -51,9 +50,9 @@ static const int valid_mm_mode[8] = {
    4.30   */
    4.31  static const int mm_switch_table[8][8] = {
    4.32      /*  2004/09/12(Kevin): Allow switch to self */
    4.33 -        /*
    4.34 -         *  (it,dt,rt): (0,0,0) -> (1,1,1)
    4.35 -         *  This kind of transition usually occurs in the very early
    4.36 +    /*
    4.37 +     *  (it,dt,rt): (0,0,0) -> (1,1,1)
    4.38 +     *  This kind of transition usually occurs in the very early
    4.39       *  stage of Linux boot up procedure. Another case is in efi
    4.40       *  and pal calls. (see "arch/ia64/kernel/head.S")
    4.41       *
    4.42 @@ -62,7 +61,7 @@ static const int mm_switch_table[8][8] =
    4.43       *  service. Due to gva = gpa in this case (Same region),
    4.44       *  data access can be satisfied though itlb entry for physical
    4.45       *  emulation is hit.
    4.46 -         */
    4.47 +     */
    4.48      {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
    4.49      {0,  0,  0,  0,  0,  0,  0,  0},
    4.50      {0,  0,  0,  0,  0,  0,  0,  0},
    4.51 @@ -77,16 +76,16 @@ static const int mm_switch_table[8][8] =
    4.52      /* (1,0,0)->(1,1,1) */
    4.53      {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
    4.54      /*
    4.55 -         *  (it,dt,rt): (1,0,1) -> (1,1,1)
    4.56 -         *  This kind of transition usually occurs when Linux returns
    4.57 +     *  (it,dt,rt): (1,0,1) -> (1,1,1)
    4.58 +     *  This kind of transition usually occurs when Linux returns
    4.59       *  from the low level TLB miss handlers.
    4.60 -         *  (see "arch/ia64/kernel/ivt.S")
    4.61 -         */
    4.62 +     *  (see "arch/ia64/kernel/ivt.S")
    4.63 +     */
    4.64      {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
    4.65      {0,  0,  0,  0,  0,  0,  0,  0},
    4.66      /*
    4.67 -         *  (it,dt,rt): (1,1,1) -> (1,0,1)
    4.68 -         *  This kind of transition usually occurs in Linux low level
    4.69 +     *  (it,dt,rt): (1,1,1) -> (1,0,1)
    4.70 +     *  This kind of transition usually occurs in Linux low level
    4.71       *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
    4.72       *
    4.73       *  (it,dt,rt): (1,1,1) -> (0,0,0)
    4.74 @@ -96,7 +95,7 @@ static const int mm_switch_table[8][8] =
    4.75       *  (1,1,1)->(1,0,0)
    4.76       */
    4.77  
    4.78 -    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
    4.79 +    {SW_V2P_DT, 0,  0,  0,  SW_V2P_D, SW_V2P_D, 0,  SW_SELF},
    4.80  };
    4.81  
    4.82  void
    4.83 @@ -111,26 +110,25 @@ physical_tlb_miss(VCPU *vcpu, u64 vadr, 
    4.84      u64 pte;
    4.85      ia64_rr rr;
    4.86      rr.rrval = ia64_get_rr(vadr);
    4.87 -    pte =  vadr& _PAGE_PPN_MASK;
    4.88 +    pte = vadr & _PAGE_PPN_MASK;
    4.89      pte = pte | PHY_PAGE_WB;
    4.90      thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
    4.91      return;
    4.92  }
    4.93  
    4.94 -
    4.95  void
    4.96  vmx_init_all_rr(VCPU *vcpu)
    4.97  {
    4.98  	VMX(vcpu, vrr[VRN0]) = 0x38;
    4.99  	// enable vhpt in guest physical mode
   4.100 -	vcpu->arch.metaphysical_rr0 |= 1;
   4.101 +	vcpu->arch.metaphysical_rid_dt |= 1;
   4.102  	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
   4.103  	VMX(vcpu, vrr[VRN1]) = 0x38;
   4.104  	VMX(vcpu, vrr[VRN2]) = 0x38;
   4.105  	VMX(vcpu, vrr[VRN3]) = 0x38;
   4.106  	VMX(vcpu, vrr[VRN4]) = 0x38;
   4.107  	// enable vhpt in guest physical mode
   4.108 -	vcpu->arch.metaphysical_rr4 |= 1;
   4.109 +	vcpu->arch.metaphysical_rid_d |= 0; /* VHPT not enabled! */
   4.110  	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
   4.111  	VMX(vcpu, vrr[VRN5]) = 0x38;
   4.112  	VMX(vcpu, vrr[VRN6]) = 0x38;
   4.113 @@ -154,37 +152,32 @@ vmx_load_all_rr(VCPU *vcpu)
   4.114  			panic_domain(vcpu_regs(vcpu),
   4.115  			             "Unexpected domain switch in phy emul\n");
   4.116  		}
   4.117 -		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
   4.118 +		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
   4.119  		ia64_dv_serialize_data();
   4.120 -		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
   4.121 +		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rid_dt);
   4.122  		ia64_dv_serialize_data();
   4.123  	} else {
   4.124  		ia64_set_rr((VRN0 << VRN_SHIFT),
   4.125 -                            vcpu->arch.metaphysical_saved_rr0);
   4.126 +			    vcpu->arch.metaphysical_saved_rr0);
   4.127  		ia64_dv_serialize_data();
   4.128  		ia64_set_rr((VRN4 << VRN_SHIFT),
   4.129 -                            vcpu->arch.metaphysical_saved_rr4);
   4.130 +			    vcpu->arch.metaphysical_saved_rr4);
   4.131  		ia64_dv_serialize_data();
   4.132  	}
   4.133  
   4.134  	/* rr567 will be postponed to last point when resuming back to guest */
   4.135 -	ia64_set_rr((VRN1 << VRN_SHIFT),
   4.136 -		     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
   4.137 +	ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
   4.138  	ia64_dv_serialize_data();
   4.139 -	ia64_set_rr((VRN2 << VRN_SHIFT),
   4.140 -		     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
   4.141 +	ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
   4.142  	ia64_dv_serialize_data();
   4.143 -	ia64_set_rr((VRN3 << VRN_SHIFT),
   4.144 -		     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
   4.145 +	ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
   4.146  	ia64_dv_serialize_data();
   4.147 -	ia64_set_rr((VRN5 << VRN_SHIFT),
   4.148 -		     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
   4.149 +	ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
   4.150  	ia64_dv_serialize_data();
   4.151 -	ia64_set_rr((VRN6 << VRN_SHIFT),
   4.152 -		     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
   4.153 +	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
   4.154  	ia64_dv_serialize_data();
   4.155  	vmx_switch_rr7(vrrtomrr(vcpu,VMX(vcpu, vrr[VRN7])),
   4.156 -			(void *)vcpu->arch.vhpt.hash, pal_vaddr );
   4.157 +		       (void *)vcpu->arch.vhpt.hash, pal_vaddr);
   4.158  	ia64_set_pta(VMX(vcpu, mpta));
   4.159  	vmx_ia64_set_dcr(vcpu);
   4.160  
   4.161 @@ -193,17 +186,15 @@ vmx_load_all_rr(VCPU *vcpu)
   4.162  	ia64_srlz_i();
   4.163  }
   4.164  
   4.165 -
   4.166 -
   4.167  void
   4.168  switch_to_physical_rid(VCPU *vcpu)
   4.169  {
   4.170      u64 psr;
   4.171 -    /* Save original virtual mode rr[0] and rr[4] */
   4.172 -    psr=ia64_clear_ic();
   4.173 -    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
   4.174 +
   4.175 +    psr = ia64_clear_ic();
   4.176 +    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
   4.177      ia64_srlz_d();
   4.178 -    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
   4.179 +    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rid_dt);
   4.180      ia64_srlz_d();
   4.181  
   4.182      ia64_set_psr(psr);
   4.183 @@ -211,7 +202,6 @@ switch_to_physical_rid(VCPU *vcpu)
   4.184      return;
   4.185  }
   4.186  
   4.187 -
   4.188  void
   4.189  switch_to_virtual_rid(VCPU *vcpu)
   4.190  {
   4.191 @@ -238,7 +228,8 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
   4.192      act = mm_switch_action(old_psr, new_psr);
   4.193      perfc_incra(vmx_switch_mm_mode, act);
   4.194      switch (act) {
   4.195 -    case SW_V2P:
   4.196 +    case SW_V2P_DT:
   4.197 +    case SW_V2P_D:
   4.198  //        printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
   4.199  //               old_psr.val, new_psr.val);
   4.200          switch_to_physical_rid(vcpu);
   4.201 @@ -274,33 +265,15 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
   4.202      return;
   4.203  }
   4.204  
   4.205 -
   4.206 -
   4.207 -/*
   4.208 - * In physical mode, insert tc/tr for region 0 and 4 uses
   4.209 - * RID[0] and RID[4] which is for physical mode emulation.
   4.210 - * However what those inserted tc/tr wants is rid for
   4.211 - * virtual mode. So original virtual rid needs to be restored
   4.212 - * before insert.
   4.213 - *
   4.214 - * Operations which required such switch include:
   4.215 - *  - insertions (itc.*, itr.*)
   4.216 - *  - purges (ptc.* and ptr.*)
   4.217 - *  - tpa
   4.218 - *  - tak
   4.219 - *  - thash?, ttag?
   4.220 - * All above needs actual virtual rid for destination entry.
   4.221 - */
   4.222 -
   4.223  void
   4.224  check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR new_psr)
   4.225  {
   4.226  
   4.227 -    if ( (old_psr.dt != new_psr.dt ) ||
   4.228 -         (old_psr.it != new_psr.it ) ||
   4.229 -         (old_psr.rt != new_psr.rt )
   4.230 -         ) {
   4.231 -        switch_mm_mode (vcpu, old_psr, new_psr);
   4.232 +    if (old_psr.dt != new_psr.dt ||
   4.233 +        old_psr.it != new_psr.it ||
   4.234 +        old_psr.rt != new_psr.rt) {
   4.235 +
   4.236 +        switch_mm_mode(vcpu, old_psr, new_psr);
   4.237          debugger_event(XEN_IA64_DEBUG_ON_MMU);
   4.238      }
   4.239  
     5.1 --- a/xen/arch/ia64/vmx/vtlb.c	Wed Sep 26 12:43:41 2007 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Wed Sep 26 13:22:17 2007 -0600
     5.3 @@ -187,7 +187,7 @@ void thash_vhpt_insert(VCPU *v, u64 pte,
     5.4      if (itir_ps(itir) >= mrr.ps) {
     5.5          vmx_vhpt_insert(vcpu_get_vhpt(v), phy_pte, itir, va);
     5.6      } else {
     5.7 -        phy_pte  &= ~PAGE_FLAGS_RV_MASK;
     5.8 +        phy_pte &= ~PAGE_FLAGS_RV_MASK;
     5.9          psr = ia64_clear_ic();
    5.10          ia64_itc(type + 1, va, phy_pte, itir);
    5.11          ia64_set_psr(psr);
     6.1 --- a/xen/arch/ia64/xen/domain.c	Wed Sep 26 12:43:41 2007 -0600
     6.2 +++ b/xen/arch/ia64/xen/domain.c	Wed Sep 26 13:22:17 2007 -0600
     6.3 @@ -426,10 +426,11 @@ int vcpu_initialise(struct vcpu *v)
     6.4  	struct domain *d = v->domain;
     6.5  
     6.6  	if (!is_idle_domain(d)) {
     6.7 -	    v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
     6.8 -	    v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
     6.9 -	    v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
    6.10 -	    v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
    6.11 +	    v->arch.metaphysical_rid_dt = d->arch.metaphysical_rid_dt;
    6.12 +	    v->arch.metaphysical_rid_d = d->arch.metaphysical_rid_d;
    6.13 +	    /* Set default values to saved_rr.  */
    6.14 +	    v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rid_dt;
    6.15 +	    v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rid_dt;
    6.16  
    6.17  	    /* Is it correct ?
    6.18  	       It depends on the domain rid usage.
     7.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Wed Sep 26 12:43:41 2007 -0600
     7.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Wed Sep 26 13:22:17 2007 -0600
     7.3 @@ -1423,7 +1423,7 @@ ENTRY(hyper_rsm_dt)
     7.4  (p7)	br.spnt.many	1f ;;	// already in metaphysical mode
     7.5  	movl r22=THIS_CPU(cpu_kr)+IA64_KR_CURRENT_OFFSET;;
     7.6  	ld8 r22=[r22];;
     7.7 -	adds r22=IA64_VCPU_META_RR0_OFFSET,r22;;
     7.8 +	adds r22=IA64_VCPU_META_RID_DT_OFFSET,r22;;
     7.9  	ld8 r23=[r22];;
    7.10  	mov rr[r0]=r23;;
    7.11  	srlz.i;;
     8.1 --- a/xen/arch/ia64/xen/regionreg.c	Wed Sep 26 12:43:41 2007 -0600
     8.2 +++ b/xen/arch/ia64/xen/regionreg.c	Wed Sep 26 13:22:17 2007 -0600
     8.3 @@ -185,8 +185,8 @@ int allocate_rid_range(struct domain *d,
     8.4  	d->arch.starting_mp_rid = i << mp_rid_shift;
     8.5  	d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;
     8.6  
     8.7 -	d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
     8.8 -	d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);
     8.9 +	d->arch.metaphysical_rid_dt = allocate_metaphysical_rr(d, 0);
    8.10 +	d->arch.metaphysical_rid_d = allocate_metaphysical_rr(d, 1);
    8.11  
    8.12  	dprintk(XENLOG_DEBUG, "### domain %p: rid=%x-%x mp_rid=%x\n",
    8.13  		d, d->arch.starting_rid, d->arch.ending_rid,
    8.14 @@ -238,7 +238,8 @@ int set_one_rr(unsigned long rr, unsigne
    8.15  	ia64_rr rrv, newrrv, memrrv;
    8.16  	unsigned long newrid;
    8.17  
    8.18 -	if (val == -1) return 1;
    8.19 +	if (val == -1)
    8.20 +		return 1;
    8.21  
    8.22  	rrv.rrval = val;
    8.23  	newrrv.rrval = 0;
    8.24 @@ -277,7 +278,7 @@ int set_metaphysical_rr0(void)
    8.25  //	ia64_rr rrv;
    8.26  	
    8.27  //	rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
    8.28 -	ia64_set_rr(0,v->arch.metaphysical_rr0);
    8.29 +	ia64_set_rr(0, v->arch.metaphysical_rid_dt);
    8.30  	ia64_srlz_d();
    8.31  	return 1;
    8.32  }
    8.33 @@ -290,7 +291,8 @@ void init_all_rr(struct vcpu *v)
    8.34  	//rrv.rrval = v->domain->arch.metaphysical_rr0;
    8.35  	rrv.ps = v->arch.vhpt_pg_shift;
    8.36  	rrv.ve = 1;
    8.37 -if (!v->vcpu_info) { panic("Stopping in init_all_rr\n"); }
    8.38 +	if (!v->vcpu_info)
    8.39 +		panic("Stopping in init_all_rr\n");
    8.40  	VCPU(v,rrs[0]) = -1;
    8.41  	VCPU(v,rrs[1]) = rrv.rrval;
    8.42  	VCPU(v,rrs[2]) = rrv.rrval;
    8.43 @@ -319,7 +321,7 @@ void load_region_regs(struct vcpu *v)
    8.44  	unsigned long bad = 0;
    8.45  
    8.46  	if (VCPU(v,metaphysical_mode)) {
    8.47 -		rr0 = v->domain->arch.metaphysical_rr0;
    8.48 +		rr0 = v->domain->arch.metaphysical_rid_dt;
    8.49  		ia64_set_rr(0x0000000000000000L, rr0);
    8.50  		ia64_srlz_d();
    8.51  	}
     9.1 --- a/xen/include/asm-ia64/domain.h	Wed Sep 26 12:43:41 2007 -0600
     9.2 +++ b/xen/include/asm-ia64/domain.h	Wed Sep 26 13:22:17 2007 -0600
     9.3 @@ -143,8 +143,8 @@ struct arch_domain {
     9.4      int starting_mp_rid;
     9.5      int ending_mp_rid;
     9.6      /* RID for metaphysical mode.  */
     9.7 -    unsigned long metaphysical_rr0;
     9.8 -    unsigned long metaphysical_rr4;
     9.9 +    unsigned long metaphysical_rid_dt;	/* dt=it=0  */
    9.10 +    unsigned long metaphysical_rid_d;  /* dt=0, it=1  */
    9.11      
    9.12      int rid_bits;		/* number of virtual rid bits (default: 18) */
    9.13      int breakimm;     /* The imm value for hypercalls.  */
    9.14 @@ -232,8 +232,8 @@ struct arch_vcpu {
    9.15  
    9.16      /* These fields are copied from arch_domain to make access easier/faster
    9.17         in assembly code.  */
    9.18 -    unsigned long metaphysical_rr0;		// from arch_domain (so is pinned)
    9.19 -    unsigned long metaphysical_rr4;		// from arch_domain (so is pinned)
    9.20 +    unsigned long metaphysical_rid_dt;	// from arch_domain (so is pinned)
    9.21 +    unsigned long metaphysical_rid_d;	// from arch_domain (so is pinned)
    9.22      unsigned long metaphysical_saved_rr0;	// from arch_domain (so is pinned)
    9.23      unsigned long metaphysical_saved_rr4;	// from arch_domain (so is pinned)
    9.24      unsigned long fp_psr;       // used for lazy float register
    9.25 @@ -254,7 +254,7 @@ struct arch_vcpu {
    9.26      char irq_new_condition;    // vpsr.i/vtpr change, check for pending VHPI
    9.27      char hypercall_continuation;
    9.28  
    9.29 -    //for phycial  emulation
    9.30 +    // for physical emulation
    9.31      int mode_flags;
    9.32      fpswa_ret_t fpswa_ret;	/* save return values of FPSWA emulation */
    9.33      struct timer hlt_timer;
    10.1 --- a/xen/include/asm-ia64/vmx_phy_mode.h	Wed Sep 26 12:43:41 2007 -0600
    10.2 +++ b/xen/include/asm-ia64/vmx_phy_mode.h	Wed Sep 26 13:22:17 2007 -0600
    10.3 @@ -66,29 +66,19 @@
    10.4   */
    10.5  
    10.6  
    10.7 +#ifndef __ASSEMBLY__
    10.8 +
    10.9  #include <asm/vmx_vcpu.h>
   10.10  #include <asm/regionreg.h>
   10.11  #include <asm/gcc_intrin.h>
   10.12  #include <asm/pgtable.h>
   10.13 -/* Due to change of ia64_set_rr interface */
   10.14  
   10.15 -#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
   10.16  #define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
   10.17  
   10.18 -//#ifdef PHY_16M  /* 16M: large granule for test*/
   10.19 -//#define EMUL_PHY_PAGE_SHIFT 24
   10.20 -//#else   /* 4K: emulated physical page granule */
   10.21 -//#define EMUL_PHY_PAGE_SHIFT 12
   10.22 -//#endif
   10.23 -#define IA64_RSC_MODE       0x0000000000000003
   10.24 -#define XEN_RR7_RID    (0xf00010)
   10.25 -#define GUEST_IN_PHY    0x1
   10.26 -#define GUEST_PHY_EMUL	0x2
   10.27  extern void physical_mode_init(VCPU *);
   10.28  extern void switch_to_physical_rid(VCPU *);
   10.29  extern void switch_to_virtual_rid(VCPU *vcpu);
   10.30  extern void switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr);
   10.31 -extern void stlb_phys_lookup(VCPU *vcpu, u64 paddr, u64 type);
   10.32  extern void check_mm_mode_switch (VCPU *vcpu,  IA64_PSR old_psr, IA64_PSR new_psr);
   10.33  extern void prepare_if_physical_mode(VCPU *vcpu);
   10.34  extern void recover_if_physical_mode(VCPU *vcpu);
   10.35 @@ -105,17 +95,12 @@ extern void physical_tlb_miss(VCPU *vcpu
   10.36  #define is_virtual_mode(v) \
   10.37      (!is_physical_mode(v))
   10.38  
   10.39 -#define MODE_IND(psr)   \
   10.40 -    (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
   10.41 +#endif /* __ASSEMBLY__ */
   10.42  
   10.43 -#define SW_BAD  0   /* Bad mode transitition */
   10.44 -#define SW_V2P  1   /* Physical emulatino is activated */
   10.45 -#define SW_P2V  2   /* Exit physical mode emulation */
   10.46 -#define SW_SELF 3   /* No mode transition */
   10.47 -#define SW_NOP  4   /* Mode transition, but without action required */
   10.48 +#define GUEST_IN_PHY_BIT   0
   10.49 +#define GUEST_PHY_EMUL_BIT 1
   10.50  
   10.51 -#define INV_MODE    0   /* Invalid mode */
   10.52 -#define GUEST_VIRT  1   /* Guest in virtual mode */
   10.53 -#define GUEST_PHYS  2   /* Guest in physical mode, requiring emulation */
   10.54 +#define GUEST_IN_PHY   (1 << GUEST_IN_PHY_BIT)
   10.55 +#define GUEST_PHY_EMUL (1 << GUEST_PHY_EMUL_BIT)
   10.56  
   10.57  #endif /* _PHY_MODE_H_ */