ia64/xen-unstable

annotate xen/arch/ia64/vmx/vmx_phy_mode.c @ 18362:6607624285b2

[IA64] EFI mapping: restoring mapping correctly.

When switching back from the EFI mapping, correctly switch back
depending on the current vcpu type.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Aug 25 19:04:37 2008 +0900 (2008-08-25)
parents 7da7b53b2139
children 3d96f88fb220
rev   line source
djm@6458 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
djm@6458 2 /*
djm@6458 3 * vmx_phy_mode.c: emulating domain physical mode.
djm@6458 4 * Copyright (c) 2005, Intel Corporation.
djm@6458 5 *
djm@6458 6 * This program is free software; you can redistribute it and/or modify it
djm@6458 7 * under the terms and conditions of the GNU General Public License,
djm@6458 8 * version 2, as published by the Free Software Foundation.
djm@6458 9 *
djm@6458 10 * This program is distributed in the hope it will be useful, but WITHOUT
djm@6458 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
djm@6458 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
djm@6458 13 * more details.
djm@6458 14 *
djm@6458 15 * You should have received a copy of the GNU General Public License along with
djm@6458 16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
djm@6458 17 * Place - Suite 330, Boston, MA 02111-1307 USA.
djm@6458 18 *
djm@6458 19 * Arun Sharma (arun.sharma@intel.com)
djm@6458 20 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
djm@6458 21 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
djm@6458 22 */
djm@6458 23
djm@6458 24
djm@6458 25 #include <asm/processor.h>
djm@6458 26 #include <asm/gcc_intrin.h>
djm@6458 27 #include <asm/vmx_phy_mode.h>
djm@6458 28 #include <asm/pgtable.h>
awilliam@9011 29 #include <asm/vmmu.h>
alex@15893 30 #include <asm/debugger.h>
alex@15858 31
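/*
 * MODE_IND() packs psr.it/psr.dt/psr.rt into a 3-bit index (0-7) used
 * to look up mm_switch_table below, e.g. (it,dt,rt) = (1,0,1) yields
 * index 5.
 */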
alex@15898 32 #define MODE_IND(psr) \
alex@15898 33 (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
alex@15898 34
alex@15898 35 #define SW_BAD 0 /* Bad mode transition */
alex@16470 36 #define SW_2P_DT 1 /* Physical emulation is activated */
alex@16470 37 #define SW_2P_D 2 /* Physical emulation is activated (only for data) */
alex@16470 38 #define SW_2V 3 /* Exit physical mode emulation */
alex@15898 39 #define SW_SELF 4 /* No mode transition */
alex@15898 40 #define SW_NOP 5 /* Mode transition, but without action required */
djm@6458 41
djm@6458 42 /*
djm@6458 43 * Special notes:
djm@6458 44 * - Index by it/dt/rt sequence
djm@6458 45 * - Only existing mode transitions are allowed in this table
djm@6458 46 * - If gva happens to fall in rr0 or rr4, the only allowed case is identity
djm@6458 47 * mapping (gva=gpa), or panic! (How?)
djm@6458 48 */
alex@16006 49 static const unsigned char mm_switch_table[8][8] = {
djm@6458 50 /* 2004/09/12(Kevin): Allow switch to self */
alex@15898 51 /*
alex@15898 52 * (it,dt,rt): (0,0,0) -> (1,1,1)
alex@15898 53 * This kind of transition usually occurs in the very early
djm@6458 54 * stage of the Linux boot-up procedure. Another case is in efi
djm@6458 55 * and pal calls. (see "arch/ia64/kernel/head.S")
djm@6458 56 *
djm@6458 57 * (it,dt,rt): (0,0,0) -> (0,1,1)
djm@6458 58 * This kind of transition is found when OSYa exits efi boot
djm@6458 59 * service. Since gva = gpa in this case (same region),
djm@6458 60 * data accesses can be satisfied even though the itlb entry for
djm@6458 61 * physical emulation is hit.
alex@16470 62 *
alex@16470 63 * (it,dt,rt): (0,0,0) -> (1,0,1)
alex@15898 64 */
alex@16470 65 {SW_SELF,0, 0, SW_NOP, 0, SW_2P_D, 0, SW_2V},
awilliam@8916 66 {0, 0, 0, 0, 0, 0, 0, 0},
awilliam@8916 67 {0, 0, 0, 0, 0, 0, 0, 0},
djm@6458 68 /*
djm@6458 69 * (it,dt,rt): (0,1,1) -> (1,1,1)
djm@6458 70 * This kind of transition is found in OSYa.
djm@6458 71 *
djm@6458 72 * (it,dt,rt): (0,1,1) -> (0,0,0)
djm@6458 73 * This kind of transition is found in OSYa.
djm@6458 74 */
alex@16470 75 {SW_NOP, 0, 0, SW_SELF,0, 0, 0, SW_2V},
djm@6458 76 /* (1,0,0)->(1,1,1) */
alex@16470 77 {0, 0, 0, 0, 0, 0, 0, SW_2V},
djm@6458 78 /*
alex@15898 79 * (it,dt,rt): (1,0,1) -> (1,1,1)
alex@15898 80 * This kind of transition usually occurs when Linux returns
djm@6458 81 * from the low level TLB miss handlers.
alex@15898 82 * (see "arch/ia64/kernel/ivt.S")
alex@16470 83 *
alex@16470 84 * (it,dt,rt): (1,0,1) -> (0,0,0)
alex@15898 85 */
alex@16470 86 {SW_2P_DT, 0, 0, 0, 0, SW_SELF,0, SW_2V},
awilliam@8916 87 {0, 0, 0, 0, 0, 0, 0, 0},
djm@6458 88 /*
alex@15898 89 * (it,dt,rt): (1,1,1) -> (1,0,1)
alex@15898 90 * This kind of transition usually occurs in the Linux low-level
djm@6458 91 * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
djm@6458 92 *
djm@6458 93 * (it,dt,rt): (1,1,1) -> (0,0,0)
djm@6458 94 * This kind of transition usually occurs in pal and efi calls,
djm@6458 95 * which requires running in physical mode.
djm@6458 96 * (see "arch/ia64/kernel/head.S")
alex@16470 97 *
alex@16470 98 * (it,dt,rt): (1,1,1)->(1,0,0)
djm@6458 99 */
alex@16470 100 {SW_2P_DT, 0, 0, 0, SW_2P_D, SW_2P_D, 0, SW_SELF},
djm@6458 101 };
djm@6458 102
djm@6458 103 void
djm@6458 104 physical_mode_init(VCPU *vcpu)
djm@6458 105 {
alex@16006 106 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
djm@6458 107 }
djm@6458 108
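/*
 * Handle a TLB miss taken while the guest runs in emulated physical
 * mode: install an identity-mapped (gpa == gva) VHPT entry with the
 * physical-mode attribute (PHY_PAGE_WB) for the faulting address.
 */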
awilliam@10259 109 void
awilliam@11932 110 physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
djm@6458 111 {
awilliam@10259 112 u64 pte;
alex@16006 113
alex@16006 114 pte = (vadr & _PAGE_PPN_MASK) | PHY_PAGE_WB;
alex@16006 115 thash_vhpt_insert(vcpu, pte, (PAGE_SHIFT << 2), vadr, type);
djm@6458 116 }
djm@6458 117
djm@6458 118 void
djm@6458 119 vmx_init_all_rr(VCPU *vcpu)
djm@6458 120 {
awilliam@11640 121 // enable vhpt in guest physical mode
alex@15898 122 vcpu->arch.metaphysical_rid_dt |= 1;
alex@16006 123
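/*
 * Default guest rr value 0x38: under the IA-64 rr layout (ve in bit 0,
 * ps in bits 2-7, rid in bits 8-31) this decodes to ve=0, ps=14
 * (16KB pages), rid=0; region 7 uses 0x738 (rid=7) below.
 */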
alex@16006 124 VMX(vcpu, vrr[VRN0]) = 0x38;
awilliam@11640 125 vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
awilliam@10876 126 VMX(vcpu, vrr[VRN1]) = 0x38;
awilliam@10876 127 VMX(vcpu, vrr[VRN2]) = 0x38;
awilliam@10876 128 VMX(vcpu, vrr[VRN3]) = 0x38;
awilliam@10876 129 VMX(vcpu, vrr[VRN4]) = 0x38;
awilliam@11640 130 vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
awilliam@10876 131 VMX(vcpu, vrr[VRN5]) = 0x38;
awilliam@10876 132 VMX(vcpu, vrr[VRN6]) = 0x38;
awilliam@10876 133 VMX(vcpu, vrr[VRN7]) = 0x738;
djm@6458 134 }
djm@6458 135
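/*
 * Load this vcpu's region registers into the hardware, picking rr0/rr4
 * according to the currently emulated MMU mode (virtual, data+inst
 * physical, or data-only physical).
 */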
djm@6458 136 void
djm@6458 137 vmx_load_all_rr(VCPU *vcpu)
djm@6458 138 {
alex@16006 139 unsigned long rr0, rr4;
djm@6458 140
alex@16006 141 switch (vcpu->arch.arch_vmx.mmu_mode) {
alex@16006 142 case VMX_MMU_VIRTUAL:
alex@16006 143 rr0 = vcpu->arch.metaphysical_saved_rr0;
alex@16006 144 rr4 = vcpu->arch.metaphysical_saved_rr4;
alex@16006 145 break;
alex@16006 146 case VMX_MMU_PHY_DT:
alex@16006 147 rr0 = vcpu->arch.metaphysical_rid_dt;
alex@16006 148 rr4 = vcpu->arch.metaphysical_rid_dt;
alex@16006 149 break;
alex@16006 150 case VMX_MMU_PHY_D:
alex@16006 151 rr0 = vcpu->arch.metaphysical_rid_d;
alex@16006 152 rr4 = vcpu->arch.metaphysical_rid_d;
alex@16006 153 break;
alex@16006 154 default:
alex@16006 155 panic_domain(NULL, "bad mmu mode value");
djm@6458 156 }
djm@6458 157
alex@16006 158 ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
alex@16006 159 ia64_dv_serialize_data();
alex@16006 160 ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
alex@16006 161 ia64_dv_serialize_data();
alex@15898 162 ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
awilliam@9854 163 ia64_dv_serialize_data();
alex@15898 164 ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
awilliam@9854 165 ia64_dv_serialize_data();
alex@15898 166 ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
awilliam@9854 167 ia64_dv_serialize_data();
alex@15898 168 ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
alex@15898 169 ia64_dv_serialize_data();
alex@15898 170 ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
awilliam@9854 171 ia64_dv_serialize_data();
yamahata@18362 172 vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
awilliam@11045 173 ia64_set_pta(VMX(vcpu, mpta));
awilliam@13840 174 vmx_ia64_set_dcr(vcpu);
djm@6469 175
djm@6458 176 ia64_srlz_d();
djm@6458 177 }
djm@6458 178
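/*
 * Point rr0/rr4 at the metaphysical RID matching the current
 * physical-emulation mode; interruption collection is disabled around
 * the rr writes.
 */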
awilliam@10430 179 void
djm@6458 180 switch_to_physical_rid(VCPU *vcpu)
djm@6458 181 {
awilliam@11817 182 u64 psr;
alex@16006 183 u64 rr;
alex@15898 184
alex@16006 185 switch (vcpu->arch.arch_vmx.mmu_mode) {
alex@16006 186 case VMX_MMU_PHY_DT:
alex@16006 187 rr = vcpu->arch.metaphysical_rid_dt;
alex@16006 188 break;
alex@16006 189 case VMX_MMU_PHY_D:
alex@16006 190 rr = vcpu->arch.metaphysical_rid_d;
alex@16006 191 break;
alex@16006 192 default:
alex@16006 193 panic_domain(NULL, "bad mmu mode value");
alex@16006 194 }
alex@16006 195
alex@15898 196 psr = ia64_clear_ic();
alex@16006 197 ia64_set_rr(VRN0<<VRN_SHIFT, rr);
alex@16006 198 ia64_dv_serialize_data();
alex@16006 199 ia64_set_rr(VRN4<<VRN_SHIFT, rr);
djm@6458 200 ia64_srlz_d();
alex@16006 201
djm@6458 202 ia64_set_psr(psr);
djm@6458 203 ia64_srlz_i();
djm@6458 204 return;
djm@6458 205 }
djm@6458 206
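/* Restore the saved virtual-mode values of rr0/rr4. */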
djm@6458 207 void
djm@6458 208 switch_to_virtual_rid(VCPU *vcpu)
djm@6458 209 {
awilliam@11817 210 u64 psr;
alex@16006 211
alex@16006 212 psr = ia64_clear_ic();
awilliam@11640 213 ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
alex@16006 214 ia64_dv_serialize_data();
awilliam@11640 215 ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
djm@6458 216 ia64_srlz_d();
djm@6458 217 ia64_set_psr(psr);
djm@6458 218 ia64_srlz_i();
djm@6458 219 return;
djm@6458 220 }
djm@6458 221
djm@6458 222 static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
djm@6458 223 {
djm@6458 224 return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
djm@6458 225 }
djm@6458 226
yamahata@17607 227 /* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
yamahata@17607 228 * so that no tlb miss is allowed.
yamahata@17607 229 */
yamahata@17607 230 void
yamahata@17607 231 switch_mm_mode_fast(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
yamahata@17607 232 {
yamahata@17607 233 int act;
yamahata@17607 234 act = mm_switch_action(old_psr, new_psr);
yamahata@17607 235 switch (act) {
yamahata@17607 236 case SW_2P_DT:
yamahata@17607 237 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
yamahata@17607 238 switch_to_physical_rid(vcpu);
yamahata@17607 239 break;
yamahata@17607 240 case SW_2P_D:
yamahata@17607 241 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
yamahata@17607 242 switch_to_physical_rid(vcpu);
yamahata@17607 243 break;
yamahata@17607 244 case SW_2V:
yamahata@17607 245 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
yamahata@17607 246 switch_to_virtual_rid(vcpu);
yamahata@17607 247 break;
yamahata@17607 248 default:
yamahata@17607 249 break;
yamahata@17607 250 }
yamahata@17607 251 return;
yamahata@17607 252 }
yamahata@17607 253
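/*
 * Full mode-switch path; unlike the fast path above it also updates
 * the vmx_switch_mm_mode perf counter and panics on an unexpected
 * transition.
 */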
djm@6458 254 void
djm@6458 255 switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
djm@6458 256 {
djm@6458 257 int act;
djm@6458 258 act = mm_switch_action(old_psr, new_psr);
awilliam@11418 259 perfc_incra(vmx_switch_mm_mode, act);
djm@6458 260 switch (act) {
alex@16470 261 case SW_2P_DT:
alex@16177 262 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
alex@16177 263 switch_to_physical_rid(vcpu);
alex@16177 264 break;
alex@16470 265 case SW_2P_D:
alex@16177 266 // printk("V -> P_D mode transition: (0x%lx -> 0x%lx)\n",
awilliam@10876 267 // old_psr.val, new_psr.val);
alex@16177 268 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
djm@6458 269 switch_to_physical_rid(vcpu);
djm@6458 270 break;
alex@16470 271 case SW_2V:
kfraser@11947 272 // printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
awilliam@10876 273 // old_psr.val, new_psr.val);
alex@16006 274 vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
djm@6458 275 switch_to_virtual_rid(vcpu);
djm@6458 276 break;
djm@6458 277 case SW_SELF:
kfraser@11947 278 printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
djm@6458 279 old_psr.val);
djm@6458 280 break;
djm@6458 281 case SW_NOP:
kfraser@11947 282 // printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
awilliam@10876 283 // old_psr.val, new_psr.val);
djm@6458 284 break;
djm@6458 285 default:
djm@6458 286 /* Sanity check */
alex@16006 287 panic_domain(vcpu_regs(vcpu),
alex@16006 288 "Unexpected virtual <--> physical mode transition, "
alex@16006 289 "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
djm@6458 290 break;
djm@6458 291 }
djm@6458 292 return;
djm@6458 293 }
djm@6458 294
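/*
 * Invoke a mode switch only when one of psr.it/psr.dt/psr.rt actually
 * changed between the old and new psr values.
 */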
djm@6458 295 void
djm@6458 296 check_mm_mode_switch (VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
djm@6458 297 {
alex@15898 298 if (old_psr.dt != new_psr.dt ||
alex@15898 299 old_psr.it != new_psr.it ||
alex@15898 300 old_psr.rt != new_psr.rt) {
alex@15898 301 switch_mm_mode(vcpu, old_psr, new_psr);
alex@15893 302 debugger_event(XEN_IA64_DEBUG_ON_MMU);
djm@6458 303 }
djm@6458 304 }
djm@6458 305
djm@6458 306
djm@6458 307 /*
djm@6458 308 * In physical mode, inserting tc/tr entries for regions 0 and 4
djm@6458 309 * would use RID[0] and RID[4], which are reserved for physical
djm@6458 310 * mode emulation. However, what those inserted tc/tr entries want
djm@6458 311 * is the rid for virtual mode, so the original virtual rid needs to
djm@6458 312 * be restored before the insert.
djm@6458 313 *
djm@6458 314 * Operations which require such a switch include:
djm@6458 315 * - insertions (itc.*, itr.*)
djm@6458 316 * - purges (ptc.* and ptr.*)
djm@6458 317 * - tpa
djm@6458 318 * - tak
djm@6458 319 * - thash?, ttag?
djm@6458 320 * All of the above need the actual virtual rid for the destination entry.
djm@6458 321 */
djm@6458 322
djm@6458 323 void
djm@6458 324 prepare_if_physical_mode(VCPU *vcpu)
djm@6458 325 {
alex@16006 326 if (!is_virtual_mode(vcpu))
djm@6458 327 switch_to_virtual_rid(vcpu);
djm@6458 328 return;
djm@6458 329 }
djm@6458 330
djm@6458 331 /* Recover always follows prepare */
djm@6458 332 void
djm@6458 333 recover_if_physical_mode(VCPU *vcpu)
djm@6458 334 {
alex@16006 335 if (!is_virtual_mode(vcpu))
djm@6458 336 switch_to_physical_rid(vcpu);
djm@6458 337 return;
djm@6458 338 }
djm@6458 339