ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_phy_mode.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 3d96f88fb220
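The change described above has roughly the following shape (a minimal sketch, not
code taken from this changeset; the d/i variables and do_something() are illustrative):

    /* before: bounded by the compile-time constant */
    for (i = 0; i < MAX_VCPUS; i++)
        if (d->vcpu[i] != NULL)
            do_something(d->vcpu[i]);

    /* after: bounded by the per-domain limit introduced by 2f9e1348aa98 */
    for (i = 0; i < d->max_vcpus; i++)
        if (d->vcpu[i] != NULL)
            do_something(d->vcpu[i]);
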
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_phy_mode.c: emulating domain physical mode.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Arun Sharma (arun.sharma@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 */

#include <asm/processor.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_phy_mode.h>
#include <asm/pgtable.h>
#include <asm/vmmu.h>
#include <asm/debugger.h>

#define MODE_IND(psr) \
    (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)

#define SW_BAD    0   /* Bad mode transition */
#define SW_2P_DT  1   /* Physical emulation is activated */
#define SW_2P_D   2   /* Physical emulation is activated (only for data) */
#define SW_2V     3   /* Exit physical mode emulation */
#define SW_SELF   4   /* No mode transition */
#define SW_NOP    5   /* Mode transition, but without action required */

/*
 * Special notes:
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - If gva happens to be rr0 and rr4, only allowed case is identity
 *   mapping (gva=gpa), or panic! (How?)
 */
static const unsigned char mm_switch_table[8][8] = {
    /* 2004/09/12(Kevin): Allow switch to self */
    /*
     *  (it,dt,rt): (0,0,0) -> (1,1,1)
     *  This kind of transition usually occurs in the very early
     *  stage of Linux boot up procedure. Another case is in efi
     *  and pal calls. (see "arch/ia64/kernel/head.S")
     *
     *  (it,dt,rt): (0,0,0) -> (0,1,1)
     *  This kind of transition is found when OSYa exits efi boot
     *  service. Because gva = gpa in this case (same region), data
     *  accesses are still satisfied correctly even though the itlb
     *  entry inserted for physical emulation is hit.
     *
     *  (it,dt,rt): (0,0,0) -> (1,0,1)
     */
    {SW_SELF,  0,  0,  SW_NOP,  0,  SW_2P_D, 0,  SW_2V},
    {0,        0,  0,  0,       0,  0,       0,  0},
    {0,        0,  0,  0,       0,  0,       0,  0},
    /*
     *  (it,dt,rt): (0,1,1) -> (1,1,1)
     *  This kind of transition is found in OSYa.
     *
     *  (it,dt,rt): (0,1,1) -> (0,0,0)
     *  This kind of transition is found in OSYa.
     */
    {SW_NOP,   0,  0,  SW_SELF, 0,  0,       0,  SW_2V},
    /* (1,0,0) -> (1,1,1) */
    {0,        0,  0,  0,       0,  0,       0,  SW_2V},
    /*
     *  (it,dt,rt): (1,0,1) -> (1,1,1)
     *  This kind of transition usually occurs when Linux returns
     *  from the low level TLB miss handlers.
     *  (see "arch/ia64/kernel/ivt.S")
     *
     *  (it,dt,rt): (1,0,1) -> (0,0,0)
     */
    {SW_2P_DT, 0,  0,  0,       0,  SW_SELF, 0,  SW_2V},
    {0,        0,  0,  0,       0,  0,       0,  0},
    /*
     *  (it,dt,rt): (1,1,1) -> (1,0,1)
     *  This kind of transition usually occurs in Linux low level
     *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
     *
     *  (it,dt,rt): (1,1,1) -> (0,0,0)
     *  This kind of transition usually occurs in pal and efi calls,
     *  which require running in physical mode.
     *  (see "arch/ia64/kernel/head.S")
     *
     *  (it,dt,rt): (1,1,1) -> (1,0,0)
     */
    {SW_2P_DT, 0,  0,  0,       SW_2P_D, SW_2P_D, 0,  SW_SELF},
};
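
/*
 * Worked example (reader's note, not part of the original file):
 * MODE_IND() packs (it,dt,rt) into a 3-bit index, so (1,1,1) maps to 7
 * and (1,0,1) maps to 5.  A (1,1,1) -> (1,0,1) transition therefore
 * looks up mm_switch_table[7][5] == SW_2P_D: only data accesses fall
 * back to physical emulation, which matches the TLB-miss-handler case
 * documented above.
 */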

void
physical_mode_init(VCPU *vcpu)
{
    vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
}
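
/*
 * Reader's note (added comment): in emulated physical mode the guest's
 * "physical" address is used directly, so a TLB miss is resolved by
 * inserting an identity mapping (the ppn taken from vadr itself) with
 * the PHY_PAGE_WB attributes into the VHPT.
 */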
void
physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
{
    u64 pte;

    pte = (vadr & _PAGE_PPN_MASK) | PHY_PAGE_WB;
    thash_vhpt_insert(vcpu, pte, (PAGE_SHIFT << 2), vadr, type);
}
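
/*
 * Reader's note (added comment, interpretation of the constants): 0x38
 * appears to be the default guest rr value with rid 0, ve = 0 and the
 * ps field set to 14 (16KB pages); 0x738 is the same with rid 7 for the
 * VRN7 region.  vrrtomrr() then translates these guest values into the
 * machine rr values cached in metaphysical_saved_rr0/rr4.
 */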
void
vmx_init_all_rr(VCPU *vcpu)
{
    // enable vhpt in guest physical mode
    vcpu->arch.metaphysical_rid_dt |= 1;

    VMX(vcpu, vrr[VRN0]) = 0x38;
    vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, vrr[VRN1]) = 0x38;
    VMX(vcpu, vrr[VRN2]) = 0x38;
    VMX(vcpu, vrr[VRN3]) = 0x38;
    VMX(vcpu, vrr[VRN4]) = 0x38;
    vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, vrr[VRN5]) = 0x38;
    VMX(vcpu, vrr[VRN6]) = 0x38;
    VMX(vcpu, vrr[VRN7]) = 0x738;
}
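
/*
 * Reader's note (added comment): reload the machine region registers for
 * this vCPU.  rr0 and rr4 get either the saved virtual-mode values or one
 * of the metaphysical rids, depending on the emulated MMU mode; the other
 * regions always get the guest values translated by vrrtomrr().  The PTA
 * and DCR are reloaded as well.
 */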
void
vmx_load_all_rr(VCPU *vcpu)
{
    unsigned long rr0, rr4;

    switch (vcpu->arch.arch_vmx.mmu_mode) {
    case VMX_MMU_VIRTUAL:
        rr0 = vcpu->arch.metaphysical_saved_rr0;
        rr4 = vcpu->arch.metaphysical_saved_rr4;
        break;
    case VMX_MMU_PHY_DT:
        rr0 = vcpu->arch.metaphysical_rid_dt;
        rr4 = vcpu->arch.metaphysical_rid_dt;
        break;
    case VMX_MMU_PHY_D:
        rr0 = vcpu->arch.metaphysical_rid_d;
        rr4 = vcpu->arch.metaphysical_rid_d;
        break;
    default:
        panic_domain(NULL, "bad mmu mode value");
    }

    ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
    ia64_dv_serialize_data();
    ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
    ia64_dv_serialize_data();
    ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    ia64_dv_serialize_data();
    vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
    ia64_set_pta(VMX(vcpu, mpta));
    vmx_ia64_set_dcr(vcpu);

    ia64_srlz_d();
}
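
/*
 * Reader's note (added comment): load the metaphysical rid that matches
 * the current emulated mode into machine rr0 and rr4, with psr.ic cleared
 * around the update.
 */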
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;
    u64 rr;

    switch (vcpu->arch.arch_vmx.mmu_mode) {
    case VMX_MMU_PHY_DT:
        rr = vcpu->arch.metaphysical_rid_dt;
        break;
    case VMX_MMU_PHY_D:
        rr = vcpu->arch.metaphysical_rid_d;
        break;
    default:
        panic_domain(NULL, "bad mmu mode value");
    }

    psr = ia64_clear_ic();
    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
    ia64_srlz_d();

    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
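
/*
 * Reader's note (added comment): the inverse of switch_to_physical_rid():
 * restore the saved virtual-mode values of rr0 and rr4, again with psr.ic
 * cleared around the update.
 */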
void
switch_to_virtual_rid(VCPU *vcpu)
{
    u64 psr;

    psr = ia64_clear_ic();
    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
{
    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

/* In the fast path, psr.ic = 0, psr.i = 0 and psr.bn = 0,
 * so no tlb miss is allowed.
 */
void
switch_mm_mode_fast(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    act = mm_switch_action(old_psr, new_psr);
    switch (act) {
    case SW_2P_DT:
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2P_D:
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2V:
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
        switch_to_virtual_rid(vcpu);
        break;
    default:
        break;
    }
    return;
}

void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    /* Switch to physical mode when injecting PAL_INIT */
    if (unlikely(MODE_IND(new_psr) == 0 &&
                 vcpu_regs(vcpu)->cr_iip == PAL_INIT_ENTRY))
        act = SW_2P_DT;
    else
        act = mm_switch_action(old_psr, new_psr);
    perfc_incra(vmx_switch_mm_mode, act);
    switch (act) {
    case SW_2P_DT:
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_DT;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2P_D:
//        printk("V -> P_D mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_PHY_D;
        switch_to_physical_rid(vcpu);
        break;
    case SW_2V:
//        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.arch_vmx.mmu_mode = VMX_MMU_VIRTUAL;
        switch_to_virtual_rid(vcpu);
        break;
    case SW_SELF:
        printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
               old_psr.val);
        break;
    case SW_NOP:
//        printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        panic_domain(vcpu_regs(vcpu),
                     "Unexpected virtual <--> physical mode transition, "
                     "old:%lx, new:%lx\n", old_psr.val, new_psr.val);
        break;
    }
    return;
}

void
check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    if (old_psr.dt != new_psr.dt ||
        old_psr.it != new_psr.it ||
        old_psr.rt != new_psr.rt) {
        switch_mm_mode(vcpu, old_psr, new_psr);
        debugger_event(XEN_IA64_DEBUG_ON_MMU);
    }
}
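
/*
 * Usage sketch (reader's note, not code from this file): a PSR-write
 * emulation path would capture the PSR before and after the update and
 * let check_mm_mode_switch() decide whether a transition is needed:
 *
 *     IA64_PSR old_psr, new_psr;
 *     old_psr.val = vcpu_get_psr(vcpu);   // hypothetical accessor
 *     ... emulate the mov-to-psr / rfi ...
 *     new_psr.val = vcpu_get_psr(vcpu);
 *     check_mm_mode_switch(vcpu, old_psr, new_psr);
 *
 * vcpu_get_psr() stands in for whatever the caller uses to read the
 * guest PSR.
 */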

/*
 * In physical mode, a tc/tr insert for region 0 or 4 would use
 * RID[0] or RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries need is the rid of
 * virtual mode, so the original virtual rid has to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */

void
prepare_if_physical_mode(VCPU *vcpu)
{
    if (!is_virtual_mode(vcpu))
        switch_to_virtual_rid(vcpu);
    return;
}

/* Recover always follows prepare */
void
recover_if_physical_mode(VCPU *vcpu)
{
    if (!is_virtual_mode(vcpu))
        switch_to_physical_rid(vcpu);
    return;
}
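
/*
 * Usage sketch (reader's note, not code from this file): per the comment
 * above, a translation-insert emulation path brackets the real insertion
 * with the two helpers so the entry is tagged with the guest's virtual
 * rid even while the vCPU runs in emulated physical mode:
 *
 *     prepare_if_physical_mode(vcpu);
 *     emulate_itc_d(vcpu, pte, itir, ifa);   // hypothetical emulation routine
 *     recover_if_physical_mode(vcpu);
 */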