ia64/xen-unstable

xen/arch/ia64/vmx/vmx_phy_mode.c @ 12014:9c649ca5c1cc

[IA64] physical mode fix

1. use WB attribute to emulate UC guest physical page.
2. correctly handle GUEST_PHY_EMUL

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Oct 31 22:25:40 2006 -0700 (2006-10-31)
parents d246b79986d1
children 0a490cf4b21d
/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_phy_mode.c: emulating domain physical mode.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Arun Sharma (arun.sharma@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 */

#include <asm/processor.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_phy_mode.h>
#include <xen/sched.h>
#include <asm/pgtable.h>
#include <asm/vmmu.h>
int valid_mm_mode[8] = {
    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
    INV_MODE,
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
    INV_MODE,
    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1) */
};
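
/*
 * Both tables here are indexed by the 3-bit value built from
 * (psr.it, psr.dt, psr.rt).  MODE_IND() is assumed to pack these bits as
 * (it << 2) | (dt << 1) | rt, which is what the per-row comments reflect.
 */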
/*
 * Special notes:
 * - Index by it/dt/rt sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed in lazy mode when emulating guest partial mode
 * - If the gva happens to fall in rr0 or rr4, the only allowed case is
 *   identity mapping (gva = gpa), otherwise panic! (How?)
 */
int mm_switch_table[8][8] = {
    /* 2004/09/12(Kevin): Allow switch to self */
    /*
     * (it,dt,rt): (0,0,0) -> (1,1,1)
     * This kind of transition usually occurs in the very early
     * stage of the Linux boot up procedure. Another case is in efi
     * and pal calls. (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (0,0,0) -> (0,1,1)
     * This kind of transition is found when OSYa exits the efi boot
     * service. Since gva = gpa in this case (same region), data
     * access can be satisfied even though the itlb entry for physical
     * emulation is hit.
     */
    {SW_SELF, 0,       0,       SW_NOP,  0,       0,       0,       SW_P2V},
    {0,       0,       0,       0,       0,       0,       0,       0},
    {0,       0,       0,       0,       0,       0,       0,       0},
    /*
     * (it,dt,rt): (0,1,1) -> (1,1,1)
     * This kind of transition is found in OSYa.
     *
     * (it,dt,rt): (0,1,1) -> (0,0,0)
     * This kind of transition is found in OSYa.
     */
    {SW_NOP,  0,       0,       SW_SELF, 0,       0,       0,       SW_P2V},
    /* (1,0,0) -> (1,1,1) */
    {0,       0,       0,       0,       0,       0,       0,       SW_P2V},
    /*
     * (it,dt,rt): (1,0,1) -> (1,1,1)
     * This kind of transition usually occurs when Linux returns
     * from the low level TLB miss handlers.
     * (see "arch/ia64/kernel/ivt.S")
     */
    {0,       0,       0,       0,       0,       SW_SELF, 0,       SW_P2V},
    {0,       0,       0,       0,       0,       0,       0,       0},
    /*
     * (it,dt,rt): (1,1,1) -> (1,0,1)
     * This kind of transition usually occurs in the Linux low level
     * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
     *
     * (it,dt,rt): (1,1,1) -> (0,0,0)
     * This kind of transition usually occurs in pal and efi calls,
     * which require running in physical mode.
     * (see "arch/ia64/kernel/head.S")
     *
     * (1,1,1) -> (1,0,0)
     */
    {SW_V2P,  0,       0,       0,       SW_V2P,  SW_V2P,  0,       SW_SELF},
};
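
/*
 * Reset the per-VCPU physical mode emulation state: the saved ar.rsc is
 * cleared and the vCPU starts out in guest physical mode.
 */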
void
physical_mode_init(VCPU *vcpu)
{
    vcpu->arch.old_rsc = 0;
    vcpu->arch.mode_flags = GUEST_IN_PHY;
}

extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *, void *);
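
/*
 * Handle a TLB miss taken while the guest runs in physical mode: insert an
 * identity mapping (the ppn is taken directly from the faulting address)
 * into the VHPT.  The mapping always gets the WB memory attribute, even
 * when the guest treats the page as UC (see the changeset description),
 * and the current region register's page size is used for the entry.
 */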
void
physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
{
    u64 pte;
    ia64_rr rr;
    rr.rrval = ia64_get_rr(vadr);
    pte = vadr & _PAGE_PPN_MASK;
    pte = pte | PHY_PAGE_WB;
    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
    return;
}
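
/*
 * Initialize the guest's virtual region registers.  0x38 is the default
 * value used for all regions (presumably RID 0 with the default page size);
 * region 7 additionally gets RID 7 (0x738).  The metaphysical rr0/rr4 have
 * bit 0 (ve) set so the hardware VHPT walker stays enabled while the guest
 * runs in physical mode.
 */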
void
vmx_init_all_rr(VCPU *vcpu)
{
    VMX(vcpu, vrr[VRN0]) = 0x38;
    // enable vhpt in guest physical mode
    vcpu->arch.metaphysical_rr0 |= 1;
    vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, vrr[VRN1]) = 0x38;
    VMX(vcpu, vrr[VRN2]) = 0x38;
    VMX(vcpu, vrr[VRN3]) = 0x38;
    VMX(vcpu, vrr[VRN4]) = 0x38;
    // enable vhpt in guest physical mode
    vcpu->arch.metaphysical_rr4 |= 1;
    vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, vrr[VRN5]) = 0x38;
    VMX(vcpu, vrr[VRN6]) = 0x38;
    VMX(vcpu, vrr[VRN7]) = 0x738;
}

extern void *pal_vaddr;
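
/*
 * Load all machine region registers for this vCPU.  rr0 and rr4 get either
 * the metaphysical values (guest in physical mode) or the saved, translated
 * guest values (guest in virtual mode); rr1-rr3 and rr5-rr6 always get the
 * translated guest values, and rr7 is switched together with PTA/DCR via
 * vmx_switch_rr7().  Interrupts are disabled across the update.
 */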
void
vmx_load_all_rr(VCPU *vcpu)
{
    unsigned long psr;

    local_irq_save(psr);

    /* WARNING: virtual mode and physical mode must not coexist
     * in the same region
     */
    if (is_physical_mode(vcpu)) {
        if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) {
            panic_domain(vcpu_regs(vcpu),
                         "Unexpected domain switch in phy emul\n");
        }
        ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
        ia64_dv_serialize_data();
        ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
        ia64_dv_serialize_data();
    } else {
        ia64_set_rr((VRN0 << VRN_SHIFT),
                    vcpu->arch.metaphysical_saved_rr0);
        ia64_dv_serialize_data();
        ia64_set_rr((VRN4 << VRN_SHIFT),
                    vcpu->arch.metaphysical_saved_rr4);
        ia64_dv_serialize_data();
    }

    /* rr567 will be postponed to the last point when resuming back to the guest */
    ia64_set_rr((VRN1 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN2 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN3 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN5 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN6 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    ia64_dv_serialize_data();
    vmx_switch_rr7(vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])),
                   (void *)vcpu->domain->shared_info,
                   (void *)vcpu->arch.privregs,
                   (void *)vcpu->arch.vhpt.hash, pal_vaddr);
    ia64_set_pta(VMX(vcpu, mpta));
    ia64_set_dcr(VMX(vcpu, mdcr));

    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
}
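
/*
 * switch_to_physical_rid() installs the metaphysical RIDs into rr0/rr4;
 * switch_to_virtual_rid() restores the translated guest RIDs.  Both run
 * with interruption collection disabled (ia64_clear_ic()) while the region
 * registers are inconsistent.
 */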
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;

    /* Install the metaphysical rr[0] and rr[4] */
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rr0);
    ia64_srlz_d();
    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rr4);
    ia64_srlz_d();

    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
void
switch_to_virtual_rid(VCPU *vcpu)
{
    u64 psr;

    psr = ia64_clear_ic();
    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_srlz_d();
    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
{
    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}
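
/*
 * Perform the mode switch selected by mm_switch_table for the old/new guest
 * PSR values.  On V2P the current ar.rsc is saved and the RSE is forced to
 * lazy mode; on P2V the saved ar.rsc is restored.
 */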
void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    REGS *regs = vcpu_regs(vcpu);

    act = mm_switch_action(old_psr, new_psr);
    perfc_incra(vmx_switch_mm_mode, act);
    switch (act) {
    case SW_V2P:
//        printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        vcpu->arch.old_rsc = regs->ar_rsc;
        switch_to_physical_rid(vcpu);
        /*
         * Set the RSE to enforced lazy mode, to prevent active RSE
         * save/restore while in guest physical mode.
         */
        regs->ar_rsc &= ~(IA64_RSC_MODE);
        vcpu->arch.mode_flags |= GUEST_IN_PHY;
        break;
    case SW_P2V:
//        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        switch_to_virtual_rid(vcpu);
        /*
         * Recover the old ar.rsc value, which was saved when entering
         * guest physical mode.
         */
        regs->ar_rsc = vcpu->arch.old_rsc;
        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
        break;
    case SW_SELF:
        printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
               old_psr.val);
        break;
    case SW_NOP:
//        printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        panic_domain(vcpu_regs(vcpu),
                     "Unexpected virtual <--> physical mode transition,"
                     "old:%lx,new:%lx\n", old_psr.val, new_psr.val);
        break;
    }
    return;
}
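
/*
 * Called when the guest's PSR may have been updated: if any of the
 * translation bits (it/dt/rt) changed, perform the corresponding
 * mode switch.
 */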
void
check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    if ((old_psr.dt != new_psr.dt) ||
        (old_psr.it != new_psr.it) ||
        (old_psr.rt != new_psr.rt)) {
        switch_mm_mode(vcpu, old_psr, new_psr);
    }
    return;
}
/*
 * In physical mode, tc/tr insertions for regions 0 and 4 use RID[0] and
 * RID[4], which are reserved for physical mode emulation.  However, what
 * the inserted tc/tr entries actually want is the rid for virtual mode,
 * so the original virtual rid needs to be restored before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */
void
prepare_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
        switch_to_virtual_rid(vcpu);
    }
    return;
}

/* Recover always follows prepare */
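/*
 * Note that GUEST_PHY_EMUL is cleared unconditionally here (outside the
 * is_physical_mode() check), presumably the "correctly handle
 * GUEST_PHY_EMUL" part of this changeset.
 */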
void
recover_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu))
        switch_to_physical_rid(vcpu);
    vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
    return;
}