ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_phy_mode.c @ 9765:7c7bcf173f8b

[IA64] cleanup vtlb code

This patch cleans up the vtlb code.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 25 20:53:38 2006 -0600 (2006-04-25)
parents 00111084c70a
children 4174856876f9
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_phy_mode.c: emulating domain physical mode.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Arun Sharma (arun.sharma@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 */

#include <asm/processor.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_phy_mode.h>
#include <xen/sched.h>
#include <asm/pgtable.h>
#include <asm/vmmu.h>

int valid_mm_mode[8] = {
    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
    INV_MODE,
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
    INV_MODE,
    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1) */
};
/*
 * Special notes:
 * - Indexed by the (it, dt, rt) bit sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed in lazy mode when emulating guest partial mode
 * - If gva happens to fall in rr0 or rr4, the only allowed case is
 *   identity mapping (gva=gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
    /* 2004/09/12(Kevin): Allow switch to self */
    /*
     * (it,dt,rt): (0,0,0) -> (1,1,1)
     * This kind of transition usually occurs in the very early
     * stage of the Linux boot-up procedure. Another case is in efi
     * and pal calls. (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (0,0,0) -> (0,1,1)
     * This kind of transition is found when OSYa exits efi boot
     * service. Because gva = gpa in this case (same region),
     * data access can be satisfied even though the itlb entry for
     * physical emulation is hit.
     */
    {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
    {0,  0,  0,  0,  0,  0,  0,  0},
    {0,  0,  0,  0,  0,  0,  0,  0},
    /*
     * (it,dt,rt): (0,1,1) -> (1,1,1)
     * This kind of transition is found in OSYa.
     *
     * (it,dt,rt): (0,1,1) -> (0,0,0)
     * This kind of transition is found in OSYa.
     */
    {SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V},
    /* (1,0,0) -> (1,1,1) */
    {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
    /*
     * (it,dt,rt): (1,0,1) -> (1,1,1)
     * This kind of transition usually occurs when Linux returns
     * from the low level TLB miss handlers.
     * (see "arch/ia64/kernel/ivt.S")
     */
    {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
    {0,  0,  0,  0,  0,  0,  0,  0},
    /*
     * (it,dt,rt): (1,1,1) -> (1,0,1)
     * This kind of transition usually occurs in the Linux low level
     * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
     *
     * (it,dt,rt): (1,1,1) -> (0,0,0)
     * This kind of transition usually occurs in pal and efi calls,
     * which require running in physical mode.
     * (see "arch/ia64/kernel/head.S")
     * (1,1,1) -> (1,0,0)
     */
    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
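
/*
 * Worked example (inferred from the entry comments above): the table is
 * indexed by (it << 2) | (dt << 1) | rt of the old and new psr values, so
 * a guest that clears psr.dt in its TLB miss handler moves from
 * (1,1,1)=7 to (1,0,1)=5 and hits mm_switch_table[7][5] == SW_V2P, while
 * the later return to (1,1,1) hits mm_switch_table[5][7] == SW_P2V.
 */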
void
physical_mode_init(VCPU *vcpu)
{
    vcpu->arch.old_rsc = 0;
    vcpu->arch.mode_flags = GUEST_IN_PHY;
}

extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *, void *);

/*void
physical_itlb_miss(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 xen_mppn,xen_gppn;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    xen_gppn=(vadr<<1)>>(PAGE_SHIFT+1);
    xen_mppn = gmfn_to_mfn(vcpu->domain, xen_gppn);
    xen_mppn=(xen_mppn<<PAGE_SHIFT)|(vpsr.cpl<<7);
    if(vadr>>63)
        xen_mppn |= PHY_PAGE_UC;
    else
        xen_mppn |= PHY_PAGE_WB;

    psr=ia64_clear_ic();
    ia64_itc(1,vadr&PAGE_MASK,xen_mppn,PAGE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
*/
/*
 * vec=1, itlb miss
 * vec=2, dtlb miss
 */
void
physical_tlb_miss(VCPU *vcpu, u64 vadr, u64 vec)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 xen_mppn, xen_gppn;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    xen_gppn = (vadr << 1) >> (PAGE_SHIFT + 1);
    xen_mppn = gmfn_to_mfn(vcpu->domain, xen_gppn);
    xen_mppn = (xen_mppn << PAGE_SHIFT) | (vpsr.cpl << 7);
    if (vadr >> 63)
        xen_mppn |= PHY_PAGE_UC;
    else
        xen_mppn |= PHY_PAGE_WB;

    psr = ia64_clear_ic();
    ia64_itc(vec, vadr & PAGE_MASK, xen_mppn, PAGE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
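
/*
 * Usage note (a sketch of the expected call path, not spelled out in this
 * file): the VMX fault handlers that field TLB misses while the guest is
 * in emulated physical mode are expected to call
 * physical_tlb_miss(vcpu, vadr, 1) for an instruction miss and
 * physical_tlb_miss(vcpu, vadr, 2) for a data miss, inserting an
 * identity-style mapping for the faulting address into the machine TLB.
 */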
void
vmx_init_all_rr(VCPU *vcpu)
{
    VMX(vcpu, vrr[VRN0]) = 0x38;
    VMX(vcpu, vrr[VRN1]) = 0x138;
    VMX(vcpu, vrr[VRN2]) = 0x238;
    VMX(vcpu, vrr[VRN3]) = 0x338;
    VMX(vcpu, vrr[VRN4]) = 0x438;
    VMX(vcpu, vrr[VRN5]) = 0x538;
    VMX(vcpu, vrr[VRN6]) = 0x660;
    VMX(vcpu, vrr[VRN7]) = 0x760;
#if 0
    VMX(vcpu, mrr5) = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, mrr6) = vrrtomrr(vcpu, 0x60);
    VMX(vcpu, mrr7) = vrrtomrr(vcpu, 0x60);
#endif
}
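
/*
 * Decoding aid (derived from the architected region register layout: ve in
 * bit 0, ps in bits 2-7, rid from bit 8 up; not a comment in the original
 * file): 0x38 means rid=0, ps=14 (16KB pages), ve=0, so VRN0-VRN5 start
 * with rid equal to their region number and 16KB pages, while 0x660/0x760
 * give VRN6/VRN7 rid 6/7 with ps=24 (16MB pages).
 */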
void
vmx_load_all_rr(VCPU *vcpu)
{
    unsigned long psr;
    ia64_rr phy_rr;

    extern void * pal_vaddr;

    local_irq_save(psr);

    /* WARNING: virtual mode and physical mode must not co-exist
     * in the same region
     */
    if (is_physical_mode(vcpu)) {
        if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
            panic("Unexpected domain switch in phy emul\n");
        phy_rr.rrval = vcpu->arch.metaphysical_rr0;
        //phy_rr.ps = PAGE_SHIFT;
        phy_rr.ve = 1;

        ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
        phy_rr.rrval = vcpu->arch.metaphysical_rr4;
        //phy_rr.ps = PAGE_SHIFT;
        phy_rr.ve = 1;

        ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
    } else {
        ia64_set_rr((VRN0 << VRN_SHIFT),
                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
        ia64_set_rr((VRN4 << VRN_SHIFT),
                    vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
    }

    /* rr567 will be postponed to the last point when resuming back to guest */
    ia64_set_rr((VRN1 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    ia64_set_rr((VRN2 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    ia64_set_rr((VRN3 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
    ia64_set_rr((VRN5 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
    ia64_set_rr((VRN6 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    vmx_switch_rr7(vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])),
                   (void *)vcpu->domain->shared_info,
                   (void *)vcpu->arch.privregs,
                   (void *)vcpu->arch.vhpt.hash, pal_vaddr);
    ia64_set_pta(vcpu->arch.arch_vmx.mpta);

    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
}
void
switch_to_physical_rid(VCPU *vcpu)
{
    UINT64 psr;
    ia64_rr phy_rr;

    /* Load the metaphysical rr[0] and rr[4] used for physical mode emulation */
    psr = ia64_clear_ic();
    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
    phy_rr.ve = 1;
    ia64_set_rr(VRN0 << VRN_SHIFT, phy_rr.rrval);
    ia64_srlz_d();
    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
//    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
    phy_rr.ve = 1;
    ia64_set_rr(VRN4 << VRN_SHIFT, phy_rr.rrval);
    ia64_srlz_d();

    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}


void
switch_to_virtual_rid(VCPU *vcpu)
{
    UINT64 psr;
    ia64_rr mrr;

    psr = ia64_clear_ic();

    vcpu_get_rr(vcpu, VRN0 << VRN_SHIFT, &mrr.rrval);
    ia64_set_rr(VRN0 << VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
    ia64_srlz_d();
    vcpu_get_rr(vcpu, VRN4 << VRN_SHIFT, &mrr.rrval);
    ia64_set_rr(VRN4 << VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
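
/*
 * Note: these two helpers only touch rr[0] and rr[4], since guest physical
 * mode is emulated through the metaphysical RIDs of regions 0 and 4. They
 * are used in pairs by switch_mm_mode() below and by
 * prepare_if_physical_mode()/recover_if_physical_mode() at the end of this
 * file.
 */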
static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
{
    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    REGS *regs = vcpu_regs(vcpu);

    act = mm_switch_action(old_psr, new_psr);
    switch (act) {
    case SW_V2P:
        vcpu->arch.old_rsc = regs->ar_rsc;
        switch_to_physical_rid(vcpu);
        /*
         * Set rse to enforced lazy, to prevent active rse save/restore
         * while in guest physical mode.
         */
        regs->ar_rsc &= ~(IA64_RSC_MODE);
        vcpu->arch.mode_flags |= GUEST_IN_PHY;
        break;
    case SW_P2V:
        switch_to_virtual_rid(vcpu);
        /*
         * Recover the old mode, which was saved when entering
         * guest physical mode.
         */
        regs->ar_rsc = vcpu->arch.old_rsc;
        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
        break;
    case SW_SELF:
        printf("Switch to self-0x%lx!!! MM mode doesn't change...\n",
               old_psr.val);
        break;
    case SW_NOP:
        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
        panic("Unexpected virtual <--> physical mode transition");
        break;
    }
    return;
}
void
check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    if ((old_psr.dt != new_psr.dt) ||
        (old_psr.it != new_psr.it) ||
        (old_psr.rt != new_psr.rt)) {
        switch_mm_mode(vcpu, old_psr, new_psr);
    }

    return;
}
/*
 * In physical mode, inserting a tc/tr for region 0 or 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the rid for
 * virtual mode. So the original virtual rid needs to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */

void
prepare_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
        switch_to_virtual_rid(vcpu);
    }
    return;
}
/* Recover always follows prepare */
void
recover_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
        switch_to_physical_rid(vcpu);
    }
    return;
}
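
/*
 * Usage sketch (an assumption about the callers, not shown in this file):
 * the itc/itr/ptc/tpa emulation paths listed in the comment above are
 * expected to bracket the actual TLB operation with this pair, so the
 * entry carries the guest's virtual rid even while the vcpu is in
 * emulated physical mode:
 *
 *     prepare_if_physical_mode(vcpu);
 *     ... insert/purge/translate using the guest's virtual rr0/rr4 ...
 *     recover_if_physical_mode(vcpu);
 */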