ia64/xen-unstable

view xen/arch/ia64/vmx_phy_mode.c @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by Eddie Dong <Eddie.dong@intel.com>
Signed-off-by Anthony Xu <Anthony.xu@intel.com>
Signed-off-by Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents c91f74efda05
children a83ac0806d6b
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_phy_mode.c: emulating domain physical mode.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Arun Sharma (arun.sharma@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 */

#include <asm/processor.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_phy_mode.h>
#include <xen/sched.h>
#include <asm/pgtable.h>

int valid_mm_mode[8] = {
    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
    INV_MODE,
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
    INV_MODE,
    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1) */
};
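
/*
 * Editor's note (illustrative, not part of the original file): the tables
 * in this file are indexed by packing the guest psr.it/dt/rt bits as
 * (it << 2) | (dt << 1) | rt, which is assumed to be what the MODE_IND()
 * macro from vmx_phy_mode.h computes. For example, (it,dt,rt) = (1,0,1)
 * gives index 5, i.e. GUEST_PHYS above.
 */
#if 0   /* sketch only, not compiled */
static inline int mode_index_sketch(int it, int dt, int rt)
{
    return (it << 2) | (dt << 1) | rt;  /* e.g. (1,1,1) -> 7 -> GUEST_VIRT */
}
#endif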

/*
 * Special notes:
 * - Indexed by the (it, dt, rt) bit sequence
 * - Only existing mode transitions are allowed in this table
 * - The RSE is placed in lazy mode when emulating guest partial mode
 * - If the gva happens to fall in rr0 or rr4, the only allowed case is an
 *   identity mapping (gva == gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
    /* 2004/09/12(Kevin): Allow switch to self */
    /*
     * (it,dt,rt): (0,0,0) -> (1,1,1)
     * This kind of transition usually occurs in the very early
     * stage of the Linux boot-up procedure. Another case is in EFI
     * and PAL calls. (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (0,0,0) -> (0,1,1)
     * This kind of transition is found when OSYa exits the EFI boot
     * service. Because gva == gpa in this case (same region), data
     * accesses can be satisfied when the itlb entry inserted for
     * physical emulation is hit.
     */
    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
    0,  0,  0,  0,  0,  0,  0,  0,
    0,  0,  0,  0,  0,  0,  0,  0,
    /*
     * (it,dt,rt): (0,1,1) -> (1,1,1)
     * This kind of transition is found in OSYa.
     *
     * (it,dt,rt): (0,1,1) -> (0,0,0)
     * This kind of transition is found in OSYa.
     */
    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
    /* (1,0,0) -> (1,1,1) */
    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
    /*
     * (it,dt,rt): (1,0,1) -> (1,1,1)
     * This kind of transition usually occurs when Linux returns
     * from the low-level TLB miss handlers.
     * (see "arch/ia64/kernel/ivt.S")
     */
    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
    0,  0,  0,  0,  0,  0,  0,  0,
    /*
     * (it,dt,rt): (1,1,1) -> (1,0,1)
     * This kind of transition usually occurs in the Linux low-level
     * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
     *
     * (it,dt,rt): (1,1,1) -> (0,0,0)
     * This kind of transition usually occurs in PAL and EFI calls,
     * which require running in physical mode.
     * (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (1,1,1) -> (1,0,0)
     */
    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
};
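
/*
 * Editor's example (illustrative): leaving pure physical mode during early
 * Linux boot is the (it,dt,rt) = (0,0,0) -> (1,1,1) transition described
 * above, i.e. row 0, column 7, which selects SW_P2V and is handled by
 * switch_mm_mode() below.
 */
#if 0   /* sketch only, not compiled */
static int mm_switch_example(void)
{
    return mm_switch_table[0][7];   /* == SW_P2V */
}
#endif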

void
physical_mode_init(VCPU *vcpu)
{
    UINT64 psr;
    struct domain *d = vcpu->domain;

    vcpu->domain->arch.emul_phy_rr0.rid = XEN_RR7_RID + ((d->domain_id) << 3);
    /* FIXME */
#if 0
    vcpu->domain->arch.emul_phy_rr0.ps = 28;  /* set page size to 256M */
#endif
    vcpu->domain->arch.emul_phy_rr0.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
    vcpu->domain->arch.emul_phy_rr0.ve = 1;   /* enable VHPT walker on this region */

    vcpu->domain->arch.emul_phy_rr4.rid = XEN_RR7_RID + ((d->domain_id) << 3) + 4;
    vcpu->domain->arch.emul_phy_rr4.ps = EMUL_PHY_PAGE_SHIFT;  /* set page size to 4k */
    vcpu->domain->arch.emul_phy_rr4.ve = 1;   /* enable VHPT walker on this region */

    vcpu->arch.old_rsc = 0;
    vcpu->arch.mode_flags = GUEST_IN_PHY;

    return;
}
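
/*
 * Editor's worked example (illustrative; the values follow from the
 * arithmetic above): each domain gets a pair of RIDs above XEN_RR7_RID for
 * physical mode emulation, one for rr0 and one for rr4. With domain_id == 2,
 * rr0.rid == XEN_RR7_RID + 16 and rr4.rid == XEN_RR7_RID + 20, so the
 * emulation regions of different domains never share a RID.
 */
#if 0   /* sketch only, not compiled */
static unsigned long emul_rid_sketch(unsigned long domain_id, int region4)
{
    return XEN_RR7_RID + (domain_id << 3) + (region4 ? 4 : 0);
}
#endif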

extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);

#if 0
void
physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 mppn, gppn, mpp1, gpp1;
    struct domain *d;
    static u64 test = 0;

    d = vcpu->domain;
    if (test)
        panic("domn physical itlb miss happen\n");
    else
        test = 1;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    gppn = (vadr << 1) >> 13;
    mppn = get_mfn(DOMID_SELF, gppn, 1);
    mppn = (mppn << 12) | (vpsr.cpl << 7);
    gpp1 = 0;
    mpp1 = get_mfn(DOMID_SELF, gpp1, 1);
    mpp1 = (mpp1 << 12) | (vpsr.cpl << 7);
//    if (vadr >> 63)
//        mppn |= PHY_PAGE_UC;
//    else
//        mppn |= PHY_PAGE_WB;
    mpp1 |= PHY_PAGE_WB;
    psr = ia64_clear_ic();
    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr & (~0xfff), (mppn | PHY_PAGE_WB), 24);
    ia64_srlz_i();
    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr & (~0xfff), (mppn | PHY_PAGE_WB), 24);
    ia64_stop();
    ia64_srlz_i();
    ia64_itr(0x1, IA64_TEMP_PHYSICAL + 1, vadr & (~0x8000000000000fffUL), (mppn | PHY_PAGE_WB), 24);
    ia64_srlz_i();
    ia64_itr(0x2, IA64_TEMP_PHYSICAL + 1, vadr & (~0x8000000000000fffUL), (mppn | PHY_PAGE_WB), 24);
    ia64_stop();
    ia64_srlz_i();
    ia64_itr(0x1, IA64_TEMP_PHYSICAL + 2, gpp1 & (~0xfff), mpp1, 28);
    ia64_srlz_i();
    ia64_itr(0x2, IA64_TEMP_PHYSICAL + 2, gpp1 & (~0xfff), mpp1, 28);
    ia64_stop();
    ia64_srlz_i();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
#endif

void
physical_itlb_miss(VCPU *vcpu, u64 vadr)
{
    physical_itlb_miss_dom0(vcpu, vadr);
}

void
physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 mppn, gppn;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    gppn = (vadr << 1) >> 13;
    mppn = get_mfn(DOMID_SELF, gppn, 1);
    mppn = (mppn << 12) | (vpsr.cpl << 7);
//    if (vadr >> 63)
//        mppn |= PHY_PAGE_UC;
//    else
    mppn |= PHY_PAGE_WB;

    psr = ia64_clear_ic();
    ia64_itc(1, vadr & (~0xfff), mppn, EMUL_PHY_PAGE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
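
/*
 * Editor's worked example (illustrative): for a physical-mode access,
 * (vadr << 1) >> 13 first drops bit 63 (the UC hint bit) and then shifts
 * away the 12 offset bits, yielding the guest pfn; the machine pfn from
 * get_mfn() is shifted back up by 12 and merged with the guest privilege
 * level in the PL field (bits 7-8) plus the memory attribute bits.
 */
#if 0   /* sketch only, not compiled */
static u64 gppn_sketch(u64 vadr)
{
    /* e.g. vadr = 0x8000000000402abc -> gppn = 0x402 */
    return (vadr << 1) >> 13;
}
#endif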

void
physical_dtlb_miss(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 mppn, gppn;

//    if (vcpu->domain != dom0)
//        panic("dom n physical dtlb miss happen\n");
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    gppn = (vadr << 1) >> 13;
    mppn = get_mfn(DOMID_SELF, gppn, 1);
    mppn = (mppn << 12) | (vpsr.cpl << 7);
    if (vadr >> 63)
        mppn |= PHY_PAGE_UC;
    else
        mppn |= PHY_PAGE_WB;

    psr = ia64_clear_ic();
    ia64_itc(2, vadr & (~0xfff), mppn, EMUL_PHY_PAGE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

void
vmx_init_all_rr(VCPU *vcpu)
{
    VMX(vcpu, vrr[VRN0]) = 0x38;
    VMX(vcpu, vrr[VRN1]) = 0x38;
    VMX(vcpu, vrr[VRN2]) = 0x38;
    VMX(vcpu, vrr[VRN3]) = 0x38;
    VMX(vcpu, vrr[VRN4]) = 0x38;
    VMX(vcpu, vrr[VRN5]) = 0x38;
    VMX(vcpu, vrr[VRN6]) = 0x60;
    VMX(vcpu, vrr[VRN7]) = 0x60;

    VMX(vcpu, mrr5) = vmx_vrrtomrr(vcpu, 0x38);
    VMX(vcpu, mrr6) = vmx_vrrtomrr(vcpu, 0x60);
    VMX(vcpu, mrr7) = vmx_vrrtomrr(vcpu, 0x60);
}
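
/*
 * Editor's note (illustrative, assuming the standard ia64 region register
 * layout: ve in bit 0, ps in bits 2-7, rid in bits 8-31): the initial
 * value 0x38 encodes ps = 14 (a 16KB preferred page size) with ve = 0,
 * while the 0x60 used for regions 6 and 7 encodes ps = 24 (16MB).
 */
#if 0   /* sketch only, not compiled */
static void rr_decode_sketch(void)
{
    unsigned long rrval = 0x38;
    unsigned long ps = (rrval >> 2) & 0x3f;   /* == 14 -> 16KB pages */
    unsigned long ve = rrval & 1;             /* == 0                */
    (void)ps; (void)ve;
}
#endif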

void
vmx_load_all_rr(VCPU *vcpu)
{
    unsigned long psr;

    psr = ia64_clear_ic();

    /* WARNING: virtual mode and physical mode must not coexist
     * in the same region
     */
    if (is_physical_mode(vcpu)) {
        if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
            panic("Unexpected domain switch in phy emul\n");
        ia64_set_rr((VRN0 << VRN_SHIFT),
                    vcpu->domain->arch.emul_phy_rr0.rrval);
        ia64_set_rr((VRN4 << VRN_SHIFT),
                    vcpu->domain->arch.emul_phy_rr4.rrval);
    } else {
        ia64_set_rr((VRN0 << VRN_SHIFT),
                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
        ia64_set_rr((VRN4 << VRN_SHIFT),
                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
    }

#if 1
    /* rr5, rr6 and rr7 are postponed to the last point when resuming
     * back to the guest */
    ia64_set_rr((VRN1 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    ia64_set_rr((VRN2 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    ia64_set_rr((VRN3 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
#endif
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
}

void
switch_to_physical_rid(VCPU *vcpu)
{
    UINT64 psr;

    /* Switch rr[0] and rr[4] to the physical-mode emulation RIDs; the
     * original virtual-mode values stay in VMX(vcpu, vrr[]) and are
     * restored by switch_to_virtual_rid() */
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->domain->arch.emul_phy_rr0.rrval);
    ia64_srlz_d();
    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->domain->arch.emul_phy_rr4.rrval);
    ia64_srlz_d();

    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

void
switch_to_virtual_rid(VCPU *vcpu)
{
    UINT64 psr;
    ia64_rr mrr;

    psr = ia64_clear_ic();

    mrr = vmx_vcpu_rr(vcpu, VRN0 << VRN_SHIFT);
    ia64_set_rr(VRN0 << VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
    ia64_srlz_d();
    mrr = vmx_vcpu_rr(vcpu, VRN4 << VRN_SHIFT);
    ia64_set_rr(VRN4 << VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
{
    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    REGS *regs = vcpu_regs(vcpu);

    act = mm_switch_action(old_psr, new_psr);
    switch (act) {
    case SW_V2P:
        vcpu->arch.old_rsc = regs->ar_rsc;
        switch_to_physical_rid(vcpu);
        /*
         * Set the RSE to enforced lazy, to prevent active RSE save/restore
         * while in guest physical mode.
         */
        regs->ar_rsc &= ~(IA64_RSC_MODE);
        vcpu->arch.mode_flags |= GUEST_IN_PHY;
        break;
    case SW_P2V:
        switch_to_virtual_rid(vcpu);
        /*
         * Recover the old RSC mode, which was saved when entering
         * guest physical mode.
         */
        regs->ar_rsc = vcpu->arch.old_rsc;
        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
        break;
    case SW_SELF:
        printf("Switch to self-0x%lx!!! MM mode doesn't change...\n",
               old_psr.val);
        break;
    case SW_NOP:
        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
        panic("Unexpected virtual <--> physical mode transition");
        break;
    }
    return;
}

void
check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    if ((old_psr.dt != new_psr.dt) ||
        (old_psr.it != new_psr.it) ||
        (old_psr.rt != new_psr.rt)) {
        switch_mm_mode(vcpu, old_psr, new_psr);
    }

    return;
}

/*
 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0] and RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the RID for
 * virtual mode, so the original virtual RID needs to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual RID for the destination entry.
 */

void
prepare_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
        switch_to_virtual_rid(vcpu);
    }
    return;
}

/* Recover always follows prepare */
void
recover_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
        switch_to_physical_rid(vcpu);
    }
    return;
}
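
/*
 * Editor's usage sketch (the call site below is an assumption, not part of
 * this file): an emulation path that inserts or purges a translation while
 * the guest runs in physical mode brackets the operation as described in
 * the comment above, so the entry is tagged with the guest's virtual RID
 * rather than the physical-emulation RID.
 */
#if 0   /* sketch only, not compiled */
static void tlb_insert_emulation_sketch(VCPU *vcpu)
{
    prepare_if_physical_mode(vcpu);    /* temporarily restore virtual RIDs */
    /* ... emulate itc.d / itr.d / ptc.l etc. here ... */
    recover_if_physical_mode(vcpu);    /* back to physical-emulation RIDs  */
}
#endif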