ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_phy_mode.c @ 8370:2d5c57be196d

Remove some unused VTI code segments
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author djm@kirby.fc.hp.com
date Thu Dec 15 16:10:22 2005 -0600 (2005-12-15)
parents ebc92fd2fac8
children 0f59ace5442c
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_phy_mode.c: emulating domain physical mode.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Arun Sharma (arun.sharma@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 */
#include <asm/processor.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_phy_mode.h>
#include <xen/sched.h>
#include <asm/pgtable.h>

int valid_mm_mode[8] = {
    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
    INV_MODE,
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
    INV_MODE,
    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1) */
};
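
/*
 * Illustration only (not part of the original file): the table above is
 * indexed by the guest psr it/dt/rt bits packed as (it << 2)|(dt << 1)|rt,
 * which is what the MODE_IND() macro used further below is assumed to
 * compute.
 */
#if 0   /* example sketch, kept disabled */
static inline int guest_mm_mode(IA64_PSR vpsr)
{
    /* e.g. (it, dt, rt) = (1, 1, 1) -> index 7 -> GUEST_VIRT */
    return valid_mm_mode[(vpsr.it << 2) | (vpsr.dt << 1) | vpsr.rt];
}
#endif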
/*
 * Special notes:
 * - Indexed by the it/dt/rt bit sequence
 * - Only mode transitions that actually occur are allowed in this table
 * - The RSE is placed in lazy mode while emulating guest partial mode
 * - If a gva happens to fall into rr0 or rr4, the only allowed case is an
 *   identity mapping (gva = gpa); anything else is a panic! (How?)
 */
int mm_switch_table[8][8] = {
    /* 2004/09/12(Kevin): Allow switch to self */
    /*
     * (it,dt,rt): (0,0,0) -> (1,1,1)
     * This kind of transition usually occurs in the very early
     * stage of the Linux boot-up procedure. Another case is in efi
     * and pal calls. (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (0,0,0) -> (0,1,1)
     * This kind of transition is found when OSYa exits the efi boot
     * service. Because gva = gpa in this case (same region), data
     * accesses can be satisfied even though the itlb entry inserted
     * for physical-mode emulation is hit.
     */
    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
    0,  0,  0,  0,  0,  0,  0,  0,
    0,  0,  0,  0,  0,  0,  0,  0,
    /*
     * (it,dt,rt): (0,1,1) -> (1,1,1)
     * This kind of transition is found in OSYa.
     *
     * (it,dt,rt): (0,1,1) -> (0,0,0)
     * This kind of transition is found in OSYa.
     */
    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
    /* (1,0,0) -> (1,1,1) */
    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
    /*
     * (it,dt,rt): (1,0,1) -> (1,1,1)
     * This kind of transition usually occurs when Linux returns
     * from the low-level TLB miss handlers.
     * (see "arch/ia64/kernel/ivt.S")
     */
    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
    0,  0,  0,  0,  0,  0,  0,  0,
    /*
     * (it,dt,rt): (1,1,1) -> (1,0,1)
     * This kind of transition usually occurs in the Linux low-level
     * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
     *
     * (it,dt,rt): (1,1,1) -> (0,0,0)
     * This kind of transition usually occurs in pal and efi calls,
     * which require running in physical mode.
     * (see "arch/ia64/kernel/head.S")
     * (1,1,1) -> (1,0,0)
     */
    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
};
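
/*
 * Worked example (illustration only): a transition out of physical-mode
 * emulation, (it,dt,rt) (0,0,0) -> (1,1,1), indexes mm_switch_table[0][7]
 * and yields SW_P2V, so switch_mm_mode() below restores the guest's
 * virtualized rids via switch_to_virtual_rid(). Conversely a pal/efi call,
 * (1,1,1) -> (0,0,0), hits mm_switch_table[7][0] and yields SW_V2P.
 */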
void
physical_mode_init(VCPU *vcpu)
{
    vcpu->arch.old_rsc = 0;
    vcpu->arch.mode_flags = GUEST_IN_PHY;
}
extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);

#if 0
void
physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 mppn, gppn, mpp1, gpp1;
    struct domain *d;
    static u64 test = 0;
    d = vcpu->domain;
    if (test)
        panic("domn physical itlb miss happen\n");
    else
        test = 1;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    gppn = (vadr << 1) >> 13;
    mppn = get_mfn(DOMID_SELF, gppn, 1);
    mppn = (mppn << 12) | (vpsr.cpl << 7);
    gpp1 = 0;
    mpp1 = get_mfn(DOMID_SELF, gpp1, 1);
    mpp1 = (mpp1 << 12) | (vpsr.cpl << 7);
//    if (vadr >> 63)
//        mppn |= PHY_PAGE_UC;
//    else
//        mppn |= PHY_PAGE_WB;
    mpp1 |= PHY_PAGE_WB;
    psr = ia64_clear_ic();
    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr & (~0xfff), (mppn | PHY_PAGE_WB), 24);
    ia64_srlz_i();
    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr & (~0xfff), (mppn | PHY_PAGE_WB), 24);
    ia64_stop();
    ia64_srlz_i();
    ia64_itr(0x1, IA64_TEMP_PHYSICAL + 1, vadr & (~0x8000000000000fffUL), (mppn | PHY_PAGE_WB), 24);
    ia64_srlz_i();
    ia64_itr(0x2, IA64_TEMP_PHYSICAL + 1, vadr & (~0x8000000000000fffUL), (mppn | PHY_PAGE_WB), 24);
    ia64_stop();
    ia64_srlz_i();
    ia64_itr(0x1, IA64_TEMP_PHYSICAL + 2, gpp1 & (~0xfff), mpp1, 28);
    ia64_srlz_i();
    ia64_itr(0x2, IA64_TEMP_PHYSICAL + 2, gpp1 & (~0xfff), mpp1, 28);
    ia64_stop();
    ia64_srlz_i();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
#endif
void
physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 mppn, gppn;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    /* Drop the uncacheable attribute bit (bit 63) of the physical-mode
     * address and convert the rest to a guest frame number. */
    gppn = (vadr << 1) >> 13;
    mppn = get_mfn(DOMID_SELF, gppn, 1);
    mppn = (mppn << 12) | (vpsr.cpl << 7);
//    if (vadr >> 63)
//        mppn |= PHY_PAGE_UC;
//    else
    mppn |= PHY_PAGE_WB;

    psr = ia64_clear_ic();
    ia64_itc(1, vadr & (~0xfff), mppn, EMUL_PHY_PAGE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
void
physical_itlb_miss(VCPU *vcpu, u64 vadr)
{
    physical_itlb_miss_dom0(vcpu, vadr);
}

void
physical_dtlb_miss(VCPU *vcpu, u64 vadr)
{
    u64 psr;
    IA64_PSR vpsr;
    u64 mppn, gppn;
//    if (vcpu->domain != dom0)
//        panic("dom n physical dtlb miss happen\n");
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    gppn = (vadr << 1) >> 13;
    mppn = get_mfn(DOMID_SELF, gppn, 1);
    mppn = (mppn << 12) | (vpsr.cpl << 7);
    /* Bit 63 of a physical-mode address selects the uncacheable attribute. */
    if (vadr >> 63)
        mppn |= PHY_PAGE_UC;
    else
        mppn |= PHY_PAGE_WB;

    psr = ia64_clear_ic();
    ia64_itc(2, vadr & (~0xfff), mppn, EMUL_PHY_PAGE_SHIFT);
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
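
/*
 * Illustration only (not part of the original file): the (vadr << 1) >> 13
 * idiom used in the miss handlers above is a branch-free way of discarding
 * the uncacheable attribute bit and shifting the remaining address down to
 * a frame number, i.e. it is equivalent to the following.
 */
#if 0   /* example sketch, kept disabled */
    gppn = (vadr & ((1UL << 63) - 1)) >> 12;
#endif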
void
vmx_init_all_rr(VCPU *vcpu)
{
    VMX(vcpu, vrr[VRN0]) = 0x38;
    VMX(vcpu, vrr[VRN1]) = 0x138;
    VMX(vcpu, vrr[VRN2]) = 0x238;
    VMX(vcpu, vrr[VRN3]) = 0x338;
    VMX(vcpu, vrr[VRN4]) = 0x438;
    VMX(vcpu, vrr[VRN5]) = 0x538;
    VMX(vcpu, vrr[VRN6]) = 0x660;
    VMX(vcpu, vrr[VRN7]) = 0x760;
#if 0
    VMX(vcpu, mrr5) = vmx_vrrtomrr(vcpu, 0x38);
    VMX(vcpu, mrr6) = vmx_vrrtomrr(vcpu, 0x60);
    VMX(vcpu, mrr7) = vmx_vrrtomrr(vcpu, 0x60);
#endif
}
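
/*
 * For reference (illustration only, assuming the architectural region
 * register layout: ve = bit 0, ps = bits 2..7, rid = bits 8..31): the
 * initial value 0x38 decodes to rid 0, ps 14 (16KB pages), ve 0; 0x138
 * decodes to rid 1 with the same ps/ve; and 0x660 decodes to rid 6,
 * ps 24 (16MB pages), ve 0. So each region starts with its region number
 * as its rid.
 */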
void
vmx_load_all_rr(VCPU *vcpu)
{
    unsigned long psr;
    ia64_rr phy_rr;

    local_irq_save(psr);

    /* WARNING: virtual mode and physical mode must not co-exist in the
     * same region.
     */
    if (is_physical_mode(vcpu)) {
        if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
            panic("Unexpected domain switch in phy emul\n");
        phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
        phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
        phy_rr.ve = 1;
        ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
        phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
        phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
        phy_rr.ve = 1;
        ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
    } else {
        ia64_set_rr((VRN0 << VRN_SHIFT),
                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
        ia64_set_rr((VRN4 << VRN_SHIFT),
                    vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
    }

    /* rr567 is postponed to the last point, when resuming back to the
     * guest. */
    ia64_set_rr((VRN1 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    ia64_set_rr((VRN2 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    ia64_set_rr((VRN3 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
    ia64_set_rr((VRN5 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
    ia64_set_rr((VRN6 << VRN_SHIFT),
                vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    extern void *pal_vaddr;
    vmx_switch_rr7(vmx_vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])),
                   (void *)vcpu->domain->shared_info,
                   (void *)vcpu->arch.privregs,
                   (void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr);
    ia64_set_pta(vcpu->arch.arch_vmx.mpta);

    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
}
void
switch_to_physical_rid(VCPU *vcpu)
{
    UINT64 psr;
    ia64_rr phy_rr;

    /* Load the metaphysical rr[0] and rr[4] used for physical mode
     * emulation. */
    psr = ia64_clear_ic();
    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
    phy_rr.ve = 1;
    ia64_set_rr(VRN0 << VRN_SHIFT, phy_rr.rrval);
    ia64_srlz_d();
    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
    phy_rr.ps = EMUL_PHY_PAGE_SHIFT;
    phy_rr.ve = 1;
    ia64_set_rr(VRN4 << VRN_SHIFT, phy_rr.rrval);
    ia64_srlz_d();

    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
void
switch_to_virtual_rid(VCPU *vcpu)
{
    UINT64 psr;
    ia64_rr mrr;

    /* Restore the guest's virtualized rr[0] and rr[4]. */
    psr = ia64_clear_ic();

    mrr = vmx_vcpu_rr(vcpu, VRN0 << VRN_SHIFT);
    ia64_set_rr(VRN0 << VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
    ia64_srlz_d();
    mrr = vmx_vcpu_rr(vcpu, VRN4 << VRN_SHIFT);
    ia64_set_rr(VRN4 << VRN_SHIFT, vmx_vrrtomrr(vcpu, mrr.rrval));
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
{
    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    REGS *regs = vcpu_regs(vcpu);
    act = mm_switch_action(old_psr, new_psr);
    switch (act) {
    case SW_V2P:
        vcpu->arch.old_rsc = regs->ar_rsc;
        switch_to_physical_rid(vcpu);
        /*
         * Set the RSE to enforced lazy mode, to prevent active RSE
         * save/restore while in guest physical mode.
         */
        regs->ar_rsc &= ~(IA64_RSC_MODE);
        vcpu->arch.mode_flags |= GUEST_IN_PHY;
        break;
    case SW_P2V:
        switch_to_virtual_rid(vcpu);
        /*
         * Recover the old RSE mode, which was saved when entering
         * guest physical mode.
         */
        regs->ar_rsc = vcpu->arch.old_rsc;
        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
        break;
    case SW_SELF:
        printf("Switch to self-0x%lx!!! MM mode doesn't change...\n",
               old_psr.val);
        break;
    case SW_NOP:
        printf("No action required for mode transition: (0x%lx -> 0x%lx)\n",
               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
        panic("Unexpected virtual <--> physical mode transition");
        break;
    }
    return;
}
/*
 * Check whether a guest psr update changes any of the it/dt/rt bits and,
 * if so, perform the corresponding virtual <-> physical mode switch.
 */
void
check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    if ((old_psr.dt != new_psr.dt) ||
        (old_psr.it != new_psr.it) ||
        (old_psr.rt != new_psr.rt)) {
        switch_mm_mode(vcpu, old_psr, new_psr);
    }

    return;
}
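
/*
 * Illustration only (not part of the original file): a caller emulating a
 * guest write to psr would be expected to bracket the update roughly as
 * follows; the value name used here is hypothetical.
 */
#if 0   /* example sketch, kept disabled */
    IA64_PSR old_psr, new_psr;
    old_psr.val = vmx_vcpu_get_psr(vcpu);
    new_psr.val = value_written_by_guest;    /* hypothetical */
    /* ... commit the new value to the virtual psr ... */
    check_mm_mode_switch(vcpu, old_psr, new_psr);
#endif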
/*
 * In physical mode, inserting a tc/tr for region 0 or 4 uses RID[0] or
 * RID[4], which are reserved for physical mode emulation. However, what
 * those inserted tc/tr entries want is the rid for virtual mode. So the
 * original virtual rid needs to be restored before the insert.
 *
 * Operations which require such a switch include:
 * - insertions (itc.*, itr.*)
 * - purges (ptc.* and ptr.*)
 * - tpa
 * - tak
 * - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */

void
prepare_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
        switch_to_virtual_rid(vcpu);
    }
    return;
}
/* Recover always follows prepare */
void
recover_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
        switch_to_physical_rid(vcpu);
    }
    return;
}
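
/*
 * Illustration only (not part of the original file): the comment above
 * implies that emulation of a TLB insert or purge issued while the guest
 * is in physical mode is expected to be bracketed roughly like this.
 */
#if 0   /* example sketch, kept disabled */
    prepare_if_physical_mode(vcpu);      /* temporarily back to virtual rids */
    /* ... emulate the guest itc/itr/ptc/ptr using its virtual rid ... */
    recover_if_physical_mode(vcpu);      /* back to metaphysical rids */
#endif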