ia64/xen-unstable

xen/arch/ia64/vmx_vcpu.c @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>

author    djm@kirby.fc.hp.com
date      Sat Jul 09 07:58:56 2005 -0700
parents   c91f74efda05
children  a83ac0806d6b
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Fred Yang (fred.yang@intel.com)
 * Arun Sharma (arun.sharma@intel.com)
 * Shaofan Li (Susie Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <linux/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;

/*
 * Copyright (c) 2005 Intel Corporation.
 *    Anthony Xu (anthony.xu@intel.com)
 *    Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
/**************************************************************************
 VCPU general register access routines
**************************************************************************/

#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
//unsigned long last_guest_rsm = 0x0;
struct guest_psr_bundle {
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    UINT64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val = vmx_vcpu_get_psr(vcpu);

    regs = vcpu_regs(vcpu);
    /* We only support a guest with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic("Setting unsupported guest psr!");
    }

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after successful execution of each
     * instruction, keep them clear in the virtual PSR and let them
     * be reflected through mIA64_PSR instead.
     */
    VMX_VPD(vcpu, vpsr) = value &
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    if (!old_psr.i && (value & IA64_PSR_I)) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = vmx_vcpu_get_psr(vcpu);
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
           IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
           IA64_PSR_VM;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & (~mask));

    check_mm_mode_switch(vcpu, old_psr, new_psr);
}
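
/*
 * Worked example of the split above (illustrative values only): psr.i
 * and psr.dt are inside `mask`, so regs->cr_ipsr keeps the hypervisor's
 * settings for them and the guest's choices are recorded only in vpsr;
 * a bit outside `mask`, e.g. psr.be, is taken from the guest `value`
 * directly.
 */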
/* Adjust the instruction slot both in xen_regs and in the VPD, based on
 * vpsr.ri, which should have been synced with ipsr on entry.
 *
 * Also clear some bits that are defined to be 0 after an instruction is
 * successfully emulated.
 */
IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
{
    // TODO: trap_bounce?? Eddie
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ri == 2) {
        vpsr.ri = 0;
        regs->cr_iip += 16;
    } else {
        vpsr.ri++;
    }

    ipsr->ri = vpsr.ri;
    vpsr.val &=
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    VMX_VPD(vcpu, vpsr) = vpsr.val;

    ipsr->val &=
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    return (IA64_NO_FAULT);
}
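
/*
 * Example of the slot arithmetic above: an IA-64 bundle is 16 bytes and
 * holds three instruction slots, so ipsr.ri counts 0, 1, 2. Emulating
 * the instruction in slot 2 wraps ri back to 0 and advances cr_iip to
 * the next bundle (+16); in slots 0 and 1 only ri is incremented.
 */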
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);

    if (!vpsr.ic)
        VPD_CR(vcpu, ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
thash_cb_t *
vmx_vcpu_get_vtlb(VCPU *vcpu)
{
    return vcpu->arch.vtlb;
}

struct virutal_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
    return &(vcpu->arch.arch_vmx.vmx_platform);
}

ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr)
{
    return (ia64_rr)VMX(vcpu, vrr[vadr >> 61]);
}
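
/*
 * Note on the indexing above: an IA-64 virtual address selects its
 * region register by its top three bits (63:61), so vadr>>61 is the
 * virtual region number 0..7. For instance, a guest address such as
 * 0xe000000000001000 falls in region 7 and is governed by vrr[7].
 */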
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    extern void set_one_rr(UINT64, UINT64);
    ia64_rr oldrr, newrr;
    thash_cb_t *hcb;
    oldrr = vmx_vcpu_rr(vcpu, reg);
    newrr.rrval = val;
#if 1
    if (oldrr.ps != newrr.ps) {
        hcb = vmx_vcpu_get_vtlb(vcpu);
        thash_purge_all(hcb);
    }
#endif
    VMX(vcpu, vrr[reg >> 61]) = val;
    switch ((u64)(reg >> 61)) {
    case VRN5:
        VMX(vcpu, mrr5) = vmx_vrrtomrr(vcpu, val);
        break;
    case VRN6:
        VMX(vcpu, mrr6) = vmx_vrrtomrr(vcpu, val);
        break;
    case VRN7:
        VMX(vcpu, mrr7) = vmx_vrrtomrr(vcpu, val);
        /* Change double mapping for this domain */
        vmx_change_double_mapping(vcpu,
                                  vmx_vrrtomrr(vcpu, oldrr.rrval),
                                  vmx_vrrtomrr(vcpu, newrr.rrval));
        break;
    default:
        ia64_set_rr(reg, vmx_vrrtomrr(vcpu, val));
        break;
    }

    return (IA64_NO_FAULT);
}
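
/*
 * Design note on the purge in vmx_vcpu_set_rr(): it is deliberately
 * conservative. The vTLB is presumably organized around the region's
 * preferred page size (rr.ps), so entries inserted under the old ps
 * could be missed or mis-hashed under the new one; flushing the whole
 * vTLB trades refill cost for correctness.
 */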
/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    UINT64 val = (UINT64)ia64_get_pkr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_set_pkr(reg, val);
    return (IA64_NO_FAULT);
}
#if 0
int tlb_debug = 0;
void
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps) - 1);
    if (va == 0x2000000002908000UL ||
        va == 0x600000000000C000UL) {
        stop();
    }
    if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL << ps);
}
#endif
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr, rr1;
    rr = vmx_vcpu_rr(vcpu, ifa);
    rr1.rrval = 0;
    rr1.ps = rr.ps;
    rr1.rid = rr.rid;
    return (rr1.rrval);
}
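
/*
 * vmx_vcpu_get_itir_on_fault() builds the ITIR value a guest would see
 * on a TLB fault at `ifa`: only the faulting region's page size (ps)
 * and rid are reported, every other field stays zero. The rid is kept
 * at its region-register bit position, which appears to coincide with
 * the key field hardware reports in ITIR.
 */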
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    UINT64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VPD_CR(vcpu, ipsr);
    vmx_vcpu_set_psr(vcpu, psr);
    ifs = VPD_CR(vcpu, ifs);
    if ((ifs >> 63) && (ifs << 1)) {
        ifs = (regs->cr_ifs) & 0x7f;
        regs->rfi_pfs = (ifs << 7) | ifs;
        regs->cr_ifs = VPD_CR(vcpu, ifs);
    }
    regs->cr_iip = VPD_CR(vcpu, iip);
    return (IA64_NO_FAULT);
}
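
/*
 * Note on the ifs test in vmx_vcpu_rfi(): bit 63 of cr.ifs is its valid
 * bit, and ifs<<1 is non-zero only if some frame bits are set, so the
 * register stack frame is restored only for a valid, non-empty ifs.
 */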
UINT64
vmx_vcpu_get_psr(VCPU *vcpu)
{
    return VMX_VPD(vcpu, vpsr);
}
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        *val = VMX_VPD(vcpu, vgr[reg - 16]);
        // Check NAT bit
        if (VMX_VPD(vcpu, vnat) & (1UL << (reg - 16))) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val = VMX_VPD(vcpu, vbgr[reg - 16]);
        /* One NAT bit per banked register, at position reg-16 */
        if (VMX_VPD(vcpu, vbnat) & (1UL << (reg - 16))) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}
IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        VMX_VPD(vcpu, vgr[reg - 16]) = val;
        if (nat) {
            VMX_VPD(vcpu, vnat) |= (1UL << (reg - 16));
        } else {
            VMX_VPD(vcpu, vnat) &= ~(1UL << (reg - 16));
        }
    }
    else {
        VMX_VPD(vcpu, vbgr[reg - 16]) = val;
        if (nat) {
            VMX_VPD(vcpu, vbnat) |= (1UL << (reg - 16));
        } else {
            VMX_VPD(vcpu, vbnat) &= ~(1UL << (reg - 16));
        }
    }
    return IA64_NO_FAULT;
}
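
/*
 * Background for the two banked-register helpers above: IA-64 keeps two
 * banks of r16-r31, selected by psr.bn. The VPD tracks the bn=1 bank in
 * vgr/vnat and the bn=0 bank in vbgr/vbnat, with one NAT bit per
 * register at bit position reg-16.
 */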
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val)
{
    REGS *regs = vcpu_regs(vcpu);
    u64 nat;
    //TODO, Eddie
    if (!regs) return 0;
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu, reg, val);
    }
    getreg(reg, val, &nat, regs); // FIXME: handle NATs later
    if (nat) {
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}
// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    /* Only read cr_ifs after the NULL check above */
    sof = (regs->cr_ifs) & 0x7f;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_set_bgr(vcpu, reg, value, nat);
    }
    setreg(reg, value, nat, regs);
    return IA64_NO_FAULT;
}
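
#if 0
/* Illustrative sketch (kept out of the build, like the other #if 0
 * blocks in this file): how a caller might move one guest GR to another
 * with the accessors above, propagating the NAT bit. The helper name is
 * hypothetical. */
static IA64FAULT copy_guest_gr(VCPU *vcpu, unsigned dst, unsigned src)
{
    UINT64 v;
    int nat = (vmx_vcpu_get_gr(vcpu, src, &v) == IA64_FAULT);
    return vmx_vcpu_set_gr(vcpu, dst, v, nat);
}
#endif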
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}
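
/*
 * The two helpers above back the guest's rsm/ssm instructions: imm24 is
 * the instruction's 24-bit mask of PSR system bits, which rsm clears and
 * ssm sets; vmx_vcpu_set_psr() then screens which of those bits may
 * actually reach the machine PSR.
 */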
IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu, tpr) = val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}
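
/*
 * Writing the TPR can unmask a previously blocked interrupt, hence the
 * irq_new_condition flag above: the next VM entry re-evaluates interrupt
 * deliverability, the same mechanism vmx_vcpu_set_psr() uses when vpsr.i
 * goes from 0 to 1.
 */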