ia64/xen-unstable

xen/arch/ia64/vmx/vmx_vcpu.c @ 9765:7c7bcf173f8b

[IA64] cleanup vtlb code

This patch cleans up the vtlb code.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 25 20:53:38 2006 -0600 (2006-04-25)
parents b5c2dba60b69
children cd1df13fb1c4
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Arun Sharma (arun.sharma@intel.com)
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_phy_mode.h>
//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;
/*
 * Copyright (c) 2005 Intel Corporation.
 *    Anthony Xu (anthony.xu@intel.com)
 *    Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
//unsigned long last_guest_rsm = 0x0;
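
/*
 * Debug ring buffer: records the guest iip and the new virtual PSR value
 * on every guest PSR write (see vmx_vcpu_set_psr below).
 */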
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
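
/*
 * Write the guest's virtual PSR and propagate the relevant bits to the
 * machine PSR in the saved register frame.  Only guests with vpsr.pk = 0
 * and vpsr.is = 0 are supported; anything else panics.
 */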
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    UINT64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val = vmx_vcpu_get_psr(vcpu);

    regs = vcpu_regs(vcpu);
    /* We only support a guest with:
     *   vpsr.pk = 0
     *   vpsr.is = 0
     * Otherwise panic.
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic("Setting unsupported guest psr!");
    }

    /*
     * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each instruction
     * executes successfully, so they are not kept in the virtual PSR;
     * they are instead carried in the machine PSR (mIA64_PSR).
     */
    VCPU(vcpu, vpsr) = value &
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    if (!old_psr.i && (value & IA64_PSR_I)) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = vmx_vcpu_get_psr(vcpu);
    {
        struct pt_regs *regs = vcpu_regs(vcpu);
        guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
        guest_psr_buf[guest_psr_index].psr = new_psr.val;
        if (++guest_psr_index >= 100)
            guest_psr_index = 0;
    }
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All virtual PSR bits go to the machine PSR (regs->cr_ipsr),
     * except for the following bits: ic/i/dt/si/rt/mc/it/bn/vm.
     */
    mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
           IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
           IA64_PSR_VM;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & (~mask));

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}
/* Adjust the instruction slot (ri) in both pt_regs and the VPD; vpsr.ri
 * should already be in sync with ipsr.ri on entry.
 *
 * Also clear some PSR bits after successful emulation of the instruction.
 */
IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
{
    // TODO: trap_bounce?? Eddie
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ri == 2) {
        vpsr.ri = 0;
        regs->cr_iip += 16;
    } else {
        vpsr.ri++;
    }

    ipsr->ri = vpsr.ri;
    vpsr.val &=
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    VCPU(vcpu, vpsr) = vpsr.val;

    ipsr->val &=
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    return (IA64_NO_FAULT);
}
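
/*
 * Emulate the guest 'cover' instruction: when interruption collection is
 * off (vpsr.ic == 0), the machine CR.IFS (the guest's current frame marker)
 * is copied into the virtual CR.IFS; the machine CR.IFS is then replaced
 * with a valid, empty frame (IA64_IFS_V).
 */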
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);

    if (!vpsr.ic)
        VCPU(vcpu, ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
struct virtual_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
    return &(vcpu->domain->arch.vmx_platform);
}
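
/*
 * Write a guest region register.  An out-of-range RID panics the domain,
 * a change of the preferred page size purges the whole vTLB, and the new
 * value is then installed into the machine RR.  Region 7 goes through
 * vmx_switch_rr7, which also takes shared_info, the privregs page, the
 * VHPT hash and the PAL mapping.
 */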
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_rr oldrr, newrr;
    extern void * pal_vaddr;

    vcpu_get_rr(vcpu, reg, &oldrr.rrval);
    newrr.rrval = val;
    if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
        panic_domain(NULL, "use of invalid rid %lx\n", newrr.rid);
    if (oldrr.ps != newrr.ps) {
        thash_purge_all(vcpu);
    }
    VMX(vcpu, vrr[reg>>61]) = val;
    switch ((u64)(reg >> 61)) {
    case VRN7:
        vmx_switch_rr7(vrrtomrr(vcpu, val), vcpu->domain->shared_info,
                       (void *)vcpu->arch.privregs,
                       (void *)vcpu->arch.vhpt.hash, pal_vaddr);
        break;
    default:
        ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }

    return (IA64_NO_FAULT);
}
/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    UINT64 val = (UINT64)ia64_get_pkr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_set_pkr(reg, val);
    return (IA64_NO_FAULT);
}
#if 0
int tlb_debug = 0;
void
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps) - 1);
    if (va == 0x2000000002908000UL ||
        va == 0x600000000000C000UL) {
        stop();
    }
    if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL << ps);
}
#endif
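
/*
 * Build the ITIR value to report on a TLB fault for address ifa:
 * take the page size and RID from the region register covering ifa,
 * leaving every other field zero.
 */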
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr, rr1;
    vcpu_get_rr(vcpu, ifa, &rr.rrval);
    rr1.rrval = 0;
    rr1.ps = rr.ps;
    rr1.rid = rr.rid;
    return (rr1.rrval);
}
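
/*
 * Emulate the guest 'rfi': switch to register bank 1, restore the virtual
 * PSR from the virtual IPSR, and, if the virtual IFS is valid and non-empty,
 * reload the machine CR.IFS and rfi_pfs from it; finally resume execution
 * at the virtual IIP.
 */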
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    UINT64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VCPU(vcpu, ipsr);
    vcpu_bsw1(vcpu);
    vmx_vcpu_set_psr(vcpu, psr);
    ifs = VCPU(vcpu, ifs);
    if ((ifs >> 63) && (ifs << 1)) {
        ifs = (regs->cr_ifs) & 0x7f;
        regs->rfi_pfs = (ifs << 7) | ifs;
        regs->cr_ifs = VCPU(vcpu, ifs);
    }
    regs->cr_iip = VCPU(vcpu, iip);
    return (IA64_NO_FAULT);
}
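
/*
 * Return the guest's virtual PSR as stored in the VPD.
 */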
UINT64
vmx_vcpu_get_psr(VCPU *vcpu)
{
    return VCPU(vcpu, vpsr);
}
#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        *val = VCPU(vcpu, vgr[reg-16]);
        // Check NAT bit
        if (VCPU(vcpu, vnat) & (1UL << (reg-16))) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val = VCPU(vcpu, vbgr[reg-16]);
        if (VCPU(vcpu, vbnat) & (1UL << reg)) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}
IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        VCPU(vcpu, vgr[reg-16]) = val;
        if (nat) {
            VCPU(vcpu, vnat) |= (1UL << (reg-16));
        } else {
            VCPU(vcpu, vnat) &= ~(1UL << (reg-16));
        }
    }
    else {
        VCPU(vcpu, vbgr[reg-16]) = val;
        if (nat) {
            VCPU(vcpu, vbnat) |= (1UL << reg);
        } else {
            VCPU(vcpu, vbnat) &= ~(1UL << reg);
        }
    }
    return IA64_NO_FAULT;
}

#endif
#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val)
{
    REGS *regs = vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return 0;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu, reg, val);
    }
#endif
    getreg(reg, val, &nat, regs); // FIXME: handle NATs later
    if (nat) {
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    sof = (regs->cr_ifs) & 0x7f;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_set_bgr(vcpu, reg, value, nat);
    }
#endif
    setreg(reg, value, nat, regs);
    return IA64_NO_FAULT;
}

#endif
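
/*
 * rsm/ssm emulation: clear (reset) or set the PSR bits named by the
 * 24-bit immediate in the guest's virtual PSR, then re-derive the
 * machine state through vmx_vcpu_set_psr().
 */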
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}
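
/*
 * Write the virtual TPR.  Raising or lowering the TPR can change which
 * pending interrupts are deliverable, so flag irq_new_condition to force
 * a re-evaluation of the interrupt state before returning to the guest.
 */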
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu, tpr) = val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}