ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_vcpu.c @ 8370:2d5c57be196d

Remove some unused VTI code segments
Signed-off-by: Anthony Xu <anthony.xu@intel.com>

author    djm@kirby.fc.hp.com
date      Thu Dec 15 16:10:22 2005 -0600 (2005-12-15)
parents   06d84bf87159
children  f1b361b05bf3

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Arun Sharma (arun.sharma@intel.com)
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */

#include <xen/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>

//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;
/*
 * Copyright (c) 2005 Intel Corporation.
 *    Anthony Xu (anthony.xu@intel.com)
 *    Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>

//unsigned long last_guest_rsm = 0x0;
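
/*
 * Debug aid: a small ring buffer recording the guest IP and the new
 * virtual PSR value on every vmx_vcpu_set_psr() call, apparently so
 * that recent PSR transitions can be inspected from a debugger or dump.
 */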
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
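
/*
 * Install a new value into the guest's virtual PSR and propagate the
 * relevant bits to the machine PSR saved in pt_regs.  Bits that the
 * hardware clears after every successful instruction (id/da/dd/ss/ed/ia)
 * are masked out of the virtual copy, and a 0->1 transition of vpsr.i
 * requests a recheck for pending interrupts.
 */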
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{

    UINT64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val=vmx_vcpu_get_psr(vcpu);

    regs=vcpu_regs(vcpu);
    /* The guest is only supported with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
        panic ("Setting unsupported guest psr!");
    }

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after successful execution of each
     * instruction, they are not kept in the virtual PSR; they are
     * reflected in the machine PSR (mPSR) instead.
     */
    VCPU(vcpu,vpsr) = value &
            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
            ));

    if ( !old_psr.i && (value & IA64_PSR_I) ) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val=vmx_vcpu_get_psr(vcpu);
    {
        struct pt_regs *regs = vcpu_regs(vcpu);
        guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
        guest_psr_buf[guest_psr_index].psr = new_psr.val;
        if (++guest_psr_index >= 100)
            guest_psr_index = 0;
    }
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
            IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
            IA64_PSR_VM;

    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}

/* Adjust the instruction slot both in pt_regs and in the VPD according to
 * vpsr.ri, which should be kept in sync with ipsr on entry.
 *
 * Also clear some bits that are reset after successful emulation.
 */
IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
{
    // TODO: trap_bounce?? Eddie
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ri == 2) {
        vpsr.ri = 0;
        regs->cr_iip += 16;
    } else {
        vpsr.ri++;
    }

    ipsr->ri = vpsr.ri;
    vpsr.val &=
            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
            ));

    VCPU(vcpu, vpsr) = vpsr.val;

    ipsr->val &=
            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
            ));

    return (IA64_NO_FAULT);
}
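
/*
 * Emulation of the "cover" instruction for the guest: when interruption
 * collection is off (vpsr.ic == 0) the current cr.ifs is saved into the
 * virtual IFS, and the machine cr.ifs is reset to an empty, valid frame.
 */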
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);

    if(!vpsr.ic)
        VCPU(vcpu,ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
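
/*
 * Simple accessors: the per-vcpu virtual TLB control block and the
 * per-domain virtual platform description.
 */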
thash_cb_t *
vmx_vcpu_get_vtlb(VCPU *vcpu)
{
    return vcpu->arch.vtlb;
}

struct virutal_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
    return &(vcpu->domain->arch.vmx_platform);
}
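
/*
 * Return the guest's virtual region register covering virtual address
 * vadr (the region is selected by bits 63:61 of the address).
 */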
ia64_rr vmx_vcpu_rr(VCPU *vcpu,UINT64 vadr)
{
    return (ia64_rr)VMX(vcpu,vrr[vadr>>61]);
}
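
/*
 * Emulation of "mov rr[reg]=val" for the guest.  If the preferred page
 * size changes, the whole virtual TLB is purged.  Region 7 is handled
 * specially via vmx_switch_rr7(); the other regions are loaded directly
 * into the machine region registers after translating the virtual value
 * with vmx_vrrtomrr().
 */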
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_rr oldrr,newrr;
    thash_cb_t *hcb;
    extern void * pal_vaddr;
    oldrr=vmx_vcpu_rr(vcpu,reg);
    newrr.rrval=val;
#if 1
    if(oldrr.ps!=newrr.ps){
        hcb = vmx_vcpu_get_vtlb(vcpu);
        thash_purge_all(hcb);
    }
#endif
    VMX(vcpu,vrr[reg>>61]) = val;

    switch((u64)(reg>>61)) {
    case VRN7:
        vmx_switch_rr7(vmx_vrrtomrr(vcpu,val),vcpu->domain->shared_info,
                       (void *)vcpu->arch.privregs,
                       (void *)vcpu->arch.vtlb->ts->vhpt->hash, pal_vaddr );
        break;
    default:
        ia64_set_rr(reg,vmx_vrrtomrr(vcpu,val));
        break;
    }

    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/
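
/*
 * Protection key accesses are passed straight through to the machine
 * pkr registers; no virtualization layer is applied here.
 */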
IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    UINT64 val = (UINT64)ia64_get_pkr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_set_pkr(reg,val);
    return (IA64_NO_FAULT);
}

#if 0
int tlb_debug=0;
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps)-1);
    if ( va == 0x2000000002908000UL ||
         va == 0x600000000000C000UL ) {
        stop();
    }
    if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps);
}
#endif
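
/*
 * Build the ITIR value to report on a TLB fault at address ifa: the page
 * size and RID are taken from the virtual region register covering the
 * faulting address, all other fields are zero.
 */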
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr,rr1;
    rr=vmx_vcpu_rr(vcpu,ifa);
    rr1.rrval=0;
    rr1.ps=rr.ps;
    rr1.rid=rr.rid;
    return (rr1.rrval);
}
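
/*
 * Emulation of "rfi" for the guest: switch back to bank 1 registers,
 * restore the guest PSR from the virtual IPSR, reload cr.ifs from the
 * virtual IFS when it holds a valid, non-empty frame, and resume at the
 * virtual IIP.
 */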
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    UINT64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VCPU(vcpu,ipsr);
    vcpu_bsw1(vcpu);
    vmx_vcpu_set_psr(vcpu,psr);
    ifs=VCPU(vcpu,ifs);
    if((ifs>>63)&&(ifs<<1)){
        ifs=(regs->cr_ifs)&0x7f;
        regs->rfi_pfs = (ifs<<7)|ifs;
        regs->cr_ifs = VCPU(vcpu,ifs);
    }
    regs->cr_iip = VCPU(vcpu,iip);
    return (IA64_NO_FAULT);
}

UINT64
vmx_vcpu_get_psr(VCPU *vcpu)
{
    return VCPU(vcpu,vpsr);
}

#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        *val=VCPU(vcpu,vgr[reg-16]);
        // Check NAT bit
        if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val=VCPU(vcpu,vbgr[reg-16]);
        if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        VCPU(vcpu,vgr[reg-16]) = val;
        if(nat){
            VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
        }else{
            VCPU(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
        }
    }
    else {
        VCPU(vcpu,vbgr[reg-16]) = val;
        if(nat){
            VCPU(vcpu,vnat) |= ( 1UL<<(reg) );
        }else{
            VCPU(vcpu,vbnat) &= ~( 1UL<<(reg) );
        }
    }
    return IA64_NO_FAULT;
}

#endif
#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 * val)
{
    REGS *regs=vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return 0;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu,reg,val);
    }
#endif
    getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
    if(nat){
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise

IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof = (regs->cr_ifs) & 0x7f;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
#if 0
    if ( reg >= 16 && reg < 32 ) {
        return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
    }
#endif
    setreg(reg,value,nat,regs);
    return IA64_NO_FAULT;
}

#endif
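
/*
 * Emulation of "rsm imm24" / "ssm imm24": clear or set the requested
 * system-mask bits in the virtual PSR, then let vmx_vcpu_set_psr()
 * propagate the change to the machine state.
 */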
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}
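
/*
 * Update the virtual TPR (cr.tpr) and request a recheck of pending
 * interrupts (changing the TPR can change which interrupts are
 * deliverable to the guest).
 */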
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,tpr)=val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}