ia64/xen-unstable

xen/arch/ia64/vmx_vcpu.c @ 5975:0380b4cc3c1a

Merge
author adsharma@los-vmm.sc.intel.com
date Mon Aug 08 10:26:44 2005 -0800 (2005-08-08)
parents e173a853dc46
children a6bb47919161 d4fd332df775
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Fred Yang (fred.yang@intel.com)
 * Arun Sharma (arun.sharma@intel.com)
 * Shaofan Li (Susie Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */

#include <xen/sched.h>
#include <public/arch-ia64.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>

//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
//u64 fire_itm2;

/*
 * Copyright (c) 2005 Intel Corporation.
 * Anthony Xu (anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>

//unsigned long last_guest_rsm = 0x0;
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
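
/*
 * Load a new value into the guest's virtual PSR. Bits that the
 * architecture clears after every instruction (id/da/dd/ss/ed/ia) are
 * masked out of the stored vpsr, a 0->1 transition of vpsr.i is flagged
 * so pending interrupts are re-evaluated, and only the bits not owned by
 * Xen (everything outside ic/i/dt/si/rt/mc/it/bn/vm) are propagated into
 * the machine cr.ipsr. Each transition is also logged in the
 * guest_psr_buf ring above for debugging.
 */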
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    UINT64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val = vmx_vcpu_get_psr(vcpu);

    regs = vcpu_regs(vcpu);
    /* We only support a guest with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic("Setting unsupported guest psr!");
    }

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after each successfully executed
     * instruction, keep them cleared in the virtual PSR.
     */
    VMX_VPD(vcpu, vpsr) = value &
        (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
           IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    if (!old_psr.i && (value & IA64_PSR_I)) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = vmx_vcpu_get_psr(vcpu);
    {
        struct xen_regs *regs = vcpu_regs(vcpu);
        guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
        guest_psr_buf[guest_psr_index].psr = new_psr.val;
        if (++guest_psr_index >= 100)
            guest_psr_index = 0;
    }
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits go to the machine PSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
           IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
           IA64_PSR_VM;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & (~mask));

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}
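
/*
 * Illustration of the split above (a minimal sketch): suppose the guest
 * supplies a PSR value with psr.ed and psr.i set. psr.ed is dropped from
 * the stored vpsr because it self-clears after every instruction; psr.i
 * is kept in vpsr, and since psr.i is part of `mask` the machine cr.ipsr
 * keeps whatever value Xen is already running with. Only bits outside
 * `mask` ever reach the real cr.ipsr from a guest write.
 */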

/* Adjust the instruction slot in both xen_regs and the vpd, based on
 * vpsr.ri, which should have been synced with ipsr on entry.
 *
 * Clear some bits after successful emulation.
 */
IA64FAULT vmx_vcpu_increment_iip(VCPU *vcpu)
{
    // TODO: trap_bounce?? Eddie
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    IA64_PSR *ipsr = (IA64_PSR *)&regs->cr_ipsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.ri == 2) {
        vpsr.ri = 0;
        regs->cr_iip += 16;
    } else {
        vpsr.ri++;
    }

    ipsr->ri = vpsr.ri;
    vpsr.val &=
        (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
           IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    VMX_VPD(vcpu, vpsr) = vpsr.val;

    ipsr->val &=
        (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
           IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

    return (IA64_NO_FAULT);
}
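
/*
 * Slot arithmetic above, illustrated: an IA-64 bundle is 16 bytes and
 * holds three instruction slots, so completing the instruction in slot 2
 * moves cr.iip to the next bundle, while slots 0 and 1 only advance
 * psr.ri (0 -> 1 -> 2).
 */

/* Emulate the guest "cover" instruction: when interruption collection is
 * off (vpsr.ic == 0), the frame marker currently in regs->cr_ifs is
 * preserved in the virtual cr.ifs; the machine cr.ifs is then reset to an
 * empty frame with only the valid bit set.
 */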
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);

    if (!vpsr.ic)
        VPD_CR(vcpu, ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}


thash_cb_t *
vmx_vcpu_get_vtlb(VCPU *vcpu)
{
    return vcpu->arch.vtlb;
}


struct virutal_platform_def *
vmx_vcpu_get_plat(VCPU *vcpu)
{
    return &(vcpu->domain->arch.vmx_platform);
}


ia64_rr vmx_vcpu_rr(VCPU *vcpu, UINT64 vadr)
{
    return (ia64_rr)VMX(vcpu, vrr[vadr >> 61]);
}
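
/* Write guest region register `reg` (indexed by VRN, i.e. bits 63:61 of
 * the address). If the preferred page size changes, the whole vTLB is
 * purged. The guest value is recorded in the VPD; regions 5-7 keep a
 * shadow machine value (mrr5/6/7), with VRN7 also refreshing the domain's
 * double mapping, while the remaining regions are written straight into
 * the machine region register after conversion by vmx_vrrtomrr().
 */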
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_rr oldrr, newrr;
    thash_cb_t *hcb;
    oldrr = vmx_vcpu_rr(vcpu, reg);
    newrr.rrval = val;
#if 1
    if (oldrr.ps != newrr.ps) {
        hcb = vmx_vcpu_get_vtlb(vcpu);
        thash_purge_all(hcb);
    }
#endif
    VMX(vcpu, vrr[reg >> 61]) = val;
    switch ((u64)(reg >> 61)) {
    case VRN5:
        VMX(vcpu, mrr5) = vmx_vrrtomrr(vcpu, val);
        break;
    case VRN6:
        VMX(vcpu, mrr6) = vmx_vrrtomrr(vcpu, val);
        break;
    case VRN7:
        VMX(vcpu, mrr7) = vmx_vrrtomrr(vcpu, val);
        /* Change double mapping for this domain */
        vmx_change_double_mapping(vcpu,
                                  vmx_vrrtomrr(vcpu, oldrr.rrval),
                                  vmx_vrrtomrr(vcpu, newrr.rrval));
        break;
    default:
        ia64_set_rr(reg, vmx_vrrtomrr(vcpu, val));
        break;
    }

    return (IA64_NO_FAULT);
}


/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

IA64FAULT vmx_vcpu_get_pkr(VCPU *vcpu, UINT64 reg, UINT64 *pval)
{
    UINT64 val = (UINT64)ia64_get_pkr(reg);
    *pval = val;
    return (IA64_NO_FAULT);
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val)
{
    ia64_set_pkr(reg, val);
    return (IA64_NO_FAULT);
}

#if 0
int tlb_debug = 0;
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps) - 1);
    if (va == 0x2000000002908000UL ||
        va == 0x600000000000C000UL) {
        stop();
    }
    if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL << ps);
}
#endif
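
/* Build the ITIR value to report for a fault on address `ifa`: the page
 * size and RID are taken from the region register that covers ifa, and
 * every other field is left zero.
 */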
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr, rr1;
    rr = vmx_vcpu_rr(vcpu, ifa);
    rr1.rrval = 0;
    rr1.ps = rr.ps;
    rr1.rid = rr.rid;
    return (rr1.rrval);
}
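
/* Emulate the guest "rfi" instruction: reload the virtual PSR from the
 * virtual cr.ipsr, restore the register frame from the virtual cr.ifs
 * when its valid bit is set and it describes a non-empty frame, and
 * resume the guest at the virtual cr.iip.
 */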
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    UINT64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VPD_CR(vcpu, ipsr);
    vmx_vcpu_set_psr(vcpu, psr);
    ifs = VPD_CR(vcpu, ifs);
    if ((ifs >> 63) && (ifs << 1)) {
        ifs = (regs->cr_ifs) & 0x7f;
        regs->rfi_pfs = (ifs << 7) | ifs;
        regs->cr_ifs = VPD_CR(vcpu, ifs);
    }
    regs->cr_iip = VPD_CR(vcpu, iip);
    return (IA64_NO_FAULT);
}


UINT64
vmx_vcpu_get_psr(VCPU *vcpu)
{
    return VMX_VPD(vcpu, vpsr);
}
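
/* Banked general registers r16-r31. The copy the guest currently sees is
 * selected by vpsr.bn: when bn is set the values and NaT bits come from
 * the VPD vgr[]/vnat fields, otherwise from vbgr[]/vbnat. A set NaT bit
 * is reported as IA64_FAULT (NaT consumption) rather than a value.
 */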
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        *val = VMX_VPD(vcpu, vgr[reg - 16]);
        // Check NAT bit
        if (VMX_VPD(vcpu, vnat) & (1UL << (reg - 16))) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val = VMX_VPD(vcpu, vbgr[reg - 16]);
        if (VMX_VPD(vcpu, vbnat) & (1UL << reg)) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        VMX_VPD(vcpu, vgr[reg - 16]) = val;
        if (nat) {
            VMX_VPD(vcpu, vnat) |= (1UL << (reg - 16));
        } else {
            VMX_VPD(vcpu, vnat) &= ~(1UL << (reg - 16));
        }
    }
    else {
        VMX_VPD(vcpu, vbgr[reg - 16]) = val;
        if (nat) {
            VMX_VPD(vcpu, vbnat) |= (1UL << reg);
        } else {
            VMX_VPD(vcpu, vbnat) &= ~(1UL << reg);
        }
    }
    return IA64_NO_FAULT;
}
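
/* Read guest general register `reg`. r16-r31 are served from the banked
 * copies above; all other registers come from the saved machine register
 * file via getreg(). A register whose NaT bit is set yields IA64_FAULT.
 */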
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, UINT64 *val)
{
    REGS *regs = vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return 0;
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu, reg, val);
    }
    getreg(reg, val, &nat, regs);    // FIXME: handle NATs later
    if (nat) {
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    sof = (regs->cr_ifs) & 0x7f;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_set_bgr(vcpu, reg, value, nat);
    }
    setreg(reg, value, nat, regs);
    return IA64_NO_FAULT;
}
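
/* Presumably the backing for the guest's rsm/ssm emulation: clear or set
 * the PSR bits named by the 24-bit immediate in the virtual PSR, then
 * push the result through vmx_vcpu_set_psr() so it is folded into the
 * machine state.
 */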
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
{
    UINT64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}


IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VPD_CR(vcpu, tpr) = val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}