ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_vcpu.c @ 16104:f0f4cd789eaa

[IA64] Clean up: vmx_vcpu_set_rr()

vmx_vcpu_set_rr(): remove the obsolete variable, oldrr.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Fri Oct 12 14:10:02 2007 -0600 (2007-10-12)
parents 5c56ce7b9892
children 4e45ba84a1fa
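
The cleanup itself is the routine pattern of deleting a local that is assigned but never read. A minimal before/after sketch of that pattern (the variable name oldrr comes from the commit message; the surrounding lines are illustrative, not the actual diff):

    /* before (illustrative): oldrr is computed but its value never used */
    ia64_rr oldrr, newrr;
    oldrr.rrval = VMX(vcpu, vrr[reg >> VRN_SHIFT]);  /* result never read */
    newrr.rrval = val;

    /* after: only the local that is actually used remains */
    ia64_rr newrr;
    newrr.rrval = val;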
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Fred Yang (fred.yang@intel.com)
 * Arun Sharma (arun.sharma@intel.com)
 * Shaofan Li (Susie Li) <susie.li@intel.com>
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_phy_mode.h>
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <linux/efi.h>

//unsigned long last_guest_rsm = 0x0;
#ifdef VTI_DEBUG
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
#endif
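
/*
 * Program the machine DCR (default control register) for this vcpu
 * from the architected default bits.
 */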
void
vmx_ia64_set_dcr(VCPU *v)
{
    unsigned long dcr_bits = IA64_DEFAULT_DCR_BITS;

    // if the guest is running at cpl > 0, set dcr.dm=1;
    // if the guest is running at cpl = 0, set dcr.dm=0,
    // because the guest OS may ld.s on a tr-mapped page.
    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
        dcr_bits &= ~IA64_DCR_DM;

    ia64_set_dcr(dcr_bits);
}
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;

    old_psr.val = VCPU(vcpu, vpsr);
    regs = vcpu_regs(vcpu);
    /* We only support a guest with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic_domain(regs, "Setting unsupported guest psr!");
    }

    /*
     * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
     * instruction executes successfully, so they are not kept in
     * the virtual PSR; they pass straight through to the machine PSR.
     */
    VCPU(vcpu, vpsr) = value &
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_ED | IA64_PSR_IA));

    if (!old_psr.i && (value & IA64_PSR_I)) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = VCPU(vcpu, vpsr);
#ifdef VTI_DEBUG
    {
        struct pt_regs *regs = vcpu_regs(vcpu);
        guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
        guest_psr_buf[guest_psr_index].psr = new_psr.val;
        if (++guest_psr_index >= 100)
            guest_psr_index = 0;
    }
#endif
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All virtual PSR bits go to the machine PSR (v->tf->tf_special.psr),
     * except for the following bits:
     * ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
           IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
           IA64_PSR_VM;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & ~mask);

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
}
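
/*
 * Emulate the cover instruction: if interruption collection is off
 * (vpsr.ic == 0), save the current frame marker into the virtual
 * cr.ifs, then reset cr.ifs to a valid, zero-size frame.
 */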
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if (!vpsr.ic)
        VCPU(vcpu, ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return IA64_NO_FAULT;
}
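
/*
 * Emulate mov rr[reg] = val: record the value in the virtual region
 * registers and install the translated machine value.  rr7 goes
 * through vmx_switch_rr7() so the VHPT and PAL mappings in region 7
 * are re-established; rr0/rr4 are shadowed for metaphysical mode and
 * only loaded into hardware while the vcpu is in virtual mode.
 */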
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    ia64_rr newrr;
    u64 rrval;

    newrr.rrval = val;
    if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
        panic_domain(NULL, "use of invalid rid %x\n", newrr.rid);

    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch ((u64)(reg >> VRN_SHIFT)) {
    case VRN7:
        vmx_switch_rr7(vrrtomrr(vcpu, val),
                       (void *)vcpu->arch.vhpt.hash, pal_vaddr);
        break;
    case VRN4:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu))
            ia64_set_rr(reg, rrval);
        break;
    default:
        ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }

    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/
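
/*
 * The guest's protection key registers are simply passed through to
 * the machine pkrs.
 */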
u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
{
    return (u64)ia64_get_pkr(reg);
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
{
    ia64_set_pkr(reg, val);
    return IA64_NO_FAULT;
}
#if 0
int tlb_debug = 0;
static void check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps) - 1);
    if (va == 0x2000000002908000UL ||
        va == 0x600000000000C000UL) {
        stop();
    }
    if (tlb_debug)
        printk("%s at %lx %lx\n", str, va, 1UL << ps);
}
#endif
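
/*
 * Build the ITIR value reported on a TLB fault at address ifa: the
 * page size and rid come from the region register covering ifa, all
 * other fields are zero.
 */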
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr, rr1;

    vcpu_get_rr(vcpu, ifa, &rr.rrval);
    rr1.rrval = 0;
    rr1.ps = rr.ps;
    rr1.rid = rr.rid;
    return rr1.rrval;
}
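
/*
 * Emulate rfi: switch the register bank according to the saved
 * ipsr.bn, restore the guest PSR and machine DCR, and resume at the
 * saved iip (cr.ifs is only restored when its valid bit is set).
 */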
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    u64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);

    psr = VCPU(vcpu, ipsr);
    if (psr & IA64_PSR_BN)
        vcpu_bsw1(vcpu);
    else
        vcpu_bsw0(vcpu);
    vmx_vcpu_set_psr(vcpu, psr);
    vmx_ia64_set_dcr(vcpu);
    ifs = VCPU(vcpu, ifs);
    if (ifs >> 63)
        regs->cr_ifs = ifs;
    regs->cr_iip = VCPU(vcpu, iip);
    return IA64_NO_FAULT;
}
/**************************************************************************
 VCPU general register access routines
**************************************************************************/

#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        *val = VCPU(vcpu, vgr[reg - 16]);
        // Check NAT bit
        if (VCPU(vcpu, vnat) & (1UL << (reg - 16))) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val = VCPU(vcpu, vbgr[reg - 16]);
        if (VCPU(vcpu, vbnat) & (1UL << (reg - 16))) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        VCPU(vcpu, vgr[reg - 16]) = val;
        if (nat)
            VCPU(vcpu, vnat) |= 1UL << (reg - 16);
        else
            VCPU(vcpu, vnat) &= ~(1UL << (reg - 16));
    }
    else {
        VCPU(vcpu, vbgr[reg - 16]) = val;
        if (nat)
            VCPU(vcpu, vbnat) |= 1UL << (reg - 16);
        else
            VCPU(vcpu, vbnat) &= ~(1UL << (reg - 16));
    }
    return IA64_NO_FAULT;
}
#endif

#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 *val)
{
    REGS *regs = vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs)
        return 0;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu, reg, val);
    }
#endif
    getreg(reg, val, &nat, regs); // FIXME: handle NATs later
    if (nat) {
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise
IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof;
    //TODO Eddie

    if (!regs)
        return IA64_ILLOP_FAULT;
    sof = regs->cr_ifs & 0x7f;   // size of the current frame
    if (reg >= sof + 32)
        return IA64_ILLOP_FAULT;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_set_bgr(vcpu, reg, value, nat);
    }
#endif
    setreg(reg, value, nat, regs);
    return IA64_NO_FAULT;
}
#endif
/*
 * The virtual PSR cannot track the bits below; reconstruct the full
 * guest PSR by taking them from the machine cr.ipsr.
 */
u64 vmx_vcpu_get_psr(VCPU *vcpu)
{
    u64 mask;
    REGS *regs = vcpu_regs(vcpu);

    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
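
/*
 * Emulate rsm imm24: clear the bits given by imm24 in the guest PSR.
 */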
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;

    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= ~imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}
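
/*
 * Emulate ssm imm24: set the bits given by imm24 in the guest PSR.
 */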
IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;

    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}
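
/*
 * Emulate mov psr.l = val: replace the low 32 bits of the guest PSR,
 * keeping the upper 32 bits unchanged.
 */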
IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
{
    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}
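
/*
 * Set the virtual TPR and note that pending interrupts must be
 * re-evaluated against the new priority mask.
 */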
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu, tpr) = val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}