ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_vcpu.c @ 18085:4f0428e4dd15

[IA64] kexec: Unpin shared_info, mapped_regs and VPD TR in ia64_do_tlb_purge

Unpinning of shared_info, mapped_regs and the VPD is missing from
ia64_do_tlb_purge and is needed for kexec.

As with the VHPT, each pinned value is recorded in a percpu variable
so that the correct value can be unpinned.

Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents 0b72d16e794b
children 7da7b53b2139
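
In outline, the unpin step described above can be pictured with the C sketch below. This is only a sketch, not the actual change (the real ia64_do_tlb_purge is assembly, and the helper name unpin_tr_mappings and the purge size are placeholders). inserted_vhpt, inserted_vpd and inserted_mapped_regs are the percpu variables recorded by vmx_switch_rr7() in the source that follows; shared_info is recorded the same way by code outside this file.

/* Illustrative sketch, not the actual patch: purge the data TRs recorded
 * per cpu so a kexec'd kernel does not inherit stale pinned mappings. */
static void unpin_tr_mappings(void)                /* hypothetical helper */
{
    unsigned long addr;

    addr = __get_cpu_var(inserted_vhpt);
    if (addr)
        ia64_ptrd(addr, IA64_GRANULE_SHIFT << 2);  /* size is a placeholder */

    addr = __get_cpu_var(inserted_vpd);
    if (addr)
        ia64_ptrd(addr, IA64_GRANULE_SHIFT << 2);

    addr = __get_cpu_var(inserted_mapped_regs);
    if (addr)
        ia64_ptrd(addr, IA64_GRANULE_SHIFT << 2);

    ia64_srlz_d();                                 /* serialize after the purges */
}
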
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Arun Sharma (arun.sharma@intel.com)
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_phy_mode.h>
#include <asm/debugger.h>

/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <linux/efi.h>

//unsigned long last_guest_rsm = 0x0;

#ifdef VTI_DEBUG
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
#endif

void
vmx_ia64_set_dcr(VCPU *v)
{
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    unsigned long dcr_bits = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) |
                             (ia64_getreg(_IA64_REG_CR_DCR) & IA64_DCR_PP);

    // If the guest is running at cpl > 0, set dcr.dm = 1;
    // if the guest is running at cpl = 0, set dcr.dm = 0,
    // because the guest OS may ld.s on a tr-mapped page.
    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
        dcr_bits &= ~IA64_DCR_DM;

    ia64_set_dcr(dcr_bits);
}

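/* Write the virtual PSR and fold the permitted bits into the machine
 * cr.ipsr in the guest's register frame. */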
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;

    old_psr.val = VCPU(vcpu, vpsr);
    regs = vcpu_regs(vcpu);
    /* We only support a guest with:
     *   vpsr.pk = 0
     *   vpsr.is = 0
     * Otherwise panic.
     */
    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
        panic_domain (regs, "Setting unsupported guest psr!");
    }

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after each instruction executes
     * successfully, clear them here instead of tracking them.
     */
    VCPU(vcpu, vpsr) = value &
        (~ (IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
            IA64_PSR_ED | IA64_PSR_IA));

    if ( !old_psr.i && (value & IA64_PSR_I) ) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = VCPU(vcpu, vpsr);
#ifdef VTI_DEBUG
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
#endif
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     * ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
           IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
           IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & (~mask));

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}

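/* Emulate the cover instruction: when interruption collection is off,
 * latch the current frame marker into the guest's vIFS, then make
 * cr.ifs report a valid, zero-sized frame. */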
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if(!vpsr.ic)
        VCPU(vcpu,ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}

/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_set_rr_fast(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch((u64)(reg >> VRN_SHIFT)) {
    case VRN4:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }
}

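/* Besides switching rr7, record the newly pinned VHPT, VPD and
 * mapped_regs addresses in percpu variables so that ia64_do_tlb_purge
 * can later unpin the correct values (needed for kexec). */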
void vmx_switch_rr7(unsigned long rid, void *guest_vhpt,
                    void *pal_vaddr, void *shared_arch_info)
{
    __get_cpu_var(inserted_vhpt) = (unsigned long)guest_vhpt;
    __get_cpu_var(inserted_vpd) = (unsigned long)shared_arch_info;
    __get_cpu_var(inserted_mapped_regs) = (unsigned long)shared_arch_info;
    __vmx_switch_rr7(rid, guest_vhpt, pal_vaddr, shared_arch_info);
}

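/* Write a virtual region register: reject reserved RIDs, record the
 * value in the VMX state and, for the current vcpu, propagate it to
 * the machine region register (rr7 goes through vmx_switch_rr7). */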
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
    switch((u64)(reg>>VRN_SHIFT)) {
    case VRN7:
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu,val), (void *)vcpu->arch.vhpt.hash,
                           pal_vaddr, vcpu->arch.privregs);
        break;
    case VRN4:
        rrval = vrrtomrr(vcpu,val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg,rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu,val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg,rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg,vrrtomrr(vcpu,val));
        break;
    }

    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
{
    return ((u64)ia64_get_pkr(reg));
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
{
    ia64_set_pkr(reg,val);
    return (IA64_NO_FAULT);
}

#if 0
int tlb_debug=0;
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps)-1);
    if ( va == 0x2000000002908000UL ||
         va == 0x600000000000C000UL ) {
        stop();
    }
    if (tlb_debug) printk("%s at %lx %lx\n", str, va, 1UL<<ps);
}
#endif

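/* Construct the ITIR value reported on a fault: only the page size and
 * rid of the region register covering the faulting address are kept. */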
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr,rr1;
    vcpu_get_rr(vcpu,ifa,&rr.rrval);
    rr1.rrval=0;
    rr1.ps=rr.ps;
    rr1.rid=rr.rid;
    return (rr1.rrval);
}

/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_mov_to_psr_fast(VCPU *vcpu, u64 value)
{
    /* TODO: Only allowed for current vcpu */
    u64 old_vpsr, new_vpsr, mipsr, mask;

    old_vpsr = VCPU(vcpu, vpsr);
    new_vpsr = (old_vpsr & 0xffffffff00000000) | (value & 0xffffffff);
    VCPU(vcpu, vpsr) = new_vpsr;

    mipsr = ia64_getreg(_IA64_REG_CR_IPSR);

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask = 0xffffffff00000000 | IA64_PSR_IC | IA64_PSR_I
           | IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SI | IA64_PSR_RT;

    mipsr = (mipsr & mask) | (value & (~mask));

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        mipsr |= IA64_PSR_DFH;

    ia64_setreg(_IA64_REG_CR_IPSR, mipsr);

    switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
}

#define IA64_PSR_MMU_VIRT (IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT)

/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_rfi_fast(VCPU *vcpu)
{
    /* TODO: Only allowed for current vcpu */
    u64 vifs, vipsr, vpsr, mipsr, mask;

    vipsr = VCPU(vcpu, ipsr);
    vpsr = VCPU(vcpu, vpsr);
    vifs = VCPU(vcpu, ifs);
    if (vipsr & IA64_PSR_BN) {
        if(!(vpsr & IA64_PSR_BN))
            vmx_asm_bsw1();
    } else if (vpsr & IA64_PSR_BN)
        vmx_asm_bsw0();

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after each instruction executes
     * successfully, clear them here instead of tracking them.
     */
    VCPU(vcpu, vpsr) = vipsr & (~ (IA64_PSR_ID | IA64_PSR_DA
                                   | IA64_PSR_DD | IA64_PSR_ED | IA64_PSR_IA));

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     * ic/i/dt/si/rt/mc/it/bn/vm
     */
    /* xenoprof */
    mask = (IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
            IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
            IA64_PSR_VM | IA64_PSR_PP);
    mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
    mipsr = (mipsr & mask) | (vipsr & (~mask));

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        mipsr |= IA64_PSR_DFH;

    ia64_setreg(_IA64_REG_CR_IPSR, mipsr);
    vmx_ia64_set_dcr(vcpu);

    if(vifs >> 63)
        ia64_setreg(_IA64_REG_CR_IFS, vifs);

    ia64_setreg(_IA64_REG_CR_IIP, VCPU(vcpu, iip));

    switch_mm_mode_fast(vcpu, (IA64_PSR)vpsr, (IA64_PSR)vipsr);
}

/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_ssm_fast(VCPU *vcpu, u64 imm24)
{
    u64 old_vpsr, new_vpsr, mipsr;

    old_vpsr = VCPU(vcpu, vpsr);
    new_vpsr = old_vpsr | imm24;

    VCPU(vcpu, vpsr) = new_vpsr;

    mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mipsr |= imm24 & (~IA64_PSR_PP);
    ia64_setreg(_IA64_REG_CR_IPSR, mipsr);

    switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
}

/* In fast path, psr.ic = 0, psr.i = 0, psr.bn = 0
 * so that no tlb miss is allowed.
 */
void vmx_vcpu_rsm_fast(VCPU *vcpu, u64 imm24)
{
    u64 old_vpsr, new_vpsr, mipsr;

    old_vpsr = VCPU(vcpu, vpsr);
    new_vpsr = old_vpsr & ~imm24;

    VCPU(vcpu, vpsr) = new_vpsr;

    mipsr = ia64_getreg(_IA64_REG_CR_IPSR);
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mipsr &= (~imm24) | IA64_PSR_PP;
    mipsr |= IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI;

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        mipsr |= IA64_PSR_DFH;

    ia64_setreg(_IA64_REG_CR_IPSR, mipsr);

    switch_mm_mode_fast(vcpu, (IA64_PSR)old_vpsr, (IA64_PSR)new_vpsr);
}

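/* Emulate rfi: select the register bank from vIPSR.bn, load vIPSR as
 * the guest PSR, restore cr.ifs from vIFS when it is valid, and resume
 * at vIIP. */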
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    u64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);

    psr = VCPU(vcpu,ipsr);
    if (psr & IA64_PSR_BN)
        vcpu_bsw1(vcpu);
    else
        vcpu_bsw0(vcpu);
    vmx_vcpu_set_psr(vcpu,psr);
    vmx_ia64_set_dcr(vcpu);
    ifs=VCPU(vcpu,ifs);
    if(ifs>>63)
        regs->cr_ifs = ifs;
    regs->cr_iip = VCPU(vcpu,iip);
    return (IA64_NO_FAULT);
}

#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        *val=VCPU(vcpu,vgr[reg-16]);
        // Check NAT bit
        if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val=VCPU(vcpu,vbgr[reg-16]);
        if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        VCPU(vcpu,vgr[reg-16]) = val;
        if(nat){
            VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
        }else{
            VCPU(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
        }
    }
    else {
        VCPU(vcpu,vbgr[reg-16]) = val;
        if(nat){
            VCPU(vcpu,vnat) |= ( 1UL<<(reg) );
        }else{
            VCPU(vcpu,vbnat) &= ~( 1UL<<(reg) );
        }
    }
    return IA64_NO_FAULT;
}
#endif

#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 * val)
{
    REGS *regs=vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return 0;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu,reg,val);
    }
#endif
    getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
    if(nat){
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise

IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof = (regs->cr_ifs) & 0x7f;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
#if 0
    if ( reg >= 16 && reg < 32 ) {
        return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
    }
#endif
    setreg(reg,value,nat,regs);
    return IA64_NO_FAULT;
}
#endif

/*
 * The VPSR cannot keep track of the guest PSR bits listed in the mask
 * below; this function assembles the full guest PSR.
 */
u64 vmx_vcpu_get_psr(VCPU *vcpu)
{
    u64 mask;
    REGS *regs = vcpu_regs(vcpu);
    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}

IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
{
    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}

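/* Update the virtual TPR and request re-evaluation of pending
 * interrupts before returning to the guest. */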
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,tpr)=val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}