ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_virt.c @ 10697:4834d1e8f26e

[IA64] optimize vpsr

vpsr can't keep track of the following bits of the guest psr:
be, up, ac, mfl, mfh, cpl, ri.
Previously, every time Xen got control it would sync vpsr with
cr.ipsr, which is not necessary.
Xen now syncs with cr.ipsr only when needed.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Fri Jul 14 11:05:40 2006 -0600 (2006-07-14)
parents 6703fed8870f
children bfc69471550e
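As a rough illustration of the approach (a minimal sketch, not the code from this changeset): a guest-PSR read can merge the software-tracked vpsr with the bits that only the live cr.ipsr keeps current (be, up, ac, mfl, mfh, cpl, ri), so no per-exit sync is required. The helper name example_get_guest_psr and the VCPU()/vcpu_regs() accessors are assumptions used only for illustration.

/* Hedged sketch: merge the tracked vpsr with the live cr.ipsr bits
 * at read time instead of re-syncing vpsr on every VM exit.
 * example_get_guest_psr is a hypothetical helper; VCPU(vcpu, vpsr)
 * and vcpu_regs(vcpu)->cr_ipsr are assumed accessors for the
 * software vpsr copy and the hardware interruption PSR. */
static UINT64 example_get_guest_psr(VCPU *vcpu)
{
    IA64_PSR psr, ipsr;

    psr.val  = VCPU(vcpu, vpsr);          /* bits Xen tracks itself */
    ipsr.val = vcpu_regs(vcpu)->cr_ipsr;  /* bits hardware keeps current */

    /* be/up/ac/mfl/mfh/cpl/ri are never tracked in vpsr, so take
     * them from cr.ipsr only when the guest PSR is actually read. */
    psr.be  = ipsr.be;
    psr.up  = ipsr.up;
    psr.ac  = ipsr.ac;
    psr.mfl = ipsr.mfl;
    psr.mfh = ipsr.mfh;
    psr.cpl = ipsr.cpl;
    psr.ri  = ipsr.ri;
    return psr.val;
}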
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_virt.c:
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Fred yang (fred.yang@intel.com)
20 * Shaofan Li (Susue Li) <susie.li@intel.com>
21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
22 */
23 #include <asm/bundle.h>
24 #include <asm/vmx_vcpu.h>
25 #include <asm/processor.h>
26 #include <asm/delay.h> // Debug only
27 #include <asm/vmmu.h>
28 #include <asm/vmx_mm_def.h>
29 #include <asm/smp.h>
30 #include <asm/vmx.h>
31 #include <asm/virt_event.h>
32 #include <asm/vmx_phy_mode.h>
34 void
35 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause)
36 {
37 *cause=0;
38 switch (slot_type) {
39 case M:
40 if (inst.generic.major==0){
41 if(inst.M28.x3==0){
42 if(inst.M44.x4==6){
43 *cause=EVENT_SSM;
44 }else if(inst.M44.x4==7){
45 *cause=EVENT_RSM;
46 }else if(inst.M30.x4==8&&inst.M30.x2==2){
47 *cause=EVENT_MOV_TO_AR_IMM;
48 }
49 }
50 }
51 else if(inst.generic.major==1){
52 if(inst.M28.x3==0){
53 if(inst.M32.x6==0x2c){
54 *cause=EVENT_MOV_TO_CR;
55 }else if(inst.M33.x6==0x24){
56 *cause=EVENT_MOV_FROM_CR;
57 }else if(inst.M35.x6==0x2d){
58 *cause=EVENT_MOV_TO_PSR;
59 }else if(inst.M36.x6==0x25){
60 *cause=EVENT_MOV_FROM_PSR;
61 }else if(inst.M29.x6==0x2A){
62 *cause=EVENT_MOV_TO_AR;
63 }else if(inst.M31.x6==0x22){
64 *cause=EVENT_MOV_FROM_AR;
65 }else if(inst.M45.x6==0x09){
66 *cause=EVENT_PTC_L;
67 }else if(inst.M45.x6==0x0A){
68 *cause=EVENT_PTC_G;
69 }else if(inst.M45.x6==0x0B){
70 *cause=EVENT_PTC_GA;
71 }else if(inst.M45.x6==0x0C){
72 *cause=EVENT_PTR_D;
73 }else if(inst.M45.x6==0x0D){
74 *cause=EVENT_PTR_I;
75 }else if(inst.M46.x6==0x1A){
76 *cause=EVENT_THASH;
77 }else if(inst.M46.x6==0x1B){
78 *cause=EVENT_TTAG;
79 }else if(inst.M46.x6==0x1E){
80 *cause=EVENT_TPA;
81 }else if(inst.M46.x6==0x1F){
82 *cause=EVENT_TAK;
83 }else if(inst.M47.x6==0x34){
84 *cause=EVENT_PTC_E;
85 }else if(inst.M41.x6==0x2E){
86 *cause=EVENT_ITC_D;
87 }else if(inst.M41.x6==0x2F){
88 *cause=EVENT_ITC_I;
89 }else if(inst.M42.x6==0x00){
90 *cause=EVENT_MOV_TO_RR;
91 }else if(inst.M42.x6==0x01){
92 *cause=EVENT_MOV_TO_DBR;
93 }else if(inst.M42.x6==0x02){
94 *cause=EVENT_MOV_TO_IBR;
95 }else if(inst.M42.x6==0x03){
96 *cause=EVENT_MOV_TO_PKR;
97 }else if(inst.M42.x6==0x04){
98 *cause=EVENT_MOV_TO_PMC;
99 }else if(inst.M42.x6==0x05){
100 *cause=EVENT_MOV_TO_PMD;
101 }else if(inst.M42.x6==0x0E){
102 *cause=EVENT_ITR_D;
103 }else if(inst.M42.x6==0x0F){
104 *cause=EVENT_ITR_I;
105 }else if(inst.M43.x6==0x10){
106 *cause=EVENT_MOV_FROM_RR;
107 }else if(inst.M43.x6==0x11){
108 *cause=EVENT_MOV_FROM_DBR;
109 }else if(inst.M43.x6==0x12){
110 *cause=EVENT_MOV_FROM_IBR;
111 }else if(inst.M43.x6==0x13){
112 *cause=EVENT_MOV_FROM_PKR;
113 }else if(inst.M43.x6==0x14){
114 *cause=EVENT_MOV_FROM_PMC;
115 /*
116 }else if(inst.M43.x6==0x15){
117 *cause=EVENT_MOV_FROM_PMD;
118 */
119 }else if(inst.M43.x6==0x17){
120 *cause=EVENT_MOV_FROM_CPUID;
121 }
122 }
123 }
124 break;
125 case B:
126 if(inst.generic.major==0){
127 if(inst.B8.x6==0x02){
128 *cause=EVENT_COVER;
129 }else if(inst.B8.x6==0x08){
130 *cause=EVENT_RFI;
131 }else if(inst.B8.x6==0x0c){
132 *cause=EVENT_BSW_0;
133 }else if(inst.B8.x6==0x0d){
134 *cause=EVENT_BSW_1;
135 }
136 }
137 case I:
138 case F:
139 case L:
140 case ILLEGAL:
141 break;
142 }
143 }
145 IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
146 {
147 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
148 return vmx_vcpu_reset_psr_sm(vcpu,imm24);
149 }
151 IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
152 {
153 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
154 return vmx_vcpu_set_psr_sm(vcpu,imm24);
155 }
157 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
158 {
159 UINT64 tgt = inst.M33.r1;
160 UINT64 val;
162 /*
163 if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
164 return vcpu_set_gr(vcpu, tgt, val);
165 else return fault;
166 */
167 val = vmx_vcpu_get_psr(vcpu);
168 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
169 return vcpu_set_gr(vcpu, tgt, val, 0);
170 }
172 /**
173 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
174 */
175 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
176 {
177 UINT64 val;
179 if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
180 panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
182 return vmx_vcpu_set_psr_l(vcpu, val);
183 }
186 /**************************************************************************
187 Privileged operation emulation routines
188 **************************************************************************/
190 IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
191 {
192 IA64_PSR vpsr;
193 REGS *regs;
194 #ifdef CHECK_FAULT
195 vpsr.val=vmx_vcpu_get_psr(vcpu);
196 if ( vpsr.cpl != 0) {
197 /* Inject Privileged Operation fault into guest */
198 set_privileged_operation_isr (vcpu, 0);
199 privilege_op (vcpu);
200 return IA64_FAULT;
201 }
202 #endif // CHECK_FAULT
203 regs=vcpu_regs(vcpu);
204 vpsr.val=regs->cr_ipsr;
205 if ( vpsr.is == 1 ) {
206 panic_domain(regs,"We do not support IA32 instruction yet");
207 }
209 return vmx_vcpu_rfi(vcpu);
210 }
212 IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
213 {
214 #ifdef CHECK_FAULT
215 IA64_PSR vpsr;
216 vpsr.val=vmx_vcpu_get_psr(vcpu);
217 if ( vpsr.cpl != 0) {
218 /* Inject Privileged Operation fault into guest */
219 set_privileged_operation_isr (vcpu, 0);
220 privilege_op (vcpu);
221 return IA64_FAULT;
222 }
223 #endif // CHECK_FAULT
224 return vcpu_bsw0(vcpu);
225 }
227 IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
228 {
229 #ifdef CHECK_FAULT
230 IA64_PSR vpsr;
231 vpsr.val=vmx_vcpu_get_psr(vcpu);
232 if ( vpsr.cpl != 0) {
233 /* Inject Privileged Operation fault into guest */
234 set_privileged_operation_isr (vcpu, 0);
235 privilege_op (vcpu);
236 return IA64_FAULT;
237 }
238 #endif // CHECK_FAULT
239 return vcpu_bsw1(vcpu);
240 }
242 IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
243 {
244 return vmx_vcpu_cover(vcpu);
245 }
247 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
248 {
249 u64 r2,r3;
250 #ifdef VMAL_NO_FAULT_CHECK
251 IA64_PSR vpsr;
253 vpsr.val=vmx_vcpu_get_psr(vcpu);
254 if ( vpsr.cpl != 0) {
255 /* Inject Privileged Operation fault into guest */
256 set_privileged_operation_isr (vcpu, 0);
257 privilege_op (vcpu);
258 return IA64_FAULT;
259 }
260 #endif // VMAL_NO_FAULT_CHECK
261 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
262 #ifdef VMAL_NO_FAULT_CHECK
263 ISR isr;
264 set_isr_reg_nat_consumption(vcpu,0,0);
265 rnat_comsumption(vcpu);
266 return IA64_FAULT;
267 #endif // VMAL_NO_FAULT_CHECK
268 }
269 #ifdef VMAL_NO_FAULT_CHECK
270 if (unimplemented_gva(vcpu,r3) ) {
271 isr.val = set_isr_ei_ni(vcpu);
272 isr.code = IA64_RESERVED_REG_FAULT;
273 vcpu_set_isr(vcpu, isr.val);
274 unimpl_daddr(vcpu);
275 return IA64_FAULT;
276 }
277 #endif // VMAL_NO_FAULT_CHECK
278 return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
279 }
281 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
282 {
283 u64 r3;
284 #ifdef VMAL_NO_FAULT_CHECK
285 IA64_PSR vpsr;
287 vpsr.val=vmx_vcpu_get_psr(vcpu);
288 ISR isr;
289 if ( vpsr.cpl != 0) {
290 /* Inject Privileged Operation fault into guest */
291 set_privileged_operation_isr (vcpu, 0);
292 privilege_op (vcpu);
293 return IA64_FAULT;
294 }
295 #endif // VMAL_NO_FAULT_CHECK
296 if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
297 #ifdef VMAL_NO_FAULT_CHECK
298 set_isr_reg_nat_consumption(vcpu,0,0);
299 rnat_comsumption(vcpu);
300 return IA64_FAULT;
301 #endif // VMAL_NO_FAULT_CHECK
302 }
303 return vmx_vcpu_ptc_e(vcpu,r3);
304 }
306 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
307 {
308 u64 r2,r3;
309 #ifdef VMAL_NO_FAULT_CHECK
310 IA64_PSR vpsr;
311 vpsr.val=vmx_vcpu_get_psr(vcpu);
312 if ( vpsr.cpl != 0) {
313 /* Inject Privileged Operation fault into guest */
314 set_privileged_operation_isr (vcpu, 0);
315 privilege_op (vcpu);
316 return IA64_FAULT;
317 }
318 #endif // VMAL_NO_FAULT_CHECK
319 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
320 #ifdef VMAL_NO_FAULT_CHECK
321 ISR isr;
322 set_isr_reg_nat_consumption(vcpu,0,0);
323 rnat_comsumption(vcpu);
324 return IA64_FAULT;
325 #endif // VMAL_NO_FAULT_CHECK
326 }
327 #ifdef VMAL_NO_FAULT_CHECK
328 if (unimplemented_gva(vcpu,r3) ) {
329 isr.val = set_isr_ei_ni(vcpu);
330 isr.code = IA64_RESERVED_REG_FAULT;
331 vcpu_set_isr(vcpu, isr.val);
332 unimpl_daddr(vcpu);
333 return IA64_FAULT;
334 }
335 #endif // VMAL_NO_FAULT_CHECK
336 return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
337 }
339 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
340 {
341 u64 r2,r3;
342 #ifdef VMAL_NO_FAULT_CHECK
343 IA64_PSR vpsr;
344 vpsr.val=vmx_vcpu_get_psr(vcpu);
345 if ( vpsr.cpl != 0) {
346 /* Inject Privileged Operation fault into guest */
347 set_privileged_operation_isr (vcpu, 0);
348 privilege_op (vcpu);
349 return IA64_FAULT;
350 }
351 #endif // VMAL_NO_FAULT_CHECK
352 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
353 #ifdef VMAL_NO_FAULT_CHECK
354 ISR isr;
355 set_isr_reg_nat_consumption(vcpu,0,0);
356 rnat_comsumption(vcpu);
357 return IA64_FAULT;
358 #endif // VMAL_NO_FAULT_CHECK
359 }
360 #ifdef VMAL_NO_FAULT_CHECK
361 if (unimplemented_gva(vcpu,r3) ) {
362 isr.val = set_isr_ei_ni(vcpu);
363 isr.code = IA64_RESERVED_REG_FAULT;
364 vcpu_set_isr(vcpu, isr.val);
365 unimpl_daddr(vcpu);
366 return IA64_FAULT;
367 }
368 #endif // VMAL_NO_FAULT_CHECK
369 return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
370 }
372 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
373 {
374 IA64FAULT ret1, ret2;
376 #ifdef VMAL_NO_FAULT_CHECK
377 ISR isr;
378 IA64_PSR vpsr;
379 vpsr.val=vmx_vcpu_get_psr(vcpu);
380 if ( vpsr.cpl != 0) {
381 /* Inject Privileged Operation fault into guest */
382 set_privileged_operation_isr (vcpu, 0);
383 privilege_op (vcpu);
384 return IA64_FAULT;
385 }
386 #endif // VMAL_NO_FAULT_CHECK
387 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
388 ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
389 #ifdef VMAL_NO_FAULT_CHECK
390 if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
391 set_isr_reg_nat_consumption(vcpu,0,0);
392 rnat_comsumption(vcpu);
393 return IA64_FAULT;
394 }
395 if (unimplemented_gva(vcpu,r3) ) {
396 isr.val = set_isr_ei_ni(vcpu);
397 isr.code = IA64_RESERVED_REG_FAULT;
398 vcpu_set_isr(vcpu, isr.val);
399 unimpl_daddr(vcpu);
400 return IA64_FAULT;
401 }
402 #endif // VMAL_NO_FAULT_CHECK
403 return IA64_NO_FAULT;
404 }
406 IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
407 {
408 u64 r2,r3;
409 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
410 return IA64_FAULT;
411 return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
412 }
414 IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
415 {
416 u64 r2,r3;
417 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
418 return IA64_FAULT;
419 return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
420 }
423 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
424 {
425 u64 r1,r3;
426 #ifdef CHECK_FAULT
427 ISR visr;
428 IA64_PSR vpsr;
429 if(check_target_register(vcpu, inst.M46.r1)){
430 set_illegal_op_isr(vcpu);
431 illegal_op(vcpu);
432 return IA64_FAULT;
433 }
434 #endif //CHECK_FAULT
435 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
436 #ifdef CHECK_FAULT
437 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
438 return IA64_NO_FAULT;
439 #endif //CHECK_FAULT
440 }
441 #ifdef CHECK_FAULT
442 if(unimplemented_gva(vcpu, r3)){
443 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
444 return IA64_NO_FAULT;
445 }
446 #endif //CHECK_FAULT
447 vmx_vcpu_thash(vcpu, r3, &r1);
448 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
449 return(IA64_NO_FAULT);
450 }
453 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
454 {
455 u64 r1,r3;
456 #ifdef CHECK_FAULT
457 ISR visr;
458 IA64_PSR vpsr;
459 #endif
460 #ifdef CHECK_FAULT
461 if(check_target_register(vcpu, inst.M46.r1)){
462 set_illegal_op_isr(vcpu);
463 illegal_op(vcpu);
464 return IA64_FAULT;
465 }
466 #endif //CHECK_FAULT
467 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
468 #ifdef CHECK_FAULT
469 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
470 return IA64_NO_FAULT;
471 #endif //CHECK_FAULT
472 }
473 #ifdef CHECK_FAULT
474 if(unimplemented_gva(vcpu, r3)){
475 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
476 return IA64_NO_FAULT;
477 }
478 #endif //CHECK_FAULT
479 vmx_vcpu_ttag(vcpu, r3, &r1);
480 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
481 return(IA64_NO_FAULT);
482 }
485 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
486 {
487 u64 r1,r3;
488 #ifdef CHECK_FAULT
489 ISR visr;
490 if(check_target_register(vcpu, inst.M46.r1)){
491 set_illegal_op_isr(vcpu);
492 illegal_op(vcpu);
493 return IA64_FAULT;
494 }
495 IA64_PSR vpsr;
496 vpsr.val=vmx_vcpu_get_psr(vcpu);
497 if(vpsr.cpl!=0){
498 visr.val=0;
499 vcpu_set_isr(vcpu, visr.val);
500 return IA64_FAULT;
501 }
502 #endif //CHECK_FAULT
503 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
504 #ifdef CHECK_FAULT
505 set_isr_reg_nat_consumption(vcpu,0,1);
506 rnat_comsumption(vcpu);
507 return IA64_FAULT;
508 #endif //CHECK_FAULT
509 }
510 #ifdef CHECK_FAULT
511 if (unimplemented_gva(vcpu,r3) ) {
512 // inject unimplemented_data_address_fault
513 visr.val = set_isr_ei_ni(vcpu);
514 visr.code = IA64_RESERVED_REG_FAULT;
515 vcpu_set_isr(vcpu, visr.val);
516 // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
517 unimpl_daddr(vcpu);
518 return IA64_FAULT;
519 }
520 #endif //CHECK_FAULT
522 if(vmx_vcpu_tpa(vcpu, r3, &r1)){
523 return IA64_FAULT;
524 }
525 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
526 return(IA64_NO_FAULT);
527 }
529 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
530 {
531 u64 r1,r3;
532 #ifdef CHECK_FAULT
533 ISR visr;
534 IA64_PSR vpsr;
535 int fault=IA64_NO_FAULT;
536 visr.val=0;
537 if(check_target_register(vcpu, inst.M46.r1)){
538 set_illegal_op_isr(vcpu);
539 illegal_op(vcpu);
540 return IA64_FAULT;
541 }
542 vpsr.val=vmx_vcpu_get_psr(vcpu);
543 if(vpsr.cpl!=0){
544 vcpu_set_isr(vcpu, visr.val);
545 return IA64_FAULT;
546 }
547 #endif
548 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
549 #ifdef CHECK_FAULT
550 set_isr_reg_nat_consumption(vcpu,0,1);
551 rnat_comsumption(vcpu);
552 return IA64_FAULT;
553 #endif
554 }
555 if(vmx_vcpu_tak(vcpu, r3, &r1)){
556 return IA64_FAULT;
557 }
558 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
559 return(IA64_NO_FAULT);
560 }
563 /************************************
564 * Insert translation register/cache
565 ************************************/
567 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
568 {
569 UINT64 itir, ifa, pte, slot;
570 #ifdef VMAL_NO_FAULT_CHECK
571 IA64_PSR vpsr;
572 vpsr.val=vmx_vcpu_get_psr(vcpu);
573 if ( vpsr.ic ) {
574 set_illegal_op_isr(vcpu);
575 illegal_op(vcpu);
576 return IA64_FAULT;
577 }
578 ISR isr;
579 if ( vpsr.cpl != 0) {
580 /* Inject Privileged Operation fault into guest */
581 set_privileged_operation_isr (vcpu, 0);
582 privilege_op (vcpu);
583 return IA64_FAULT;
584 }
585 #endif // VMAL_NO_FAULT_CHECK
586 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
587 #ifdef VMAL_NO_FAULT_CHECK
588 set_isr_reg_nat_consumption(vcpu,0,0);
589 rnat_comsumption(vcpu);
590 return IA64_FAULT;
591 #endif // VMAL_NO_FAULT_CHECK
592 }
593 #ifdef VMAL_NO_FAULT_CHECK
594 if(is_reserved_rr_register(vcpu, slot)){
595 set_illegal_op_isr(vcpu);
596 illegal_op(vcpu);
597 return IA64_FAULT;
598 }
599 #endif // VMAL_NO_FAULT_CHECK
601 if (vcpu_get_itir(vcpu,&itir)){
602 return(IA64_FAULT);
603 }
604 if (vcpu_get_ifa(vcpu,&ifa)){
605 return(IA64_FAULT);
606 }
607 #ifdef VMAL_NO_FAULT_CHECK
608 if (is_reserved_itir_field(vcpu, itir)) {
609 // TODO
610 return IA64_FAULT;
611 }
612 if (unimplemented_gva(vcpu,ifa) ) {
613 isr.val = set_isr_ei_ni(vcpu);
614 isr.code = IA64_RESERVED_REG_FAULT;
615 vcpu_set_isr(vcpu, isr.val);
616 unimpl_daddr(vcpu);
617 return IA64_FAULT;
618 }
619 #endif // VMAL_NO_FAULT_CHECK
621 return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
622 }
624 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
625 {
626 UINT64 itir, ifa, pte, slot;
627 #ifdef VMAL_NO_FAULT_CHECK
628 ISR isr;
629 IA64_PSR vpsr;
630 vpsr.val=vmx_vcpu_get_psr(vcpu);
631 if ( vpsr.ic ) {
632 set_illegal_op_isr(vcpu);
633 illegal_op(vcpu);
634 return IA64_FAULT;
635 }
636 if ( vpsr.cpl != 0) {
637 /* Inject Privileged Operation fault into guest */
638 set_privileged_operation_isr (vcpu, 0);
639 privilege_op (vcpu);
640 return IA64_FAULT;
641 }
642 #endif // VMAL_NO_FAULT_CHECK
643 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
644 #ifdef VMAL_NO_FAULT_CHECK
645 set_isr_reg_nat_consumption(vcpu,0,0);
646 rnat_comsumption(vcpu);
647 return IA64_FAULT;
648 #endif // VMAL_NO_FAULT_CHECK
649 }
650 #ifdef VMAL_NO_FAULT_CHECK
651 if(is_reserved_rr_register(vcpu, slot)){
652 set_illegal_op_isr(vcpu);
653 illegal_op(vcpu);
654 return IA64_FAULT;
655 }
656 #endif // VMAL_NO_FAULT_CHECK
658 if (vcpu_get_itir(vcpu,&itir)){
659 return(IA64_FAULT);
660 }
661 if (vcpu_get_ifa(vcpu,&ifa)){
662 return(IA64_FAULT);
663 }
664 #ifdef VMAL_NO_FAULT_CHECK
665 if (is_reserved_itir_field(vcpu, itir)) {
666 // TODO
667 return IA64_FAULT;
668 }
669 if (unimplemented_gva(vcpu,ifa) ) {
670 isr.val = set_isr_ei_ni(vcpu);
671 isr.code = IA64_RESERVED_REG_FAULT;
672 vcpu_set_isr(vcpu, isr.val);
673 unimpl_daddr(vcpu);
674 return IA64_FAULT;
675 }
676 #endif // VMAL_NO_FAULT_CHECK
678 return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
679 }
681 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
682 {
683 IA64FAULT ret1;
685 #ifdef VMAL_NO_FAULT_CHECK
686 IA64_PSR vpsr;
687 vpsr.val=vmx_vcpu_get_psr(vcpu);
688 if ( vpsr.ic ) {
689 set_illegal_op_isr(vcpu);
690 illegal_op(vcpu);
691 return IA64_FAULT;
692 }
694 UINT64 fault;
695 ISR isr;
696 if ( vpsr.cpl != 0) {
697 /* Inject Privileged Operation fault into guest */
698 set_privileged_operation_isr (vcpu, 0);
699 privilege_op (vcpu);
700 return IA64_FAULT;
701 }
702 #endif // VMAL_NO_FAULT_CHECK
703 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
704 #ifdef VMAL_NO_FAULT_CHECK
705 if( ret1 != IA64_NO_FAULT ){
706 set_isr_reg_nat_consumption(vcpu,0,0);
707 rnat_comsumption(vcpu);
708 return IA64_FAULT;
709 }
710 #endif // VMAL_NO_FAULT_CHECK
712 if (vcpu_get_itir(vcpu,itir)){
713 return(IA64_FAULT);
714 }
715 if (vcpu_get_ifa(vcpu,ifa)){
716 return(IA64_FAULT);
717 }
718 #ifdef VMAL_NO_FAULT_CHECK
719 if (unimplemented_gva(vcpu,ifa) ) {
720 isr.val = set_isr_ei_ni(vcpu);
721 isr.code = IA64_RESERVED_REG_FAULT;
722 vcpu_set_isr(vcpu, isr.val);
723 unimpl_daddr(vcpu);
724 return IA64_FAULT;
725 }
726 #endif // VMAL_NO_FAULT_CHECK
727 return IA64_NO_FAULT;
728 }
730 IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
731 {
732 UINT64 itir, ifa, pte;
734 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
735 return IA64_FAULT;
736 }
738 return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
739 }
741 IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
742 {
743 UINT64 itir, ifa, pte;
745 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
746 return IA64_FAULT;
747 }
749 return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
751 }
753 /*************************************
754 * Moves to semi-privileged registers
755 *************************************/
757 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
758 {
759 // I27 and M30 are identical for these fields
760 UINT64 imm;
762 if(inst.M30.ar3!=44){
763 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
764 }
765 #ifdef CHECK_FAULT
766 IA64_PSR vpsr;
767 vpsr.val=vmx_vcpu_get_psr(vcpu);
768 if ( vpsr.cpl != 0) {
769 /* Inject Privileged Operation fault into guest */
770 set_privileged_operation_isr (vcpu, 0);
771 privilege_op (vcpu);
772 return IA64_FAULT;
773 }
774 #endif // CHECK_FAULT
775 if(inst.M30.s){
776 imm = -inst.M30.imm;
777 }else{
778 imm = inst.M30.imm;
779 }
780 return (vmx_vcpu_set_itc(vcpu, imm));
781 }
783 IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
784 {
785 // I26 and M29 are identical for these fields
786 u64 r2;
787 if(inst.M29.ar3!=44){
788 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
789 }
790 if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
791 #ifdef CHECK_FAULT
792 set_isr_reg_nat_consumption(vcpu,0,0);
793 rnat_comsumption(vcpu);
794 return IA64_FAULT;
795 #endif //CHECK_FAULT
796 }
797 #ifdef CHECK_FAULT
798 IA64_PSR vpsr;
799 vpsr.val=vmx_vcpu_get_psr(vcpu);
800 if ( vpsr.cpl != 0) {
801 /* Inject Privileged Operation fault into guest */
802 set_privileged_operation_isr (vcpu, 0);
803 privilege_op (vcpu);
804 return IA64_FAULT;
805 }
806 #endif // CHECK_FAULT
807 return (vmx_vcpu_set_itc(vcpu, r2));
808 }
811 IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
812 {
813 // I28 and M31 are identical for these fields
814 u64 r1;
815 if(inst.M31.ar3!=44){
816 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
817 }
818 #ifdef CHECK_FAULT
819 if(check_target_register(vcpu,inst.M31.r1)){
820 set_illegal_op_isr(vcpu);
821 illegal_op(vcpu);
822 return IA64_FAULT;
823 }
824 IA64_PSR vpsr;
825 vpsr.val=vmx_vcpu_get_psr(vcpu);
826 if (vpsr.si&& vpsr.cpl != 0) {
827 /* Inject Privileged Operation fault into guest */
828 set_privileged_operation_isr (vcpu, 0);
829 privilege_op (vcpu);
830 return IA64_FAULT;
831 }
832 #endif // CHECK_FAULT
833 vmx_vcpu_get_itc(vcpu,&r1);
834 vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
835 return IA64_NO_FAULT;
836 }
839 /********************************
840 * Moves to privileged registers
841 ********************************/
843 IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
844 {
845 u64 r3,r2;
846 #ifdef CHECK_FAULT
847 IA64_PSR vpsr;
848 vpsr.val=vmx_vcpu_get_psr(vcpu);
849 if (vpsr.cpl != 0) {
850 /* Inject Privileged Operation fault into guest */
851 set_privileged_operation_isr (vcpu, 0);
852 privilege_op (vcpu);
853 return IA64_FAULT;
854 }
855 #endif // CHECK_FAULT
856 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
857 #ifdef CHECK_FAULT
858 set_isr_reg_nat_consumption(vcpu,0,0);
859 rnat_comsumption(vcpu);
860 return IA64_FAULT;
861 #endif //CHECK_FAULT
862 }
863 return (vmx_vcpu_set_pkr(vcpu,r3,r2));
864 }
866 IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
867 {
868 u64 r3,r2;
869 #ifdef CHECK_FAULT
870 IA64_PSR vpsr;
871 vpsr.val=vmx_vcpu_get_psr(vcpu);
872 if (vpsr.cpl != 0) {
873 /* Inject Privileged Operation fault into guest */
874 set_privileged_operation_isr (vcpu, 0);
875 privilege_op (vcpu);
876 return IA64_FAULT;
877 }
878 #endif // CHECK_FAULT
879 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
880 #ifdef CHECK_FAULT
881 set_isr_reg_nat_consumption(vcpu,0,0);
882 rnat_comsumption(vcpu);
883 return IA64_FAULT;
884 #endif //CHECK_FAULT
885 }
886 return (vmx_vcpu_set_rr(vcpu,r3,r2));
887 }
889 IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
890 {
891 u64 r3,r2;
892 return IA64_NO_FAULT;
893 #ifdef CHECK_FAULT
894 IA64_PSR vpsr;
895 vpsr.val=vmx_vcpu_get_psr(vcpu);
896 if (vpsr.cpl != 0) {
897 /* Inject Privileged Operation fault into guest */
898 set_privileged_operation_isr (vcpu, 0);
899 privilege_op (vcpu);
900 return IA64_FAULT;
901 }
902 #endif // CHECK_FAULT
903 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
904 #ifdef CHECK_FAULT
905 set_isr_reg_nat_consumption(vcpu,0,0);
906 rnat_comsumption(vcpu);
907 return IA64_FAULT;
908 #endif //CHECK_FAULT
909 }
910 return (vmx_vcpu_set_dbr(vcpu,r3,r2));
911 }
913 IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
914 {
915 u64 r3,r2;
916 return IA64_NO_FAULT;
917 #ifdef CHECK_FAULT
918 IA64_PSR vpsr;
919 vpsr.val=vmx_vcpu_get_psr(vcpu);
920 if (vpsr.cpl != 0) {
921 /* Inject Privileged Operation fault into guest */
922 set_privileged_operation_isr (vcpu, 0);
923 privilege_op (vcpu);
924 return IA64_FAULT;
925 }
926 #endif // CHECK_FAULT
927 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
928 #ifdef CHECK_FAULT
929 set_isr_reg_nat_consumption(vcpu,0,0);
930 rnat_comsumption(vcpu);
931 return IA64_FAULT;
932 #endif //CHECK_FAULT
933 }
934 return (vmx_vcpu_set_ibr(vcpu,r3,r2));
935 }
937 IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
938 {
939 u64 r3,r2;
940 #ifdef CHECK_FAULT
941 IA64_PSR vpsr;
942 vpsr.val=vmx_vcpu_get_psr(vcpu);
943 if (vpsr.cpl != 0) {
944 /* Inject Privileged Operation fault into guest */
945 set_privileged_operation_isr (vcpu, 0);
946 privilege_op (vcpu);
947 return IA64_FAULT;
948 }
949 #endif // CHECK_FAULT
950 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
951 #ifdef CHECK_FAULT
952 set_isr_reg_nat_consumption(vcpu,0,0);
953 rnat_comsumption(vcpu);
954 return IA64_FAULT;
955 #endif //CHECK_FAULT
956 }
957 return (vmx_vcpu_set_pmc(vcpu,r3,r2));
958 }
960 IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
961 {
962 u64 r3,r2;
963 #ifdef CHECK_FAULT
964 IA64_PSR vpsr;
965 vpsr.val=vmx_vcpu_get_psr(vcpu);
966 if (vpsr.cpl != 0) {
967 /* Inject Privileged Operation fault into guest */
968 set_privileged_operation_isr (vcpu, 0);
969 privilege_op (vcpu);
970 return IA64_FAULT;
971 }
972 #endif // CHECK_FAULT
973 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
974 #ifdef CHECK_FAULT
975 set_isr_reg_nat_consumption(vcpu,0,0);
976 rnat_comsumption(vcpu);
977 return IA64_FAULT;
978 #endif //CHECK_FAULT
979 }
980 return (vmx_vcpu_set_pmd(vcpu,r3,r2));
981 }
984 /**********************************
985 * Moves from privileged registers
986 **********************************/
988 IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
989 {
990 u64 r3,r1;
991 #ifdef CHECK_FAULT
992 if(check_target_register(vcpu, inst.M43.r1)){
993 set_illegal_op_isr(vcpu);
994 illegal_op(vcpu);
995 return IA64_FAULT;
996 }
997 IA64_PSR vpsr;
998 vpsr.val=vmx_vcpu_get_psr(vcpu);
999 if (vpsr.cpl != 0) {
1000 /* Inject Privileged Operation fault into guest */
1001 set_privileged_operation_isr (vcpu, 0);
1002 privilege_op (vcpu);
1003 return IA64_FAULT;
1004 }
1006 #endif //CHECK_FAULT
1007 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1008 #ifdef CHECK_FAULT
1009 set_isr_reg_nat_consumption(vcpu,0,0);
1010 rnat_comsumption(vcpu);
1011 return IA64_FAULT;
1012 #endif //CHECK_FAULT
1013 }
1014 #ifdef CHECK_FAULT
1015 if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
1016 set_rsv_reg_field_isr(vcpu);
1017 rsv_reg_field(vcpu);
1018 }
1019 #endif //CHECK_FAULT
1020 vcpu_get_rr(vcpu,r3,&r1);
1021 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1022 }
1024 IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
1025 {
1026 u64 r3,r1;
1027 #ifdef CHECK_FAULT
1028 if(check_target_register(vcpu, inst.M43.r1)){
1029 set_illegal_op_isr(vcpu);
1030 illegal_op(vcpu);
1031 return IA64_FAULT;
1032 }
1033 IA64_PSR vpsr;
1034 vpsr.val=vmx_vcpu_get_psr(vcpu);
1035 if (vpsr.cpl != 0) {
1036 /* Inject Privileged Operation fault into guest */
1037 set_privileged_operation_isr (vcpu, 0);
1038 privilege_op (vcpu);
1039 return IA64_FAULT;
1040 }
1042 #endif //CHECK_FAULT
1043 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1044 #ifdef CHECK_FAULT
1045 set_isr_reg_nat_consumption(vcpu,0,0);
1046 rnat_comsumption(vcpu);
1047 return IA64_FAULT;
1048 #endif //CHECK_FAULT
1049 }
1050 #ifdef CHECK_FAULT
1051 if(is_reserved_indirect_register(vcpu,r3)){
1052 set_rsv_reg_field_isr(vcpu);
1053 rsv_reg_field(vcpu);
1054 return IA64_FAULT;
1055 }
1056 #endif //CHECK_FAULT
1057 vmx_vcpu_get_pkr(vcpu,r3,&r1);
1058 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1059 }
1061 IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
1062 {
1063 u64 r3,r1;
1064 #ifdef CHECK_FAULT
1065 if(check_target_register(vcpu, inst.M43.r1)){
1066 set_illegal_op_isr(vcpu);
1067 illegal_op(vcpu);
1068 return IA64_FAULT;
1069 }
1070 IA64_PSR vpsr;
1071 vpsr.val=vmx_vcpu_get_psr(vcpu);
1072 if (vpsr.cpl != 0) {
1073 /* Inject Privileged Operation fault into guest */
1074 set_privileged_operation_isr (vcpu, 0);
1075 privilege_op (vcpu);
1076 return IA64_FAULT;
1077 }
1079 #endif //CHECK_FAULT
1080 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1081 #ifdef CHECK_FAULT
1082 set_isr_reg_nat_consumption(vcpu,0,0);
1083 rnat_comsumption(vcpu);
1084 return IA64_FAULT;
1085 #endif //CHECK_FAULT
1086 }
1087 #ifdef CHECK_FAULT
1088 if(is_reserved_indirect_register(vcpu,r3)){
1089 set_rsv_reg_field_isr(vcpu);
1090 rsv_reg_field(vcpu);
1091 return IA64_FAULT;
1092 }
1093 #endif //CHECK_FAULT
1094 vmx_vcpu_get_dbr(vcpu,r3,&r1);
1095 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1096 }
1098 IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
1099 {
1100 u64 r3,r1;
1101 #ifdef CHECK_FAULT
1102 if(check_target_register(vcpu, inst.M43.r1)){
1103 set_illegal_op_isr(vcpu);
1104 illegal_op(vcpu);
1105 return IA64_FAULT;
1106 }
1107 IA64_PSR vpsr;
1108 vpsr.val=vmx_vcpu_get_psr(vcpu);
1109 if (vpsr.cpl != 0) {
1110 /* Inject Privileged Operation fault into guest */
1111 set_privileged_operation_isr (vcpu, 0);
1112 privilege_op (vcpu);
1113 return IA64_FAULT;
1114 }
1116 #endif //CHECK_FAULT
1117 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1118 #ifdef CHECK_FAULT
1119 set_isr_reg_nat_consumption(vcpu,0,0);
1120 rnat_comsumption(vcpu);
1121 return IA64_FAULT;
1122 #endif //CHECK_FAULT
1123 }
1124 #ifdef CHECK_FAULT
1125 if(is_reserved_indirect_register(vcpu,r3)){
1126 set_rsv_reg_field_isr(vcpu);
1127 rsv_reg_field(vcpu);
1128 return IA64_FAULT;
1129 }
1130 #endif //CHECK_FAULT
1131 vmx_vcpu_get_ibr(vcpu,r3,&r1);
1132 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1133 }
1135 IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
1136 {
1137 u64 r3,r1;
1138 #ifdef CHECK_FAULT
1139 if(check_target_register(vcpu, inst.M43.r1)){
1140 set_illegal_op_isr(vcpu);
1141 illegal_op(vcpu);
1142 return IA64_FAULT;
1143 }
1144 IA64_PSR vpsr;
1145 vpsr.val=vmx_vcpu_get_psr(vcpu);
1146 if (vpsr.cpl != 0) {
1147 /* Inject Privileged Operation fault into guest */
1148 set_privileged_operation_isr (vcpu, 0);
1149 privilege_op (vcpu);
1150 return IA64_FAULT;
1151 }
1153 #endif //CHECK_FAULT
1154 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1155 #ifdef CHECK_FAULT
1156 set_isr_reg_nat_consumption(vcpu,0,0);
1157 rnat_comsumption(vcpu);
1158 return IA64_FAULT;
1159 #endif //CHECK_FAULT
1160 }
1161 #ifdef CHECK_FAULT
1162 if(is_reserved_indirect_register(vcpu,r3)){
1163 set_rsv_reg_field_isr(vcpu);
1164 rsv_reg_field(vcpu);
1165 return IA64_FAULT;
1166 }
1167 #endif //CHECK_FAULT
1168 vmx_vcpu_get_pmc(vcpu,r3,&r1);
1169 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1170 }
1172 IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
1173 {
1174 u64 r3,r1;
1175 #ifdef CHECK_FAULT
1176 if(check_target_register(vcpu, inst.M43.r1)){
1177 set_illegal_op_isr(vcpu);
1178 illegal_op(vcpu);
1179 return IA64_FAULT;
1180 }
1181 #endif //CHECK_FAULT
1182 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1183 #ifdef CHECK_FAULT
1184 set_isr_reg_nat_consumption(vcpu,0,0);
1185 rnat_comsumption(vcpu);
1186 return IA64_FAULT;
1187 #endif //CHECK_FAULT
1188 }
1189 #ifdef CHECK_FAULT
1190 if(is_reserved_indirect_register(vcpu,r3)){
1191 set_rsv_reg_field_isr(vcpu);
1192 rsv_reg_field(vcpu);
1193 return IA64_FAULT;
1194 }
1195 #endif //CHECK_FAULT
1196 vmx_vcpu_get_cpuid(vcpu,r3,&r1);
1197 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1198 }
1200 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
1201 {
1202 u64 r2;
1203 extern u64 cr_igfld_mask(int index, u64 value);
1204 #ifdef CHECK_FAULT
1205 IA64_PSR vpsr;
1206 vpsr.val=vmx_vcpu_get_psr(vcpu);
1207 if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
1208 set_illegal_op_isr(vcpu);
1209 illegal_op(vcpu);
1210 return IA64_FAULT;
1211 }
1212 if ( vpsr.cpl != 0) {
1213 /* Inject Privileged Operation fault into guest */
1214 set_privileged_operation_isr (vcpu, 0);
1215 privilege_op (vcpu);
1216 return IA64_FAULT;
1217 }
1218 #endif // CHECK_FAULT
1219 if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
1220 #ifdef CHECK_FAULT
1221 set_isr_reg_nat_consumption(vcpu,0,0);
1222 rnat_comsumption(vcpu);
1223 return IA64_FAULT;
1224 #endif //CHECK_FAULT
1225 }
1226 #ifdef CHECK_FAULT
1227 if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
1228 /* Inject Reserved Register/Field fault
1229 * into guest */
1230 set_rsv_reg_field_isr (vcpu,0);
1231 rsv_reg_field (vcpu);
1232 return IA64_FAULT;
1233 }
1234 #endif //CHECK_FAULT
1235 r2 = cr_igfld_mask(inst.M32.cr3,r2);
1236 switch (inst.M32.cr3) {
1237 case 0: return vmx_vcpu_set_dcr(vcpu,r2);
1238 case 1: return vmx_vcpu_set_itm(vcpu,r2);
1239 case 2: return vmx_vcpu_set_iva(vcpu,r2);
1240 case 8: return vmx_vcpu_set_pta(vcpu,r2);
1241 case 16:return vcpu_set_ipsr(vcpu,r2);
1242 case 17:return vcpu_set_isr(vcpu,r2);
1243 case 19:return vcpu_set_iip(vcpu,r2);
1244 case 20:return vcpu_set_ifa(vcpu,r2);
1245 case 21:return vcpu_set_itir(vcpu,r2);
1246 case 22:return vcpu_set_iipa(vcpu,r2);
1247 case 23:return vcpu_set_ifs(vcpu,r2);
1248 case 24:return vcpu_set_iim(vcpu,r2);
1249 case 25:return vcpu_set_iha(vcpu,r2);
1250 case 64:printk("SET LID to 0x%lx\n", r2);
1251 return IA64_NO_FAULT;
1252 case 65:return IA64_NO_FAULT;
1253 case 66:return vmx_vcpu_set_tpr(vcpu,r2);
1254 case 67:return vmx_vcpu_set_eoi(vcpu,r2);
1255 case 68:return IA64_NO_FAULT;
1256 case 69:return IA64_NO_FAULT;
1257 case 70:return IA64_NO_FAULT;
1258 case 71:return IA64_NO_FAULT;
1259 case 72:return vmx_vcpu_set_itv(vcpu,r2);
1260 case 73:return vmx_vcpu_set_pmv(vcpu,r2);
1261 case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
1262 case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
1263 case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
1264 default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1265 return IA64_NO_FAULT;
1266 }
1267 }
1270 #define cr_get(cr) \
1271 ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1272 vcpu_set_gr(vcpu, tgt, val,0):fault;
1274 #define vmx_cr_get(cr) \
1275 ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1276 vcpu_set_gr(vcpu, tgt, val,0):fault;
1278 IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
1279 {
1280 UINT64 tgt = inst.M33.r1;
1281 UINT64 val;
1282 IA64FAULT fault;
1283 #ifdef CHECK_FAULT
1284 IA64_PSR vpsr;
1285 vpsr.val=vmx_vcpu_get_psr(vcpu);
1286 if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
1287 (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
1288 set_illegal_op_isr(vcpu);
1289 illegal_op(vcpu);
1290 return IA64_FAULT;
1291 }
1292 if ( vpsr.cpl != 0) {
1293 /* Inject Privileged Operation fault into guest */
1294 set_privileged_operation_isr (vcpu, 0);
1295 privilege_op (vcpu);
1296 return IA64_FAULT;
1297 }
1298 #endif // CHECK_FAULT
1300 // from_cr_cnt[inst.M33.cr3]++;
1301 switch (inst.M33.cr3) {
1302 case 0: return vmx_cr_get(dcr);
1303 case 1: return vmx_cr_get(itm);
1304 case 2: return vmx_cr_get(iva);
1305 case 8: return vmx_cr_get(pta);
1306 case 16:return cr_get(ipsr);
1307 case 17:return cr_get(isr);
1308 case 19:return cr_get(iip);
1309 case 20:return cr_get(ifa);
1310 case 21:return cr_get(itir);
1311 case 22:return cr_get(iipa);
1312 case 23:return cr_get(ifs);
1313 case 24:return cr_get(iim);
1314 case 25:return cr_get(iha);
1315 case 64:return vmx_cr_get(lid);
1316 case 65:
1317 vmx_vcpu_get_ivr(vcpu,&val);
1318 return vcpu_set_gr(vcpu,tgt,val,0);
1319 case 66:return vmx_cr_get(tpr);
1320 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
1321 case 68:return vmx_cr_get(irr0);
1322 case 69:return vmx_cr_get(irr1);
1323 case 70:return vmx_cr_get(irr2);
1324 case 71:return vmx_cr_get(irr3);
1325 case 72:return vmx_cr_get(itv);
1326 case 73:return vmx_cr_get(pmv);
1327 case 74:return vmx_cr_get(cmcv);
1328 case 80:return vmx_cr_get(lrr0);
1329 case 81:return vmx_cr_get(lrr1);
1330 default: return IA64_NO_FAULT;
1331 }
1332 }
1335 //#define BYPASS_VMAL_OPCODE
1336 extern IA64_SLOT_TYPE slot_types[0x20][3];
1337 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
1338 {
1339 IA64_BUNDLE bundle;
1340 fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
1341 return bundle;
1342 }
1344 /** Emulate a privileged operation.
1347 * @param vcpu virtual cpu
1348 * @param cause the reason for the virtualization fault
1349 * @param opcode the instruction opcode that caused the virtualization fault
1350 */
1352 void
1353 vmx_emulate(VCPU *vcpu, REGS *regs)
1354 {
1355 IA64FAULT status;
1356 INST64 inst;
1357 UINT64 iip, cause, opcode;
1358 iip = regs->cr_iip;
1359 cause = VMX(vcpu,cause);
1360 opcode = VMX(vcpu,opcode);
1362 #ifdef VTLB_DEBUG
1363 check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
1364 dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
1365 #endif
1366 #if 0
1367 if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
1368 printf ("VMAL decode error: cause - %lx; op - %lx\n",
1369 cause, opcode );
1370 return;
1371 }
1372 #endif
1373 #ifdef BYPASS_VMAL_OPCODE
1374 // make a local copy of the bundle containing the privop
1375 IA64_BUNDLE bundle;
1376 int slot;
1377 IA64_SLOT_TYPE slot_type;
1378 IA64_PSR vpsr;
1379 bundle = __vmx_get_domain_bundle(iip);
1380 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
1381 if (!slot) inst.inst = bundle.slot0;
1382 else if (slot == 1)
1383 inst.inst = bundle.slot1a + (bundle.slot1b<<18);
1384 else if (slot == 2) inst.inst = bundle.slot2;
1385 else printf("priv_handle_op: illegal slot: %d\n", slot);
1386 slot_type = slot_types[bundle.template][slot];
1387 ia64_priv_decoder(slot_type, inst, &cause);
1388 if(cause==0){
1389 panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
1390 }
1391 #else
1392 inst.inst=opcode;
1393 #endif /* BYPASS_VMAL_OPCODE */
1394 /*
1395 * Switch to actual virtual rid in rr0 and rr4,
1396 * which is required by some tlb related instructions.
1397 */
1398 prepare_if_physical_mode(vcpu);
1400 switch(cause) {
1401 case EVENT_RSM:
1402 status=vmx_emul_rsm(vcpu, inst);
1403 break;
1404 case EVENT_SSM:
1405 status=vmx_emul_ssm(vcpu, inst);
1406 break;
1407 case EVENT_MOV_TO_PSR:
1408 status=vmx_emul_mov_to_psr(vcpu, inst);
1409 break;
1410 case EVENT_MOV_FROM_PSR:
1411 status=vmx_emul_mov_from_psr(vcpu, inst);
1412 break;
1413 case EVENT_MOV_FROM_CR:
1414 status=vmx_emul_mov_from_cr(vcpu, inst);
1415 break;
1416 case EVENT_MOV_TO_CR:
1417 status=vmx_emul_mov_to_cr(vcpu, inst);
1418 break;
1419 case EVENT_BSW_0:
1420 status=vmx_emul_bsw0(vcpu, inst);
1421 break;
1422 case EVENT_BSW_1:
1423 status=vmx_emul_bsw1(vcpu, inst);
1424 break;
1425 case EVENT_COVER:
1426 status=vmx_emul_cover(vcpu, inst);
1427 break;
1428 case EVENT_RFI:
1429 status=vmx_emul_rfi(vcpu, inst);
1430 break;
1431 case EVENT_ITR_D:
1432 status=vmx_emul_itr_d(vcpu, inst);
1433 break;
1434 case EVENT_ITR_I:
1435 status=vmx_emul_itr_i(vcpu, inst);
1436 break;
1437 case EVENT_PTR_D:
1438 status=vmx_emul_ptr_d(vcpu, inst);
1439 break;
1440 case EVENT_PTR_I:
1441 status=vmx_emul_ptr_i(vcpu, inst);
1442 break;
1443 case EVENT_ITC_D:
1444 status=vmx_emul_itc_d(vcpu, inst);
1445 break;
1446 case EVENT_ITC_I:
1447 status=vmx_emul_itc_i(vcpu, inst);
1448 break;
1449 case EVENT_PTC_L:
1450 status=vmx_emul_ptc_l(vcpu, inst);
1451 break;
1452 case EVENT_PTC_G:
1453 status=vmx_emul_ptc_g(vcpu, inst);
1454 break;
1455 case EVENT_PTC_GA:
1456 status=vmx_emul_ptc_ga(vcpu, inst);
1457 break;
1458 case EVENT_PTC_E:
1459 status=vmx_emul_ptc_e(vcpu, inst);
1460 break;
1461 case EVENT_MOV_TO_RR:
1462 status=vmx_emul_mov_to_rr(vcpu, inst);
1463 break;
1464 case EVENT_MOV_FROM_RR:
1465 status=vmx_emul_mov_from_rr(vcpu, inst);
1466 break;
1467 case EVENT_THASH:
1468 status=vmx_emul_thash(vcpu, inst);
1469 break;
1470 case EVENT_TTAG:
1471 status=vmx_emul_ttag(vcpu, inst);
1472 break;
1473 case EVENT_TPA:
1474 status=vmx_emul_tpa(vcpu, inst);
1475 break;
1476 case EVENT_TAK:
1477 status=vmx_emul_tak(vcpu, inst);
1478 break;
1479 case EVENT_MOV_TO_AR_IMM:
1480 status=vmx_emul_mov_to_ar_imm(vcpu, inst);
1481 break;
1482 case EVENT_MOV_TO_AR:
1483 status=vmx_emul_mov_to_ar_reg(vcpu, inst);
1484 break;
1485 case EVENT_MOV_FROM_AR:
1486 status=vmx_emul_mov_from_ar_reg(vcpu, inst);
1487 break;
1488 case EVENT_MOV_TO_DBR:
1489 status=vmx_emul_mov_to_dbr(vcpu, inst);
1490 break;
1491 case EVENT_MOV_TO_IBR:
1492 status=vmx_emul_mov_to_ibr(vcpu, inst);
1493 break;
1494 case EVENT_MOV_TO_PMC:
1495 status=vmx_emul_mov_to_pmc(vcpu, inst);
1496 break;
1497 case EVENT_MOV_TO_PMD:
1498 status=vmx_emul_mov_to_pmd(vcpu, inst);
1499 break;
1500 case EVENT_MOV_TO_PKR:
1501 status=vmx_emul_mov_to_pkr(vcpu, inst);
1502 break;
1503 case EVENT_MOV_FROM_DBR:
1504 status=vmx_emul_mov_from_dbr(vcpu, inst);
1505 break;
1506 case EVENT_MOV_FROM_IBR:
1507 status=vmx_emul_mov_from_ibr(vcpu, inst);
1508 break;
1509 case EVENT_MOV_FROM_PMC:
1510 status=vmx_emul_mov_from_pmc(vcpu, inst);
1511 break;
1512 case EVENT_MOV_FROM_PKR:
1513 status=vmx_emul_mov_from_pkr(vcpu, inst);
1514 break;
1515 case EVENT_MOV_FROM_CPUID:
1516 status=vmx_emul_mov_from_cpuid(vcpu, inst);
1517 break;
1518 case EVENT_VMSW:
1519 printf ("Unimplemented instruction %ld\n", cause);
1520 status=IA64_FAULT;
1521 break;
1522 default:
1523 panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
1524 break;
1525 };
1527 #if 0
1528 if (status == IA64_FAULT)
1529 panic("Emulation failed with cause %d:\n", cause);
1530 #endif
1532 if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
1533 vmx_vcpu_increment_iip(vcpu);
1534 }
1536 recover_if_physical_mode(vcpu);
1537 return;
1538 }