ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_virt.c @ 9392:00111084c70a

[IA64] Remove warning messages

This patch removes warning messages in vmx_phy_mode.c
and vmx_virt.c.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Thu Mar 23 13:22:56 2006 -0700 (2006-03-23)
parents 8a551ec13d93
children 6e979aa0e6d2
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_virt.c:
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Fred yang (fred.yang@intel.com)
20 * Shaofan Li (Susue Li) <susie.li@intel.com>
21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
22 */
26 #include <asm/privop.h>
27 #include <asm/vmx_vcpu.h>
28 #include <asm/processor.h>
29 #include <asm/delay.h> // Debug only
30 #include <asm/vmmu.h>
31 #include <asm/vmx_mm_def.h>
32 #include <asm/smp.h>
33 #include <asm/vmx.h>
34 #include <asm/virt_event.h>
35 #include <asm/vmx_phy_mode.h>
36 extern UINT64 privop_trace;
37 extern void vhpi_detection(VCPU *vcpu); // temporarily placed here; needs a header file.
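/* Decode the privileged instruction in the given slot and return the
   corresponding EVENT_* cause code through *cause (0 if unrecognized). */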
39 void
40 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause)
41 {
42 *cause=0;
43 switch (slot_type) {
44 case M:
45 if (inst.generic.major==0){
46 if(inst.M28.x3==0){
47 if(inst.M44.x4==6){
48 *cause=EVENT_SSM;
49 }else if(inst.M44.x4==7){
50 *cause=EVENT_RSM;
51 }else if(inst.M30.x4==8&&inst.M30.x2==2){
52 *cause=EVENT_MOV_TO_AR_IMM;
53 }
54 }
55 }
56 else if(inst.generic.major==1){
57 if(inst.M28.x3==0){
58 if(inst.M32.x6==0x2c){
59 *cause=EVENT_MOV_TO_CR;
60 }else if(inst.M33.x6==0x24){
61 *cause=EVENT_MOV_FROM_CR;
62 }else if(inst.M35.x6==0x2d){
63 *cause=EVENT_MOV_TO_PSR;
64 }else if(inst.M36.x6==0x25){
65 *cause=EVENT_MOV_FROM_PSR;
66 }else if(inst.M29.x6==0x2A){
67 *cause=EVENT_MOV_TO_AR;
68 }else if(inst.M31.x6==0x22){
69 *cause=EVENT_MOV_FROM_AR;
70 }else if(inst.M45.x6==0x09){
71 *cause=EVENT_PTC_L;
72 }else if(inst.M45.x6==0x0A){
73 *cause=EVENT_PTC_G;
74 }else if(inst.M45.x6==0x0B){
75 *cause=EVENT_PTC_GA;
76 }else if(inst.M45.x6==0x0C){
77 *cause=EVENT_PTR_D;
78 }else if(inst.M45.x6==0x0D){
79 *cause=EVENT_PTR_I;
80 }else if(inst.M46.x6==0x1A){
81 *cause=EVENT_THASH;
82 }else if(inst.M46.x6==0x1B){
83 *cause=EVENT_TTAG;
84 }else if(inst.M46.x6==0x1E){
85 *cause=EVENT_TPA;
86 }else if(inst.M46.x6==0x1F){
87 *cause=EVENT_TAK;
88 }else if(inst.M47.x6==0x34){
89 *cause=EVENT_PTC_E;
90 }else if(inst.M41.x6==0x2E){
91 *cause=EVENT_ITC_D;
92 }else if(inst.M41.x6==0x2F){
93 *cause=EVENT_ITC_I;
94 }else if(inst.M42.x6==0x00){
95 *cause=EVENT_MOV_TO_RR;
96 }else if(inst.M42.x6==0x01){
97 *cause=EVENT_MOV_TO_DBR;
98 }else if(inst.M42.x6==0x02){
99 *cause=EVENT_MOV_TO_IBR;
100 }else if(inst.M42.x6==0x03){
101 *cause=EVENT_MOV_TO_PKR;
102 }else if(inst.M42.x6==0x04){
103 *cause=EVENT_MOV_TO_PMC;
104 }else if(inst.M42.x6==0x05){
105 *cause=EVENT_MOV_TO_PMD;
106 }else if(inst.M42.x6==0x0E){
107 *cause=EVENT_ITR_D;
108 }else if(inst.M42.x6==0x0F){
109 *cause=EVENT_ITR_I;
110 }else if(inst.M43.x6==0x10){
111 *cause=EVENT_MOV_FROM_RR;
112 }else if(inst.M43.x6==0x11){
113 *cause=EVENT_MOV_FROM_DBR;
114 }else if(inst.M43.x6==0x12){
115 *cause=EVENT_MOV_FROM_IBR;
116 }else if(inst.M43.x6==0x13){
117 *cause=EVENT_MOV_FROM_PKR;
118 }else if(inst.M43.x6==0x14){
119 *cause=EVENT_MOV_FROM_PMC;
120 /*
121 }else if(inst.M43.x6==0x15){
122 *cause=EVENT_MOV_FROM_PMD;
123 */
124 }else if(inst.M43.x6==0x17){
125 *cause=EVENT_MOV_FROM_CPUID;
126 }
127 }
128 }
129 break;
130 case B:
131 if(inst.generic.major==0){
132 if(inst.B8.x6==0x02){
133 *cause=EVENT_COVER;
134 }else if(inst.B8.x6==0x08){
135 *cause=EVENT_RFI;
136 }else if(inst.B8.x6==0x0c){
137 *cause=EVENT_BSW_0;
138 }else if(inst.B8.x6==0x0d){
139 *cause=EVENT_BSW_1;
140 }
141 }
142 case I:
143 case F:
144 case L:
145 case ILLEGAL:
146 break;
147 }
148 }
150 IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
151 {
152 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
153 return vmx_vcpu_reset_psr_sm(vcpu,imm24);
154 }
156 IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
157 {
158 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
159 return vmx_vcpu_set_psr_sm(vcpu,imm24);
160 }
162 unsigned long last_guest_psr = 0x0;
163 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
164 {
165 UINT64 tgt = inst.M33.r1;
166 UINT64 val;
168 /*
169 if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
170 return vcpu_set_gr(vcpu, tgt, val);
171 else return fault;
172 */
173 val = vmx_vcpu_get_psr(vcpu);
174 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
175 last_guest_psr = val;
176 return vcpu_set_gr(vcpu, tgt, val, 0);
177 }
179 /**
180 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
181 */
182 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
183 {
184 UINT64 val;
185 if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
186 panic(" get_psr nat bit fault\n");
188 val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
189 #if 0
190 if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
191 while(1);
192 else
193 last_mov_from_psr = 0;
194 #endif
195 return vmx_vcpu_set_psr_l(vcpu,val);
196 }
199 /**************************************************************************
200 Privileged operation emulation routines
201 **************************************************************************/
203 IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
204 {
205 IA64_PSR vpsr;
206 REGS *regs;
207 #ifdef CHECK_FAULT
208 vpsr.val=vmx_vcpu_get_psr(vcpu);
209 if ( vpsr.cpl != 0) {
210 /* Inject Privileged Operation fault into guest */
211 set_privileged_operation_isr (vcpu, 0);
212 privilege_op (vcpu);
213 return IA64_FAULT;
214 }
215 #endif // CHECK_FAULT
216 regs=vcpu_regs(vcpu);
217 vpsr.val=regs->cr_ipsr;
218 if ( vpsr.is == 1 ) {
219 panic ("We do not support IA32 instruction yet");
220 }
222 return vmx_vcpu_rfi(vcpu);
223 }
225 IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
226 {
227 #ifdef CHECK_FAULT
228 IA64_PSR vpsr;
229 vpsr.val=vmx_vcpu_get_psr(vcpu);
230 if ( vpsr.cpl != 0) {
231 /* Inject Privileged Operation fault into guest */
232 set_privileged_operation_isr (vcpu, 0);
233 privilege_op (vcpu);
234 return IA64_FAULT;
235 }
236 #endif // CHECK_FAULT
237 return vcpu_bsw0(vcpu);
238 }
240 IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
241 {
242 #ifdef CHECK_FAULT
243 IA64_PSR vpsr;
244 vpsr.val=vmx_vcpu_get_psr(vcpu);
245 if ( vpsr.cpl != 0) {
246 /* Inject Privileged Operation fault into guest */
247 set_privileged_operation_isr (vcpu, 0);
248 privilege_op (vcpu);
249 return IA64_FAULT;
250 }
251 #endif // CHECK_FAULT
252 return vcpu_bsw1(vcpu);
253 }
255 IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
256 {
257 return vmx_vcpu_cover(vcpu);
258 }
260 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
261 {
262 u64 r2,r3;
263 IA64_PSR vpsr;
265 vpsr.val=vmx_vcpu_get_psr(vcpu);
266 if ( vpsr.cpl != 0) {
267 /* Inject Privileged Operation fault into guest */
268 set_privileged_operation_isr (vcpu, 0);
269 privilege_op (vcpu);
270 return IA64_FAULT;
271 }
272 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
273 #ifdef VMAL_NO_FAULT_CHECK
274 ISR isr;
275 set_isr_reg_nat_consumption(vcpu,0,0);
276 rnat_comsumption(vcpu);
277 return IA64_FAULT;
278 #endif // VMAL_NO_FAULT_CHECK
279 }
280 #ifdef VMAL_NO_FAULT_CHECK
281 if (unimplemented_gva(vcpu,r3) ) {
282 isr.val = set_isr_ei_ni(vcpu);
283 isr.code = IA64_RESERVED_REG_FAULT;
284 vcpu_set_isr(vcpu, isr.val);
285 unimpl_daddr(vcpu);
286 return IA64_FAULT;
287 }
288 #endif // VMAL_NO_FAULT_CHECK
289 return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
290 }
292 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
293 {
294 u64 r3;
295 IA64_PSR vpsr;
297 vpsr.val=vmx_vcpu_get_psr(vcpu);
298 #ifdef VMAL_NO_FAULT_CHECK
299 ISR isr;
300 if ( vpsr.cpl != 0) {
301 /* Inject Privileged Operation fault into guest */
302 set_privileged_operation_isr (vcpu, 0);
303 privilege_op (vcpu);
304 return IA64_FAULT;
305 }
306 #endif // VMAL_NO_FAULT_CHECK
307 if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
308 #ifdef VMAL_NO_FAULT_CHECK
309 set_isr_reg_nat_consumption(vcpu,0,0);
310 rnat_comsumption(vcpu);
311 return IA64_FAULT;
312 #endif // VMAL_NO_FAULT_CHECK
313 }
314 return vmx_vcpu_ptc_e(vcpu,r3);
315 }
317 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
318 {
319 return vmx_emul_ptc_l(vcpu, inst);
320 }
322 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
323 {
324 return vmx_emul_ptc_l(vcpu, inst);
325 }
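/* Common operand fetch and optional fault checks shared by the ptr.d/ptr.i
   emulation paths: reads GR[r2]/GR[r3] into *pr2/*pr3. */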
327 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
328 {
329 IA64FAULT ret1, ret2;
331 #ifdef VMAL_NO_FAULT_CHECK
332 ISR isr;
333 IA64_PSR vpsr;
334 vpsr.val=vmx_vcpu_get_psr(vcpu);
335 if ( vpsr.cpl != 0) {
336 /* Inject Privileged Operation fault into guest */
337 set_privileged_operation_isr (vcpu, 0);
338 privilege_op (vcpu);
339 return IA64_FAULT;
340 }
341 #endif // VMAL_NO_FAULT_CHECK
342 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
343 ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
344 #ifdef VMAL_NO_FAULT_CHECK
345 if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
346 set_isr_reg_nat_consumption(vcpu,0,0);
347 rnat_comsumption(vcpu);
348 return IA64_FAULT;
349 }
350 if (unimplemented_gva(vcpu,r3) ) {
351 isr.val = set_isr_ei_ni(vcpu);
352 isr.code = IA64_RESERVED_REG_FAULT;
353 vcpu_set_isr(vcpu, isr.val);
354 unimpl_daddr(vcpu);
355 return IA64_FAULT;
356 }
357 #endif // VMAL_NO_FAULT_CHECK
358 return IA64_NO_FAULT;
359 }
361 IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
362 {
363 u64 r2,r3;
364 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
365 return IA64_FAULT;
366 return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
367 }
369 IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
370 {
371 u64 r2,r3;
372 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
373 return IA64_FAULT;
374 return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
375 }
378 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
379 {
380 u64 r1,r3;
381 #ifdef CHECK_FAULT
382 ISR visr;
383 IA64_PSR vpsr;
384 if(check_target_register(vcpu, inst.M46.r1)){
385 set_illegal_op_isr(vcpu);
386 illegal_op(vcpu);
387 return IA64_FAULT;
388 }
389 #endif //CHECK_FAULT
390 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
391 #ifdef CHECK_FAULT
392 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
393 return IA64_NO_FAULT;
394 #endif //CHECK_FAULT
395 }
396 #ifdef CHECK_FAULT
397 if(unimplemented_gva(vcpu, r3)){
398 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
399 return IA64_NO_FAULT;
400 }
401 #endif //CHECK_FAULT
402 vmx_vcpu_thash(vcpu, r3, &r1);
403 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
404 return(IA64_NO_FAULT);
405 }
408 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
409 {
410 u64 r1,r3;
411 #ifdef CHECK_FAULT
412 ISR visr;
413 IA64_PSR vpsr;
414 #endif
415 #ifdef CHECK_FAULT
416 if(check_target_register(vcpu, inst.M46.r1)){
417 set_illegal_op_isr(vcpu);
418 illegal_op(vcpu);
419 return IA64_FAULT;
420 }
421 #endif //CHECK_FAULT
422 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
423 #ifdef CHECK_FAULT
424 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
425 return IA64_NO_FAULT;
426 #endif //CHECK_FAULT
427 }
428 #ifdef CHECK_FAULT
429 if(unimplemented_gva(vcpu, r3)){
430 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
431 return IA64_NO_FAULT;
432 }
433 #endif //CHECK_FAULT
434 vmx_vcpu_ttag(vcpu, r3, &r1);
435 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
436 return(IA64_NO_FAULT);
437 }
440 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
441 {
442 u64 r1,r3;
443 #ifdef CHECK_FAULT
444 ISR visr;
445 if(check_target_register(vcpu, inst.M46.r1)){
446 set_illegal_op_isr(vcpu);
447 illegal_op(vcpu);
448 return IA64_FAULT;
449 }
450 IA64_PSR vpsr;
451 vpsr.val=vmx_vcpu_get_psr(vcpu);
452 if(vpsr.cpl!=0){
453 visr.val=0;
454 vcpu_set_isr(vcpu, visr.val);
455 return IA64_FAULT;
456 }
457 #endif //CHECK_FAULT
458 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
459 #ifdef CHECK_FAULT
460 set_isr_reg_nat_consumption(vcpu,0,1);
461 rnat_comsumption(vcpu);
462 return IA64_FAULT;
463 #endif //CHECK_FAULT
464 }
465 #ifdef CHECK_FAULT
466 if (unimplemented_gva(vcpu,r3) ) {
467 // inject unimplemented_data_address_fault
468 visr.val = set_isr_ei_ni(vcpu);
469 visr.code = IA64_RESERVED_REG_FAULT;
470 vcpu_set_isr(vcpu, isr.val);
471 // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
472 unimpl_daddr(vcpu);
473 return IA64_FAULT;
474 }
475 #endif //CHECK_FAULT
477 if(vmx_vcpu_tpa(vcpu, r3, &r1)){
478 return IA64_FAULT;
479 }
480 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
481 return(IA64_NO_FAULT);
482 }
484 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
485 {
486 u64 r1,r3;
487 #ifdef CHECK_FAULT
488 ISR visr;
489 IA64_PSR vpsr;
490 int fault=IA64_NO_FAULT;
491 visr.val=0;
492 if(check_target_register(vcpu, inst.M46.r1)){
493 set_illegal_op_isr(vcpu);
494 illegal_op(vcpu);
495 return IA64_FAULT;
496 }
497 vpsr.val=vmx_vcpu_get_psr(vcpu);
498 if(vpsr.cpl!=0){
499 vcpu_set_isr(vcpu, visr.val);
500 return IA64_FAULT;
501 }
502 #endif
503 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
504 #ifdef CHECK_FAULT
505 set_isr_reg_nat_consumption(vcpu,0,1);
506 rnat_comsumption(vcpu);
507 return IA64_FAULT;
508 #endif
509 }
510 if(vmx_vcpu_tak(vcpu, r3, &r1)){
511 return IA64_FAULT;
512 }
513 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
514 return(IA64_NO_FAULT);
515 }
518 /************************************
519 * Insert translation register/cache
520 ************************************/
522 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
523 {
524 UINT64 itir, ifa, pte, slot;
525 IA64_PSR vpsr;
526 vpsr.val=vmx_vcpu_get_psr(vcpu);
527 if ( vpsr.ic ) {
528 set_illegal_op_isr(vcpu);
529 illegal_op(vcpu);
530 return IA64_FAULT;
531 }
532 #ifdef VMAL_NO_FAULT_CHECK
533 ISR isr;
534 if ( vpsr.cpl != 0) {
535 /* Inject Privileged Operation fault into guest */
536 set_privileged_operation_isr (vcpu, 0);
537 privilege_op (vcpu);
538 return IA64_FAULT;
539 }
540 #endif // VMAL_NO_FAULT_CHECK
541 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
542 #ifdef VMAL_NO_FAULT_CHECK
543 set_isr_reg_nat_consumption(vcpu,0,0);
544 rnat_comsumption(vcpu);
545 return IA64_FAULT;
546 #endif // VMAL_NO_FAULT_CHECK
547 }
548 #ifdef VMAL_NO_FAULT_CHECK
549 if(is_reserved_rr_register(vcpu, slot)){
550 set_illegal_op_isr(vcpu);
551 illegal_op(vcpu);
552 return IA64_FAULT;
553 }
554 #endif // VMAL_NO_FAULT_CHECK
556 if (vcpu_get_itir(vcpu,&itir)){
557 return(IA64_FAULT);
558 }
559 if (vcpu_get_ifa(vcpu,&ifa)){
560 return(IA64_FAULT);
561 }
562 #ifdef VMAL_NO_FAULT_CHECK
563 if (is_reserved_itir_field(vcpu, itir)) {
564 // TODO
565 return IA64_FAULT;
566 }
567 if (unimplemented_gva(vcpu,ifa) ) {
568 isr.val = set_isr_ei_ni(vcpu);
569 isr.code = IA64_RESERVED_REG_FAULT;
570 vcpu_set_isr(vcpu, isr.val);
571 unimpl_daddr(vcpu);
572 return IA64_FAULT;
573 }
574 #endif // VMAL_NO_FAULT_CHECK
576 return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
577 }
579 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
580 {
581 UINT64 itir, ifa, pte, slot;
582 #ifdef VMAL_NO_FAULT_CHECK
583 ISR isr;
584 #endif
585 IA64_PSR vpsr;
586 vpsr.val=vmx_vcpu_get_psr(vcpu);
587 if ( vpsr.ic ) {
588 set_illegal_op_isr(vcpu);
589 illegal_op(vcpu);
590 return IA64_FAULT;
591 }
592 #ifdef VMAL_NO_FAULT_CHECK
593 if ( vpsr.cpl != 0) {
594 /* Inject Privileged Operation fault into guest */
595 set_privileged_operation_isr (vcpu, 0);
596 privilege_op (vcpu);
597 return IA64_FAULT;
598 }
599 #endif // VMAL_NO_FAULT_CHECK
600 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
601 #ifdef VMAL_NO_FAULT_CHECK
602 set_isr_reg_nat_consumption(vcpu,0,0);
603 rnat_comsumption(vcpu);
604 return IA64_FAULT;
605 #endif // VMAL_NO_FAULT_CHECK
606 }
607 #ifdef VMAL_NO_FAULT_CHECK
608 if(is_reserved_rr_register(vcpu, slot)){
609 set_illegal_op_isr(vcpu);
610 illegal_op(vcpu);
611 return IA64_FAULT;
612 }
613 #endif // VMAL_NO_FAULT_CHECK
615 if (vcpu_get_itir(vcpu,&itir)){
616 return(IA64_FAULT);
617 }
618 if (vcpu_get_ifa(vcpu,&ifa)){
619 return(IA64_FAULT);
620 }
621 #ifdef VMAL_NO_FAULT_CHECK
622 if (is_reserved_itir_field(vcpu, itir)) {
623 // TODO
624 return IA64_FAULT;
625 }
626 if (unimplemented_gva(vcpu,ifa) ) {
627 isr.val = set_isr_ei_ni(vcpu);
628 isr.code = IA64_RESERVED_REG_FAULT;
629 vcpu_set_isr(vcpu, isr.val);
630 unimpl_daddr(vcpu);
631 return IA64_FAULT;
632 }
633 #endif // VMAL_NO_FAULT_CHECK
635 return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
636 }
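/* Common checks for itc.d/itc.i emulation: reject the insert while the guest
   PSR.ic is set, then fetch the PTE operand from GR[r2] and the ITIR/IFA
   values from the guest control registers. */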
638 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
639 {
640 IA64_PSR vpsr;
641 IA64FAULT ret1;
643 vpsr.val=vmx_vcpu_get_psr(vcpu);
644 if ( vpsr.ic ) {
645 set_illegal_op_isr(vcpu);
646 illegal_op(vcpu);
647 return IA64_FAULT;
648 }
650 #ifdef VMAL_NO_FAULT_CHECK
651 UINT64 fault;
652 ISR isr;
653 if ( vpsr.cpl != 0) {
654 /* Inject Privileged Operation fault into guest */
655 set_privileged_operation_isr (vcpu, 0);
656 privilege_op (vcpu);
657 return IA64_FAULT;
658 }
659 #endif // VMAL_NO_FAULT_CHECK
660 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
661 #ifdef VMAL_NO_FAULT_CHECK
662 if( ret1 != IA64_NO_FAULT ){
663 set_isr_reg_nat_consumption(vcpu,0,0);
664 rnat_comsumption(vcpu);
665 return IA64_FAULT;
666 }
667 #endif // VMAL_NO_FAULT_CHECK
669 if (vcpu_get_itir(vcpu,itir)){
670 return(IA64_FAULT);
671 }
672 if (vcpu_get_ifa(vcpu,ifa)){
673 return(IA64_FAULT);
674 }
675 #ifdef VMAL_NO_FAULT_CHECK
676 if (unimplemented_gva(vcpu,ifa) ) {
677 isr.val = set_isr_ei_ni(vcpu);
678 isr.code = IA64_RESERVED_REG_FAULT;
679 vcpu_set_isr(vcpu, isr.val);
680 unimpl_daddr(vcpu);
681 return IA64_FAULT;
682 }
683 #endif // VMAL_NO_FAULT_CHECK
684 return IA64_NO_FAULT;
685 }
687 IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
688 {
689 UINT64 itir, ifa, pte;
691 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
692 return IA64_FAULT;
693 }
695 return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
696 }
698 IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
699 {
700 UINT64 itir, ifa, pte;
702 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
703 return IA64_FAULT;
704 }
706 return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
708 }
710 /*************************************
711 * Moves to semi-privileged registers
712 *************************************/
714 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
715 {
716 // I27 and M30 are identical for these fields
717 UINT64 imm;
718 if(inst.M30.ar3!=44){
719 panic("Can't support ar register other than itc");
720 }
721 #ifdef CHECK_FAULT
722 IA64_PSR vpsr;
723 vpsr.val=vmx_vcpu_get_psr(vcpu);
724 if ( vpsr.cpl != 0) {
725 /* Inject Privileged Operation fault into guest */
726 set_privileged_operation_isr (vcpu, 0);
727 privilege_op (vcpu);
728 return IA64_FAULT;
729 }
730 #endif // CHECK_FAULT
731 if(inst.M30.s){
732 imm = -inst.M30.imm;
733 }else{
734 imm = inst.M30.imm;
735 }
736 return (vmx_vcpu_set_itc(vcpu, imm));
737 }
739 IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
740 {
741 // I26 and M29 are identical for these fields
742 u64 r2;
743 if(inst.M29.ar3!=44){
744 panic("Can't support ar register other than itc");
745 }
746 if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
747 #ifdef CHECK_FAULT
748 set_isr_reg_nat_consumption(vcpu,0,0);
749 rnat_comsumption(vcpu);
750 return IA64_FAULT;
751 #endif //CHECK_FAULT
752 }
753 #ifdef CHECK_FAULT
754 IA64_PSR vpsr;
755 vpsr.val=vmx_vcpu_get_psr(vcpu);
756 if ( vpsr.cpl != 0) {
757 /* Inject Privileged Operation fault into guest */
758 set_privileged_operation_isr (vcpu, 0);
759 privilege_op (vcpu);
760 return IA64_FAULT;
761 }
762 #endif // CHECK_FAULT
763 return (vmx_vcpu_set_itc(vcpu, r2));
764 }
767 IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
768 {
769 // I28 and M31 are identical for these fields
770 u64 r1;
771 if(inst.M31.ar3!=44){
772 panic("Can't support ar register other than itc");
773 }
774 #ifdef CHECK_FAULT
775 if(check_target_register(vcpu,inst.M31.r1)){
776 set_illegal_op_isr(vcpu);
777 illegal_op(vcpu);
778 return IA64_FAULT;
779 }
780 IA64_PSR vpsr;
781 vpsr.val=vmx_vcpu_get_psr(vcpu);
782 if (vpsr.si&& vpsr.cpl != 0) {
783 /* Inject Privileged Operation fault into guest */
784 set_privileged_operation_isr (vcpu, 0);
785 privilege_op (vcpu);
786 return IA64_FAULT;
787 }
788 #endif // CHECK_FAULT
789 vmx_vcpu_get_itc(vcpu,&r1);
790 vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
791 return IA64_NO_FAULT;
792 }
795 /********************************
796 * Moves to privileged registers
797 ********************************/
799 IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
800 {
801 u64 r3,r2;
802 #ifdef CHECK_FAULT
803 IA64_PSR vpsr;
804 vpsr.val=vmx_vcpu_get_psr(vcpu);
805 if (vpsr.cpl != 0) {
806 /* Inject Privileged Operation fault into guest */
807 set_privileged_operation_isr (vcpu, 0);
808 privilege_op (vcpu);
809 return IA64_FAULT;
810 }
811 #endif // CHECK_FAULT
812 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
813 #ifdef CHECK_FAULT
814 set_isr_reg_nat_consumption(vcpu,0,0);
815 rnat_comsumption(vcpu);
816 return IA64_FAULT;
817 #endif //CHECK_FAULT
818 }
819 return (vmx_vcpu_set_pkr(vcpu,r3,r2));
820 }
822 IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
823 {
824 u64 r3,r2;
825 #ifdef CHECK_FAULT
826 IA64_PSR vpsr;
827 vpsr.val=vmx_vcpu_get_psr(vcpu);
828 if (vpsr.cpl != 0) {
829 /* Inject Privileged Operation fault into guest */
830 set_privileged_operation_isr (vcpu, 0);
831 privilege_op (vcpu);
832 return IA64_FAULT;
833 }
834 #endif // CHECK_FAULT
835 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
836 #ifdef CHECK_FAULT
837 set_isr_reg_nat_consumption(vcpu,0,0);
838 rnat_comsumption(vcpu);
839 return IA64_FAULT;
840 #endif //CHECK_FAULT
841 }
842 return (vmx_vcpu_set_rr(vcpu,r3,r2));
843 }
845 IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
846 {
847 u64 r3,r2;
848 return IA64_NO_FAULT;
849 #ifdef CHECK_FAULT
850 IA64_PSR vpsr;
851 vpsr.val=vmx_vcpu_get_psr(vcpu);
852 if (vpsr.cpl != 0) {
853 /* Inject Privileged Operation fault into guest */
854 set_privileged_operation_isr (vcpu, 0);
855 privilege_op (vcpu);
856 return IA64_FAULT;
857 }
858 #endif // CHECK_FAULT
859 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
860 #ifdef CHECK_FAULT
861 set_isr_reg_nat_consumption(vcpu,0,0);
862 rnat_comsumption(vcpu);
863 return IA64_FAULT;
864 #endif //CHECK_FAULT
865 }
866 return (vmx_vcpu_set_dbr(vcpu,r3,r2));
867 }
869 IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
870 {
871 u64 r3,r2;
872 return IA64_NO_FAULT;
873 #ifdef CHECK_FAULT
874 IA64_PSR vpsr;
875 vpsr.val=vmx_vcpu_get_psr(vcpu);
876 if (vpsr.cpl != 0) {
877 /* Inject Privileged Operation fault into guest */
878 set_privileged_operation_isr (vcpu, 0);
879 privilege_op (vcpu);
880 return IA64_FAULT;
881 }
882 #endif // CHECK_FAULT
883 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
884 #ifdef CHECK_FAULT
885 set_isr_reg_nat_consumption(vcpu,0,0);
886 rnat_comsumption(vcpu);
887 return IA64_FAULT;
888 #endif //CHECK_FAULT
889 }
890 return (vmx_vcpu_set_ibr(vcpu,r3,r2));
891 }
893 IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
894 {
895 u64 r3,r2;
896 #ifdef CHECK_FAULT
897 IA64_PSR vpsr;
898 vpsr.val=vmx_vcpu_get_psr(vcpu);
899 if (vpsr.cpl != 0) {
900 /* Inject Privileged Operation fault into guest */
901 set_privileged_operation_isr (vcpu, 0);
902 privilege_op (vcpu);
903 return IA64_FAULT;
904 }
905 #endif // CHECK_FAULT
906 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
907 #ifdef CHECK_FAULT
908 set_isr_reg_nat_consumption(vcpu,0,0);
909 rnat_comsumption(vcpu);
910 return IA64_FAULT;
911 #endif //CHECK_FAULT
912 }
913 return (vmx_vcpu_set_pmc(vcpu,r3,r2));
914 }
916 IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
917 {
918 u64 r3,r2;
919 #ifdef CHECK_FAULT
920 IA64_PSR vpsr;
921 vpsr.val=vmx_vcpu_get_psr(vcpu);
922 if (vpsr.cpl != 0) {
923 /* Inject Privileged Operation fault into guest */
924 set_privileged_operation_isr (vcpu, 0);
925 privilege_op (vcpu);
926 return IA64_FAULT;
927 }
928 #endif // CHECK_FAULT
929 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
930 #ifdef CHECK_FAULT
931 set_isr_reg_nat_consumption(vcpu,0,0);
932 rnat_comsumption(vcpu);
933 return IA64_FAULT;
934 #endif //CHECK_FAULT
935 }
936 return (vmx_vcpu_set_pmd(vcpu,r3,r2));
937 }
940 /**********************************
941 * Moves from privileged registers
942 **********************************/
944 IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
945 {
946 u64 r3,r1;
947 #ifdef CHECK_FAULT
948 if(check_target_register(vcpu, inst.M43.r1)){
949 set_illegal_op_isr(vcpu);
950 illegal_op(vcpu);
951 return IA64_FAULT;
952 }
953 IA64_PSR vpsr;
954 vpsr.val=vmx_vcpu_get_psr(vcpu);
955 if (vpsr.cpl != 0) {
956 /* Inject Privileged Operation fault into guest */
957 set_privileged_operation_isr (vcpu, 0);
958 privilege_op (vcpu);
959 return IA64_FAULT;
960 }
962 #endif //CHECK_FAULT
963 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
964 #ifdef CHECK_FAULT
965 set_isr_reg_nat_consumption(vcpu,0,0);
966 rnat_comsumption(vcpu);
967 return IA64_FAULT;
968 #endif //CHECK_FAULT
969 }
970 #ifdef CHECK_FAULT
971 if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
972 set_rsv_reg_field_isr(vcpu);
973 rsv_reg_field(vcpu);
974 }
975 #endif //CHECK_FAULT
976 vcpu_get_rr(vcpu,r3,&r1);
977 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
978 }
980 IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
981 {
982 u64 r3,r1;
983 #ifdef CHECK_FAULT
984 if(check_target_register(vcpu, inst.M43.r1)){
985 set_illegal_op_isr(vcpu);
986 illegal_op(vcpu);
987 return IA64_FAULT;
988 }
989 IA64_PSR vpsr;
990 vpsr.val=vmx_vcpu_get_psr(vcpu);
991 if (vpsr.cpl != 0) {
992 /* Inject Privileged Operation fault into guest */
993 set_privileged_operation_isr (vcpu, 0);
994 privilege_op (vcpu);
995 return IA64_FAULT;
996 }
998 #endif //CHECK_FAULT
999 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1000 #ifdef CHECK_FAULT
1001 set_isr_reg_nat_consumption(vcpu,0,0);
1002 rnat_comsumption(vcpu);
1003 return IA64_FAULT;
1004 #endif //CHECK_FAULT
1005 }
1006 #ifdef CHECK_FAULT
1007 if(is_reserved_indirect_register(vcpu,r3)){
1008 set_rsv_reg_field_isr(vcpu);
1009 rsv_reg_field(vcpu);
1010 return IA64_FAULT;
1011 }
1012 #endif //CHECK_FAULT
1013 vmx_vcpu_get_pkr(vcpu,r3,&r1);
1014 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1015 }
1017 IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
1018 {
1019 u64 r3,r1;
1020 #ifdef CHECK_FAULT
1021 if(check_target_register(vcpu, inst.M43.r1)){
1022 set_illegal_op_isr(vcpu);
1023 illegal_op(vcpu);
1024 return IA64_FAULT;
1025 }
1026 IA64_PSR vpsr;
1027 vpsr.val=vmx_vcpu_get_psr(vcpu);
1028 if (vpsr.cpl != 0) {
1029 /* Inject Privileged Operation fault into guest */
1030 set_privileged_operation_isr (vcpu, 0);
1031 privilege_op (vcpu);
1032 return IA64_FAULT;
1033 }
1035 #endif //CHECK_FAULT
1036 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1037 #ifdef CHECK_FAULT
1038 set_isr_reg_nat_consumption(vcpu,0,0);
1039 rnat_comsumption(vcpu);
1040 return IA64_FAULT;
1041 #endif //CHECK_FAULT
1042 }
1043 #ifdef CHECK_FAULT
1044 if(is_reserved_indirect_register(vcpu,r3)){
1045 set_rsv_reg_field_isr(vcpu);
1046 rsv_reg_field(vcpu);
1047 return IA64_FAULT;
1048 }
1049 #endif //CHECK_FAULT
1050 vmx_vcpu_get_dbr(vcpu,r3,&r1);
1051 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1052 }
1054 IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
1055 {
1056 u64 r3,r1;
1057 #ifdef CHECK_FAULT
1058 if(check_target_register(vcpu, inst.M43.r1)){
1059 set_illegal_op_isr(vcpu);
1060 illegal_op(vcpu);
1061 return IA64_FAULT;
1062 }
1063 IA64_PSR vpsr;
1064 vpsr.val=vmx_vcpu_get_psr(vcpu);
1065 if (vpsr.cpl != 0) {
1066 /* Inject Privileged Operation fault into guest */
1067 set_privileged_operation_isr (vcpu, 0);
1068 privilege_op (vcpu);
1069 return IA64_FAULT;
1070 }
1072 #endif //CHECK_FAULT
1073 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1074 #ifdef CHECK_FAULT
1075 set_isr_reg_nat_consumption(vcpu,0,0);
1076 rnat_comsumption(vcpu);
1077 return IA64_FAULT;
1078 #endif //CHECK_FAULT
1079 }
1080 #ifdef CHECK_FAULT
1081 if(is_reserved_indirect_register(vcpu,r3)){
1082 set_rsv_reg_field_isr(vcpu);
1083 rsv_reg_field(vcpu);
1084 return IA64_FAULT;
1085 }
1086 #endif //CHECK_FAULT
1087 vmx_vcpu_get_ibr(vcpu,r3,&r1);
1088 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1089 }
1091 IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
1092 {
1093 u64 r3,r1;
1094 #ifdef CHECK_FAULT
1095 if(check_target_register(vcpu, inst.M43.r1)){
1096 set_illegal_op_isr(vcpu);
1097 illegal_op(vcpu);
1098 return IA64_FAULT;
1099 }
1100 IA64_PSR vpsr;
1101 vpsr.val=vmx_vcpu_get_psr(vcpu);
1102 if (vpsr.cpl != 0) {
1103 /* Inject Privileged Operation fault into guest */
1104 set_privileged_operation_isr (vcpu, 0);
1105 privilege_op (vcpu);
1106 return IA64_FAULT;
1107 }
1109 #endif //CHECK_FAULT
1110 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1111 #ifdef CHECK_FAULT
1112 set_isr_reg_nat_consumption(vcpu,0,0);
1113 rnat_comsumption(vcpu);
1114 return IA64_FAULT;
1115 #endif //CHECK_FAULT
1116 }
1117 #ifdef CHECK_FAULT
1118 if(is_reserved_indirect_register(vcpu,r3)){
1119 set_rsv_reg_field_isr(vcpu);
1120 rsv_reg_field(vcpu);
1121 return IA64_FAULT;
1122 }
1123 #endif //CHECK_FAULT
1124 vmx_vcpu_get_pmc(vcpu,r3,&r1);
1125 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1126 }
1128 IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
1129 {
1130 u64 r3,r1;
1131 #ifdef CHECK_FAULT
1132 if(check_target_register(vcpu, inst.M43.r1)){
1133 set_illegal_op_isr(vcpu);
1134 illegal_op(vcpu);
1135 return IA64_FAULT;
1136 }
1137 #endif //CHECK_FAULT
1138 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1139 #ifdef CHECK_FAULT
1140 set_isr_reg_nat_consumption(vcpu,0,0);
1141 rnat_comsumption(vcpu);
1142 return IA64_FAULT;
1143 #endif //CHECK_FAULT
1144 }
1145 #ifdef CHECK_FAULT
1146 if(is_reserved_indirect_register(vcpu,r3)){
1147 set_rsv_reg_field_isr(vcpu);
1148 rsv_reg_field(vcpu);
1149 return IA64_FAULT;
1150 }
1151 #endif //CHECK_FAULT
1152 vmx_vcpu_get_cpuid(vcpu,r3,&r1);
1153 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1154 }
1156 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
1157 {
1158 u64 r2;
1159 extern u64 cr_igfld_mask(int index, u64 value);
1160 #ifdef CHECK_FAULT
1161 IA64_PSR vpsr;
1162 vpsr.val=vmx_vcpu_get_psr(vcpu);
1163 if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
1164 set_illegal_op_isr(vcpu);
1165 illegal_op(vcpu);
1166 return IA64_FAULT;
1167 }
1168 if ( vpsr.cpl != 0) {
1169 /* Inject Privileged Operation fault into guest */
1170 set_privileged_operation_isr (vcpu, 0);
1171 privilege_op (vcpu);
1172 return IA64_FAULT;
1173 }
1174 #endif // CHECK_FAULT
1175 if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
1176 #ifdef CHECK_FAULT
1177 set_isr_reg_nat_consumption(vcpu,0,0);
1178 rnat_comsumption(vcpu);
1179 return IA64_FAULT;
1180 #endif //CHECK_FAULT
1181 }
1182 #ifdef CHECK_FAULT
1183 if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
1184 /* Inject Reserved Register/Field fault
1185 * into guest */
1186 set_rsv_reg_field_isr (vcpu,0);
1187 rsv_reg_field (vcpu);
1188 return IA64_FAULT;
1189 }
1190 #endif //CHECK_FAULT
1191 r2 = cr_igfld_mask(inst.M32.cr3,r2);
1192 VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1193 switch (inst.M32.cr3) {
1194 case 0: return vmx_vcpu_set_dcr(vcpu,r2);
1195 case 1: return vmx_vcpu_set_itm(vcpu,r2);
1196 case 2: return vmx_vcpu_set_iva(vcpu,r2);
1197 case 8: return vmx_vcpu_set_pta(vcpu,r2);
1198 case 16:return vcpu_set_ipsr(vcpu,r2);
1199 case 17:return vcpu_set_isr(vcpu,r2);
1200 case 19:return vcpu_set_iip(vcpu,r2);
1201 case 20:return vcpu_set_ifa(vcpu,r2);
1202 case 21:return vcpu_set_itir(vcpu,r2);
1203 case 22:return vcpu_set_iipa(vcpu,r2);
1204 case 23:return vcpu_set_ifs(vcpu,r2);
1205 case 24:return vcpu_set_iim(vcpu,r2);
1206 case 25:return vcpu_set_iha(vcpu,r2);
1207 case 64:printk("SET LID to 0x%lx\n", r2);
1208 return vmx_vcpu_set_lid(vcpu,r2);
1209 case 65:return IA64_NO_FAULT;
1210 case 66:return vmx_vcpu_set_tpr(vcpu,r2);
1211 case 67:return vmx_vcpu_set_eoi(vcpu,r2);
1212 case 68:return IA64_NO_FAULT;
1213 case 69:return IA64_NO_FAULT;
1214 case 70:return IA64_NO_FAULT;
1215 case 71:return IA64_NO_FAULT;
1216 case 72:return vmx_vcpu_set_itv(vcpu,r2);
1217 case 73:return vmx_vcpu_set_pmv(vcpu,r2);
1218 case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
1219 case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
1220 case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
1221 default: return IA64_NO_FAULT;
1222 }
1223 }
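/* Helper macros for mov-from-CR emulation: read the named control register
   into val and, on success, copy it to the target general register tgt;
   otherwise return the fault code. */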
1226 #define cr_get(cr) \
1227 ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1228 vcpu_set_gr(vcpu, tgt, val,0):fault;
1230 #define vmx_cr_get(cr) \
1231 ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1232 vcpu_set_gr(vcpu, tgt, val,0):fault;
1234 IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
1235 {
1236 UINT64 tgt = inst.M33.r1;
1237 UINT64 val;
1238 IA64FAULT fault;
1239 #ifdef CHECK_FAULT
1240 IA64_PSR vpsr;
1241 vpsr.val=vmx_vcpu_get_psr(vcpu);
1242 if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
1243 (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
1244 set_illegal_op_isr(vcpu);
1245 illegal_op(vcpu);
1246 return IA64_FAULT;
1247 }
1248 if ( vpsr.cpl != 0) {
1249 /* Inject Privileged Operation fault into guest */
1250 set_privileged_operation_isr (vcpu, 0);
1251 privilege_op (vcpu);
1252 return IA64_FAULT;
1253 }
1254 #endif // CHECK_FAULT
1256 // from_cr_cnt[inst.M33.cr3]++;
1257 switch (inst.M33.cr3) {
1258 case 0: return vmx_cr_get(dcr);
1259 case 1: return vmx_cr_get(itm);
1260 case 2: return vmx_cr_get(iva);
1261 case 8: return vmx_cr_get(pta);
1262 case 16:return cr_get(ipsr);
1263 case 17:return cr_get(isr);
1264 case 19:return cr_get(iip);
1265 case 20:return cr_get(ifa);
1266 case 21:return cr_get(itir);
1267 case 22:return cr_get(iipa);
1268 case 23:return cr_get(ifs);
1269 case 24:return cr_get(iim);
1270 case 25:return cr_get(iha);
1271 case 64:return vmx_cr_get(lid);
1272 case 65:
1273 vmx_vcpu_get_ivr(vcpu,&val);
1274 return vcpu_set_gr(vcpu,tgt,val,0);
1275 case 66:return vmx_cr_get(tpr);
1276 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
1277 case 68:return vmx_cr_get(irr0);
1278 case 69:return vmx_cr_get(irr1);
1279 case 70:return vmx_cr_get(irr2);
1280 case 71:return vmx_cr_get(irr3);
1281 case 72:return vmx_cr_get(itv);
1282 case 73:return vmx_cr_get(pmv);
1283 case 74:return vmx_cr_get(cmcv);
1284 case 80:return vmx_cr_get(lrr0);
1285 case 81:return vmx_cr_get(lrr1);
1286 default: return IA64_NO_FAULT;
1287 }
1288 }
1291 static void post_emulation_action(VCPU *vcpu)
1292 {
1293 if ( vcpu->arch.irq_new_condition ) {
1294 vcpu->arch.irq_new_condition = 0;
1295 vhpi_detection(vcpu);
1296 }
1297 }
1299 //#define BYPASS_VMAL_OPCODE
1300 extern IA64_SLOT_TYPE slot_types[0x20][3];
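/* Fetch the instruction bundle at guest address iip for the current vcpu. */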
1301 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
1302 {
1303 IA64_BUNDLE bundle;
1304 fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
1305 return bundle;
1306 }
1308 /** Emulate a privileged operation.
1311 * @param vcpu virtual cpu
1312 * @cause the reason that caused the virtualization fault
1313 * @opcode the instruction opcode that caused the virtualization fault
1314 */
1316 void
1317 vmx_emulate(VCPU *vcpu, REGS *regs)
1318 {
1319 IA64FAULT status;
1320 INST64 inst;
1321 UINT64 iip, cause, opcode;
1322 iip = regs->cr_iip;
1323 cause = VMX(vcpu,cause);
1324 opcode = VMX(vcpu,opcode);
1326 /*
1327 if (privop_trace) {
1328 static long i = 400;
1329 //if (i > 0) printf("privop @%p\n",iip);
1330 if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
1331 iip,ia64_get_itc(),ia64_get_itm());
1332 i--;
1333 }
1334 */
1335 #ifdef VTLB_DEBUG
1336 check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
1337 dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
1338 #endif
1339 #if 0
1340 if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
1341 printf ("VMAL decode error: cause - %lx; op - %lx\n",
1342 cause, opcode );
1343 return;
1344 }
1345 #endif
1346 #ifdef BYPASS_VMAL_OPCODE
1347 // make a local copy of the bundle containing the privop
1348 IA64_BUNDLE bundle;
1349 int slot;
1350 IA64_SLOT_TYPE slot_type;
1351 IA64_PSR vpsr;
1352 bundle = __vmx_get_domain_bundle(iip);
1353 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
1354 if (!slot) inst.inst = bundle.slot0;
1355 else if (slot == 1)
1356 inst.inst = bundle.slot1a + (bundle.slot1b<<18);
1357 else if (slot == 2) inst.inst = bundle.slot2;
1358 else printf("priv_handle_op: illegal slot: %d\n", slot);
1359 slot_type = slot_types[bundle.template][slot];
1360 ia64_priv_decoder(slot_type, inst, &cause);
1361 if(cause==0){
1362 printf("This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
1363 panic("123456\n");
1364 }
1365 #else
1366 inst.inst=opcode;
1367 #endif /* BYPASS_VMAL_OPCODE */
1368 /*
1369 * Switch to actual virtual rid in rr0 and rr4,
1370 * which is required by some tlb related instructions.
1371 */
1372 prepare_if_physical_mode(vcpu);
1374 switch(cause) {
1375 case EVENT_RSM:
1376 status=vmx_emul_rsm(vcpu, inst);
1377 break;
1378 case EVENT_SSM:
1379 status=vmx_emul_ssm(vcpu, inst);
1380 break;
1381 case EVENT_MOV_TO_PSR:
1382 status=vmx_emul_mov_to_psr(vcpu, inst);
1383 break;
1384 case EVENT_MOV_FROM_PSR:
1385 status=vmx_emul_mov_from_psr(vcpu, inst);
1386 break;
1387 case EVENT_MOV_FROM_CR:
1388 status=vmx_emul_mov_from_cr(vcpu, inst);
1389 break;
1390 case EVENT_MOV_TO_CR:
1391 status=vmx_emul_mov_to_cr(vcpu, inst);
1392 break;
1393 case EVENT_BSW_0:
1394 status=vmx_emul_bsw0(vcpu, inst);
1395 break;
1396 case EVENT_BSW_1:
1397 status=vmx_emul_bsw1(vcpu, inst);
1398 break;
1399 case EVENT_COVER:
1400 status=vmx_emul_cover(vcpu, inst);
1401 break;
1402 case EVENT_RFI:
1403 status=vmx_emul_rfi(vcpu, inst);
1404 break;
1405 case EVENT_ITR_D:
1406 status=vmx_emul_itr_d(vcpu, inst);
1407 break;
1408 case EVENT_ITR_I:
1409 status=vmx_emul_itr_i(vcpu, inst);
1410 break;
1411 case EVENT_PTR_D:
1412 status=vmx_emul_ptr_d(vcpu, inst);
1413 break;
1414 case EVENT_PTR_I:
1415 status=vmx_emul_ptr_i(vcpu, inst);
1416 break;
1417 case EVENT_ITC_D:
1418 status=vmx_emul_itc_d(vcpu, inst);
1419 break;
1420 case EVENT_ITC_I:
1421 status=vmx_emul_itc_i(vcpu, inst);
1422 break;
1423 case EVENT_PTC_L:
1424 status=vmx_emul_ptc_l(vcpu, inst);
1425 break;
1426 case EVENT_PTC_G:
1427 status=vmx_emul_ptc_g(vcpu, inst);
1428 break;
1429 case EVENT_PTC_GA:
1430 status=vmx_emul_ptc_ga(vcpu, inst);
1431 break;
1432 case EVENT_PTC_E:
1433 status=vmx_emul_ptc_e(vcpu, inst);
1434 break;
1435 case EVENT_MOV_TO_RR:
1436 status=vmx_emul_mov_to_rr(vcpu, inst);
1437 break;
1438 case EVENT_MOV_FROM_RR:
1439 status=vmx_emul_mov_from_rr(vcpu, inst);
1440 break;
1441 case EVENT_THASH:
1442 status=vmx_emul_thash(vcpu, inst);
1443 break;
1444 case EVENT_TTAG:
1445 status=vmx_emul_ttag(vcpu, inst);
1446 break;
1447 case EVENT_TPA:
1448 status=vmx_emul_tpa(vcpu, inst);
1449 break;
1450 case EVENT_TAK:
1451 status=vmx_emul_tak(vcpu, inst);
1452 break;
1453 case EVENT_MOV_TO_AR_IMM:
1454 status=vmx_emul_mov_to_ar_imm(vcpu, inst);
1455 break;
1456 case EVENT_MOV_TO_AR:
1457 status=vmx_emul_mov_to_ar_reg(vcpu, inst);
1458 break;
1459 case EVENT_MOV_FROM_AR:
1460 status=vmx_emul_mov_from_ar_reg(vcpu, inst);
1461 break;
1462 case EVENT_MOV_TO_DBR:
1463 status=vmx_emul_mov_to_dbr(vcpu, inst);
1464 break;
1465 case EVENT_MOV_TO_IBR:
1466 status=vmx_emul_mov_to_ibr(vcpu, inst);
1467 break;
1468 case EVENT_MOV_TO_PMC:
1469 status=vmx_emul_mov_to_pmc(vcpu, inst);
1470 break;
1471 case EVENT_MOV_TO_PMD:
1472 status=vmx_emul_mov_to_pmd(vcpu, inst);
1473 break;
1474 case EVENT_MOV_TO_PKR:
1475 status=vmx_emul_mov_to_pkr(vcpu, inst);
1476 break;
1477 case EVENT_MOV_FROM_DBR:
1478 status=vmx_emul_mov_from_dbr(vcpu, inst);
1479 break;
1480 case EVENT_MOV_FROM_IBR:
1481 status=vmx_emul_mov_from_ibr(vcpu, inst);
1482 break;
1483 case EVENT_MOV_FROM_PMC:
1484 status=vmx_emul_mov_from_pmc(vcpu, inst);
1485 break;
1486 case EVENT_MOV_FROM_PKR:
1487 status=vmx_emul_mov_from_pkr(vcpu, inst);
1488 break;
1489 case EVENT_MOV_FROM_CPUID:
1490 status=vmx_emul_mov_from_cpuid(vcpu, inst);
1491 break;
1492 case EVENT_VMSW:
1493 printf ("Unimplemented instruction %ld\n", cause);
1494 status=IA64_FAULT;
1495 break;
1496 default:
1497 printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
1498 while(1);
1499 /* For an unknown cause, let the hardware re-execute the instruction */
1500 status=IA64_RETRY;
1501 break;
1502 // panic("unknown cause in virtualization intercept");
1503 };
1505 #if 0
1506 if (status == IA64_FAULT)
1507 panic("Emulation failed with cause %d:\n", cause);
1508 #endif
1510 if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
1511 vmx_vcpu_increment_iip(vcpu);
1512 }
1514 recover_if_physical_mode(vcpu);
1515 post_emulation_action (vcpu);
1516 //TODO set_irq_check(v);
1517 return;
1518 }