ia64/xen-unstable

view xen/arch/ia64/vmx_virt.c @ 5797:ca44d2dbb273

Intel's pre-bk->hg transition patches
Signed-off-by Eddie Dong <Eddie.dong@intel.com>
Signed-off-by Anthony Xu <Anthony.xu@intel.com>
Signed-off-by Kevin Tian <Kevin.tian@intel.com>
author djm@kirby.fc.hp.com
date Sat Jul 09 07:58:56 2005 -0700 (2005-07-09)
parents c91f74efda05
children a83ac0806d6b
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_virt.c:
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Fred yang (fred.yang@intel.com)
20 * Shaofan Li (Susue Li) <susie.li@intel.com>
21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
22 */
26 #include <asm/privop.h>
27 #include <asm/vmx_vcpu.h>
28 #include <asm/processor.h>
29 #include <asm/delay.h> // Debug only
30 #include <asm/vmmu.h>
31 #include <asm/vmx_mm_def.h>
32 #include <asm/smp.h>
34 #include <asm/virt_event.h>
35 extern UINT64 privop_trace;
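/*
 * ia64_priv_decoder() re-derives the virtualization-fault cause from the
 * instruction encoding: it switches on the slot type (M or B) and matches
 * the major opcode plus the x2/x3/x4/x6 extension fields against the
 * privileged and virtualization-sensitive instructions, writing the
 * corresponding EVENT_* code into *cause. It is used on the
 * BYPASS_VMAL_OPCODE path of vmx_emulate() below.
 */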
37 void
38 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause)
39 {
40 *cause=0;
41 switch (slot_type) {
42 case M:
43 if (inst.generic.major==0){
44 if(inst.M28.x3==0){
45 if(inst.M44.x4==6){
46 *cause=EVENT_SSM;
47 }else if(inst.M44.x4==7){
48 *cause=EVENT_RSM;
49 }else if(inst.M30.x4==8&&inst.M30.x2==2){
50 *cause=EVENT_MOV_TO_AR_IMM;
51 }
52 }
53 }
54 else if(inst.generic.major==1){
55 if(inst.M28.x3==0){
56 if(inst.M32.x6==0x2c){
57 *cause=EVENT_MOV_TO_CR;
58 }else if(inst.M33.x6==0x24){
59 *cause=EVENT_MOV_FROM_CR;
60 }else if(inst.M35.x6==0x2d){
61 *cause=EVENT_MOV_TO_PSR;
62 }else if(inst.M36.x6==0x25){
63 *cause=EVENT_MOV_FROM_PSR;
64 }else if(inst.M29.x6==0x2A){
65 *cause=EVENT_MOV_TO_AR;
66 }else if(inst.M31.x6==0x22){
67 *cause=EVENT_MOV_FROM_AR;
68 }else if(inst.M45.x6==0x09){
69 *cause=EVENT_PTC_L;
70 }else if(inst.M45.x6==0x0A){
71 *cause=EVENT_PTC_G;
72 }else if(inst.M45.x6==0x0B){
73 *cause=EVENT_PTC_GA;
74 }else if(inst.M45.x6==0x0C){
75 *cause=EVENT_PTR_D;
76 }else if(inst.M45.x6==0x0D){
77 *cause=EVENT_PTR_I;
78 }else if(inst.M46.x6==0x1A){
79 *cause=EVENT_THASH;
80 }else if(inst.M46.x6==0x1B){
81 *cause=EVENT_TTAG;
82 }else if(inst.M46.x6==0x1E){
83 *cause=EVENT_TPA;
84 }else if(inst.M46.x6==0x1F){
85 *cause=EVENT_TAK;
86 }else if(inst.M47.x6==0x34){
87 *cause=EVENT_PTC_E;
88 }else if(inst.M41.x6==0x2E){
89 *cause=EVENT_ITC_D;
90 }else if(inst.M41.x6==0x2F){
91 *cause=EVENT_ITC_I;
92 }else if(inst.M42.x6==0x00){
93 *cause=EVENT_MOV_TO_RR;
94 }else if(inst.M42.x6==0x01){
95 *cause=EVENT_MOV_TO_DBR;
96 }else if(inst.M42.x6==0x02){
97 *cause=EVENT_MOV_TO_IBR;
98 }else if(inst.M42.x6==0x03){
99 *cause=EVENT_MOV_TO_PKR;
100 }else if(inst.M42.x6==0x04){
101 *cause=EVENT_MOV_TO_PMC;
102 }else if(inst.M42.x6==0x05){
103 *cause=EVENT_MOV_TO_PMD;
104 }else if(inst.M42.x6==0x0E){
105 *cause=EVENT_ITR_D;
106 }else if(inst.M42.x6==0x0F){
107 *cause=EVENT_ITR_I;
108 }else if(inst.M43.x6==0x10){
109 *cause=EVENT_MOV_FROM_RR;
110 }else if(inst.M43.x6==0x11){
111 *cause=EVENT_MOV_FROM_DBR;
112 }else if(inst.M43.x6==0x12){
113 *cause=EVENT_MOV_FROM_IBR;
114 }else if(inst.M43.x6==0x13){
115 *cause=EVENT_MOV_FROM_PKR;
116 }else if(inst.M43.x6==0x14){
117 *cause=EVENT_MOV_FROM_PMC;
118 /*
119 }else if(inst.M43.x6==0x15){
120 *cause=EVENT_MOV_FROM_PMD;
121 */
122 }else if(inst.M43.x6==0x17){
123 *cause=EVENT_MOV_FROM_CPUID;
124 }
125 }
126 }
127 break;
128 case B:
129 if(inst.generic.major==0){
130 if(inst.B8.x6==0x02){
131 *cause=EVENT_COVER;
132 }else if(inst.B8.x6==0x08){
133 *cause=EVENT_RFI;
134 }else if(inst.B8.x6==0x0c){
135 *cause=EVENT_BSW_0;
136 }else if(inst.B8.x6==0x0d){
137 *cause=EVENT_BSW_1;
138 }
139 }
140 }
141 }
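/*
 * rsm/ssm (M44 format) split their 24-bit mask across the i, i2 and imm
 * fields; the two handlers below reassemble imm24 and hand it to
 * vmx_vcpu_reset_psr_sm()/vmx_vcpu_set_psr_sm() to update the virtual PSR.
 */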
143 IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
144 {
145 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
146 return vmx_vcpu_reset_psr_sm(vcpu,imm24);
147 }
149 IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
150 {
151 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
152 return vmx_vcpu_set_psr_sm(vcpu,imm24);
153 }
155 unsigned long last_guest_psr = 0x0;
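/*
 * mov-from-psr: only the PSR bits selected by MASK(0, 32) and MASK(35, 2)
 * are passed back to the guest; the value is also recorded in
 * last_guest_psr for the (currently disabled) consistency check in
 * vmx_emul_mov_to_psr() below.
 */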
156 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
157 {
158 UINT64 tgt = inst.M33.r1;
159 UINT64 val;
160 IA64FAULT fault;
162 /*
163 if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
164 return vmx_vcpu_set_gr(vcpu, tgt, val);
165 else return fault;
166 */
167 val = vmx_vcpu_get_psr(vcpu);
168 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
169 last_guest_psr = val;
170 return vmx_vcpu_set_gr(vcpu, tgt, val, 0);
171 }
173 /**
174 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
175 */
176 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
177 {
178 UINT64 val;
179 IA64FAULT fault;
180 if(vmx_vcpu_get_gr(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
181 panic(" get_psr nat bit fault\n");
183 val = (val & MASK(0, 32)) | (VMX_VPD(vcpu, vpsr) & MASK(32, 32));
184 #if 0
185 if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
186 while(1);
187 else
188 last_mov_from_psr = 0;
189 #endif
190 return vmx_vcpu_set_psr_l(vcpu,val);
191 }
194 /**************************************************************************
195 Privileged operation emulation routines
196 **************************************************************************/
198 IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
199 {
200 IA64_PSR vpsr;
201 REGS *regs;
202 #ifdef CHECK_FAULT
203 vpsr.val=vmx_vcpu_get_psr(vcpu);
204 if ( vpsr.cpl != 0) {
205 /* Inject Privileged Operation fault into guest */
206 set_privileged_operation_isr (vcpu, 0);
207 privilege_op (vcpu);
208 return IA64_FAULT;
209 }
210 #endif // CHECK_FAULT
211 regs=vcpu_regs(vcpu);
212 vpsr.val=regs->cr_ipsr;
213 if ( vpsr.is == 1 ) {
214 panic ("We do not support IA32 instruction yet");
215 }
217 return vmx_vcpu_rfi(vcpu);
218 }
220 IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
221 {
222 #ifdef CHECK_FAULT
223 IA64_PSR vpsr;
224 vpsr.val=vmx_vcpu_get_psr(vcpu);
225 if ( vpsr.cpl != 0) {
226 /* Inject Privileged Operation fault into guest */
227 set_privileged_operation_isr (vcpu, 0);
228 privilege_op (vcpu);
229 return IA64_FAULT;
230 }
231 #endif // CHECK_FAULT
232 return vmx_vcpu_bsw0(vcpu);
233 }
235 IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
236 {
237 #ifdef CHECK_FAULT
238 IA64_PSR vpsr;
239 vpsr.val=vmx_vcpu_get_psr(vcpu);
240 if ( vpsr.cpl != 0) {
241 /* Inject Privileged Operation fault into guest */
242 set_privileged_operation_isr (vcpu, 0);
243 privilege_op (vcpu);
244 return IA64_FAULT;
245 }
246 #endif // CHECK_FAULT
247 return vmx_vcpu_bsw1(vcpu);
248 }
250 IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
251 {
252 return vmx_vcpu_cover(vcpu);
253 }
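/*
 * ptc.l: r3 carries the purge address and r2 the purge size, extracted
 * with bits(r2,2,7); ptc.g and ptc.ga are funnelled through this same
 * handler further down.
 */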
255 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
256 {
257 u64 r2,r3;
258 ISR isr;
259 IA64_PSR vpsr;
261 vpsr.val=vmx_vcpu_get_psr(vcpu);
262 if ( vpsr.cpl != 0) {
263 /* Inject Privileged Operation fault into guest */
264 set_privileged_operation_isr (vcpu, 0);
265 privilege_op (vcpu);
266 return IA64_FAULT;
267 }
268 if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&r2)){
269 #ifdef VMAL_NO_FAULT_CHECK
270 set_isr_reg_nat_consumption(vcpu,0,0);
271 rnat_comsumption(vcpu);
272 return IA64_FAULT;
273 #endif // VMAL_NO_FAULT_CHECK
274 }
275 #ifdef VMAL_NO_FAULT_CHECK
276 if (unimplemented_gva(vcpu,r3) ) {
277 isr.val = set_isr_ei_ni(vcpu);
278 isr.code = IA64_RESERVED_REG_FAULT;
279 vcpu_set_isr(vcpu, isr.val);
280 unimpl_daddr(vcpu);
281 return IA64_FAULT;
282 }
283 #endif // VMAL_NO_FAULT_CHECK
284 return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
285 }
287 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
288 {
289 u64 r3;
290 ISR isr;
291 IA64_PSR vpsr;
293 vpsr.val=vmx_vcpu_get_psr(vcpu);
294 #ifdef VMAL_NO_FAULT_CHECK
295 if ( vpsr.cpl != 0) {
296 /* Inject Privileged Operation fault into guest */
297 set_privileged_operation_isr (vcpu, 0);
298 privilege_op (vcpu);
299 return IA64_FAULT;
300 }
301 #endif // VMAL_NO_FAULT_CHECK
302 if(vmx_vcpu_get_gr(vcpu,inst.M47.r3,&r3)){
303 #ifdef VMAL_NO_FAULT_CHECK
304 set_isr_reg_nat_consumption(vcpu,0,0);
305 rnat_comsumption(vcpu);
306 return IA64_FAULT;
307 #endif // VMAL_NO_FAULT_CHECK
308 }
309 return vmx_vcpu_ptc_e(vcpu,r3);
310 }
312 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
313 {
314 return vmx_emul_ptc_l(vcpu, inst);
315 }
317 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
318 {
319 return vmx_emul_ptc_l(vcpu, inst);
320 }
322 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
323 {
324 ISR isr;
325 IA64FAULT ret1, ret2;
327 #ifdef VMAL_NO_FAULT_CHECK
328 IA64_PSR vpsr;
329 vpsr.val=vmx_vcpu_get_psr(vcpu);
330 if ( vpsr.cpl != 0) {
331 /* Inject Privileged Operation fault into guest */
332 set_privileged_operation_isr (vcpu, 0);
333 privilege_op (vcpu);
334 return IA64_FAULT;
335 }
336 #endif // VMAL_NO_FAULT_CHECK
337 ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r3,pr3);
338 ret2 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pr2);
339 #ifdef VMAL_NO_FAULT_CHECK
340 if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
341 set_isr_reg_nat_consumption(vcpu,0,0);
342 rnat_comsumption(vcpu);
343 return IA64_FAULT;
344 }
345 if (unimplemented_gva(vcpu,*pr3) ) {
346 isr.val = set_isr_ei_ni(vcpu);
347 isr.code = IA64_RESERVED_REG_FAULT;
348 vcpu_set_isr(vcpu, isr.val);
349 unimpl_daddr(vcpu);
350 return IA64_FAULT;
351 }
352 #endif // VMAL_NO_FAULT_CHECK
353 return IA64_NO_FAULT;
354 }
356 IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
357 {
358 u64 r2,r3;
359 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
360 return IA64_FAULT;
361 return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
362 }
364 IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
365 {
366 u64 r2,r3;
367 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
368 return IA64_FAULT;
369 return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
370 }
373 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
374 {
375 u64 r1,r3;
376 ISR visr;
377 IA64_PSR vpsr;
378 #ifdef CHECK_FAULT
379 if(check_target_register(vcpu, inst.M46.r1)){
380 set_illegal_op_isr(vcpu);
381 illegal_op(vcpu);
382 return IA64_FAULT;
383 }
384 #endif //CHECK_FAULT
385 if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
386 #ifdef CHECK_FAULT
387 vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
388 return IA64_NO_FAULT;
389 #endif //CHECK_FAULT
390 }
391 #ifdef CHECK_FAULT
392 if(unimplemented_gva(vcpu, r3)){
393 vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
394 return IA64_NO_FAULT;
395 }
396 #endif //CHECK_FAULT
397 vmx_vcpu_thash(vcpu, r3, &r1);
398 vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
399 return(IA64_NO_FAULT);
400 }
403 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
404 {
405 u64 r1,r3;
406 ISR visr;
407 IA64_PSR vpsr;
408 #ifdef CHECK_FAULT
409 if(check_target_register(vcpu, inst.M46.r1)){
410 set_illegal_op_isr(vcpu);
411 illegal_op(vcpu);
412 return IA64_FAULT;
413 }
414 #endif //CHECK_FAULT
415 if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
416 #ifdef CHECK_FAULT
417 vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
418 return IA64_NO_FAULT;
419 #endif //CHECK_FAULT
420 }
421 #ifdef CHECK_FAULT
422 if(unimplemented_gva(vcpu, r3)){
423 vmx_vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
424 return IA64_NO_FAULT;
425 }
426 #endif //CHECK_FAULT
427 vmx_vcpu_ttag(vcpu, r3, &r1);
428 vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
429 return(IA64_NO_FAULT);
430 }
433 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
434 {
435 u64 r1,r3;
436 ISR visr;
437 #ifdef CHECK_FAULT
438 if(check_target_register(vcpu, inst.M46.r1)){
439 set_illegal_op_isr(vcpu);
440 illegal_op(vcpu);
441 return IA64_FAULT;
442 }
443 IA64_PSR vpsr;
444 vpsr.val=vmx_vcpu_get_psr(vcpu);
445 if(vpsr.cpl!=0){
446 visr.val=0;
447 vcpu_set_isr(vcpu, visr.val);
448 return IA64_FAULT;
449 }
450 #endif //CHECK_FAULT
451 if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
452 #ifdef CHECK_FAULT
453 set_isr_reg_nat_consumption(vcpu,0,1);
454 rnat_comsumption(vcpu);
455 return IA64_FAULT;
456 #endif //CHECK_FAULT
457 }
458 #ifdef CHECK_FAULT
459 if (unimplemented_gva(vcpu,r3) ) {
460 // inject unimplemented_data_address_fault
461 visr.val = set_isr_ei_ni(vcpu);
462 visr.code = IA64_RESERVED_REG_FAULT;
463 vcpu_set_isr(vcpu, visr.val);
464 // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
465 unimpl_daddr(vcpu);
466 return IA64_FAULT;
467 }
468 #endif //CHECK_FAULT
470 if(vmx_vcpu_tpa(vcpu, r3, &r1)){
471 return IA64_FAULT;
472 }
473 vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
474 return(IA64_NO_FAULT);
475 }
477 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
478 {
479 u64 r1,r3;
480 ISR visr;
481 IA64_PSR vpsr;
482 int fault=IA64_NO_FAULT;
483 #ifdef CHECK_FAULT
484 visr.val=0;
485 if(check_target_register(vcpu, inst.M46.r1)){
486 set_illegal_op_isr(vcpu);
487 illegal_op(vcpu);
488 return IA64_FAULT;
489 }
490 vpsr.val=vmx_vcpu_get_psr(vcpu);
491 if(vpsr.cpl!=0){
492 vcpu_set_isr(vcpu, visr.val);
493 return IA64_FAULT;
494 }
495 #endif
496 if(vmx_vcpu_get_gr(vcpu, inst.M46.r3, &r3)){
497 #ifdef CHECK_FAULT
498 set_isr_reg_nat_consumption(vcpu,0,1);
499 rnat_comsumption(vcpu);
500 return IA64_FAULT;
501 #endif
502 }
503 if(vmx_vcpu_tak(vcpu, r3, &r1)){
504 return IA64_FAULT;
505 }
506 vmx_vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
507 return(IA64_NO_FAULT);
508 }
511 /************************************
512 * Insert translation register/cache
513 ************************************/
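/*
 * itr.d/itr.i: the TR slot number is read from r3 and the PTE from r2,
 * while the page size and the virtual address come from the virtual
 * cr.itir and cr.ifa, mirroring the architectural insertion sequence.
 */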
515 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
516 {
517 UINT64 fault, itir, ifa, pte, slot;
518 ISR isr;
519 IA64_PSR vpsr;
520 vpsr.val=vmx_vcpu_get_psr(vcpu);
521 if ( vpsr.ic ) {
522 set_illegal_op_isr(vcpu);
523 illegal_op(vcpu);
524 return IA64_FAULT;
525 }
526 #ifdef VMAL_NO_FAULT_CHECK
527 if ( vpsr.cpl != 0) {
528 /* Inject Privileged Operation fault into guest */
529 set_privileged_operation_isr (vcpu, 0);
530 privilege_op (vcpu);
531 return IA64_FAULT;
532 }
533 #endif // VMAL_NO_FAULT_CHECK
534 if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
535 #ifdef VMAL_NO_FAULT_CHECK
536 set_isr_reg_nat_consumption(vcpu,0,0);
537 rnat_comsumption(vcpu);
538 return IA64_FAULT;
539 #endif // VMAL_NO_FAULT_CHECK
540 }
541 #ifdef VMAL_NO_FAULT_CHECK
542 if(is_reserved_rr_register(vcpu, slot)){
543 set_illegal_op_isr(vcpu);
544 illegal_op(vcpu);
545 return IA64_FAULT;
546 }
547 #endif // VMAL_NO_FAULT_CHECK
549 if (vmx_vcpu_get_itir(vcpu,&itir)){
550 return(IA64_FAULT);
551 }
552 if (vmx_vcpu_get_ifa(vcpu,&ifa)){
553 return(IA64_FAULT);
554 }
555 #ifdef VMAL_NO_FAULT_CHECK
556 if (is_reserved_itir_field(vcpu, itir)) {
557 // TODO
558 return IA64_FAULT;
559 }
560 if (unimplemented_gva(vcpu,ifa) ) {
561 isr.val = set_isr_ei_ni(vcpu);
562 isr.code = IA64_RESERVED_REG_FAULT;
563 vcpu_set_isr(vcpu, isr.val);
564 unimpl_daddr(vcpu);
565 return IA64_FAULT;
566 }
567 #endif // VMAL_NO_FAULT_CHECK
569 return (vmx_vcpu_itr_d(vcpu,pte,itir,ifa,slot));
570 }
572 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
573 {
574 UINT64 fault, itir, ifa, pte, slot;
575 ISR isr;
576 IA64_PSR vpsr;
577 vpsr.val=vmx_vcpu_get_psr(vcpu);
578 if ( vpsr.ic ) {
579 set_illegal_op_isr(vcpu);
580 illegal_op(vcpu);
581 return IA64_FAULT;
582 }
583 #ifdef VMAL_NO_FAULT_CHECK
584 if ( vpsr.cpl != 0) {
585 /* Inject Privileged Operation fault into guest */
586 set_privileged_operation_isr (vcpu, 0);
587 privilege_op (vcpu);
588 return IA64_FAULT;
589 }
590 #endif // VMAL_NO_FAULT_CHECK
591 if(vmx_vcpu_get_gr(vcpu,inst.M45.r3,&slot)||vmx_vcpu_get_gr(vcpu,inst.M45.r2,&pte)){
592 #ifdef VMAL_NO_FAULT_CHECK
593 set_isr_reg_nat_consumption(vcpu,0,0);
594 rnat_comsumption(vcpu);
595 return IA64_FAULT;
596 #endif // VMAL_NO_FAULT_CHECK
597 }
598 #ifdef VMAL_NO_FAULT_CHECK
599 if(is_reserved_rr_register(vcpu, slot)){
600 set_illegal_op_isr(vcpu);
601 illegal_op(vcpu);
602 return IA64_FAULT;
603 }
604 #endif // VMAL_NO_FAULT_CHECK
606 if (vmx_vcpu_get_itir(vcpu,&itir)){
607 return(IA64_FAULT);
608 }
609 if (vmx_vcpu_get_ifa(vcpu,&ifa)){
610 return(IA64_FAULT);
611 }
612 #ifdef VMAL_NO_FAULT_CHECK
613 if (is_reserved_itir_field(vcpu, itir)) {
614 // TODO
615 return IA64_FAULT;
616 }
617 if (unimplemented_gva(vcpu,ifa) ) {
618 isr.val = set_isr_ei_ni(vcpu);
619 isr.code = IA64_RESERVED_REG_FAULT;
620 vcpu_set_isr(vcpu, isr.val);
621 unimpl_daddr(vcpu);
622 return IA64_FAULT;
623 }
624 #endif // VMAL_NO_FAULT_CHECK
626 return (vmx_vcpu_itr_i(vcpu,pte,itir,ifa,slot));
627 }
629 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
630 {
631 UINT64 fault;
632 ISR isr;
633 IA64_PSR vpsr;
634 IA64FAULT ret1;
636 vpsr.val=vmx_vcpu_get_psr(vcpu);
637 if ( vpsr.ic ) {
638 set_illegal_op_isr(vcpu);
639 illegal_op(vcpu);
640 return IA64_FAULT;
641 }
643 #ifdef VMAL_NO_FAULT_CHECK
644 if ( vpsr.cpl != 0) {
645 /* Inject Privileged Operation fault into guest */
646 set_privileged_operation_isr (vcpu, 0);
647 privilege_op (vcpu);
648 return IA64_FAULT;
649 }
650 #endif // VMAL_NO_FAULT_CHECK
651 ret1 = vmx_vcpu_get_gr(vcpu,inst.M45.r2,pte);
652 #ifdef VMAL_NO_FAULT_CHECK
653 if( ret1 != IA64_NO_FAULT ){
654 set_isr_reg_nat_consumption(vcpu,0,0);
655 rnat_comsumption(vcpu);
656 return IA64_FAULT;
657 }
658 #endif // VMAL_NO_FAULT_CHECK
660 if (vmx_vcpu_get_itir(vcpu,itir)){
661 return(IA64_FAULT);
662 }
663 if (vmx_vcpu_get_ifa(vcpu,ifa)){
664 return(IA64_FAULT);
665 }
666 #ifdef VMAL_NO_FAULT_CHECK
667 if (unimplemented_gva(vcpu,*ifa) ) {
668 isr.val = set_isr_ei_ni(vcpu);
669 isr.code = IA64_RESERVED_REG_FAULT;
670 vcpu_set_isr(vcpu, isr.val);
671 unimpl_daddr(vcpu);
672 return IA64_FAULT;
673 }
674 #endif // VMAL_NO_FAULT_CHECK
675 return IA64_NO_FAULT;
676 }
678 IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
679 {
680 UINT64 itir, ifa, pte;
682 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
683 return IA64_FAULT;
684 }
686 return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
687 }
689 IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
690 {
691 UINT64 itir, ifa, pte;
693 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
694 return IA64_FAULT;
695 }
697 return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
699 }
701 /*************************************
702 * Moves to semi-privileged registers
703 *************************************/
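/*
 * Only accesses to ar.itc (application register 44) are handled here;
 * any other AR number is rejected with panic() below.
 */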
705 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
706 {
707 // I27 and M30 are identical for these fields
708 if(inst.M30.ar3!=44){
709 panic("Can't support ar register other than itc");
710 }
711 #ifdef CHECK_FAULT
712 IA64_PSR vpsr;
713 vpsr.val=vmx_vcpu_get_psr(vcpu);
714 if ( vpsr.cpl != 0) {
715 /* Inject Privileged Operation fault into guest */
716 set_privileged_operation_isr (vcpu, 0);
717 privilege_op (vcpu);
718 return IA64_FAULT;
719 }
720 #endif // CHECK_FAULT
721 UINT64 imm;
722 if(inst.M30.s){
723 imm = -inst.M30.imm;
724 }else{
725 imm = inst.M30.imm;
726 }
727 return (vmx_vcpu_set_itc(vcpu, imm));
728 }
730 IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
731 {
732 // I26 and M29 are identical for these fields
733 u64 r2;
734 if(inst.M29.ar3!=44){
735 panic("Can't support ar register other than itc");
736 }
737 if(vmx_vcpu_get_gr(vcpu,inst.M29.r2,&r2)){
738 #ifdef CHECK_FAULT
739 set_isr_reg_nat_consumption(vcpu,0,0);
740 rnat_comsumption(vcpu);
741 return IA64_FAULT;
742 #endif //CHECK_FAULT
743 }
744 #ifdef CHECK_FAULT
745 IA64_PSR vpsr;
746 vpsr.val=vmx_vcpu_get_psr(vcpu);
747 if ( vpsr.cpl != 0) {
748 /* Inject Privileged Operation fault into guest */
749 set_privileged_operation_isr (vcpu, 0);
750 privilege_op (vcpu);
751 return IA64_FAULT;
752 }
753 #endif // CHECK_FAULT
754 return (vmx_vcpu_set_itc(vcpu, r2));
755 }
758 IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
759 {
760 // I28 and M31 are identical for these fields
761 if(inst.M31.ar3!=44){
762 panic("Can't support ar register other than itc");
763 }
764 #ifdef CHECK_FAULT
765 if(check_target_register(vcpu,inst.M31.r1)){
766 set_illegal_op_isr(vcpu);
767 illegal_op(vcpu);
768 return IA64_FAULT;
769 }
770 IA64_PSR vpsr;
771 vpsr.val=vmx_vcpu_get_psr(vcpu);
772 if (vpsr.si&& vpsr.cpl != 0) {
773 /* Inject Privileged Operation fault into guest */
774 set_privileged_operation_isr (vcpu, 0);
775 privilege_op (vcpu);
776 return IA64_FAULT;
777 }
778 #endif // CHECK_FAULT
779 u64 r1;
780 vmx_vcpu_get_itc(vcpu,&r1);
781 vmx_vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
782 return IA64_NO_FAULT;
783 }
786 /********************************
787 * Moves to privileged registers
788 ********************************/
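/*
 * The handlers below share one template (M42 format): the register index
 * is read from GR r3 and the new value from GR r2, NaT and privilege
 * checks are applied under CHECK_FAULT, and the write is forwarded to the
 * matching vmx_vcpu_set_* routine.
 */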
790 IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
791 {
792 u64 r3,r2;
793 #ifdef CHECK_FAULT
794 IA64_PSR vpsr;
795 vpsr.val=vmx_vcpu_get_psr(vcpu);
796 if (vpsr.cpl != 0) {
797 /* Inject Privileged Operation fault into guest */
798 set_privileged_operation_isr (vcpu, 0);
799 privilege_op (vcpu);
800 return IA64_FAULT;
801 }
802 #endif // CHECK_FAULT
803 if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
804 #ifdef CHECK_FAULT
805 set_isr_reg_nat_consumption(vcpu,0,0);
806 rnat_comsumption(vcpu);
807 return IA64_FAULT;
808 #endif //CHECK_FAULT
809 }
810 return (vmx_vcpu_set_pkr(vcpu,r3,r2));
811 }
813 IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
814 {
815 u64 r3,r2;
816 #ifdef CHECK_FAULT
817 IA64_PSR vpsr;
818 vpsr.val=vmx_vcpu_get_psr(vcpu);
819 if (vpsr.cpl != 0) {
820 /* Inject Privileged Operation fault into guest */
821 set_privileged_operation_isr (vcpu, 0);
822 privilege_op (vcpu);
823 return IA64_FAULT;
824 }
825 #endif // CHECK_FAULT
826 if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
827 #ifdef CHECK_FAULT
828 set_isr_reg_nat_consumption(vcpu,0,0);
829 rnat_comsumption(vcpu);
830 return IA64_FAULT;
831 #endif //CHECK_FAULT
832 }
833 return (vmx_vcpu_set_rr(vcpu,r3,r2));
834 }
836 IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
837 {
838 u64 r3,r2;
839 #ifdef CHECK_FAULT
840 IA64_PSR vpsr;
841 vpsr.val=vmx_vcpu_get_psr(vcpu);
842 if (vpsr.cpl != 0) {
843 /* Inject Privileged Operation fault into guest */
844 set_privileged_operation_isr (vcpu, 0);
845 privilege_op (vcpu);
846 return IA64_FAULT;
847 }
848 #endif // CHECK_FAULT
849 if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
850 #ifdef CHECK_FAULT
851 set_isr_reg_nat_consumption(vcpu,0,0);
852 rnat_comsumption(vcpu);
853 return IA64_FAULT;
854 #endif //CHECK_FAULT
855 }
856 return (vmx_vcpu_set_dbr(vcpu,r3,r2));
857 }
859 IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
860 {
861 u64 r3,r2;
862 #ifdef CHECK_FAULT
863 IA64_PSR vpsr;
864 vpsr.val=vmx_vcpu_get_psr(vcpu);
865 if (vpsr.cpl != 0) {
866 /* Inject Privileged Operation fault into guest */
867 set_privileged_operation_isr (vcpu, 0);
868 privilege_op (vcpu);
869 return IA64_FAULT;
870 }
871 #endif // CHECK_FAULT
872 if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
873 #ifdef CHECK_FAULT
874 set_isr_reg_nat_consumption(vcpu,0,0);
875 rnat_comsumption(vcpu);
876 return IA64_FAULT;
877 #endif //CHECK_FAULT
878 }
879 return (vmx_vcpu_set_ibr(vcpu,r3,r2));
880 }
882 IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
883 {
884 u64 r3,r2;
885 #ifdef CHECK_FAULT
886 IA64_PSR vpsr;
887 vpsr.val=vmx_vcpu_get_psr(vcpu);
888 if (vpsr.cpl != 0) {
889 /* Inject Privileged Operation fault into guest */
890 set_privileged_operation_isr (vcpu, 0);
891 privilege_op (vcpu);
892 return IA64_FAULT;
893 }
894 #endif // CHECK_FAULT
895 if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
896 #ifdef CHECK_FAULT
897 set_isr_reg_nat_consumption(vcpu,0,0);
898 rnat_comsumption(vcpu);
899 return IA64_FAULT;
900 #endif //CHECK_FAULT
901 }
902 return (vmx_vcpu_set_pmc(vcpu,r3,r2));
903 }
905 IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
906 {
907 u64 r3,r2;
908 #ifdef CHECK_FAULT
909 IA64_PSR vpsr;
910 vpsr.val=vmx_vcpu_get_psr(vcpu);
911 if (vpsr.cpl != 0) {
912 /* Inject Privileged Operation fault into guest */
913 set_privileged_operation_isr (vcpu, 0);
914 privilege_op (vcpu);
915 return IA64_FAULT;
916 }
917 #endif // CHECK_FAULT
918 if(vmx_vcpu_get_gr(vcpu,inst.M42.r3,&r3)||vmx_vcpu_get_gr(vcpu,inst.M42.r2,&r2)){
919 #ifdef CHECK_FAULT
920 set_isr_reg_nat_consumption(vcpu,0,0);
921 rnat_comsumption(vcpu);
922 return IA64_FAULT;
923 #endif //CHECK_FAULT
924 }
925 return (vmx_vcpu_set_pmd(vcpu,r3,r2));
926 }
929 /**********************************
930 * Moves from privileged registers
931 **********************************/
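/*
 * The mov-from handlers below follow a single template (M43 format): the
 * index is read from GR r3, validated under CHECK_FAULT, the indirect
 * register is read through the matching vmx_vcpu_get_* routine, and the
 * result is written to GR r1.
 */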
933 IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
934 {
935 u64 r3,r1;
936 #ifdef CHECK_FAULT
937 if(check_target_register(vcpu, inst.M43.r1)){
938 set_illegal_op_isr(vcpu);
939 illegal_op(vcpu);
940 return IA64_FAULT;
941 }
942 IA64_PSR vpsr;
943 vpsr.val=vmx_vcpu_get_psr(vcpu);
944 if (vpsr.cpl != 0) {
945 /* Inject Privileged Operation fault into guest */
946 set_privileged_operation_isr (vcpu, 0);
947 privilege_op (vcpu);
948 return IA64_FAULT;
949 }
951 #endif //CHECK_FAULT
952 if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
953 #ifdef CHECK_FAULT
954 set_isr_reg_nat_consumption(vcpu,0,0);
955 rnat_comsumption(vcpu);
956 return IA64_FAULT;
957 #endif //CHECK_FAULT
958 }
959 #ifdef CHECK_FAULT
960 if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
961 set_rsv_reg_field_isr(vcpu);
962 rsv_reg_field(vcpu);
963 }
964 #endif //CHECK_FAULT
965 vmx_vcpu_get_rr(vcpu,r3,&r1);
966 return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
967 }
969 IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
970 {
971 u64 r3,r1;
972 #ifdef CHECK_FAULT
973 if(check_target_register(vcpu, inst.M43.r1)){
974 set_illegal_op_isr(vcpu);
975 illegal_op(vcpu);
976 return IA64_FAULT;
977 }
978 IA64_PSR vpsr;
979 vpsr.val=vmx_vcpu_get_psr(vcpu);
980 if (vpsr.cpl != 0) {
981 /* Inject Privileged Operation fault into guest */
982 set_privileged_operation_isr (vcpu, 0);
983 privilege_op (vcpu);
984 return IA64_FAULT;
985 }
987 #endif //CHECK_FAULT
988 if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
989 #ifdef CHECK_FAULT
990 set_isr_reg_nat_consumption(vcpu,0,0);
991 rnat_comsumption(vcpu);
992 return IA64_FAULT;
993 #endif //CHECK_FAULT
994 }
995 #ifdef CHECK_FAULT
996 if(is_reserved_indirect_register(vcpu,r3)){
997 set_rsv_reg_field_isr(vcpu);
998 rsv_reg_field(vcpu);
999 return IA64_FAULT;
1000 }
1001 #endif //CHECK_FAULT
1002 vmx_vcpu_get_pkr(vcpu,r3,&r1);
1003 return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1004 }
1006 IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
1007 {
1008 u64 r3,r1;
1009 #ifdef CHECK_FAULT
1010 if(check_target_register(vcpu, inst.M43.r1)){
1011 set_illegal_op_isr(vcpu);
1012 illegal_op(vcpu);
1013 return IA64_FAULT;
1014 }
1015 IA64_PSR vpsr;
1016 vpsr.val=vmx_vcpu_get_psr(vcpu);
1017 if (vpsr.cpl != 0) {
1018 /* Inject Privileged Operation fault into guest */
1019 set_privileged_operation_isr (vcpu, 0);
1020 privilege_op (vcpu);
1021 return IA64_FAULT;
1022 }
1024 #endif //CHECK_FAULT
1025 if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
1026 #ifdef CHECK_FAULT
1027 set_isr_reg_nat_consumption(vcpu,0,0);
1028 rnat_comsumption(vcpu);
1029 return IA64_FAULT;
1030 #endif //CHECK_FAULT
1031 }
1032 #ifdef CHECK_FAULT
1033 if(is_reserved_indirect_register(vcpu,r3)){
1034 set_rsv_reg_field_isr(vcpu);
1035 rsv_reg_field(vcpu);
1036 return IA64_FAULT;
1037 }
1038 #endif //CHECK_FAULT
1039 vmx_vcpu_get_dbr(vcpu,r3,&r1);
1040 return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1041 }
1043 IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
1044 {
1045 u64 r3,r1;
1046 #ifdef CHECK_FAULT
1047 if(check_target_register(vcpu, inst.M43.r1)){
1048 set_illegal_op_isr(vcpu);
1049 illegal_op(vcpu);
1050 return IA64_FAULT;
1051 }
1052 IA64_PSR vpsr;
1053 vpsr.val=vmx_vcpu_get_psr(vcpu);
1054 if (vpsr.cpl != 0) {
1055 /* Inject Privileged Operation fault into guest */
1056 set_privileged_operation_isr (vcpu, 0);
1057 privilege_op (vcpu);
1058 return IA64_FAULT;
1059 }
1061 #endif //CHECK_FAULT
1062 if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
1063 #ifdef CHECK_FAULT
1064 set_isr_reg_nat_consumption(vcpu,0,0);
1065 rnat_comsumption(vcpu);
1066 return IA64_FAULT;
1067 #endif //CHECK_FAULT
1068 }
1069 #ifdef CHECK_FAULT
1070 if(is_reserved_indirect_register(vcpu,r3)){
1071 set_rsv_reg_field_isr(vcpu);
1072 rsv_reg_field(vcpu);
1073 return IA64_FAULT;
1074 }
1075 #endif //CHECK_FAULT
1076 vmx_vcpu_get_ibr(vcpu,r3,&r1);
1077 return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1078 }
1080 IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
1081 {
1082 u64 r3,r1;
1083 #ifdef CHECK_FAULT
1084 if(check_target_register(vcpu, inst.M43.r1)){
1085 set_illegal_op_isr(vcpu);
1086 illegal_op(vcpu);
1087 return IA64_FAULT;
1088 }
1089 IA64_PSR vpsr;
1090 vpsr.val=vmx_vcpu_get_psr(vcpu);
1091 if (vpsr.cpl != 0) {
1092 /* Inject Privileged Operation fault into guest */
1093 set_privileged_operation_isr (vcpu, 0);
1094 privilege_op (vcpu);
1095 return IA64_FAULT;
1096 }
1098 #endif //CHECK_FAULT
1099 if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
1100 #ifdef CHECK_FAULT
1101 set_isr_reg_nat_consumption(vcpu,0,0);
1102 rnat_comsumption(vcpu);
1103 return IA64_FAULT;
1104 #endif //CHECK_FAULT
1105 }
1106 #ifdef CHECK_FAULT
1107 if(is_reserved_indirect_register(vcpu,r3)){
1108 set_rsv_reg_field_isr(vcpu);
1109 rsv_reg_field(vcpu);
1110 return IA64_FAULT;
1111 }
1112 #endif //CHECK_FAULT
1113 vmx_vcpu_get_pmc(vcpu,r3,&r1);
1114 return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1115 }
1117 IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
1118 {
1119 u64 r3,r1;
1120 #ifdef CHECK_FAULT
1121 if(check_target_register(vcpu, inst.M43.r1)){
1122 set_illegal_op_isr(vcpu);
1123 illegal_op(vcpu);
1124 return IA64_FAULT;
1125 }
1126 #endif //CHECK_FAULT
1127 if(vmx_vcpu_get_gr(vcpu,inst.M43.r3,&r3)){
1128 #ifdef CHECK_FAULT
1129 set_isr_reg_nat_consumption(vcpu,0,0);
1130 rnat_comsumption(vcpu);
1131 return IA64_FAULT;
1132 #endif //CHECK_FAULT
1133 }
1134 #ifdef CHECK_FAULT
1135 if(is_reserved_indirect_register(vcpu,r3)){
1136 set_rsv_reg_field_isr(vcpu);
1137 rsv_reg_field(vcpu);
1138 return IA64_FAULT;
1139 }
1140 #endif //CHECK_FAULT
1141 vmx_vcpu_get_cpuid(vcpu,r3,&r1);
1142 return vmx_vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1143 }
1145 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
1146 {
1147 u64 r2,cr3;
1148 #ifdef CHECK_FAULT
1149 IA64_PSR vpsr;
1150 vpsr.val=vmx_vcpu_get_psr(vcpu);
1151 if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
1152 set_illegal_op_isr(vcpu);
1153 illegal_op(vcpu);
1154 return IA64_FAULT;
1155 }
1156 if ( vpsr.cpl != 0) {
1157 /* Inject Privileged Operation fault into guest */
1158 set_privileged_operation_isr (vcpu, 0);
1159 privilege_op (vcpu);
1160 return IA64_FAULT;
1161 }
1162 #endif // CHECK_FAULT
1163 if(vmx_vcpu_get_gr(vcpu, inst.M32.r2, &r2)){
1164 #ifdef CHECK_FAULT
1165 set_isr_reg_nat_consumption(vcpu,0,0);
1166 rnat_comsumption(vcpu);
1167 return IA64_FAULT;
1168 #endif //CHECK_FAULT
1169 }
1170 #ifdef CHECK_FAULT
1171 if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
1172 /* Inject Reserved Register/Field fault
1173 * into guest */
1174 set_rsv_reg_field_isr (vcpu,0);
1175 rsv_reg_field (vcpu);
1176 return IA64_FAULT;
1177 }
1178 #endif //CHECK_FAULT
1179 extern u64 cr_igfld_mask(int index, u64 value);
1180 r2 = cr_igfld_mask(inst.M32.cr3,r2);
1181 VMX_VPD(vcpu, vcr[inst.M32.cr3]) = r2;
1182 switch (inst.M32.cr3) {
1183 case 0: return vmx_vcpu_set_dcr(vcpu,r2);
1184 case 1: return vmx_vcpu_set_itm(vcpu,r2);
1185 case 2: return vmx_vcpu_set_iva(vcpu,r2);
1186 case 8: return vmx_vcpu_set_pta(vcpu,r2);
1187 case 16:return vmx_vcpu_set_ipsr(vcpu,r2);
1188 case 17:return vmx_vcpu_set_isr(vcpu,r2);
1189 case 19:return vmx_vcpu_set_iip(vcpu,r2);
1190 case 20:return vmx_vcpu_set_ifa(vcpu,r2);
1191 case 21:return vmx_vcpu_set_itir(vcpu,r2);
1192 case 22:return vmx_vcpu_set_iipa(vcpu,r2);
1193 case 23:return vmx_vcpu_set_ifs(vcpu,r2);
1194 case 24:return vmx_vcpu_set_iim(vcpu,r2);
1195 case 25:return vmx_vcpu_set_iha(vcpu,r2);
1196 case 64:return vmx_vcpu_set_lid(vcpu,r2);
1197 case 65:return IA64_NO_FAULT;
1198 case 66:return vmx_vcpu_set_tpr(vcpu,r2);
1199 case 67:return vmx_vcpu_set_eoi(vcpu,r2);
1200 case 68:return IA64_NO_FAULT;
1201 case 69:return IA64_NO_FAULT;
1202 case 70:return IA64_NO_FAULT;
1203 case 71:return IA64_NO_FAULT;
1204 case 72:return vmx_vcpu_set_itv(vcpu,r2);
1205 case 73:return vmx_vcpu_set_pmv(vcpu,r2);
1206 case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
1207 case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
1208 case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
1209 default: return IA64_NO_FAULT;
1210 }
1211 }
1214 #define cr_get(cr) \
1215 ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1216 vmx_vcpu_set_gr(vcpu, tgt, val,0):fault;
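/*
 * cr_get(cr) expands to a read of the virtual control register and, on
 * success, a write of the value into the target GR; it relies on the
 * locals fault, val, tgt and vcpu of vmx_emul_mov_from_cr(). The case
 * labels in both CR switches are the architectural CR numbers
 * (cr.dcr=0 ... cr.lrr1=81).
 */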
1219 IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
1220 {
1221 UINT64 tgt = inst.M33.r1;
1222 UINT64 val;
1223 IA64FAULT fault;
1224 #ifdef CHECK_FAULT
1225 IA64_PSR vpsr;
1226 vpsr.val=vmx_vcpu_get_psr(vcpu);
1227 if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
1228 (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
1229 set_illegal_op_isr(vcpu);
1230 illegal_op(vcpu);
1231 return IA64_FAULT;
1232 }
1233 if ( vpsr.cpl != 0) {
1234 /* Inject Privileged Operation fault into guest */
1235 set_privileged_operation_isr (vcpu, 0);
1236 privilege_op (vcpu);
1237 return IA64_FAULT;
1238 }
1239 #endif // CHECK_FAULT
1241 // from_cr_cnt[inst.M33.cr3]++;
1242 switch (inst.M33.cr3) {
1243 case 0: return cr_get(dcr);
1244 case 1: return cr_get(itm);
1245 case 2: return cr_get(iva);
1246 case 8: return cr_get(pta);
1247 case 16:return cr_get(ipsr);
1248 case 17:return cr_get(isr);
1249 case 19:return cr_get(iip);
1250 case 20:return cr_get(ifa);
1251 case 21:return cr_get(itir);
1252 case 22:return cr_get(iipa);
1253 case 23:return cr_get(ifs);
1254 case 24:return cr_get(iim);
1255 case 25:return cr_get(iha);
1256 case 64:val = ia64_getreg(_IA64_REG_CR_LID);
1257 return vmx_vcpu_set_gr(vcpu,tgt,val,0);
1258 // case 64:return cr_get(lid);
1259 case 65:
1260 vmx_vcpu_get_ivr(vcpu,&val);
1261 return vmx_vcpu_set_gr(vcpu,tgt,val,0);
1262 case 66:return cr_get(tpr);
1263 case 67:return vmx_vcpu_set_gr(vcpu,tgt,0L,0);
1264 case 68:return cr_get(irr0);
1265 case 69:return cr_get(irr1);
1266 case 70:return cr_get(irr2);
1267 case 71:return cr_get(irr3);
1268 case 72:return cr_get(itv);
1269 case 73:return cr_get(pmv);
1270 case 74:return cr_get(cmcv);
1271 case 80:return cr_get(lrr0);
1272 case 81:return cr_get(lrr1);
1273 default:
1274 panic("Read reserved cr register");
1279 static void post_emulation_action(VCPU *vcpu)
1280 {
1281 if ( vcpu->arch.irq_new_condition ) {
1282 vcpu->arch.irq_new_condition = 0;
1283 vhpi_detection(vcpu);
1284 }
1285 }
1287 //#define BYPASS_VMAL_OPCODE
1288 extern IA64_SLOT_TYPE slot_types[0x20][3];
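/*
 * __vmx_get_domain_bundle() fetches the two 8-byte halves of the bundle at
 * the guest IP so the faulting instruction can be re-decoded locally; it is
 * only needed when BYPASS_VMAL_OPCODE is defined.
 */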
1289 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
1290 {
1291 IA64_BUNDLE bundle;
1293 fetch_code( current,iip, &bundle.i64[0]);
1294 fetch_code( current,iip+8, &bundle.i64[1]);
1295 return bundle;
1296 }
1298 /** Emulate a privileged operation.
1301 * @param vcpu virtual cpu
1302 * @cause the reason that caused the virtualization fault
1303 * @opcode the instruction code which caused the virtualization fault
1304 */
1306 void
1307 vmx_emulate(VCPU *vcpu, UINT64 cause, UINT64 opcode)
1308 {
1309 IA64_BUNDLE bundle;
1310 int slot;
1311 IA64_SLOT_TYPE slot_type;
1312 IA64FAULT status;
1313 INST64 inst;
1314 REGS * regs;
1315 UINT64 iip;
1316 regs = vcpu_regs(vcpu);
1317 iip = regs->cr_iip;
1318 IA64_PSR vpsr;
1319 /*
1320 if (privop_trace) {
1321 static long i = 400;
1322 //if (i > 0) printf("privop @%p\n",iip);
1323 if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
1324 iip,ia64_get_itc(),ia64_get_itm());
1325 i--;
1326 }
1327 */
1328 #ifdef VTLB_DEBUG
1329 check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
1330 dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
1331 #endif
1332 #if 0
1333 if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
1334 printf ("VMAL decode error: cause - %lx; op - %lx\n",
1335 cause, opcode );
1336 return;
1337 }
1338 #endif
1339 #ifdef BYPASS_VMAL_OPCODE
1340 // make a local copy of the bundle containing the privop
1341 bundle = __vmx_get_domain_bundle(iip);
1342 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
1343 if (!slot) inst.inst = bundle.slot0;
1344 else if (slot == 1)
1345 inst.inst = bundle.slot1a + (bundle.slot1b<<18);
1346 else if (slot == 2) inst.inst = bundle.slot2;
1347 else printf("priv_handle_op: illegal slot: %d\n", slot);
1348 slot_type = slot_types[bundle.template][slot];
1349 ia64_priv_decoder(slot_type, inst, &cause);
1350 if(cause==0){
1351 printf("This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
1352 panic("123456\n");
1354 #else
1355 inst.inst=opcode;
1356 #endif /* BYPASS_VMAL_OPCODE */
1358 /*
1359 * Switch to actual virtual rid in rr0 and rr4,
1360 * which is required by some tlb related instructions.
1361 */
1362 prepare_if_physical_mode(vcpu);
1364 switch(cause) {
1365 case EVENT_RSM:
1366 status=vmx_emul_rsm(vcpu, inst);
1367 break;
1368 case EVENT_SSM:
1369 status=vmx_emul_ssm(vcpu, inst);
1370 break;
1371 case EVENT_MOV_TO_PSR:
1372 status=vmx_emul_mov_to_psr(vcpu, inst);
1373 break;
1374 case EVENT_MOV_FROM_PSR:
1375 status=vmx_emul_mov_from_psr(vcpu, inst);
1376 break;
1377 case EVENT_MOV_FROM_CR:
1378 status=vmx_emul_mov_from_cr(vcpu, inst);
1379 break;
1380 case EVENT_MOV_TO_CR:
1381 status=vmx_emul_mov_to_cr(vcpu, inst);
1382 break;
1383 case EVENT_BSW_0:
1384 status=vmx_emul_bsw0(vcpu, inst);
1385 break;
1386 case EVENT_BSW_1:
1387 status=vmx_emul_bsw1(vcpu, inst);
1388 break;
1389 case EVENT_COVER:
1390 status=vmx_emul_cover(vcpu, inst);
1391 break;
1392 case EVENT_RFI:
1393 status=vmx_emul_rfi(vcpu, inst);
1394 break;
1395 case EVENT_ITR_D:
1396 status=vmx_emul_itr_d(vcpu, inst);
1397 break;
1398 case EVENT_ITR_I:
1399 status=vmx_emul_itr_i(vcpu, inst);
1400 break;
1401 case EVENT_PTR_D:
1402 status=vmx_emul_ptr_d(vcpu, inst);
1403 break;
1404 case EVENT_PTR_I:
1405 status=vmx_emul_ptr_i(vcpu, inst);
1406 break;
1407 case EVENT_ITC_D:
1408 status=vmx_emul_itc_d(vcpu, inst);
1409 break;
1410 case EVENT_ITC_I:
1411 status=vmx_emul_itc_i(vcpu, inst);
1412 break;
1413 case EVENT_PTC_L:
1414 status=vmx_emul_ptc_l(vcpu, inst);
1415 break;
1416 case EVENT_PTC_G:
1417 status=vmx_emul_ptc_g(vcpu, inst);
1418 break;
1419 case EVENT_PTC_GA:
1420 status=vmx_emul_ptc_ga(vcpu, inst);
1421 break;
1422 case EVENT_PTC_E:
1423 status=vmx_emul_ptc_e(vcpu, inst);
1424 break;
1425 case EVENT_MOV_TO_RR:
1426 status=vmx_emul_mov_to_rr(vcpu, inst);
1427 break;
1428 case EVENT_MOV_FROM_RR:
1429 status=vmx_emul_mov_from_rr(vcpu, inst);
1430 break;
1431 case EVENT_THASH:
1432 status=vmx_emul_thash(vcpu, inst);
1433 break;
1434 case EVENT_TTAG:
1435 status=vmx_emul_ttag(vcpu, inst);
1436 break;
1437 case EVENT_TPA:
1438 status=vmx_emul_tpa(vcpu, inst);
1439 break;
1440 case EVENT_TAK:
1441 status=vmx_emul_tak(vcpu, inst);
1442 break;
1443 case EVENT_MOV_TO_AR_IMM:
1444 status=vmx_emul_mov_to_ar_imm(vcpu, inst);
1445 break;
1446 case EVENT_MOV_TO_AR:
1447 status=vmx_emul_mov_to_ar_reg(vcpu, inst);
1448 break;
1449 case EVENT_MOV_FROM_AR:
1450 status=vmx_emul_mov_from_ar_reg(vcpu, inst);
1451 break;
1452 case EVENT_MOV_TO_DBR:
1453 status=vmx_emul_mov_to_dbr(vcpu, inst);
1454 break;
1455 case EVENT_MOV_TO_IBR:
1456 status=vmx_emul_mov_to_ibr(vcpu, inst);
1457 break;
1458 case EVENT_MOV_TO_PMC:
1459 status=vmx_emul_mov_to_pmc(vcpu, inst);
1460 break;
1461 case EVENT_MOV_TO_PMD:
1462 status=vmx_emul_mov_to_pmd(vcpu, inst);
1463 break;
1464 case EVENT_MOV_TO_PKR:
1465 status=vmx_emul_mov_to_pkr(vcpu, inst);
1466 break;
1467 case EVENT_MOV_FROM_DBR:
1468 status=vmx_emul_mov_from_dbr(vcpu, inst);
1469 break;
1470 case EVENT_MOV_FROM_IBR:
1471 status=vmx_emul_mov_from_ibr(vcpu, inst);
1472 break;
1473 case EVENT_MOV_FROM_PMC:
1474 status=vmx_emul_mov_from_pmc(vcpu, inst);
1475 break;
1476 case EVENT_MOV_FROM_PKR:
1477 status=vmx_emul_mov_from_pkr(vcpu, inst);
1478 break;
1479 case EVENT_MOV_FROM_CPUID:
1480 status=vmx_emul_mov_from_cpuid(vcpu, inst);
1481 break;
1482 case EVENT_VMSW:
1483 printf ("Unimplemented instruction %d\n", cause);
1484 status=IA64_FAULT;
1485 break;
1486 default:
1487 printf("unknown cause %d, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
1488 while(1);
1489 /* For unknown cause, let hardware re-execute */
1490 status=IA64_RETRY;
1491 break;
1492 // panic("unknown cause in virtualization intercept");
1493 };
1495 #if 0
1496 if (status == IA64_FAULT)
1497 panic("Emulation failed with cause %d:\n", cause);
1498 #endif
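/*
 * On successful emulation the guest IP is advanced past the emulated
 * instruction; rfi is excluded because it has already installed a new IP.
 */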
1500 if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
1501 vmx_vcpu_increment_iip(vcpu);
1502 }
1504 recover_if_physical_mode(vcpu);
1505 post_emulation_action (vcpu);
1506 //TODO set_irq_check(v);
1507 return;
1508 }