ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_virt.c @ 10561:550786d7d352

[IA64] privop_stat.c

Cleanup: create privop_stat.[ch]
Move all stats functions to privop_stat.c to de-pollute other files.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Mon Jul 03 08:12:16 2006 -0600 (2006-07-03)
parents b20733e82ab6
children 6703fed8870f
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_virt.c:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Fred yang (fred.yang@intel.com)
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <asm/bundle.h>
#include <asm/vmx_vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h> // Debug only
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/smp.h>
#include <asm/vmx.h>
#include <asm/virt_event.h>
#include <asm/vmx_phy_mode.h>
extern void vhpi_detection(VCPU *vcpu); // temporarily placed here; needs a header file.
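
/*
 * Decode the instruction 'inst' (whose slot type is 'slot_type') and report
 * the matching EVENT_* cause code through 'cause'.  A cause of 0 means the
 * instruction is not one of the privileged/virtualization-sensitive
 * operations handled below; only M- and B-unit encodings can produce a
 * non-zero cause.
 */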
void
ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause)
{
    *cause=0;
    switch (slot_type) {
        case M:
        if (inst.generic.major==0){
            if(inst.M28.x3==0){
                if(inst.M44.x4==6){
                    *cause=EVENT_SSM;
                }else if(inst.M44.x4==7){
                    *cause=EVENT_RSM;
                }else if(inst.M30.x4==8&&inst.M30.x2==2){
                    *cause=EVENT_MOV_TO_AR_IMM;
                }
            }
        }
        else if(inst.generic.major==1){
            if(inst.M28.x3==0){
                if(inst.M32.x6==0x2c){
                    *cause=EVENT_MOV_TO_CR;
                }else if(inst.M33.x6==0x24){
                    *cause=EVENT_MOV_FROM_CR;
                }else if(inst.M35.x6==0x2d){
                    *cause=EVENT_MOV_TO_PSR;
                }else if(inst.M36.x6==0x25){
                    *cause=EVENT_MOV_FROM_PSR;
                }else if(inst.M29.x6==0x2A){
                    *cause=EVENT_MOV_TO_AR;
                }else if(inst.M31.x6==0x22){
                    *cause=EVENT_MOV_FROM_AR;
                }else if(inst.M45.x6==0x09){
                    *cause=EVENT_PTC_L;
                }else if(inst.M45.x6==0x0A){
                    *cause=EVENT_PTC_G;
                }else if(inst.M45.x6==0x0B){
                    *cause=EVENT_PTC_GA;
                }else if(inst.M45.x6==0x0C){
                    *cause=EVENT_PTR_D;
                }else if(inst.M45.x6==0x0D){
                    *cause=EVENT_PTR_I;
                }else if(inst.M46.x6==0x1A){
                    *cause=EVENT_THASH;
                }else if(inst.M46.x6==0x1B){
                    *cause=EVENT_TTAG;
                }else if(inst.M46.x6==0x1E){
                    *cause=EVENT_TPA;
                }else if(inst.M46.x6==0x1F){
                    *cause=EVENT_TAK;
                }else if(inst.M47.x6==0x34){
                    *cause=EVENT_PTC_E;
                }else if(inst.M41.x6==0x2E){
                    *cause=EVENT_ITC_D;
                }else if(inst.M41.x6==0x2F){
                    *cause=EVENT_ITC_I;
                }else if(inst.M42.x6==0x00){
                    *cause=EVENT_MOV_TO_RR;
                }else if(inst.M42.x6==0x01){
                    *cause=EVENT_MOV_TO_DBR;
                }else if(inst.M42.x6==0x02){
                    *cause=EVENT_MOV_TO_IBR;
                }else if(inst.M42.x6==0x03){
                    *cause=EVENT_MOV_TO_PKR;
                }else if(inst.M42.x6==0x04){
                    *cause=EVENT_MOV_TO_PMC;
                }else if(inst.M42.x6==0x05){
                    *cause=EVENT_MOV_TO_PMD;
                }else if(inst.M42.x6==0x0E){
                    *cause=EVENT_ITR_D;
                }else if(inst.M42.x6==0x0F){
                    *cause=EVENT_ITR_I;
                }else if(inst.M43.x6==0x10){
                    *cause=EVENT_MOV_FROM_RR;
                }else if(inst.M43.x6==0x11){
                    *cause=EVENT_MOV_FROM_DBR;
                }else if(inst.M43.x6==0x12){
                    *cause=EVENT_MOV_FROM_IBR;
                }else if(inst.M43.x6==0x13){
                    *cause=EVENT_MOV_FROM_PKR;
                }else if(inst.M43.x6==0x14){
                    *cause=EVENT_MOV_FROM_PMC;
/*
                }else if(inst.M43.x6==0x15){
                    *cause=EVENT_MOV_FROM_PMD;
*/
                }else if(inst.M43.x6==0x17){
                    *cause=EVENT_MOV_FROM_CPUID;
                }
            }
        }
        break;
        case B:
        if(inst.generic.major==0){
            if(inst.B8.x6==0x02){
                *cause=EVENT_COVER;
            }else if(inst.B8.x6==0x08){
                *cause=EVENT_RFI;
            }else if(inst.B8.x6==0x0c){
                *cause=EVENT_BSW_0;
            }else if(inst.B8.x6==0x0d){
                *cause=EVENT_BSW_1;
            }
        }
        case I:
        case F:
        case L:
        case ILLEGAL:
        break;
    }
}

IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
{
    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    return vmx_vcpu_reset_psr_sm(vcpu,imm24);
}

IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
{
    UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
    return vmx_vcpu_set_psr_sm(vcpu,imm24);
}

unsigned long last_guest_psr = 0x0;
IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
{
    UINT64 tgt = inst.M33.r1;
    UINT64 val;

/*
    if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, tgt, val);
    else return fault;
 */
    val = vmx_vcpu_get_psr(vcpu);
    val = (val & MASK(0, 32)) | (val & MASK(35, 2));
    last_guest_psr = val;
    return vcpu_set_gr(vcpu, tgt, val, 0);
}

/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
{
    UINT64 val;

    if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");

    val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
#if 0
    if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
        while(1);
    else
        last_mov_from_psr = 0;
#endif
    return vmx_vcpu_set_psr_l(vcpu,val);
}

/**************************************************************************
Privileged operation emulation routines
**************************************************************************/

IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
{
    IA64_PSR vpsr;
    REGS *regs;
#ifdef CHECK_FAULT
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    regs=vcpu_regs(vcpu);
    vpsr.val=regs->cr_ipsr;
    if ( vpsr.is == 1 ) {
        panic_domain(regs,"We do not support IA32 instruction yet");
    }

    return vmx_vcpu_rfi(vcpu);
}

IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
{
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return vcpu_bsw0(vcpu);
}

IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
{
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return vcpu_bsw1(vcpu);
}

IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
{
    return vmx_vcpu_cover(vcpu);
}
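
/*
 * TLB purge emulation (ptc.l, ptc.e, ptc.g, ptc.ga, ptr.d, ptr.i).
 * These handlers read the purge address from r3 and the purge size from
 * bits 2..7 of r2 (ptc.e only needs r3), then hand off to the matching
 * vmx_vcpu_ptc_* or vmx_vcpu_ptr_* routine.
 */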

IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
    IA64_PSR vpsr;

    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
}

IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
{
    u64 r3;
    IA64_PSR vpsr;

    vpsr.val=vmx_vcpu_get_psr(vcpu);
#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
    return vmx_vcpu_ptc_e(vcpu,r3);
}

IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
}

IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
}

IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
{
    IA64FAULT ret1, ret2;

#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
    ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
#ifdef VMAL_NO_FAULT_CHECK
    if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
        return IA64_FAULT;
    return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
}

IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
        return IA64_FAULT;
    return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
}
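
/*
 * VHPT/translation query emulation: thash computes the VHPT hash address
 * for a guest virtual address, ttag its translation tag, tpa translates a
 * guest virtual address to a (guest) physical address, and tak returns the
 * protection key of the translation.  Each handler reads the address from
 * r3 and writes the result into r1.
 */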

IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(unimplemented_gva(vcpu, r3)){
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_thash(vcpu, r3, &r1);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
#endif
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(unimplemented_gva(vcpu, r3)){
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_ttag(vcpu, r3, &r1);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(vpsr.cpl!=0){
        visr.val=0;
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu,r3) ) {
        // inject unimplemented_data_address_fault
        visr.val = set_isr_ei_ni(vcpu);
        visr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, visr.val);
        // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT

    if(vmx_vcpu_tpa(vcpu, r3, &r1)){
        return IA64_FAULT;
    }
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
    int fault=IA64_NO_FAULT;
    visr.val=0;
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(vpsr.cpl!=0){
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif
    }
    if(vmx_vcpu_tak(vcpu, r3, &r1)){
        return IA64_FAULT;
    }
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

/************************************
 * Insert translation register/cache
 ************************************/

IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
{
    UINT64 itir, ifa, pte, slot;
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic ) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if(is_reserved_rr_register(vcpu, slot)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    if (vcpu_get_itir(vcpu,&itir)){
        return(IA64_FAULT);
    }
    if (vcpu_get_ifa(vcpu,&ifa)){
        return(IA64_FAULT);
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_itir_field(vcpu, itir)) {
        // TODO
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu,ifa) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
}

IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
{
    UINT64 itir, ifa, pte, slot;
#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
#endif
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic ) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#ifdef VMAL_NO_FAULT_CHECK
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if(is_reserved_rr_register(vcpu, slot)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    if (vcpu_get_itir(vcpu,&itir)){
        return(IA64_FAULT);
    }
    if (vcpu_get_ifa(vcpu,&ifa)){
        return(IA64_FAULT);
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_itir_field(vcpu, itir)) {
        // TODO
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu,ifa) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
}

IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
{
    IA64_PSR vpsr;
    IA64FAULT ret1;

    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic ) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }

#ifdef VMAL_NO_FAULT_CHECK
    UINT64 fault;
    ISR isr;
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
#ifdef VMAL_NO_FAULT_CHECK
    if( ret1 != IA64_NO_FAULT ){
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    if (vcpu_get_itir(vcpu,itir)){
        return(IA64_FAULT);
    }
    if (vcpu_get_ifa(vcpu,ifa)){
        return(IA64_FAULT);
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,ifa) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
{
    UINT64 itir, ifa, pte;

    if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
        return IA64_FAULT;
    }

    return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
}

IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
{
    UINT64 itir, ifa, pte;

    if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
        return IA64_FAULT;
    }

    return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/
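
/*
 * Only ar.itc (application register 44) is virtualized here; the handlers
 * below panic the domain for any other AR number, as noted in their
 * panic_domain() messages.
 */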

IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
{
    // I27 and M30 are identical for these fields
    UINT64 imm;

    if(inst.M30.ar3!=44){
        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    }
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(inst.M30.s){
        imm = -inst.M30.imm;
    }else{
        imm = inst.M30.imm;
    }
    return (vmx_vcpu_set_itc(vcpu, imm));
}

IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
{
    // I26 and M29 are identical for these fields
    u64 r2;
    if(inst.M29.ar3!=44){
        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    }
    if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return (vmx_vcpu_set_itc(vcpu, r2));
}

IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
{
    // I27 and M30 are identical for these fields
    u64 r1;
    if(inst.M31.ar3!=44){
        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    }
#ifdef CHECK_FAULT
    if(check_target_register(vcpu,inst.M31.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.si&& vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    vmx_vcpu_get_itc(vcpu,&r1);
    vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
    return IA64_NO_FAULT;
}

/********************************
 * Moves to privileged registers
 ********************************/

IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_pkr(vcpu,r3,r2));
}

IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_rr(vcpu,r3,r2));
}

IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
    return IA64_NO_FAULT;   /* writes to dbr are silently ignored; the code below is not reached */
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_dbr(vcpu,r3,r2));
}

IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
    return IA64_NO_FAULT;   /* writes to ibr are silently ignored; the code below is not reached */
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_ibr(vcpu,r3,r2));
}

IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_pmc(vcpu,r3,r2));
}

IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_pmd(vcpu,r3,r2));
}

/**********************************
 * Moves from privileged registers
 **********************************/
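
/*
 * Each of the mov-from handlers below follows the same pattern: validate the
 * target register (under CHECK_FAULT), read the register index from r3,
 * fetch the value with the corresponding vcpu_get_* / vmx_vcpu_get_* helper,
 * and write the result into r1.
 */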

IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
    }
#endif //CHECK_FAULT
    vcpu_get_rr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_pkr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_dbr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_ibr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_pmc(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_cpuid(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}
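
/*
 * Control register moves: writes are masked with cr_igfld_mask() and then
 * dispatched on the CR index, either to a vmx_vcpu_set_* handler or to the
 * generic vcpu_set_* routine; unhandled indexes are stored directly into
 * the virtual CR file.  Reads go through the cr_get/vmx_cr_get helpers
 * defined after vmx_emul_mov_to_cr().
 */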

IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
{
    u64 r2;
    extern u64 cr_igfld_mask(int index, u64 value);
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
        /* Inject Reserved Register/Field fault
         * into guest */
        set_rsv_reg_field_isr (vcpu,0);
        rsv_reg_field (vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    r2 = cr_igfld_mask(inst.M32.cr3,r2);
    switch (inst.M32.cr3) {
        case 0: return vmx_vcpu_set_dcr(vcpu,r2);
        case 1: return vmx_vcpu_set_itm(vcpu,r2);
        case 2: return vmx_vcpu_set_iva(vcpu,r2);
        case 8: return vmx_vcpu_set_pta(vcpu,r2);
        case 16:return vcpu_set_ipsr(vcpu,r2);
        case 17:return vcpu_set_isr(vcpu,r2);
        case 19:return vcpu_set_iip(vcpu,r2);
        case 20:return vcpu_set_ifa(vcpu,r2);
        case 21:return vcpu_set_itir(vcpu,r2);
        case 22:return vcpu_set_iipa(vcpu,r2);
        case 23:return vcpu_set_ifs(vcpu,r2);
        case 24:return vcpu_set_iim(vcpu,r2);
        case 25:return vcpu_set_iha(vcpu,r2);
        case 64:printk("SET LID to 0x%lx\n", r2);
                return IA64_NO_FAULT;
        case 65:return IA64_NO_FAULT;
        case 66:return vmx_vcpu_set_tpr(vcpu,r2);
        case 67:return vmx_vcpu_set_eoi(vcpu,r2);
        case 68:return IA64_NO_FAULT;
        case 69:return IA64_NO_FAULT;
        case 70:return IA64_NO_FAULT;
        case 71:return IA64_NO_FAULT;
        case 72:return vmx_vcpu_set_itv(vcpu,r2);
        case 73:return vmx_vcpu_set_pmv(vcpu,r2);
        case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
        case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
        case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
        default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
                return IA64_NO_FAULT;
    }
}
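
/*
 * cr_get/vmx_cr_get: read CR 'cr' through vcpu_get_<cr>() (resp.
 * vmx_vcpu_get_<cr>()) and, on success, copy the value into the target
 * general register 'tgt'; otherwise propagate the fault code.
 */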

#define cr_get(cr) \
    ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
        vcpu_set_gr(vcpu, tgt, val,0):fault;

#define vmx_cr_get(cr) \
    ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
        vcpu_set_gr(vcpu, tgt, val,0):fault;

IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
    UINT64 tgt = inst.M33.r1;
    UINT64 val;
    IA64FAULT fault;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
        (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT

// from_cr_cnt[inst.M33.cr3]++;
    switch (inst.M33.cr3) {
        case 0: return vmx_cr_get(dcr);
        case 1: return vmx_cr_get(itm);
        case 2: return vmx_cr_get(iva);
        case 8: return vmx_cr_get(pta);
        case 16:return cr_get(ipsr);
        case 17:return cr_get(isr);
        case 19:return cr_get(iip);
        case 20:return cr_get(ifa);
        case 21:return cr_get(itir);
        case 22:return cr_get(iipa);
        case 23:return cr_get(ifs);
        case 24:return cr_get(iim);
        case 25:return cr_get(iha);
        case 64:return vmx_cr_get(lid);
        case 65:
                vmx_vcpu_get_ivr(vcpu,&val);
                return vcpu_set_gr(vcpu,tgt,val,0);
        case 66:return vmx_cr_get(tpr);
        case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
        case 68:return vmx_cr_get(irr0);
        case 69:return vmx_cr_get(irr1);
        case 70:return vmx_cr_get(irr2);
        case 71:return vmx_cr_get(irr3);
        case 72:return vmx_cr_get(itv);
        case 73:return vmx_cr_get(pmv);
        case 74:return vmx_cr_get(cmcv);
        case 80:return vmx_cr_get(lrr0);
        case 81:return vmx_cr_get(lrr1);
        default: return IA64_NO_FAULT;
    }
}

static void post_emulation_action(VCPU *vcpu)
{
    if ( vcpu->arch.irq_new_condition ) {
        vcpu->arch.irq_new_condition = 0;
        vhpi_detection(vcpu);
    }
}

//#define BYPASS_VMAL_OPCODE
extern IA64_SLOT_TYPE slot_types[0x20][3];
IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
{
    IA64_BUNDLE bundle;
    fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
    return bundle;
}

/** Emulate a privileged operation.
 *
 * @param vcpu virtual cpu
 * @cause the reason that caused the virtualization fault
 * @opcode the instruction encoding that caused the virtualization fault
 */
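
/*
 * Dispatch loop for virtualization faults: the cause and opcode recorded at
 * fault time (VMX(vcpu, cause) / VMX(vcpu, opcode)) select one of the
 * vmx_emul_* handlers above; unless the instruction was rfi, the guest iip
 * is then advanced past the emulated instruction.
 */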

void
vmx_emulate(VCPU *vcpu, REGS *regs)
{
    IA64FAULT status;
    INST64 inst;
    UINT64 iip, cause, opcode;
    iip = regs->cr_iip;
    cause = VMX(vcpu,cause);
    opcode = VMX(vcpu,opcode);

#ifdef VTLB_DEBUG
    check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
    dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
#endif
#if 0
    if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
        printf ("VMAL decode error: cause - %lx; op - %lx\n",
                cause, opcode );
        return;
    }
#endif
#ifdef BYPASS_VMAL_OPCODE
    // make a local copy of the bundle containing the privop
    IA64_BUNDLE bundle;
    int slot;
    IA64_SLOT_TYPE slot_type;
    IA64_PSR vpsr;
    bundle = __vmx_get_domain_bundle(iip);
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot) inst.inst = bundle.slot0;
    else if (slot == 1)
        inst.inst = bundle.slot1a + (bundle.slot1b<<18);
    else if (slot == 2) inst.inst = bundle.slot2;
    else printf("priv_handle_op: illegal slot: %d\n", slot);
    slot_type = slot_types[bundle.template][slot];
    ia64_priv_decoder(slot_type, inst, &cause);
    if(cause==0){
        panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
    }
#else
    inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
    /*
     * Switch to actual virtual rid in rr0 and rr4,
     * which is required by some tlb related instructions.
     */
    prepare_if_physical_mode(vcpu);

    switch(cause) {
    case EVENT_RSM:
        status=vmx_emul_rsm(vcpu, inst);
        break;
    case EVENT_SSM:
        status=vmx_emul_ssm(vcpu, inst);
        break;
    case EVENT_MOV_TO_PSR:
        status=vmx_emul_mov_to_psr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PSR:
        status=vmx_emul_mov_from_psr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_CR:
        status=vmx_emul_mov_from_cr(vcpu, inst);
        break;
    case EVENT_MOV_TO_CR:
        status=vmx_emul_mov_to_cr(vcpu, inst);
        break;
    case EVENT_BSW_0:
        status=vmx_emul_bsw0(vcpu, inst);
        break;
    case EVENT_BSW_1:
        status=vmx_emul_bsw1(vcpu, inst);
        break;
    case EVENT_COVER:
        status=vmx_emul_cover(vcpu, inst);
        break;
    case EVENT_RFI:
        status=vmx_emul_rfi(vcpu, inst);
        break;
    case EVENT_ITR_D:
        status=vmx_emul_itr_d(vcpu, inst);
        break;
    case EVENT_ITR_I:
        status=vmx_emul_itr_i(vcpu, inst);
        break;
    case EVENT_PTR_D:
        status=vmx_emul_ptr_d(vcpu, inst);
        break;
    case EVENT_PTR_I:
        status=vmx_emul_ptr_i(vcpu, inst);
        break;
    case EVENT_ITC_D:
        status=vmx_emul_itc_d(vcpu, inst);
        break;
    case EVENT_ITC_I:
        status=vmx_emul_itc_i(vcpu, inst);
        break;
    case EVENT_PTC_L:
        status=vmx_emul_ptc_l(vcpu, inst);
        break;
    case EVENT_PTC_G:
        status=vmx_emul_ptc_g(vcpu, inst);
        break;
    case EVENT_PTC_GA:
        status=vmx_emul_ptc_ga(vcpu, inst);
        break;
    case EVENT_PTC_E:
        status=vmx_emul_ptc_e(vcpu, inst);
        break;
    case EVENT_MOV_TO_RR:
        status=vmx_emul_mov_to_rr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_RR:
        status=vmx_emul_mov_from_rr(vcpu, inst);
        break;
    case EVENT_THASH:
        status=vmx_emul_thash(vcpu, inst);
        break;
    case EVENT_TTAG:
        status=vmx_emul_ttag(vcpu, inst);
        break;
    case EVENT_TPA:
        status=vmx_emul_tpa(vcpu, inst);
        break;
    case EVENT_TAK:
        status=vmx_emul_tak(vcpu, inst);
        break;
    case EVENT_MOV_TO_AR_IMM:
        status=vmx_emul_mov_to_ar_imm(vcpu, inst);
        break;
    case EVENT_MOV_TO_AR:
        status=vmx_emul_mov_to_ar_reg(vcpu, inst);
        break;
    case EVENT_MOV_FROM_AR:
        status=vmx_emul_mov_from_ar_reg(vcpu, inst);
        break;
    case EVENT_MOV_TO_DBR:
        status=vmx_emul_mov_to_dbr(vcpu, inst);
        break;
    case EVENT_MOV_TO_IBR:
        status=vmx_emul_mov_to_ibr(vcpu, inst);
        break;
    case EVENT_MOV_TO_PMC:
        status=vmx_emul_mov_to_pmc(vcpu, inst);
        break;
    case EVENT_MOV_TO_PMD:
        status=vmx_emul_mov_to_pmd(vcpu, inst);
        break;
    case EVENT_MOV_TO_PKR:
        status=vmx_emul_mov_to_pkr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_DBR:
        status=vmx_emul_mov_from_dbr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_IBR:
        status=vmx_emul_mov_from_ibr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PMC:
        status=vmx_emul_mov_from_pmc(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PKR:
        status=vmx_emul_mov_from_pkr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_CPUID:
        status=vmx_emul_mov_from_cpuid(vcpu, inst);
        break;
    case EVENT_VMSW:
        printf ("Unimplemented instruction %ld\n", cause);
        status=IA64_FAULT;
        break;
    default:
        panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
        break;
    };

#if 0
    if (status == IA64_FAULT)
        panic("Emulation failed with cause %d:\n", cause);
#endif

    if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
        vmx_vcpu_increment_iip(vcpu);
    }

    recover_if_physical_mode(vcpu);
    post_emulation_action (vcpu);
//TODO    set_irq_check(v);
    return;
}