ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_virt.c @ 10695:6703fed8870f

[IA64] enable acceleration of external interrupt

This patch enables acceleration of external interrupts, as
described in the VTI spec.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Jul 12 13:20:15 2006 -0600 (2006-07-12)
parents 550786d7d352
children 4834d1e8f26e
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_virt.c:
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Fred yang (fred.yang@intel.com)
20 * Shaofan Li (Susue Li) <susie.li@intel.com>
21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
22 */
23 #include <asm/bundle.h>
24 #include <asm/vmx_vcpu.h>
25 #include <asm/processor.h>
26 #include <asm/delay.h> // Debug only
27 #include <asm/vmmu.h>
28 #include <asm/vmx_mm_def.h>
29 #include <asm/smp.h>
30 #include <asm/vmx.h>
31 #include <asm/virt_event.h>
32 #include <asm/vmx_phy_mode.h>
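/*
 * Decode a privileged / virtualization-sensitive instruction in the given
 * bundle slot and report the corresponding EVENT_* cause (0 if the
 * instruction is not recognized).  Only used by vmx_emulate() when
 * BYPASS_VMAL_OPCODE is defined; otherwise the cause recorded at
 * virtualization-fault time (VMX(vcpu, cause)) is used directly.
 */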
34 void
35 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause)
36 {
37 *cause=0;
38 switch (slot_type) {
39 case M:
40 if (inst.generic.major==0){
41 if(inst.M28.x3==0){
42 if(inst.M44.x4==6){
43 *cause=EVENT_SSM;
44 }else if(inst.M44.x4==7){
45 *cause=EVENT_RSM;
46 }else if(inst.M30.x4==8&&inst.M30.x2==2){
47 *cause=EVENT_MOV_TO_AR_IMM;
48 }
49 }
50 }
51 else if(inst.generic.major==1){
52 if(inst.M28.x3==0){
53 if(inst.M32.x6==0x2c){
54 *cause=EVENT_MOV_TO_CR;
55 }else if(inst.M33.x6==0x24){
56 *cause=EVENT_MOV_FROM_CR;
57 }else if(inst.M35.x6==0x2d){
58 *cause=EVENT_MOV_TO_PSR;
59 }else if(inst.M36.x6==0x25){
60 *cause=EVENT_MOV_FROM_PSR;
61 }else if(inst.M29.x6==0x2A){
62 *cause=EVENT_MOV_TO_AR;
63 }else if(inst.M31.x6==0x22){
64 *cause=EVENT_MOV_FROM_AR;
65 }else if(inst.M45.x6==0x09){
66 *cause=EVENT_PTC_L;
67 }else if(inst.M45.x6==0x0A){
68 *cause=EVENT_PTC_G;
69 }else if(inst.M45.x6==0x0B){
70 *cause=EVENT_PTC_GA;
71 }else if(inst.M45.x6==0x0C){
72 *cause=EVENT_PTR_D;
73 }else if(inst.M45.x6==0x0D){
74 *cause=EVENT_PTR_I;
75 }else if(inst.M46.x6==0x1A){
76 *cause=EVENT_THASH;
77 }else if(inst.M46.x6==0x1B){
78 *cause=EVENT_TTAG;
79 }else if(inst.M46.x6==0x1E){
80 *cause=EVENT_TPA;
81 }else if(inst.M46.x6==0x1F){
82 *cause=EVENT_TAK;
83 }else if(inst.M47.x6==0x34){
84 *cause=EVENT_PTC_E;
85 }else if(inst.M41.x6==0x2E){
86 *cause=EVENT_ITC_D;
87 }else if(inst.M41.x6==0x2F){
88 *cause=EVENT_ITC_I;
89 }else if(inst.M42.x6==0x00){
90 *cause=EVENT_MOV_TO_RR;
91 }else if(inst.M42.x6==0x01){
92 *cause=EVENT_MOV_TO_DBR;
93 }else if(inst.M42.x6==0x02){
94 *cause=EVENT_MOV_TO_IBR;
95 }else if(inst.M42.x6==0x03){
96 *cause=EVENT_MOV_TO_PKR;
97 }else if(inst.M42.x6==0x04){
98 *cause=EVENT_MOV_TO_PMC;
99 }else if(inst.M42.x6==0x05){
100 *cause=EVENT_MOV_TO_PMD;
101 }else if(inst.M42.x6==0x0E){
102 *cause=EVENT_ITR_D;
103 }else if(inst.M42.x6==0x0F){
104 *cause=EVENT_ITR_I;
105 }else if(inst.M43.x6==0x10){
106 *cause=EVENT_MOV_FROM_RR;
107 }else if(inst.M43.x6==0x11){
108 *cause=EVENT_MOV_FROM_DBR;
109 }else if(inst.M43.x6==0x12){
110 *cause=EVENT_MOV_FROM_IBR;
111 }else if(inst.M43.x6==0x13){
112 *cause=EVENT_MOV_FROM_PKR;
113 }else if(inst.M43.x6==0x14){
114 *cause=EVENT_MOV_FROM_PMC;
115 /*
116 }else if(inst.M43.x6==0x15){
117 *cause=EVENT_MOV_FROM_PMD;
118 */
119 }else if(inst.M43.x6==0x17){
120 *cause=EVENT_MOV_FROM_CPUID;
121 }
122 }
123 }
124 break;
125 case B:
126 if(inst.generic.major==0){
127 if(inst.B8.x6==0x02){
128 *cause=EVENT_COVER;
129 }else if(inst.B8.x6==0x08){
130 *cause=EVENT_RFI;
131 }else if(inst.B8.x6==0x0c){
132 *cause=EVENT_BSW_0;
133 }else if(inst.B8.x6==0x0d){
134 *cause=EVENT_BSW_1;
135 }
136 }
137 case I:
138 case F:
139 case L:
140 case ILLEGAL:
141 break;
142 }
143 }
145 IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
146 {
147 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
148 return vmx_vcpu_reset_psr_sm(vcpu,imm24);
149 }
151 IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
152 {
153 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
154 return vmx_vcpu_set_psr_sm(vcpu,imm24);
155 }
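/*
 * Note on the two handlers above: in the ssm/rsm (M44) encoding the 24-bit
 * immediate is split across the i, i2 and imm fields, so it is reassembled
 * as (i << 23) | (i2 << 21) | imm before being handed to the vPSR
 * set/reset helpers.
 */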
157 unsigned long last_guest_psr = 0x0;
158 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
159 {
160 UINT64 tgt = inst.M33.r1;
161 UINT64 val;
163 /*
164 if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
165 return vcpu_set_gr(vcpu, tgt, val);
166 else return fault;
167 */
168 val = vmx_vcpu_get_psr(vcpu);
169 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
170 last_guest_psr = val;
171 return vcpu_set_gr(vcpu, tgt, val, 0);
172 }
174 /**
175 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
176 */
177 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
178 {
179 UINT64 val;
181 if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
182 panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
184 val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
185 #if 0
186 if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
187 while(1);
188 else
189 last_mov_from_psr = 0;
190 #endif
191 return vmx_vcpu_set_psr_l(vcpu,val);
192 }
195 /**************************************************************************
196 Privileged operation emulation routines
197 **************************************************************************/
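/*
 * Most routines below follow the same pattern: privilege (cpl) and NaT
 * consumption checks that are compiled in only under CHECK_FAULT or
 * VMAL_NO_FAULT_CHECK, followed by a call into the corresponding
 * vmx_vcpu_* / vcpu_* helper that performs the operation on the virtual
 * state.
 */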
199 IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
200 {
201 IA64_PSR vpsr;
202 REGS *regs;
203 #ifdef CHECK_FAULT
204 vpsr.val=vmx_vcpu_get_psr(vcpu);
205 if ( vpsr.cpl != 0) {
206 /* Inject Privileged Operation fault into guest */
207 set_privileged_operation_isr (vcpu, 0);
208 privilege_op (vcpu);
209 return IA64_FAULT;
210 }
211 #endif // CHECK_FAULT
212 regs=vcpu_regs(vcpu);
213 vpsr.val=regs->cr_ipsr;
214 if ( vpsr.is == 1 ) {
215 panic_domain(regs,"We do not support IA32 instruction yet");
216 }
218 return vmx_vcpu_rfi(vcpu);
219 }
221 IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
222 {
223 #ifdef CHECK_FAULT
224 IA64_PSR vpsr;
225 vpsr.val=vmx_vcpu_get_psr(vcpu);
226 if ( vpsr.cpl != 0) {
227 /* Inject Privileged Operation fault into guest */
228 set_privileged_operation_isr (vcpu, 0);
229 privilege_op (vcpu);
230 return IA64_FAULT;
231 }
232 #endif // CHECK_FAULT
233 return vcpu_bsw0(vcpu);
234 }
236 IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
237 {
238 #ifdef CHECK_FAULT
239 IA64_PSR vpsr;
240 vpsr.val=vmx_vcpu_get_psr(vcpu);
241 if ( vpsr.cpl != 0) {
242 /* Inject Privileged Operation fault into guest */
243 set_privileged_operation_isr (vcpu, 0);
244 privilege_op (vcpu);
245 return IA64_FAULT;
246 }
247 #endif // CHECK_FAULT
248 return vcpu_bsw1(vcpu);
249 }
251 IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
252 {
253 return vmx_vcpu_cover(vcpu);
254 }
256 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
257 {
258 u64 r2,r3;
259 IA64_PSR vpsr;
261 vpsr.val=vmx_vcpu_get_psr(vcpu);
262 if ( vpsr.cpl != 0) {
263 /* Inject Privileged Operation fault into guest */
264 set_privileged_operation_isr (vcpu, 0);
265 privilege_op (vcpu);
266 return IA64_FAULT;
267 }
268 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
269 #ifdef VMAL_NO_FAULT_CHECK
270 ISR isr;
271 set_isr_reg_nat_consumption(vcpu,0,0);
272 rnat_comsumption(vcpu);
273 return IA64_FAULT;
274 #endif // VMAL_NO_FAULT_CHECK
275 }
276 #ifdef VMAL_NO_FAULT_CHECK
277 if (unimplemented_gva(vcpu,r3) ) {
278 isr.val = set_isr_ei_ni(vcpu);
279 isr.code = IA64_RESERVED_REG_FAULT;
280 vcpu_set_isr(vcpu, isr.val);
281 unimpl_daddr(vcpu);
282 return IA64_FAULT;
283 }
284 #endif // VMAL_NO_FAULT_CHECK
285 return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
286 }
288 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
289 {
290 u64 r3;
291 IA64_PSR vpsr;
293 vpsr.val=vmx_vcpu_get_psr(vcpu);
294 #ifdef VMAL_NO_FAULT_CHECK
295 ISR isr;
296 if ( vpsr.cpl != 0) {
297 /* Inject Privileged Operation fault into guest */
298 set_privileged_operation_isr (vcpu, 0);
299 privilege_op (vcpu);
300 return IA64_FAULT;
301 }
302 #endif // VMAL_NO_FAULT_CHECK
303 if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
304 #ifdef VMAL_NO_FAULT_CHECK
305 set_isr_reg_nat_consumption(vcpu,0,0);
306 rnat_comsumption(vcpu);
307 return IA64_FAULT;
308 #endif // VMAL_NO_FAULT_CHECK
309 }
310 return vmx_vcpu_ptc_e(vcpu,r3);
311 }
313 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
314 {
315 u64 r2,r3;
316 #ifdef VMAL_NO_FAULT_CHECK
317 IA64_PSR vpsr;
318 vpsr.val=vmx_vcpu_get_psr(vcpu);
319 if ( vpsr.cpl != 0) {
320 /* Inject Privileged Operation fault into guest */
321 set_privileged_operation_isr (vcpu, 0);
322 privilege_op (vcpu);
323 return IA64_FAULT;
324 }
325 #endif // VMAL_NO_FAULT_CHECK
326 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
327 #ifdef VMAL_NO_FAULT_CHECK
328 ISR isr;
329 set_isr_reg_nat_consumption(vcpu,0,0);
330 rnat_comsumption(vcpu);
331 return IA64_FAULT;
332 #endif // VMAL_NO_FAULT_CHECK
333 }
334 #ifdef VMAL_NO_FAULT_CHECK
335 if (unimplemented_gva(vcpu,r3) ) {
336 isr.val = set_isr_ei_ni(vcpu);
337 isr.code = IA64_RESERVED_REG_FAULT;
338 vcpu_set_isr(vcpu, isr.val);
339 unimpl_daddr(vcpu);
340 return IA64_FAULT;
341 }
342 #endif // VMAL_NO_FAULT_CHECK
343 return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
344 }
346 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
347 {
348 u64 r2,r3;
349 #ifdef VMAL_NO_FAULT_CHECK
350 IA64_PSR vpsr;
351 vpsr.val=vmx_vcpu_get_psr(vcpu);
352 if ( vpsr.cpl != 0) {
353 /* Inject Privileged Operation fault into guest */
354 set_privileged_operation_isr (vcpu, 0);
355 privilege_op (vcpu);
356 return IA64_FAULT;
357 }
358 #endif // VMAL_NO_FAULT_CHECK
359 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
360 #ifdef VMAL_NO_FAULT_CHECK
361 ISR isr;
362 set_isr_reg_nat_consumption(vcpu,0,0);
363 rnat_comsumption(vcpu);
364 return IA64_FAULT;
365 #endif // VMAL_NO_FAULT_CHECK
366 }
367 #ifdef VMAL_NO_FAULT_CHECK
368 if (unimplemented_gva(vcpu,r3) ) {
369 isr.val = set_isr_ei_ni(vcpu);
370 isr.code = IA64_RESERVED_REG_FAULT;
371 vcpu_set_isr(vcpu, isr.val);
372 unimpl_daddr(vcpu);
373 return IA64_FAULT;
374 }
375 #endif // VMAL_NO_FAULT_CHECK
376 return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
377 }
379 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
380 {
381 IA64FAULT ret1, ret2;
383 #ifdef VMAL_NO_FAULT_CHECK
384 ISR isr;
385 IA64_PSR vpsr;
386 vpsr.val=vmx_vcpu_get_psr(vcpu);
387 if ( vpsr.cpl != 0) {
388 /* Inject Privileged Operation fault into guest */
389 set_privileged_operation_isr (vcpu, 0);
390 privilege_op (vcpu);
391 return IA64_FAULT;
392 }
393 #endif // VMAL_NO_FAULT_CHECK
394 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
395 ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
396 #ifdef VMAL_NO_FAULT_CHECK
397 if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
398 set_isr_reg_nat_consumption(vcpu,0,0);
399 rnat_comsumption(vcpu);
400 return IA64_FAULT;
401 }
402 if (unimplemented_gva(vcpu,*pr3) ) {
403 isr.val = set_isr_ei_ni(vcpu);
404 isr.code = IA64_RESERVED_REG_FAULT;
405 vcpu_set_isr(vcpu, isr.val);
406 unimpl_daddr(vcpu);
407 return IA64_FAULT;
408 }
409 #endif // VMAL_NO_FAULT_CHECK
410 return IA64_NO_FAULT;
411 }
413 IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
414 {
415 u64 r2,r3;
416 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
417 return IA64_FAULT;
418 return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
419 }
421 IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
422 {
423 u64 r2,r3;
424 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
425 return IA64_FAULT;
426 return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
427 }
430 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
431 {
432 u64 r1,r3;
433 #ifdef CHECK_FAULT
434 ISR visr;
435 IA64_PSR vpsr;
436 if(check_target_register(vcpu, inst.M46.r1)){
437 set_illegal_op_isr(vcpu);
438 illegal_op(vcpu);
439 return IA64_FAULT;
440 }
441 #endif //CHECK_FAULT
442 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
443 #ifdef CHECK_FAULT
444 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
445 return IA64_NO_FAULT;
446 #endif //CHECK_FAULT
447 }
448 #ifdef CHECK_FAULT
449 if(unimplemented_gva(vcpu, r3)){
450 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
451 return IA64_NO_FAULT;
452 }
453 #endif //CHECK_FAULT
454 vmx_vcpu_thash(vcpu, r3, &r1);
455 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
456 return(IA64_NO_FAULT);
457 }
460 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
461 {
462 u64 r1,r3;
463 #ifdef CHECK_FAULT
464 ISR visr;
465 IA64_PSR vpsr;
466 #endif
467 #ifdef CHECK_FAULT
468 if(check_target_register(vcpu, inst.M46.r1)){
469 set_illegal_op_isr(vcpu);
470 illegal_op(vcpu);
471 return IA64_FAULT;
472 }
473 #endif //CHECK_FAULT
474 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
475 #ifdef CHECK_FAULT
476 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
477 return IA64_NO_FAULT;
478 #endif //CHECK_FAULT
479 }
480 #ifdef CHECK_FAULT
481 if(unimplemented_gva(vcpu, r3)){
482 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
483 return IA64_NO_FAULT;
484 }
485 #endif //CHECK_FAULT
486 vmx_vcpu_ttag(vcpu, r3, &r1);
487 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
488 return(IA64_NO_FAULT);
489 }
492 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
493 {
494 u64 r1,r3;
495 #ifdef CHECK_FAULT
496 ISR visr;
497 if(check_target_register(vcpu, inst.M46.r1)){
498 set_illegal_op_isr(vcpu);
499 illegal_op(vcpu);
500 return IA64_FAULT;
501 }
502 IA64_PSR vpsr;
503 vpsr.val=vmx_vcpu_get_psr(vcpu);
504 if(vpsr.cpl!=0){
505 visr.val=0;
506 vcpu_set_isr(vcpu, visr.val);
507 return IA64_FAULT;
508 }
509 #endif //CHECK_FAULT
510 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
511 #ifdef CHECK_FAULT
512 set_isr_reg_nat_consumption(vcpu,0,1);
513 rnat_comsumption(vcpu);
514 return IA64_FAULT;
515 #endif //CHECK_FAULT
516 }
517 #ifdef CHECK_FAULT
518 if (unimplemented_gva(vcpu,r3) ) {
519 // inject unimplemented_data_address_fault
520 visr.val = set_isr_ei_ni(vcpu);
521 visr.code = IA64_RESERVED_REG_FAULT;
522 vcpu_set_isr(vcpu, visr.val);
523 // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
524 unimpl_daddr(vcpu);
525 return IA64_FAULT;
526 }
527 #endif //CHECK_FAULT
529 if(vmx_vcpu_tpa(vcpu, r3, &r1)){
530 return IA64_FAULT;
531 }
532 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
533 return(IA64_NO_FAULT);
534 }
536 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
537 {
538 u64 r1,r3;
539 #ifdef CHECK_FAULT
540 ISR visr;
541 IA64_PSR vpsr;
542 int fault=IA64_NO_FAULT;
543 visr.val=0;
544 if(check_target_register(vcpu, inst.M46.r1)){
545 set_illegal_op_isr(vcpu);
546 illegal_op(vcpu);
547 return IA64_FAULT;
548 }
549 vpsr.val=vmx_vcpu_get_psr(vcpu);
550 if(vpsr.cpl!=0){
551 vcpu_set_isr(vcpu, visr.val);
552 return IA64_FAULT;
553 }
554 #endif
555 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
556 #ifdef CHECK_FAULT
557 set_isr_reg_nat_consumption(vcpu,0,1);
558 rnat_comsumption(vcpu);
559 return IA64_FAULT;
560 #endif
561 }
562 if(vmx_vcpu_tak(vcpu, r3, &r1)){
563 return IA64_FAULT;
564 }
565 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
566 return(IA64_NO_FAULT);
567 }
570 /************************************
571 * Insert translation register/cache
572 ************************************/
574 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
575 {
576 UINT64 itir, ifa, pte, slot;
577 IA64_PSR vpsr;
578 vpsr.val=vmx_vcpu_get_psr(vcpu);
579 if ( vpsr.ic ) {
580 set_illegal_op_isr(vcpu);
581 illegal_op(vcpu);
582 return IA64_FAULT;
583 }
584 #ifdef VMAL_NO_FAULT_CHECK
585 ISR isr;
586 if ( vpsr.cpl != 0) {
587 /* Inject Privileged Operation fault into guest */
588 set_privileged_operation_isr (vcpu, 0);
589 privilege_op (vcpu);
590 return IA64_FAULT;
591 }
592 #endif // VMAL_NO_FAULT_CHECK
593 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
594 #ifdef VMAL_NO_FAULT_CHECK
595 set_isr_reg_nat_consumption(vcpu,0,0);
596 rnat_comsumption(vcpu);
597 return IA64_FAULT;
598 #endif // VMAL_NO_FAULT_CHECK
599 }
600 #ifdef VMAL_NO_FAULT_CHECK
601 if(is_reserved_rr_register(vcpu, slot)){
602 set_illegal_op_isr(vcpu);
603 illegal_op(vcpu);
604 return IA64_FAULT;
605 }
606 #endif // VMAL_NO_FAULT_CHECK
608 if (vcpu_get_itir(vcpu,&itir)){
609 return(IA64_FAULT);
610 }
611 if (vcpu_get_ifa(vcpu,&ifa)){
612 return(IA64_FAULT);
613 }
614 #ifdef VMAL_NO_FAULT_CHECK
615 if (is_reserved_itir_field(vcpu, itir)) {
616 // TODO
617 return IA64_FAULT;
618 }
619 if (unimplemented_gva(vcpu,ifa) ) {
620 isr.val = set_isr_ei_ni(vcpu);
621 isr.code = IA64_RESERVED_REG_FAULT;
622 vcpu_set_isr(vcpu, isr.val);
623 unimpl_daddr(vcpu);
624 return IA64_FAULT;
625 }
626 #endif // VMAL_NO_FAULT_CHECK
628 return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
629 }
631 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
632 {
633 UINT64 itir, ifa, pte, slot;
634 #ifdef VMAL_NO_FAULT_CHECK
635 ISR isr;
636 #endif
637 IA64_PSR vpsr;
638 vpsr.val=vmx_vcpu_get_psr(vcpu);
639 if ( vpsr.ic ) {
640 set_illegal_op_isr(vcpu);
641 illegal_op(vcpu);
642 return IA64_FAULT;
643 }
644 #ifdef VMAL_NO_FAULT_CHECK
645 if ( vpsr.cpl != 0) {
646 /* Inject Privileged Operation fault into guest */
647 set_privileged_operation_isr (vcpu, 0);
648 privilege_op (vcpu);
649 return IA64_FAULT;
650 }
651 #endif // VMAL_NO_FAULT_CHECK
652 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
653 #ifdef VMAL_NO_FAULT_CHECK
654 set_isr_reg_nat_consumption(vcpu,0,0);
655 rnat_comsumption(vcpu);
656 return IA64_FAULT;
657 #endif // VMAL_NO_FAULT_CHECK
658 }
659 #ifdef VMAL_NO_FAULT_CHECK
660 if(is_reserved_rr_register(vcpu, slot)){
661 set_illegal_op_isr(vcpu);
662 illegal_op(vcpu);
663 return IA64_FAULT;
664 }
665 #endif // VMAL_NO_FAULT_CHECK
667 if (vcpu_get_itir(vcpu,&itir)){
668 return(IA64_FAULT);
669 }
670 if (vcpu_get_ifa(vcpu,&ifa)){
671 return(IA64_FAULT);
672 }
673 #ifdef VMAL_NO_FAULT_CHECK
674 if (is_reserved_itir_field(vcpu, itir)) {
675 // TODO
676 return IA64_FAULT;
677 }
678 if (unimplemented_gva(vcpu,ifa) ) {
679 isr.val = set_isr_ei_ni(vcpu);
680 isr.code = IA64_RESERVED_REG_FAULT;
681 vcpu_set_isr(vcpu, isr.val);
682 unimpl_daddr(vcpu);
683 return IA64_FAULT;
684 }
685 #endif // VMAL_NO_FAULT_CHECK
687 return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
688 }
690 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
691 {
692 IA64_PSR vpsr;
693 IA64FAULT ret1;
695 vpsr.val=vmx_vcpu_get_psr(vcpu);
696 if ( vpsr.ic ) {
697 set_illegal_op_isr(vcpu);
698 illegal_op(vcpu);
699 return IA64_FAULT;
700 }
702 #ifdef VMAL_NO_FAULT_CHECK
703 UINT64 fault;
704 ISR isr;
705 if ( vpsr.cpl != 0) {
706 /* Inject Privileged Operation fault into guest */
707 set_privileged_operation_isr (vcpu, 0);
708 privilege_op (vcpu);
709 return IA64_FAULT;
710 }
711 #endif // VMAL_NO_FAULT_CHECK
712 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
713 #ifdef VMAL_NO_FAULT_CHECK
714 if( ret1 != IA64_NO_FAULT ){
715 set_isr_reg_nat_consumption(vcpu,0,0);
716 rnat_comsumption(vcpu);
717 return IA64_FAULT;
718 }
719 #endif // VMAL_NO_FAULT_CHECK
721 if (vcpu_get_itir(vcpu,itir)){
722 return(IA64_FAULT);
723 }
724 if (vcpu_get_ifa(vcpu,ifa)){
725 return(IA64_FAULT);
726 }
727 #ifdef VMAL_NO_FAULT_CHECK
728 if (unimplemented_gva(vcpu,*ifa) ) {
729 isr.val = set_isr_ei_ni(vcpu);
730 isr.code = IA64_RESERVED_REG_FAULT;
731 vcpu_set_isr(vcpu, isr.val);
732 unimpl_daddr(vcpu);
733 return IA64_FAULT;
734 }
735 #endif // VMAL_NO_FAULT_CHECK
736 return IA64_NO_FAULT;
737 }
739 IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
740 {
741 UINT64 itir, ifa, pte;
743 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
744 return IA64_FAULT;
745 }
747 return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
748 }
750 IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
751 {
752 UINT64 itir, ifa, pte;
754 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
755 return IA64_FAULT;
756 }
758 return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
760 }
762 /*************************************
763 * Moves to semi-privileged registers
764 *************************************/
766 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
767 {
768 // I27 and M30 are identical for these fields
769 UINT64 imm;
771 if(inst.M30.ar3!=44){
772 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
773 }
774 #ifdef CHECK_FAULT
775 IA64_PSR vpsr;
776 vpsr.val=vmx_vcpu_get_psr(vcpu);
777 if ( vpsr.cpl != 0) {
778 /* Inject Privileged Operation fault into guest */
779 set_privileged_operation_isr (vcpu, 0);
780 privilege_op (vcpu);
781 return IA64_FAULT;
782 }
783 #endif // CHECK_FAULT
784 if(inst.M30.s){
785 imm = -inst.M30.imm;
786 }else{
787 imm = inst.M30.imm;
788 }
789 return (vmx_vcpu_set_itc(vcpu, imm));
790 }
792 IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
793 {
794 // I26 and M29 are identical for these fields
795 u64 r2;
796 if(inst.M29.ar3!=44){
797 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
798 }
799 if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
800 #ifdef CHECK_FAULT
801 set_isr_reg_nat_consumption(vcpu,0,0);
802 rnat_comsumption(vcpu);
803 return IA64_FAULT;
804 #endif //CHECK_FAULT
805 }
806 #ifdef CHECK_FAULT
807 IA64_PSR vpsr;
808 vpsr.val=vmx_vcpu_get_psr(vcpu);
809 if ( vpsr.cpl != 0) {
810 /* Inject Privileged Operation fault into guest */
811 set_privileged_operation_isr (vcpu, 0);
812 privilege_op (vcpu);
813 return IA64_FAULT;
814 }
815 #endif // CHECK_FAULT
816 return (vmx_vcpu_set_itc(vcpu, r2));
817 }
820 IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
821 {
822 // I28 and M31 are identical for these fields
823 u64 r1;
824 if(inst.M31.ar3!=44){
825 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
826 }
827 #ifdef CHECK_FAULT
828 if(check_target_register(vcpu,inst.M31.r1)){
829 set_illegal_op_isr(vcpu);
830 illegal_op(vcpu);
831 return IA64_FAULT;
832 }
833 IA64_PSR vpsr;
834 vpsr.val=vmx_vcpu_get_psr(vcpu);
835 if (vpsr.si&& vpsr.cpl != 0) {
836 /* Inject Privileged Operation fault into guest */
837 set_privileged_operation_isr (vcpu, 0);
838 privilege_op (vcpu);
839 return IA64_FAULT;
840 }
841 #endif // CHECK_FAULT
842 vmx_vcpu_get_itc(vcpu,&r1);
843 vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
844 return IA64_NO_FAULT;
845 }
848 /********************************
849 * Moves to privileged registers
850 ********************************/
852 IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
853 {
854 u64 r3,r2;
855 #ifdef CHECK_FAULT
856 IA64_PSR vpsr;
857 vpsr.val=vmx_vcpu_get_psr(vcpu);
858 if (vpsr.cpl != 0) {
859 /* Inject Privileged Operation fault into guest */
860 set_privileged_operation_isr (vcpu, 0);
861 privilege_op (vcpu);
862 return IA64_FAULT;
863 }
864 #endif // CHECK_FAULT
865 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
866 #ifdef CHECK_FAULT
867 set_isr_reg_nat_consumption(vcpu,0,0);
868 rnat_comsumption(vcpu);
869 return IA64_FAULT;
870 #endif //CHECK_FAULT
871 }
872 return (vmx_vcpu_set_pkr(vcpu,r3,r2));
873 }
875 IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
876 {
877 u64 r3,r2;
878 #ifdef CHECK_FAULT
879 IA64_PSR vpsr;
880 vpsr.val=vmx_vcpu_get_psr(vcpu);
881 if (vpsr.cpl != 0) {
882 /* Inject Privileged Operation fault into guest */
883 set_privileged_operation_isr (vcpu, 0);
884 privilege_op (vcpu);
885 return IA64_FAULT;
886 }
887 #endif // CHECK_FAULT
888 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
889 #ifdef CHECK_FAULT
890 set_isr_reg_nat_consumption(vcpu,0,0);
891 rnat_comsumption(vcpu);
892 return IA64_FAULT;
893 #endif //CHECK_FAULT
894 }
895 return (vmx_vcpu_set_rr(vcpu,r3,r2));
896 }
898 IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
899 {
900 u64 r3,r2;
901 return IA64_NO_FAULT;
902 #ifdef CHECK_FAULT
903 IA64_PSR vpsr;
904 vpsr.val=vmx_vcpu_get_psr(vcpu);
905 if (vpsr.cpl != 0) {
906 /* Inject Privileged Operation fault into guest */
907 set_privileged_operation_isr (vcpu, 0);
908 privilege_op (vcpu);
909 return IA64_FAULT;
910 }
911 #endif // CHECK_FAULT
912 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
913 #ifdef CHECK_FAULT
914 set_isr_reg_nat_consumption(vcpu,0,0);
915 rnat_comsumption(vcpu);
916 return IA64_FAULT;
917 #endif //CHECK_FAULT
918 }
919 return (vmx_vcpu_set_dbr(vcpu,r3,r2));
920 }
922 IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
923 {
924 u64 r3,r2;
925 return IA64_NO_FAULT;
926 #ifdef CHECK_FAULT
927 IA64_PSR vpsr;
928 vpsr.val=vmx_vcpu_get_psr(vcpu);
929 if (vpsr.cpl != 0) {
930 /* Inject Privileged Operation fault into guest */
931 set_privileged_operation_isr (vcpu, 0);
932 privilege_op (vcpu);
933 return IA64_FAULT;
934 }
935 #endif // CHECK_FAULT
936 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
937 #ifdef CHECK_FAULT
938 set_isr_reg_nat_consumption(vcpu,0,0);
939 rnat_comsumption(vcpu);
940 return IA64_FAULT;
941 #endif //CHECK_FAULT
942 }
943 return (vmx_vcpu_set_ibr(vcpu,r3,r2));
944 }
946 IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
947 {
948 u64 r3,r2;
949 #ifdef CHECK_FAULT
950 IA64_PSR vpsr;
951 vpsr.val=vmx_vcpu_get_psr(vcpu);
952 if (vpsr.cpl != 0) {
953 /* Inject Privileged Operation fault into guest */
954 set_privileged_operation_isr (vcpu, 0);
955 privilege_op (vcpu);
956 return IA64_FAULT;
957 }
958 #endif // CHECK_FAULT
959 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
960 #ifdef CHECK_FAULT
961 set_isr_reg_nat_consumption(vcpu,0,0);
962 rnat_comsumption(vcpu);
963 return IA64_FAULT;
964 #endif //CHECK_FAULT
965 }
966 return (vmx_vcpu_set_pmc(vcpu,r3,r2));
967 }
969 IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
970 {
971 u64 r3,r2;
972 #ifdef CHECK_FAULT
973 IA64_PSR vpsr;
974 vpsr.val=vmx_vcpu_get_psr(vcpu);
975 if (vpsr.cpl != 0) {
976 /* Inject Privileged Operation fault into guest */
977 set_privileged_operation_isr (vcpu, 0);
978 privilege_op (vcpu);
979 return IA64_FAULT;
980 }
981 #endif // CHECK_FAULT
982 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
983 #ifdef CHECK_FAULT
984 set_isr_reg_nat_consumption(vcpu,0,0);
985 rnat_comsumption(vcpu);
986 return IA64_FAULT;
987 #endif //CHECK_FAULT
988 }
989 return (vmx_vcpu_set_pmd(vcpu,r3,r2));
990 }
993 /**********************************
994 * Moves from privileged registers
995 **********************************/
997 IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
998 {
999 u64 r3,r1;
1000 #ifdef CHECK_FAULT
1001 if(check_target_register(vcpu, inst.M43.r1)){
1002 set_illegal_op_isr(vcpu);
1003 illegal_op(vcpu);
1004 return IA64_FAULT;
1005 }
1006 IA64_PSR vpsr;
1007 vpsr.val=vmx_vcpu_get_psr(vcpu);
1008 if (vpsr.cpl != 0) {
1009 /* Inject Privileged Operation fault into guest */
1010 set_privileged_operation_isr (vcpu, 0);
1011 privilege_op (vcpu);
1012 return IA64_FAULT;
1013 }
1015 #endif //CHECK_FAULT
1016 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1017 #ifdef CHECK_FAULT
1018 set_isr_reg_nat_consumption(vcpu,0,0);
1019 rnat_comsumption(vcpu);
1020 return IA64_FAULT;
1021 #endif //CHECK_FAULT
1022 }
1023 #ifdef CHECK_FAULT
1024 if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
1025 set_rsv_reg_field_isr(vcpu);
1026 rsv_reg_field(vcpu);
1027 }
1028 #endif //CHECK_FAULT
1029 vcpu_get_rr(vcpu,r3,&r1);
1030 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1031 }
1033 IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
1034 {
1035 u64 r3,r1;
1036 #ifdef CHECK_FAULT
1037 if(check_target_register(vcpu, inst.M43.r1)){
1038 set_illegal_op_isr(vcpu);
1039 illegal_op(vcpu);
1040 return IA64_FAULT;
1041 }
1042 IA64_PSR vpsr;
1043 vpsr.val=vmx_vcpu_get_psr(vcpu);
1044 if (vpsr.cpl != 0) {
1045 /* Inject Privileged Operation fault into guest */
1046 set_privileged_operation_isr (vcpu, 0);
1047 privilege_op (vcpu);
1048 return IA64_FAULT;
1049 }
1051 #endif //CHECK_FAULT
1052 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1053 #ifdef CHECK_FAULT
1054 set_isr_reg_nat_consumption(vcpu,0,0);
1055 rnat_comsumption(vcpu);
1056 return IA64_FAULT;
1057 #endif //CHECK_FAULT
1058 }
1059 #ifdef CHECK_FAULT
1060 if(is_reserved_indirect_register(vcpu,r3)){
1061 set_rsv_reg_field_isr(vcpu);
1062 rsv_reg_field(vcpu);
1063 return IA64_FAULT;
1064 }
1065 #endif //CHECK_FAULT
1066 vmx_vcpu_get_pkr(vcpu,r3,&r1);
1067 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1068 }
1070 IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
1071 {
1072 u64 r3,r1;
1073 #ifdef CHECK_FAULT
1074 if(check_target_register(vcpu, inst.M43.r1)){
1075 set_illegal_op_isr(vcpu);
1076 illegal_op(vcpu);
1077 return IA64_FAULT;
1078 }
1079 IA64_PSR vpsr;
1080 vpsr.val=vmx_vcpu_get_psr(vcpu);
1081 if (vpsr.cpl != 0) {
1082 /* Inject Privileged Operation fault into guest */
1083 set_privileged_operation_isr (vcpu, 0);
1084 privilege_op (vcpu);
1085 return IA64_FAULT;
1086 }
1088 #endif //CHECK_FAULT
1089 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1090 #ifdef CHECK_FAULT
1091 set_isr_reg_nat_consumption(vcpu,0,0);
1092 rnat_comsumption(vcpu);
1093 return IA64_FAULT;
1094 #endif //CHECK_FAULT
1095 }
1096 #ifdef CHECK_FAULT
1097 if(is_reserved_indirect_register(vcpu,r3)){
1098 set_rsv_reg_field_isr(vcpu);
1099 rsv_reg_field(vcpu);
1100 return IA64_FAULT;
1101 }
1102 #endif //CHECK_FAULT
1103 vmx_vcpu_get_dbr(vcpu,r3,&r1);
1104 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1105 }
1107 IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
1108 {
1109 u64 r3,r1;
1110 #ifdef CHECK_FAULT
1111 if(check_target_register(vcpu, inst.M43.r1)){
1112 set_illegal_op_isr(vcpu);
1113 illegal_op(vcpu);
1114 return IA64_FAULT;
1115 }
1116 IA64_PSR vpsr;
1117 vpsr.val=vmx_vcpu_get_psr(vcpu);
1118 if (vpsr.cpl != 0) {
1119 /* Inject Privileged Operation fault into guest */
1120 set_privileged_operation_isr (vcpu, 0);
1121 privilege_op (vcpu);
1122 return IA64_FAULT;
1123 }
1125 #endif //CHECK_FAULT
1126 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1127 #ifdef CHECK_FAULT
1128 set_isr_reg_nat_consumption(vcpu,0,0);
1129 rnat_comsumption(vcpu);
1130 return IA64_FAULT;
1131 #endif //CHECK_FAULT
1132 }
1133 #ifdef CHECK_FAULT
1134 if(is_reserved_indirect_register(vcpu,r3)){
1135 set_rsv_reg_field_isr(vcpu);
1136 rsv_reg_field(vcpu);
1137 return IA64_FAULT;
1138 }
1139 #endif //CHECK_FAULT
1140 vmx_vcpu_get_ibr(vcpu,r3,&r1);
1141 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1142 }
1144 IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
1145 {
1146 u64 r3,r1;
1147 #ifdef CHECK_FAULT
1148 if(check_target_register(vcpu, inst.M43.r1)){
1149 set_illegal_op_isr(vcpu);
1150 illegal_op(vcpu);
1151 return IA64_FAULT;
1152 }
1153 IA64_PSR vpsr;
1154 vpsr.val=vmx_vcpu_get_psr(vcpu);
1155 if (vpsr.cpl != 0) {
1156 /* Inject Privileged Operation fault into guest */
1157 set_privileged_operation_isr (vcpu, 0);
1158 privilege_op (vcpu);
1159 return IA64_FAULT;
1160 }
1162 #endif //CHECK_FAULT
1163 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1164 #ifdef CHECK_FAULT
1165 set_isr_reg_nat_consumption(vcpu,0,0);
1166 rnat_comsumption(vcpu);
1167 return IA64_FAULT;
1168 #endif //CHECK_FAULT
1169 }
1170 #ifdef CHECK_FAULT
1171 if(is_reserved_indirect_register(vcpu,r3)){
1172 set_rsv_reg_field_isr(vcpu);
1173 rsv_reg_field(vcpu);
1174 return IA64_FAULT;
1175 }
1176 #endif //CHECK_FAULT
1177 vmx_vcpu_get_pmc(vcpu,r3,&r1);
1178 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1179 }
1181 IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
1182 {
1183 u64 r3,r1;
1184 #ifdef CHECK_FAULT
1185 if(check_target_register(vcpu, inst.M43.r1)){
1186 set_illegal_op_isr(vcpu);
1187 illegal_op(vcpu);
1188 return IA64_FAULT;
1189 }
1190 #endif //CHECK_FAULT
1191 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1192 #ifdef CHECK_FAULT
1193 set_isr_reg_nat_consumption(vcpu,0,0);
1194 rnat_comsumption(vcpu);
1195 return IA64_FAULT;
1196 #endif //CHECK_FAULT
1197 }
1198 #ifdef CHECK_FAULT
1199 if(is_reserved_indirect_register(vcpu,r3)){
1200 set_rsv_reg_field_isr(vcpu);
1201 rsv_reg_field(vcpu);
1202 return IA64_FAULT;
1203 }
1204 #endif //CHECK_FAULT
1205 vmx_vcpu_get_cpuid(vcpu,r3,&r1);
1206 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1207 }
1209 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
1210 {
1211 u64 r2;
1212 extern u64 cr_igfld_mask(int index, u64 value);
1213 #ifdef CHECK_FAULT
1214 IA64_PSR vpsr;
1215 vpsr.val=vmx_vcpu_get_psr(vcpu);
1216 if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
1217 set_illegal_op_isr(vcpu);
1218 illegal_op(vcpu);
1219 return IA64_FAULT;
1220 }
1221 if ( vpsr.cpl != 0) {
1222 /* Inject Privileged Operation fault into guest */
1223 set_privileged_operation_isr (vcpu, 0);
1224 privilege_op (vcpu);
1225 return IA64_FAULT;
1226 }
1227 #endif // CHECK_FAULT
1228 if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
1229 #ifdef CHECK_FAULT
1230 set_isr_reg_nat_consumption(vcpu,0,0);
1231 rnat_comsumption(vcpu);
1232 return IA64_FAULT;
1233 #endif //CHECK_FAULT
1234 }
1235 #ifdef CHECK_FAULT
1236 if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
1237 /* Inject Reserved Register/Field fault
1238 * into guest */
1239 set_rsv_reg_field_isr (vcpu,0);
1240 rsv_reg_field (vcpu);
1241 return IA64_FAULT;
1242 }
1243 #endif //CHECK_FAULT
1244 r2 = cr_igfld_mask(inst.M32.cr3,r2);
1245 switch (inst.M32.cr3) {
1246 case 0: return vmx_vcpu_set_dcr(vcpu,r2);
1247 case 1: return vmx_vcpu_set_itm(vcpu,r2);
1248 case 2: return vmx_vcpu_set_iva(vcpu,r2);
1249 case 8: return vmx_vcpu_set_pta(vcpu,r2);
1250 case 16:return vcpu_set_ipsr(vcpu,r2);
1251 case 17:return vcpu_set_isr(vcpu,r2);
1252 case 19:return vcpu_set_iip(vcpu,r2);
1253 case 20:return vcpu_set_ifa(vcpu,r2);
1254 case 21:return vcpu_set_itir(vcpu,r2);
1255 case 22:return vcpu_set_iipa(vcpu,r2);
1256 case 23:return vcpu_set_ifs(vcpu,r2);
1257 case 24:return vcpu_set_iim(vcpu,r2);
1258 case 25:return vcpu_set_iha(vcpu,r2);
1259 case 64:printk("SET LID to 0x%lx\n", r2);
1260 return IA64_NO_FAULT;
1261 case 65:return IA64_NO_FAULT;
1262 case 66:return vmx_vcpu_set_tpr(vcpu,r2);
1263 case 67:return vmx_vcpu_set_eoi(vcpu,r2);
1264 case 68:return IA64_NO_FAULT;
1265 case 69:return IA64_NO_FAULT;
1266 case 70:return IA64_NO_FAULT;
1267 case 71:return IA64_NO_FAULT;
1268 case 72:return vmx_vcpu_set_itv(vcpu,r2);
1269 case 73:return vmx_vcpu_set_pmv(vcpu,r2);
1270 case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
1271 case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
1272 case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
1273 default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1274 return IA64_NO_FAULT;
1275 }
1276 }
1279 #define cr_get(cr) \
1280 ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1281 vcpu_set_gr(vcpu, tgt, val,0):fault;
1283 #define vmx_cr_get(cr) \
1284 ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1285 vcpu_set_gr(vcpu, tgt, val,0):fault;
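/*
 * For example, "case 20: return cr_get(ifa);" below expands roughly to
 *     ((fault = vcpu_get_ifa(vcpu, &val)) == IA64_NO_FAULT) ?
 *         vcpu_set_gr(vcpu, tgt, val, 0) : fault;
 * i.e. the control register is read into val and, on success, copied into
 * the instruction's target general register.
 */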
1287 IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
1288 {
1289 UINT64 tgt = inst.M33.r1;
1290 UINT64 val;
1291 IA64FAULT fault;
1292 #ifdef CHECK_FAULT
1293 IA64_PSR vpsr;
1294 vpsr.val=vmx_vcpu_get_psr(vcpu);
1295 if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
1296 (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
1297 set_illegal_op_isr(vcpu);
1298 illegal_op(vcpu);
1299 return IA64_FAULT;
1300 }
1301 if ( vpsr.cpl != 0) {
1302 /* Inject Privileged Operation fault into guest */
1303 set_privileged_operation_isr (vcpu, 0);
1304 privilege_op (vcpu);
1305 return IA64_FAULT;
1306 }
1307 #endif // CHECK_FAULT
1309 // from_cr_cnt[inst.M33.cr3]++;
1310 switch (inst.M33.cr3) {
1311 case 0: return vmx_cr_get(dcr);
1312 case 1: return vmx_cr_get(itm);
1313 case 2: return vmx_cr_get(iva);
1314 case 8: return vmx_cr_get(pta);
1315 case 16:return cr_get(ipsr);
1316 case 17:return cr_get(isr);
1317 case 19:return cr_get(iip);
1318 case 20:return cr_get(ifa);
1319 case 21:return cr_get(itir);
1320 case 22:return cr_get(iipa);
1321 case 23:return cr_get(ifs);
1322 case 24:return cr_get(iim);
1323 case 25:return cr_get(iha);
1324 case 64:return vmx_cr_get(lid);
1325 case 65:
1326 vmx_vcpu_get_ivr(vcpu,&val);
1327 return vcpu_set_gr(vcpu,tgt,val,0);
1328 case 66:return vmx_cr_get(tpr);
1329 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
1330 case 68:return vmx_cr_get(irr0);
1331 case 69:return vmx_cr_get(irr1);
1332 case 70:return vmx_cr_get(irr2);
1333 case 71:return vmx_cr_get(irr3);
1334 case 72:return vmx_cr_get(itv);
1335 case 73:return vmx_cr_get(pmv);
1336 case 74:return vmx_cr_get(cmcv);
1337 case 80:return vmx_cr_get(lrr0);
1338 case 81:return vmx_cr_get(lrr1);
1339 default: return IA64_NO_FAULT;
1340 }
1341 }
1344 //#define BYPASS_VMAL_OPCODE
1345 extern IA64_SLOT_TYPE slot_types[0x20][3];
1346 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
1347 {
1348 IA64_BUNDLE bundle;
1349 fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
1350 return bundle;
1351 }
1353 /** Emulate a privileged operation.
1356 * @param vcpu virtual cpu
1357 * @cause the reason that caused the virtualization fault
1358 * @opcode the instruction code that caused the virtualization fault
1359 */
1361 void
1362 vmx_emulate(VCPU *vcpu, REGS *regs)
1363 {
1364 IA64FAULT status;
1365 INST64 inst;
1366 UINT64 iip, cause, opcode;
1367 iip = regs->cr_iip;
1368 cause = VMX(vcpu,cause);
1369 opcode = VMX(vcpu,opcode);
1371 #ifdef VTLB_DEBUG
1372 check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
1373 dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
1374 #endif
1375 #if 0
1376 if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
1377 printf ("VMAL decode error: cause - %lx; op - %lx\n",
1378 cause, opcode );
1379 return;
1380 }
1381 #endif
1382 #ifdef BYPASS_VMAL_OPCODE
1383 // make a local copy of the bundle containing the privop
1384 IA64_BUNDLE bundle;
1385 int slot;
1386 IA64_SLOT_TYPE slot_type;
1387 IA64_PSR vpsr;
1388 bundle = __vmx_get_domain_bundle(iip);
1389 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
1390 if (!slot) inst.inst = bundle.slot0;
1391 else if (slot == 1)
1392 inst.inst = bundle.slot1a + (bundle.slot1b<<18);
1393 else if (slot == 2) inst.inst = bundle.slot2;
1394 else printf("priv_handle_op: illegal slot: %d\n", slot);
1395 slot_type = slot_types[bundle.template][slot];
1396 ia64_priv_decoder(slot_type, inst, &cause);
1397 if(cause==0){
1398 panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
1399 }
1400 #else
1401 inst.inst=opcode;
1402 #endif /* BYPASS_VMAL_OPCODE */
1403 /*
1404 * Switch to actual virtual rid in rr0 and rr4,
1405 * which is required by some tlb related instructions.
1406 */
1407 prepare_if_physical_mode(vcpu);
1409 switch(cause) {
1410 case EVENT_RSM:
1411 status=vmx_emul_rsm(vcpu, inst);
1412 break;
1413 case EVENT_SSM:
1414 status=vmx_emul_ssm(vcpu, inst);
1415 break;
1416 case EVENT_MOV_TO_PSR:
1417 status=vmx_emul_mov_to_psr(vcpu, inst);
1418 break;
1419 case EVENT_MOV_FROM_PSR:
1420 status=vmx_emul_mov_from_psr(vcpu, inst);
1421 break;
1422 case EVENT_MOV_FROM_CR:
1423 status=vmx_emul_mov_from_cr(vcpu, inst);
1424 break;
1425 case EVENT_MOV_TO_CR:
1426 status=vmx_emul_mov_to_cr(vcpu, inst);
1427 break;
1428 case EVENT_BSW_0:
1429 status=vmx_emul_bsw0(vcpu, inst);
1430 break;
1431 case EVENT_BSW_1:
1432 status=vmx_emul_bsw1(vcpu, inst);
1433 break;
1434 case EVENT_COVER:
1435 status=vmx_emul_cover(vcpu, inst);
1436 break;
1437 case EVENT_RFI:
1438 status=vmx_emul_rfi(vcpu, inst);
1439 break;
1440 case EVENT_ITR_D:
1441 status=vmx_emul_itr_d(vcpu, inst);
1442 break;
1443 case EVENT_ITR_I:
1444 status=vmx_emul_itr_i(vcpu, inst);
1445 break;
1446 case EVENT_PTR_D:
1447 status=vmx_emul_ptr_d(vcpu, inst);
1448 break;
1449 case EVENT_PTR_I:
1450 status=vmx_emul_ptr_i(vcpu, inst);
1451 break;
1452 case EVENT_ITC_D:
1453 status=vmx_emul_itc_d(vcpu, inst);
1454 break;
1455 case EVENT_ITC_I:
1456 status=vmx_emul_itc_i(vcpu, inst);
1457 break;
1458 case EVENT_PTC_L:
1459 status=vmx_emul_ptc_l(vcpu, inst);
1460 break;
1461 case EVENT_PTC_G:
1462 status=vmx_emul_ptc_g(vcpu, inst);
1463 break;
1464 case EVENT_PTC_GA:
1465 status=vmx_emul_ptc_ga(vcpu, inst);
1466 break;
1467 case EVENT_PTC_E:
1468 status=vmx_emul_ptc_e(vcpu, inst);
1469 break;
1470 case EVENT_MOV_TO_RR:
1471 status=vmx_emul_mov_to_rr(vcpu, inst);
1472 break;
1473 case EVENT_MOV_FROM_RR:
1474 status=vmx_emul_mov_from_rr(vcpu, inst);
1475 break;
1476 case EVENT_THASH:
1477 status=vmx_emul_thash(vcpu, inst);
1478 break;
1479 case EVENT_TTAG:
1480 status=vmx_emul_ttag(vcpu, inst);
1481 break;
1482 case EVENT_TPA:
1483 status=vmx_emul_tpa(vcpu, inst);
1484 break;
1485 case EVENT_TAK:
1486 status=vmx_emul_tak(vcpu, inst);
1487 break;
1488 case EVENT_MOV_TO_AR_IMM:
1489 status=vmx_emul_mov_to_ar_imm(vcpu, inst);
1490 break;
1491 case EVENT_MOV_TO_AR:
1492 status=vmx_emul_mov_to_ar_reg(vcpu, inst);
1493 break;
1494 case EVENT_MOV_FROM_AR:
1495 status=vmx_emul_mov_from_ar_reg(vcpu, inst);
1496 break;
1497 case EVENT_MOV_TO_DBR:
1498 status=vmx_emul_mov_to_dbr(vcpu, inst);
1499 break;
1500 case EVENT_MOV_TO_IBR:
1501 status=vmx_emul_mov_to_ibr(vcpu, inst);
1502 break;
1503 case EVENT_MOV_TO_PMC:
1504 status=vmx_emul_mov_to_pmc(vcpu, inst);
1505 break;
1506 case EVENT_MOV_TO_PMD:
1507 status=vmx_emul_mov_to_pmd(vcpu, inst);
1508 break;
1509 case EVENT_MOV_TO_PKR:
1510 status=vmx_emul_mov_to_pkr(vcpu, inst);
1511 break;
1512 case EVENT_MOV_FROM_DBR:
1513 status=vmx_emul_mov_from_dbr(vcpu, inst);
1514 break;
1515 case EVENT_MOV_FROM_IBR:
1516 status=vmx_emul_mov_from_ibr(vcpu, inst);
1517 break;
1518 case EVENT_MOV_FROM_PMC:
1519 status=vmx_emul_mov_from_pmc(vcpu, inst);
1520 break;
1521 case EVENT_MOV_FROM_PKR:
1522 status=vmx_emul_mov_from_pkr(vcpu, inst);
1523 break;
1524 case EVENT_MOV_FROM_CPUID:
1525 status=vmx_emul_mov_from_cpuid(vcpu, inst);
1526 break;
1527 case EVENT_VMSW:
1528 printf ("Unimplemented instruction %ld\n", cause);
1529 status=IA64_FAULT;
1530 break;
1531 default:
1532 panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
1533 break;
1534 };
1536 #if 0
1537 if (status == IA64_FAULT)
1538 panic("Emulation failed with cause %d:\n", cause);
1539 #endif
1541 if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
1542 vmx_vcpu_increment_iip(vcpu);
1543 }
1545 recover_if_physical_mode(vcpu);
1546 return;
1547 }