ia64/xen-unstable

view xen/arch/ia64/vmx/vmx_virt.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart; the loop pattern it
applies is sketched below the changeset header.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 8c921adf4833
children
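The conversion the patch makes is mechanical: loops and bounds checks that used the build-time MAX_VCPUS constant are switched to the per-domain limit introduced by 2f9e1348aa98. A minimal sketch of that pattern, using simplified, hypothetical stand-ins for Xen's struct domain and struct vcpu rather than the real definitions:

/*
 * Illustrative sketch only: simplified stand-ins for Xen's struct domain
 * and struct vcpu, showing the MAX_VCPUS -> d->max_vcpus conversion.
 */
#include <stddef.h>

#define MAX_VCPUS 64                    /* old build-time bound */

struct vcpu { int vcpu_id; };
struct domain {
    unsigned int max_vcpus;             /* per-domain limit (2f9e1348aa98) */
    struct vcpu **vcpu;                 /* array with max_vcpus entries */
};

/* Old style: iterate over the compile-time maximum,
 *     for (i = 0; i < MAX_VCPUS; i++) ...
 * New style: bound the loop by the domain's own vcpu count. */
static void visit_vcpus(struct domain *d, void (*fn)(struct vcpu *))
{
    unsigned int i;

    for (i = 0; i < d->max_vcpus; i++)
        if (d->vcpu[i] != NULL)
            fn(d->vcpu[i]);
}

With the loop bounded by d->max_vcpus, a guest configured with a larger vcpu allocation is handled without rebuilding the hypervisor against a bigger MAX_VCPUS.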
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /*
3 * vmx_virt.c:
4 * Copyright (c) 2005, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Fred yang (fred.yang@intel.com)
20 * Shaofan Li (Susue Li) <susie.li@intel.com>
21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
22 */
23 #include <asm/bundle.h>
24 #include <asm/vmx_vcpu.h>
25 #include <asm/processor.h>
26 #include <asm/delay.h> // Debug only
27 #include <asm/vmmu.h>
28 #include <asm/vmx_mm_def.h>
29 #include <asm/smp.h>
30 #include <asm/vmx.h>
31 #include <asm/virt_event.h>
32 #include <asm/vmx_phy_mode.h>
33 #include <asm/debugger.h>
35 #ifdef BYPASS_VMAL_OPCODE
36 static void
37 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
38 {
39 *cause=0;
40 switch (slot_type) {
41 case M:
42 if (inst.generic.major==0){
43 if(inst.M28.x3==0){
44 if(inst.M44.x4==6){
45 *cause=EVENT_SSM;
46 }else if(inst.M44.x4==7){
47 *cause=EVENT_RSM;
48 }else if(inst.M30.x4==8&&inst.M30.x2==2){
49 *cause=EVENT_MOV_TO_AR_IMM;
50 }
51 }
52 }
53 else if(inst.generic.major==1){
54 if(inst.M28.x3==0){
55 if(inst.M32.x6==0x2c){
56 *cause=EVENT_MOV_TO_CR;
57 }else if(inst.M33.x6==0x24){
58 *cause=EVENT_MOV_FROM_CR;
59 }else if(inst.M35.x6==0x2d){
60 *cause=EVENT_MOV_TO_PSR;
61 }else if(inst.M36.x6==0x25){
62 *cause=EVENT_MOV_FROM_PSR;
63 }else if(inst.M29.x6==0x2A){
64 *cause=EVENT_MOV_TO_AR;
65 }else if(inst.M31.x6==0x22){
66 *cause=EVENT_MOV_FROM_AR;
67 }else if(inst.M45.x6==0x09){
68 *cause=EVENT_PTC_L;
69 }else if(inst.M45.x6==0x0A){
70 *cause=EVENT_PTC_G;
71 }else if(inst.M45.x6==0x0B){
72 *cause=EVENT_PTC_GA;
73 }else if(inst.M45.x6==0x0C){
74 *cause=EVENT_PTR_D;
75 }else if(inst.M45.x6==0x0D){
76 *cause=EVENT_PTR_I;
77 }else if(inst.M46.x6==0x1A){
78 *cause=EVENT_THASH;
79 }else if(inst.M46.x6==0x1B){
80 *cause=EVENT_TTAG;
81 }else if(inst.M46.x6==0x1E){
82 *cause=EVENT_TPA;
83 }else if(inst.M46.x6==0x1F){
84 *cause=EVENT_TAK;
85 }else if(inst.M47.x6==0x34){
86 *cause=EVENT_PTC_E;
87 }else if(inst.M41.x6==0x2E){
88 *cause=EVENT_ITC_D;
89 }else if(inst.M41.x6==0x2F){
90 *cause=EVENT_ITC_I;
91 }else if(inst.M42.x6==0x00){
92 *cause=EVENT_MOV_TO_RR;
93 }else if(inst.M42.x6==0x01){
94 *cause=EVENT_MOV_TO_DBR;
95 }else if(inst.M42.x6==0x02){
96 *cause=EVENT_MOV_TO_IBR;
97 }else if(inst.M42.x6==0x03){
98 *cause=EVENT_MOV_TO_PKR;
99 }else if(inst.M42.x6==0x04){
100 *cause=EVENT_MOV_TO_PMC;
101 }else if(inst.M42.x6==0x05){
102 *cause=EVENT_MOV_TO_PMD;
103 }else if(inst.M42.x6==0x0E){
104 *cause=EVENT_ITR_D;
105 }else if(inst.M42.x6==0x0F){
106 *cause=EVENT_ITR_I;
107 }else if(inst.M43.x6==0x10){
108 *cause=EVENT_MOV_FROM_RR;
109 }else if(inst.M43.x6==0x11){
110 *cause=EVENT_MOV_FROM_DBR;
111 }else if(inst.M43.x6==0x12){
112 *cause=EVENT_MOV_FROM_IBR;
113 }else if(inst.M43.x6==0x13){
114 *cause=EVENT_MOV_FROM_PKR;
115 }else if(inst.M43.x6==0x14){
116 *cause=EVENT_MOV_FROM_PMC;
117 /*
118 }else if(inst.M43.x6==0x15){
119 *cause=EVENT_MOV_FROM_PMD;
120 */
121 }else if(inst.M43.x6==0x17){
122 *cause=EVENT_MOV_FROM_CPUID;
123 }
124 }
125 }
126 break;
127 case B:
128 if(inst.generic.major==0){
129 if(inst.B8.x6==0x02){
130 *cause=EVENT_COVER;
131 }else if(inst.B8.x6==0x08){
132 *cause=EVENT_RFI;
133 }else if(inst.B8.x6==0x0c){
134 *cause=EVENT_BSW_0;
135 }else if(inst.B8.x6==0x0d){
136 *cause=EVENT_BSW_1;
137 }
138 }
139 case I:
140 case F:
141 case L:
142 case ILLEGAL:
143 break;
144 }
145 }
146 #endif
148 static IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
149 {
150 u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
151 return vmx_vcpu_reset_psr_sm(vcpu,imm24);
152 }
154 static IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
155 {
156 u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
157 return vmx_vcpu_set_psr_sm(vcpu,imm24);
158 }
160 static IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
161 {
162 u64 tgt = inst.M33.r1;
163 u64 val;
165 /*
166 if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
167 return vcpu_set_gr(vcpu, tgt, val);
168 else return fault;
169 */
170 val = vmx_vcpu_get_psr(vcpu);
171 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
172 return vcpu_set_gr(vcpu, tgt, val, 0);
173 }
175 /**
176 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
177 */
178 static IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
179 {
180 u64 val;
182 if (vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
183 panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
185 return vmx_vcpu_set_psr_l(vcpu, val);
186 }
189 /**************************************************************************
190 Privileged operation emulation routines
191 **************************************************************************/
193 static IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
194 {
195 IA64_PSR vpsr;
196 REGS *regs;
197 #ifdef CHECK_FAULT
198 vpsr.val=vmx_vcpu_get_psr(vcpu);
199 if ( vpsr.cpl != 0) {
200 /* Inject Privileged Operation fault into guest */
201 set_privileged_operation_isr (vcpu, 0);
202 privilege_op (vcpu);
203 return IA64_FAULT;
204 }
205 #endif // CHECK_FAULT
207 if (debugger_event(XEN_IA64_DEBUG_ON_RFI)) {
208 raise_softirq(SCHEDULE_SOFTIRQ);
209 do_softirq();
210 }
212 regs=vcpu_regs(vcpu);
213 vpsr.val=regs->cr_ipsr;
214 if ( vpsr.is == 1 ) {
215 panic_domain(regs,"We do not support IA32 instruction yet");
216 }
218 return vmx_vcpu_rfi(vcpu);
219 }
221 static IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
222 {
223 #ifdef CHECK_FAULT
224 IA64_PSR vpsr;
225 vpsr.val=vmx_vcpu_get_psr(vcpu);
226 if ( vpsr.cpl != 0) {
227 /* Inject Privileged Operation fault into guest */
228 set_privileged_operation_isr (vcpu, 0);
229 privilege_op (vcpu);
230 return IA64_FAULT;
231 }
232 #endif // CHECK_FAULT
233 return vcpu_bsw0(vcpu);
234 }
236 static IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
237 {
238 #ifdef CHECK_FAULT
239 IA64_PSR vpsr;
240 vpsr.val=vmx_vcpu_get_psr(vcpu);
241 if ( vpsr.cpl != 0) {
242 /* Inject Privileged Operation fault into guest */
243 set_privileged_operation_isr (vcpu, 0);
244 privilege_op (vcpu);
245 return IA64_FAULT;
246 }
247 #endif // CHECK_FAULT
248 return vcpu_bsw1(vcpu);
249 }
251 static IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
252 {
253 return vmx_vcpu_cover(vcpu);
254 }
256 static IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
257 {
258 u64 r2,r3;
259 #ifdef VMAL_NO_FAULT_CHECK
260 IA64_PSR vpsr;
262 vpsr.val=vmx_vcpu_get_psr(vcpu);
263 if ( vpsr.cpl != 0) {
264 /* Inject Privileged Operation fault into guest */
265 set_privileged_operation_isr (vcpu, 0);
266 privilege_op (vcpu);
267 return IA64_FAULT;
268 }
269 #endif // VMAL_NO_FAULT_CHECK
270 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
271 #ifdef VMAL_NO_FAULT_CHECK
272 ISR isr;
273 set_isr_reg_nat_consumption(vcpu,0,0);
274 rnat_comsumption(vcpu);
275 return IA64_FAULT;
276 #endif // VMAL_NO_FAULT_CHECK
277 }
278 #ifdef VMAL_NO_FAULT_CHECK
279 if (unimplemented_gva(vcpu,r3) ) {
280 unimpl_daddr(vcpu);
281 return IA64_FAULT;
282 }
283 #endif // VMAL_NO_FAULT_CHECK
285 debugger_event(XEN_IA64_DEBUG_ON_TC);
287 return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
288 }
290 static IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
291 {
292 u64 r3;
293 #ifdef VMAL_NO_FAULT_CHECK
294 IA64_PSR vpsr;
296 vpsr.val=vmx_vcpu_get_psr(vcpu);
297 ISR isr;
298 if ( vpsr.cpl != 0) {
299 /* Inject Privileged Operation fault into guest */
300 set_privileged_operation_isr (vcpu, 0);
301 privilege_op (vcpu);
302 return IA64_FAULT;
303 }
304 #endif // VMAL_NO_FAULT_CHECK
305 if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
306 #ifdef VMAL_NO_FAULT_CHECK
307 set_isr_reg_nat_consumption(vcpu,0,0);
308 rnat_comsumption(vcpu);
309 return IA64_FAULT;
310 #endif // VMAL_NO_FAULT_CHECK
311 }
312 return vmx_vcpu_ptc_e(vcpu,r3);
313 }
315 static IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
316 {
317 u64 r2,r3;
318 #ifdef VMAL_NO_FAULT_CHECK
319 IA64_PSR vpsr;
320 vpsr.val=vmx_vcpu_get_psr(vcpu);
321 if ( vpsr.cpl != 0) {
322 /* Inject Privileged Operation fault into guest */
323 set_privileged_operation_isr (vcpu, 0);
324 privilege_op (vcpu);
325 return IA64_FAULT;
326 }
327 #endif // VMAL_NO_FAULT_CHECK
328 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
329 #ifdef VMAL_NO_FAULT_CHECK
330 ISR isr;
331 set_isr_reg_nat_consumption(vcpu,0,0);
332 rnat_comsumption(vcpu);
333 return IA64_FAULT;
334 #endif // VMAL_NO_FAULT_CHECK
335 }
336 #ifdef VMAL_NO_FAULT_CHECK
337 if (unimplemented_gva(vcpu,r3) ) {
338 unimpl_daddr(vcpu);
339 return IA64_FAULT;
340 }
341 #endif // VMAL_NO_FAULT_CHECK
343 debugger_event(XEN_IA64_DEBUG_ON_TC);
345 return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
346 }
348 static IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
349 {
350 u64 r2,r3;
351 #ifdef VMAL_NO_FAULT_CHECK
352 IA64_PSR vpsr;
353 vpsr.val=vmx_vcpu_get_psr(vcpu);
354 if ( vpsr.cpl != 0) {
355 /* Inject Privileged Operation fault into guest */
356 set_privileged_operation_isr (vcpu, 0);
357 privilege_op (vcpu);
358 return IA64_FAULT;
359 }
360 #endif // VMAL_NO_FAULT_CHECK
361 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
362 #ifdef VMAL_NO_FAULT_CHECK
363 ISR isr;
364 set_isr_reg_nat_consumption(vcpu,0,0);
365 rnat_comsumption(vcpu);
366 return IA64_FAULT;
367 #endif // VMAL_NO_FAULT_CHECK
368 }
369 #ifdef VMAL_NO_FAULT_CHECK
370 if (unimplemented_gva(vcpu,r3) ) {
371 unimpl_daddr(vcpu);
372 return IA64_FAULT;
373 }
374 #endif // VMAL_NO_FAULT_CHECK
376 debugger_event(XEN_IA64_DEBUG_ON_TC);
378 return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
379 }
381 static IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
382 {
383 IA64FAULT ret1, ret2;
385 #ifdef VMAL_NO_FAULT_CHECK
386 ISR isr;
387 IA64_PSR vpsr;
388 vpsr.val=vmx_vcpu_get_psr(vcpu);
389 if ( vpsr.cpl != 0) {
390 /* Inject Privileged Operation fault into guest */
391 set_privileged_operation_isr (vcpu, 0);
392 privilege_op (vcpu);
393 return IA64_FAULT;
394 }
395 #endif // VMAL_NO_FAULT_CHECK
396 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
397 ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
398 #ifdef VMAL_NO_FAULT_CHECK
399 if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
400 set_isr_reg_nat_consumption(vcpu,0,0);
401 rnat_comsumption(vcpu);
402 return IA64_FAULT;
403 }
404 if (unimplemented_gva(vcpu,r3) ) {
405 unimpl_daddr(vcpu);
406 return IA64_FAULT;
407 }
408 #endif // VMAL_NO_FAULT_CHECK
409 return IA64_NO_FAULT;
410 }
412 static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
413 {
414 u64 r2,r3;
415 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
416 return IA64_FAULT;
418 debugger_event(XEN_IA64_DEBUG_ON_TR);
420 return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
421 }
423 static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
424 {
425 u64 r2,r3;
426 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
427 return IA64_FAULT;
429 debugger_event(XEN_IA64_DEBUG_ON_TR);
431 return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
432 }
435 static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
436 {
437 u64 r1,r3;
438 #ifdef CHECK_FAULT
439 ISR visr;
440 IA64_PSR vpsr;
441 if(check_target_register(vcpu, inst.M46.r1)){
442 set_illegal_op_isr(vcpu);
443 illegal_op(vcpu);
444 return IA64_FAULT;
445 }
446 #endif //CHECK_FAULT
447 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
448 #ifdef CHECK_FAULT
449 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
450 return IA64_NO_FAULT;
451 #endif //CHECK_FAULT
452 }
453 #ifdef CHECK_FAULT
454 if(unimplemented_gva(vcpu, r3)){
455 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
456 return IA64_NO_FAULT;
457 }
458 #endif //CHECK_FAULT
459 r1 = vmx_vcpu_thash(vcpu, r3);
460 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
461 return(IA64_NO_FAULT);
462 }
465 static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
466 {
467 u64 r1,r3;
468 #ifdef CHECK_FAULT
469 ISR visr;
470 IA64_PSR vpsr;
471 #endif
472 #ifdef CHECK_FAULT
473 if(check_target_register(vcpu, inst.M46.r1)){
474 set_illegal_op_isr(vcpu);
475 illegal_op(vcpu);
476 return IA64_FAULT;
477 }
478 #endif //CHECK_FAULT
479 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
480 #ifdef CHECK_FAULT
481 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
482 return IA64_NO_FAULT;
483 #endif //CHECK_FAULT
484 }
485 #ifdef CHECK_FAULT
486 if(unimplemented_gva(vcpu, r3)){
487 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
488 return IA64_NO_FAULT;
489 }
490 #endif //CHECK_FAULT
491 r1 = vmx_vcpu_ttag(vcpu, r3);
492 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
493 return(IA64_NO_FAULT);
494 }
497 static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
498 {
499 u64 r1,r3;
500 #ifdef CHECK_FAULT
501 ISR visr;
502 if(check_target_register(vcpu, inst.M46.r1)){
503 set_illegal_op_isr(vcpu);
504 illegal_op(vcpu);
505 return IA64_FAULT;
506 }
507 IA64_PSR vpsr;
508 vpsr.val=vmx_vcpu_get_psr(vcpu);
509 if(vpsr.cpl!=0){
510 visr.val=0;
511 vcpu_set_isr(vcpu, visr.val);
512 return IA64_FAULT;
513 }
514 #endif //CHECK_FAULT
515 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
516 #ifdef CHECK_FAULT
517 set_isr_reg_nat_consumption(vcpu,0,1);
518 rnat_comsumption(vcpu);
519 return IA64_FAULT;
520 #endif //CHECK_FAULT
521 }
522 #ifdef CHECK_FAULT
523 if (unimplemented_gva(vcpu,r3) ) {
524 // inject unimplemented_data_address_fault
525 visr.val = set_isr_ei_ni(vcpu);
526 visr.code = IA64_RESERVED_REG_FAULT;
527 vcpu_set_isr(vcpu, visr.val);
528 // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
529 unimpl_daddr(vcpu);
530 return IA64_FAULT;
531 }
532 #endif //CHECK_FAULT
534 if(vmx_vcpu_tpa(vcpu, r3, &r1)){
535 return IA64_FAULT;
536 }
537 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
538 return(IA64_NO_FAULT);
539 }
541 static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
542 {
543 u64 r1,r3;
544 #ifdef CHECK_FAULT
545 ISR visr;
546 IA64_PSR vpsr;
547 int fault=IA64_NO_FAULT;
548 visr.val=0;
549 if(check_target_register(vcpu, inst.M46.r1)){
550 set_illegal_op_isr(vcpu);
551 illegal_op(vcpu);
552 return IA64_FAULT;
553 }
554 vpsr.val=vmx_vcpu_get_psr(vcpu);
555 if(vpsr.cpl!=0){
556 vcpu_set_isr(vcpu, visr.val);
557 return IA64_FAULT;
558 }
559 #endif
560 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
561 #ifdef CHECK_FAULT
562 set_isr_reg_nat_consumption(vcpu,0,1);
563 rnat_comsumption(vcpu);
564 return IA64_FAULT;
565 #endif
566 }
567 r1 = vmx_vcpu_tak(vcpu, r3);
568 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
569 return(IA64_NO_FAULT);
570 }
573 /************************************
574 * Insert translation register/cache
575 ************************************/
577 static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
578 {
579 u64 itir, ifa, pte, slot;
580 ISR isr;
582 #ifdef VMAL_NO_FAULT_CHECK
583 IA64_PSR vpsr;
585 vpsr.val = vmx_vcpu_get_psr(vcpu);
586 if (vpsr.ic) {
587 set_illegal_op_isr(vcpu);
588 illegal_op(vcpu);
589 return IA64_FAULT;
590 }
591 if (vpsr.cpl != 0) {
592 /* Inject Privileged Operation fault into guest */
593 set_privileged_operation_isr(vcpu, 0);
594 privilege_op (vcpu);
595 return IA64_FAULT;
596 }
597 #endif // VMAL_NO_FAULT_CHECK
598 if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
599 || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
600 #ifdef VMAL_NO_FAULT_CHECK
601 set_isr_reg_nat_consumption(vcpu, 0, 0);
602 rnat_comsumption(vcpu);
603 return IA64_FAULT;
604 #endif // VMAL_NO_FAULT_CHECK
605 }
606 #ifdef VMAL_NO_FAULT_CHECK
607 if (is_reserved_rr_register(vcpu, slot)) {
608 set_illegal_op_isr(vcpu);
609 illegal_op(vcpu);
610 return IA64_FAULT;
611 }
612 #endif // VMAL_NO_FAULT_CHECK
614 if (vcpu_get_itir(vcpu ,&itir)) {
615 return(IA64_FAULT);
616 }
617 if (vcpu_get_ifa(vcpu, &ifa)) {
618 return(IA64_FAULT);
619 }
620 #ifdef VMAL_NO_FAULT_CHECK
621 if (is_reserved_itir_field(vcpu, itir)) {
622 // TODO
623 return IA64_FAULT;
624 }
625 if (unimplemented_gva(vcpu, ifa)) {
626 unimpl_daddr(vcpu);
627 return IA64_FAULT;
628 }
629 #endif // VMAL_NO_FAULT_CHECK
631 if (slot >= NDTRS) {
632 isr.val = set_isr_ei_ni(vcpu);
633 isr.code = IA64_RESERVED_REG_FAULT;
634 vcpu_set_isr(vcpu, isr.val);
635 rsv_reg_field(vcpu);
636 return IA64_FAULT;
637 }
639 debugger_event(XEN_IA64_DEBUG_ON_TR);
641 return (vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa));
642 }
644 static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
645 {
646 u64 itir, ifa, pte, slot;
647 ISR isr;
648 #ifdef VMAL_NO_FAULT_CHECK
649 IA64_PSR vpsr;
650 vpsr.val = vmx_vcpu_get_psr(vcpu);
651 if (vpsr.ic) {
652 set_illegal_op_isr(vcpu);
653 illegal_op(vcpu);
654 return IA64_FAULT;
655 }
656 if (vpsr.cpl != 0) {
657 /* Inject Privileged Operation fault into guest */
658 set_privileged_operation_isr(vcpu, 0);
659 privilege_op(vcpu);
660 return IA64_FAULT;
661 }
662 #endif // VMAL_NO_FAULT_CHECK
663 if (vcpu_get_gr_nat(vcpu, inst.M45.r3, &slot)
664 || vcpu_get_gr_nat(vcpu, inst.M45.r2, &pte)) {
665 #ifdef VMAL_NO_FAULT_CHECK
666 set_isr_reg_nat_consumption(vcpu, 0, 0);
667 rnat_comsumption(vcpu);
668 return IA64_FAULT;
669 #endif // VMAL_NO_FAULT_CHECK
670 }
671 #ifdef VMAL_NO_FAULT_CHECK
672 if (is_reserved_rr_register(vcpu, slot)) {
673 set_illegal_op_isr(vcpu);
674 illegal_op(vcpu);
675 return IA64_FAULT;
676 }
677 #endif // VMAL_NO_FAULT_CHECK
679 if (vcpu_get_itir(vcpu, &itir)) {
680 return IA64_FAULT;
681 }
682 if (vcpu_get_ifa(vcpu, &ifa)) {
683 return IA64_FAULT;
684 }
685 #ifdef VMAL_NO_FAULT_CHECK
686 if (is_reserved_itir_field(vcpu, itir)) {
687 // TODO
688 return IA64_FAULT;
689 }
690 if (unimplemented_gva(vcpu, ifa)) {
691 unimpl_daddr(vcpu);
692 return IA64_FAULT;
693 }
694 #endif // VMAL_NO_FAULT_CHECK
696 if (slot >= NITRS) {
697 isr.val = set_isr_ei_ni(vcpu);
698 isr.code = IA64_RESERVED_REG_FAULT;
699 vcpu_set_isr(vcpu, isr.val);
700 rsv_reg_field(vcpu);
701 return IA64_FAULT;
702 }
704 debugger_event(XEN_IA64_DEBUG_ON_TR);
706 return vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa);
707 }
709 static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
710 u64 *itir, u64 *ifa, u64 *pte)
711 {
712 IA64FAULT ret1;
714 #ifdef VMAL_NO_FAULT_CHECK
715 IA64_PSR vpsr;
716 vpsr.val = vmx_vcpu_get_psr(vcpu);
717 if (vpsr.ic) {
718 set_illegal_op_isr(vcpu);
719 illegal_op(vcpu);
720 return IA64_FAULT;
721 }
723 u64 fault;
724 ISR isr;
725 if (vpsr.cpl != 0) {
726 /* Inject Privileged Operation fault into guest */
727 set_privileged_operation_isr(vcpu, 0);
728 privilege_op(vcpu);
729 return IA64_FAULT;
730 }
731 #endif // VMAL_NO_FAULT_CHECK
732 ret1 = vcpu_get_gr_nat(vcpu, inst.M45.r2,pte);
733 #ifdef VMAL_NO_FAULT_CHECK
734 if (ret1 != IA64_NO_FAULT) {
735 set_isr_reg_nat_consumption(vcpu, 0, 0);
736 rnat_comsumption(vcpu);
737 return IA64_FAULT;
738 }
739 #endif // VMAL_NO_FAULT_CHECK
741 if (vcpu_get_itir(vcpu, itir)) {
742 return IA64_FAULT;
743 }
744 if (vcpu_get_ifa(vcpu, ifa)) {
745 return IA64_FAULT;
746 }
747 #ifdef VMAL_NO_FAULT_CHECK
748 if (unimplemented_gva(vcpu,ifa) ) {
749 unimpl_daddr(vcpu);
750 return IA64_FAULT;
751 }
752 #endif // VMAL_NO_FAULT_CHECK
753 return IA64_NO_FAULT;
754 }
756 static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
757 {
758 u64 itir, ifa, pte;
760 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
761 return IA64_FAULT;
762 }
764 debugger_event(XEN_IA64_DEBUG_ON_TC);
766 return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
767 }
769 static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
770 {
771 u64 itir, ifa, pte;
773 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
774 return IA64_FAULT;
775 }
777 debugger_event(XEN_IA64_DEBUG_ON_TC);
779 return vmx_vcpu_itc_i(vcpu, pte, itir, ifa);
780 }
782 /*************************************
783 * Moves to semi-privileged registers
784 *************************************/
786 static IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
787 {
788 // I27 and M30 are identical for these fields
789 u64 imm;
791 if(inst.M30.ar3!=44){
792 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
793 }
794 #ifdef CHECK_FAULT
795 IA64_PSR vpsr;
796 vpsr.val=vmx_vcpu_get_psr(vcpu);
797 if ( vpsr.cpl != 0) {
798 /* Inject Privileged Operation fault into guest */
799 set_privileged_operation_isr (vcpu, 0);
800 privilege_op (vcpu);
801 return IA64_FAULT;
802 }
803 #endif // CHECK_FAULT
804 if(inst.M30.s){
805 imm = -inst.M30.imm;
806 }else{
807 imm = inst.M30.imm;
808 }
809 return (vmx_vcpu_set_itc(vcpu, imm));
810 }
812 static IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
813 {
814 // I26 and M29 are identical for these fields
815 u64 r2;
816 if(inst.M29.ar3!=44){
817 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
818 }
819 if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
820 #ifdef CHECK_FAULT
821 set_isr_reg_nat_consumption(vcpu,0,0);
822 rnat_comsumption(vcpu);
823 return IA64_FAULT;
824 #endif //CHECK_FAULT
825 }
826 #ifdef CHECK_FAULT
827 IA64_PSR vpsr;
828 vpsr.val=vmx_vcpu_get_psr(vcpu);
829 if ( vpsr.cpl != 0) {
830 /* Inject Privileged Operation fault into guest */
831 set_privileged_operation_isr (vcpu, 0);
832 privilege_op (vcpu);
833 return IA64_FAULT;
834 }
835 #endif // CHECK_FAULT
836 return (vmx_vcpu_set_itc(vcpu, r2));
837 }
840 static IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
841 {
842 // I28 and M31 are identical for these fields
843 u64 r1;
844 if(inst.M31.ar3!=44){
845 panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
846 }
847 #ifdef CHECK_FAULT
848 if(check_target_register(vcpu,inst.M31.r1)){
849 set_illegal_op_isr(vcpu);
850 illegal_op(vcpu);
851 return IA64_FAULT;
852 }
853 IA64_PSR vpsr;
854 vpsr.val=vmx_vcpu_get_psr(vcpu);
855 if (vpsr.si&& vpsr.cpl != 0) {
856 /* Inject Privileged Operation fault into guest */
857 set_privileged_operation_isr (vcpu, 0);
858 privilege_op (vcpu);
859 return IA64_FAULT;
860 }
861 #endif // CHECK_FAULT
862 r1 = vmx_vcpu_get_itc(vcpu);
863 vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
864 return IA64_NO_FAULT;
865 }
868 /********************************
869 * Moves to privileged registers
870 ********************************/
872 static IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
873 {
874 u64 r3,r2;
875 #ifdef CHECK_FAULT
876 IA64_PSR vpsr;
877 vpsr.val=vmx_vcpu_get_psr(vcpu);
878 if (vpsr.cpl != 0) {
879 /* Inject Privileged Operation fault into guest */
880 set_privileged_operation_isr (vcpu, 0);
881 privilege_op (vcpu);
882 return IA64_FAULT;
883 }
884 #endif // CHECK_FAULT
885 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
886 #ifdef CHECK_FAULT
887 set_isr_reg_nat_consumption(vcpu,0,0);
888 rnat_comsumption(vcpu);
889 return IA64_FAULT;
890 #endif //CHECK_FAULT
891 }
892 return (vmx_vcpu_set_pkr(vcpu,r3,r2));
893 }
895 static IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
896 {
897 u64 r3,r2;
898 #ifdef CHECK_FAULT
899 IA64_PSR vpsr;
900 vpsr.val=vmx_vcpu_get_psr(vcpu);
901 if (vpsr.cpl != 0) {
902 /* Inject Privileged Operation fault into guest */
903 set_privileged_operation_isr (vcpu, 0);
904 privilege_op (vcpu);
905 return IA64_FAULT;
906 }
907 #endif // CHECK_FAULT
908 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
909 #ifdef CHECK_FAULT
910 set_isr_reg_nat_consumption(vcpu,0,0);
911 rnat_comsumption(vcpu);
912 return IA64_FAULT;
913 #endif //CHECK_FAULT
914 }
915 return (vmx_vcpu_set_rr(vcpu,r3,r2));
916 }
918 static IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
919 {
920 u64 r3,r2;
921 #ifdef CHECK_FAULT
922 IA64_PSR vpsr;
923 vpsr.val=vmx_vcpu_get_psr(vcpu);
924 if (vpsr.cpl != 0) {
925 /* Inject Privileged Operation fault into guest */
926 set_privileged_operation_isr (vcpu, 0);
927 privilege_op (vcpu);
928 return IA64_FAULT;
929 }
930 #endif // CHECK_FAULT
931 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
932 #ifdef CHECK_FAULT
933 set_isr_reg_nat_consumption(vcpu,0,0);
934 rnat_comsumption(vcpu);
935 return IA64_FAULT;
936 #endif //CHECK_FAULT
937 }
938 return (vmx_vcpu_set_dbr(vcpu,r3,r2));
939 }
941 static IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
942 {
943 u64 r3,r2;
944 #ifdef CHECK_FAULT
945 IA64_PSR vpsr;
946 vpsr.val=vmx_vcpu_get_psr(vcpu);
947 if (vpsr.cpl != 0) {
948 /* Inject Privileged Operation fault into guest */
949 set_privileged_operation_isr (vcpu, 0);
950 privilege_op (vcpu);
951 return IA64_FAULT;
952 }
953 #endif // CHECK_FAULT
954 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
955 #ifdef CHECK_FAULT
956 set_isr_reg_nat_consumption(vcpu,0,0);
957 rnat_comsumption(vcpu);
958 return IA64_FAULT;
959 #endif //CHECK_FAULT
960 }
961 return vmx_vcpu_set_ibr(vcpu,r3,r2);
962 }
964 static IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
965 {
966 u64 r3,r2;
967 #ifdef CHECK_FAULT
968 IA64_PSR vpsr;
969 vpsr.val=vmx_vcpu_get_psr(vcpu);
970 if (vpsr.cpl != 0) {
971 /* Inject Privileged Operation fault into guest */
972 set_privileged_operation_isr (vcpu, 0);
973 privilege_op (vcpu);
974 return IA64_FAULT;
975 }
976 #endif // CHECK_FAULT
977 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
978 #ifdef CHECK_FAULT
979 set_isr_reg_nat_consumption(vcpu,0,0);
980 rnat_comsumption(vcpu);
981 return IA64_FAULT;
982 #endif //CHECK_FAULT
983 }
984 return (vmx_vcpu_set_pmc(vcpu,r3,r2));
985 }
987 static IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
988 {
989 u64 r3,r2;
990 #ifdef CHECK_FAULT
991 IA64_PSR vpsr;
992 vpsr.val=vmx_vcpu_get_psr(vcpu);
993 if (vpsr.cpl != 0) {
994 /* Inject Privileged Operation fault into guest */
995 set_privileged_operation_isr (vcpu, 0);
996 privilege_op (vcpu);
997 return IA64_FAULT;
998 }
999 #endif // CHECK_FAULT
1000 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
1001 #ifdef CHECK_FAULT
1002 set_isr_reg_nat_consumption(vcpu,0,0);
1003 rnat_comsumption(vcpu);
1004 return IA64_FAULT;
1005 #endif //CHECK_FAULT
1006 }
1007 return (vmx_vcpu_set_pmd(vcpu,r3,r2));
1008 }
1011 /**********************************
1012 * Moves from privileged registers
1013 **********************************/
1015 static IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
1016 {
1017 u64 r3,r1;
1018 #ifdef CHECK_FAULT
1019 if(check_target_register(vcpu, inst.M43.r1)){
1020 set_illegal_op_isr(vcpu);
1021 illegal_op(vcpu);
1022 return IA64_FAULT;
1023 }
1024 IA64_PSR vpsr;
1025 vpsr.val=vmx_vcpu_get_psr(vcpu);
1026 if (vpsr.cpl != 0) {
1027 /* Inject Privileged Operation fault into guest */
1028 set_privileged_operation_isr (vcpu, 0);
1029 privilege_op (vcpu);
1030 return IA64_FAULT;
1031 }
1033 #endif //CHECK_FAULT
1034 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1035 #ifdef CHECK_FAULT
1036 set_isr_reg_nat_consumption(vcpu,0,0);
1037 rnat_comsumption(vcpu);
1038 return IA64_FAULT;
1039 #endif //CHECK_FAULT
1040 }
1041 #ifdef CHECK_FAULT
1042 if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
1043 set_rsv_reg_field_isr(vcpu);
1044 rsv_reg_field(vcpu);
1045 }
1046 #endif //CHECK_FAULT
1047 vcpu_get_rr(vcpu,r3,&r1);
1048 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1049 }
1051 static IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
1052 {
1053 u64 r3,r1;
1054 #ifdef CHECK_FAULT
1055 if(check_target_register(vcpu, inst.M43.r1)){
1056 set_illegal_op_isr(vcpu);
1057 illegal_op(vcpu);
1058 return IA64_FAULT;
1059 }
1060 IA64_PSR vpsr;
1061 vpsr.val=vmx_vcpu_get_psr(vcpu);
1062 if (vpsr.cpl != 0) {
1063 /* Inject Privileged Operation fault into guest */
1064 set_privileged_operation_isr (vcpu, 0);
1065 privilege_op (vcpu);
1066 return IA64_FAULT;
1067 }
1069 #endif //CHECK_FAULT
1070 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1071 #ifdef CHECK_FAULT
1072 set_isr_reg_nat_consumption(vcpu,0,0);
1073 rnat_comsumption(vcpu);
1074 return IA64_FAULT;
1075 #endif //CHECK_FAULT
1076 }
1077 #ifdef CHECK_FAULT
1078 if(is_reserved_indirect_register(vcpu,r3)){
1079 set_rsv_reg_field_isr(vcpu);
1080 rsv_reg_field(vcpu);
1081 return IA64_FAULT;
1082 }
1083 #endif //CHECK_FAULT
1084 r1 = vmx_vcpu_get_pkr(vcpu, r3);
1085 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1086 }
1088 static IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
1089 {
1090 u64 r3,r1;
1091 IA64FAULT res;
1092 #ifdef CHECK_FAULT
1093 if(check_target_register(vcpu, inst.M43.r1)){
1094 set_illegal_op_isr(vcpu);
1095 illegal_op(vcpu);
1096 return IA64_FAULT;
1097 }
1098 IA64_PSR vpsr;
1099 vpsr.val=vmx_vcpu_get_psr(vcpu);
1100 if (vpsr.cpl != 0) {
1101 /* Inject Privileged Operation fault into guest */
1102 set_privileged_operation_isr (vcpu, 0);
1103 privilege_op (vcpu);
1104 return IA64_FAULT;
1105 }
1107 #endif //CHECK_FAULT
1108 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1109 #ifdef CHECK_FAULT
1110 set_isr_reg_nat_consumption(vcpu,0,0);
1111 rnat_comsumption(vcpu);
1112 return IA64_FAULT;
1113 #endif //CHECK_FAULT
1114 }
1115 #ifdef CHECK_FAULT
1116 if(is_reserved_indirect_register(vcpu,r3)){
1117 set_rsv_reg_field_isr(vcpu);
1118 rsv_reg_field(vcpu);
1119 return IA64_FAULT;
1120 }
1121 #endif //CHECK_FAULT
1122 res = vmx_vcpu_get_dbr(vcpu, r3, &r1);
1123 if (res != IA64_NO_FAULT)
1124 return res;
1125 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1126 }
1128 static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
1129 {
1130 u64 r3,r1;
1131 IA64FAULT res;
1132 #ifdef CHECK_FAULT
1133 if(check_target_register(vcpu, inst.M43.r1)){
1134 set_illegal_op_isr(vcpu);
1135 illegal_op(vcpu);
1136 return IA64_FAULT;
1137 }
1138 IA64_PSR vpsr;
1139 vpsr.val=vmx_vcpu_get_psr(vcpu);
1140 if (vpsr.cpl != 0) {
1141 /* Inject Privileged Operation fault into guest */
1142 set_privileged_operation_isr (vcpu, 0);
1143 privilege_op (vcpu);
1144 return IA64_FAULT;
1145 }
1147 #endif //CHECK_FAULT
1148 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1149 #ifdef CHECK_FAULT
1150 set_isr_reg_nat_consumption(vcpu,0,0);
1151 rnat_comsumption(vcpu);
1152 return IA64_FAULT;
1153 #endif //CHECK_FAULT
1154 }
1155 #ifdef CHECK_FAULT
1156 if(is_reserved_indirect_register(vcpu,r3)){
1157 set_rsv_reg_field_isr(vcpu);
1158 rsv_reg_field(vcpu);
1159 return IA64_FAULT;
1160 }
1161 #endif //CHECK_FAULT
1162 res = vmx_vcpu_get_ibr(vcpu, r3, &r1);
1163 if (res != IA64_NO_FAULT)
1164 return res;
1165 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1166 }
1168 static IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
1169 {
1170 u64 r3,r1;
1171 #ifdef CHECK_FAULT
1172 if(check_target_register(vcpu, inst.M43.r1)){
1173 set_illegal_op_isr(vcpu);
1174 illegal_op(vcpu);
1175 return IA64_FAULT;
1176 }
1177 IA64_PSR vpsr;
1178 vpsr.val=vmx_vcpu_get_psr(vcpu);
1179 if (vpsr.cpl != 0) {
1180 /* Inject Privileged Operation fault into guest */
1181 set_privileged_operation_isr (vcpu, 0);
1182 privilege_op (vcpu);
1183 return IA64_FAULT;
1184 }
1186 #endif //CHECK_FAULT
1187 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1188 #ifdef CHECK_FAULT
1189 set_isr_reg_nat_consumption(vcpu,0,0);
1190 rnat_comsumption(vcpu);
1191 return IA64_FAULT;
1192 #endif //CHECK_FAULT
1193 }
1194 #ifdef CHECK_FAULT
1195 if(is_reserved_indirect_register(vcpu,r3)){
1196 set_rsv_reg_field_isr(vcpu);
1197 rsv_reg_field(vcpu);
1198 return IA64_FAULT;
1199 }
1200 #endif //CHECK_FAULT
1201 r1 = vmx_vcpu_get_pmc(vcpu, r3);
1202 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1203 }
1205 static IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
1206 {
1207 u64 r3,r1;
1208 #ifdef CHECK_FAULT
1209 if(check_target_register(vcpu, inst.M43.r1)){
1210 set_illegal_op_isr(vcpu);
1211 illegal_op(vcpu);
1212 return IA64_FAULT;
1213 }
1214 #endif //CHECK_FAULT
1215 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
1216 #ifdef CHECK_FAULT
1217 set_isr_reg_nat_consumption(vcpu,0,0);
1218 rnat_comsumption(vcpu);
1219 return IA64_FAULT;
1220 #endif //CHECK_FAULT
1221 }
1222 #ifdef CHECK_FAULT
1223 if(is_reserved_indirect_register(vcpu,r3)){
1224 set_rsv_reg_field_isr(vcpu);
1225 rsv_reg_field(vcpu);
1226 return IA64_FAULT;
1227 }
1228 #endif //CHECK_FAULT
1229 r1 = vmx_vcpu_get_cpuid(vcpu, r3);
1230 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
1231 }
1233 static IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
1234 {
1235 u64 r2;
1236 extern u64 cr_igfld_mask(int index, u64 value);
1237 #ifdef CHECK_FAULT
1238 IA64_PSR vpsr;
1239 vpsr.val=vmx_vcpu_get_psr(vcpu);
1240 if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
1241 set_illegal_op_isr(vcpu);
1242 illegal_op(vcpu);
1243 return IA64_FAULT;
1244 }
1245 if ( vpsr.cpl != 0) {
1246 /* Inject Privileged Operation fault into guest */
1247 set_privileged_operation_isr (vcpu, 0);
1248 privilege_op (vcpu);
1249 return IA64_FAULT;
1250 }
1251 #endif // CHECK_FAULT
1252 if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
1253 #ifdef CHECK_FAULT
1254 set_isr_reg_nat_consumption(vcpu,0,0);
1255 rnat_comsumption(vcpu);
1256 return IA64_FAULT;
1257 #endif //CHECK_FAULT
1258 }
1259 #ifdef CHECK_FAULT
1260 if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
1261 /* Inject Reserved Register/Field fault
1262 * into guest */
1263 set_rsv_reg_field_isr (vcpu,0);
1264 rsv_reg_field (vcpu);
1265 return IA64_FAULT;
1266 }
1267 #endif //CHECK_FAULT
1268 r2 = cr_igfld_mask(inst.M32.cr3,r2);
1269 switch (inst.M32.cr3) {
1270 case 0: return vcpu_set_dcr(vcpu,r2);
1271 case 1: return vmx_vcpu_set_itm(vcpu,r2);
1272 case 2: return vmx_vcpu_set_iva(vcpu,r2);
1273 case 8: return vmx_vcpu_set_pta(vcpu,r2);
1274 case 16:return vcpu_set_ipsr(vcpu,r2);
1275 case 17:return vcpu_set_isr(vcpu,r2);
1276 case 19:return vcpu_set_iip(vcpu,r2);
1277 case 20:return vcpu_set_ifa(vcpu,r2);
1278 case 21:return vcpu_set_itir(vcpu,r2);
1279 case 22:return vcpu_set_iipa(vcpu,r2);
1280 case 23:return vcpu_set_ifs(vcpu,r2);
1281 case 24:return vcpu_set_iim(vcpu,r2);
1282 case 25:return vcpu_set_iha(vcpu,r2);
1283 case 64:printk("SET LID to 0x%lx\n", r2);
1284 return IA64_NO_FAULT;
1285 case 65:return IA64_NO_FAULT;
1286 case 66:return vmx_vcpu_set_tpr(vcpu,r2);
1287 case 67:return vmx_vcpu_set_eoi(vcpu,r2);
1288 case 68:return IA64_NO_FAULT;
1289 case 69:return IA64_NO_FAULT;
1290 case 70:return IA64_NO_FAULT;
1291 case 71:return IA64_NO_FAULT;
1292 case 72:return vmx_vcpu_set_itv(vcpu,r2);
1293 case 73:return vmx_vcpu_set_pmv(vcpu,r2);
1294 case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
1295 case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
1296 case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
1297 default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1298 return IA64_NO_FAULT;
1299 }
1300 }
1303 #define cr_get(cr) \
1304 ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1305 vcpu_set_gr(vcpu, tgt, val,0):fault;
1307 //#define cr_get(cr) (vcpu_set_gr(vcpu, tgt, vcpu_get##cr(vcpu), 0)
1309 /*
1310 #define vmx_cr_get(cr) \
1311 ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
1312 vcpu_set_gr(vcpu, tgt, val,0):fault;
1313 */
1315 #define vmx_cr_get(cr) (vcpu_set_gr(vcpu, tgt, vmx_vcpu_get_##cr(vcpu), 0))
1317 static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
1318 {
1319 u64 tgt = inst.M33.r1;
1320 u64 val;
1321 IA64FAULT fault;
1322 #ifdef CHECK_FAULT
1323 IA64_PSR vpsr;
1324 vpsr.val=vmx_vcpu_get_psr(vcpu);
1325 if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
1326 (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
1327 set_illegal_op_isr(vcpu);
1328 illegal_op(vcpu);
1329 return IA64_FAULT;
1330 }
1331 if ( vpsr.cpl != 0) {
1332 /* Inject Privileged Operation fault into guest */
1333 set_privileged_operation_isr (vcpu, 0);
1334 privilege_op (vcpu);
1335 return IA64_FAULT;
1336 }
1337 #endif // CHECK_FAULT
1339 // from_cr_cnt[inst.M33.cr3]++;
1340 switch (inst.M33.cr3) {
1341 case 0: return cr_get(dcr);
1342 case 1: return vmx_cr_get(itm);
1343 case 2: return vmx_cr_get(iva);
1344 case 8: return vmx_cr_get(pta);
1345 case 16:return cr_get(ipsr);
1346 case 17:return cr_get(isr);
1347 case 19:return cr_get(iip);
1348 case 20:return cr_get(ifa);
1349 case 21:return cr_get(itir);
1350 case 22:return cr_get(iipa);
1351 case 23:return cr_get(ifs);
1352 case 24:return cr_get(iim);
1353 case 25:return cr_get(iha);
1354 case 64:return vmx_cr_get(lid);
1355 case 65:
1356 val = vmx_vcpu_get_ivr(vcpu);
1357 return vcpu_set_gr(vcpu,tgt,val,0);
1358 case 66:return vmx_cr_get(tpr);
1359 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
1360 case 68:return vmx_cr_get(irr0);
1361 case 69:return vmx_cr_get(irr1);
1362 case 70:return vmx_cr_get(irr2);
1363 case 71:return vmx_cr_get(irr3);
1364 case 72:return vmx_cr_get(itv);
1365 case 73:return vmx_cr_get(pmv);
1366 case 74:return vmx_cr_get(cmcv);
1367 case 80:return vmx_cr_get(lrr0);
1368 case 81:return vmx_cr_get(lrr1);
1369 default: return IA64_NO_FAULT;
1370 }
1371 }
1374 //#define BYPASS_VMAL_OPCODE
1375 extern IA64_SLOT_TYPE slot_types[0x20][3];
1376 unsigned long
1377 __vmx_get_domain_bundle(u64 iip, IA64_BUNDLE *pbundle)
1378 {
1379 return fetch_code(current, iip, pbundle);
1380 }
1382 /** Emulate a privileged operation.
1385 * @param vcpu virtual cpu
1386 * @param cause the reason that caused the virtualization fault
1387 * @param opcode the instruction opcode that caused the virtualization fault
1388 */
1390 void
1391 vmx_emulate(VCPU *vcpu, REGS *regs)
1392 {
1393 IA64FAULT status;
1394 INST64 inst;
1395 u64 iip, cause, opcode;
1396 iip = regs->cr_iip;
1397 cause = VMX(vcpu,cause);
1398 opcode = VMX(vcpu,opcode);
1400 #ifdef VTLB_DEBUG
1401 check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
1402 dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
1403 #endif
1404 #if 0
1405 if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
1406 printk ("VMAL decode error: cause - %lx; op - %lx\n",
1407 cause, opcode );
1408 return;
1409 }
1410 #endif
1411 #ifdef BYPASS_VMAL_OPCODE
1412 // make a local copy of the bundle containing the privop
1413 IA64_BUNDLE bundle;
1414 int slot;
1415 IA64_SLOT_TYPE slot_type;
1416 IA64_PSR vpsr;
1417 __vmx_get_domain_bundle(iip, &bundle);
1418 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
1419 if (!slot) inst.inst = bundle.slot0;
1420 else if (slot == 1)
1421 inst.inst = bundle.slot1a + (bundle.slot1b<<18);
1422 else if (slot == 2) inst.inst = bundle.slot2;
1423 else printk("priv_handle_op: illegal slot: %d\n", slot);
1424 slot_type = slot_types[bundle.template][slot];
1425 ia64_priv_decoder(slot_type, inst, &cause);
1426 if(cause==0){
1427 panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
1429 #else
1430 inst.inst=opcode;
1431 #endif /* BYPASS_VMAL_OPCODE */
1433 debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
1435 /*
1436 * Switch to actual virtual rid in rr0 and rr4,
1437 * which is required by some tlb related instructions.
1438 */
1439 prepare_if_physical_mode(vcpu);
1441 switch(cause) {
1442 case EVENT_RSM:
1443 perfc_incr(vmx_rsm);
1444 status=vmx_emul_rsm(vcpu, inst);
1445 break;
1446 case EVENT_SSM:
1447 perfc_incr(vmx_ssm);
1448 status=vmx_emul_ssm(vcpu, inst);
1449 break;
1450 case EVENT_MOV_TO_PSR:
1451 perfc_incr(vmx_mov_to_psr);
1452 status=vmx_emul_mov_to_psr(vcpu, inst);
1453 break;
1454 case EVENT_MOV_FROM_PSR:
1455 perfc_incr(vmx_mov_from_psr);
1456 status=vmx_emul_mov_from_psr(vcpu, inst);
1457 break;
1458 case EVENT_MOV_FROM_CR:
1459 perfc_incr(vmx_mov_from_cr);
1460 status=vmx_emul_mov_from_cr(vcpu, inst);
1461 break;
1462 case EVENT_MOV_TO_CR:
1463 perfc_incr(vmx_mov_to_cr);
1464 status=vmx_emul_mov_to_cr(vcpu, inst);
1465 break;
1466 case EVENT_BSW_0:
1467 perfc_incr(vmx_bsw0);
1468 status=vmx_emul_bsw0(vcpu, inst);
1469 break;
1470 case EVENT_BSW_1:
1471 perfc_incr(vmx_bsw1);
1472 status=vmx_emul_bsw1(vcpu, inst);
1473 break;
1474 case EVENT_COVER:
1475 perfc_incr(vmx_cover);
1476 status=vmx_emul_cover(vcpu, inst);
1477 break;
1478 case EVENT_RFI:
1479 perfc_incr(vmx_rfi);
1480 status=vmx_emul_rfi(vcpu, inst);
1481 break;
1482 case EVENT_ITR_D:
1483 perfc_incr(vmx_itr_d);
1484 status=vmx_emul_itr_d(vcpu, inst);
1485 break;
1486 case EVENT_ITR_I:
1487 perfc_incr(vmx_itr_i);
1488 status=vmx_emul_itr_i(vcpu, inst);
1489 break;
1490 case EVENT_PTR_D:
1491 perfc_incr(vmx_ptr_d);
1492 status=vmx_emul_ptr_d(vcpu, inst);
1493 break;
1494 case EVENT_PTR_I:
1495 perfc_incr(vmx_ptr_i);
1496 status=vmx_emul_ptr_i(vcpu, inst);
1497 break;
1498 case EVENT_ITC_D:
1499 perfc_incr(vmx_itc_d);
1500 status=vmx_emul_itc_d(vcpu, inst);
1501 break;
1502 case EVENT_ITC_I:
1503 perfc_incr(vmx_itc_i);
1504 status=vmx_emul_itc_i(vcpu, inst);
1505 break;
1506 case EVENT_PTC_L:
1507 perfc_incr(vmx_ptc_l);
1508 status=vmx_emul_ptc_l(vcpu, inst);
1509 break;
1510 case EVENT_PTC_G:
1511 perfc_incr(vmx_ptc_g);
1512 status=vmx_emul_ptc_g(vcpu, inst);
1513 break;
1514 case EVENT_PTC_GA:
1515 perfc_incr(vmx_ptc_ga);
1516 status=vmx_emul_ptc_ga(vcpu, inst);
1517 break;
1518 case EVENT_PTC_E:
1519 perfc_incr(vmx_ptc_e);
1520 status=vmx_emul_ptc_e(vcpu, inst);
1521 break;
1522 case EVENT_MOV_TO_RR:
1523 perfc_incr(vmx_mov_to_rr);
1524 status=vmx_emul_mov_to_rr(vcpu, inst);
1525 break;
1526 case EVENT_MOV_FROM_RR:
1527 perfc_incr(vmx_mov_from_rr);
1528 status=vmx_emul_mov_from_rr(vcpu, inst);
1529 break;
1530 case EVENT_THASH:
1531 perfc_incr(vmx_thash);
1532 status=vmx_emul_thash(vcpu, inst);
1533 break;
1534 case EVENT_TTAG:
1535 perfc_incr(vmx_ttag);
1536 status=vmx_emul_ttag(vcpu, inst);
1537 break;
1538 case EVENT_TPA:
1539 perfc_incr(vmx_tpa);
1540 status=vmx_emul_tpa(vcpu, inst);
1541 break;
1542 case EVENT_TAK:
1543 perfc_incr(vmx_tak);
1544 status=vmx_emul_tak(vcpu, inst);
1545 break;
1546 case EVENT_MOV_TO_AR_IMM:
1547 perfc_incr(vmx_mov_to_ar_imm);
1548 status=vmx_emul_mov_to_ar_imm(vcpu, inst);
1549 break;
1550 case EVENT_MOV_TO_AR:
1551 perfc_incr(vmx_mov_to_ar_reg);
1552 status=vmx_emul_mov_to_ar_reg(vcpu, inst);
1553 break;
1554 case EVENT_MOV_FROM_AR:
1555 perfc_incr(vmx_mov_from_ar_reg);
1556 status=vmx_emul_mov_from_ar_reg(vcpu, inst);
1557 break;
1558 case EVENT_MOV_TO_DBR:
1559 perfc_incr(vmx_mov_to_dbr);
1560 status=vmx_emul_mov_to_dbr(vcpu, inst);
1561 break;
1562 case EVENT_MOV_TO_IBR:
1563 perfc_incr(vmx_mov_to_ibr);
1564 status=vmx_emul_mov_to_ibr(vcpu, inst);
1565 break;
1566 case EVENT_MOV_TO_PMC:
1567 perfc_incr(vmx_mov_to_pmc);
1568 status=vmx_emul_mov_to_pmc(vcpu, inst);
1569 break;
1570 case EVENT_MOV_TO_PMD:
1571 perfc_incr(vmx_mov_to_pmd);
1572 status=vmx_emul_mov_to_pmd(vcpu, inst);
1573 break;
1574 case EVENT_MOV_TO_PKR:
1575 perfc_incr(vmx_mov_to_pkr);
1576 status=vmx_emul_mov_to_pkr(vcpu, inst);
1577 break;
1578 case EVENT_MOV_FROM_DBR:
1579 perfc_incr(vmx_mov_from_dbr);
1580 status=vmx_emul_mov_from_dbr(vcpu, inst);
1581 break;
1582 case EVENT_MOV_FROM_IBR:
1583 perfc_incr(vmx_mov_from_ibr);
1584 status=vmx_emul_mov_from_ibr(vcpu, inst);
1585 break;
1586 case EVENT_MOV_FROM_PMC:
1587 perfc_incr(vmx_mov_from_pmc);
1588 status=vmx_emul_mov_from_pmc(vcpu, inst);
1589 break;
1590 case EVENT_MOV_FROM_PKR:
1591 perfc_incr(vmx_mov_from_pkr);
1592 status=vmx_emul_mov_from_pkr(vcpu, inst);
1593 break;
1594 case EVENT_MOV_FROM_CPUID:
1595 perfc_incr(vmx_mov_from_cpuid);
1596 status=vmx_emul_mov_from_cpuid(vcpu, inst);
1597 break;
1598 case EVENT_VMSW:
1599 printk ("Unimplemented instruction %ld\n", cause);
1600 status=IA64_FAULT;
1601 break;
1602 default:
1603 panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n",
1604 cause,regs->cr_iip,regs->cr_ipsr);
1605 break;
1606 };
1608 #if 0
1609 if (status != IA64_NO_FAULT)
1610 panic("Emulation failed with cause %d:\n", cause);
1611 #endif
1613 switch (status) {
1614 case IA64_RSVDREG_FAULT:
1615 set_rsv_reg_field_isr(vcpu);
1616 rsv_reg_field(vcpu);
1617 break;
1618 case IA64_ILLOP_FAULT:
1619 set_illegal_op_isr(vcpu);
1620 illegal_op(vcpu);
1621 break;
1622 case IA64_FAULT:
1623 /* Registers already set. */
1624 break;
1625 case IA64_NO_FAULT:
1626 if ( cause != EVENT_RFI )
1627 vcpu_increment_iip(vcpu);
1628 break;
1629 }
1632 recover_if_physical_mode(vcpu);
1633 return;
1634 }