ia64/xen-unstable

annotate xen/arch/ia64/vmx/vmx_virt.c @ 10695:6703fed8870f

[IA64] enable acceleration of external interrupt

This patch enables acceleration of external interrupts,
as described in the VTI spec.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Wed Jul 12 13:20:15 2006 -0600 (2006-07-12)
parents 550786d7d352
children 4834d1e8f26e
rev   line source
djm@6458 1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
djm@6458 2 /*
djm@6458 3 * vmx_virt.c:
djm@6458 4 * Copyright (c) 2005, Intel Corporation.
djm@6458 5 *
djm@6458 6 * This program is free software; you can redistribute it and/or modify it
djm@6458 7 * under the terms and conditions of the GNU General Public License,
djm@6458 8 * version 2, as published by the Free Software Foundation.
djm@6458 9 *
djm@6458 10 * This program is distributed in the hope it will be useful, but WITHOUT
djm@6458 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
djm@6458 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
djm@6458 13 * more details.
djm@6458 14 *
djm@6458 15 * You should have received a copy of the GNU General Public License along with
djm@6458 16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
djm@6458 17 * Place - Suite 330, Boston, MA 02111-1307 USA.
djm@6458 18 *
djm@6458 19 * Fred Yang (fred.yang@intel.com)
djm@6458 20 * Shaofan Li (Susie Li) <susie.li@intel.com>
djm@6458 21 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
djm@6458 22 */
awilliam@10561 23 #include <asm/bundle.h>
djm@6458 24 #include <asm/vmx_vcpu.h>
djm@6458 25 #include <asm/processor.h>
djm@6458 26 #include <asm/delay.h> // Debug only
djm@6458 27 #include <asm/vmmu.h>
djm@6458 28 #include <asm/vmx_mm_def.h>
djm@6458 29 #include <asm/smp.h>
awilliam@8916 30 #include <asm/vmx.h>
djm@6458 31 #include <asm/virt_event.h>
awilliam@8916 32 #include <asm/vmx_phy_mode.h>
djm@6458 33
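/*
 * Decode the privileged or virtualization-sensitive instruction in the
 * given bundle slot: the major opcode plus the x3/x4/x6 extension fields
 * select one of the EVENT_* cause codes consumed by vmx_emulate().
 * *cause is left as 0 when the instruction is not recognized.
 */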
djm@6458 34 void
djm@6458 35 ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, UINT64 * cause)
djm@6458 36 {
djm@6458 37 *cause=0;
djm@6458 38 switch (slot_type) {
djm@6458 39 case M:
djm@6458 40 if (inst.generic.major==0){
djm@6458 41 if(inst.M28.x3==0){
djm@6458 42 if(inst.M44.x4==6){
djm@6458 43 *cause=EVENT_SSM;
djm@6458 44 }else if(inst.M44.x4==7){
djm@6458 45 *cause=EVENT_RSM;
djm@6458 46 }else if(inst.M30.x4==8&&inst.M30.x2==2){
djm@6458 47 *cause=EVENT_MOV_TO_AR_IMM;
djm@6458 48 }
djm@6458 49 }
djm@6458 50 }
djm@6458 51 else if(inst.generic.major==1){
djm@6458 52 if(inst.M28.x3==0){
djm@6458 53 if(inst.M32.x6==0x2c){
djm@6458 54 *cause=EVENT_MOV_TO_CR;
djm@6458 55 }else if(inst.M33.x6==0x24){
djm@6458 56 *cause=EVENT_MOV_FROM_CR;
djm@6458 57 }else if(inst.M35.x6==0x2d){
djm@6458 58 *cause=EVENT_MOV_TO_PSR;
djm@6458 59 }else if(inst.M36.x6==0x25){
djm@6458 60 *cause=EVENT_MOV_FROM_PSR;
djm@6458 61 }else if(inst.M29.x6==0x2A){
djm@6458 62 *cause=EVENT_MOV_TO_AR;
djm@6458 63 }else if(inst.M31.x6==0x22){
djm@6458 64 *cause=EVENT_MOV_FROM_AR;
djm@6458 65 }else if(inst.M45.x6==0x09){
djm@6458 66 *cause=EVENT_PTC_L;
djm@6458 67 }else if(inst.M45.x6==0x0A){
djm@6458 68 *cause=EVENT_PTC_G;
djm@6458 69 }else if(inst.M45.x6==0x0B){
djm@6458 70 *cause=EVENT_PTC_GA;
djm@6458 71 }else if(inst.M45.x6==0x0C){
djm@6458 72 *cause=EVENT_PTR_D;
djm@6458 73 }else if(inst.M45.x6==0x0D){
djm@6458 74 *cause=EVENT_PTR_I;
djm@6458 75 }else if(inst.M46.x6==0x1A){
djm@6458 76 *cause=EVENT_THASH;
djm@6458 77 }else if(inst.M46.x6==0x1B){
djm@6458 78 *cause=EVENT_TTAG;
djm@6458 79 }else if(inst.M46.x6==0x1E){
djm@6458 80 *cause=EVENT_TPA;
djm@6458 81 }else if(inst.M46.x6==0x1F){
djm@6458 82 *cause=EVENT_TAK;
djm@6458 83 }else if(inst.M47.x6==0x34){
djm@6458 84 *cause=EVENT_PTC_E;
djm@6458 85 }else if(inst.M41.x6==0x2E){
djm@6458 86 *cause=EVENT_ITC_D;
djm@6458 87 }else if(inst.M41.x6==0x2F){
djm@6458 88 *cause=EVENT_ITC_I;
djm@6458 89 }else if(inst.M42.x6==0x00){
djm@6458 90 *cause=EVENT_MOV_TO_RR;
djm@6458 91 }else if(inst.M42.x6==0x01){
djm@6458 92 *cause=EVENT_MOV_TO_DBR;
djm@6458 93 }else if(inst.M42.x6==0x02){
djm@6458 94 *cause=EVENT_MOV_TO_IBR;
djm@6458 95 }else if(inst.M42.x6==0x03){
djm@6458 96 *cause=EVENT_MOV_TO_PKR;
djm@6458 97 }else if(inst.M42.x6==0x04){
djm@6458 98 *cause=EVENT_MOV_TO_PMC;
djm@6458 99 }else if(inst.M42.x6==0x05){
djm@6458 100 *cause=EVENT_MOV_TO_PMD;
djm@6458 101 }else if(inst.M42.x6==0x0E){
djm@6458 102 *cause=EVENT_ITR_D;
djm@6458 103 }else if(inst.M42.x6==0x0F){
djm@6458 104 *cause=EVENT_ITR_I;
djm@6458 105 }else if(inst.M43.x6==0x10){
djm@6458 106 *cause=EVENT_MOV_FROM_RR;
djm@6458 107 }else if(inst.M43.x6==0x11){
djm@6458 108 *cause=EVENT_MOV_FROM_DBR;
djm@6458 109 }else if(inst.M43.x6==0x12){
djm@6458 110 *cause=EVENT_MOV_FROM_IBR;
djm@6458 111 }else if(inst.M43.x6==0x13){
djm@6458 112 *cause=EVENT_MOV_FROM_PKR;
djm@6458 113 }else if(inst.M43.x6==0x14){
djm@6458 114 *cause=EVENT_MOV_FROM_PMC;
djm@6458 115 /*
djm@6458 116 }else if(inst.M43.x6==0x15){
djm@6458 117 *cause=EVENT_MOV_FROM_PMD;
djm@6458 118 */
djm@6458 119 }else if(inst.M43.x6==0x17){
djm@6458 120 *cause=EVENT_MOV_FROM_CPUID;
djm@6458 121 }
djm@6458 122 }
djm@6458 123 }
djm@6458 124 break;
djm@6458 125 case B:
djm@6458 126 if(inst.generic.major==0){
djm@6458 127 if(inst.B8.x6==0x02){
djm@6458 128 *cause=EVENT_COVER;
djm@6458 129 }else if(inst.B8.x6==0x08){
djm@6458 130 *cause=EVENT_RFI;
djm@6458 131 }else if(inst.B8.x6==0x0c){
djm@6458 132 *cause=EVENT_BSW_0;
djm@6458 133 }else if(inst.B8.x6==0x0d){
djm@6458 134 *cause=EVENT_BSW_1;
djm@6458 135 }
djm@6458 136 }
awilliam@8916 137 case I:
awilliam@8916 138 case F:
awilliam@8916 139 case L:
awilliam@8916 140 case ILLEGAL:
awilliam@8916 141 break;
djm@6458 142 }
djm@6458 143 }
djm@6458 144
djm@6458 145 IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
djm@6458 146 {
djm@6458 147 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
djm@6458 148 return vmx_vcpu_reset_psr_sm(vcpu,imm24);
djm@6458 149 }
djm@6458 150
djm@6458 151 IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
djm@6458 152 {
djm@6458 153 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
djm@6458 154 return vmx_vcpu_set_psr_sm(vcpu,imm24);
djm@6458 155 }
djm@6458 156
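/* Cached copy of the PSR value most recently returned to the guest by
 * vmx_emul_mov_from_psr(); in this file it is only read by the disabled
 * (#if 0) consistency check in vmx_emul_mov_to_psr(). */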
djm@6458 157 unsigned long last_guest_psr = 0x0;
djm@6458 158 IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
djm@6458 159 {
djm@6458 160 UINT64 tgt = inst.M33.r1;
djm@6458 161 UINT64 val;
djm@6458 162
djm@6458 163 /*
djm@6458 164 if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
djm@6867 165 return vcpu_set_gr(vcpu, tgt, val);
djm@6458 166 else return fault;
djm@6458 167 */
djm@6458 168 val = vmx_vcpu_get_psr(vcpu);
djm@6458 169 val = (val & MASK(0, 32)) | (val & MASK(35, 2));
djm@6458 170 last_guest_psr = val;
djm@6867 171 return vcpu_set_gr(vcpu, tgt, val, 0);
djm@6458 172 }
djm@6458 173
djm@6458 174 /**
djm@6458 175 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
djm@6458 176 */
djm@6458 177 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
djm@6458 178 {
djm@6458 179 UINT64 val;
awilliam@9982 180
djm@6867 181 if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
awilliam@9982 182 panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
djm@6458 183
djm@6801 184 val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
djm@6458 185 #if 0
djm@6458 186 if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
djm@6458 187 while(1);
djm@6458 188 else
djm@6458 189 last_mov_from_psr = 0;
djm@6458 190 #endif
djm@6458 191 return vmx_vcpu_set_psr_l(vcpu,val);
djm@6458 192 }
djm@6458 193
djm@6458 194
djm@6458 195 /**************************************************************************
djm@6458 196 Privileged operation emulation routines
djm@6458 197 **************************************************************************/
djm@6458 198
djm@6458 199 IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
djm@6458 200 {
djm@6458 201 IA64_PSR vpsr;
djm@6458 202 REGS *regs;
djm@6458 203 #ifdef CHECK_FAULT
djm@6458 204 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 205 if ( vpsr.cpl != 0) {
djm@6458 206 /* Inject Privileged Operation fault into guest */
djm@6458 207 set_privileged_operation_isr (vcpu, 0);
djm@6458 208 privilege_op (vcpu);
djm@6458 209 return IA64_FAULT;
djm@6458 210 }
djm@6458 211 #endif // CHECK_FAULT
djm@6458 212 regs=vcpu_regs(vcpu);
djm@6458 213 vpsr.val=regs->cr_ipsr;
djm@6458 214 if ( vpsr.is == 1 ) {
awilliam@9982 215 panic_domain(regs,"We do not support IA32 instructions yet");
djm@6458 216 }
djm@6458 217
djm@6458 218 return vmx_vcpu_rfi(vcpu);
djm@6458 219 }
djm@6458 220
djm@6458 221 IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
djm@6458 222 {
djm@6458 223 #ifdef CHECK_FAULT
djm@6458 224 IA64_PSR vpsr;
djm@6458 225 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 226 if ( vpsr.cpl != 0) {
djm@6458 227 /* Inject Privileged Operation fault into guest */
djm@6458 228 set_privileged_operation_isr (vcpu, 0);
djm@6458 229 privilege_op (vcpu);
djm@6458 230 return IA64_FAULT;
djm@6458 231 }
djm@6458 232 #endif // CHECK_FAULT
djm@6867 233 return vcpu_bsw0(vcpu);
djm@6458 234 }
djm@6458 235
djm@6458 236 IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
djm@6458 237 {
djm@6458 238 #ifdef CHECK_FAULT
djm@6458 239 IA64_PSR vpsr;
djm@6458 240 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 241 if ( vpsr.cpl != 0) {
djm@6458 242 /* Inject Privileged Operation fault into guest */
djm@6458 243 set_privileged_operation_isr (vcpu, 0);
djm@6458 244 privilege_op (vcpu);
djm@6458 245 return IA64_FAULT;
djm@6458 246 }
djm@6458 247 #endif // CHECK_FAULT
djm@6867 248 return vcpu_bsw1(vcpu);
djm@6458 249 }
djm@6458 250
djm@6458 251 IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
djm@6458 252 {
djm@6458 253 return vmx_vcpu_cover(vcpu);
djm@6458 254 }
djm@6458 255
djm@6458 256 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
djm@6458 257 {
djm@6458 258 u64 r2,r3;
djm@6458 259 IA64_PSR vpsr;
djm@6458 260
djm@6458 261 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 262 if ( vpsr.cpl != 0) {
djm@6458 263 /* Inject Privileged Operation fault into guest */
djm@6458 264 set_privileged_operation_isr (vcpu, 0);
djm@6458 265 privilege_op (vcpu);
djm@6458 266 return IA64_FAULT;
djm@6458 267 }
djm@6867 268 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
djm@6458 269 #ifdef VMAL_NO_FAULT_CHECK
awilliam@8916 270 ISR isr;
djm@6458 271 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 272 rnat_comsumption(vcpu);
djm@6458 273 return IA64_FAULT;
djm@6458 274 #endif // VMAL_NO_FAULT_CHECK
djm@6458 275 }
djm@6458 276 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 277 if (unimplemented_gva(vcpu,r3) ) {
djm@6458 278 isr.val = set_isr_ei_ni(vcpu);
djm@6458 279 isr.code = IA64_RESERVED_REG_FAULT;
djm@6458 280 vcpu_set_isr(vcpu, isr.val);
djm@6458 281 unimpl_daddr(vcpu);
djm@6458 282 return IA64_FAULT;
djm@6458 283 }
djm@6458 284 #endif // VMAL_NO_FAULT_CHECK
djm@6458 285 return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
djm@6458 286 }
djm@6458 287
djm@6458 288 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
djm@6458 289 {
djm@6458 290 u64 r3;
djm@6458 291 IA64_PSR vpsr;
djm@6458 292
djm@6458 293 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 294 #ifdef VMAL_NO_FAULT_CHECK
awilliam@8916 295 ISR isr;
djm@6458 296 if ( vpsr.cpl != 0) {
djm@6458 297 /* Inject Privileged Operation fault into guest */
djm@6458 298 set_privileged_operation_isr (vcpu, 0);
djm@6458 299 privilege_op (vcpu);
djm@6458 300 return IA64_FAULT;
djm@6458 301 }
djm@6458 302 #endif // VMAL_NO_FAULT_CHECK
djm@6867 303 if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
djm@6458 304 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 305 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 306 rnat_comsumption(vcpu);
djm@6458 307 return IA64_FAULT;
djm@6458 308 #endif // VMAL_NO_FAULT_CHECK
djm@6458 309 }
djm@6458 310 return vmx_vcpu_ptc_e(vcpu,r3);
djm@6458 311 }
djm@6458 312
djm@6458 313 IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
djm@6458 314 {
awilliam@10258 315 u64 r2,r3;
awilliam@10258 316 #ifdef VMAL_NO_FAULT_CHECK
awilliam@10258 317 IA64_PSR vpsr;
awilliam@10258 318 vpsr.val=vmx_vcpu_get_psr(vcpu);
awilliam@10258 319 if ( vpsr.cpl != 0) {
awilliam@10258 320 /* Inject Privileged Operation fault into guest */
awilliam@10258 321 set_privileged_operation_isr (vcpu, 0);
awilliam@10258 322 privilege_op (vcpu);
awilliam@10258 323 return IA64_FAULT;
awilliam@10258 324 }
awilliam@10258 325 #endif // VMAL_NO_FAULT_CHECK
awilliam@10258 326 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
awilliam@10258 327 #ifdef VMAL_NO_FAULT_CHECK
awilliam@10258 328 ISR isr;
awilliam@10258 329 set_isr_reg_nat_consumption(vcpu,0,0);
awilliam@10258 330 rnat_comsumption(vcpu);
awilliam@10258 331 return IA64_FAULT;
awilliam@10258 332 #endif // VMAL_NO_FAULT_CHECK
awilliam@10258 333 }
awilliam@10258 334 #ifdef VMAL_NO_FAULT_CHECK
awilliam@10258 335 if (unimplemented_gva(vcpu,r3) ) {
awilliam@10258 336 isr.val = set_isr_ei_ni(vcpu);
awilliam@10258 337 isr.code = IA64_RESERVED_REG_FAULT;
awilliam@10258 338 vcpu_set_isr(vcpu, isr.val);
awilliam@10258 339 unimpl_daddr(vcpu);
awilliam@10258 340 return IA64_FAULT;
awilliam@10258 341 }
awilliam@10258 342 #endif // VMAL_NO_FAULT_CHECK
awilliam@10258 343 return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
djm@6458 344 }
djm@6458 345
djm@6458 346 IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
djm@6458 347 {
awilliam@10258 348 u64 r2,r3;
awilliam@10258 349 #ifdef VMAL_NO_FAULT_CHECK
awilliam@10258 350 IA64_PSR vpsr;
awilliam@10258 351 vpsr.val=vmx_vcpu_get_psr(vcpu);
awilliam@10258 352 if ( vpsr.cpl != 0) {
awilliam@10258 353 /* Inject Privileged Operation fault into guest */
awilliam@10258 354 set_privileged_operation_isr (vcpu, 0);
awilliam@10258 355 privilege_op (vcpu);
awilliam@10258 356 return IA64_FAULT;
awilliam@10258 357 }
awilliam@10258 358 #endif // VMAL_NO_FAULT_CHECK
awilliam@10258 359 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
awilliam@10258 360 #ifdef VMAL_NO_FAULT_CHECK
awilliam@10258 361 ISR isr;
awilliam@10258 362 set_isr_reg_nat_consumption(vcpu,0,0);
awilliam@10258 363 rnat_comsumption(vcpu);
awilliam@10258 364 return IA64_FAULT;
awilliam@10258 365 #endif // VMAL_NO_FAULT_CHECK
awilliam@10258 366 }
awilliam@10258 367 #ifdef VMAL_NO_FAULT_CHECK
awilliam@10258 368 if (unimplemented_gva(vcpu,r3) ) {
awilliam@10258 369 isr.val = set_isr_ei_ni(vcpu);
awilliam@10258 370 isr.code = IA64_RESERVED_REG_FAULT;
awilliam@10258 371 vcpu_set_isr(vcpu, isr.val);
awilliam@10258 372 unimpl_daddr(vcpu);
awilliam@10258 373 return IA64_FAULT;
awilliam@10258 374 }
awilliam@10258 375 #endif // VMAL_NO_FAULT_CHECK
awilliam@10258 376 return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
djm@6458 377 }
djm@6458 378
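/*
 * Common checks for ptr.d/ptr.i emulation: fetch the purge address into
 * *pr3 and the size argument into *pr2 from the guest general registers
 * named by the M45 instruction fields.  The privilege/NaT/unimplemented-
 * address checks are only compiled in under VMAL_NO_FAULT_CHECK.
 */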
djm@6458 379 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
djm@6458 380 {
djm@6458 381 IA64FAULT ret1, ret2;
djm@6458 382
djm@6458 383 #ifdef VMAL_NO_FAULT_CHECK
awilliam@8916 384 ISR isr;
djm@6458 385 IA64_PSR vpsr;
djm@6458 386 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 387 if ( vpsr.cpl != 0) {
djm@6458 388 /* Inject Privileged Operation fault into guest */
djm@6458 389 set_privileged_operation_isr (vcpu, 0);
djm@6458 390 privilege_op (vcpu);
djm@6458 391 return IA64_FAULT;
djm@6458 392 }
djm@6458 393 #endif // VMAL_NO_FAULT_CHECK
djm@6867 394 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
djm@6867 395 ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
djm@6458 396 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 397 if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
djm@6458 398 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 399 rnat_comsumption(vcpu);
djm@6458 400 return IA64_FAULT;
djm@6458 401 }
djm@6458 402 if (unimplemented_gva(vcpu,*pr3) ) {
djm@6458 403 isr.val = set_isr_ei_ni(vcpu);
djm@6458 404 isr.code = IA64_RESERVED_REG_FAULT;
djm@6458 405 vcpu_set_isr(vcpu, isr.val);
djm@6458 406 unimpl_daddr(vcpu);
djm@6458 407 return IA64_FAULT;
djm@6458 408 }
djm@6458 409 #endif // VMAL_NO_FAULT_CHECK
djm@6458 410 return IA64_NO_FAULT;
djm@6458 411 }
djm@6458 412
djm@6458 413 IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
djm@6458 414 {
djm@6458 415 u64 r2,r3;
djm@6458 416 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
djm@6458 417 return IA64_FAULT;
djm@6458 418 return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
djm@6458 419 }
djm@6458 420
djm@6458 421 IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
djm@6458 422 {
djm@6458 423 u64 r2,r3;
djm@6458 424 if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
djm@6458 425 return IA64_FAULT;
djm@6458 426 return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
djm@6458 427 }
djm@6458 428
djm@6458 429
djm@6458 430 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
djm@6458 431 {
djm@6458 432 u64 r1,r3;
awilliam@8916 433 #ifdef CHECK_FAULT
djm@6458 434 ISR visr;
djm@6458 435 IA64_PSR vpsr;
djm@6458 436 if(check_target_register(vcpu, inst.M46.r1)){
djm@6458 437 set_illegal_op_isr(vcpu);
djm@6458 438 illegal_op(vcpu);
djm@6458 439 return IA64_FAULT;
djm@6458 440 }
djm@6458 441 #endif //CHECK_FAULT
djm@6867 442 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
djm@6458 443 #ifdef CHECK_FAULT
djm@6867 444 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
djm@6458 445 return IA64_NO_FAULT;
djm@6458 446 #endif //CHECK_FAULT
djm@6458 447 }
djm@6458 448 #ifdef CHECK_FAULT
djm@6458 449 if(unimplemented_gva(vcpu, r3)){
djm@6867 450 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
djm@6458 451 return IA64_NO_FAULT;
djm@6458 452 }
djm@6458 453 #endif //CHECK_FAULT
djm@6458 454 vmx_vcpu_thash(vcpu, r3, &r1);
djm@6867 455 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
djm@6458 456 return(IA64_NO_FAULT);
djm@6458 457 }
djm@6458 458
djm@6458 459
djm@6458 460 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
djm@6458 461 {
djm@6458 462 u64 r1,r3;
awilliam@8916 463 #ifdef CHECK_FAULT
djm@6458 464 ISR visr;
djm@6458 465 IA64_PSR vpsr;
awilliam@8916 466 #endif
awilliam@8916 467 #ifdef CHECK_FAULT
djm@6458 468 if(check_target_register(vcpu, inst.M46.r1)){
djm@6458 469 set_illegal_op_isr(vcpu);
djm@6458 470 illegal_op(vcpu);
djm@6458 471 return IA64_FAULT;
djm@6458 472 }
djm@6458 473 #endif //CHECK_FAULT
djm@6867 474 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
djm@6458 475 #ifdef CHECK_FAULT
djm@6867 476 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
djm@6458 477 return IA64_NO_FAULT;
djm@6458 478 #endif //CHECK_FAULT
djm@6458 479 }
djm@6458 480 #ifdef CHECK_FAULT
djm@6458 481 if(unimplemented_gva(vcpu, r3)){
djm@6867 482 vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
djm@6458 483 return IA64_NO_FAULT;
djm@6458 484 }
djm@6458 485 #endif //CHECK_FAULT
djm@6458 486 vmx_vcpu_ttag(vcpu, r3, &r1);
djm@6867 487 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
djm@6458 488 return(IA64_NO_FAULT);
djm@6458 489 }
djm@6458 490
djm@6458 491
djm@6458 492 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
djm@6458 493 {
djm@6458 494 u64 r1,r3;
awilliam@8916 495 #ifdef CHECK_FAULT
djm@6458 496 ISR visr;
djm@6458 497 if(check_target_register(vcpu, inst.M46.r1)){
djm@6458 498 set_illegal_op_isr(vcpu);
djm@6458 499 illegal_op(vcpu);
djm@6458 500 return IA64_FAULT;
djm@6458 501 }
djm@6458 502 IA64_PSR vpsr;
djm@6458 503 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 504 if(vpsr.cpl!=0){
djm@6458 505 visr.val=0;
djm@6458 506 vcpu_set_isr(vcpu, visr.val);
djm@6458 507 return IA64_FAULT;
djm@6458 508 }
djm@6458 509 #endif //CHECK_FAULT
djm@6867 510 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
djm@6458 511 #ifdef CHECK_FAULT
djm@6458 512 set_isr_reg_nat_consumption(vcpu,0,1);
djm@6458 513 rnat_comsumption(vcpu);
djm@6458 514 return IA64_FAULT;
djm@6458 515 #endif //CHECK_FAULT
djm@6458 516 }
djm@6458 517 #ifdef CHECK_FAULT
djm@6458 518 if (unimplemented_gva(vcpu,r3) ) {
djm@6458 519 // inject unimplemented_data_address_fault
djm@6458 520 visr.val = set_isr_ei_ni(vcpu);
djm@6458 521 visr.code = IA64_RESERVED_REG_FAULT;
djm@6458 522 vcpu_set_isr(vcpu, visr.val);
djm@6458 523 // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
djm@6458 524 unimpl_daddr(vcpu);
djm@6458 525 return IA64_FAULT;
djm@6458 526 }
djm@6458 527 #endif //CHECK_FAULT
djm@6458 528
djm@6458 529 if(vmx_vcpu_tpa(vcpu, r3, &r1)){
djm@6458 530 return IA64_FAULT;
djm@6458 531 }
djm@6867 532 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
djm@6458 533 return(IA64_NO_FAULT);
djm@6458 534 }
djm@6458 535
djm@6458 536 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
djm@6458 537 {
djm@6458 538 u64 r1,r3;
awilliam@8916 539 #ifdef CHECK_FAULT
djm@6458 540 ISR visr;
djm@6458 541 IA64_PSR vpsr;
djm@6458 542 int fault=IA64_NO_FAULT;
djm@6458 543 visr.val=0;
djm@6458 544 if(check_target_register(vcpu, inst.M46.r1)){
djm@6458 545 set_illegal_op_isr(vcpu);
djm@6458 546 illegal_op(vcpu);
djm@6458 547 return IA64_FAULT;
djm@6458 548 }
djm@6458 549 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 550 if(vpsr.cpl!=0){
djm@6458 551 vcpu_set_isr(vcpu, visr.val);
djm@6458 552 return IA64_FAULT;
djm@6458 553 }
djm@6458 554 #endif
djm@6867 555 if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
djm@6458 556 #ifdef CHECK_FAULT
djm@6458 557 set_isr_reg_nat_consumption(vcpu,0,1);
djm@6458 558 rnat_comsumption(vcpu);
djm@6458 559 return IA64_FAULT;
djm@6458 560 #endif
djm@6458 561 }
djm@6458 562 if(vmx_vcpu_tak(vcpu, r3, &r1)){
djm@6458 563 return IA64_FAULT;
djm@6458 564 }
djm@6867 565 vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
djm@6458 566 return(IA64_NO_FAULT);
djm@6458 567 }
djm@6458 568
djm@6458 569
djm@6458 570 /************************************
djm@6458 571 * Insert translation register/cache
djm@6458 572 ************************************/
djm@6458 573
djm@6458 574 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
djm@6458 575 {
awilliam@8916 576 UINT64 itir, ifa, pte, slot;
djm@6458 577 IA64_PSR vpsr;
djm@6458 578 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 579 if ( vpsr.ic ) {
djm@6458 580 set_illegal_op_isr(vcpu);
djm@6458 581 illegal_op(vcpu);
djm@6458 582 return IA64_FAULT;
djm@6458 583 }
djm@6458 584 #ifdef VMAL_NO_FAULT_CHECK
awilliam@8916 585 ISR isr;
djm@6458 586 if ( vpsr.cpl != 0) {
djm@6458 587 /* Inject Privileged Operation fault into guest */
djm@6458 588 set_privileged_operation_isr (vcpu, 0);
djm@6458 589 privilege_op (vcpu);
djm@6458 590 return IA64_FAULT;
djm@6458 591 }
djm@6458 592 #endif // VMAL_NO_FAULT_CHECK
djm@6867 593 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
djm@6458 594 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 595 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 596 rnat_comsumption(vcpu);
djm@6458 597 return IA64_FAULT;
djm@6458 598 #endif // VMAL_NO_FAULT_CHECK
djm@6458 599 }
djm@6458 600 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 601 if(is_reserved_rr_register(vcpu, slot)){
djm@6458 602 set_illegal_op_isr(vcpu);
djm@6458 603 illegal_op(vcpu);
djm@6458 604 return IA64_FAULT;
djm@6458 605 }
djm@6458 606 #endif // VMAL_NO_FAULT_CHECK
djm@6458 607
djm@6801 608 if (vcpu_get_itir(vcpu,&itir)){
djm@6458 609 return(IA64_FAULT);
djm@6458 610 }
djm@6801 611 if (vcpu_get_ifa(vcpu,&ifa)){
djm@6458 612 return(IA64_FAULT);
djm@6458 613 }
djm@6458 614 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 615 if (is_reserved_itir_field(vcpu, itir)) {
djm@6458 616 // TODO
djm@6458 617 return IA64_FAULT;
djm@6458 618 }
djm@6458 619 if (unimplemented_gva(vcpu,ifa) ) {
djm@6458 620 isr.val = set_isr_ei_ni(vcpu);
djm@6458 621 isr.code = IA64_RESERVED_REG_FAULT;
djm@6458 622 vcpu_set_isr(vcpu, isr.val);
djm@6458 623 unimpl_daddr(vcpu);
djm@6458 624 return IA64_FAULT;
djm@6458 625 }
djm@6458 626 #endif // VMAL_NO_FAULT_CHECK
djm@6458 627
awilliam@9164 628 return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
djm@6458 629 }
djm@6458 630
djm@6458 631 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
djm@6458 632 {
awilliam@8916 633 UINT64 itir, ifa, pte, slot;
awilliam@8916 634 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 635 ISR isr;
awilliam@8916 636 #endif
djm@6458 637 IA64_PSR vpsr;
djm@6458 638 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 639 if ( vpsr.ic ) {
djm@6458 640 set_illegal_op_isr(vcpu);
djm@6458 641 illegal_op(vcpu);
djm@6458 642 return IA64_FAULT;
djm@6458 643 }
djm@6458 644 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 645 if ( vpsr.cpl != 0) {
djm@6458 646 /* Inject Privileged Operation fault into guest */
djm@6458 647 set_privileged_operation_isr (vcpu, 0);
djm@6458 648 privilege_op (vcpu);
djm@6458 649 return IA64_FAULT;
djm@6458 650 }
djm@6458 651 #endif // VMAL_NO_FAULT_CHECK
djm@6867 652 if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
djm@6458 653 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 654 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 655 rnat_comsumption(vcpu);
djm@6458 656 return IA64_FAULT;
djm@6458 657 #endif // VMAL_NO_FAULT_CHECK
djm@6458 658 }
djm@6458 659 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 660 if(is_reserved_rr_register(vcpu, slot)){
djm@6458 661 set_illegal_op_isr(vcpu);
djm@6458 662 illegal_op(vcpu);
djm@6458 663 return IA64_FAULT;
djm@6458 664 }
djm@6458 665 #endif // VMAL_NO_FAULT_CHECK
djm@6458 666
djm@6801 667 if (vcpu_get_itir(vcpu,&itir)){
djm@6458 668 return(IA64_FAULT);
djm@6458 669 }
djm@6801 670 if (vcpu_get_ifa(vcpu,&ifa)){
djm@6458 671 return(IA64_FAULT);
djm@6458 672 }
djm@6458 673 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 674 if (is_reserved_itir_field(vcpu, itir)) {
djm@6458 675 // TODO
djm@6458 676 return IA64_FAULT;
djm@6458 677 }
djm@6458 678 if (unimplemented_gva(vcpu,ifa) ) {
djm@6458 679 isr.val = set_isr_ei_ni(vcpu);
djm@6458 680 isr.code = IA64_RESERVED_REG_FAULT;
djm@6458 681 vcpu_set_isr(vcpu, isr.val);
djm@6458 682 unimpl_daddr(vcpu);
djm@6458 683 return IA64_FAULT;
djm@6458 684 }
djm@6458 685 #endif // VMAL_NO_FAULT_CHECK
djm@6458 686
awilliam@9164 687 return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
djm@6458 688 }
djm@6458 689
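/*
 * Common checks for itc.d/itc.i emulation: itc must be issued with
 * psr.ic cleared, so an illegal-operation fault is injected if the guest
 * psr.ic is set; otherwise the pte is read from GR[r2] and itir/ifa are
 * read from the guest control registers.
 */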
djm@6458 690 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
djm@6458 691 {
djm@6458 692 IA64_PSR vpsr;
djm@6458 693 IA64FAULT ret1;
djm@6458 694
djm@6458 695 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 696 if ( vpsr.ic ) {
djm@6458 697 set_illegal_op_isr(vcpu);
djm@6458 698 illegal_op(vcpu);
djm@6458 699 return IA64_FAULT;
djm@6458 700 }
djm@6458 701
djm@6458 702 #ifdef VMAL_NO_FAULT_CHECK
awilliam@8916 703 UINT64 fault;
awilliam@8916 704 ISR isr;
djm@6458 705 if ( vpsr.cpl != 0) {
djm@6458 706 /* Inject Privileged Operation fault into guest */
djm@6458 707 set_privileged_operation_isr (vcpu, 0);
djm@6458 708 privilege_op (vcpu);
djm@6458 709 return IA64_FAULT;
djm@6458 710 }
djm@6458 711 #endif // VMAL_NO_FAULT_CHECK
djm@6867 712 ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
djm@6458 713 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 714 if( ret1 != IA64_NO_FAULT ){
djm@6458 715 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 716 rnat_comsumption(vcpu);
djm@6458 717 return IA64_FAULT;
djm@6458 718 }
djm@6458 719 #endif // VMAL_NO_FAULT_CHECK
djm@6458 720
djm@6801 721 if (vcpu_get_itir(vcpu,itir)){
djm@6458 722 return(IA64_FAULT);
djm@6458 723 }
djm@6801 724 if (vcpu_get_ifa(vcpu,ifa)){
djm@6458 725 return(IA64_FAULT);
djm@6458 726 }
djm@6458 727 #ifdef VMAL_NO_FAULT_CHECK
djm@6458 728 if (unimplemented_gva(vcpu,ifa) ) {
djm@6458 729 isr.val = set_isr_ei_ni(vcpu);
djm@6458 730 isr.code = IA64_RESERVED_REG_FAULT;
djm@6458 731 vcpu_set_isr(vcpu, isr.val);
djm@6458 732 unimpl_daddr(vcpu);
djm@6458 733 return IA64_FAULT;
djm@6458 734 }
djm@6458 735 #endif // VMAL_NO_FAULT_CHECK
djm@6458 736 return IA64_NO_FAULT;
djm@6458 737 }
djm@6458 738
djm@6458 739 IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
djm@6458 740 {
djm@6458 741 UINT64 itir, ifa, pte;
djm@6458 742
djm@6458 743 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
djm@6458 744 return IA64_FAULT;
djm@6458 745 }
djm@6458 746
djm@6458 747 return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
djm@6458 748 }
djm@6458 749
djm@6458 750 IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
djm@6458 751 {
djm@6458 752 UINT64 itir, ifa, pte;
djm@6458 753
djm@6458 754 if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
djm@6458 755 return IA64_FAULT;
djm@6458 756 }
djm@6458 757
djm@6458 758 return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));
djm@6458 759
djm@6458 760 }
djm@6458 761
djm@6458 762 /*************************************
djm@6458 763 * Moves to semi-privileged registers
djm@6458 764 *************************************/
djm@6458 765
djm@6458 766 IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
djm@6458 767 {
djm@6458 768 // I27 and M30 are identical for these fields
awilliam@9392 769 UINT64 imm;
awilliam@9982 770
djm@6458 771 if(inst.M30.ar3!=44){
awilliam@9982 772 panic_domain(vcpu_regs(vcpu),"Can't support ar registers other than itc");
djm@6458 773 }
djm@6458 774 #ifdef CHECK_FAULT
djm@6458 775 IA64_PSR vpsr;
djm@6458 776 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 777 if ( vpsr.cpl != 0) {
djm@6458 778 /* Inject Privileged Operation fault into guest */
djm@6458 779 set_privileged_operation_isr (vcpu, 0);
djm@6458 780 privilege_op (vcpu);
djm@6458 781 return IA64_FAULT;
djm@6458 782 }
djm@6458 783 #endif // CHECK_FAULT
djm@6458 784 if(inst.M30.s){
djm@6458 785 imm = -inst.M30.imm;
djm@6458 786 }else{
djm@6458 787 imm = inst.M30.imm;
djm@6458 788 }
djm@6458 789 return (vmx_vcpu_set_itc(vcpu, imm));
djm@6458 790 }
djm@6458 791
djm@6458 792 IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
djm@6458 793 {
djm@6458 794 // I26 and M29 are identical for these fields
djm@6458 795 u64 r2;
djm@6458 796 if(inst.M29.ar3!=44){
awilliam@9982 797 panic_domain(vcpu_regs(vcpu),"Can't support ar registers other than itc");
djm@6458 798 }
djm@6867 799 if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
djm@6458 800 #ifdef CHECK_FAULT
djm@6458 801 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 802 rnat_comsumption(vcpu);
djm@6458 803 return IA64_FAULT;
djm@6458 804 #endif //CHECK_FAULT
djm@6458 805 }
djm@6458 806 #ifdef CHECK_FAULT
djm@6458 807 IA64_PSR vpsr;
djm@6458 808 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 809 if ( vpsr.cpl != 0) {
djm@6458 810 /* Inject Privileged Operation fault into guest */
djm@6458 811 set_privileged_operation_isr (vcpu, 0);
djm@6458 812 privilege_op (vcpu);
djm@6458 813 return IA64_FAULT;
djm@6458 814 }
djm@6458 815 #endif // CHECK_FAULT
djm@6458 816 return (vmx_vcpu_set_itc(vcpu, r2));
djm@6458 817 }
djm@6458 818
djm@6458 819
djm@6458 820 IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
djm@6458 821 {
djm@6458 822 // I27 and M30 are identical for these fields
awilliam@9392 823 u64 r1;
djm@6458 824 if(inst.M31.ar3!=44){
awilliam@9982 825 panic_domain(vcpu_regs(vcpu),"Can't support ar registers other than itc");
djm@6458 826 }
djm@6458 827 #ifdef CHECK_FAULT
djm@6458 828 if(check_target_register(vcpu,inst.M31.r1)){
djm@6458 829 set_illegal_op_isr(vcpu);
djm@6458 830 illegal_op(vcpu);
djm@6458 831 return IA64_FAULT;
djm@6458 832 }
djm@6458 833 IA64_PSR vpsr;
djm@6458 834 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 835 if (vpsr.si&& vpsr.cpl != 0) {
djm@6458 836 /* Inject Privileged Operation fault into guest */
djm@6458 837 set_privileged_operation_isr (vcpu, 0);
djm@6458 838 privilege_op (vcpu);
djm@6458 839 return IA64_FAULT;
djm@6458 840 }
djm@6458 841 #endif // CHECK_FAULT
djm@6458 842 vmx_vcpu_get_itc(vcpu,&r1);
djm@6867 843 vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
djm@6458 844 return IA64_NO_FAULT;
djm@6458 845 }
djm@6458 846
djm@6458 847
djm@6458 848 /********************************
djm@6458 849 * Moves to privileged registers
djm@6458 850 ********************************/
djm@6458 851
djm@6458 852 IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
djm@6458 853 {
djm@6458 854 u64 r3,r2;
djm@6458 855 #ifdef CHECK_FAULT
djm@6458 856 IA64_PSR vpsr;
djm@6458 857 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 858 if (vpsr.cpl != 0) {
djm@6458 859 /* Inject Privileged Operation fault into guest */
djm@6458 860 set_privileged_operation_isr (vcpu, 0);
djm@6458 861 privilege_op (vcpu);
djm@6458 862 return IA64_FAULT;
djm@6458 863 }
djm@6458 864 #endif // CHECK_FAULT
djm@6867 865 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
djm@6458 866 #ifdef CHECK_FAULT
djm@6458 867 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 868 rnat_comsumption(vcpu);
djm@6458 869 return IA64_FAULT;
djm@6458 870 #endif //CHECK_FAULT
djm@6458 871 }
djm@6458 872 return (vmx_vcpu_set_pkr(vcpu,r3,r2));
djm@6458 873 }
djm@6458 874
djm@6458 875 IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
djm@6458 876 {
djm@6458 877 u64 r3,r2;
djm@6458 878 #ifdef CHECK_FAULT
djm@6458 879 IA64_PSR vpsr;
djm@6458 880 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 881 if (vpsr.cpl != 0) {
djm@6458 882 /* Inject Privileged Operation fault into guest */
djm@6458 883 set_privileged_operation_isr (vcpu, 0);
djm@6458 884 privilege_op (vcpu);
djm@6458 885 return IA64_FAULT;
djm@6458 886 }
djm@6458 887 #endif // CHECK_FAULT
djm@6867 888 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
djm@6458 889 #ifdef CHECK_FAULT
djm@6458 890 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 891 rnat_comsumption(vcpu);
djm@6458 892 return IA64_FAULT;
djm@6458 893 #endif //CHECK_FAULT
djm@6458 894 }
djm@6458 895 return (vmx_vcpu_set_rr(vcpu,r3,r2));
djm@6458 896 }
djm@6458 897
djm@6458 898 IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
djm@6458 899 {
awilliam@9392 900 u64 r3,r2;
djm@7915 901 return IA64_NO_FAULT;
djm@6458 902 #ifdef CHECK_FAULT
djm@6458 903 IA64_PSR vpsr;
djm@6458 904 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 905 if (vpsr.cpl != 0) {
djm@6458 906 /* Inject Privileged Operation fault into guest */
djm@6458 907 set_privileged_operation_isr (vcpu, 0);
djm@6458 908 privilege_op (vcpu);
djm@6458 909 return IA64_FAULT;
djm@6458 910 }
djm@6458 911 #endif // CHECK_FAULT
djm@6867 912 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
djm@6458 913 #ifdef CHECK_FAULT
djm@6458 914 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 915 rnat_comsumption(vcpu);
djm@6458 916 return IA64_FAULT;
djm@6458 917 #endif //CHECK_FAULT
djm@6458 918 }
djm@6458 919 return (vmx_vcpu_set_dbr(vcpu,r3,r2));
djm@6458 920 }
djm@6458 921
djm@6458 922 IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
djm@6458 923 {
awilliam@9392 924 u64 r3,r2;
djm@7915 925 return IA64_NO_FAULT;
djm@6458 926 #ifdef CHECK_FAULT
djm@6458 927 IA64_PSR vpsr;
djm@6458 928 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 929 if (vpsr.cpl != 0) {
djm@6458 930 /* Inject Privileged Operation fault into guest */
djm@6458 931 set_privileged_operation_isr (vcpu, 0);
djm@6458 932 privilege_op (vcpu);
djm@6458 933 return IA64_FAULT;
djm@6458 934 }
djm@6458 935 #endif // CHECK_FAULT
djm@6867 936 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
djm@6458 937 #ifdef CHECK_FAULT
djm@6458 938 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 939 rnat_comsumption(vcpu);
djm@6458 940 return IA64_FAULT;
djm@6458 941 #endif //CHECK_FAULT
djm@6458 942 }
djm@6458 943 return (vmx_vcpu_set_ibr(vcpu,r3,r2));
djm@6458 944 }
djm@6458 945
djm@6458 946 IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
djm@6458 947 {
djm@6458 948 u64 r3,r2;
djm@6458 949 #ifdef CHECK_FAULT
djm@6458 950 IA64_PSR vpsr;
djm@6458 951 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 952 if (vpsr.cpl != 0) {
djm@6458 953 /* Inject Privileged Operation fault into guest */
djm@6458 954 set_privileged_operation_isr (vcpu, 0);
djm@6458 955 privilege_op (vcpu);
djm@6458 956 return IA64_FAULT;
djm@6458 957 }
djm@6458 958 #endif // CHECK_FAULT
djm@6867 959 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
djm@6458 960 #ifdef CHECK_FAULT
djm@6458 961 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 962 rnat_comsumption(vcpu);
djm@6458 963 return IA64_FAULT;
djm@6458 964 #endif //CHECK_FAULT
djm@6458 965 }
djm@6458 966 return (vmx_vcpu_set_pmc(vcpu,r3,r2));
djm@6458 967 }
djm@6458 968
djm@6458 969 IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
djm@6458 970 {
djm@6458 971 u64 r3,r2;
djm@6458 972 #ifdef CHECK_FAULT
djm@6458 973 IA64_PSR vpsr;
djm@6458 974 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 975 if (vpsr.cpl != 0) {
djm@6458 976 /* Inject Privileged Operation fault into guest */
djm@6458 977 set_privileged_operation_isr (vcpu, 0);
djm@6458 978 privilege_op (vcpu);
djm@6458 979 return IA64_FAULT;
djm@6458 980 }
djm@6458 981 #endif // CHECK_FAULT
djm@6867 982 if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
djm@6458 983 #ifdef CHECK_FAULT
djm@6458 984 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 985 rnat_comsumption(vcpu);
djm@6458 986 return IA64_FAULT;
djm@6458 987 #endif //CHECK_FAULT
djm@6458 988 }
djm@6458 989 return (vmx_vcpu_set_pmd(vcpu,r3,r2));
djm@6458 990 }
djm@6458 991
djm@6458 992
djm@6458 993 /**********************************
djm@6458 994 * Moves from privileged registers
djm@6458 995 **********************************/
djm@6458 996
djm@6458 997 IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
djm@6458 998 {
djm@6458 999 u64 r3,r1;
djm@6458 1000 #ifdef CHECK_FAULT
djm@6458 1001 if(check_target_register(vcpu, inst.M43.r1)){
djm@6458 1002 set_illegal_op_isr(vcpu);
djm@6458 1003 illegal_op(vcpu);
djm@6458 1004 return IA64_FAULT;
djm@6458 1005 }
djm@6458 1006 IA64_PSR vpsr;
djm@6458 1007 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1008 if (vpsr.cpl != 0) {
djm@6458 1009 /* Inject Privileged Operation fault into guest */
djm@6458 1010 set_privileged_operation_isr (vcpu, 0);
djm@6458 1011 privilege_op (vcpu);
djm@6458 1012 return IA64_FAULT;
djm@6458 1013 }
djm@6458 1014
djm@6458 1015 #endif //CHECK_FAULT
djm@6867 1016 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
djm@6458 1017 #ifdef CHECK_FAULT
djm@6458 1018 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1019 rnat_comsumption(vcpu);
djm@6458 1020 return IA64_FAULT;
djm@6458 1021 #endif //CHECK_FAULT
djm@6458 1022 }
djm@6458 1023 #ifdef CHECK_FAULT
djm@6458 1024 if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
djm@6458 1025 set_rsv_reg_field_isr(vcpu);
djm@6458 1026 rsv_reg_field(vcpu);
djm@6458 1027 }
djm@6458 1028 #endif //CHECK_FAULT
awilliam@9164 1029 vcpu_get_rr(vcpu,r3,&r1);
djm@6867 1030 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
djm@6458 1031 }
djm@6458 1032
djm@6458 1033 IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
djm@6458 1034 {
djm@6458 1035 u64 r3,r1;
djm@6458 1036 #ifdef CHECK_FAULT
djm@6458 1037 if(check_target_register(vcpu, inst.M43.r1)){
djm@6458 1038 set_illegal_op_isr(vcpu);
djm@6458 1039 illegal_op(vcpu);
djm@6458 1040 return IA64_FAULT;
djm@6458 1041 }
djm@6458 1042 IA64_PSR vpsr;
djm@6458 1043 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1044 if (vpsr.cpl != 0) {
djm@6458 1045 /* Inject Privileged Operation fault into guest */
djm@6458 1046 set_privileged_operation_isr (vcpu, 0);
djm@6458 1047 privilege_op (vcpu);
djm@6458 1048 return IA64_FAULT;
djm@6458 1049 }
djm@6458 1050
djm@6458 1051 #endif //CHECK_FAULT
djm@6867 1052 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
djm@6458 1053 #ifdef CHECK_FAULT
djm@6458 1054 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1055 rnat_comsumption(vcpu);
djm@6458 1056 return IA64_FAULT;
djm@6458 1057 #endif //CHECK_FAULT
djm@6458 1058 }
djm@6458 1059 #ifdef CHECK_FAULT
djm@6458 1060 if(is_reserved_indirect_register(vcpu,r3)){
djm@6458 1061 set_rsv_reg_field_isr(vcpu);
djm@6458 1062 rsv_reg_field(vcpu);
djm@6458 1063 return IA64_FAULT;
djm@6458 1064 }
djm@6458 1065 #endif //CHECK_FAULT
djm@6458 1066 vmx_vcpu_get_pkr(vcpu,r3,&r1);
djm@6867 1067 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
djm@6458 1068 }
djm@6458 1069
djm@6458 1070 IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
djm@6458 1071 {
djm@6458 1072 u64 r3,r1;
djm@6458 1073 #ifdef CHECK_FAULT
djm@6458 1074 if(check_target_register(vcpu, inst.M43.r1)){
djm@6458 1075 set_illegal_op_isr(vcpu);
djm@6458 1076 illegal_op(vcpu);
djm@6458 1077 return IA64_FAULT;
djm@6458 1078 }
djm@6458 1079 IA64_PSR vpsr;
djm@6458 1080 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1081 if (vpsr.cpl != 0) {
djm@6458 1082 /* Inject Privileged Operation fault into guest */
djm@6458 1083 set_privileged_operation_isr (vcpu, 0);
djm@6458 1084 privilege_op (vcpu);
djm@6458 1085 return IA64_FAULT;
djm@6458 1086 }
djm@6458 1087
djm@6458 1088 #endif //CHECK_FAULT
djm@6867 1089 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
djm@6458 1090 #ifdef CHECK_FAULT
djm@6458 1091 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1092 rnat_comsumption(vcpu);
djm@6458 1093 return IA64_FAULT;
djm@6458 1094 #endif //CHECK_FAULT
djm@6458 1095 }
djm@6458 1096 #ifdef CHECK_FAULT
djm@6458 1097 if(is_reserved_indirect_register(vcpu,r3)){
djm@6458 1098 set_rsv_reg_field_isr(vcpu);
djm@6458 1099 rsv_reg_field(vcpu);
djm@6458 1100 return IA64_FAULT;
djm@6458 1101 }
djm@6458 1102 #endif //CHECK_FAULT
djm@6458 1103 vmx_vcpu_get_dbr(vcpu,r3,&r1);
djm@6867 1104 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
djm@6458 1105 }
djm@6458 1106
djm@6458 1107 IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
djm@6458 1108 {
djm@6458 1109 u64 r3,r1;
djm@6458 1110 #ifdef CHECK_FAULT
djm@6458 1111 if(check_target_register(vcpu, inst.M43.r1)){
djm@6458 1112 set_illegal_op_isr(vcpu);
djm@6458 1113 illegal_op(vcpu);
djm@6458 1114 return IA64_FAULT;
djm@6458 1115 }
djm@6458 1116 IA64_PSR vpsr;
djm@6458 1117 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1118 if (vpsr.cpl != 0) {
djm@6458 1119 /* Inject Privileged Operation fault into guest */
djm@6458 1120 set_privileged_operation_isr (vcpu, 0);
djm@6458 1121 privilege_op (vcpu);
djm@6458 1122 return IA64_FAULT;
djm@6458 1123 }
djm@6458 1124
djm@6458 1125 #endif //CHECK_FAULT
djm@6867 1126 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
djm@6458 1127 #ifdef CHECK_FAULT
djm@6458 1128 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1129 rnat_comsumption(vcpu);
djm@6458 1130 return IA64_FAULT;
djm@6458 1131 #endif //CHECK_FAULT
djm@6458 1132 }
djm@6458 1133 #ifdef CHECK_FAULT
djm@6458 1134 if(is_reserved_indirect_register(vcpu,r3)){
djm@6458 1135 set_rsv_reg_field_isr(vcpu);
djm@6458 1136 rsv_reg_field(vcpu);
djm@6458 1137 return IA64_FAULT;
djm@6458 1138 }
djm@6458 1139 #endif //CHECK_FAULT
djm@6458 1140 vmx_vcpu_get_ibr(vcpu,r3,&r1);
djm@6867 1141 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
djm@6458 1142 }
djm@6458 1143
djm@6458 1144 IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
djm@6458 1145 {
djm@6458 1146 u64 r3,r1;
djm@6458 1147 #ifdef CHECK_FAULT
djm@6458 1148 if(check_target_register(vcpu, inst.M43.r1)){
djm@6458 1149 set_illegal_op_isr(vcpu);
djm@6458 1150 illegal_op(vcpu);
djm@6458 1151 return IA64_FAULT;
djm@6458 1152 }
djm@6458 1153 IA64_PSR vpsr;
djm@6458 1154 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1155 if (vpsr.cpl != 0) {
djm@6458 1156 /* Inject Privileged Operation fault into guest */
djm@6458 1157 set_privileged_operation_isr (vcpu, 0);
djm@6458 1158 privilege_op (vcpu);
djm@6458 1159 return IA64_FAULT;
djm@6458 1160 }
djm@6458 1161
djm@6458 1162 #endif //CHECK_FAULT
djm@6867 1163 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
djm@6458 1164 #ifdef CHECK_FAULT
djm@6458 1165 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1166 rnat_comsumption(vcpu);
djm@6458 1167 return IA64_FAULT;
djm@6458 1168 #endif //CHECK_FAULT
djm@6458 1169 }
djm@6458 1170 #ifdef CHECK_FAULT
djm@6458 1171 if(is_reserved_indirect_register(vcpu,r3)){
djm@6458 1172 set_rsv_reg_field_isr(vcpu);
djm@6458 1173 rsv_reg_field(vcpu);
djm@6458 1174 return IA64_FAULT;
djm@6458 1175 }
djm@6458 1176 #endif //CHECK_FAULT
djm@6458 1177 vmx_vcpu_get_pmc(vcpu,r3,&r1);
djm@6867 1178 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
djm@6458 1179 }
djm@6458 1180
djm@6458 1181 IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
djm@6458 1182 {
djm@6458 1183 u64 r3,r1;
djm@6458 1184 #ifdef CHECK_FAULT
djm@6458 1185 if(check_target_register(vcpu, inst.M43.r1)){
djm@6458 1186 set_illegal_op_isr(vcpu);
djm@6458 1187 illegal_op(vcpu);
djm@6458 1188 return IA64_FAULT;
djm@6458 1189 }
djm@6458 1190 #endif //CHECK_FAULT
djm@6867 1191 if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
djm@6458 1192 #ifdef CHECK_FAULT
djm@6458 1193 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1194 rnat_comsumption(vcpu);
djm@6458 1195 return IA64_FAULT;
djm@6458 1196 #endif //CHECK_FAULT
djm@6458 1197 }
djm@6458 1198 #ifdef CHECK_FAULT
djm@6458 1199 if(is_reserved_indirect_register(vcpu,r3)){
djm@6458 1200 set_rsv_reg_field_isr(vcpu);
djm@6458 1201 rsv_reg_field(vcpu);
djm@6458 1202 return IA64_FAULT;
djm@6458 1203 }
djm@6458 1204 #endif //CHECK_FAULT
djm@6458 1205 vmx_vcpu_get_cpuid(vcpu,r3,&r1);
djm@6867 1206 return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
djm@6458 1207 }
djm@6458 1208
djm@6458 1209 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
djm@6458 1210 {
awilliam@8916 1211 u64 r2;
awilliam@9392 1212 extern u64 cr_igfld_mask(int index, u64 value);
djm@6458 1213 #ifdef CHECK_FAULT
djm@6458 1214 IA64_PSR vpsr;
djm@6458 1215 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1216 if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
djm@6458 1217 set_illegal_op_isr(vcpu);
djm@6458 1218 illegal_op(vcpu);
djm@6458 1219 return IA64_FAULT;
djm@6458 1220 }
djm@6458 1221 if ( vpsr.cpl != 0) {
djm@6458 1222 /* Inject Privileged Operation fault into guest */
djm@6458 1223 set_privileged_operation_isr (vcpu, 0);
djm@6458 1224 privilege_op (vcpu);
djm@6458 1225 return IA64_FAULT;
djm@6458 1226 }
djm@6458 1227 #endif // CHECK_FAULT
djm@6867 1228 if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
djm@6458 1229 #ifdef CHECK_FAULT
djm@6458 1230 set_isr_reg_nat_consumption(vcpu,0,0);
djm@6458 1231 rnat_comsumption(vcpu);
djm@6458 1232 return IA64_FAULT;
djm@6458 1233 #endif //CHECK_FAULT
djm@6458 1234 }
djm@6458 1235 #ifdef CHECK_FAULT
djm@6458 1236 if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
djm@6458 1237 /* Inject Reserved Register/Field fault
djm@6458 1238 * into guest */
djm@6458 1239 set_rsv_reg_field_isr (vcpu,0);
djm@6458 1240 rsv_reg_field (vcpu);
djm@6458 1241 return IA64_FAULT;
djm@6458 1242 }
djm@6458 1243 #endif //CHECK_FAULT
djm@6458 1244 r2 = cr_igfld_mask(inst.M32.cr3,r2);
djm@6458 1245 switch (inst.M32.cr3) {
djm@6458 1246 case 0: return vmx_vcpu_set_dcr(vcpu,r2);
djm@6458 1247 case 1: return vmx_vcpu_set_itm(vcpu,r2);
djm@6458 1248 case 2: return vmx_vcpu_set_iva(vcpu,r2);
djm@6458 1249 case 8: return vmx_vcpu_set_pta(vcpu,r2);
djm@6801 1250 case 16:return vcpu_set_ipsr(vcpu,r2);
djm@6801 1251 case 17:return vcpu_set_isr(vcpu,r2);
djm@6801 1252 case 19:return vcpu_set_iip(vcpu,r2);
djm@6801 1253 case 20:return vcpu_set_ifa(vcpu,r2);
djm@6801 1254 case 21:return vcpu_set_itir(vcpu,r2);
djm@6801 1255 case 22:return vcpu_set_iipa(vcpu,r2);
djm@6801 1256 case 23:return vcpu_set_ifs(vcpu,r2);
djm@6801 1257 case 24:return vcpu_set_iim(vcpu,r2);
djm@6801 1258 case 25:return vcpu_set_iha(vcpu,r2);
djm@6458 1259 case 64:printk("SET LID to 0x%lx\n", r2);
awilliam@10258 1260 return IA64_NO_FAULT;
djm@6458 1261 case 65:return IA64_NO_FAULT;
djm@6458 1262 case 66:return vmx_vcpu_set_tpr(vcpu,r2);
djm@6458 1263 case 67:return vmx_vcpu_set_eoi(vcpu,r2);
djm@6458 1264 case 68:return IA64_NO_FAULT;
djm@6458 1265 case 69:return IA64_NO_FAULT;
djm@6458 1266 case 70:return IA64_NO_FAULT;
djm@6458 1267 case 71:return IA64_NO_FAULT;
djm@6458 1268 case 72:return vmx_vcpu_set_itv(vcpu,r2);
djm@6458 1269 case 73:return vmx_vcpu_set_pmv(vcpu,r2);
djm@6458 1270 case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
djm@6458 1271 case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
djm@6458 1272 case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
awilliam@10258 1273 default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
awilliam@10258 1274 return IA64_NO_FAULT;
djm@6458 1275 }
djm@6458 1276 }
djm@6458 1277
djm@6458 1278
djm@6458 1279 #define cr_get(cr) \
djm@6801 1280 ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
djm@6867 1281 vcpu_set_gr(vcpu, tgt, val,0):fault;
djm@6801 1282
djm@6801 1283 #define vmx_cr_get(cr) \
djm@6458 1284 ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
djm@6867 1285 vcpu_set_gr(vcpu, tgt, val,0):fault;
djm@6458 1286
djm@6458 1287 IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
djm@6458 1288 {
djm@6458 1289 UINT64 tgt = inst.M33.r1;
djm@6458 1290 UINT64 val;
djm@6458 1291 IA64FAULT fault;
djm@6458 1292 #ifdef CHECK_FAULT
djm@6458 1293 IA64_PSR vpsr;
djm@6458 1294 vpsr.val=vmx_vcpu_get_psr(vcpu);
djm@6458 1295 if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
djm@6458 1296 (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
djm@6458 1297 set_illegal_op_isr(vcpu);
djm@6458 1298 illegal_op(vcpu);
djm@6458 1299 return IA64_FAULT;
djm@6458 1300 }
djm@6458 1301 if ( vpsr.cpl != 0) {
djm@6458 1302 /* Inject Privileged Operation fault into guest */
djm@6458 1303 set_privileged_operation_isr (vcpu, 0);
djm@6458 1304 privilege_op (vcpu);
djm@6458 1305 return IA64_FAULT;
djm@6458 1306 }
djm@6458 1307 #endif // CHECK_FAULT
djm@6458 1308
djm@6458 1309 // from_cr_cnt[inst.M33.cr3]++;
djm@6458 1310 switch (inst.M33.cr3) {
djm@6801 1311 case 0: return vmx_cr_get(dcr);
djm@6801 1312 case 1: return vmx_cr_get(itm);
djm@6801 1313 case 2: return vmx_cr_get(iva);
djm@6801 1314 case 8: return vmx_cr_get(pta);
djm@6458 1315 case 16:return cr_get(ipsr);
djm@6458 1316 case 17:return cr_get(isr);
djm@6458 1317 case 19:return cr_get(iip);
djm@6458 1318 case 20:return cr_get(ifa);
djm@6458 1319 case 21:return cr_get(itir);
djm@6458 1320 case 22:return cr_get(iipa);
djm@6458 1321 case 23:return cr_get(ifs);
djm@6458 1322 case 24:return cr_get(iim);
djm@6458 1323 case 25:return cr_get(iha);
djm@6801 1324 case 64:return vmx_cr_get(lid);
djm@6458 1325 case 65:
djm@6801 1326 vmx_vcpu_get_ivr(vcpu,&val);
djm@6867 1327 return vcpu_set_gr(vcpu,tgt,val,0);
djm@6801 1328 case 66:return vmx_cr_get(tpr);
djm@6867 1329 case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
djm@6801 1330 case 68:return vmx_cr_get(irr0);
djm@6801 1331 case 69:return vmx_cr_get(irr1);
djm@6801 1332 case 70:return vmx_cr_get(irr2);
djm@6801 1333 case 71:return vmx_cr_get(irr3);
djm@6801 1334 case 72:return vmx_cr_get(itv);
djm@6801 1335 case 73:return vmx_cr_get(pmv);
djm@6801 1336 case 74:return vmx_cr_get(cmcv);
djm@6801 1337 case 80:return vmx_cr_get(lrr0);
djm@6801 1338 case 81:return vmx_cr_get(lrr1);
djm@7915 1339 default: return IA64_NO_FAULT;
djm@6458 1340 }
djm@6458 1341 }
djm@6458 1342
djm@6458 1343
djm@6458 1344 //#define BYPASS_VMAL_OPCODE
djm@6458 1345 extern IA64_SLOT_TYPE slot_types[0x20][3];
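/* Fetch both 64-bit halves of the instruction bundle at guest address iip. */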
djm@6458 1346 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
djm@6458 1347 {
djm@6458 1348 IA64_BUNDLE bundle;
awilliam@9011 1349 fetch_code( current, iip, &bundle.i64[0], &bundle.i64[1]);
djm@6458 1350 return bundle;
djm@6458 1351 }
djm@6458 1352
djm@6458 1353 /** Emulate a privileged operation.
djm@6458 1354 *
djm@6458 1355 *
djm@6458 1356 * @param vcpu virtual cpu
djm@6458 1357 * @param cause the reason that caused the virtualization fault
djm@6458 1358 * @param opcode the instruction opcode that caused the virtualization fault
djm@6458 1359 */
djm@6458 1360
djm@6458 1361 void
djm@6867 1362 vmx_emulate(VCPU *vcpu, REGS *regs)
djm@6458 1363 {
djm@6458 1364 IA64FAULT status;
djm@6458 1365 INST64 inst;
djm@6867 1366 UINT64 iip, cause, opcode;
djm@6458 1367 iip = regs->cr_iip;
djm@6867 1368 cause = VMX(vcpu,cause);
djm@6867 1369 opcode = VMX(vcpu,opcode);
djm@6867 1370
djm@6458 1371 #ifdef VTLB_DEBUG
djm@6458 1372 check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
djm@6458 1373 dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
djm@6458 1374 #endif
djm@6458 1375 #if 0
djm@6458 1376 if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
djm@6458 1377 printf ("VMAL decode error: cause - %lx; op - %lx\n",
djm@6458 1378 cause, opcode );
djm@6458 1379 return;
djm@6458 1380 }
djm@6458 1381 #endif
djm@6458 1382 #ifdef BYPASS_VMAL_OPCODE
djm@6458 1383 // make a local copy of the bundle containing the privop
awilliam@8916 1384 IA64_BUNDLE bundle;
awilliam@8916 1385 int slot;
awilliam@8916 1386 IA64_SLOT_TYPE slot_type;
awilliam@8916 1387 IA64_PSR vpsr;
djm@6458 1388 bundle = __vmx_get_domain_bundle(iip);
djm@6458 1389 slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
djm@6458 1390 if (!slot) inst.inst = bundle.slot0;
djm@6458 1391 else if (slot == 1)
djm@6458 1392 inst.inst = bundle.slot1a + (bundle.slot1b<<18);
djm@6458 1393 else if (slot == 2) inst.inst = bundle.slot2;
djm@6458 1394 else printf("priv_handle_op: illegal slot: %d\n", slot);
djm@6458 1395 slot_type = slot_types[bundle.template][slot];
djm@6458 1396 ia64_priv_decoder(slot_type, inst, &cause);
djm@6458 1397 if(cause==0){
awilliam@9982 1398 panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
djm@6458 1399 }
djm@6458 1400 #else
djm@6458 1401 inst.inst=opcode;
djm@6458 1402 #endif /* BYPASS_VMAL_OPCODE */
djm@6458 1403 /*
djm@6458 1404 * Switch to actual virtual rid in rr0 and rr4,
djm@6458 1405 * which is required by some tlb related instructions.
djm@6458 1406 */
djm@6458 1407 prepare_if_physical_mode(vcpu);
djm@6458 1408
djm@6458 1409 switch(cause) {
djm@6458 1410 case EVENT_RSM:
djm@6458 1411 status=vmx_emul_rsm(vcpu, inst);
djm@6458 1412 break;
djm@6458 1413 case EVENT_SSM:
djm@6458 1414 status=vmx_emul_ssm(vcpu, inst);
djm@6458 1415 break;
djm@6458 1416 case EVENT_MOV_TO_PSR:
djm@6458 1417 status=vmx_emul_mov_to_psr(vcpu, inst);
djm@6458 1418 break;
djm@6458 1419 case EVENT_MOV_FROM_PSR:
djm@6458 1420 status=vmx_emul_mov_from_psr(vcpu, inst);
djm@6458 1421 break;
djm@6458 1422 case EVENT_MOV_FROM_CR:
djm@6458 1423 status=vmx_emul_mov_from_cr(vcpu, inst);
djm@6458 1424 break;
djm@6458 1425 case EVENT_MOV_TO_CR:
djm@6458 1426 status=vmx_emul_mov_to_cr(vcpu, inst);
djm@6458 1427 break;
djm@6458 1428 case EVENT_BSW_0:
djm@6458 1429 status=vmx_emul_bsw0(vcpu, inst);
djm@6458 1430 break;
djm@6458 1431 case EVENT_BSW_1:
djm@6458 1432 status=vmx_emul_bsw1(vcpu, inst);
djm@6458 1433 break;
djm@6458 1434 case EVENT_COVER:
djm@6458 1435 status=vmx_emul_cover(vcpu, inst);
djm@6458 1436 break;
djm@6458 1437 case EVENT_RFI:
djm@6458 1438 status=vmx_emul_rfi(vcpu, inst);
djm@6458 1439 break;
djm@6458 1440 case EVENT_ITR_D:
djm@6458 1441 status=vmx_emul_itr_d(vcpu, inst);
djm@6458 1442 break;
djm@6458 1443 case EVENT_ITR_I:
djm@6458 1444 status=vmx_emul_itr_i(vcpu, inst);
djm@6458 1445 break;
djm@6458 1446 case EVENT_PTR_D:
djm@6458 1447 status=vmx_emul_ptr_d(vcpu, inst);
djm@6458 1448 break;
djm@6458 1449 case EVENT_PTR_I:
djm@6458 1450 status=vmx_emul_ptr_i(vcpu, inst);
djm@6458 1451 break;
djm@6458 1452 case EVENT_ITC_D:
djm@6458 1453 status=vmx_emul_itc_d(vcpu, inst);
djm@6458 1454 break;
djm@6458 1455 case EVENT_ITC_I:
djm@6458 1456 status=vmx_emul_itc_i(vcpu, inst);
djm@6458 1457 break;
djm@6458 1458 case EVENT_PTC_L:
djm@6458 1459 status=vmx_emul_ptc_l(vcpu, inst);
djm@6458 1460 break;
djm@6458 1461 case EVENT_PTC_G:
djm@6458 1462 status=vmx_emul_ptc_g(vcpu, inst);
djm@6458 1463 break;
djm@6458 1464 case EVENT_PTC_GA:
djm@6458 1465 status=vmx_emul_ptc_ga(vcpu, inst);
djm@6458 1466 break;
djm@6458 1467 case EVENT_PTC_E:
djm@6458 1468 status=vmx_emul_ptc_e(vcpu, inst);
djm@6458 1469 break;
djm@6458 1470 case EVENT_MOV_TO_RR:
djm@6458 1471 status=vmx_emul_mov_to_rr(vcpu, inst);
djm@6458 1472 break;
djm@6458 1473 case EVENT_MOV_FROM_RR:
djm@6458 1474 status=vmx_emul_mov_from_rr(vcpu, inst);
djm@6458 1475 break;
djm@6458 1476 case EVENT_THASH:
djm@6458 1477 status=vmx_emul_thash(vcpu, inst);
djm@6458 1478 break;
djm@6458 1479 case EVENT_TTAG:
djm@6458 1480 status=vmx_emul_ttag(vcpu, inst);
djm@6458 1481 break;
djm@6458 1482 case EVENT_TPA:
djm@6458 1483 status=vmx_emul_tpa(vcpu, inst);
djm@6458 1484 break;
djm@6458 1485 case EVENT_TAK:
djm@6458 1486 status=vmx_emul_tak(vcpu, inst);
djm@6458 1487 break;
djm@6458 1488 case EVENT_MOV_TO_AR_IMM:
djm@6458 1489 status=vmx_emul_mov_to_ar_imm(vcpu, inst);
djm@6458 1490 break;
djm@6458 1491 case EVENT_MOV_TO_AR:
djm@6458 1492 status=vmx_emul_mov_to_ar_reg(vcpu, inst);
djm@6458 1493 break;
djm@6458 1494 case EVENT_MOV_FROM_AR:
djm@6458 1495 status=vmx_emul_mov_from_ar_reg(vcpu, inst);
djm@6458 1496 break;
djm@6458 1497 case EVENT_MOV_TO_DBR:
djm@6458 1498 status=vmx_emul_mov_to_dbr(vcpu, inst);
djm@6458 1499 break;
djm@6458 1500 case EVENT_MOV_TO_IBR:
djm@6458 1501 status=vmx_emul_mov_to_ibr(vcpu, inst);
djm@6458 1502 break;
djm@6458 1503 case EVENT_MOV_TO_PMC:
djm@6458 1504 status=vmx_emul_mov_to_pmc(vcpu, inst);
djm@6458 1505 break;
djm@6458 1506 case EVENT_MOV_TO_PMD:
djm@6458 1507 status=vmx_emul_mov_to_pmd(vcpu, inst);
djm@6458 1508 break;
djm@6458 1509 case EVENT_MOV_TO_PKR:
djm@6458 1510 status=vmx_emul_mov_to_pkr(vcpu, inst);
djm@6458 1511 break;
djm@6458 1512 case EVENT_MOV_FROM_DBR:
djm@6458 1513 status=vmx_emul_mov_from_dbr(vcpu, inst);
djm@6458 1514 break;
djm@6458 1515 case EVENT_MOV_FROM_IBR:
djm@6458 1516 status=vmx_emul_mov_from_ibr(vcpu, inst);
djm@6458 1517 break;
djm@6458 1518 case EVENT_MOV_FROM_PMC:
djm@6458 1519 status=vmx_emul_mov_from_pmc(vcpu, inst);
djm@6458 1520 break;
djm@6458 1521 case EVENT_MOV_FROM_PKR:
djm@6458 1522 status=vmx_emul_mov_from_pkr(vcpu, inst);
djm@6458 1523 break;
djm@6458 1524 case EVENT_MOV_FROM_CPUID:
djm@6458 1525 status=vmx_emul_mov_from_cpuid(vcpu, inst);
djm@6458 1526 break;
djm@6458 1527 case EVENT_VMSW:
awilliam@8916 1528 printf ("Unimplemented instruction %ld\n", cause);
djm@6458 1529 status=IA64_FAULT;
djm@6458 1530 break;
djm@6458 1531 default:
awilliam@9982 1532 panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
djm@6458 1533 break;
djm@6458 1534 };
djm@6458 1535
djm@6458 1536 #if 0
djm@6458 1537 if (status == IA64_FAULT)
djm@6458 1538 panic("Emulation failed with cause %d:\n", cause);
djm@6458 1539 #endif
djm@6458 1540
djm@6458 1541 if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
djm@6458 1542 vmx_vcpu_increment_iip(vcpu);
djm@6458 1543 }
djm@6458 1544
djm@6458 1545 recover_if_physical_mode(vcpu);
djm@6458 1546 return;
djm@6458 1547
djm@6458 1548 }
djm@6458 1549