uint64_t *val,
struct x86_emulate_ctxt *ctxt)
{
- struct cpu_user_regs _regs;
- int rc;
-
- _regs.ecx = (uint32_t)reg;
-
- if ( (rc = hvm_msr_read_intercept(&_regs)) != X86EMUL_OKAY )
- return rc;
-
- *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
- return X86EMUL_OKAY;
+ return hvm_msr_read_intercept(reg, val);
}
static int hvmemul_write_msr(
uint64_t val,
struct x86_emulate_ctxt *ctxt)
{
- struct cpu_user_regs _regs;
-
- _regs.edx = (uint32_t)(val >> 32);
- _regs.eax = (uint32_t)val;
- _regs.ecx = (uint32_t)reg;
-
- return hvm_msr_write_intercept(&_regs);
+ return hvm_msr_write_intercept(reg, val);
}
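With the register packing moved out of the intercept layer, both emulator callbacks collapse to plain pass-throughs; the edx:eax <-> uint64_t conversion now happens only at the vmexit boundary. A minimal sketch of that packing, using hypothetical helper names that are not part of this patch:

/* Hypothetical helpers (not introduced by this patch): the edx:eax packing
 * that hvm_msr_{read,write}_intercept() used to do internally and that the
 * SVM/VMX exit handlers further down now open-code. */
static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
{
    return ((uint64_t)(uint32_t)regs->edx << 32) | (uint32_t)regs->eax;
}

static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
{
    regs->eax = (uint32_t)val;
    regs->edx = (uint32_t)(val >> 32);
}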
static int hvmemul_wbinvd(
regs->edx = (uint32_t)(tsc >> 32);
}
-int hvm_msr_read_intercept(struct cpu_user_regs *regs)
+int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
- uint32_t ecx = regs->ecx;
- uint64_t msr_content = 0;
struct vcpu *v = current;
uint64_t *var_range_base, *fixed_range_base;
int index, mtrr;
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR));
- switch ( ecx )
+ switch ( msr )
{
case MSR_IA32_TSC:
- msr_content = hvm_get_guest_tsc(v);
+ *msr_content = hvm_get_guest_tsc(v);
break;
case MSR_TSC_AUX:
- msr_content = hvm_msr_tsc_aux(v);
+ *msr_content = hvm_msr_tsc_aux(v);
break;
case MSR_IA32_APICBASE:
- msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
+ *msr_content = vcpu_vlapic(v)->hw.apic_base_msr;
break;
case MSR_IA32_CR_PAT:
- msr_content = v->arch.hvm_vcpu.pat_cr;
+ *msr_content = v->arch.hvm_vcpu.pat_cr;
break;
case MSR_MTRRcap:
if ( !mtrr )
goto gp_fault;
- msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
+ *msr_content = v->arch.hvm_vcpu.mtrr.mtrr_cap;
break;
case MSR_MTRRdefType:
if ( !mtrr )
goto gp_fault;
- msr_content = v->arch.hvm_vcpu.mtrr.def_type
+ *msr_content = v->arch.hvm_vcpu.mtrr.def_type
| (v->arch.hvm_vcpu.mtrr.enabled << 10);
break;
case MSR_MTRRfix64K_00000:
if ( !mtrr )
goto gp_fault;
- msr_content = fixed_range_base[0];
+ *msr_content = fixed_range_base[0];
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
if ( !mtrr )
goto gp_fault;
- index = regs->ecx - MSR_MTRRfix16K_80000;
- msr_content = fixed_range_base[index + 1];
+ index = msr - MSR_MTRRfix16K_80000;
+ *msr_content = fixed_range_base[index + 1];
break;
case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
if ( !mtrr )
goto gp_fault;
- index = regs->ecx - MSR_MTRRfix4K_C0000;
- msr_content = fixed_range_base[index + 3];
+ index = msr - MSR_MTRRfix4K_C0000;
+ *msr_content = fixed_range_base[index + 3];
break;
case MSR_IA32_MTRR_PHYSBASE0...MSR_IA32_MTRR_PHYSMASK7:
if ( !mtrr )
goto gp_fault;
- index = regs->ecx - MSR_IA32_MTRR_PHYSBASE0;
- msr_content = var_range_base[index];
+ index = msr - MSR_IA32_MTRR_PHYSBASE0;
+ *msr_content = var_range_base[index];
break;
case MSR_K8_ENABLE_C1E:
* has been migrated to an Intel host. This fixes a guest crash
* in this case.
*/
- msr_content = 0;
+ *msr_content = 0;
break;
default:
- ret = vmce_rdmsr(ecx, &msr_content);
+ ret = vmce_rdmsr(msr, msr_content);
if ( ret < 0 )
goto gp_fault;
else if ( ret )
break;
/* ret == 0, This is not an MCE MSR, see other MSRs */
- else if (!ret)
- return hvm_funcs.msr_read_intercept(regs);
+ else if (!ret)
+ return hvm_funcs.msr_read_intercept(msr, msr_content);
}
- regs->eax = (uint32_t)msr_content;
- regs->edx = (uint32_t)(msr_content >> 32);
return X86EMUL_OKAY;
gp_fault:
return X86EMUL_EXCEPTION;
}
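The new contract: the caller passes the MSR index and a uint64_t slot, and only touches guest register state on X86EMUL_OKAY. A minimal caller sketch, assuming the usual hvm_inject_exception() helper is available at the call site for the X86EMUL_EXCEPTION case:

uint64_t msr_content;

if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
{
    regs->eax = (uint32_t)msr_content;
    regs->edx = (uint32_t)(msr_content >> 32);
}
else
    hvm_inject_exception(TRAP_gp_fault, 0, 0);  /* assumed error handling */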
-int hvm_msr_write_intercept(struct cpu_user_regs *regs)
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
- uint32_t ecx = regs->ecx;
- uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
struct vcpu *v = current;
int index, mtrr;
uint32_t cpuid[4];
hvm_cpuid(1, &cpuid[0], &cpuid[1], &cpuid[2], &cpuid[3]);
mtrr = !!(cpuid[3] & bitmaskof(X86_FEATURE_MTRR));
- switch ( ecx )
+ switch ( msr )
{
case MSR_IA32_TSC:
hvm_set_guest_tsc(v, msr_content);
case MSR_MTRRfix16K_A0000:
if ( !mtrr )
goto gp_fault;
- index = regs->ecx - MSR_MTRRfix16K_80000 + 1;
+ index = msr - MSR_MTRRfix16K_80000 + 1;
if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
index, msr_content) )
goto gp_fault;
case MSR_MTRRfix4K_C0000...MSR_MTRRfix4K_F8000:
if ( !mtrr )
goto gp_fault;
- index = regs->ecx - MSR_MTRRfix4K_C0000 + 3;
+ index = msr - MSR_MTRRfix4K_C0000 + 3;
if ( !mtrr_fix_range_msr_set(&v->arch.hvm_vcpu.mtrr,
index, msr_content) )
goto gp_fault;
if ( !mtrr )
goto gp_fault;
if ( !mtrr_var_range_msr_set(&v->arch.hvm_vcpu.mtrr,
- regs->ecx, msr_content) )
+ msr, msr_content) )
goto gp_fault;
break;
break;
default:
- ret = vmce_wrmsr(ecx, msr_content);
+ ret = vmce_wrmsr(msr, msr_content);
if ( ret < 0 )
goto gp_fault;
else if ( ret )
break;
else if (!ret)
- return hvm_funcs.msr_write_intercept(regs);
+ return hvm_funcs.msr_write_intercept(msr, msr_content);
}
return X86EMUL_OKAY;
write_efer(read_efer() & ~EFER_SVME);
}
-static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
+static enum handler_return
+long_mode_do_msr_write(unsigned int msr, uint64_t msr_content)
{
- u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- u32 ecx = regs->ecx;
-
HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
- ecx, msr_content);
+ msr, msr_content);
- switch ( ecx )
+ switch ( msr )
{
case MSR_EFER:
if ( hvm_set_efer(msr_content) )
__restore_debug_registers(v);
}
-static int svm_msr_read_intercept(struct cpu_user_regs *regs)
+static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
- u64 msr_content = 0;
- u32 ecx = regs->ecx, eax, edx;
+ u32 eax, edx;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- switch ( ecx )
+ switch ( msr )
{
case MSR_EFER:
- msr_content = v->arch.hvm_vcpu.guest_efer;
+ *msr_content = v->arch.hvm_vcpu.guest_efer;
break;
case MSR_IA32_SYSENTER_CS:
- msr_content = v->arch.hvm_svm.guest_sysenter_cs;
+ *msr_content = v->arch.hvm_svm.guest_sysenter_cs;
break;
case MSR_IA32_SYSENTER_ESP:
- msr_content = v->arch.hvm_svm.guest_sysenter_esp;
+ *msr_content = v->arch.hvm_svm.guest_sysenter_esp;
break;
case MSR_IA32_SYSENTER_EIP:
- msr_content = v->arch.hvm_svm.guest_sysenter_eip;
+ *msr_content = v->arch.hvm_svm.guest_sysenter_eip;
break;
case MSR_IA32_MC4_MISC: /* Threshold register */
* MCA/MCE: We report that the threshold register is unavailable
* for OS use (locked by the BIOS).
*/
- msr_content = 1ULL << 61; /* MC4_MISC.Locked */
+ *msr_content = 1ULL << 61; /* MC4_MISC.Locked */
break;
case MSR_IA32_EBC_FREQUENCY_ID:
* has been migrated from an Intel host. The value zero is not
* particularly meaningful, but at least avoids the guest crashing!
*/
- msr_content = 0;
+ *msr_content = 0;
break;
case MSR_K8_VM_HSAVE_PA:
goto gpf;
case MSR_IA32_DEBUGCTLMSR:
- msr_content = vmcb->debugctlmsr;
+ *msr_content = vmcb->debugctlmsr;
break;
case MSR_IA32_LASTBRANCHFROMIP:
- msr_content = vmcb->lastbranchfromip;
+ *msr_content = vmcb->lastbranchfromip;
break;
case MSR_IA32_LASTBRANCHTOIP:
- msr_content = vmcb->lastbranchtoip;
+ *msr_content = vmcb->lastbranchtoip;
break;
case MSR_IA32_LASTINTFROMIP:
- msr_content = vmcb->lastintfromip;
+ *msr_content = vmcb->lastintfromip;
break;
case MSR_IA32_LASTINTTOIP:
- msr_content = vmcb->lastinttoip;
+ *msr_content = vmcb->lastinttoip;
break;
case MSR_K7_PERFCTR0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
- vpmu_do_rdmsr(ecx, &msr_content);
+ vpmu_do_rdmsr(msr, msr_content);
break;
default:
- if ( rdmsr_viridian_regs(ecx, &msr_content) ||
- rdmsr_hypervisor_regs(ecx, &msr_content) )
+ if ( rdmsr_viridian_regs(msr, msr_content) ||
+ rdmsr_hypervisor_regs(msr, msr_content) )
break;
- if ( rdmsr_safe(ecx, eax, edx) == 0 )
+ if ( rdmsr_safe(msr, eax, edx) == 0 )
{
- msr_content = ((uint64_t)edx << 32) | eax;
+ *msr_content = ((uint64_t)edx << 32) | eax;
break;
}
goto gpf;
}
- regs->eax = (uint32_t)msr_content;
- regs->edx = (uint32_t)(msr_content >> 32);
-
- HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
- HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
- ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
+ HVMTRACE_3D(MSR_READ, msr,
+ (uint32_t)*msr_content, (uint32_t)(*msr_content >> 32));
+ HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, msr_value=%"PRIx64,
+ msr, *msr_content);
return X86EMUL_OKAY;
gpf:
return X86EMUL_EXCEPTION;
}
-static int svm_msr_write_intercept(struct cpu_user_regs *regs)
+static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
- u64 msr_content = 0;
- u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ HVMTRACE_3D(MSR_WRITE, msr,
+ (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
- HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
-
- switch ( ecx )
+ switch ( msr )
{
case MSR_K8_VM_HSAVE_PA:
goto gpf;
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
- vpmu_do_wrmsr(ecx, msr_content);
+ vpmu_do_wrmsr(msr, msr_content);
break;
default:
- if ( wrmsr_viridian_regs(ecx, msr_content) )
+ if ( wrmsr_viridian_regs(msr, msr_content) )
break;
- switch ( long_mode_do_msr_write(regs) )
+ switch ( long_mode_do_msr_write(msr, msr_content) )
{
case HNDL_unhandled:
- wrmsr_hypervisor_regs(ecx, msr_content);
+ wrmsr_hypervisor_regs(msr, msr_content);
break;
case HNDL_exception_raised:
return X86EMUL_EXCEPTION;
int rc, inst_len;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ uint64_t msr_content;
if ( vmcb->exitinfo1 == 0 )
{
if ( (inst_len = __get_instruction_length(v, INSTR_RDMSR)) == 0 )
return;
- rc = hvm_msr_read_intercept(regs);
+ rc = hvm_msr_read_intercept(regs->ecx, &msr_content);
+ if ( rc == X86EMUL_OKAY )
+ {
+ regs->eax = (uint32_t)msr_content;
+ regs->edx = (uint32_t)(msr_content >> 32);
+ }
}
else
{
if ( (inst_len = __get_instruction_length(v, INSTR_WRMSR)) == 0 )
return;
- rc = hvm_msr_write_intercept(regs);
+ msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
+ rc = hvm_msr_write_intercept(regs->ecx, msr_content);
}
if ( rc == X86EMUL_OKAY )
unsigned int *ecx, unsigned int *edx);
static void vmx_wbinvd_intercept(void);
static void vmx_fpu_dirty_intercept(void);
-static int vmx_msr_read_intercept(struct cpu_user_regs *regs);
-static int vmx_msr_write_intercept(struct cpu_user_regs *regs);
+static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
+static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
static void vmx_invlpg_intercept(unsigned long vaddr);
static void __ept_sync_domain(void *info);
set_bit(VMX_INDEX_MSR_ ## address, &host_msr_state->flags); \
break
-static enum handler_return long_mode_do_msr_read(struct cpu_user_regs *regs)
+static enum handler_return
+long_mode_do_msr_read(unsigned int msr, uint64_t *msr_content)
{
- u64 msr_content = 0;
- u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
- switch ( ecx )
+ switch ( msr )
{
case MSR_EFER:
- msr_content = v->arch.hvm_vcpu.guest_efer;
+ *msr_content = v->arch.hvm_vcpu.guest_efer;
break;
case MSR_FS_BASE:
- msr_content = __vmread(GUEST_FS_BASE);
+ *msr_content = __vmread(GUEST_FS_BASE);
break;
case MSR_GS_BASE:
- msr_content = __vmread(GUEST_GS_BASE);
+ *msr_content = __vmread(GUEST_GS_BASE);
break;
case MSR_SHADOW_GS_BASE:
- rdmsrl(MSR_SHADOW_GS_BASE, msr_content);
+ rdmsrl(MSR_SHADOW_GS_BASE, *msr_content);
break;
case MSR_STAR:
- msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
+ *msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_STAR];
break;
case MSR_LSTAR:
- msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
+ *msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_LSTAR];
break;
case MSR_CSTAR:
- msr_content = v->arch.hvm_vmx.cstar;
+ *msr_content = v->arch.hvm_vmx.cstar;
break;
case MSR_SYSCALL_MASK:
- msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
+ *msr_content = guest_msr_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
break;
default:
return HNDL_unhandled;
}
- HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
-
- regs->eax = (u32)(msr_content >> 0);
- regs->edx = (u32)(msr_content >> 32);
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, msr, *msr_content);
return HNDL_done;
}
-static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
+static enum handler_return
+long_mode_do_msr_write(unsigned int msr, uint64_t msr_content)
{
- u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
- u32 ecx = regs->ecx;
struct vcpu *v = current;
struct vmx_msr_state *guest_msr_state = &v->arch.hvm_vmx.msr_state;
struct vmx_msr_state *host_msr_state = &this_cpu(host_msr_state);
- HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, ecx, msr_content);
+ HVM_DBG_LOG(DBG_LEVEL_0, "msr 0x%x content 0x%"PRIx64, msr, msr_content);
- switch ( ecx )
+ switch ( msr )
{
case MSR_EFER:
if ( hvm_set_efer(msr_content) )
if ( !is_canonical_address(msr_content) )
goto uncanonical_address;
- if ( ecx == MSR_FS_BASE )
+ if ( msr == MSR_FS_BASE )
__vmwrite(GUEST_FS_BASE, msr_content);
- else if ( ecx == MSR_GS_BASE )
+ else if ( msr == MSR_GS_BASE )
__vmwrite(GUEST_GS_BASE, msr_content);
else
wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
return HNDL_done;
uncanonical_address:
- HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
+ HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", msr);
vmx_inject_hw_exception(TRAP_gp_fault, 0);
exception_raised:
return HNDL_exception_raised;
return 0;
}
-static int vmx_msr_read_intercept(struct cpu_user_regs *regs)
+static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
{
- u64 msr_content = 0;
- u32 ecx = regs->ecx, eax, edx;
+ u32 eax, edx;
- HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", ecx);
+ HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x", msr);
- switch ( ecx )
+ switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
- msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
+ *msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
break;
case MSR_IA32_SYSENTER_ESP:
- msr_content = __vmread(GUEST_SYSENTER_ESP);
+ *msr_content = __vmread(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_SYSENTER_EIP:
- msr_content = __vmread(GUEST_SYSENTER_EIP);
+ *msr_content = __vmread(GUEST_SYSENTER_EIP);
break;
case MSR_IA32_DEBUGCTLMSR:
- msr_content = __vmread(GUEST_IA32_DEBUGCTL);
+ *msr_content = __vmread(GUEST_IA32_DEBUGCTL);
#ifdef __i386__
- msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
+ *msr_content |= (u64)__vmread(GUEST_IA32_DEBUGCTL_HIGH) << 32;
#endif
break;
case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
goto gp_fault;
case MSR_IA32_MISC_ENABLE:
- rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
+ rdmsrl(MSR_IA32_MISC_ENABLE, *msr_content);
/* Debug Trace Store is not supported. */
- msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
+ *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
break;
default:
- if ( vpmu_do_rdmsr(ecx, &msr_content) )
+ if ( vpmu_do_rdmsr(msr, msr_content) )
break;
- if ( passive_domain_do_rdmsr(regs) )
+ if ( passive_domain_do_rdmsr(msr, msr_content) )
goto done;
- switch ( long_mode_do_msr_read(regs) )
+ switch ( long_mode_do_msr_read(msr, msr_content) )
{
case HNDL_unhandled:
break;
goto done;
}
- if ( vmx_read_guest_msr(ecx, &msr_content) == 0 )
+ if ( vmx_read_guest_msr(msr, msr_content) == 0 )
break;
- if ( is_last_branch_msr(ecx) )
+ if ( is_last_branch_msr(msr) )
{
- msr_content = 0;
+ *msr_content = 0;
break;
}
- if ( rdmsr_viridian_regs(ecx, &msr_content) ||
- rdmsr_hypervisor_regs(ecx, &msr_content) )
+ if ( rdmsr_viridian_regs(msr, msr_content) ||
+ rdmsr_hypervisor_regs(msr, msr_content) )
break;
- if ( rdmsr_safe(ecx, eax, edx) == 0 )
+ if ( rdmsr_safe(msr, eax, edx) == 0 )
{
- msr_content = ((uint64_t)edx << 32) | eax;
+ *msr_content = ((uint64_t)edx << 32) | eax;
break;
}
goto gp_fault;
}
- regs->eax = (uint32_t)msr_content;
- regs->edx = (uint32_t)(msr_content >> 32);
-
done:
- HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
- HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
- ecx, (unsigned long)regs->eax,
- (unsigned long)regs->edx);
+ HVMTRACE_3D(MSR_READ, msr,
+ (uint32_t)*msr_content, (uint32_t)(*msr_content >> 32));
+ HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, msr_value=0x%"PRIx64,
+ msr, *msr_content);
return X86EMUL_OKAY;
gp_fault:
vmx_vmcs_exit(v);
}
-static int vmx_msr_write_intercept(struct cpu_user_regs *regs)
+static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
{
- u32 ecx = regs->ecx;
- u64 msr_content;
struct vcpu *v = current;
- HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, eax=%x, edx=%x",
- ecx, (u32)regs->eax, (u32)regs->edx);
+ HVM_DBG_LOG(DBG_LEVEL_1, "ecx=%x, msr_value=0x%"PRIx64,
+ msr, msr_content);
- msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ HVMTRACE_3D(MSR_WRITE, msr,
+ (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
- HVMTRACE_3D (MSR_WRITE, ecx, regs->eax, regs->edx);
-
- switch ( ecx )
+ switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
__vmwrite(GUEST_SYSENTER_CS, msr_content);
}
if ( (rc < 0) ||
- (vmx_add_host_load_msr(ecx) < 0) )
+ (vmx_add_host_load_msr(msr) < 0) )
vmx_inject_hw_exception(TRAP_machine_check, 0);
else
{
case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
goto gp_fault;
default:
- if ( vpmu_do_wrmsr(ecx, msr_content) )
+ if ( vpmu_do_wrmsr(msr, msr_content) )
return X86EMUL_OKAY;
- if ( passive_domain_do_wrmsr(regs) )
+ if ( passive_domain_do_wrmsr(msr, msr_content) )
return X86EMUL_OKAY;
- if ( wrmsr_viridian_regs(ecx, msr_content) )
+ if ( wrmsr_viridian_regs(msr, msr_content) )
break;
- switch ( long_mode_do_msr_write(regs) )
+ switch ( long_mode_do_msr_write(msr, msr_content) )
{
case HNDL_unhandled:
- if ( (vmx_write_guest_msr(ecx, msr_content) != 0) &&
- !is_last_branch_msr(ecx) )
- wrmsr_hypervisor_regs(ecx, msr_content);
+ if ( (vmx_write_guest_msr(msr, msr_content) != 0) &&
+ !is_last_branch_msr(msr) )
+ wrmsr_hypervisor_regs(msr, msr_content);
break;
case HNDL_exception_raised:
return X86EMUL_EXCEPTION;
vmx_dr_access(exit_qualification, regs);
break;
case EXIT_REASON_MSR_READ:
+ {
+ uint64_t msr_content;
inst_len = __get_instruction_length(); /* Safe: RDMSR */
- if ( hvm_msr_read_intercept(regs) == X86EMUL_OKAY )
+ if ( hvm_msr_read_intercept(regs->ecx, &msr_content) == X86EMUL_OKAY )
+ {
+ regs->eax = (uint32_t)msr_content;
+ regs->edx = (uint32_t)(msr_content >> 32);
__update_guest_eip(inst_len);
+ }
break;
+ }
case EXIT_REASON_MSR_WRITE:
+ {
+ uint64_t msr_content;
inst_len = __get_instruction_length(); /* Safe: WRMSR */
- if ( hvm_msr_write_intercept(regs) == X86EMUL_OKAY )
+ msr_content = ((uint64_t)regs->edx << 32) | (uint32_t)regs->eax;
+ if ( hvm_msr_write_intercept(regs->ecx, msr_content) == X86EMUL_OKAY )
__update_guest_eip(inst_len);
break;
+ }
case EXIT_REASON_MWAIT_INSTRUCTION:
case EXIT_REASON_MONITOR_INSTRUCTION:
static char *cpu_type;
-static int passive_domain_msr_op_checks(struct cpu_user_regs *regs ,int *typep, int *indexp)
+static int passive_domain_msr_op_checks(unsigned int msr, int *typep, int *indexp)
{
struct vpmu_struct *vpmu = vcpu_vpmu(current);
if ( model == NULL )
return 0;
if ( model->is_arch_pmu_msr == NULL )
return 0;
- if ( !model->is_arch_pmu_msr((u64)regs->ecx, typep, indexp) )
+ if ( !model->is_arch_pmu_msr(msr, typep, indexp) )
return 0;
if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) )
return 1;
}
-int passive_domain_do_rdmsr(struct cpu_user_regs *regs)
+int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
- u64 msr_content;
int type, index;
- if ( !passive_domain_msr_op_checks(regs, &type, &index))
+ if ( !passive_domain_msr_op_checks(msr, &type, &index))
return 0;
- model->load_msr(current, type, index, &msr_content);
- regs->eax = msr_content & 0xFFFFFFFF;
- regs->edx = msr_content >> 32;
+ model->load_msr(current, type, index, msr_content);
return 1;
}
-int passive_domain_do_wrmsr(struct cpu_user_regs *regs)
+int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content)
{
- u64 msr_content;
int type, index;
- if ( !passive_domain_msr_op_checks(regs, &type, &index))
+ if ( !passive_domain_msr_op_checks(msr, &type, &index))
return 0;
- msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
model->save_msr(current, type, index, msr_content);
return 1;
}
unsigned int *ecx, unsigned int *edx);
void (*wbinvd_intercept)(void);
void (*fpu_dirty_intercept)(void);
- int (*msr_read_intercept)(struct cpu_user_regs *regs);
- int (*msr_write_intercept)(struct cpu_user_regs *regs);
+ int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
+ int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
void (*invlpg_intercept)(unsigned long vaddr);
void (*set_uc_mode)(struct vcpu *v);
void (*set_info_guest)(struct vcpu *v);
int hvm_set_cr0(unsigned long value);
int hvm_set_cr3(unsigned long value);
int hvm_set_cr4(unsigned long value);
-int hvm_msr_read_intercept(struct cpu_user_regs *regs);
-int hvm_msr_write_intercept(struct cpu_user_regs *regs);
+int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content);
+int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content);
#endif /* __ASM_X86_HVM_SUPPORT_H__ */
"xenoprof/x86 with autotranslated mode enabled" \
"isn't supported yet\n"); \
} while (0)
-int passive_domain_do_rdmsr(struct cpu_user_regs *regs);
-int passive_domain_do_wrmsr(struct cpu_user_regs *regs);
+int passive_domain_do_rdmsr(unsigned int msr, uint64_t *msr_content);
+int passive_domain_do_wrmsr(unsigned int msr, uint64_t msr_content);
void passive_domain_destroy(struct vcpu *v);
#endif /* __ASM_X86_XENOPROF_H__ */
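For reference, the oprofile model callbacks implied by the call sites above now traffic in 64-bit values directly. The struct tag and exact field types below are an inferred sketch based on those call sites, not something defined by this patch:

struct pmu_model_ops /* hypothetical tag */ {
    int  (*is_arch_pmu_msr)(uint64_t msr_index, int *type, int *index);
    void (*load_msr)(struct vcpu *v, int type, int index, uint64_t *msr_content);
    void (*save_msr)(struct vcpu *v, int type, int index, uint64_t msr_content);
};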