case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
- vpmu_do_rdmsr(regs);
- goto done;
+ vpmu_do_rdmsr(ecx, &msr_content);
+ break;
default:
regs->eax = (uint32_t)msr_content;
regs->edx = (uint32_t)(msr_content >> 32);
-done:
HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
- vpmu_do_wrmsr(regs);
- goto done;
+ vpmu_do_wrmsr(ecx, msr_content);
+ break;
default:
if ( wrmsr_viridian_regs(ecx, msr_content) )
}
break;
}
-done:
return X86EMUL_OKAY;
gpf:
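Note (illustrative, not part of the patch): the removed `done:` label is the crux of the read-side change. vpmu_do_rdmsr() no longer writes EAX/EDX itself, so the handler now breaks out of the switch and lets the common split run. Sketch of the resulting path, using the locals already visible in the hunk above:

    case MSR_K7_EVNTSEL3:
        vpmu_do_rdmsr(ecx, &msr_content);      /* vPMU fills msr_content only */
        break;                                 /* leave the switch ...        */
    ...
    regs->eax = (uint32_t)msr_content;         /* ... so the common split runs */
    regs->edx = (uint32_t)(msr_content >> 32);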
apic_write(APIC_LVTPC, ctx->hw_lapic_lvtpc | APIC_LVT_MASKED);
}
-static void context_update(struct cpu_user_regs *regs, u64 msr_content)
+static void context_update(unsigned int msr, u64 msr_content)
{
int i;
- u32 addr = regs->ecx;
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct amd_vpmu_context *ctxt = vpmu->context;
for ( i = 0; i < NUM_COUNTERS; i++ )
- if ( addr == AMD_F10H_COUNTERS[i] )
+ if ( msr == AMD_F10H_COUNTERS[i] )
ctxt->counters[i] = msr_content;
for ( i = 0; i < NUM_COUNTERS; i++ )
- if ( addr == AMD_F10H_CTRLS[i] )
+ if ( msr == AMD_F10H_CTRLS[i] )
ctxt->ctrls[i] = msr_content;
ctxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
}
-static int amd_vpmu_do_wrmsr(struct cpu_user_regs *regs)
+static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
{
- u64 msr_content = 0;
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
-
/* For all counters, enable guest only mode for HVM guest */
- if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) &&
+ if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
!(is_guest_mode(msr_content)) )
{
set_guest_mode(msr_content);
}
/* check if the first counter is enabled */
- if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) &&
+ if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
is_pmu_enabled(msr_content) && !(vpmu->flags & VPMU_RUNNING) )
{
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
}
/* stop saving & restore if guest stops first counter */
- if ( (get_pmu_reg_type(regs->ecx) == MSR_TYPE_CTRL) &&
+ if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
(is_pmu_enabled(msr_content) == 0) && (vpmu->flags & VPMU_RUNNING) )
{
apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
}
/* Update vpmu context immediately */
- context_update(regs, msr_content);
+ context_update(msr, msr_content);
/* Write to hw counters */
- wrmsrl(regs->ecx, msr_content);
+ wrmsrl(msr, msr_content);
return 1;
}
-static int amd_vpmu_do_rdmsr(struct cpu_user_regs *regs)
+static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
- u64 msr_content = 0;
-
- rdmsrl(regs->ecx, msr_content);
- regs->eax = msr_content & 0xFFFFFFFF;
- regs->edx = msr_content >> 32;
-
+ rdmsrl(msr, *msr_content);
return 1;
}
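Note (illustrative, not part of the patch): these AMD handlers are reached through the per-vendor ops table. A minimal sketch of how they plug into the new prototypes; the table name and layout are assumptions about the surrounding file, which this patch does not touch:

    static struct arch_vpmu_ops amd_vpmu_ops = {
        .do_wrmsr = amd_vpmu_do_wrmsr,  /* (unsigned int msr, uint64_t msr_content)  */
        .do_rdmsr = amd_vpmu_do_rdmsr,  /* (unsigned int msr, uint64_t *msr_content) */
        /* interrupt/initialise/destroy hooks are unchanged by this patch */
    };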
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
break;
default:
- if ( vpmu_do_rdmsr(regs) )
- goto done;
+ if ( vpmu_do_rdmsr(ecx, &msr_content) )
+ break;
if ( passive_domain_do_rdmsr(regs) )
goto done;
switch ( long_mode_do_msr_read(regs) )
case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
goto gp_fault;
default:
- if ( vpmu_do_wrmsr(regs) )
+ if ( vpmu_do_wrmsr(ecx, msr_content) )
return X86EMUL_OKAY;
if ( passive_domain_do_wrmsr(regs) )
return X86EMUL_OKAY;
return 1;
}
-static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
+static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
{
- u32 ecx = regs->ecx;
- u64 msr_content, global_ctrl, non_global_ctrl;
+ u64 global_ctrl, non_global_ctrl;
char pmu_enable = 0;
int i, tmp;
int type = -1, index = -1;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct core2_vpmu_context *core2_vpmu_cxt = NULL;
- if ( !core2_vpmu_msr_common_check(ecx, &type, &index) )
+ if ( !core2_vpmu_msr_common_check(msr, &type, &index) )
return 0;
- msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
core2_vpmu_cxt = vpmu->context;
- switch ( ecx )
+ switch ( msr )
{
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
core2_vpmu_cxt->global_ovf_status &= ~msr_content;
}
break;
default:
- tmp = ecx - MSR_P6_EVNTSEL0;
+ tmp = msr - MSR_P6_EVNTSEL0;
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
if ( tmp >= 0 && tmp < core2_get_pmc_count() )
core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
if (inject_gp)
vmx_inject_hw_exception(TRAP_gp_fault, 0);
else
- wrmsrl(ecx, msr_content);
+ wrmsrl(msr, msr_content);
}
else
vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
return 1;
}
-static int core2_vpmu_do_rdmsr(struct cpu_user_regs *regs)
+static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
- u64 msr_content = 0;
int type = -1, index = -1;
struct vcpu *v = current;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
struct core2_vpmu_context *core2_vpmu_cxt = NULL;
- if ( !core2_vpmu_msr_common_check(regs->ecx, &type, &index) )
+ if ( !core2_vpmu_msr_common_check(msr, &type, &index) )
return 0;
core2_vpmu_cxt = vpmu->context;
- switch ( regs->ecx )
+ switch ( msr )
{
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ *msr_content = 0;
break;
case MSR_CORE_PERF_GLOBAL_STATUS:
- msr_content = core2_vpmu_cxt->global_ovf_status;
+ *msr_content = core2_vpmu_cxt->global_ovf_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
- vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
+ vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
break;
default:
- rdmsrl(regs->ecx, msr_content);
+ rdmsrl(msr, *msr_content);
}
- regs->eax = msr_content & 0xFFFFFFFF;
- regs->edx = msr_content >> 32;
return 1;
}
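Note (illustrative, not part of the patch): the explicit `*msr_content = 0` added in the MSR_CORE_PERF_GLOBAL_OVF_CTRL case replaces the zero-initialised local the old regs-based handler relied on. With the pointer interface the handler must store a value on every handled path, and the caller performs the EAX:EDX split. Caller fragment reusing names from the vmx.c hunks above:

    uint64_t msr_content;
    if ( vpmu_do_rdmsr(ecx, &msr_content) )    /* dispatches to core2_vpmu_do_rdmsr */
    {
        regs->eax = (uint32_t)msr_content;     /* handler has always written it */
        regs->edx = (uint32_t)(msr_content >> 32);
    }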
static int __read_mostly opt_vpmu_enabled;
boolean_param("vpmu", opt_vpmu_enabled);
-int vpmu_do_wrmsr(struct cpu_user_regs *regs)
+int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
{
struct vpmu_struct *vpmu = vcpu_vpmu(current);
if ( vpmu->arch_vpmu_ops )
- return vpmu->arch_vpmu_ops->do_wrmsr(regs);
+ return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content);
return 0;
}
-int vpmu_do_rdmsr(struct cpu_user_regs *regs)
+int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
struct vpmu_struct *vpmu = vcpu_vpmu(current);
if ( vpmu->arch_vpmu_ops )
- return vpmu->arch_vpmu_ops->do_rdmsr(regs);
+ return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
return 0;
}
/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
- int (*do_wrmsr)(struct cpu_user_regs *regs);
- int (*do_rdmsr)(struct cpu_user_regs *regs);
+ int (*do_wrmsr)(unsigned int msr, uint64_t msr_content);
+ int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
int (*do_interrupt)(struct cpu_user_regs *regs);
void (*arch_vpmu_initialise)(struct vcpu *v);
void (*arch_vpmu_destroy)(struct vcpu *v);
#define VPMU_CONTEXT_LOADED 0x2
#define VPMU_RUNNING 0x4
#define PASSIVE_DOMAIN_ALLOCATED 0x8
-int vpmu_do_wrmsr(struct cpu_user_regs *regs);
-int vpmu_do_rdmsr(struct cpu_user_regs *regs);
+int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content);
+int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
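Note (illustrative, not part of the patch): with these prototypes the vPMU layer no longer needs struct cpu_user_regs for MSR accesses; only do_interrupt keeps the register frame. A hedged write-path sketch from an intercept handler, mirroring the EAX:EDX assembly removed from amd_vpmu_do_wrmsr above:

    uint64_t msr_content = (uint32_t)regs->eax | ((uint64_t)regs->edx << 32);
    if ( vpmu_do_wrmsr(regs->ecx, msr_content) )
        return X86EMUL_OKAY;    /* MSR handled by the vPMU */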