svm_fpu_leave(v);
svm_save_dr(v);
+ vpmu_save(v);
svm_sync_vmcb(v);
svm_vmload(root_vmcb[cpu]);
svm_vmsave(root_vmcb[cpu]);
svm_vmload(v->arch.hvm_svm.vmcb);
+ vpmu_load(v);
if ( cpu_has_rdtscp )
wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
return rc;
}
+ vpmu_initialise(v);
return 0;
}
static void svm_vcpu_destroy(struct vcpu *v)
{
svm_destroy_vmcb(v);
+ vpmu_destroy(v); /* release the vPMU context paired with vpmu_initialise() */
+ passive_domain_destroy(v); /* drop passive-profiling (xenoprof) state for this vcpu */
}
static void svm_inject_exception(
static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
{
- return 0;
+ return vpmu_do_interrupt(regs); /* forward PMC overflow interrupts to the vPMU layer instead of discarding them */
}
static int svm_cpu_prepare(unsigned int cpu)
msr_content = vmcb->lastinttoip;
break;
+ case MSR_K7_PERFCTR0:
+ case MSR_K7_PERFCTR1:
+ case MSR_K7_PERFCTR2:
+ case MSR_K7_PERFCTR3:
+ case MSR_K7_EVNTSEL0:
+ case MSR_K7_EVNTSEL1:
+ case MSR_K7_EVNTSEL2:
+ case MSR_K7_EVNTSEL3:
+ vpmu_do_rdmsr(regs);
+ goto done;
+
default:
if ( rdmsr_viridian_regs(ecx, &msr_content) ||
regs->eax = (uint32_t)msr_content;
regs->edx = (uint32_t)(msr_content >> 32);
+done:
HVMTRACE_3D (MSR_READ, ecx, regs->eax, regs->edx);
HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
vmcb->lastinttoip = msr_content;
break;
+ case MSR_K7_PERFCTR0:
+ case MSR_K7_PERFCTR1:
+ case MSR_K7_PERFCTR2:
+ case MSR_K7_PERFCTR3:
+ case MSR_K7_EVNTSEL0:
+ case MSR_K7_EVNTSEL1:
+ case MSR_K7_EVNTSEL2:
+ case MSR_K7_EVNTSEL3:
+ vpmu_do_wrmsr(regs);
+ goto done;
+
default:
if ( wrmsr_viridian_regs(ecx, msr_content) )
break;
}
break;
}
-
+done:
return X86EMUL_OKAY;
gpf:
#include <public/sched.h>
#include <public/hvm/save.h>
#include <asm/hvm/vpmu.h>
+#include <asm/hvm/svm/svm.h>
+#include <asm/hvm/svm/vmcb.h>
static int __read_mostly opt_vpmu_enabled;
boolean_param("vpmu", opt_vpmu_enabled);
}
extern struct arch_vpmu_ops core2_vpmu_ops;
+extern struct arch_vpmu_ops amd_vpmu_ops;
+
void vpmu_initialise(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ __u8 vendor = current_cpu_data.x86_vendor;
+ __u8 family = current_cpu_data.x86;
+ __u8 cpu_model = current_cpu_data.x86_model;
if ( !opt_vpmu_enabled )
return;
if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
vpmu_destroy(v);
- if ( current_cpu_data.x86 == 6 )
+ switch ( vendor )
{
- switch ( current_cpu_data.x86_model )
+ case X86_VENDOR_AMD:
{
- case 15:
- case 23:
- case 26:
- case 29:
- vpmu->arch_vpmu_ops = &core2_vpmu_ops;
- break;
+ switch ( family )
+ {
+ case 0x10:
+ vpmu->arch_vpmu_ops = &amd_vpmu_ops;
+ break;
+ default:
+ printk("VMPU: Initialization failed. "
+ "AMD processor family %d has not "
+ "been supported\n", family);
+ return;
+ }
}
+ break;
+
+ case X86_VENDOR_INTEL:
+ {
+ if ( family == 6 )
+ {
+ switch ( cpu_model )
+ {
+ case 15:
+ case 23:
+ case 26:
+ case 29:
+ vpmu->arch_vpmu_ops = &core2_vpmu_ops;
+ break;
+ }
+ }
+ }
+ break;
+
+ default:
+ printk("VMPU: Initialization failed. "
+ "Unknown CPU vendor %d\n", vendor);
+ return;
}
if ( vpmu->arch_vpmu_ops != NULL )
#include <asm/processor.h>
#include <asm/hvm/svm/vmcb.h>
#include <asm/i387.h>
+#include <asm/hvm/vpmu.h>
void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);