}
}
-void vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- struct vpmu_struct *vpmu = vcpu_vpmu(current);
-
- if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_cpuid )
- vpmu->arch_vpmu_ops->do_cpuid(input, eax, ebx, ecx, edx);
-}
-
static void vpmu_save_force(void *arg)
{
struct vcpu *v = (struct vcpu *)arg;
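/*
 * The full-width counter MSRs (MSR_IA32_A_PERFCTR0 upwards) alias the
 * legacy performance counters; masking off the address bits in which the
 * two ranges differ maps a full-width access onto its legacy counterpart.
 */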
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_A_PERFCTR0))
static bool_t __read_mostly full_width_write;
-/* Intel-specific VPMU features */
-#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
-#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
-
/*
* MSR_CORE_PERF_FIXED_CTR_CTRL contains the configuration of all fixed
* counters. 4 bits for every counter.
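* For example, the control field for fixed counter i occupies bits
* [i*4+3 : i*4], so it can be extracted as (msr_content >> (i * 4)) & 0xf.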
return 0;
}
-static void core2_vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- switch ( input )
- {
- case 0x1:
-
- if ( vpmu_is_set(vcpu_vpmu(current), VPMU_CPU_HAS_DS) )
- {
- /* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */
- *edx |= cpufeat_mask(X86_FEATURE_DS);
- if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
- *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
- if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
- *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
- }
- break;
-
- case 0xa:
- /* Report at most version 3 since that's all we currently emulate */
- if ( MASK_EXTR(*eax, PMU_VERSION_MASK) > 3 )
- *eax = (*eax & ~PMU_VERSION_MASK) | MASK_INSR(3, PMU_VERSION_MASK);
- break;
- }
-}
-
/* Dump vpmu info on console, called in the context of keyhandler 'q'. */
static void core2_vpmu_dump(const struct vcpu *v)
{
.do_wrmsr = core2_vpmu_do_wrmsr,
.do_rdmsr = core2_vpmu_do_rdmsr,
.do_interrupt = core2_vpmu_do_interrupt,
- .do_cpuid = core2_vpmu_do_cpuid,
.arch_vpmu_destroy = core2_vpmu_destroy,
.arch_vpmu_save = core2_vpmu_save,
.arch_vpmu_load = core2_vpmu_load,
.arch_vpmu_dump = core2_vpmu_dump
};
-static void core2_no_vpmu_do_cpuid(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- /*
- * As in this case the vpmu is not enabled reset some bits in the
- * architectural performance monitoring related part.
- */
- if ( input == 0xa )
- {
- *eax &= ~PMU_VERSION_MASK;
- *eax &= ~PMU_GENERAL_NR_MASK;
- *eax &= ~PMU_GENERAL_WIDTH_MASK;
-
- *edx &= ~PMU_FIXED_NR_MASK;
- *edx &= ~PMU_FIXED_WIDTH_MASK;
- }
-}
-
/*
* If it's a vpmu MSR, set it to 0.
*/
*/
struct arch_vpmu_ops core2_no_vpmu_ops = {
.do_rdmsr = core2_no_vpmu_do_rdmsr,
- .do_cpuid = core2_no_vpmu_do_cpuid,
};
int vmx_vpmu_initialise(struct vcpu *v)
if ( !(hvm_pae_enabled(v) || hvm_long_mode_enabled(v)) )
*edx &= ~cpufeat_mask(X86_FEATURE_PSE36);
}
+
+ if ( vpmu_enabled(v) &&
+ vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
+ {
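+ /* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */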
+ *edx |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ *ecx |= cpufeat_mask(X86_FEATURE_DSCPL);
+ }
+
break;
case 0x7:
}
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
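+ /* Leaf is hidden (all zeroes) unless an Intel vpmu is active for this vcpu. */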
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || !vpmu_enabled(v) )
+ {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
+ /* Report at most version 3 since that's all we currently emulate. */
+ if ( (*eax & 0xff) > 3 )
+ *eax = (*eax & ~0xff) | 3;
+ break;
+
case 0x80000001:
*ecx &= hvm_featureset[FEATURESET_e1c];
*edx &= hvm_featureset[FEATURESET_e1d];
break;
}
- vpmu_do_cpuid(input, eax, ebx, ecx, edx);
-
HVMTRACE_5D (CPUID, input, *eax, *ebx, *ecx, *edx);
}
}
}
+ if ( vpmu_enabled(curr) &&
+ vpmu_is_set(vcpu_vpmu(curr), VPMU_CPU_HAS_DS) )
+ {
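+ /* Switch on the 'Debug Store' feature in CPUID.EAX[1]:EDX[21] */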
+ d |= cpufeat_mask(X86_FEATURE_DS);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DTES64) )
+ c |= cpufeat_mask(X86_FEATURE_DTES64);
+ if ( cpu_has(&current_cpu_data, X86_FEATURE_DSCPL) )
+ c |= cpufeat_mask(X86_FEATURE_DSCPL);
+ }
+
c |= cpufeat_mask(X86_FEATURE_HYPERVISOR);
break;
a = 0;
break;
+ case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
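+ /* Treat the leaf as unsupported unless an Intel vpmu is active for this vcpu. */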
+ if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ !vpmu_enabled(curr) )
+ goto unsupported;
+
+ /* Report at most version 3 since that's all we currently emulate. */
+ if ( (a & 0xff) > 3 )
+ a = (a & ~0xff) | 3;
+ break;
+
case XSTATE_CPUID:
if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
b &= pv_featureset[FEATURESET_e8b];
break;
- case 0x0000000a: /* Architectural Performance Monitor Features (Intel) */
- break;
-
case 0x00000005: /* MONITOR/MWAIT */
case 0x0000000b: /* Extended Topology Enumeration */
case 0x8000000a: /* SVM revision and features */
}
out:
- /* VPMU may decide to modify some of the leaves */
- vpmu_do_cpuid(leaf, &a, &b, &c, &d);
-
regs->eax = a;
regs->ebx = b;
regs->ecx = c;
#define vcpu_vpmu(vcpu) (&(vcpu)->arch.vpmu)
#define vpmu_vcpu(vpmu) container_of((vpmu), struct vcpu, arch.vpmu)
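+/* A vpmu is considered enabled once its context has been allocated. */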
+#define vpmu_enabled(vcpu) vpmu_is_set(vcpu_vpmu(vcpu), VPMU_CONTEXT_ALLOCATED)
#define MSR_TYPE_COUNTER 0
#define MSR_TYPE_CTRL 1
uint64_t supported);
int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
int (*do_interrupt)(struct cpu_user_regs *regs);
- void (*do_cpuid)(unsigned int input,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void (*arch_vpmu_destroy)(struct vcpu *v);
int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
/* PV(H) guests: VPMU registers are accessed by guest from shared page */
#define VPMU_CACHED 0x40
+/* Intel-specific VPMU features */
+#define VPMU_CPU_HAS_DS 0x100 /* Has Debug Store */
+#define VPMU_CPU_HAS_BTS 0x200 /* Has Branch Trace Store */
+
static inline void vpmu_set(struct vpmu_struct *vpmu, const u32 mask)
{
vpmu->flags |= mask;
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
uint64_t supported, bool_t is_write);
void vpmu_do_interrupt(struct cpu_user_regs *regs);
-void vpmu_do_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
void vpmu_save(struct vcpu *v);