#include <asm/apic.h>
#include <asm/regs.h>
#include <asm/current.h>
-
+
#include "op_counter.h"
#include "op_x86_model.h"
-
+
struct op_counter_config counter_config[OP_MAX_COUNTER];
struct op_ibs_config ibs_config;
if ( ovf && is_active(current->domain) && !xen_mode )
send_guest_vcpu_virq(current, VIRQ_XENOPROF);
- if ( ovf == 2 )
+ if ( ovf == 2 )
current->nmi_pending = 1;
return 1;
}
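/*
 * ovf here is presumably the overflow status returned by the active model's
 * counter-check hook (the ppro model fragment later in this patch returns an
 * ovf value the same way): non-zero means at least one counter wrapped, so
 * the sample is signalled to the profiled domain via VIRQ_XENOPROF, and
 * ovf == 2 additionally marks an NMI as pending on the current vCPU so it is
 * reflected into the guest.  Returning 1 reports the NMI as consumed.
 */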
-
-
+
+
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr * counters = msrs->counters;
	struct op_msr * controls = msrs->controls;
	unsigned int i;

for (i = 0; i < nr_ctrs; ++i) {
rdmsrl(counters[i].addr, counters[i].value);
}
-
+
for (i = 0; i < nr_ctrls; ++i) {
rdmsrl(controls[i].addr, controls[i].value);
}
* of msrs are distinct for save and setup operations
*/
on_each_cpu(nmi_save_registers, NULL, 1);
- return 0;
+ return 0;
}
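/*
 * on_each_cpu(fn, arg, wait) runs fn on every online CPU; the final 1 asks
 * the caller to wait for completion, so by the time 0 is returned here every
 * CPU has snapshotted its own counter and control MSRs through
 * nmi_save_registers(), which presumably wraps the per-CPU
 * nmi_cpu_save_registers() shown above.
 */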
int nmi_enable_virq(void)
{
	set_nmi_callback(nmi_callback);
	return 0;
}

void nmi_disable_virq(void)
{
unset_nmi_callback();
-}
+}
static void nmi_restore_registers(struct op_msrs * msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr * counters = msrs->counters;
	struct op_msr * controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
wrmsrl(controls[i].addr, controls[i].value);
}
-
+
for (i = 0; i < nr_ctrs; ++i) {
wrmsrl(counters[i].addr, counters[i].value);
}
}
-
+
static void nmi_cpu_shutdown(void * dummy)
{
nmi_restore_registers(msrs);
}
-
+
void nmi_release_counters(void)
{
on_each_cpu(nmi_cpu_shutdown, NULL, 1);
free_msrs();
}
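/*
 * Teardown mirrors the reserve/save path above: nmi_cpu_shutdown() runs on
 * every CPU and rewrites the MSR values captured by nmi_cpu_save_registers(),
 * then free_msrs() presumably releases the per-CPU MSR bookkeeping that was
 * set up when the counters were reserved.
 */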
-
+
static void nmi_cpu_start(void * dummy)
{
int cpu = smp_processor_id();
apic_write(APIC_LVTPC, APIC_DM_NMI);
model->start(msrs);
}
-
+
int nmi_start(void)
{
on_each_cpu(nmi_cpu_start, NULL, 1);
return 0;
}
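/*
 * nmi_cpu_start() points the local APIC performance-counter LVT entry at NMI
 * delivery (APIC_DM_NMI) and then calls the model's start hook for that
 * CPU's MSR set; nmi_start() fans this out to all CPUs.  Under the usual
 * xenoprof flow the expected ordering is reserve/save -> nmi_start() ->
 * nmi_stop() -> nmi_release_counters().
 */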
-
-
+
+
static void nmi_cpu_stop(void * dummy)
{
unsigned int v;
apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
apic_write(APIC_LVTERR, v);
}
-
-
+
+
void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}

static int __init p4_init(char ** cpu_type)
-{
+{
__u8 cpu_model = current_cpu_data.x86_model;
if ((cpu_model > 6) || (cpu_model == 5)) {
__u8 vendor = current_cpu_data.x86_vendor;
__u8 family = current_cpu_data.x86;
__u8 _model = current_cpu_data.x86_model;
-
+
if (!cpu_has_apic) {
printk("xenoprof: Initialization failed. No APIC\n");
return -ENODEV;
break;
}
break;
-
+
case X86_VENDOR_INTEL:
switch (family) {
/* Pentium IV */
#include <asm/current.h>
#include <asm/hvm/vpmu.h>
#include <asm/hvm/vmx/vpmu_core2.h>
-
+
#include "op_x86_model.h"
#include "op_counter.h"
static int num_counters = 2;
static int counter_width = 32;
-#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
+#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
#define CTRL_READ(msr_content,msrs,c) do {rdmsrl((msrs->controls[(c)].addr), (msr_content));} while (0)
#define CTRL_WRITE(msr_content,msrs,c) do {wrmsrl((msrs->controls[(c)].addr), (msr_content));} while (0)
#define CTRL_SET_KERN(val,k) (val |= ((k & 1ULL) << 17))
#define CTRL_SET_UM(val, m) (val |= (m << 8))
#define CTRL_SET_EVENT(val, e) (val |= e)
-#define IS_ACTIVE(val) (val & (1ULL << 22) )
+#define IS_ACTIVE(val) (val & (1ULL << 22) )
#define IS_ENABLE(val) (val & (1ULL << 20) )
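/*
 * These bit positions follow the architectural IA32_PERFEVTSELx layout:
 * event select in bits 0-7 (CTRL_SET_EVENT), unit mask in bits 8-15
 * (CTRL_SET_UM), the OS/ring-0 filter in bit 17 (CTRL_SET_KERN), the APIC
 * interrupt enable (INT) in bit 20 (IS_ENABLE) and the counter enable (EN)
 * in bit 22 (IS_ACTIVE).
 */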
static unsigned long reset_value[OP_MAX_COUNTER];
int ppro_has_global_ctrl = 0;
-
+
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
int i;
{
uint64_t msr_content;
int i;
-
+
if (cpu_has_arch_perfmon) {
union cpuid10_eax eax;
eax.full = cpuid_eax(0xa);
CTRL_CLEAR(msr_content);
CTRL_WRITE(msr_content, msrs, i);
}
-
+
/* avoid a false detection of ctr overflows in NMI handler */
for (i = 0; i < num_counters; ++i)
wrmsrl(msrs->counters[i].addr, ~0x0ULL);
if (CTR_OVERFLOWED(val)) {
xenoprof_log_event(current, regs, eip, mode, i);
wrmsrl(msrs->counters[i].addr, -reset_value[i]);
- if ( is_passive(current->domain) && (mode != 2) &&
- vpmu_is_set(vcpu_vpmu(current), PASSIVE_DOMAIN_ALLOCATED) )
+ if ( is_passive(current->domain) && (mode != 2) &&
+ vpmu_is_set(vcpu_vpmu(current), PASSIVE_DOMAIN_ALLOCATED) )
{
if ( IS_ACTIVE(msrs_content[i].control) )
{
return ovf;
}
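/*
 * A minimal stand-alone sketch (plain C, <stdint.h> only) of the overflow
 * convention used above, assuming the 32-bit counter_width set in this file:
 * a counter armed with -reset_value keeps bit (counter_width - 1) set until
 * reset_value events have been counted, at which point it wraps past zero,
 * the bit clears and CTR_OVERFLOWED() fires.
 */
#include <stdint.h>

static inline int ctr_overflowed(uint64_t val, int width)
{
	return !(val & (1ULL << (width - 1)));
}

/*
 * e.g. with reset_value = 100000 and width = 32:
 *   armed value: (uint32_t)-100000 = 0xFFFE7960 -> bit 31 set, no overflow
 *   after 100000 events the counter wraps to 0  -> bit 31 clear, overflow
 */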
-
+
static void ppro_start(struct op_msrs const * const msrs)
{
uint64_t msr_content;
if ( (msr_index >= MSR_IA32_PERFCTR0) &&
(msr_index < (MSR_IA32_PERFCTR0 + num_counters)) )
{
- *type = MSR_TYPE_ARCH_COUNTER;
+ *type = MSR_TYPE_ARCH_COUNTER;
*index = msr_index - MSR_IA32_PERFCTR0;
return 1;
}
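/*
 * This maps a raw MSR number onto the (type, index) pair consumed by the
 * save/load helpers below: anything in
 * [MSR_IA32_PERFCTR0, MSR_IA32_PERFCTR0 + num_counters) is treated as an
 * architectural counter, index being its offset within that block, and the
 * return value 1 flags the MSR as belonging to this PMU model.
 */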
gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is "
"unavailable on domain %d vcpu %d.\n",
	         v->domain->domain_id, v->vcpu_id);
- return 0;
+ return 0;
}
static void ppro_free_msr(struct vcpu *v)
case MSR_TYPE_ARCH_CTRL:
*msr_content = msrs[index].control;
break;
- }
+ }
}
static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
{
struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
-
+
switch ( type )
{
case MSR_TYPE_ARCH_COUNTER:
case MSR_TYPE_ARCH_CTRL:
msrs[index].control = msr_content;
break;
- }
+ }
}
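/*
 * For a passive domain the counter/control values live in the vCPU's vpmu
 * context as an array of struct arch_msr_pair; ppro_save_msr() and its load
 * counterpart (the switch fragment just above) copy one entry in or out,
 * selected by (type, index).  A hypothetical caller would save counter 0
 * with ppro_save_msr(v, MSR_TYPE_ARCH_COUNTER, 0, msr_content) and read it
 * back through the load path.
 */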
/*