ia64/xen-unstable
changeset 18733:2a022ee37392
x86, hvm, xenoprof: Add full support of HVM guest to xenoprofile on Intel P6.
Signed-off-by: Ronghui Duan <ronghui.duan@intel.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Mon Oct 27 13:31:15 2008 +0000 (2008-10-27) |
parents | 4413d53a8320 |
children | 324b9b1dd71d |
files | xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/vpmu_core2.c xen/arch/x86/oprofile/nmi_int.c xen/arch/x86/oprofile/op_model_ppro.c xen/arch/x86/oprofile/op_x86_model.h xen/common/xenoprof.c xen/include/asm-x86/hvm/vmx/vpmu.h xen/include/asm-x86/hvm/vmx/vpmu_core2.h xen/include/xen/xenoprof.h |
line diff
1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Mon Oct 27 13:29:35 2008 +0000 1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Mon Oct 27 13:31:15 2008 +0000 1.3 @@ -26,6 +26,7 @@ 1.4 #include <xen/domain_page.h> 1.5 #include <xen/hypercall.h> 1.6 #include <xen/perfc.h> 1.7 +#include <xen/xenoprof.h> 1.8 #include <asm/current.h> 1.9 #include <asm/io.h> 1.10 #include <asm/regs.h> 1.11 @@ -132,6 +133,7 @@ static void vmx_vcpu_destroy(struct vcpu 1.12 { 1.13 vmx_destroy_vmcs(v); 1.14 vpmu_destroy(v); 1.15 + passive_domain_destroy(v); 1.16 } 1.17 1.18 #ifdef __x86_64__ 1.19 @@ -1666,6 +1668,8 @@ static int vmx_msr_read_intercept(struct 1.20 default: 1.21 if ( vpmu_do_rdmsr(regs) ) 1.22 goto done; 1.23 + if ( passive_domain_do_rdmsr(regs) ) 1.24 + goto done; 1.25 switch ( long_mode_do_msr_read(regs) ) 1.26 { 1.27 case HNDL_unhandled: 1.28 @@ -1861,6 +1865,8 @@ static int vmx_msr_write_intercept(struc 1.29 default: 1.30 if ( vpmu_do_wrmsr(regs) ) 1.31 return X86EMUL_OKAY; 1.32 + if ( passive_domain_do_wrmsr(regs) ) 1.33 + return X86EMUL_OKAY; 1.34 1.35 if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) ) 1.36 break;
2.1 --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Oct 27 13:29:35 2008 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Oct 27 13:31:15 2008 +0000 2.3 @@ -35,6 +35,26 @@ 2.4 #include <asm/hvm/vmx/vpmu.h> 2.5 #include <asm/hvm/vmx/vpmu_core2.h> 2.6 2.7 +u32 core2_counters_msr[] = { 2.8 + MSR_CORE_PERF_FIXED_CTR0, 2.9 + MSR_CORE_PERF_FIXED_CTR1, 2.10 + MSR_CORE_PERF_FIXED_CTR2}; 2.11 + 2.12 +/* Core 2 Non-architectual Performance Control MSRs. */ 2.13 +u32 core2_ctrls_msr[] = { 2.14 + MSR_CORE_PERF_FIXED_CTR_CTRL, 2.15 + MSR_IA32_PEBS_ENABLE, 2.16 + MSR_IA32_DS_AREA}; 2.17 + 2.18 +struct pmumsr core2_counters = { 2.19 + 3, 2.20 + core2_counters_msr 2.21 +}; 2.22 + 2.23 +struct pmumsr core2_ctrls = { 2.24 + 3, 2.25 + core2_ctrls_msr 2.26 +}; 2.27 static int arch_pmc_cnt; 2.28 2.29 static int core2_get_pmc_count(void)
3.1 --- a/xen/arch/x86/oprofile/nmi_int.c Mon Oct 27 13:29:35 2008 +0000 3.2 +++ b/xen/arch/x86/oprofile/nmi_int.c Mon Oct 27 13:31:15 2008 +0000 3.3 @@ -36,6 +36,55 @@ static unsigned long saved_lvtpc[NR_CPUS 3.4 static char *cpu_type; 3.5 3.6 extern int is_active(struct domain *d); 3.7 +extern int is_passive(struct domain *d); 3.8 + 3.9 +int passive_domain_do_rdmsr(struct cpu_user_regs *regs) 3.10 +{ 3.11 + u64 msr_content; 3.12 + int type, index; 3.13 + struct vpmu_struct *vpmu = vcpu_vpmu(current); 3.14 + 3.15 + if ( model->is_arch_pmu_msr == NULL ) 3.16 + return 0; 3.17 + if ( !model->is_arch_pmu_msr((u64)regs->ecx, &type, &index) ) 3.18 + return 0; 3.19 + if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) ) 3.20 + if ( ! model->allocated_msr(current) ) 3.21 + return 0; 3.22 + 3.23 + model->load_msr(current, type, index, &msr_content); 3.24 + regs->eax = msr_content & 0xFFFFFFFF; 3.25 + regs->edx = msr_content >> 32; 3.26 + return 1; 3.27 +} 3.28 + 3.29 + 3.30 +int passive_domain_do_wrmsr(struct cpu_user_regs *regs) 3.31 +{ 3.32 + u64 msr_content; 3.33 + int type, index; 3.34 + struct vpmu_struct *vpmu = vcpu_vpmu(current); 3.35 + 3.36 + if ( model->is_arch_pmu_msr == NULL ) 3.37 + return 0; 3.38 + if ( !model->is_arch_pmu_msr((u64)regs->ecx, &type, &index) ) 3.39 + return 0; 3.40 + 3.41 + if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) ) 3.42 + if ( ! model->allocated_msr(current) ) 3.43 + return 0; 3.44 + 3.45 + msr_content = (u32)regs->eax | ((u64)regs->edx << 32); 3.46 + model->save_msr(current, type, index, msr_content); 3.47 + return 1; 3.48 +} 3.49 + 3.50 +void passive_domain_destroy(struct vcpu *v) 3.51 +{ 3.52 + struct vpmu_struct *vpmu = vcpu_vpmu(v); 3.53 + if ( vpmu->flags & PASSIVE_DOMAIN_ALLOCATED ) 3.54 + model->free_msr(v); 3.55 +} 3.56 3.57 static int nmi_callback(struct cpu_user_regs *regs, int cpu) 3.58 { 3.59 @@ -46,6 +95,8 @@ static int nmi_callback(struct cpu_user_ 3.60 if ( ovf && is_active(current->domain) && !xen_mode ) 3.61 send_guest_vcpu_virq(current, VIRQ_XENOPROF); 3.62 3.63 + if ( ovf == 2 ) 3.64 + test_and_set_bool(current->nmi_pending); 3.65 return 1; 3.66 }
4.1 --- a/xen/arch/x86/oprofile/op_model_ppro.c Mon Oct 27 13:29:35 2008 +0000 4.2 +++ b/xen/arch/x86/oprofile/op_model_ppro.c Mon Oct 27 13:31:15 2008 +0000 4.3 @@ -18,6 +18,8 @@ 4.4 #include <xen/sched.h> 4.5 #include <asm/regs.h> 4.6 #include <asm/current.h> 4.7 +#include <asm/hvm/vmx/vpmu.h> 4.8 +#include <asm/hvm/vmx/vpmu_core2.h> 4.9 4.10 #include "op_x86_model.h" 4.11 #include "op_counter.h" 4.12 @@ -39,9 +41,11 @@ 4.13 #define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17)) 4.14 #define CTRL_SET_UM(val, m) (val |= (m << 8)) 4.15 #define CTRL_SET_EVENT(val, e) (val |= e) 4.16 - 4.17 +#define IS_ACTIVE(val) (val & (1 << 22) ) 4.18 +#define IS_ENABLE(val) (val & (1 << 20) ) 4.19 static unsigned long reset_value[NUM_COUNTERS]; 4.20 int ppro_has_global_ctrl = 0; 4.21 +extern int is_passive(struct domain *d); 4.22 4.23 static void ppro_fill_in_addresses(struct op_msrs * const msrs) 4.24 { 4.25 @@ -103,6 +107,7 @@ static int ppro_check_ctrs(unsigned int 4.26 int ovf = 0; 4.27 unsigned long eip = regs->eip; 4.28 int mode = xenoprofile_get_mode(current, regs); 4.29 + struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context; 4.30 4.31 for (i = 0 ; i < NUM_COUNTERS; ++i) { 4.32 if (!reset_value[i]) 4.33 @@ -111,7 +116,18 @@ static int ppro_check_ctrs(unsigned int 4.34 if (CTR_OVERFLOWED(low)) { 4.35 xenoprof_log_event(current, regs, eip, mode, i); 4.36 CTR_WRITE(reset_value[i], msrs, i); 4.37 - ovf = 1; 4.38 + if ( is_passive(current->domain) && (mode != 2) && 4.39 + (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) ) 4.40 + { 4.41 + if ( IS_ACTIVE(msrs_content[i].control) ) 4.42 + { 4.43 + msrs_content[i].counter = (low | (unsigned long)high << 32); 4.44 + if ( IS_ENABLE(msrs_content[i].control) ) 4.45 + ovf = 2; 4.46 + } 4.47 + } 4.48 + if ( !ovf ) 4.49 + ovf = 1; 4.50 } 4.51 } 4.52 4.53 @@ -159,6 +175,82 @@ static void ppro_stop(struct op_msrs con 4.54 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 4.55 } 4.56 4.57 +static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index) 4.58 +{ 4.59 + if ( (msr_index >= MSR_IA32_PERFCTR0) && 4.60 + (msr_index < (MSR_IA32_PERFCTR0 + NUM_COUNTERS)) ) 4.61 + { 4.62 + *type = MSR_TYPE_ARCH_COUNTER; 4.63 + *index = msr_index - MSR_IA32_PERFCTR0; 4.64 + return 1; 4.65 + } 4.66 + if ( (msr_index >= MSR_P6_EVNTSEL0) && 4.67 + (msr_index < (MSR_P6_EVNTSEL0 + NUM_CONTROLS)) ) 4.68 + { 4.69 + *type = MSR_TYPE_ARCH_CTRL; 4.70 + *index = msr_index - MSR_P6_EVNTSEL0; 4.71 + return 1; 4.72 + } 4.73 + 4.74 + return 0; 4.75 +} 4.76 + 4.77 +static int ppro_allocate_msr(struct vcpu *v) 4.78 +{ 4.79 + struct vpmu_struct *vpmu = vcpu_vpmu(v); 4.80 + struct arch_msr_pair *msr_content; 4.81 + 4.82 + msr_content = xmalloc_bytes( sizeof(struct arch_msr_pair) * NUM_COUNTERS ); 4.83 + if ( !msr_content ) 4.84 + goto out; 4.85 + memset(msr_content, 0, sizeof(struct arch_msr_pair) * NUM_COUNTERS); 4.86 + vpmu->context = (void *)msr_content; 4.87 + vpmu->flags = 0; 4.88 + vpmu->flags |= PASSIVE_DOMAIN_ALLOCATED; 4.89 + return 1; 4.90 +out: 4.91 + gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is " 4.92 + "unavailable on domain %d vcpu %d.\n", 4.93 + v->vcpu_id, v->domain->domain_id); 4.94 + return 0; 4.95 +} 4.96 + 4.97 +static void ppro_free_msr(struct vcpu *v) 4.98 +{ 4.99 + struct vpmu_struct *vpmu = vcpu_vpmu(v); 4.100 + 4.101 + xfree(vpmu->context); 4.102 + vpmu->flags &= ~PASSIVE_DOMAIN_ALLOCATED; 4.103 +} 4.104 + 4.105 +static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content) 4.106 +{ 4.107 + struct arch_msr_pair *msrs = vcpu_vpmu(v)->context; 4.108 + switch ( type ) 4.109 + { 4.110 + case MSR_TYPE_ARCH_COUNTER: 4.111 + *msr_content = msrs[index].counter; 4.112 + break; 4.113 + case MSR_TYPE_ARCH_CTRL: 4.114 + *msr_content = msrs[index].control; 4.115 + break; 4.116 + } 4.117 +} 4.118 + 4.119 +static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content) 4.120 +{ 4.121 + struct arch_msr_pair *msrs = vcpu_vpmu(v)->context; 4.122 + 4.123 + switch ( type ) 4.124 + { 4.125 + case MSR_TYPE_ARCH_COUNTER: 4.126 + msrs[index].counter = msr_content; 4.127 + break; 4.128 + case MSR_TYPE_ARCH_CTRL: 4.129 + msrs[index].control = msr_content; 4.130 + break; 4.131 + } 4.132 +} 4.133 4.134 struct op_x86_model_spec const op_ppro_spec = { 4.135 .num_counters = NUM_COUNTERS, 4.136 @@ -167,5 +259,10 @@ struct op_x86_model_spec const op_ppro_s 4.137 .setup_ctrs = &ppro_setup_ctrs, 4.138 .check_ctrs = &ppro_check_ctrs, 4.139 .start = &ppro_start, 4.140 - .stop = &ppro_stop 4.141 + .stop = &ppro_stop, 4.142 + .is_arch_pmu_msr = &ppro_is_arch_pmu_msr, 4.143 + .allocated_msr = &ppro_allocate_msr, 4.144 + .free_msr = &ppro_free_msr, 4.145 + .load_msr = &ppro_load_msr, 4.146 + .save_msr = &ppro_save_msr 4.147 };
5.1 --- a/xen/arch/x86/oprofile/op_x86_model.h Mon Oct 27 13:29:35 2008 +0000 5.2 +++ b/xen/arch/x86/oprofile/op_x86_model.h Mon Oct 27 13:31:15 2008 +0000 5.3 @@ -41,6 +41,11 @@ struct op_x86_model_spec { 5.4 struct cpu_user_regs * const regs); 5.5 void (*start)(struct op_msrs const * const msrs); 5.6 void (*stop)(struct op_msrs const * const msrs); 5.7 + int (*is_arch_pmu_msr)(u64 msr_index, int *type, int *index); 5.8 + int (*allocated_msr)(struct vcpu *v); 5.9 + void (*free_msr)(struct vcpu *v); 5.10 + void (*load_msr)(struct vcpu * const v, int type, int index, u64 *msr_content); 5.11 + void (*save_msr)(struct vcpu * const v, int type, int index, u64 msr_content); 5.12 }; 5.13 5.14 extern struct op_x86_model_spec const op_ppro_spec;
6.1 --- a/xen/common/xenoprof.c Mon Oct 27 13:29:35 2008 +0000 6.2 +++ b/xen/common/xenoprof.c Mon Oct 27 13:31:15 2008 +0000 6.3 @@ -85,7 +85,7 @@ int is_active(struct domain *d) 6.4 return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE)); 6.5 } 6.6 6.7 -static int is_passive(struct domain *d) 6.8 +int is_passive(struct domain *d) 6.9 { 6.10 struct xenoprof *x = d->xenoprof; 6.11 return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
7.1 --- a/xen/include/asm-x86/hvm/vmx/vpmu.h Mon Oct 27 13:29:35 2008 +0000 7.2 +++ b/xen/include/asm-x86/hvm/vmx/vpmu.h Mon Oct 27 13:31:15 2008 +0000 7.3 @@ -67,7 +67,7 @@ struct vpmu_struct { 7.4 #define VPMU_CONTEXT_ALLOCATED 0x1 7.5 #define VPMU_CONTEXT_LOADED 0x2 7.6 #define VPMU_RUNNING 0x4 7.7 - 7.8 +#define PASSIVE_DOMAIN_ALLOCATED 0x8 7.9 int vpmu_do_wrmsr(struct cpu_user_regs *regs); 7.10 int vpmu_do_rdmsr(struct cpu_user_regs *regs); 7.11 int vpmu_do_interrupt(struct cpu_user_regs *regs);
8.1 --- a/xen/include/asm-x86/hvm/vmx/vpmu_core2.h Mon Oct 27 13:29:35 2008 +0000 8.2 +++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h Mon Oct 27 13:31:15 2008 +0000 8.3 @@ -23,28 +23,6 @@ 8.4 #ifndef __ASM_X86_HVM_VPMU_CORE_H_ 8.5 #define __ASM_X86_HVM_VPMU_CORE_H_ 8.6 8.7 -/* Core 2 Non-architectual Performance Counter MSRs. */ 8.8 -u32 core2_counters_msr[] = { 8.9 - MSR_CORE_PERF_FIXED_CTR0, 8.10 - MSR_CORE_PERF_FIXED_CTR1, 8.11 - MSR_CORE_PERF_FIXED_CTR2}; 8.12 - 8.13 -/* Core 2 Non-architectual Performance Control MSRs. */ 8.14 -u32 core2_ctrls_msr[] = { 8.15 - MSR_CORE_PERF_FIXED_CTR_CTRL, 8.16 - MSR_IA32_PEBS_ENABLE, 8.17 - MSR_IA32_DS_AREA}; 8.18 - 8.19 -struct pmumsr core2_counters = { 8.20 - 3, 8.21 - core2_counters_msr 8.22 -}; 8.23 - 8.24 -struct pmumsr core2_ctrls = { 8.25 - 3, 8.26 - core2_ctrls_msr 8.27 -}; 8.28 - 8.29 struct arch_msr_pair { 8.30 u64 counter; 8.31 u64 control;
9.1 --- a/xen/include/xen/xenoprof.h Mon Oct 27 13:29:35 2008 +0000 9.2 +++ b/xen/include/xen/xenoprof.h Mon Oct 27 13:31:15 2008 +0000 9.3 @@ -75,4 +75,7 @@ int xenoprof_add_trace(struct domain *d, 9.4 int acquire_pmu_ownship(int pmu_ownership); 9.5 void release_pmu_ownship(int pmu_ownership); 9.6 9.7 +int passive_domain_do_rdmsr(struct cpu_user_regs *regs); 9.8 +int passive_domain_do_wrmsr(struct cpu_user_regs *regs); 9.9 +void passive_domain_destroy(struct vcpu *v); 9.10 #endif /* __XEN__XENOPROF_H__ */