ia64/xen-unstable

changeset 16940:6ea3db7ae24d

vmx: Enable Core 2 Duo Performance Counters in HVM guest
Signed-off-by: Haitao Shan <haitao.shan@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 30 09:59:27 2008 +0000 (2008-01-30)
parents 47b7ec3b4055
children 087caea46be7
files xen/arch/x86/apic.c
      xen/arch/x86/hvm/svm/svm.c
      xen/arch/x86/hvm/vmx/Makefile
      xen/arch/x86/hvm/vmx/vmx.c
      xen/arch/x86/hvm/vmx/vpmu.c
      xen/arch/x86/hvm/vmx/vpmu_core2.c
      xen/arch/x86/i8259.c
      xen/arch/x86/oprofile/op_model_ppro.c
      xen/common/xenoprof.c
      xen/include/asm-x86/hvm/hvm.h
      xen/include/asm-x86/hvm/vlapic.h
      xen/include/asm-x86/hvm/vmx/vmcs.h
      xen/include/asm-x86/hvm/vmx/vpmu.h
      xen/include/asm-x86/hvm/vmx/vpmu_core2.h
      xen/include/asm-x86/irq.h
      xen/include/asm-x86/mach-default/irq_vectors.h
      xen/include/xen/xenoprof.h
line diff
     1.1 --- a/xen/arch/x86/apic.c	Wed Jan 30 09:38:10 2008 +0000
     1.2 +++ b/xen/arch/x86/apic.c	Wed Jan 30 09:59:27 2008 +0000
     1.3 @@ -94,6 +94,9 @@ void __init apic_intr_init(void)
     1.4      set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
     1.5      set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
     1.6  
     1.7 +    /* Performance Counters Interrupt */
     1.8 +    set_intr_gate(PMU_APIC_VECTOR, pmu_apic_interrupt);
     1.9 +
    1.10      /* thermal monitor LVT interrupt */
    1.11  #ifdef CONFIG_X86_MCE_P4THERMAL
    1.12      set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
    1.13 @@ -1227,6 +1230,16 @@ fastcall void smp_error_interrupt(struct
    1.14  }
    1.15  
    1.16  /*
    1.17 + * This interrupt handles performance counters interrupt
    1.18 + */
    1.19 +
    1.20 +fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
    1.21 +{
    1.22 +    ack_APIC_irq();
    1.23 +    hvm_do_pmu_interrupt(regs);
    1.24 +}
    1.25 +
    1.26 +/*
    1.27   * This initializes the IO-APIC and APIC hardware if this is
    1.28   * a UP kernel.
    1.29   */
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Jan 30 09:38:10 2008 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Jan 30 09:59:27 2008 +0000
     2.3 @@ -860,6 +860,11 @@ static int svm_event_pending(struct vcpu
     2.4      return vmcb->eventinj.fields.v;
     2.5  }
     2.6  
     2.7 +static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
     2.8 +{
     2.9 +    return 0;
    2.10 +}
    2.11 +
    2.12  static struct hvm_function_table svm_function_table = {
    2.13      .name                 = "SVM",
    2.14      .cpu_down             = svm_cpu_down,
    2.15 @@ -882,7 +887,8 @@ static struct hvm_function_table svm_fun
    2.16      .set_tsc_offset       = svm_set_tsc_offset,
    2.17      .inject_exception     = svm_inject_exception,
    2.18      .init_hypercall_page  = svm_init_hypercall_page,
    2.19 -    .event_pending        = svm_event_pending
    2.20 +    .event_pending        = svm_event_pending,
    2.21 +    .do_pmu_interrupt     = svm_do_pmu_interrupt
    2.22  };
    2.23  
    2.24  int start_svm(struct cpuinfo_x86 *c)
     3.1 --- a/xen/arch/x86/hvm/vmx/Makefile	Wed Jan 30 09:38:10 2008 +0000
     3.2 +++ b/xen/arch/x86/hvm/vmx/Makefile	Wed Jan 30 09:59:27 2008 +0000
     3.3 @@ -9,3 +9,5 @@ obj-y += realmode.o
     3.4  endif
     3.5  obj-y += vmcs.o
     3.6  obj-y += vmx.o
     3.7 +obj-y += vpmu.o
     3.8 +obj-y += vpmu_core2.o
     4.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jan 30 09:38:10 2008 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jan 30 09:59:27 2008 +0000
     4.3 @@ -90,6 +90,8 @@ static int vmx_vcpu_initialise(struct vc
     4.4          return rc;
     4.5      }
     4.6  
     4.7 +    vpmu_initialise(v);
     4.8 +
     4.9      vmx_install_vlapic_mapping(v);
    4.10  
    4.11  #ifndef VMXASSIST
    4.12 @@ -104,6 +106,7 @@ static int vmx_vcpu_initialise(struct vc
    4.13  static void vmx_vcpu_destroy(struct vcpu *v)
    4.14  {
    4.15      vmx_destroy_vmcs(v);
    4.16 +    vpmu_destroy(v);
    4.17  }
    4.18  
    4.19  #ifdef __x86_64__
    4.20 @@ -742,6 +745,7 @@ static void vmx_ctxt_switch_from(struct 
    4.21      vmx_save_guest_msrs(v);
    4.22      vmx_restore_host_msrs();
    4.23      vmx_save_dr(v);
    4.24 +    vpmu_save(v);
    4.25  }
    4.26  
    4.27  static void vmx_ctxt_switch_to(struct vcpu *v)
    4.28 @@ -752,6 +756,7 @@ static void vmx_ctxt_switch_to(struct vc
    4.29  
    4.30      vmx_restore_guest_msrs(v);
    4.31      vmx_restore_dr(v);
    4.32 +    vpmu_load(v);
    4.33  }
    4.34  
    4.35  static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
    4.36 @@ -1119,6 +1124,11 @@ static int vmx_event_pending(struct vcpu
    4.37      return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
    4.38  }
    4.39  
    4.40 +static int vmx_do_pmu_interrupt(struct cpu_user_regs *regs)
    4.41 +{
    4.42 +    return vpmu_do_interrupt(regs);
    4.43 +}
    4.44 +
    4.45  static struct hvm_function_table vmx_function_table = {
    4.46      .name                 = "VMX",
    4.47      .domain_initialise    = vmx_domain_initialise,
    4.48 @@ -1141,6 +1151,7 @@ static struct hvm_function_table vmx_fun
    4.49      .inject_exception     = vmx_inject_exception,
    4.50      .init_hypercall_page  = vmx_init_hypercall_page,
    4.51      .event_pending        = vmx_event_pending,
    4.52 +    .do_pmu_interrupt     = vmx_do_pmu_interrupt,
    4.53      .cpu_up               = vmx_cpu_up,
    4.54      .cpu_down             = vmx_cpu_down,
    4.55  };
    4.56 @@ -1300,7 +1311,6 @@ void vmx_cpuid_intercept(
    4.57  
    4.58      case 0x00000006:
    4.59      case 0x00000009:
    4.60 -    case 0x0000000A:
    4.61          *eax = *ebx = *ecx = *edx = 0;
    4.62          break;
    4.63  
    4.64 @@ -2376,7 +2386,15 @@ static int vmx_do_msr_read(struct cpu_us
    4.65          /* No point in letting the guest see real MCEs */
    4.66          msr_content = 0;
    4.67          break;
    4.68 +    case MSR_IA32_MISC_ENABLE:
    4.69 +        rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
    4.70 +        /* Debug Trace Store is not supported. */
    4.71 +        msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
    4.72 +                       MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
    4.73 +        break;
    4.74      default:
    4.75 +        if ( vpmu_do_rdmsr(regs) )
    4.76 +            goto done;
    4.77          switch ( long_mode_do_msr_read(regs) )
    4.78          {
    4.79              case HNDL_unhandled:
    4.80 @@ -2583,6 +2601,8 @@ static int vmx_do_msr_write(struct cpu_u
    4.81      case MSR_IA32_VMX_BASIC...MSR_IA32_VMX_PROCBASED_CTLS2:
    4.82          goto gp_fault;
    4.83      default:
    4.84 +        if ( vpmu_do_wrmsr(regs) )
    4.85 +            return 1;
    4.86          switch ( long_mode_do_msr_write(regs) )
    4.87          {
    4.88              case HNDL_unhandled:
    4.89 @@ -2632,6 +2652,7 @@ static void vmx_do_extint(struct cpu_use
    4.90      fastcall void smp_call_function_interrupt(void);
    4.91      fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
    4.92      fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
    4.93 +    fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
    4.94  #ifdef CONFIG_X86_MCE_P4THERMAL
    4.95      fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
    4.96  #endif
    4.97 @@ -2662,6 +2683,9 @@ static void vmx_do_extint(struct cpu_use
    4.98      case ERROR_APIC_VECTOR:
    4.99          smp_error_interrupt(regs);
   4.100          break;
   4.101 +    case PMU_APIC_VECTOR:
   4.102 +        smp_pmu_apic_interrupt(regs);
   4.103 +        break;
   4.104  #ifdef CONFIG_X86_MCE_P4THERMAL
   4.105      case THERMAL_APIC_VECTOR:
   4.106          smp_thermal_interrupt(regs);
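
The MSR_IA32_MISC_ENABLE hunk above forces the "BTS unavailable" and "PEBS unavailable" bits in the value returned to the guest, and the new default cases route unrecognised MSR accesses through vpmu_do_rdmsr()/vpmu_do_wrmsr() before the long-mode fallback. A rough guest-side check of the MISC_ENABLE behaviour might look like the sketch below; it is not part of this changeset and assumes a Linux guest with the msr driver loaded and root privileges (bit 11 = BTS unavailable, bit 12 = PEBS unavailable, per the Intel SDM).

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);   /* needs root + msr module */

        /* The msr driver returns the 8-byte MSR value at offset == MSR index. */
        if ( fd < 0 || pread(fd, &val, sizeof(val), 0x1a0) != sizeof(val) )
        {
            perror("rdmsr IA32_MISC_ENABLE");
            return 1;
        }
        printf("BTS unavailable:  %d\n", (int)((val >> 11) & 1));
        printf("PEBS unavailable: %d\n", (int)((val >> 12) & 1));
        close(fd);
        return 0;
    }

Both bits should read back as 1 from inside an HVM guest once this change is applied, since Debug Store is not virtualised.
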
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/vpmu.c	Wed Jan 30 09:59:27 2008 +0000
     5.3 @@ -0,0 +1,119 @@
     5.4 +/*
     5.5 + * vpmu.c: PMU virtualization for HVM domain.
     5.6 + *
     5.7 + * Copyright (c) 2007, Intel Corporation.
     5.8 + *
     5.9 + * This program is free software; you can redistribute it and/or modify it
    5.10 + * under the terms and conditions of the GNU General Public License,
    5.11 + * version 2, as published by the Free Software Foundation.
    5.12 + *
    5.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    5.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    5.16 + * more details.
    5.17 + *
    5.18 + * You should have received a copy of the GNU General Public License along with
    5.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    5.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    5.21 + *
    5.22 + * Author: Haitao Shan <haitao.shan@intel.com>
    5.23 + */
    5.24 +
    5.25 +#include <xen/config.h>
    5.26 +#include <xen/sched.h>
    5.27 +#include <asm/regs.h>
    5.28 +#include <asm/types.h>
    5.29 +#include <asm/msr.h>
    5.30 +#include <asm/hvm/support.h>
    5.31 +#include <asm/hvm/vmx/vmx.h>
    5.32 +#include <asm/hvm/vmx/vmcs.h>
    5.33 +#include <public/sched.h>
    5.34 +#include <public/hvm/save.h>
    5.35 +#include <asm/hvm/vmx/vpmu.h>
    5.36 +
    5.37 +int inline vpmu_do_wrmsr(struct cpu_user_regs *regs)
    5.38 +{
    5.39 +    struct vpmu_struct *vpmu = vcpu_vpmu(current);
    5.40 +
    5.41 +    if ( vpmu->arch_vpmu_ops )
    5.42 +        return vpmu->arch_vpmu_ops->do_wrmsr(regs);
    5.43 +    return 0;
    5.44 +}
    5.45 +
    5.46 +int inline vpmu_do_rdmsr(struct cpu_user_regs *regs)
    5.47 +{
    5.48 +    struct vpmu_struct *vpmu = vcpu_vpmu(current);
    5.49 +
    5.50 +    if ( vpmu->arch_vpmu_ops )
    5.51 +        return vpmu->arch_vpmu_ops->do_rdmsr(regs);
    5.52 +    return 0;
    5.53 +}
    5.54 +
    5.55 +int inline vpmu_do_interrupt(struct cpu_user_regs *regs)
    5.56 +{
    5.57 +    struct vpmu_struct *vpmu = vcpu_vpmu(current);
    5.58 +
    5.59 +    if ( vpmu->arch_vpmu_ops )
    5.60 +        return vpmu->arch_vpmu_ops->do_interrupt(regs);
    5.61 +    return 0;
    5.62 +}
    5.63 +
    5.64 +void vpmu_save(struct vcpu *v)
    5.65 +{
    5.66 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    5.67 +
    5.68 +    if ( vpmu->arch_vpmu_ops )
    5.69 +        vpmu->arch_vpmu_ops->arch_vpmu_save(v);
    5.70 +}
    5.71 +
    5.72 +void vpmu_load(struct vcpu *v)
    5.73 +{
    5.74 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    5.75 +
    5.76 +    if ( vpmu->arch_vpmu_ops )
    5.77 +        vpmu->arch_vpmu_ops->arch_vpmu_load(v);
    5.78 +}
    5.79 +
    5.80 +extern struct arch_vpmu_ops core2_vpmu_ops;
    5.81 +void inline vpmu_initialise(struct vcpu *v)
    5.82 +{
    5.83 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    5.84 +
    5.85 +    /* If it is not a fresh initialization, release all resources
     5.86 +     * before initialising again.
    5.87 +     */
    5.88 +    if ( vpmu->flags & VPMU_CONTEXT_ALLOCATED )
    5.89 +        vpmu_destroy(v);
    5.90 +
    5.91 +    if ( current_cpu_data.x86 == 6 )
    5.92 +    {
    5.93 +        switch ( current_cpu_data.x86_model )
    5.94 +        {
    5.95 +        case 15:
    5.96 +        case 23:
    5.97 +            vpmu->arch_vpmu_ops = &core2_vpmu_ops;
    5.98 +            dprintk(XENLOG_INFO,
    5.99 +                   "Core 2 duo CPU detected for guest PMU usage.\n");
   5.100 +            break;
   5.101 +        }
   5.102 +    }
   5.103 +
   5.104 +    if ( !vpmu->arch_vpmu_ops )
   5.105 +    {
    5.106 +        dprintk(XENLOG_WARNING, "Unsupported CPU model for guest PMU usage.\n");
   5.107 +        return;
   5.108 +    }
   5.109 +
   5.110 +    vpmu->flags = 0;
   5.111 +    vpmu->context = NULL;
   5.112 +    vpmu->arch_vpmu_ops->arch_vpmu_initialise(v);
   5.113 +}
   5.114 +
   5.115 +void inline vpmu_destroy(struct vcpu *v)
   5.116 +{
   5.117 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   5.118 +
   5.119 +    if ( vpmu->arch_vpmu_ops )
   5.120 +        vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
   5.121 +}
   5.122 +
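
vpmu_initialise() above picks an arch_vpmu_ops backend purely from the host CPU's family/model: only family 6, models 15 and 23 (the 65nm and 45nm Core 2 parts this patch targets) get core2_vpmu_ops; anything else leaves the vPMU disabled. A standalone sketch of the same family/model decode, using GCC's <cpuid.h> instead of Xen's current_cpu_data (illustration only, not changeset code):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, family, model;

        if ( !__get_cpuid(1, &eax, &ebx, &ecx, &edx) )
            return 1;

        family = (eax >> 8) & 0xf;
        model  = (eax >> 4) & 0xf;
        if ( family == 0x6 || family == 0xf )
            model |= ((eax >> 16) & 0xf) << 4;   /* fold in the extended model */
        if ( family == 0xf )
            family += (eax >> 20) & 0xff;        /* fold in the extended family */

        /* core2_vpmu_ops is only selected for family 6, model 15 or 23. */
        printf("family %u, model %u\n", family, model);
        return 0;
    }
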
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c	Wed Jan 30 09:59:27 2008 +0000
     6.3 @@ -0,0 +1,469 @@
     6.4 +/*
     6.5 + * vpmu_core2.c: CORE 2 specific PMU virtualization for HVM domain.
     6.6 + *
     6.7 + * Copyright (c) 2007, Intel Corporation.
     6.8 + *
     6.9 + * This program is free software; you can redistribute it and/or modify it
    6.10 + * under the terms and conditions of the GNU General Public License,
    6.11 + * version 2, as published by the Free Software Foundation.
    6.12 + *
    6.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    6.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    6.16 + * more details.
    6.17 + *
    6.18 + * You should have received a copy of the GNU General Public License along with
    6.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    6.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    6.21 + *
    6.22 + * Author: Haitao Shan <haitao.shan@intel.com>
    6.23 + */
    6.24 +
    6.25 +#include <xen/config.h>
    6.26 +#include <xen/sched.h>
    6.27 +#include <asm/system.h>
    6.28 +#include <asm/regs.h>
    6.29 +#include <asm/types.h>
    6.30 +#include <asm/msr.h>
    6.31 +#include <asm/msr-index.h>
    6.32 +#include <asm/hvm/support.h>
    6.33 +#include <asm/hvm/vlapic.h>
    6.34 +#include <asm/hvm/vmx/vmx.h>
    6.35 +#include <asm/hvm/vmx/vmcs.h>
    6.36 +#include <public/sched.h>
    6.37 +#include <public/hvm/save.h>
    6.38 +#include <asm/hvm/vmx/vpmu.h>
    6.39 +#include <asm/hvm/vmx/vpmu_core2.h>
    6.40 +
    6.41 +static int arch_pmc_cnt = 0;
    6.42 +
    6.43 +static int core2_get_pmc_count(void)
    6.44 +{
    6.45 +    u32 eax, ebx, ecx, edx;
    6.46 +
    6.47 +    if ( arch_pmc_cnt )
    6.48 +        return arch_pmc_cnt;
    6.49 +
    6.50 +    cpuid(0xa, &eax, &ebx, &ecx, &edx);
    6.51 +    return arch_pmc_cnt = (eax & 0xff00) >> 8;
    6.52 +}
    6.53 +
    6.54 +static int is_core2_vpmu_msr(u32 msr_index, int *type, int *index)
    6.55 +{
    6.56 +    int i;
    6.57 +
    6.58 +    for ( i=0; i < core2_counters.num; i++ )
    6.59 +        if ( core2_counters.msr[i] == msr_index )
    6.60 +        {
    6.61 +            *type = MSR_TYPE_COUNTER;
    6.62 +            *index = i;
    6.63 +            return 1;
    6.64 +        }
    6.65 +    for ( i=0; i < core2_ctrls.num; i++ )
    6.66 +        if ( core2_ctrls.msr[i] == msr_index )
    6.67 +        {
    6.68 +            *type = MSR_TYPE_CTRL;
    6.69 +            *index = i;
    6.70 +            return 1;
    6.71 +        }
    6.72 +
    6.73 +    if ( msr_index == MSR_CORE_PERF_GLOBAL_CTRL ||
    6.74 +         msr_index == MSR_CORE_PERF_GLOBAL_STATUS ||
    6.75 +         msr_index == MSR_CORE_PERF_GLOBAL_OVF_CTRL )
    6.76 +    {
    6.77 +        *type = MSR_TYPE_GLOBAL;
    6.78 +        return 1;
    6.79 +    }
    6.80 +
    6.81 +    if ( msr_index >= MSR_IA32_PERFCTR0 &&
    6.82 +         msr_index < MSR_IA32_PERFCTR0 + core2_get_pmc_count() )
    6.83 +    {
    6.84 +        *type = MSR_TYPE_ARCH_COUNTER;
    6.85 +        *index = msr_index - MSR_IA32_PERFCTR0;
    6.86 +        return 1;
    6.87 +    }
    6.88 +    if ( msr_index >= MSR_P6_EVNTSEL0 &&
    6.89 +         msr_index < MSR_P6_EVNTSEL0 + core2_get_pmc_count() )
    6.90 +    {
    6.91 +        *type = MSR_TYPE_ARCH_CTRL;
    6.92 +        *index = msr_index - MSR_P6_EVNTSEL0;
    6.93 +        return 1;
    6.94 +    }
    6.95 +    return 0;
    6.96 +}
    6.97 +
    6.98 +static void core2_vpmu_set_msr_bitmap(char *msr_bitmap)
    6.99 +{
   6.100 +    int i;
   6.101 +
   6.102 +    /* Allow Read/Write PMU Counters MSR Directly. */
   6.103 +    for ( i=0; i < core2_counters.num; i++ )
   6.104 +    {
   6.105 +        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
   6.106 +        clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap + 0x800);
   6.107 +    }
   6.108 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.109 +    {
   6.110 +        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
   6.111 +        clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap + 0x800);
   6.112 +    }
   6.113 +    /* Allow Read PMU Non-global Controls Directly. */
   6.114 +    for ( i=0; i < core2_ctrls.num; i++ )
   6.115 +        clear_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
   6.116 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.117 +        clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
   6.118 +}
   6.119 +
   6.120 +static void core2_vpmu_unset_msr_bitmap(char *msr_bitmap)
   6.121 +{
   6.122 +    int i;
   6.123 +
   6.124 +    /* Undo all the changes to msr bitmap. */
   6.125 +    for ( i=0; i < core2_counters.num; i++ )
   6.126 +    {
   6.127 +        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap);
   6.128 +        set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap + 0x800);
   6.129 +    }
   6.130 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.131 +    {
   6.132 +        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap);
   6.133 +        set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap + 0x800);
   6.134 +    }
   6.135 +    for ( i=0; i < core2_ctrls.num; i++ )
   6.136 +        set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
   6.137 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.138 +        set_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap);
   6.139 +}
   6.140 +
   6.141 +static inline void __core2_vpmu_save(struct vcpu *v)
   6.142 +{
   6.143 +    int i;
   6.144 +    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
   6.145 +
   6.146 +    for ( i=0; i < core2_counters.num; i++ )
   6.147 +        rdmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
   6.148 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.149 +        rdmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
   6.150 +    core2_vpmu_cxt->hw_lapic_lvtpc = apic_read(APIC_LVTPC);
   6.151 +    apic_write(APIC_LVTPC, LVTPC_HVM_PMU | APIC_LVT_MASKED);
   6.152 +}
   6.153 +
   6.154 +static void core2_vpmu_save(struct vcpu *v)
   6.155 +{
   6.156 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.157 +
   6.158 +    if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
   6.159 +           (vpmu->flags & VPMU_CONTEXT_LOADED)) )
   6.160 +        return;
   6.161 +
   6.162 +    __core2_vpmu_save(v);
   6.163 +
   6.164 +    /* Unset PMU MSR bitmap to trap lazy load. */
   6.165 +    if ( !(vpmu->flags & VPMU_RUNNING) && cpu_has_vmx_msr_bitmap )
   6.166 +        core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
   6.167 +
   6.168 +    vpmu->flags &= ~VPMU_CONTEXT_LOADED;
   6.169 +    return;
   6.170 +}
   6.171 +
   6.172 +static inline void __core2_vpmu_load(struct vcpu *v)
   6.173 +{
   6.174 +    int i;
   6.175 +    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
   6.176 +
   6.177 +    for ( i=0; i < core2_counters.num; i++ )
   6.178 +        wrmsrl(core2_counters.msr[i], core2_vpmu_cxt->counters[i]);
   6.179 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.180 +        wrmsrl(MSR_IA32_PERFCTR0+i, core2_vpmu_cxt->arch_msr_pair[i].counter);
   6.181 +
   6.182 +    for ( i=0; i < core2_ctrls.num; i++ )
   6.183 +        wrmsrl(core2_ctrls.msr[i], core2_vpmu_cxt->ctrls[i]);
   6.184 +    for ( i=0; i < core2_get_pmc_count(); i++ )
   6.185 +        wrmsrl(MSR_P6_EVNTSEL0+i, core2_vpmu_cxt->arch_msr_pair[i].control);
   6.186 +
   6.187 +    apic_write_around(APIC_LVTPC, core2_vpmu_cxt->hw_lapic_lvtpc);
   6.188 +}
   6.189 +
   6.190 +static void core2_vpmu_load(struct vcpu *v)
   6.191 +{
   6.192 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.193 +
   6.194 +    /* Only when PMU is counting, we load PMU context immediately. */
   6.195 +    if ( !((vpmu->flags & VPMU_CONTEXT_ALLOCATED) &&
   6.196 +           (vpmu->flags & VPMU_RUNNING)) )
   6.197 +        return;
   6.198 +    __core2_vpmu_load(v);
   6.199 +    vpmu->flags |= VPMU_CONTEXT_LOADED;
   6.200 +}
   6.201 +
   6.202 +static int core2_vpmu_alloc_resource(struct vcpu *v)
   6.203 +{
   6.204 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.205 +    struct core2_vpmu_context *core2_vpmu_cxt;
   6.206 +    struct core2_pmu_enable *pmu_enable;
   6.207 +
   6.208 +    if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
   6.209 +        return 0;
   6.210 +
   6.211 +    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
   6.212 +    if ( vmx_add_host_load_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
   6.213 +        return 0;
   6.214 +
   6.215 +    if ( vmx_add_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL) )
   6.216 +        return 0;
   6.217 +    vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, -1ULL);
   6.218 +
   6.219 +    pmu_enable = xmalloc_bytes(sizeof(struct core2_pmu_enable) +
   6.220 +                 (core2_get_pmc_count()-1)*sizeof(char));
   6.221 +    if ( !pmu_enable )
   6.222 +        goto out1;
   6.223 +    memset(pmu_enable, 0, sizeof(struct core2_pmu_enable) +
   6.224 +                 (core2_get_pmc_count()-1)*sizeof(char));
   6.225 +
   6.226 +    core2_vpmu_cxt = xmalloc_bytes(sizeof(struct core2_vpmu_context) +
   6.227 +                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
   6.228 +    if ( !core2_vpmu_cxt )
   6.229 +        goto out2;
   6.230 +    memset(core2_vpmu_cxt, 0, sizeof(struct core2_vpmu_context) +
   6.231 +                    (core2_get_pmc_count()-1)*sizeof(struct arch_msr_pair));
   6.232 +    core2_vpmu_cxt->pmu_enable = pmu_enable;
   6.233 +    vpmu->context = (void *)core2_vpmu_cxt;
   6.234 +
   6.235 +    return 1;
   6.236 + out2:
   6.237 +    xfree(pmu_enable);
   6.238 + out1:
    6.239 +    dprintk(XENLOG_WARNING, "Insufficient memory for PMU; PMU feature is "
    6.240 +            "unavailable on domain %d vcpu %d.\n",
    6.241 +            v->domain->domain_id, v->vcpu_id);
   6.242 +    return 0;
   6.243 +}
   6.244 +
   6.245 +static void core2_vpmu_save_msr_context(struct vcpu *v, int type,
   6.246 +                                       int index, u64 msr_data)
   6.247 +{
   6.248 +    struct core2_vpmu_context *core2_vpmu_cxt = vcpu_vpmu(v)->context;
   6.249 +
   6.250 +    switch ( type )
   6.251 +    {
   6.252 +    case MSR_TYPE_CTRL:
   6.253 +        core2_vpmu_cxt->ctrls[index] = msr_data;
   6.254 +        break;
   6.255 +    case MSR_TYPE_ARCH_CTRL:
   6.256 +        core2_vpmu_cxt->arch_msr_pair[index].control = msr_data;
   6.257 +        break;
   6.258 +    }
   6.259 +}
   6.260 +
   6.261 +static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
   6.262 +{
   6.263 +    struct vpmu_struct *vpmu = vcpu_vpmu(current);
   6.264 +
   6.265 +    if ( !is_core2_vpmu_msr(msr_index, type, index) )
   6.266 +        return 0;
   6.267 +
   6.268 +    if ( unlikely(!(vpmu->flags & VPMU_CONTEXT_ALLOCATED)) &&
   6.269 +         !core2_vpmu_alloc_resource(current) )
   6.270 +        return 0;
   6.271 +    vpmu->flags |= VPMU_CONTEXT_ALLOCATED;
   6.272 +
    6.273 +    /* Do the lazy load here. */
   6.274 +    if ( !(vpmu->flags & VPMU_CONTEXT_LOADED) )
   6.275 +    {
   6.276 +        __core2_vpmu_load(current);
   6.277 +        vpmu->flags |= VPMU_CONTEXT_LOADED;
   6.278 +        if ( cpu_has_vmx_msr_bitmap )
   6.279 +            core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
   6.280 +    }
   6.281 +    return 1;
   6.282 +}
   6.283 +
   6.284 +static int core2_vpmu_do_wrmsr(struct cpu_user_regs *regs)
   6.285 +{
   6.286 +    u32 ecx = regs->ecx;
   6.287 +    u64 msr_content, global_ctrl, non_global_ctrl;
   6.288 +    char pmu_enable = 0;
   6.289 +    int i, tmp;
   6.290 +    int type = -1, index = -1;
   6.291 +    struct vcpu *v = current;
   6.292 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.293 +    struct core2_vpmu_context *core2_vpmu_cxt = NULL;
   6.294 +
   6.295 +    if ( !core2_vpmu_msr_common_check(ecx, &type, &index) )
   6.296 +        return 0;
   6.297 +
   6.298 +    msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
   6.299 +    core2_vpmu_cxt = vpmu->context;
   6.300 +    switch ( ecx )
   6.301 +    {
   6.302 +    case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
   6.303 +        core2_vpmu_cxt->global_ovf_status &= ~msr_content;
   6.304 +        return 1;
   6.305 +    case MSR_CORE_PERF_GLOBAL_STATUS:
    6.306 +        dprintk(XENLOG_INFO, "Cannot write read-only MSR "
    6.307 +                "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
   6.308 +        vmx_inject_hw_exception(current, TRAP_gp_fault, 0);
   6.309 +        return 1;
   6.310 +    case MSR_IA32_PEBS_ENABLE:
   6.311 +        if ( msr_content & 1 )
    6.312 +            dprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
    6.313 +                    "which is not supported.\n");
   6.314 +        return 1;
   6.315 +    case MSR_IA32_DS_AREA:
   6.316 +        dprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
   6.317 +        return 1;
   6.318 +    case MSR_CORE_PERF_GLOBAL_CTRL:
   6.319 +        global_ctrl = msr_content;
   6.320 +        for ( i = 0; i < core2_get_pmc_count(); i++ )
   6.321 +        {
   6.322 +            rdmsrl(MSR_P6_EVNTSEL0+i, non_global_ctrl);
   6.323 +            core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i] =
   6.324 +                    global_ctrl & (non_global_ctrl >> 22) & 1;
   6.325 +            global_ctrl >>= 1;
   6.326 +        }
   6.327 +
   6.328 +        rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, non_global_ctrl);
   6.329 +        global_ctrl = msr_content >> 32;
   6.330 +        for ( i = 0; i < 3; i++ )
   6.331 +        {
   6.332 +            core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
   6.333 +                (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
   6.334 +            non_global_ctrl >>= 4;
   6.335 +            global_ctrl >>= 1;
   6.336 +        }
   6.337 +        break;
   6.338 +    case MSR_CORE_PERF_FIXED_CTR_CTRL:
   6.339 +        non_global_ctrl = msr_content;
   6.340 +        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
   6.341 +        global_ctrl >>= 32;
   6.342 +        for ( i = 0; i < 3; i++ )
   6.343 +        {
   6.344 +            core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i] =
   6.345 +                (global_ctrl & 1) & ((non_global_ctrl & 0x3)? 1: 0);
   6.346 +            non_global_ctrl >>= 4;
   6.347 +            global_ctrl >>= 1;
   6.348 +        }
   6.349 +        break;
   6.350 +    default:
   6.351 +        tmp = ecx - MSR_P6_EVNTSEL0;
   6.352 +        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &global_ctrl);
   6.353 +        if ( tmp >= 0 && tmp < core2_get_pmc_count() )
   6.354 +            core2_vpmu_cxt->pmu_enable->arch_pmc_enable[tmp] =
   6.355 +                (global_ctrl >> tmp) & (msr_content >> 22) & 1;
   6.356 +    }
   6.357 +
   6.358 +    for ( i = 0; i < 3; i++ )
   6.359 +        pmu_enable |= core2_vpmu_cxt->pmu_enable->fixed_ctr_enable[i];
   6.360 +    for ( i = 0; i < core2_get_pmc_count(); i++ )
   6.361 +        pmu_enable |= core2_vpmu_cxt->pmu_enable->arch_pmc_enable[i];
   6.362 +    if ( pmu_enable )
   6.363 +        vpmu->flags |= VPMU_RUNNING;
   6.364 +    else
   6.365 +        vpmu->flags &= ~VPMU_RUNNING;
   6.366 +
   6.367 +    /* Setup LVTPC in local apic */
   6.368 +    if ( vpmu->flags & VPMU_RUNNING &&
   6.369 +         is_vlapic_lvtpc_enabled(vcpu_vlapic(v)) )
   6.370 +        apic_write_around(APIC_LVTPC, LVTPC_HVM_PMU);
   6.371 +    else
   6.372 +        apic_write_around(APIC_LVTPC, LVTPC_HVM_PMU | APIC_LVT_MASKED);
   6.373 +
   6.374 +    core2_vpmu_save_msr_context(v, type, index, msr_content);
   6.375 +    if ( type != MSR_TYPE_GLOBAL )
   6.376 +        wrmsrl(ecx, msr_content);
   6.377 +    else
   6.378 +        vmx_write_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
   6.379 +
   6.380 +    return 1;
   6.381 +}
   6.382 +
   6.383 +static int core2_vpmu_do_rdmsr(struct cpu_user_regs *regs)
   6.384 +{
   6.385 +    u64 msr_content = 0;
   6.386 +    int type = -1, index = -1;
   6.387 +    struct vcpu *v = current;
   6.388 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.389 +    struct core2_vpmu_context *core2_vpmu_cxt = NULL;
   6.390 +
   6.391 +    if ( !core2_vpmu_msr_common_check(regs->ecx, &type, &index) )
   6.392 +        return 0;
   6.393 +
   6.394 +    core2_vpmu_cxt = vpmu->context;
   6.395 +    switch ( regs->ecx )
   6.396 +    {
   6.397 +    case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
   6.398 +        break;
   6.399 +    case MSR_CORE_PERF_GLOBAL_STATUS:
   6.400 +        msr_content = core2_vpmu_cxt->global_ovf_status;
   6.401 +        break;
   6.402 +    case MSR_CORE_PERF_GLOBAL_CTRL:
   6.403 +        vmx_read_guest_msr(v, MSR_CORE_PERF_GLOBAL_CTRL, &msr_content);
   6.404 +        break;
   6.405 +    default:
   6.406 +        rdmsrl(regs->ecx, msr_content);
   6.407 +    }
   6.408 +
   6.409 +    regs->eax = msr_content & 0xFFFFFFFF;
   6.410 +    regs->edx = msr_content >> 32;
   6.411 +    return 1;
   6.412 +}
   6.413 +
   6.414 +static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
   6.415 +{
   6.416 +    struct vcpu *v = current;
   6.417 +    u64 msr_content;
   6.418 +    u32 vlapic_lvtpc;
   6.419 +    unsigned char int_vec;
   6.420 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.421 +    struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
   6.422 +    struct vlapic *vlapic = vcpu_vlapic(v);
   6.423 +
   6.424 +    rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, msr_content);
   6.425 +    if ( !msr_content )
   6.426 +        return 0;
   6.427 +    core2_vpmu_cxt->global_ovf_status |= msr_content;
   6.428 +    wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0xC000000700000003);
   6.429 +
   6.430 +    apic_write_around(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
   6.431 +
   6.432 +    if ( !is_vlapic_lvtpc_enabled(vlapic) )
   6.433 +        return 1;
   6.434 +
   6.435 +    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);
   6.436 +    int_vec = vlapic_lvtpc & APIC_VECTOR_MASK;
   6.437 +    vlapic_set_reg(vlapic, APIC_LVTPC, vlapic_lvtpc | APIC_LVT_MASKED);
   6.438 +    if ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) == APIC_MODE_FIXED )
   6.439 +        vlapic_set_irq(vcpu_vlapic(v), int_vec, 0);
   6.440 +    else
   6.441 +        test_and_set_bool(v->nmi_pending);
   6.442 +    return 1;
   6.443 +}
   6.444 +
   6.445 +static void core2_vpmu_initialise(struct vcpu *v)
   6.446 +{
   6.447 +}
   6.448 +
   6.449 +static void core2_vpmu_destroy(struct vcpu *v)
   6.450 +{
   6.451 +    struct vpmu_struct *vpmu = vcpu_vpmu(v);
   6.452 +    struct core2_vpmu_context *core2_vpmu_cxt = vpmu->context;
   6.453 +
    6.454 +    if ( !(vpmu->flags & VPMU_CONTEXT_ALLOCATED) )
   6.455 +        return;
   6.456 +    xfree(core2_vpmu_cxt->pmu_enable);
   6.457 +    xfree(vpmu->context);
   6.458 +    if ( cpu_has_vmx_msr_bitmap )
   6.459 +        core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
   6.460 +    release_pmu_ownship(PMU_OWNER_HVM);
   6.461 +}
   6.462 +
   6.463 +struct arch_vpmu_ops core2_vpmu_ops = {
   6.464 +    .do_wrmsr = core2_vpmu_do_wrmsr,
   6.465 +    .do_rdmsr = core2_vpmu_do_rdmsr,
   6.466 +    .do_interrupt = core2_vpmu_do_interrupt,
   6.467 +    .arch_vpmu_initialise = core2_vpmu_initialise,
   6.468 +    .arch_vpmu_destroy = core2_vpmu_destroy,
   6.469 +    .arch_vpmu_save = core2_vpmu_save,
   6.470 +    .arch_vpmu_load = core2_vpmu_load
   6.471 +};
   6.472 +
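
Everything in vpmu_core2.c is sized from core2_get_pmc_count(), which reads the number of general-purpose counters out of CPUID leaf 0xA (bits 15:8 of EAX), the architectural performance-monitoring enumeration leaf. A standalone sketch of that enumeration, again via GCC's <cpuid.h> (illustration only; field positions per the Intel SDM):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() returns 0 if leaf 0xa is above the maximum basic leaf. */
        if ( !__get_cpuid(0xa, &eax, &ebx, &ecx, &edx) )
            return 1;

        printf("arch PMU version : %u\n", eax & 0xff);
        printf("GP counters      : %u\n", (eax >> 8) & 0xff);  /* core2_get_pmc_count() */
        printf("GP counter width : %u\n", (eax >> 16) & 0xff);
        printf("fixed counters   : %u\n", edx & 0x1f);
        return 0;
    }
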
     7.1 --- a/xen/arch/x86/i8259.c	Wed Jan 30 09:38:10 2008 +0000
     7.2 +++ b/xen/arch/x86/i8259.c	Wed Jan 30 09:59:27 2008 +0000
     7.3 @@ -72,6 +72,7 @@ BUILD_SMP_INTERRUPT(call_function_interr
     7.4  BUILD_SMP_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
     7.5  BUILD_SMP_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
     7.6  BUILD_SMP_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
     7.7 +BUILD_SMP_INTERRUPT(pmu_apic_interrupt,PMU_APIC_VECTOR)
     7.8  BUILD_SMP_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
     7.9  
    7.10  #define IRQ(x,y) \
     8.1 --- a/xen/arch/x86/oprofile/op_model_ppro.c	Wed Jan 30 09:38:10 2008 +0000
     8.2 +++ b/xen/arch/x86/oprofile/op_model_ppro.c	Wed Jan 30 09:59:27 2008 +0000
     8.3 @@ -41,6 +41,7 @@
     8.4  #define CTRL_SET_EVENT(val, e) (val |= e)
     8.5  
     8.6  static unsigned long reset_value[NUM_COUNTERS];
     8.7 +int ppro_has_global_ctrl = 0;
     8.8   
     8.9  static void ppro_fill_in_addresses(struct op_msrs * const msrs)
    8.10  {
    8.11 @@ -134,6 +135,11 @@ static void ppro_start(struct op_msrs co
    8.12  			CTRL_WRITE(low, high, msrs, i);
    8.13  		}
    8.14  	}
     8.15 +    /* The Global Control MSR is enabled by default at system power-on.
    8.16 +     * However, this may not hold true when xenoprof starts to run.
    8.17 +     */
    8.18 +    if ( ppro_has_global_ctrl )
    8.19 +        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, (1<<NUM_COUNTERS) - 1);
    8.20  }
    8.21  
    8.22  
    8.23 @@ -149,6 +155,8 @@ static void ppro_stop(struct op_msrs con
    8.24  		CTRL_SET_INACTIVE(low);
    8.25  		CTRL_WRITE(low, high, msrs, i);
    8.26  	}
    8.27 +    if ( ppro_has_global_ctrl )
    8.28 +        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
    8.29  }
    8.30  
    8.31  
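
ppro_start() and ppro_stop() now also flip MSR_CORE_PERF_GLOBAL_CTRL, whose low bits gate the general-purpose counters (one bit per PMC) and whose bits 32-34 gate the fixed-function counters, which this oprofile model does not use. A small sketch of how such an enable mask is formed, assuming NUM_COUNTERS is 2 as in op_model_ppro.c (illustration only, not changeset code):

    #include <stdint.h>
    #include <stdio.h>

    /* IA32_PERF_GLOBAL_CTRL: bit i enables PMCi, bits 32-34 enable FIXED_CTR0-2. */
    static uint64_t global_ctrl_mask(int num_gp_counters, int enable_fixed)
    {
        uint64_t mask = ((uint64_t)1 << num_gp_counters) - 1;

        if ( enable_fixed )
            mask |= 7ULL << 32;
        return mask;
    }

    int main(void)
    {
        /* ppro_start() writes (1 << NUM_COUNTERS) - 1, i.e. GP counters only. */
        printf("GP only : %#llx\n", (unsigned long long)global_ctrl_mask(2, 0));
        printf("GP+fixed: %#llx\n", (unsigned long long)global_ctrl_mask(2, 1));
        return 0;
    }
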
     9.1 --- a/xen/common/xenoprof.c	Wed Jan 30 09:38:10 2008 +0000
     9.2 +++ b/xen/common/xenoprof.c	Wed Jan 30 09:59:27 2008 +0000
     9.3 @@ -23,6 +23,10 @@
     9.4  /* Lock protecting the following global state */
     9.5  static DEFINE_SPINLOCK(xenoprof_lock);
     9.6  
     9.7 +static DEFINE_SPINLOCK(pmu_owner_lock);
     9.8 +int pmu_owner = 0;
     9.9 +int pmu_hvm_refcount = 0;
    9.10 +
    9.11  static struct domain *active_domains[MAX_OPROF_DOMAINS];
    9.12  static int active_ready[MAX_OPROF_DOMAINS];
    9.13  static unsigned int adomains;
    9.14 @@ -44,6 +48,37 @@ static u64 passive_samples;
    9.15  static u64 idle_samples;
    9.16  static u64 others_samples;
    9.17  
    9.18 +int acquire_pmu_ownership(int pmu_ownship)
    9.19 +{
    9.20 +    spin_lock(&pmu_owner_lock);
    9.21 +    if ( pmu_owner == PMU_OWNER_NONE )
    9.22 +    {
    9.23 +        pmu_owner = pmu_ownship;
    9.24 +        goto out;
    9.25 +    }
    9.26 +
    9.27 +    if ( pmu_owner == pmu_ownship )
    9.28 +        goto out;
    9.29 +
    9.30 +    spin_unlock(&pmu_owner_lock);
    9.31 +    return 0;
    9.32 + out:
    9.33 +    if ( pmu_owner == PMU_OWNER_HVM )
    9.34 +        pmu_hvm_refcount++;
    9.35 +    spin_unlock(&pmu_owner_lock);
    9.36 +    return 1;
    9.37 +}
    9.38 +
    9.39 +void release_pmu_ownship(int pmu_ownship)
    9.40 +{
    9.41 +    spin_lock(&pmu_owner_lock);
    9.42 +    if ( pmu_ownship == PMU_OWNER_HVM )
    9.43 +        pmu_hvm_refcount--;
    9.44 +    if ( !pmu_hvm_refcount )
    9.45 +        pmu_owner = PMU_OWNER_NONE;
    9.46 +    spin_unlock(&pmu_owner_lock);
    9.47 +}
    9.48 +
    9.49  int is_active(struct domain *d)
    9.50  {
    9.51      struct xenoprof *x = d->xenoprof;
    9.52 @@ -649,6 +684,11 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
    9.53          break;
    9.54  
    9.55      case XENOPROF_get_buffer:
    9.56 +        if ( !acquire_pmu_ownership(PMU_OWNER_XENOPROF) )
    9.57 +        {
    9.58 +            ret = -EBUSY;
    9.59 +            break;
    9.60 +        }
    9.61          ret = xenoprof_op_get_buffer(arg);
    9.62          break;
    9.63  
    9.64 @@ -786,6 +826,7 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
    9.65              break;
    9.66          x = current->domain->xenoprof;
    9.67          unshare_xenoprof_page_with_guest(x);
    9.68 +        release_pmu_ownship(PMU_OWNER_XENOPROF);
    9.69          break;
    9.70      }
    9.71  
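
The new pmu_owner/pmu_hvm_refcount pair arbitrates the physical PMU between xenoprof and HVM guests: the first party to claim it wins, HVM claims are reference-counted, and XENOPROF_get_buffer now returns -EBUSY while a guest holds the hardware. A standalone model of that protocol, with a pthread mutex standing in for the spinlock (illustration only, not changeset code):

    #include <assert.h>
    #include <pthread.h>

    #define PMU_OWNER_NONE      0
    #define PMU_OWNER_XENOPROF  1
    #define PMU_OWNER_HVM       2

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int owner = PMU_OWNER_NONE;
    static int hvm_refcount;

    static int acquire(int who)
    {
        int ok = 0;

        pthread_mutex_lock(&lock);
        if ( owner == PMU_OWNER_NONE || owner == who )
        {
            owner = who;
            if ( who == PMU_OWNER_HVM )
                hvm_refcount++;
            ok = 1;
        }
        pthread_mutex_unlock(&lock);
        return ok;
    }

    static void release(int who)
    {
        pthread_mutex_lock(&lock);
        if ( who == PMU_OWNER_HVM )
            hvm_refcount--;
        if ( !hvm_refcount )
            owner = PMU_OWNER_NONE;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        assert(acquire(PMU_OWNER_HVM));       /* first HVM vcpu claims the PMU  */
        assert(acquire(PMU_OWNER_HVM));       /* further HVM vcpus share it     */
        assert(!acquire(PMU_OWNER_XENOPROF)); /* xenoprof is refused meanwhile  */
        release(PMU_OWNER_HVM);
        release(PMU_OWNER_HVM);
        assert(acquire(PMU_OWNER_XENOPROF));  /* free again once refcount drops */
        return 0;
    }
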
    10.1 --- a/xen/include/asm-x86/hvm/hvm.h	Wed Jan 30 09:38:10 2008 +0000
    10.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Wed Jan 30 09:59:27 2008 +0000
    10.3 @@ -119,6 +119,7 @@ struct hvm_function_table {
    10.4      void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
    10.5  
    10.6      int  (*event_pending)(struct vcpu *v);
    10.7 +    int  (*do_pmu_interrupt)(struct cpu_user_regs *regs);
    10.8  
    10.9      int  (*cpu_up)(void);
   10.10      void (*cpu_down)(void);
   10.11 @@ -235,6 +236,11 @@ static inline int hvm_event_pending(stru
   10.12      return hvm_funcs.event_pending(v);
   10.13  }
   10.14  
   10.15 +static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
   10.16 +{
   10.17 +    return hvm_funcs.do_pmu_interrupt(regs);
   10.18 +}
   10.19 +
   10.20  /* These reserved bits in lower 32 remain 0 after any load of CR0 */
   10.21  #define HVM_CR0_GUEST_RESERVED_BITS             \
   10.22      (~((unsigned long)                          \
    11.1 --- a/xen/include/asm-x86/hvm/vlapic.h	Wed Jan 30 09:38:10 2008 +0000
    11.2 +++ b/xen/include/asm-x86/hvm/vlapic.h	Wed Jan 30 09:59:27 2008 +0000
    11.3 @@ -71,6 +71,12 @@ static inline void vlapic_set_reg(
    11.4      *((uint32_t *)(&vlapic->regs->data[reg])) = val;
    11.5  }
    11.6  
    11.7 +static inline int is_vlapic_lvtpc_enabled(struct vlapic *vlapic)
    11.8 +{
    11.9 +    return vlapic_enabled(vlapic) &&
   11.10 +           !(vlapic_get_reg(vlapic, APIC_LVTPC) & APIC_LVT_MASKED);
   11.11 +}
   11.12 +
   11.13  int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig);
   11.14  
   11.15  int vlapic_has_pending_irq(struct vcpu *v);
    12.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jan 30 09:38:10 2008 +0000
    12.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jan 30 09:59:27 2008 +0000
    12.3 @@ -22,6 +22,7 @@
    12.4  #include <asm/config.h>
    12.5  #include <asm/hvm/io.h>
    12.6  #include <asm/hvm/vmx/cpu.h>
    12.7 +#include <asm/hvm/vmx/vpmu.h>
    12.8  
    12.9  #ifdef VMXASSIST
   12.10  #include <public/hvm/vmx_assist.h>
   12.11 @@ -76,6 +77,9 @@ struct arch_vmx_struct {
   12.12      /* Cache of cpu execution control. */
   12.13      u32                  exec_control;
   12.14  
   12.15 +    /* PMU */
   12.16 +    struct vpmu_struct   vpmu;
   12.17 +
   12.18  #ifdef __x86_64__
   12.19      struct vmx_msr_state msr_state;
   12.20      unsigned long        shadow_gs;
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/xen/include/asm-x86/hvm/vmx/vpmu.h	Wed Jan 30 09:59:27 2008 +0000
    13.3 @@ -0,0 +1,83 @@
    13.4 +/*
    13.5 + * vpmu.h: PMU virtualization for HVM domain.
    13.6 + *
    13.7 + * Copyright (c) 2007, Intel Corporation.
    13.8 + *
    13.9 + * This program is free software; you can redistribute it and/or modify it
   13.10 + * under the terms and conditions of the GNU General Public License,
   13.11 + * version 2, as published by the Free Software Foundation.
   13.12 + *
   13.13 + * This program is distributed in the hope it will be useful, but WITHOUT
   13.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   13.16 + * more details.
   13.17 + *
   13.18 + * You should have received a copy of the GNU General Public License along with
   13.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   13.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   13.21 + *
   13.22 + * Author: Haitao Shan <haitao.shan@intel.com>
   13.23 + */
   13.24 +
   13.25 +#ifndef __ASM_X86_HVM_VPMU_H_
   13.26 +#define __ASM_X86_HVM_VPMU_H_
   13.27 +
   13.28 +#define msraddr_to_bitpos(x) (((x)&0xffff) + ((x)>>31)*0x2000)
   13.29 +#define vcpu_vpmu(vcpu)   (&(vcpu)->arch.hvm_vcpu.u.vmx.vpmu)
   13.30 +#define vpmu_vcpu(vpmu)   (container_of((vpmu), struct vcpu, \
   13.31 +                                          arch.hvm_vcpu.u.vmx.vpmu))
   13.32 +#define vpmu_domain(vpmu) (vpmu_vcpu(vpmu)->domain)
   13.33 +
   13.34 +#define MSR_TYPE_COUNTER            0
   13.35 +#define MSR_TYPE_CTRL               1
   13.36 +#define MSR_TYPE_GLOBAL             2
   13.37 +#define MSR_TYPE_ARCH_COUNTER       3
   13.38 +#define MSR_TYPE_ARCH_CTRL          4
   13.39 +
   13.40 +#define LVTPC_HVM_PMU            0xf8
   13.41 +
   13.42 +struct pmumsr {
   13.43 +    unsigned int num;
   13.44 +    u32 *msr;
   13.45 +};
   13.46 +
   13.47 +struct msr_load_store_entry {
   13.48 +    u32 msr_index;
   13.49 +    u32 msr_reserved;
   13.50 +    u64 msr_data;
   13.51 +};
   13.52 +
   13.53 +/* Arch specific operations shared by all vpmus */
   13.54 +struct arch_vpmu_ops {
   13.55 +    int (*do_wrmsr)(struct cpu_user_regs *regs);
   13.56 +    int (*do_rdmsr)(struct cpu_user_regs *regs);
   13.57 +    int (*do_interrupt)(struct cpu_user_regs *regs);
   13.58 +    void (*arch_vpmu_initialise)(struct vcpu *v);
   13.59 +    void (*arch_vpmu_destroy)(struct vcpu *v);
   13.60 +    void (*arch_vpmu_save)(struct vcpu *v);
   13.61 +    void (*arch_vpmu_load)(struct vcpu *v);
   13.62 +};
   13.63 +
   13.64 +struct vpmu_struct {
   13.65 +    u32 flags;
   13.66 +    void *context;
   13.67 +    struct arch_vpmu_ops *arch_vpmu_ops;
   13.68 +};
   13.69 +
   13.70 +#define VPMU_CONTEXT_ALLOCATED              0x1
   13.71 +#define VPMU_CONTEXT_LOADED                 0x2
   13.72 +#define VPMU_RUNNING                        0x4
   13.73 +
   13.74 +int inline vpmu_do_wrmsr(struct cpu_user_regs *regs);
   13.75 +int inline vpmu_do_rdmsr(struct cpu_user_regs *regs);
   13.76 +int inline vpmu_do_interrupt(struct cpu_user_regs *regs);
   13.77 +void inline vpmu_initialise(struct vcpu *v);
   13.78 +void inline vpmu_destroy(struct vcpu *v);
   13.79 +void inline vpmu_save(struct vcpu *v);
   13.80 +void inline vpmu_load(struct vcpu *v);
   13.81 +
   13.82 +extern int acquire_pmu_ownership(int pmu_ownership);
   13.83 +extern void release_pmu_ownership(int pmu_ownership);
   13.84 +
   13.85 +#endif /* __ASM_X86_HVM_VPMU_H_*/
   13.86 +
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/xen/include/asm-x86/hvm/vmx/vpmu_core2.h	Wed Jan 30 09:59:27 2008 +0000
    14.3 @@ -0,0 +1,68 @@
    14.4 +
    14.5 +/*
    14.6 + * vpmu_core2.h: CORE 2 specific PMU virtualization for HVM domain.
    14.7 + *
    14.8 + * Copyright (c) 2007, Intel Corporation.
    14.9 + *
   14.10 + * This program is free software; you can redistribute it and/or modify it
   14.11 + * under the terms and conditions of the GNU General Public License,
   14.12 + * version 2, as published by the Free Software Foundation.
   14.13 + *
   14.14 + * This program is distributed in the hope it will be useful, but WITHOUT
   14.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   14.17 + * more details.
   14.18 + *
   14.19 + * You should have received a copy of the GNU General Public License along with
   14.20 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   14.21 + * Place - Suite 330, Boston, MA 02111-1307 USA.
   14.22 + *
   14.23 + * Author: Haitao Shan <haitao.shan@intel.com>
   14.24 + */
   14.25 +
   14.26 +#ifndef __ASM_X86_HVM_VPMU_CORE_H_
   14.27 +#define __ASM_X86_HVM_VPMU_CORE_H_
   14.28 +
    14.29 +/* Core 2 Non-architectural Performance Counter MSRs. */
   14.30 +u32 core2_counters_msr[] =   {
   14.31 +    MSR_CORE_PERF_FIXED_CTR0,
   14.32 +    MSR_CORE_PERF_FIXED_CTR1,
   14.33 +    MSR_CORE_PERF_FIXED_CTR2};
   14.34 +
    14.35 +/* Core 2 Non-architectural Performance Control MSRs. */
   14.36 +u32 core2_ctrls_msr[] = {
   14.37 +    MSR_CORE_PERF_FIXED_CTR_CTRL,
   14.38 +    MSR_IA32_PEBS_ENABLE,
   14.39 +    MSR_IA32_DS_AREA};
   14.40 +
   14.41 +struct pmumsr core2_counters = {
   14.42 +    3,
   14.43 +    core2_counters_msr
   14.44 +};
   14.45 +
   14.46 +struct pmumsr core2_ctrls = {
   14.47 +    3,
   14.48 +    core2_ctrls_msr
   14.49 +};
   14.50 +
   14.51 +struct arch_msr_pair {
   14.52 +    u64 counter;
   14.53 +    u64 control;
   14.54 +};
   14.55 +
   14.56 +struct core2_pmu_enable {
   14.57 +    char fixed_ctr_enable[3];
   14.58 +    char arch_pmc_enable[1];
   14.59 +};
   14.60 +
   14.61 +struct core2_vpmu_context {
   14.62 +    struct core2_pmu_enable *pmu_enable;
   14.63 +    u64 counters[3];
   14.64 +    u64 ctrls[3];
   14.65 +    u64 global_ovf_status;
   14.66 +    u32 hw_lapic_lvtpc;
   14.67 +    struct arch_msr_pair arch_msr_pair[1];
   14.68 +};
   14.69 +
   14.70 +#endif /* __ASM_X86_HVM_VPMU_CORE_H_ */
   14.71 +
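
core2_pmu_enable and core2_vpmu_context both end in one-element arrays; core2_vpmu_alloc_resource() over-allocates them by (core2_get_pmc_count() - 1) extra entries so the fixed-size header and the per-PMC state live in one block. A minimal sketch of that allocation idiom, with malloc() standing in for xmalloc_bytes() and the struct trimmed down (illustration only):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct arch_msr_pair {
        uint64_t counter;
        uint64_t control;
    };

    /* Trailing one-element array, over-allocated to one entry per arch PMC. */
    struct core2_ctxt_like {
        uint64_t global_ovf_status;
        struct arch_msr_pair arch_msr_pair[1];
    };

    static struct core2_ctxt_like *alloc_ctxt(int pmc_count)
    {
        size_t sz = sizeof(struct core2_ctxt_like) +
                    (pmc_count - 1) * sizeof(struct arch_msr_pair);
        struct core2_ctxt_like *ctxt = malloc(sz);

        if ( ctxt )
            memset(ctxt, 0, sz);   /* mirrors the xmalloc_bytes() + memset() above */
        return ctxt;
    }

    int main(void)
    {
        struct core2_ctxt_like *c = alloc_ctxt(2);   /* e.g. two arch PMCs */

        if ( c )
        {
            c->arch_msr_pair[1].control = 0;   /* last valid slot for two PMCs */
            free(c);
        }
        return 0;
    }
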
    15.1 --- a/xen/include/asm-x86/irq.h	Wed Jan 30 09:38:10 2008 +0000
    15.2 +++ b/xen/include/asm-x86/irq.h	Wed Jan 30 09:59:27 2008 +0000
    15.3 @@ -28,6 +28,7 @@ fastcall void invalidate_interrupt(void)
    15.4  fastcall void call_function_interrupt(void);
    15.5  fastcall void apic_timer_interrupt(void);
    15.6  fastcall void error_interrupt(void);
    15.7 +fastcall void pmu_apic_interrupt(void);
    15.8  fastcall void spurious_interrupt(void);
    15.9  fastcall void thermal_interrupt(void);
   15.10  
    16.1 --- a/xen/include/asm-x86/mach-default/irq_vectors.h	Wed Jan 30 09:38:10 2008 +0000
    16.2 +++ b/xen/include/asm-x86/mach-default/irq_vectors.h	Wed Jan 30 09:59:27 2008 +0000
    16.3 @@ -9,13 +9,14 @@
    16.4  #define CALL_FUNCTION_VECTOR	0xfb
    16.5  #define THERMAL_APIC_VECTOR	0xfa
    16.6  #define LOCAL_TIMER_VECTOR	0xf9
    16.7 +#define PMU_APIC_VECTOR 	0xf8
    16.8  
    16.9  /*
   16.10   * High-priority dynamically-allocated vectors. For interrupts that
   16.11   * must be higher priority than any guest-bound interrupt.
   16.12   */
   16.13  #define FIRST_HIPRIORITY_VECTOR	0xf0
   16.14 -#define LAST_HIPRIORITY_VECTOR  0xf8
   16.15 +#define LAST_HIPRIORITY_VECTOR  0xf7
   16.16  
   16.17  /* Legacy PIC uses vectors 0xe0-0xef. */
   16.18  #define FIRST_LEGACY_VECTOR	0xe0
    17.1 --- a/xen/include/xen/xenoprof.h	Wed Jan 30 09:38:10 2008 +0000
    17.2 +++ b/xen/include/xen/xenoprof.h	Wed Jan 30 09:59:27 2008 +0000
    17.3 @@ -69,4 +69,10 @@ int do_xenoprof_op(int op, XEN_GUEST_HAN
    17.4  int xenoprof_add_trace(struct domain *d, struct vcpu *v, 
    17.5                         unsigned long eip, int mode);
    17.6  
    17.7 +#define PMU_OWNER_NONE          0
    17.8 +#define PMU_OWNER_XENOPROF      1
    17.9 +#define PMU_OWNER_HVM           2
    17.10 +int acquire_pmu_ownership(int pmu_ownership);
   17.11 +void release_pmu_ownship(int pmu_ownership);
   17.12 +
   17.13  #endif  /* __XEN__XENOPROF_H__ */