xen/x86: Make XEN_DOMCTL_get_vcpu_msrs more configurable
author     Tamas K Lengyel <tamas.lengyel@intel.com>
           Thu, 22 Sep 2022 18:49:55 +0000 (18:49 +0000)
committer  Tamas K Lengyel <tamas.lengyel@intel.com>
           Fri, 21 Oct 2022 13:55:35 +0000 (09:55 -0400)
Currently XEN_DOMCTL_get_vcpu_msrs is only capable of gathering a handful
of predetermined vCPU MSRs. In our use case an external privileged tool
needs to gather the vPMU MSRs, so extend the domctl to allow querying any
guest MSR. To remain compatible with the existing setup, the default list
is returned if no specific MSR is requested via the domctl.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
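
A minimal caller sketch (not part of the commit, added for illustration): it
assumes the pre-existing domctl behaviour of reporting the available MSR count
when passed a NULL buffer, and the helper name dump_vcpu_msrs is hypothetical;
xc_vcpu_get_msrs and xc_vcpumsr_t are the interfaces introduced by this patch.

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Hypothetical helper: dump a vCPU's default MSR list via the new wrapper. */
    static int dump_vcpu_msrs(xc_interface *xch, uint32_t domid, uint32_t vcpu)
    {
        int i, nr;
        xc_vcpumsr_t *msrs;

        /* With a NULL buffer the wrapper only reports how many entries Xen may fill. */
        nr = xc_vcpu_get_msrs(xch, domid, vcpu, 0, NULL);
        if ( nr < 0 )
            return nr;

        /*
         * Zeroed indices request Xen's default msrs_to_send[] entries; set
         * msrs[i].index to a specific MSR (e.g. a vPMU MSR) to query it instead.
         */
        msrs = calloc(nr, sizeof(*msrs));
        if ( !msrs )
            return -1;

        if ( xc_vcpu_get_msrs(xch, domid, vcpu, nr, msrs) )
        {
            free(msrs);
            return -1;
        }

        /* Empty MSRs are skipped by Xen, so trailing entries may remain zero. */
        for ( i = 0; i < nr; i++ )
            printf("MSR 0x%08x = 0x%016" PRIx64 "\n", msrs[i].index, msrs[i].value);

        free(msrs);
        return 0;
    }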
tools/include/xenctrl.h
tools/libs/ctrl/xc_domain.c
tools/libs/guest/xg_sr_save_x86_pv.c
xen/arch/x86/cpu/vpmu.c
xen/arch/x86/cpu/vpmu_amd.c
xen/arch/x86/cpu/vpmu_intel.c
xen/arch/x86/domctl.c
xen/arch/x86/include/asm/vpmu.h

diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
index 0c8b4c3aa7a5f639c314e14dfb40e75782186e25..04244213bf5b995dbc330bf6c90018725babbccd 100644
@@ -872,6 +872,10 @@ int xc_vcpu_getinfo(xc_interface *xch,
                     uint32_t vcpu,
                     xc_vcpuinfo_t *info);
 
+typedef struct xen_domctl_vcpu_msr xc_vcpumsr_t;
+int xc_vcpu_get_msrs(xc_interface *xch, uint32_t domid, uint32_t vcpu,
+                     uint32_t count, xc_vcpumsr_t *msrs);
+
 long long xc_domain_get_cpu_usage(xc_interface *xch,
                                   uint32_t domid,
                                   int vcpu);
diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
index 14c0420c35be5a0a9a2776a7007d413b272a6050..d3a7e1fea6e0decc3e4564ba5d3c54fdefb051ad 100644
@@ -2201,6 +2201,41 @@ int xc_domain_soft_reset(xc_interface *xch,
     domctl.domain = domid;
     return do_domctl(xch, &domctl);
 }
+
+int xc_vcpu_get_msrs(xc_interface *xch, uint32_t domid, uint32_t vcpu,
+                     uint32_t count, xc_vcpumsr_t *msrs)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    domctl.cmd = XEN_DOMCTL_get_vcpu_msrs;
+    domctl.domain = domid;
+    domctl.u.vcpu_msrs.vcpu = vcpu;
+    domctl.u.vcpu_msrs.msr_count = count;
+
+    if ( !msrs )
+    {
+        if ( (rc = xc_domctl(xch, &domctl)) < 0 )
+            return rc;
+
+        return domctl.u.vcpu_msrs.msr_count;
+    }
+    else
+    {
+        DECLARE_HYPERCALL_BOUNCE(msrs, count * sizeof(xc_vcpumsr_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+        if ( xc_hypercall_bounce_pre(xch, msrs) )
+            return -1;
+
+        set_xen_guest_handle(domctl.u.vcpu_msrs.msrs, msrs);
+
+        rc = do_domctl(xch, &domctl);
+
+        xc_hypercall_bounce_post(xch, msrs);
+
+        return rc;
+    }
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libs/guest/xg_sr_save_x86_pv.c b/tools/libs/guest/xg_sr_save_x86_pv.c
index 4964f1f7b8e1ef903191ada130df6b6fd35fa5c1..7ac313bf3f11b9b152c1af40410ef4ef9f8f3c3b 100644
@@ -719,6 +719,8 @@ static int write_one_vcpu_msrs(struct xc_sr_context *ctx, uint32_t id)
         goto err;
     }
 
+    memset(buffer, 0, buffersz);
+
     set_xen_guest_handle(domctl.u.vcpu_msrs.msrs, buffer);
     if ( xc_domctl(xch, &domctl) < 0 )
     {
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 64cdbfc48c4b67b5b41cb1daf8100d88799891b3..438dfbe196f4bb9c8e8781e6d7d206cc143335aa 100644
@@ -651,6 +651,16 @@ void vpmu_dump(struct vcpu *v)
         alternative_vcall(vpmu_ops.arch_vpmu_dump, v);
 }
 
+int vpmu_get_msr(struct vcpu *v, unsigned int msr, uint64_t *val)
+{
+    ASSERT(v != current);
+
+    if ( !vpmu_is_set(vcpu_vpmu(v), VPMU_CONTEXT_ALLOCATED) )
+        return -EOPNOTSUPP;
+
+    return alternative_call(vpmu_ops.get_msr, v, msr, val);
+}
+
 long do_xenpmu_op(
     unsigned int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
 {
diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 58794a16f06cd4041935922a51eb813a364a5d14..75bd68e5411795713104b467bf04432104d230ef 100644
@@ -518,6 +518,12 @@ static int cf_check svm_vpmu_initialise(struct vcpu *v)
     return 0;
 }
 
+static int cf_check amd_get_msr(struct vcpu *v, unsigned int msr, uint64_t *val)
+{
+    /* TODO in case an external tool needs access to these MSRs */
+    return -ENOSYS;
+}
+
 #ifdef CONFIG_MEM_SHARING
 static int cf_check amd_allocate_context(struct vcpu *v)
 {
@@ -535,6 +541,7 @@ static const struct arch_vpmu_ops __initconst_cf_clobber amd_vpmu_ops = {
     .arch_vpmu_save = amd_vpmu_save,
     .arch_vpmu_load = amd_vpmu_load,
     .arch_vpmu_dump = amd_vpmu_dump,
+    .get_msr = amd_get_msr,
 
 #ifdef CONFIG_MEM_SHARING
     .allocate_context = amd_allocate_context,
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index b91d818be0b6a81b63b1c01c44e4f1011820bf6b..b4b6ecfb158c5e61b730525e283de56259060029 100644
@@ -898,6 +898,42 @@ static int cf_check vmx_vpmu_initialise(struct vcpu *v)
     return 0;
 }
 
+static int cf_check core2_vpmu_get_msr(struct vcpu *v, unsigned int msr,
+                                       uint64_t *val)
+{
+    int type, index, ret = 0;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vpmu->context;
+    uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
+    struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
+        vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
+
+    if ( !is_core2_vpmu_msr(msr, &type, &index) )
+        return -EINVAL;
+
+    vcpu_pause(v);
+
+    if ( msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL )
+        *val = core2_vpmu_cxt->global_ovf_ctrl;
+    else if ( msr == MSR_CORE_PERF_GLOBAL_STATUS )
+        *val = core2_vpmu_cxt->global_status;
+    else if ( msr == MSR_CORE_PERF_GLOBAL_CTRL )
+        *val = core2_vpmu_cxt->global_ctrl;
+    else if ( msr >= MSR_CORE_PERF_FIXED_CTR0 &&
+              msr < MSR_CORE_PERF_FIXED_CTR0 + fixed_pmc_cnt )
+        *val = fixed_counters[msr - MSR_CORE_PERF_FIXED_CTR0];
+    else if ( msr >= MSR_P6_PERFCTR(0) && msr < MSR_P6_PERFCTR(arch_pmc_cnt) )
+        *val = xen_pmu_cntr_pair[msr - MSR_P6_PERFCTR(0)].counter;
+    else if ( msr >= MSR_P6_EVNTSEL(0) && msr < MSR_P6_EVNTSEL(arch_pmc_cnt) )
+        *val = xen_pmu_cntr_pair[msr - MSR_P6_EVNTSEL(0)].control;
+    else
+        ret = -EINVAL;
+
+    vcpu_unpause(v);
+
+    return ret;
+}
+
 static const struct arch_vpmu_ops __initconst_cf_clobber core2_vpmu_ops = {
     .initialise = vmx_vpmu_initialise,
     .do_wrmsr = core2_vpmu_do_wrmsr,
@@ -907,6 +943,7 @@ static const struct arch_vpmu_ops __initconst_cf_clobber core2_vpmu_ops = {
     .arch_vpmu_save = core2_vpmu_save,
     .arch_vpmu_load = core2_vpmu_load,
     .arch_vpmu_dump = core2_vpmu_dump,
+    .get_msr = core2_vpmu_get_msr,
 
 #ifdef CONFIG_MEM_SHARING
     .allocate_context = core2_vpmu_alloc_resource,
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index e9bfbc57a794ee139c3b5761378f15eb20cb1c8e..c481aa857557d9d9807b627d102d65e44e936366 100644
@@ -1104,8 +1104,7 @@ long arch_do_domctl(
             break;
 
         ret = -EINVAL;
-        if ( (v == curr) || /* no vcpu_pause() */
-             !is_pv_domain(d) )
+        if ( v == curr )
             break;
 
         /* Count maximum number of optional msrs. */
@@ -1127,36 +1126,48 @@ long arch_do_domctl(
 
                 vcpu_pause(v);
 
-                for ( j = 0; j < ARRAY_SIZE(msrs_to_send); ++j )
+                for ( j = 0; j < ARRAY_SIZE(msrs_to_send) && i < vmsrs->msr_count; ++j )
                 {
                     uint64_t val;
-                    int rc = guest_rdmsr(v, msrs_to_send[j], &val);
+                    int rc;
+
+                    if ( copy_from_guest_offset(&msr, vmsrs->msrs, i, 1) )
+                    {
+                        ret = -EFAULT;
+                        break;
+                    }
+
+                    msr.index = msr.index ?: msrs_to_send[j];
+
+                    rc = guest_rdmsr(v, msr.index, &val);
 
                     /*
                      * It is the programmers responsibility to ensure that
-                     * msrs_to_send[] contain generally-read/write MSRs.
+                     * the msr requested contain generally-read/write MSRs.
                      * X86EMUL_EXCEPTION here implies a missing feature, and
                      * that the guest doesn't have access to the MSR.
                      */
                     if ( rc == X86EMUL_EXCEPTION )
                         continue;
+                    if ( rc == X86EMUL_UNHANDLEABLE )
+                        ret = vpmu_get_msr(v, msr.index, &val);
+                    else
+                        ret = (rc == X86EMUL_OKAY) ? 0 : -ENXIO;
 
-                    if ( rc != X86EMUL_OKAY )
+                    if ( ret )
                     {
                         ASSERT_UNREACHABLE();
-                        ret = -ENXIO;
                         break;
                     }
 
                     if ( !val )
                         continue; /* Skip empty MSRs. */
 
-                    if ( i < vmsrs->msr_count && !ret )
+                    msr.value = val;
+                    if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
                     {
-                        msr.index = msrs_to_send[j];
-                        msr.value = val;
-                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
-                            ret = -EFAULT;
+                        ret = -EFAULT;
+                        break;
                     }
                     ++i;
                 }
diff --git a/xen/arch/x86/include/asm/vpmu.h b/xen/arch/x86/include/asm/vpmu.h
index 05e1fbfccfcf9f592108a8db70f7955e52129cca..2fcf570b25bbf552049c59b82967ef84a0b9769e 100644
@@ -47,6 +47,7 @@ struct arch_vpmu_ops {
     int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
     int (*arch_vpmu_load)(struct vcpu *v, bool_t from_guest);
     void (*arch_vpmu_dump)(const struct vcpu *);
+    int (*get_msr)(struct vcpu *v, unsigned int msr, uint64_t *val);
 
 #ifdef CONFIG_MEM_SHARING
     int (*allocate_context)(struct vcpu *v);
@@ -117,6 +118,7 @@ void vpmu_save(struct vcpu *v);
 void cf_check vpmu_save_force(void *arg);
 int vpmu_load(struct vcpu *v, bool_t from_guest);
 void vpmu_dump(struct vcpu *v);
+int vpmu_get_msr(struct vcpu *v, unsigned int msr, uint64_t *val);
 
 static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {