uint32_t vcpu,
xc_vcpuinfo_t *info);
+typedef struct xen_domctl_vcpu_msr xc_vcpumsr_t;
+int xc_vcpu_rdmsr_list(xc_interface *xch, uint32_t domid, uint32_t vcpu,
+                       uint32_t count, xc_vcpumsr_t *msrs);
+int xc_vcpu_wrmsr_list(xc_interface *xch, uint32_t domid, uint32_t vcpu,
+                       uint32_t count, xc_vcpumsr_t *msrs);
+
long long xc_domain_get_cpu_usage(xc_interface *xch,
uint32_t domid,
int vcpu);
domctl.domain = domid;
return do_domctl(xch, &domctl);
}
+
+int xc_vcpu_rdmsr_list(xc_interface *xch, uint32_t domid, uint32_t vcpu,
+                       uint32_t count, xc_vcpumsr_t *msrs)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(msrs, count * sizeof(xc_vcpumsr_t),
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( !count || !msrs )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_rdmsr_list;
+    domctl.domain = domid;
+    domctl.u.vcpu_msrs.vcpu = vcpu;
+    domctl.u.vcpu_msrs.msr_count = count;
+
+    if ( xc_hypercall_bounce_pre(xch, msrs) )
+        return -1;
+
+    set_xen_guest_handle(domctl.u.vcpu_msrs.msrs, msrs);
+    rc = do_domctl(xch, &domctl);
+    xc_hypercall_bounce_post(xch, msrs);
+
+    return rc;
+}
+
+int xc_vcpu_wrmsr_list(xc_interface *xch, uint32_t domid, uint32_t vcpu,
+                       uint32_t count, xc_vcpumsr_t *msrs)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(msrs, count * sizeof(xc_vcpumsr_t),
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( !count || !msrs )
+        return -1;
+
+    domctl.cmd = XEN_DOMCTL_wrmsr_list;
+    domctl.domain = domid;
+    domctl.u.vcpu_msrs.vcpu = vcpu;
+    domctl.u.vcpu_msrs.msr_count = count;
+
+    if ( xc_hypercall_bounce_pre(xch, msrs) )
+        return -1;
+
+    set_xen_guest_handle(domctl.u.vcpu_msrs.msrs, msrs);
+    rc = do_domctl(xch, &domctl);
+    xc_hypercall_bounce_post(xch, msrs);
+
+    return rc;
+}
+
/*
* Local variables:
* mode: C
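
A caller-side sketch of the new wrappers (illustrative only, not part of the
patch): domain 1, vCPU 0 and the two MSR indices are assumptions, while the
'index'/'value' fields come from struct xen_domctl_vcpu_msr.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <xenctrl.h>

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        /* MSR indices to read; 'value' is filled in on success. */
        xc_vcpumsr_t msrs[2] = {
            { .index = 0x10 }, /* IA32_TIME_STAMP_COUNTER */
            { .index = 0xc1 }, /* IA32_PMC0 */
        };

        if ( !xch )
            return 1;

        if ( xc_vcpu_rdmsr_list(xch, 1 /* domid */, 0 /* vcpu */, 2, msrs) )
            fprintf(stderr, "rdmsr_list failed, errno %d\n", errno);
        else
            printf("TSC=%#" PRIx64 " PMC0=%#" PRIx64 "\n",
                   msrs[0].value, msrs[1].value);

        xc_interface_close(xch);
        return 0;
    }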
apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
-int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, bool is_write)
+int vpmu_do_msr(struct vcpu *v, unsigned int msr, uint64_t *msr_content,
+                bool is_write)
 {
-    struct vcpu *curr = current;
     struct vpmu_struct *vpmu;
     int ret = 0;
*/
     if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
          ((vpmu_mode & XENPMU_MODE_ALL) &&
-          !is_hardware_domain(curr->domain)) )
+          !is_hardware_domain(v->domain)) )
         goto nop;
-    vpmu = vcpu_vpmu(curr);
+    vpmu = vcpu_vpmu(v);
     if ( !vpmu_is_set(vpmu, VPMU_INITIALIZED) )
         goto nop;
      * and since do_wr/rdmsr may load VPMU context we should save
      * (and unload) it again.
      */
-    if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
+    if ( !has_vlapic(v->domain) && vpmu->xenpmu_data &&
          vpmu_is_set(vpmu, VPMU_CACHED) )
     {
         vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
-static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static int cf_check amd_vpmu_do_wrmsr(struct vcpu *v, unsigned int msr,
+                                      uint64_t msr_content)
 {
-    struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int idx = 0;
     int type = get_pmu_reg_type(msr, &idx);
 
+    ASSERT(v == current);
+
     if ( (type == MSR_TYPE_CTRL ) &&
          ((msr_content & CTRL_RSVD_MASK) != ctrl_rsvd[idx]) )
         return -EINVAL;
return 0;
}
-static int cf_check amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check amd_vpmu_do_rdmsr(struct vcpu *v, unsigned int msr,
+                                      uint64_t *msr_content)
 {
-    struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
+    ASSERT(v == current);
+
     if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
          || vpmu_is_set(vpmu, VPMU_FROZEN) )
     {
return 0;
}
+static int cf_check amd_get_msr(struct vcpu *v, unsigned int msr,
+                                uint64_t *val)
+{
+    /* TODO: implement if an external tool needs access to these MSRs. */
+    return -ENOSYS;
+}
+
#ifdef CONFIG_MEM_SHARING
static int cf_check amd_allocate_context(struct vcpu *v)
{
.arch_vpmu_save = amd_vpmu_save,
.arch_vpmu_load = amd_vpmu_load,
.arch_vpmu_dump = amd_vpmu_dump,
+ .get_msr = amd_get_msr,
#ifdef CONFIG_MEM_SHARING
.allocate_context = amd_allocate_context,
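
Commentary: amd_get_msr() above deliberately returns -ENOSYS. Should the TODO
ever be picked up, one possible shape, mirroring the Intel-side
get_saved_msr() and reusing this file's existing
get_pmu_reg_type()/vpmu_reg_pointer() helpers, is sketched below; an untested
outline, not part of the patch.

    static int cf_check amd_get_msr(struct vcpu *v, unsigned int msr,
                                    uint64_t *val)
    {
        struct vpmu_struct *vpmu = vcpu_vpmu(v);
        struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
        uint64_t *counter_regs = vpmu_reg_pointer(ctxt, counters);
        uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
        unsigned int idx;

        /* Serve reads from the saved context rather than live hardware. */
        switch ( get_pmu_reg_type(msr, &idx) )
        {
        case MSR_TYPE_COUNTER:
            *val = counter_regs[idx];
            return 0;
        case MSR_TYPE_CTRL:
            *val = ctrl_regs[idx];
            return 0;
        default:
            return -EINVAL;
        }
    }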
return 1;
}
-static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static int cf_check core2_vpmu_do_wrmsr(struct vcpu *v, unsigned int msr,
+                                        uint64_t msr_content)
 {
     int i, tmp;
     int type = -1, index = -1;
-    struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct xen_pmu_intel_ctxt *core2_vpmu_cxt;
     uint64_t *enabled_cntrs;
 
+    if ( v != current )
+    {
+        gdprintk(XENLOG_INFO, "Not yet implemented\n");
+        return -ENOSYS;
+    }
+
     if ( !core2_vpmu_msr_common_check(msr, &type, &index) )
         return -EINVAL;
return 0;
}
-static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check get_saved_msr(struct vcpu *v, unsigned int msr,
+                                  uint64_t *val)
+{
+    int type, index, ret = 0;
+    struct vpmu_struct *vpmu = vcpu_vpmu(v);
+    struct xen_pmu_intel_ctxt *core2_vpmu_cxt = vpmu->context;
+    uint64_t *fixed_counters = vpmu_reg_pointer(core2_vpmu_cxt, fixed_counters);
+    struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
+        vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
+
+    if ( !is_core2_vpmu_msr(msr, &type, &index) )
+        return -EINVAL;
+
+    if ( msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL )
+        *val = core2_vpmu_cxt->global_ovf_ctrl;
+    else if ( msr == MSR_CORE_PERF_GLOBAL_STATUS )
+        *val = core2_vpmu_cxt->global_status;
+    else if ( msr == MSR_CORE_PERF_GLOBAL_CTRL )
+        *val = core2_vpmu_cxt->global_ctrl;
+    else if ( msr >= MSR_CORE_PERF_FIXED_CTR0 &&
+              msr < MSR_CORE_PERF_FIXED_CTR0 + fixed_pmc_cnt )
+        *val = fixed_counters[msr - MSR_CORE_PERF_FIXED_CTR0];
+    else if ( msr >= MSR_P6_PERFCTR(0) && msr < MSR_P6_PERFCTR(arch_pmc_cnt) )
+        *val = xen_pmu_cntr_pair[msr - MSR_P6_PERFCTR(0)].counter;
+    else if ( msr >= MSR_P6_EVNTSEL(0) && msr < MSR_P6_EVNTSEL(arch_pmc_cnt) )
+        *val = xen_pmu_cntr_pair[msr - MSR_P6_EVNTSEL(0)].control;
+    else
+        ret = -EINVAL;
+
+    return ret;
+}
+
+static int cf_check core2_vpmu_do_rdmsr(struct vcpu *v, unsigned int msr,
+                                        uint64_t *msr_content)
 {
     int type = -1, index = -1;
-    struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct xen_pmu_intel_ctxt *core2_vpmu_cxt;
 
+    if ( v != current )
+        return get_saved_msr(v, msr, msr_content);
+
if ( core2_vpmu_msr_common_check(msr, &type, &index) )
{
core2_vpmu_cxt = vpmu->context;
break;
}
+    case XEN_DOMCTL_rdmsr_list:
+    case XEN_DOMCTL_wrmsr_list:
+    {
+        struct xen_domctl_vcpu_msrs *vmsrs = &domctl->u.vcpu_msrs;
+        struct xen_domctl_vcpu_msr msr = {};
+        struct vcpu *v;
+        unsigned int i;
+
+        ret = -ESRCH;
+        if ( (vmsrs->vcpu >= d->max_vcpus) ||
+             ((v = d->vcpu[vmsrs->vcpu]) == NULL) )
+            break;
+
+        ret = -EINVAL;
+        if ( v == curr ) /* no vcpu_pause() */
+            break;
+
+        if ( !vmsrs->msr_count || guest_handle_is_null(vmsrs->msrs) )
+            break;
+
+        vcpu_pause(v);
+
+        ret = 0;
+        for ( i = 0; i < vmsrs->msr_count; i++ )
+        {
+            int rc;
+
+            if ( copy_from_guest_offset(&msr, vmsrs->msrs, i, 1) )
+            {
+                ret = -EFAULT;
+                break;
+            }
+
+            if ( domctl->cmd == XEN_DOMCTL_rdmsr_list )
+                rc = guest_rdmsr(v, msr.index, &msr.value);
+            else
+                rc = guest_wrmsr(v, msr.index, msr.value);
+
+            if ( rc != X86EMUL_OKAY )
+            {
+                ret = -EINVAL;
+                vmsrs->msr_count = i;
+                copyback = true;
+                break;
+            }
+
+            if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+            {
+                ret = -EFAULT;
+                break;
+            }
+        }
+
+        vcpu_unpause(v);
+        break;
+    }
+
case XEN_DOMCTL_get_vcpu_msrs:
case XEN_DOMCTL_set_vcpu_msrs:
{
case MSR_AMD_FAM15H_EVNTSEL3:
case MSR_AMD_FAM15H_EVNTSEL4:
case MSR_AMD_FAM15H_EVNTSEL5:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_rdmsr(v, msr, msr_content) )
goto gpf;
break;
case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
case MSR_IA32_PEBS_ENABLE:
case MSR_IA32_DS_AREA:
- if ( vpmu_do_rdmsr(msr, msr_content) )
+ if ( vpmu_do_rdmsr(curr, msr, msr_content) )
goto gp_fault;
break;
/* Arch specific operations shared by all vpmus */
struct arch_vpmu_ops {
int (*initialise)(struct vcpu *v);
-    int (*do_wrmsr)(unsigned int msr, uint64_t msr_content);
-    int (*do_rdmsr)(unsigned int msr, uint64_t *msr_content);
+    int (*do_wrmsr)(struct vcpu *v, unsigned int msr, uint64_t msr_content);
+    int (*do_rdmsr)(struct vcpu *v, unsigned int msr, uint64_t *msr_content);
int (*do_interrupt)(struct cpu_user_regs *regs);
void (*arch_vpmu_destroy)(struct vcpu *v);
int (*arch_vpmu_save)(struct vcpu *v, bool_t to_guest);
}
void vpmu_lvtpc_update(uint32_t val);
-int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, bool is_write);
+int vpmu_do_msr(struct vcpu *v, unsigned int msr, uint64_t *msr_content,
+                bool is_write);
void vpmu_do_interrupt(struct cpu_user_regs *regs);
void vpmu_initialise(struct vcpu *v);
void vpmu_destroy(struct vcpu *v);
int vpmu_load(struct vcpu *v, bool_t from_guest);
void vpmu_dump(struct vcpu *v);
-static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static inline int vpmu_do_wrmsr(struct vcpu *v, unsigned int msr,
+                                uint64_t msr_content)
 {
-    return vpmu_do_msr(msr, &msr_content, true /* write */);
+    return vpmu_do_msr(v, msr, &msr_content, true /* write */);
 }
-static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static inline int vpmu_do_rdmsr(struct vcpu *v, unsigned int msr,
+                                uint64_t *msr_content)
 {
-    return vpmu_do_msr(msr, msr_content, false /* read */);
+    return vpmu_do_msr(v, msr, msr_content, false /* read */);
 }
extern unsigned int vpmu_mode;
case MSR_INTEL_CORE_THREAD_COUNT:
*/
default:
+        /* TODO: vPMU MSRs for external tools should get wired up here. */
return X86EMUL_UNHANDLEABLE;
}
if ( vpmu_msr || (boot_cpu_data.x86_vendor &
(X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
{
- if ( vpmu_do_rdmsr(reg, val) )
+ if ( vpmu_do_rdmsr(curr, reg, val) )
break;
return X86EMUL_OKAY;
}
* Output for set:
* - If Xen encounters an error with a specific MSR, -EINVAL shall be returned
* and 'msr_count' shall be set to the offending index, to aid debugging.
+ *
+ */
+
+/*
+ * XEN_DOMCTL_rdmsr_list / XEN_DOMCTL_wrmsr_list.
+ *
+ * Input:
+ * - 'msr_count' is the number of entries in 'msrs'.
+ *
+ * Output:
+ * - If Xen encounters an error with a specific MSR, including an MSR that is
+ *   unused by the guest, -EINVAL shall be returned and 'msr_count' shall be
+ *   set to the offending index, to aid debugging. Entries after the
+ *   offending MSR are left unprocessed.
+ *
*/
struct xen_domctl_vcpu_msrs {
uint32_t vcpu; /* IN */
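
Because entries past the offending index are never touched, a failing MSR can
be isolated by retrying with one-entry lists. A hypothetical helper on top of
the new libxc wrapper (probe_msrs() is illustrative, not part of the patch):

    /* Returns how many entries were read before the first failing MSR. */
    static uint32_t probe_msrs(xc_interface *xch, uint32_t domid,
                               uint32_t vcpu, xc_vcpumsr_t *msrs,
                               uint32_t count)
    {
        uint32_t i;

        for ( i = 0; i < count; i++ )
            /* A one-entry list turns the batch call into a per-MSR probe. */
            if ( xc_vcpu_rdmsr_list(xch, domid, vcpu, 1, &msrs[i]) )
                break;

        return i;
    }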
#define XEN_DOMCTL_get_cpu_policy 82
#define XEN_DOMCTL_set_cpu_policy 83
#define XEN_DOMCTL_vmtrace_op 84
+#define XEN_DOMCTL_rdmsr_list 85
+#define XEN_DOMCTL_wrmsr_list 86
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002