int xc_get_domain_cpu_policy(xc_interface *xch, uint32_t domid,
uint32_t *nr_leaves, xen_cpuid_leaf_t *leaves,
uint32_t *nr_msrs, xen_msr_entry_t *msrs);
+int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+ uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
+ uint32_t nr_msrs, xen_msr_entry_t *msrs,
+ uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
+ uint32_t *err_msr_p);
uint32_t xc_get_cpu_featureset_size(void);
return ret;
}
+int xc_set_domain_cpu_policy(xc_interface *xch, uint32_t domid,
+ uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
+ uint32_t nr_msrs, xen_msr_entry_t *msrs,
+ uint32_t *err_leaf_p, uint32_t *err_subleaf_p,
+ uint32_t *err_msr_p)
+{
+ DECLARE_DOMCTL;
+ DECLARE_HYPERCALL_BOUNCE(leaves,
+ nr_leaves * sizeof(*leaves),
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ DECLARE_HYPERCALL_BOUNCE(msrs,
+ nr_msrs * sizeof(*msrs),
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+ int ret;
+
+ if ( err_leaf_p )
+ *err_leaf_p = -1;
+ if ( err_subleaf_p )
+ *err_subleaf_p = -1;
+ if ( err_msr_p )
+ *err_msr_p = -1;
+
+ if ( xc_hypercall_bounce_pre(xch, leaves) )
+ return -1;
+
+    if ( xc_hypercall_bounce_pre(xch, msrs) )
+    {
+        xc_hypercall_bounce_post(xch, leaves);
+        return -1;
+    }
+
+ domctl.cmd = XEN_DOMCTL_set_cpu_policy;
+ domctl.domain = domid;
+ domctl.u.cpu_policy.nr_leaves = nr_leaves;
+ set_xen_guest_handle(domctl.u.cpu_policy.cpuid_policy, leaves);
+ domctl.u.cpu_policy.nr_msrs = nr_msrs;
+ set_xen_guest_handle(domctl.u.cpu_policy.msr_policy, msrs);
+ domctl.u.cpu_policy.err_leaf = -1;
+ domctl.u.cpu_policy.err_subleaf = -1;
+ domctl.u.cpu_policy.err_msr = -1;
+
+ ret = do_domctl(xch, &domctl);
+
+ xc_hypercall_bounce_post(xch, leaves);
+ xc_hypercall_bounce_post(xch, msrs);
+
+ if ( err_leaf_p )
+ *err_leaf_p = domctl.u.cpu_policy.err_leaf;
+ if ( err_subleaf_p )
+ *err_subleaf_p = domctl.u.cpu_policy.err_subleaf;
+ if ( err_msr_p )
+ *err_msr_p = domctl.u.cpu_policy.err_msr;
+
+ return ret;
+}
+
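As an illustrative sketch (not part of this patch): a toolstack caller would
typically fetch the current policy with xc_get_domain_cpu_policy(), adjust it,
and hand it back via the new interface.  The helper below is hypothetical;
only the xc_* calls and types are from the patch.

    /* Hypothetical caller sketch.  Assumes #include <xenctrl.h> and
     * <stdio.h>, and that 'leaves'/'msrs' were obtained from a prior
     * xc_get_domain_cpu_policy() call and modified by the toolstack. */
    static int apply_policy(xc_interface *xch, uint32_t domid,
                            uint32_t nr_leaves, xen_cpuid_leaf_t *leaves,
                            uint32_t nr_msrs, xen_msr_entry_t *msrs)
    {
        uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
        int rc = xc_set_domain_cpu_policy(xch, domid, nr_leaves, leaves,
                                          nr_msrs, msrs,
                                          &err_leaf, &err_subleaf, &err_msr);

        if ( rc )
            /* A field left as ~0 means no specific leaf/MSR was identified. */
            fprintf(stderr,
                    "d%u policy rejected: leaf %#x, subleaf %#x, msr %#x\n",
                    domid, err_leaf, err_subleaf, err_msr);

        return rc;
    }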
struct cpuid_domain_info
{
unsigned int vendor; /* X86_VENDOR_* */
return 0;
}
+static int update_domain_cpu_policy(struct domain *d,
+ xen_domctl_cpu_policy_t *xdpc)
+{
+ struct cpu_policy new = {};
+ const struct cpu_policy *sys = is_pv_domain(d)
+ ? &system_policies[XEN_SYSCTL_cpu_policy_pv_max]
+ : &system_policies[XEN_SYSCTL_cpu_policy_hvm_max];
+ struct cpu_policy_errors err = INIT_CPU_POLICY_ERRORS;
+ int ret = -ENOMEM;
+
+ /* Start by copying the domain's existing policies. */
+ if ( !(new.cpuid = xmemdup(d->arch.cpuid)) ||
+ !(new.msr = xmemdup(d->arch.msr)) )
+ goto out;
+
+ /* Merge the toolstack provided data. */
+ if ( (ret = x86_cpuid_copy_from_buffer(
+ new.cpuid, xdpc->cpuid_policy, xdpc->nr_leaves,
+ &err.leaf, &err.subleaf)) ||
+ (ret = x86_msr_copy_from_buffer(
+ new.msr, xdpc->msr_policy, xdpc->nr_msrs, &err.msr)) )
+ goto out;
+
+ /* Trim any newly-stale out-of-range leaves. */
+ x86_cpuid_policy_clear_out_of_range_leaves(new.cpuid);
+
+ /* Audit the combined dataset. */
+ ret = x86_cpu_policies_are_compatible(sys, &new, &err);
+ if ( ret )
+ goto out;
+
+ /*
+ * Audit was successful. Replace existing policies, leaving the old
+ * policies to be freed.
+ */
+ SWAP(new.cpuid, d->arch.cpuid);
+ SWAP(new.msr, d->arch.msr);
+
+ /* TODO: Drop when x86_cpu_policies_are_compatible() is completed. */
+ recalculate_cpuid_policy(d);
+
+ /* Recalculate relevant dom/vcpu state now the policy has changed. */
+ domain_cpu_policy_changed(d);
+
+ out:
+ /* Free whichever cpuid/msr structs are not installed in struct domain. */
+ xfree(new.cpuid);
+ xfree(new.msr);
+
+ if ( ret )
+ {
+ xdpc->err_leaf = err.leaf;
+ xdpc->err_subleaf = err.subleaf;
+ xdpc->err_msr = err.msr;
+ }
+
+ return ret;
+}
+
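For context (not part of this patch): the err bookkeeping above relies on the
lib/x86 cpu_policy_errors helper.  A sketch of its assumed shape, matching the
"-1 means no further information" convention used throughout:

    /* Assumed shape, per the lib/x86 headers.  Each field stays ~0 unless
     * auditing can point at a specific CPUID leaf/subleaf or MSR. */
    struct cpu_policy_errors
    {
        uint32_t leaf, subleaf;
        uint32_t msr;
    };

    #define INIT_CPU_POLICY_ERRORS  { -1, -1, -1 }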
static int vcpu_set_vmce(struct vcpu *v,
const struct xen_domctl_ext_vcpucontext *evc)
{
copyback = true;
break;
+ case XEN_DOMCTL_set_cpu_policy:
+ if ( d == currd ) /* No domain_pause() */
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ domain_pause(d);
+
+ if ( d->creation_finished )
+ ret = -EEXIST; /* No changing once the domain is running. */
+ else
+ {
+ ret = update_domain_cpu_policy(d, &domctl->u.cpu_policy);
+ if ( ret ) /* Copy domctl->u.cpu_policy.err_* to guest. */
+ copyback = true;
+ }
+
+ domain_unpause(d);
+ break;
+
default:
ret = iommu_do_domctl(domctl, d, u_domctl);
break;
}
/*
- * XEN_DOMCTL_get_cpu_policy (x86 specific)
+ * XEN_DOMCTL_{get,set}_cpu_policy (x86 specific)
*
- * Query the CPUID and MSR policies for a specific domain.
+ * Query or set the CPUID and MSR policies for a specific domain.
*/
struct xen_domctl_cpu_policy {
uint32_t nr_leaves; /* IN/OUT: Number of leaves in/written to
* 'cpuid_policy'. */
uint32_t nr_msrs; /* IN/OUT: Number of MSRs in/written to
                         * 'msr_policy'. */
- XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) cpuid_policy; /* OUT */
- XEN_GUEST_HANDLE_64(xen_msr_entry_t) msr_policy; /* OUT */
+ XEN_GUEST_HANDLE_64(xen_cpuid_leaf_t) cpuid_policy; /* IN/OUT */
+ XEN_GUEST_HANDLE_64(xen_msr_entry_t) msr_policy; /* IN/OUT */
+
+ /*
+ * OUT, set_policy only. Written in some (but not all) error cases to
+ * identify the CPUID leaf/subleaf and/or MSR which auditing objects to.
+ */
+ uint32_t err_leaf, err_subleaf, err_msr;
};
typedef struct xen_domctl_cpu_policy xen_domctl_cpu_policy_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpu_policy_t);
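As illustration (not part of this patch): a consumer distinguishing a CPUID
audit failure from an MSR one would check which err_* fields were written,
with ~0 meaning "no information".  Here 'rc' is the result of a failed set
operation, 'policy' the struct xen_domctl_cpu_policy used for it, and the
report_bad_*() helpers are hypothetical.

    if ( rc && policy.err_msr != ~0u )
        report_bad_msr(policy.err_msr);                        /* hypothetical */
    else if ( rc && policy.err_leaf != ~0u )
        report_bad_leaf(policy.err_leaf, policy.err_subleaf);  /* hypothetical */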
/* #define XEN_DOMCTL_set_gnttab_limits 80 - Moved into XEN_DOMCTL_createdomain */
#define XEN_DOMCTL_vuart_op 81
#define XEN_DOMCTL_get_cpu_policy 82
+#define XEN_DOMCTL_set_cpu_policy 83
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
case XEN_DOMCTL_set_virq_handler:
return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SET_VIRQ_HANDLER);
+ case XEN_DOMCTL_set_cpu_policy:
case XEN_DOMCTL_set_cpuid:
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_CPUID);
# source = the domain making the hypercall
# target = the new target domain
set_as_target
+# XEN_DOMCTL_set_cpu_policy
# XEN_DOMCTL_set_cpuid
set_cpuid
# XEN_DOMCTL_gettscinfo