It turns out that Xen has never enforced that a domain remain within the
xstate features advertised in CPUID.
The check of new_bv against xfeature_mask ensures that a domain stays within
the set of features that Xen has enabled in hardware (and therefore isn't a
security problem), but this does mean that attempts to level a guest for
migration safety might not be effective if the guest ignores CPUID.
Check the CPUID policy in validate_xstate() (for incoming migration) and in
handle_xsetbv() (for guest XSETBV instructions). This subsumes the PKRU check
for PV guests in handle_xsetbv() (and also demonstrates that I should have
spotted this problem while reviewing c/s
fbf9971241f).
For migration, this is correct despite the current (mis)ordering of data
because d->arch.cpuid is the applicable max policy.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
if ( _xcr0_accum )
{
if ( evc->size >= PV_XSAVE_HDR_SIZE + XSTATE_AREA_MIN_SIZE )
- ret = validate_xstate(_xcr0, _xcr0_accum,
+ ret = validate_xstate(d, _xcr0, _xcr0_accum,
&_xsave_area->xsave_hdr);
}
else if ( !_xcr0 )
ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
h->cur += desc->length;
- err = validate_xstate(ctxt->xcr0, ctxt->xcr0_accum,
+ err = validate_xstate(d, ctxt->xcr0, ctxt->xcr0_accum,
(const void *)&ctxt->save_area.xsave_hdr);
if ( err )
{
return !(xcr0 & X86_XCR0_BNDREGS) == !(xcr0 & X86_XCR0_BNDCSR);
}
-int validate_xstate(u64 xcr0, u64 xcr0_accum, const struct xsave_hdr *hdr)
+int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
+ const struct xsave_hdr *hdr)
{
+ const struct cpuid_policy *cp = d->arch.cpuid;
+ uint64_t xcr0_max =
+ ((uint64_t)cp->xstate.xcr0_high << 32) | cp->xstate.xcr0_low;
unsigned int i;
if ( (hdr->xstate_bv & ~xcr0_accum) ||
(xcr0 & ~xcr0_accum) ||
+ (xcr0_accum & ~xcr0_max) ||
!valid_xcr0(xcr0) ||
!valid_xcr0(xcr0_accum) )
return -EINVAL;
int handle_xsetbv(u32 index, u64 new_bv)
{
struct vcpu *curr = current;
+ const struct cpuid_policy *cp = curr->domain->arch.cpuid;
+ uint64_t xcr0_max =
+ ((uint64_t)cp->xstate.xcr0_high << 32) | cp->xstate.xcr0_low;
u64 mask;
if ( index != XCR_XFEATURE_ENABLED_MASK )
return -EOPNOTSUPP;
- if ( (new_bv & ~xfeature_mask) || !valid_xcr0(new_bv) )
+ if ( (new_bv & ~xcr0_max) ||
+ (new_bv & ~xfeature_mask) || !valid_xcr0(new_bv) )
return -EINVAL;
- /* XCR0.PKRU is disabled on PV mode. */
- if ( is_pv_vcpu(curr) && (new_bv & X86_XCR0_PKRU) )
- return -EOPNOTSUPP;
-
if ( !set_xcr0(new_bv) )
return -EFAULT;
void xrstor(struct vcpu *v, uint64_t mask);
void xstate_set_init(uint64_t mask);
bool xsave_enabled(const struct vcpu *v);
-int __must_check validate_xstate(u64 xcr0, u64 xcr0_accum,
- const struct xsave_hdr *);
+int __must_check validate_xstate(const struct domain *d,
+ uint64_t xcr0, uint64_t xcr0_accum,
+ const struct xsave_hdr *hdr);
int __must_check handle_xsetbv(u32 index, u64 new_bv);
void expand_xsave_states(struct vcpu *v, void *dest, unsigned int size);
void compress_xsave_states(struct vcpu *v, const void *src, unsigned int size);