                 BUG();

             if ( cpu_has_xsaves && is_hvm_vcpu(n) )
-                set_msr_xss(n->arch.hvm.msr_xss);
+                set_msr_xss(n->arch.msrs->xss.raw);
         }
         vcpu_restore_fpu_nonlazy(n, false);
         nd->arch.ctxt_switch->to(n);
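
On the context-switch path this reload is cheap: set_msr_xss() keeps a
per-pCPU shadow of the last value written and skips the WRMSR when nothing
changed. A sketch of that pattern, assuming Xen's this_cpu()/wrmsrl()
helpers and a shadow declared with DEFINE_PER_CPU(uint64_t, xss):

    /* Sketch: lazy MSR update so unchanged values cost no WRMSR. */
    void set_msr_xss(u64 xss)
    {
        u64 *this_xss = &this_cpu(xss);

        if ( *this_xss != xss )      /* WRMSR is serialising; avoid it */
        {
            wrmsrl(MSR_IA32_XSS, xss);
            *this_xss = xss;
        }
    }
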
     case MSR_IA32_XSS:
         if ( !d->arch.cpuid->xstate.xsaves )
             goto gp_fault;

-        *msr_content = v->arch.hvm.msr_xss;
+        *msr_content = v->arch.msrs->xss.raw;
         break;

     case MSR_K8_ENABLE_C1E:
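
The read intercept is gated on the guest's CPUID policy
(d->arch.cpuid->xstate.xsaves) rather than on host capability, so a guest
that was never shown XSAVES takes #GP exactly as it would on bare metal.
From the guest's point of view, IA32_XSS exists iff XSAVES is enumerated in
CPUID.(EAX=0xD,ECX=1):EAX[3]; a hypothetical guest-side probe:

    /* Hypothetical guest-side probe for XSAVES/IA32_XSS support. */
    static bool guest_has_xsaves(void)
    {
        unsigned int eax, ebx, ecx, edx;

        asm volatile ( "cpuid"
                       : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                       : "a" (0xd), "c" (1) );

        return eax & (1u << 3);      /* EAX bit 3: XSAVES/XRSTORS + IA32_XSS */
    }
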
         /* No XSS features currently supported for guests. */
         if ( !d->arch.cpuid->xstate.xsaves || msr_content != 0 )
             goto gp_fault;

-        v->arch.hvm.msr_xss = msr_content;
+        v->arch.msrs->xss.raw = msr_content;
         break;

     case MSR_AMD64_NB_CFG:
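
Writes are stricter than reads: no supervisor state components are exposed
to guests yet, so the only architecturally settable value is 0 and anything
else faults. If a component were ever offered, the "!= 0" test would
presumably become a mask check derived from the domain's policy, along these
lines (hypothetical; guest_xss_mask() is an invented helper):

    /* Hypothetical future form, with an invented guest_xss_mask() helper
     * deriving the writable bits from the domain's CPUID policy. */
    uint64_t xss_mask = guest_xss_mask(d);

    if ( !d->arch.cpuid->xstate.xsaves || (msr_content & ~xss_mask) )
        goto gp_fault;
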
 {
     if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
     {
-        ctxt->msr[ctxt->count].val = v->arch.hvm.msr_xss;
+        ctxt->msr[ctxt->count].val = v->arch.msrs->xss.raw;
         if ( ctxt->msr[ctxt->count].val )
             ctxt->msr[ctxt->count++].index = MSR_IA32_XSS;
     }
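
Note the ordering in the save path: the value is staged into the record
first, and the index is only committed (with count advanced) when the value
is non-zero, so default-zero MSRs never take up space in the migration
stream. The record layout implied by these accesses is roughly as follows
(a sketch; the authoritative definition lives in the public save-format
headers):

    /* Rough shape of the MSR save record implied by the code above. */
    struct hvm_msr {
        uint32_t count;          /* number of valid entries in msr[] */
        struct {
            uint32_t index;      /* MSR number, e.g. MSR_IA32_XSS */
            uint32_t _rsvd;      /* assumed padding/reserved field */
            uint64_t val;        /* 64-bit MSR contents */
        } msr[];                 /* variable-length array of records */
    };
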
         {
         case MSR_IA32_XSS:
             if ( cpu_has_xsaves && cpu_has_vmx_xsaves )
-                v->arch.hvm.msr_xss = ctxt->msr[i].val;
+                v->arch.msrs->xss.raw = ctxt->msr[i].val;
             else
                 err = -ENXIO;
             break;
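
The restore side checks the host's real capability (cpu_has_xsaves &&
cpu_has_vmx_xsaves): if the destination cannot context-switch XSS, the load
fails with -ENXIO rather than silently dropping guest state, so migration to
unsuitable hardware is refused up front. Condensed, the surrounding loop has
this shape (other MSR records elided):

    /* Condensed sketch of the restore loop; unrelated cases elided. */
    for ( i = 0; i < ctxt->count; ++i )
    {
        switch ( ctxt->msr[i].index )
        {
        case MSR_IA32_XSS:
            /* ... accepted or rejected as shown above ... */
            break;
        }

        if ( err )
            break;               /* first failure aborts the restore */
    }
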
     struct hvm_vcpu_asid n1asid;

     u64                 msr_tsc_adjust;
-    u64                 msr_xss;

     union {
         struct vmx_vcpu vmx;
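
That hunk retires the HVM-only field; its replacement lives in the common
per-vCPU MSR block, as the final hunk shows. For any call site the change is
purely the access path:

    /* Before: HVM-specific storage, only valid for HVM vCPUs. */
    v->arch.hvm.msr_xss = val;

    /* After: the common per-vCPU MSR block. */
    v->arch.msrs->xss.raw = val;
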
      * values here may be stale in current context.
      */
     uint32_t dr_mask[4];
+
+    /* 0x00000da0 - MSR_IA32_XSS */
+    struct {
+        uint64_t raw;
+    } xss;
 };

 void init_guest_msr_policy(void);
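
Wrapping a lone uint64_t in a struct with a raw member follows the existing
convention in this structure: the wrapper can later grow a decoded bitfield
view alongside .raw without churning any call sites. A hypothetical
illustration (the PT bit is only an example of an XSS component, not a
committed interface):

    /* Hypothetical future shape: a decoded view next to the raw value. */
    union {
        uint64_t raw;
        struct {
            uint64_t    :8;      /* bits 0-7 */
            bool     pt :1;      /* XSS[8]: Processor Trace state */
        };
    } xss;
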