ctxt.ldtr_arbytes = seg.attr.bytes;
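+ /* Record whether fpu_regs holds a valid image so the restore side need not guess. */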
if ( v->fpu_initialised )
+ {
memcpy(ctxt.fpu_regs, v->arch.fpu_ctxt, sizeof(ctxt.fpu_regs));
- else
- memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+ ctxt.flags = XEN_X86_FPU_INITIALISED;
+ }
+ else
+ {
+ memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
+ ctxt.flags = 0;
+ }
ctxt.rax = v->arch.user_regs.eax;
ctxt.rbx = v->arch.user_regs.ebx;
return -EINVAL;
}
- if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
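+ /* Accept shorter records saved by Xen versions that predate the flags field. */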
+ if ( hvm_load_entry_zeroextend(CPU, h, &ctxt) != 0 )
return -EINVAL;
/* Sanity check some control registers. */
return -EINVAL;
}
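+ /* Only XEN_X86_FPU_INITIALISED is defined so far; reject unknown flag bits. */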
+ if ( (ctxt.flags & ~XEN_X86_FPU_INITIALISED) != 0 )
+ {
+ gprintk(XENLOG_ERR, "bad flags value in CPU context: %#x\n",
+ ctxt.flags);
+ return -EINVAL;
+ }
+
/* Older Xen versions used to save the segment arbytes directly
* from the VMCS on Intel hosts. Detect this and rearrange them
* into the struct segment_register format. */
seg.attr.bytes = ctxt.ldtr_arbytes;
hvm_set_segment_register(v, x86_seg_ldtr, &seg);
- /* In case xsave-absent save file is restored on a xsave-capable host */
- if ( cpu_has_xsave && !xsave_enabled(v) )
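+ /* Take fpu_initialised from the saved flags rather than assuming the FPU was in use. */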
+ v->fpu_initialised = !!(ctxt.flags & XEN_X86_FPU_INITIALISED);
+ if ( v->fpu_initialised )
{
- struct xsave_struct *xsave_area = v->arch.xsave_area;
+ memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
+ /* In case an xsave-absent save file is restored on an xsave-capable host */
+ if ( cpu_has_xsave && !xsave_enabled(v) )
+ {
+ struct xsave_struct *xsave_area = v->arch.xsave_area;
- memcpy(v->arch.xsave_area, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
- xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
- if ( cpu_has_xsaves || cpu_has_xsavec )
- xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
- XSTATE_COMPACTION_ENABLED;
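+ /* The copy above already filled in the legacy FPU/SSE image (fpu_ctxt aliases
+ * that region of the xsave area when one is in use); only the xsave header
+ * needs setting up here. */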
+ xsave_area->xsave_hdr.xstate_bv = XSTATE_FP_SSE;
+ if ( cpu_has_xsaves || cpu_has_xsavec )
+ xsave_area->xsave_hdr.xcomp_bv = XSTATE_FP_SSE |
+ XSTATE_COMPACTION_ENABLED;
+ }
}
- else
- memcpy(v->arch.fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
v->arch.user_regs.eax = ctxt.rax;
v->arch.user_regs.ebx = ctxt.rbx;
v->arch.debugreg[7] = ctxt.dr7;
v->arch.vgc_flags = VGCF_online;
- v->fpu_initialised = 1;
/* Auxiliary processors should be woken immediately. */
v->is_initialised = 1;
/*
* Processor
*
- * Compat: Pre-3.4 didn't have msr_tsc_aux
+ * Compat:
+ * - Pre-3.4 didn't have msr_tsc_aux
+ * - Pre-4.7 didn't have fpu_initialised
*/
struct hvm_hw_cpu {
};
/* error code for pending event */
uint32_t error_code;
+
+#define _XEN_X86_FPU_INITIALISED 0
+#define XEN_X86_FPU_INITIALISED (1U<<_XEN_X86_FPU_INITIALISED)
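+ /* fpu_regs holds a valid image only when XEN_X86_FPU_INITIALISED is set. */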
+ uint32_t flags;
};
struct hvm_hw_cpu_compat {
struct hvm_hw_cpu_compat cmp;
} *ucpu = (union hvm_hw_cpu_union *)h;
- /* If we copy from the end backwards, we should
- * be able to do the modification in-place */
- ucpu->nat.error_code = ucpu->cmp.error_code;
- ucpu->nat.pending_event = ucpu->cmp.pending_event;
- ucpu->nat.tsc = ucpu->cmp.tsc;
- ucpu->nat.msr_tsc_aux = 0;
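+ /* Only the pre-3.4 layout (no msr_tsc_aux) needs its tail fields shuffled into place. */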
+ if ( size == sizeof(struct hvm_hw_cpu_compat) )
+ {
+ /*
+ * If we copy from the end backwards, we should
+ * be able to do the modification in-place.
+ */
+ ucpu->nat.error_code = ucpu->cmp.error_code;
+ ucpu->nat.pending_event = ucpu->cmp.pending_event;
+ ucpu->nat.tsc = ucpu->cmp.tsc;
+ ucpu->nat.msr_tsc_aux = 0;
+ }
+ /* Mimic the old behaviour by unconditionally setting fpu_initialised. */
+ ucpu->nat.flags = XEN_X86_FPU_INITIALISED;
return 0;
}