mov %rsp, %rdi
call svm_vmenter_helper
- mov VCPU_arch_msrs(%rbx), %rax
- mov VCPUMSR_spec_ctrl_raw(%rax), %eax
+ clgi
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- /* SPEC_CTRL_EXIT_TO_SVM (nothing currently) */
+ /* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ .macro svm_vmentry_spec_ctrl
+ mov VCPU_arch_msrs(%rbx), %rax
+ movzbl CPUINFO_last_spec_ctrl(%rsp), %edx
+ mov VCPUMSR_spec_ctrl_raw(%rax), %eax
+ cmp %edx, %eax
+ je 1f /* Skip write if value is correct. */
+ mov $MSR_SPEC_CTRL, %ecx
+ xor %edx, %edx
+ wrmsr
+ mov %al, CPUINFO_last_spec_ctrl(%rsp)
+1: /* No Spectre v1 concerns. Execution will hit VMRUN imminently. */
+ .endm
+ ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
pop %r15
pop %r14
pop %rsi
pop %rdi
- clgi
sti
vmrun
GET_CURRENT(bx)
- /* SPEC_CTRL_ENTRY_FROM_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: ac */
+ /* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo Clob: acd */
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
+
+ .macro svm_vmexit_spec_ctrl
+ /*
+ * Write to MSR_SPEC_CTRL unconditionally, for the RAS[:32]
+ * flushing side effect.
+ */
+ mov $MSR_SPEC_CTRL, %ecx
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
+ xor %edx, %edx
+ wrmsr
+ mov %al, CPUINFO_last_spec_ctrl(%rsp)
+ .endm
+ ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
stgi
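
Taken together, the two ALTERNATIVE'd macros bracket VMRUN with a lazy MSR_SPEC_CTRL switch: the vmentry side only issues WRMSR when the guest's value differs from whatever this CPU last wrote (tracked in last_spec_ctrl), while the vmexit side writes Xen's value unconditionally so that the WRMSR's RAS[:32]-flushing side effect is always taken, then records what it wrote. A rough C rendering of that flow, with stub types and a placeholder wrmsr() standing in for the real primitives, might look like:

    #include <stdint.h>

    #define MSR_SPEC_CTRL 0x00000048

    /* Illustrative stand-ins for the struct cpu_info / struct vcpu_msrs
     * fields which the assembly reaches via CPUINFO_* / VCPUMSR_* offsets. */
    struct cpu_info_stub  { uint8_t xen_spec_ctrl, last_spec_ctrl; };
    struct vcpu_msrs_stub { struct { uint32_t raw; } spec_ctrl; };

    static void wrmsr(uint32_t msr, uint32_t lo, uint32_t hi)
    {
        (void)msr; (void)lo; (void)hi;  /* placeholder for the privileged insn */
    }

    /* svm_vmentry_spec_ctrl: load the guest value, skipping the (slow)
     * WRMSR when the MSR already holds it. */
    void vmentry_spec_ctrl(struct cpu_info_stub *ci,
                           const struct vcpu_msrs_stub *msrs)
    {
        uint32_t guest = msrs->spec_ctrl.raw;

        if ( guest != ci->last_spec_ctrl )
        {
            wrmsr(MSR_SPEC_CTRL, guest, 0);
            ci->last_spec_ctrl = (uint8_t)guest; /* low byte, as with mov %al */
        }
    }

    /* svm_vmexit_spec_ctrl: restore Xen's value unconditionally, because the
     * write itself flushes RAS[:32], then note what the MSR now holds. */
    void vmexit_spec_ctrl(struct cpu_info_stub *ci)
    {
        wrmsr(MSR_SPEC_CTRL, ci->xen_spec_ctrl, 0);
        ci->last_spec_ctrl = ci->xen_spec_ctrl;
    }

Both sequences sit inside the CLGI/STGI window, which is what makes the switch atomic with respect to NMIs. The next hunk, in the vcpu_msrs comment, documents the SVM-specific semantics of the spec_ctrl.raw value being loaded here.
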
*
* For VT-x guests, the guest value is held in the MSR guest load/save
* list.
+ *
+ * For SVM, the guest value lives in the VMCB, and hardware saves/restores
+ * the host value automatically. However, guests run with the OR of the
+ * host and guest value, which allows Xen to set protections behind the
+ * guest's back.
+ *
+ * We must clear/restore Xen's value before/after VMRUN to avoid unduly
+ * influencing the guest. In order to support "behind the guest's back"
+ * protections, we load this value (commonly 0) before VMRUN.
*/
struct {
uint32_t raw;
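
The "OR of the host and guest value" behaviour is what makes "behind the guest's back" protections possible: whatever Xen leaves in MSR_SPEC_CTRL at VMRUN is OR'd with the guest's own VMCB value while the guest runs. A toy illustration, using the architectural bit positions for IBRS and SSBD:

    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS (1u << 0)
    #define SPEC_CTRL_SSBD (1u << 2)

    int main(void)
    {
        /* What Xen loads before VMRUN: commonly 0, but it may carry a
         * protection the guest has not asked for (SSBD here, for example). */
        uint32_t host_at_vmrun = SPEC_CTRL_SSBD;

        /* What the guest believes it wrote; held in the VMCB. */
        uint32_t guest_vmcb = SPEC_CTRL_IBRS;

        /* Hardware applies the OR of the two while the guest executes. */
        printf("effective SPEC_CTRL in guest: %#x\n",
               host_at_vmrun | guest_vmcb);

        return 0;
    }

This is presumably also why the vmentry macro above loads VCPUMSR_spec_ctrl_raw rather than hard-coding zero: the field holds whatever Xen wants in force underneath the guest, which is commonly 0.
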
* - On VMX by using MSR load/save lists to have vmentry/exit atomically
* load/save the guest value. Xen's value is loaded in regular code, and
* there is no need to use the shadow logic (below).
+ * - On SVM by altering MSR_SPEC_CTRL inside the CLGI/STGI region. This
+ * makes the changes atomic with respect to NMIs/etc, so no need for
+ * shadowing logic.
*
* Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow
* boolean in the per cpu spec_ctrl_flags. The synchronous use is:
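
The shadowing machinery that "factor 2" refers to exists because, on the PV paths, the MSR write and the transition to/from guest context are not atomic: an NMI or #MC can land in between and needs to know which value to re-establish on its way back out. A heavily simplified sketch of that decision is below; the flag bit and the exact restore rule are assumptions for illustration, not lifted from this patch:

    #include <stdint.h>

    #define SCF_use_shadow (1u << 0)    /* illustrative flag bit */

    struct cpu_info_stub {
        uint32_t shadow_spec_ctrl;      /* guest value while shadowing is active */
        uint8_t  xen_spec_ctrl;         /* Xen's preferred value */
        uint8_t  spec_ctrl_flags;
    };

    /* Value MSR_SPEC_CTRL should hold when returning to the interrupted
     * context: the shadowed guest value if shadowing was in force when the
     * NMI/#MC hit, otherwise Xen's own value. */
    uint32_t spec_ctrl_to_restore(const struct cpu_info_stub *ci)
    {
        return (ci->spec_ctrl_flags & SCF_use_shadow) ? ci->shadow_spec_ctrl
                                                      : ci->xen_spec_ctrl;
    }

None of this is needed around VMRUN precisely because the CLGI/STGI region prevents NMIs from being delivered to Xen while MSR_SPEC_CTRL still holds the guest's value, which is the point the SVM bullet above is making. The final hunk exposes the new bookkeeping field to assembly:
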
OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
+ OFFSET(CPUINFO_last_spec_ctrl, struct cpu_info, last_spec_ctrl);
OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3);
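
That OFFSET() addition is what lets entry.S address the new field as CPUINFO_last_spec_ctrl(%rsp): the asm-offsets machinery turns offsetof() expressions on struct cpu_info into assembler-visible constants. The sketch below shows the idea only; the stub struct is not the real struct cpu_info layout, and the field placement is an assumption based on the neighbouring OFFSET() lines:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative subset of struct cpu_info; the real patch presumably adds
     * last_spec_ctrl alongside the existing spec-ctrl bookkeeping fields. */
    struct cpu_info_stub {
        uint32_t shadow_spec_ctrl;
        uint8_t  xen_spec_ctrl;
        uint8_t  last_spec_ctrl;   /* value most recently written to MSR_SPEC_CTRL */
        uint8_t  spec_ctrl_flags;
    };

    int main(void)
    {
        /* The build turns expressions like this into #define'd constants,
         * so the assembly can reach the field at a fixed offset from the
         * stack-anchored cpu_info. */
        printf("CPUINFO_last_spec_ctrl = %zu\n",
               offsetof(struct cpu_info_stub, last_spec_ctrl));

        return 0;
    }

Keeping last_spec_ctrl as per-CPU state, rather than per-vCPU, is what allows the vmentry path to skip the WRMSR whenever consecutive runs on a CPU want the same value.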