ci = get_cpu_info();
/* Avoid NMI/#MC using unsafe MSRs until we've reloaded microcode. */
- ci->spec_ctrl_flags &= ~SCF_IST_MASK;
+ ci->scf &= ~SCF_IST_MASK;
ACPI_FLUSH_CPU_CACHE();
panic("Missing previously available feature(s)\n");
/* Re-enable default NMI/#MC use of MSRs now microcode is loaded. */
- ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_IST_MASK);
+ ci->scf |= (default_scf & SCF_IST_MASK);
if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
{
}
}
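
The two comments in this hunk describe a bracket around the microcode load: the IST-reachable mitigation bits are dropped from the local CPU's scf before the update so NMI/#MC paths stay away from possibly-unsafe MSRs, and the boot-time defaults are folded back in afterwards. A standalone C sketch of that bracket, with illustrative bit values and a hypothetical load_microcode() stand-in:

#include <stdint.h>

/* Illustrative bit layout; the real SCF_* values live in Xen's spec_ctrl header. */
#define SCF_ist_sc_msr  (1u << 1)
#define SCF_ist_rsb     (1u << 2)
#define SCF_ist_ibpb    (1u << 4)
#define SCF_IST_MASK    (SCF_ist_sc_msr | SCF_ist_rsb | SCF_ist_ibpb)

struct cpu_info { uint8_t scf; };              /* per-CPU block, reachable from NMI/#MC */

static struct cpu_info this_cpu;
static uint8_t default_scf = SCF_ist_sc_msr | SCF_ist_rsb;

static int load_microcode(void) { return 0; }  /* hypothetical stand-in for the real loader */

static int update_microcode(void)
{
    int rc;

    /* Avoid NMI/#MC using unsafe MSRs until we've reloaded microcode. */
    this_cpu.scf &= ~SCF_IST_MASK;

    rc = load_microcode();

    /* Re-enable default NMI/#MC use of MSRs now microcode is loaded. */
    this_cpu.scf |= (default_scf & SCF_IST_MASK);

    return rc;
}
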
- /* Update the top-of-stack block with the new spec_ctrl settings. */
- info->spec_ctrl_flags =
- (info->spec_ctrl_flags & ~SCF_DOM_MASK) |
- (nextd->arch.spec_ctrl_flags & SCF_DOM_MASK);
+ /* Update the top-of-stack block with the new speculation settings. */
+ info->scf =
+ (info->scf & ~SCF_DOM_MASK) |
+ (nextd->arch.scf & SCF_DOM_MASK);
}
sched_context_switched(prev, next);
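
The context-switch hunk above only swaps the domain-controlled bits of the live, top-of-stack scf, leaving Xen's own (shadowing and IST) bits alone. A minimal sketch of that masked merge, using the SCF_DOM_MASK grouping this patch works with (bit positions illustrative):

#include <stdint.h>

#define SCF_verw        (1u << 3)   /* illustrative positions */
#define SCF_entry_ibpb  (1u << 5)
#define SCF_DOM_MASK    (SCF_verw | SCF_entry_ibpb)

/* Fold a domain's SCF_DOM_MASK bits into the CPU's live flags, leaving
 * the Xen-global (shadowing / IST) bits untouched. */
static inline uint8_t merge_domain_scf(uint8_t cpu_scf, uint8_t dom_scf)
{
    return (cpu_scf & ~SCF_DOM_MASK) | (dom_scf & SCF_DOM_MASK);
}
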
/* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo, %rdx=0 Clob: acd */
.macro svm_vmexit_cond_ibpb
- testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ testb $SCF_entry_ibpb, CPUINFO_scf(%rsp)
jz .L_skip_ibpb
mov $MSR_PRED_CMD, %ecx
BUILD_BUG_ON(SCF_verw & ~0xff)
movzbl VCPU_vmx_launched(%rbx), %ecx
shl $31, %ecx
- movzbl CPUINFO_spec_ctrl_flags(%rsp), %eax
+ movzbl CPUINFO_scf(%rsp), %eax
and $SCF_verw, %eax
or %eax, %ecx
rc = vmx_add_msr(v, MSR_FLUSH_CMD, FLUSH_CMD_L1D,
VMX_MSR_GUEST_LOADONLY);
- if ( !rc && (d->arch.spec_ctrl_flags & SCF_entry_ibpb) )
+ if ( !rc && (d->arch.scf & SCF_entry_ibpb) )
rc = vmx_add_msr(v, MSR_PRED_CMD, PRED_CMD_IBPB,
VMX_MSR_HOST);
if ( bsp_delay_spec_ctrl )
{
- info->spec_ctrl_flags &= ~SCF_use_shadow;
+ info->scf &= ~SCF_use_shadow;
barrier();
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
info->last_spec_ctrl = default_xen_spec_ctrl;
bool __initdata bsp_delay_spec_ctrl;
uint8_t __read_mostly default_xen_spec_ctrl;
-uint8_t __read_mostly default_spec_ctrl_flags;
+uint8_t __read_mostly default_scf;
paddr_t __read_mostly l1tf_addr_mask, __read_mostly l1tf_safe_maddr;
static bool __initdata cpu_has_bug_l1tf;
* NMI/#MC, so can't interrupt Xen ahead of having already flushed the
* BTB.
*/
- default_spec_ctrl_flags |= SCF_ist_ibpb;
+ default_scf |= SCF_ist_ibpb;
}
if ( opt_ibpb_entry_hvm )
setup_force_cpu_cap(X86_FEATURE_IBPB_ENTRY_HVM);
bool ibpb = ((pv ? opt_ibpb_entry_pv : opt_ibpb_entry_hvm) &&
(d->domain_id != 0 || opt_ibpb_entry_dom0));
- d->arch.spec_ctrl_flags =
+ d->arch.scf =
(verw ? SCF_verw : 0) |
(ibpb ? SCF_entry_ibpb : 0) |
0;
{
if ( opt_msr_sc_pv )
{
- default_spec_ctrl_flags |= SCF_ist_sc_msr;
+ default_scf |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_PV);
}
* Xen's value is not restored atomically. An early NMI hitting
* the VMExit path needs to restore Xen's value for safety.
*/
- default_spec_ctrl_flags |= SCF_ist_sc_msr;
+ default_scf |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}
}
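
The comment above is why SCF_ist_sc_msr is forced on for the HVM case too: between VMExit and the point where Xen's MSR_SPEC_CTRL value is back in place, an NMI or #MC can run with the guest's value still loaded, so the IST entry path must redo the write itself. A rough C-level sketch of that fixup, using names from this patch with a hypothetical MSR-write stand-in:

#include <stdbool.h>
#include <stdint.h>

#define SCF_use_shadow  (1u << 0)   /* illustrative bit positions */
#define SCF_ist_sc_msr  (1u << 1)

struct cpu_info {
    unsigned int shadow_spec_ctrl;
    uint8_t xen_spec_ctrl;
    uint8_t scf;                    /* SCF_* */
};

/* Hypothetical stand-in for wrmsrl(MSR_SPEC_CTRL, val). */
static void write_spec_ctrl(unsigned int val) { (void)val; }

/* What an NMI/#MC landing in the VMExit window has to do, roughly
 * mirroring the IST entry assembly later in this patch. */
static void ist_entry_fixup(struct cpu_info *info, bool interrupted_guest)
{
    if ( !(info->scf & SCF_ist_sc_msr) )
        return;                         /* MSR_SPEC_CTRL not in use here */

    if ( interrupted_guest )
        info->scf &= ~SCF_use_shadow;   /* stop shadowing the guest value */

    /* Put Xen's intended value back, whatever the interrupted path had loaded. */
    write_spec_ctrl(info->xen_spec_ctrl);
}
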
if ( opt_rsb_pv )
{
setup_force_cpu_cap(X86_FEATURE_SC_RSB_PV);
- default_spec_ctrl_flags |= SCF_ist_rsb;
+ default_scf |= SCF_ist_rsb;
}
/*
* possible rogue RSB speculation.
*/
if ( !cpu_has_svm )
- default_spec_ctrl_flags |= SCF_ist_rsb;
+ default_scf |= SCF_ist_rsb;
}
srso_calculations(hw_smt_enabled);
if ( opt_eager_fpu == -1 )
opt_eager_fpu = should_use_eager_fpu();
- /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. */
+ /* (Re)init BSP state now that default_scf has been calculated. */
init_shadow_spec_ctrl_state();
/*
{
info->shadow_spec_ctrl = 0;
barrier();
- info->spec_ctrl_flags |= SCF_use_shadow;
+ info->scf |= SCF_use_shadow;
barrier();
}
OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
OFFSET(CPUINFO_last_spec_ctrl, struct cpu_info, last_spec_ctrl);
- OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
+ OFFSET(CPUINFO_scf, struct cpu_info, scf);
OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
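
CPUINFO_scf is not maintained by hand; like the neighbouring constants it is generated from struct cpu_info by the asm-offsets mechanism, which is what keeps the assembly macros elsewhere in this patch in sync with the C layout. A simplified sketch of that pattern (Xen's real DEFINE/OFFSET macros and their post-processing differ in detail):

#include <stddef.h>

/* Simplified asm-offsets trick: the "i" constraint bakes the constant into
 * the generated assembly, and a build step turns the marked .ascii lines
 * into #define's usable from .S files. */
#define DEFINE(sym, val) \
    asm volatile ( ".ascii \"==>#define " #sym " %0\"" :: "i" (val) )
#define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(str, mem))

struct cpu_info {
    unsigned int shadow_spec_ctrl;
    unsigned char xen_spec_ctrl;
    unsigned char last_spec_ctrl;
    unsigned char scf;
};

void asm_offsets(void)
{
    OFFSET(CPUINFO_scf, struct cpu_info, scf);
    DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
}
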
/* Account for ev/ec having already been popped off the stack. */
SPEC_CTRL_COND_VERW \
- scf=STK_REL(CPUINFO_spec_ctrl_flags, CPUINFO_rip), \
- sel=STK_REL(CPUINFO_verw_sel, CPUINFO_rip)
+ scf=STK_REL(CPUINFO_scf, CPUINFO_rip), \
+ sel=STK_REL(CPUINFO_verw_sel, CPUINFO_rip)
.Lft0: iretq
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
/*
* When the CPU pushed this exception frame, it zero-extended eflags.
* For an IST exit, SPEC_CTRL_EXIT_TO_XEN stashed shadow copies of
- * spec_ctrl_flags and ver_sel above eflags, as we can't use any GPRs,
+ * scf and verw_sel above eflags, as we can't use any GPRs,
* and we're at a random place on the stack, not in a CPUINFO block.
*
* Account for ev/ec having already been popped off the stack.
unsigned int shadow_spec_ctrl;
uint8_t xen_spec_ctrl;
uint8_t last_spec_ctrl;
- uint8_t spec_ctrl_flags;
+ uint8_t scf; /* SCF_* */
/*
* The following field controls copying of the L4 page table of 64-bit
uint32_t pci_cf8;
uint8_t cmos_idx;
- uint8_t spec_ctrl_flags; /* See SCF_DOM_MASK */
+ uint8_t scf; /* See SCF_DOM_MASK */
union {
struct pv_domain pv;
#define __X86_SPEC_CTRL_H__
/*
- * Encoding of:
- * cpuinfo.spec_ctrl_flags
- * default_spec_ctrl_flags
- * domain.spec_ctrl_flags
+ * Encoding of Xen's speculation control flags in:
+ * cpuinfo.scf
+ * default_scf
+ * domain.scf
*
* Live settings are in the top-of-stack block, because they need to be
* accessible when XPTI is active. Some settings are fixed from boot, some
extern bool bsp_delay_spec_ctrl;
extern uint8_t default_xen_spec_ctrl;
-extern uint8_t default_spec_ctrl_flags;
+extern uint8_t default_scf;
extern int8_t opt_xpti_hwdom, opt_xpti_domu;
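
The encoding comment above lists the three places these flags live (per-CPU cpuinfo.scf, the boot-time default_scf, and the per-domain domain.scf). For orientation while reading the rest of the patch, the bits split into the groups the code keys off; the layout below is illustrative only, the real values live in Xen's spec_ctrl header and may differ:

/* Illustrative layout of the 8-bit scf field. */
#define SCF_use_shadow   (1u << 0)  /* shadow_spec_ctrl, not xen_spec_ctrl, is live */
#define SCF_ist_sc_msr   (1u << 1)  /* IST paths must (re)load MSR_SPEC_CTRL        */
#define SCF_ist_rsb      (1u << 2)  /* IST paths must overwrite the RSB             */
#define SCF_verw         (1u << 3)  /* issue VERW on exit to guest                  */
#define SCF_ist_ibpb     (1u << 4)  /* IST paths must issue IBPB first              */
#define SCF_entry_ibpb   (1u << 5)  /* issue IBPB on entry from guest               */

/* Bits NMI/#MC (IST) paths act on; dropped around microcode loads. */
#define SCF_IST_MASK     (SCF_ist_sc_msr | SCF_ist_rsb | SCF_ist_ibpb)
/* Bits chosen per domain; merged into the live scf at context switch. */
#define SCF_DOM_MASK     (SCF_verw | SCF_entry_ibpb)
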
info->shadow_spec_ctrl = 0;
info->xen_spec_ctrl = default_xen_spec_ctrl;
- info->spec_ctrl_flags = default_spec_ctrl_flags;
+ info->scf = default_scf;
/*
* For least latency, the VERW selector should be a writeable data
*/
info->shadow_spec_ctrl = val;
barrier();
- info->spec_ctrl_flags |= SCF_use_shadow;
+ info->scf |= SCF_use_shadow;
barrier();
alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
"a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
* Disable shadowing before updating the MSR. There are no SMP issues
* here; only local processor ordering concerns.
*/
- info->spec_ctrl_flags &= ~SCF_use_shadow;
+ info->scf &= ~SCF_use_shadow;
barrier();
alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
"a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
* shadowing logic.
*
* Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow
- * boolean in the per cpu spec_ctrl_flags. The synchronous use is:
+ * boolean in the per cpu scf. The synchronous use is:
*
* 1) Store guest value in shadow_spec_ctrl
* 2) Set the use_shadow boolean
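
Restated in C, the synchronous side publishes its intent through shadow_spec_ctrl and the use_shadow bit (the MSR write itself follows, in the same order the earlier hunks use), and an asynchronous NMI/#MC path consults only those fields to decide what to load when it returns. A sketch with the same illustrative stand-ins as above:

#include <stdint.h>

#define SCF_use_shadow  (1u << 0)                    /* illustrative position */
#define barrier()       asm volatile ( "" ::: "memory" )

struct cpu_info { unsigned int shadow_spec_ctrl; uint8_t xen_spec_ctrl; uint8_t scf; };

static void wrmsr_spec_ctrl(unsigned int val) { (void)val; }  /* stand-in for wrmsr */

/* Synchronous side: hand the guest's value to the MSR. */
static void set_guest_spec_ctrl(struct cpu_info *info, unsigned int guest_val)
{
    info->shadow_spec_ctrl = guest_val;   /* 1) publish the value            */
    barrier();
    info->scf |= SCF_use_shadow;          /* 2) then mark it as in use       */
    barrier();
    wrmsr_spec_ctrl(guest_val);           /* then load it into the MSR       */
}

/* Asynchronous side: the value an interrupting IST path puts back on exit. */
static unsigned int spec_ctrl_to_restore(const struct cpu_info *info)
{
    return (info->scf & SCF_use_shadow) ? info->shadow_spec_ctrl
                                        : info->xen_spec_ctrl;
}
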
* interrupting Xen.
*/
.if \maybexen
- testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(scf)(%r14)
jz .L\@_skip
testb $3, UREGS_cs(%rsp)
.else
- testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ testb $SCF_entry_ibpb, CPUINFO_scf(%rsp)
.endif
jz .L\@_skip
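
In C terms, the maybexen arm of this macro boils down to: issue the IBPB only when SCF_entry_ibpb is set and the interrupted frame was guest context; nothing is needed when Xen itself was interrupted, per the comment above. A hedged restatement with the IBPB itself stubbed out:

#include <stdbool.h>
#include <stdint.h>

#define SCF_entry_ibpb  (1u << 5)   /* illustrative position */

static void do_ibpb(void) { }       /* stand-in for wrmsr(MSR_PRED_CMD, PRED_CMD_IBPB) */

/* C-level equivalent of the maybexen arm of the macro above. */
static void cond_entry_ibpb(uint8_t scf, bool interrupted_guest)
{
    if ( !(scf & SCF_entry_ibpb) )
        return;
    if ( !interrupted_guest )       /* interrupted Xen: nothing to do */
        return;
    do_ibpb();
}
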
#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))
.macro SPEC_CTRL_COND_VERW \
- scf=STK_REL(CPUINFO_spec_ctrl_flags, CPUINFO_error_code), \
- sel=STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)
+ scf=STK_REL(CPUINFO_scf, CPUINFO_error_code), \
+ sel=STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)
/*
* Requires \scf and \sel as %rsp-relative expressions
* Clobbers eflags
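
STK_REL is plain subtraction: given the asm-offsets constants, it turns a field's offset within the cpuinfo block into a displacement from whichever slot %rsp is known to point at (CPUINFO_error_code here, CPUINFO_rip in the exception-return variant earlier). A tiny illustration of the arithmetic with made-up offsets:

#include <stdio.h>

/* STK_REL as defined above: a field's position relative to whatever
 * stack slot %rsp is known to point at when the macro is used. */
#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))

/* Made-up offsets, purely for illustration; the real ones come from asm-offsets. */
#define CPUINFO_error_code  20
#define CPUINFO_rip         24
#define CPUINFO_scf         70
#define CPUINFO_verw_sel    72

int main(void)
{
    /* With %rsp at error_code, scf would be at %rsp + 50 in this example. */
    printf("scf disp: %d\n", STK_REL(CPUINFO_scf, CPUINFO_error_code));
    printf("sel disp: %d\n", STK_REL(CPUINFO_verw_sel, CPUINFO_rip));
    return 0;
}
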
testb $3, UREGS_cs(%rsp)
setnz %al
not %eax
- and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ and %al, STACK_CPUINFO_FIELD(scf)(%r14)
movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
- andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ andb $~SCF_use_shadow, CPUINFO_scf(%rsp)
movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif
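
The setnz/not/and sequence in the maybexen arm is a branch-free way of clearing SCF_use_shadow only when guest context was interrupted: the resulting mask is either 0xfe (from guest) or 0xff (from Xen), so it only ever touches bit 0, which is where the asm expects SCF_use_shadow to sit. Roughly, in C:

#include <stdint.h>

#define SCF_use_shadow  (1u << 0)   /* the trick relies on this being bit 0 */

/*
 * Branch-free equivalent of:
 *     testb $3, UREGS_cs(%rsp); setnz %al; not %eax
 *     and   %al, STACK_CPUINFO_FIELD(scf)(%r14)
 */
static uint8_t cond_clear_use_shadow(uint8_t scf, uint16_t cs)
{
    uint8_t al = (cs & 3) != 0;     /* setnz: 1 if guest context interrupted   */
    al = (uint8_t)~al;              /* not:   0xfe (guest) or 0xff (Xen)       */
    return scf & al;                /* and:   clears SCF_use_shadow iff guest  */
}
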
mov %eax, CPUINFO_shadow_spec_ctrl(%rsp)
/* Set SPEC_CTRL shadowing *before* loading the guest value. */
- orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ orb $SCF_use_shadow, CPUINFO_scf(%rsp)
mov $MSR_SPEC_CTRL, %ecx
xor %edx, %edx
* DO_SPEC_CTRL_ENTRY maybexen=1
* but with conditionals rather than alternatives.
*/
- movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ movzbl STACK_CPUINFO_FIELD(scf)(%r14), %ebx
test $SCF_ist_ibpb, %bl
jz .L\@_skip_ibpb
testb $3, UREGS_cs(%rsp)
setnz %al
not %eax
- and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ and %al, STACK_CPUINFO_FIELD(scf)(%r14)
/* Load Xen's intended value. */
mov $MSR_SPEC_CTRL, %ecx
* Requires %r12=ist_exit, %r14=stack_end, %rsp=regs
* Clobbers %rax, %rbx, %rcx, %rdx
*/
- movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ movzbl STACK_CPUINFO_FIELD(scf)(%r14), %ebx
testb $SCF_ist_sc_msr, %bl
jz .L\@_skip_sc_msr