        return;
    }
+    /*
+     * AMD/Hygon CPUs to date (June 2022) don't flush the RAS. Future
+     * CPUs are expected to enumerate IBPB_RET when this has been fixed.
+     * Until then, cover the difference with the software sequence.
+     */
+    if ( boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_IBPB_RET) )
+        setup_force_cpu_cap(X86_BUG_IBPB_NO_RET);
+
/*
* IBPB-on-entry mitigations for Branch Type Confusion.
*
#define X86_BUG_FPU_PTRS          X86_BUG( 0) /* (F)X{SAVE,RSTOR} doesn't save/restore FOP/FIP/FDP. */
#define X86_BUG_CLFLUSH_MFENCE    X86_BUG( 2) /* MFENCE needed to serialise CLFLUSH */
+#define X86_BUG_IBPB_NO_RET       X86_BUG( 3) /* IBPB doesn't flush the RSB/RAS */
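/*
 * For context (assuming this header's usual Xen layout, not introduced by
 * this patch): X86_BUG(x) resolves to a bit offset past the real and
 * synthetic feature words, i.e. something like
 *
 *     #define X86_BUG(x) ((FSCAPINTS + X86_NR_SYNTH) * 32 + (x))
 *
 * so X86_BUG_IBPB_NO_RET behaves as an ordinary capability bit, usable with
 * setup_force_cpu_cap(), boot_cpu_has() and alternatives as seen elsewhere
 * in this patch.
 */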
/* Total number of capability words, inc synth and bug words. */
#define NCAPINTS (FSCAPINTS + X86_NR_SYNTH + X86_NR_BUG) /* N 32-bit words worth of info */
void init_speculation_mitigations(void);
void spec_ctrl_init_domain(struct domain *d);
+/*
+ * Switch to a new guest prediction context.
+ *
+ * This flushes all indirect branch predictors (BTB, RSB/RAS), so guest code
+ * which has previously run on this CPU can't attack subsequent guest code.
+ *
+ * As this flushes the RSB/RAS, it destroys the predictions of the calling
+ * context. For best performance, arrange for this to be used when we're going
+ * to jump out of the current context, e.g. with reset_stack_and_jump().
+ *
+ * For hardware which mis-implements IBPB, fix up by flushing the RSB/RAS
+ * manually.
+ */
+static always_inline void spec_ctrl_new_guest_context(void)
+{
+    wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+
+    /* (ab)use alternative_input() to specify clobbers. */
+    alternative_input("", "DO_OVERWRITE_RSB", X86_BUG_IBPB_NO_RET,
+                      : "rax", "rcx");
+}
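+
+/*
+ * Illustrative sketch only, not the exact DO_OVERWRITE_RSB implementation:
+ * software RSB/RAS overwrite sequences work by issuing a chain of CALLs
+ * whose return addresses are deliberately never used, so every predictor
+ * entry is replaced with a benign speculation trap.  Assuming a 32-entry
+ * RSB (real depths vary by CPU):
+ *
+ *     mov $16, %ecx        # 16 iterations, two calls each = 32 entries
+ * 1:  call 2f              # push a benign prediction into the RSB
+ *     int3                 # halt any rogue speculation of this return
+ * 2:  call 3f
+ *     int3
+ * 3:  sub $1, %ecx
+ *     jnz 1b
+ *     add $(32 * 8), %rsp  # discard the 32 real return addresses
+ *
+ * The loop counter and %rsp fixup are why the real sequence needs register
+ * clobbers, as specified via alternative_input() above.
+ */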
+
extern int8_t opt_ibpb_ctxt_switch;
extern bool opt_ssbd;
extern int8_t opt_eager_fpu;