 * condition where an NMI hits while we are midway through patching some
* instructions in the NMI path.
*/
-static int __init nmi_apply_alternatives(const struct cpu_user_regs *regs,
- int cpu)
+static int __init cf_check nmi_apply_alternatives(
+ const struct cpu_user_regs *regs, int cpu)
{
/*
* More than one NMI may occur between the two set_nmi_callback() below.
return ret;
}
-static int microcode_nmi_callback(const struct cpu_user_regs *regs, int cpu)
+static int cf_check microcode_nmi_callback(
+ const struct cpu_user_regs *regs, int cpu)
{
unsigned int primary = cpumask_first(this_cpu(cpu_sibling_mask));
int ret;
static DEFINE_PER_CPU_READ_MOSTLY(bool, crash_save_done);
/* This becomes the NMI handler for non-crashing CPUs, when Xen is crashing. */
-static int noreturn do_nmi_crash(const struct cpu_user_regs *regs, int cpu)
+static int noreturn cf_check do_nmi_crash(
+ const struct cpu_user_regs *regs, int cpu)
{
stac();
* Note that because of this NOP code the do_nmi is not safely patchable.
 * Also, if we do receive 'real' NMIs, we have lost them.
*/
-static int mask_nmi_callback(const struct cpu_user_regs *regs, int cpu)
+static int cf_check mask_nmi_callback(const struct cpu_user_regs *regs, int cpu)
{
    /* TODO: Handle missing NMI/MCE. */
return 1;
return v;
}
-static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
+static int cf_check nmi_callback(const struct cpu_user_regs *regs, int cpu)
{
int xen_mode, ovf;
static bool opt_show_all;
boolean_param("async-show-all", opt_show_all);
-static int nmi_show_execution_state(const struct cpu_user_regs *regs, int cpu)
+static int cf_check nmi_show_execution_state(
+ const struct cpu_user_regs *regs, int cpu)
{
if ( !cpumask_test_cpu(cpu, &show_state_mask) )
return 0;