uint64_t cc7;
};
-static void do_get_hw_residencies(void *arg)
+static void cf_check do_get_hw_residencies(void *arg)
{
 struct cpuinfo_x86 *c = &current_cpu_data;
struct hw_residencies *hw_res = arg;
u32 val;
};
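All of the hunks in this series follow one pattern: a function whose address is handed to an IPI dispatcher (on_selected_cpus(), on_each_cpu(), smp_call_function()) gains the cf_check attribute, so the compiler plants an ENDBR64 landing pad at its entry for CET Indirect Branch Tracking. A minimal sketch of how such an annotation can be wired up, assuming a CONFIG_XEN_IBT-style option and GCC's cf_check function attribute (the exact macro is an assumption, not quoted from the tree):

/* Sketch only: expand to GCC's cf_check attribute when IBT is enabled. */
#ifdef CONFIG_XEN_IBT
# define cf_check __attribute__((cf_check)) /* entry gets an ENDBR64 */
#else
# define cf_check                           /* no-op on non-IBT builds */
#endif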
-static void do_drv_read(void *drvcmd)
+static void cf_check do_drv_read(void *drvcmd)
{
struct drv_cmd *cmd;
}
}
-static void do_drv_write(void *drvcmd)
+static void cf_check do_drv_write(void *drvcmd)
{
struct drv_cmd *cmd;
uint64_t msr_content;
static DEFINE_PER_CPU(struct perf_pair, gov_perf_pair);
static DEFINE_PER_CPU(struct perf_pair, usr_perf_pair);
-static void read_measured_perf_ctrs(void *_readin)
+static void cf_check read_measured_perf_ctrs(void *_readin)
{
struct perf_pair *readin = _readin;
return extract_freq(get_cur_val(cpumask_of(cpu)), data);
}
-static void feature_detect(void *info)
+static void cf_check feature_detect(void *info)
{
struct cpufreq_policy *policy = info;
unsigned int eax;
#define ARCH_CPU_FLAG_RESUME 1
-static void transition_pstate(void *pstate)
+static void cf_check transition_pstate(void *pstate)
{
wrmsrl(MSR_PSTATE_CTRL, *(unsigned int *)pstate);
}
-static void update_cpb(void *data)
+static void cf_check update_cpb(void *data)
{
struct cpufreq_policy *policy = data;
u32 max_hw_pstate;
};
-static void get_cpu_data(void *arg)
+static void cf_check get_cpu_data(void *arg)
{
struct amd_cpu_data *data = arg;
struct processor_performance *perf = data->perf;
return INVALID_ACPIID;
}
-static void get_mwait_ecx(void *info)
+static void cf_check get_mwait_ecx(void *info)
{
*(u32 *)info = cpuid_ecx(CPUID_MWAIT_LEAF);
}
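These helpers are only ever reached indirectly: get_mwait_ecx(), for example, runs on a remote CPU via on_selected_cpus(), which ends in a call through a function pointer. Under IBT, an indirect CALL whose target does not begin with ENDBR64 raises #CP, which is exactly what cf_check prevents. An illustrative caller (probe_mwait() is a hypothetical wrapper; the on_selected_cpus() call itself matches how this tree uses the helper):

/* Illustrative wrapper: the dispatcher's indirect call is why
 * get_mwait_ecx() must carry cf_check. */
static void probe_mwait(unsigned int cpu)
{
    uint32_t ecx;

    on_selected_cpus(cpumask_of(cpu), get_mwait_ecx, &ecx, 1);
}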
}
}
-static void disable_c1e(void *unused)
+static void cf_check disable_c1e(void *unused)
{
uint64_t msr_content;
* Collects information of correctable errors and notifies
* Dom0 via an event.
*/
-static void mce_amd_checkregs(void *info)
+static void cf_check mce_amd_checkregs(void *info)
{
mctelem_cookie_t mctc;
struct mca_summary bs;
} while ( 1 );
}
-static void do_mc_get_cpu_info(void *v)
+static void cf_check do_mc_get_cpu_info(void *v)
{
int cpu = smp_processor_id();
int cindex, cpn;
wrmsrl(MSR_K8_HWCR, hwcr);
}
-static void x86_mc_msrinject(void *data)
+static void cf_check x86_mc_msrinject(void *data)
{
struct xen_mc_msrinject *mci = data;
struct mcinfo_msr *msr;
}
/*ARGSUSED*/
-static void x86_mc_mceinject(void *data)
+static void cf_check x86_mc_mceinject(void *data)
{
printk("Simulating #MC on cpu %d\n", smp_processor_id());
__asm__ __volatile__("int $0x12");
cmci_discover();
}
-static void __cpu_mcheck_distribute_cmci(void *unused)
+static void cf_check __cpu_mcheck_distribute_cmci(void *unused)
{
cmci_discover();
}
static int adjust = 0;
static int variable_period = 1;
-static void mce_checkregs (void *info)
+static void cf_check mce_checkregs(void *info)
{
mctelem_cookie_t mctc;
struct mca_summary bs;
return ret;
}
-static int do_microcode_update(void *patch)
+static int cf_check do_microcode_update(void *patch)
{
unsigned int cpu = smp_processor_id();
int ret;
return false;
}
-void mtrr_save_fixed_ranges(void *info)
+void cf_check mtrr_save_fixed_ranges(void *info)
{
get_fixed_ranges(mtrr_state.fixed_ranges);
}
*/
int hold_mtrr_updates_on_aps;
-static void ipi_handler(void *info)
+static void cf_check ipi_handler(void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
[RETURNS] Nothing.
*/
cpuidle_current_governor->reflect(power);
}
-static void auto_demotion_disable(void *dummy)
+static void cf_check auto_demotion_disable(void *dummy)
{
u64 msr_bits;
wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
}
-static void byt_auto_demotion_disable(void *dummy)
+static void cf_check byt_auto_demotion_disable(void *dummy)
{
wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
-static void c1e_promotion_disable(void *dummy)
+static void cf_check c1e_promotion_disable(void *dummy)
{
u64 msr_bits;
#endif
}
-static void vpmu_save_force(void *arg)
+static void cf_check vpmu_save_force(void *arg)
{
struct vcpu *v = arg;
struct vpmu_struct *vpmu = vcpu_vpmu(v);
put_vpmu(v);
}
-static void vpmu_clear_last(void *arg)
+static void cf_check vpmu_clear_last(void *arg)
{
if ( this_cpu(last_vcpu) == arg )
this_cpu(last_vcpu) = NULL;
return rangeset_remove_range(mem, mfn_x(mfn), mfn_x(mfn));
}
-static void ap_resume(void *unused)
+static void cf_check ap_resume(void *unused)
{
BUG_ON(map_vcpuinfo());
BUG_ON(init_evtchn());
alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
}
-static void
-nestedhvm_flushtlb_ipi(void *info)
+static void cf_check nestedhvm_flushtlb_ipi(void *info)
{
struct vcpu *v = current;
struct domain *d = info;
free_domheap_page(maddr_to_page(pa));
}
-static void __vmx_clear_vmcs(void *info)
+static void cf_check __vmx_clear_vmcs(void *info)
{
struct vcpu *v = info;
struct vmx_vcpu *vmx = &v->arch.hvm.vmx;
};
extern struct mtrr_state mtrr_state;
-extern void mtrr_save_fixed_ranges(void *);
+extern void cf_check mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
extern int mtrr_add(unsigned long base, unsigned long size,
unsigned int type, char increment);
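The prototype in the header is updated along with the definition: these builds are assumed to use -fcf-protection=branch together with -mmanual-endbr, which suppresses the compiler's automatic ENDBR64 at every function so that only cf_check-annotated functions remain valid indirect-call targets, and keeping declaration and definition in agreement avoids a conflicting-types diagnostic. Roughly (a sketch of the intended codegen, not disassembly from this tree):

/* Assumed flags: -fcf-protection=branch -mmanual-endbr
 *
 *   void cf_check f(void *p);   ->   f: endbr64 ; ...   (indirectly callable)
 *   void g(void *p);            ->   g: ...             (no landing pad:
 *                                    an indirect call to g raises #CP)
 */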
}
}
-static void set_eoi_ready(void *data);
+static void cf_check set_eoi_ready(void *data);
static void cf_check irq_guest_eoi_timer_fn(void *data)
{
}
/* Mark specified IRQ as ready-for-EOI (if it really is) and attempt to EOI. */
-static void set_eoi_ready(void *data)
+static void cf_check set_eoi_ready(void *data)
{
struct irq_desc *desc = data;
(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
-static void __init wait_for_nmis(void *p)
+static void __init cf_check wait_for_nmis(void *p)
{
unsigned int start_count = this_cpu(nmi_count);
unsigned long ticks = 10 * 1000 * cpu_khz / nmi_hz;
}
-static void nmi_save_registers(void * dummy)
+static void cf_check nmi_save_registers(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
}
-static void nmi_cpu_setup(void * dummy)
+static void cf_check nmi_cpu_setup(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
}
-static void nmi_cpu_shutdown(void * dummy)
+static void cf_check nmi_cpu_shutdown(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs * msrs = &cpu_msrs[cpu];
}
-static void nmi_cpu_start(void * dummy)
+static void cf_check nmi_cpu_start(void *dummy)
{
int cpu = smp_processor_id();
struct op_msrs const * msrs = &cpu_msrs[cpu];
}
-static void nmi_cpu_stop(void * dummy)
+static void cf_check nmi_cpu_stop(void *dummy)
{
unsigned int v;
int cpu = smp_processor_id();
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_LVTOFF_IBS 1
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
-static inline void __init init_ibs_nmi_per_cpu(void *arg)
+static void __init cf_check init_ibs_nmi_per_cpu(void *arg)
{
unsigned long reg;
long cf_check cpu_frequency_change_helper(void *);
void check_resource_access(struct resource_access *);
-void resource_access(void *);
+void cf_check resource_access(void *);
#ifndef COMPAT
typedef long ret_t;
ra->nr_done = i;
}
-void resource_access(void *info)
+void cf_check resource_access(void *info)
{
struct resource_access *ra = info;
unsigned int i;
const uint32_t *val;
};
-static void do_write_psr_msrs(void *data)
+static void cf_check do_write_psr_msrs(void *data)
{
const struct cos_write_info *info = data;
unsigned int i, index, cos = info->cos;
break;
}
-static void noreturn __machine_halt(void *unused)
+static void noreturn cf_check __machine_halt(void *unused)
{
local_irq_disable();
}
__initcall(reboot_init);
-static void noreturn __machine_restart(void *pdelay)
+static void noreturn cf_check __machine_restart(void *pdelay)
{
machine_restart(*(unsigned int *)pdelay);
}
cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
}
-static void stop_this_cpu(void *dummy)
+static void cf_check stop_this_cpu(void *dummy)
{
__stop_this_cpu();
for ( ; ; )
unsigned long size;
};
-static void l3_cache_get(void *arg)
+static void cf_check l3_cache_get(void *arg)
{
struct cpuid4_info info;
struct l3_cache_info *l3_info = arg;
static unsigned long tsc_max_warp, tsc_check_count;
static cpumask_t tsc_check_cpumask;
-static void tsc_check_slave(void *unused)
+static void cf_check tsc_check_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
}
/* Ordinary rendezvous function which does not modify TSC values. */
-static void time_calibration_std_rendezvous(void *_r)
+static void cf_check time_calibration_std_rendezvous(void *_r)
{
struct calibration_rendezvous *r = _r;
unsigned int total_cpus = cpumask_weight(&r->cpu_calibration_map);
* Rendezvous function used when clocksource is TSC and
* no CPU hotplug will be performed.
*/
-static void time_calibration_nop_rendezvous(void *rv)
+static void cf_check time_calibration_nop_rendezvous(void *rv)
{
const struct calibration_rendezvous *r = rv;
struct cpu_time_stamp *c = &this_cpu(cpu_calibration);
disable_tsc_sync = true;
}
-static void __init reset_percpu_time(void *unused)
+static void __init cf_check reset_percpu_time(void *unused)
{
struct cpu_time *t = &this_cpu(cpu_time);
return ret;
}
-static void _take_cpu_down(void *unused)
+static void cf_check _take_cpu_down(void *unused)
{
cpu_notifier_call_chain(smp_processor_id(), CPU_DYING, NULL, true);
__cpu_disable();
}
-static int take_cpu_down(void *arg)
+static int cf_check take_cpu_down(void *arg)
{
_take_cpu_down(arg);
return 0;
}
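take_cpu_down() is the one callback here that returns a value: it is not an IPI handler but a stop_machine_run() callback, and that interface calls its argument indirectly through an int (*)(void *) pointer, so it needs cf_check just the same. A sketch of the call path (bring_cpu_down() is a hypothetical caller; stop_machine_run()'s prototype is assumed from context):

/* Assumed interface: runs fn(data) on cpu with the machine stopped. */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);

static int bring_cpu_down(unsigned int cpu) /* hypothetical */
{
    /* stop_machine_run() invokes take_cpu_down() indirectly. */
    return stop_machine_run(take_cpu_down, NULL, cpu);
}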
presmp_initcall(initialise_gdb);
-static void gdb_pause_this_cpu(void *unused)
+static void cf_check gdb_pause_this_cpu(void *unused)
{
unsigned long flags;
static DEFINE_PER_CPU(s_time_t, read_clocks_time);
static DEFINE_PER_CPU(u64, read_cycles_time);
-static void read_clocks_slave(void *unused)
+static void cf_check read_clocks_slave(void *unused)
{
unsigned int cpu = smp_processor_id();
local_irq_disable();
printk("\n");
}
-static void __init smp_scrub_heap_pages(void *data)
+static void __init cf_check smp_scrub_heap_pages(void *data)
{
unsigned long mfn, start, end;
struct page_info *pg;