#define GET_CC7_RES(val) GET_HW_RES_IN_NS(0x3FE, val) /* SNB onwards */
#define PHI_CC6_RES(val) GET_HW_RES_IN_NS(0x3FF, val) /* Xeon Phi only */
-static void lapic_timer_nop(void) { }
+static void cf_check lapic_timer_nop(void) { }
void (*__read_mostly lapic_timer_off)(void);
void (*__read_mostly lapic_timer_on)(void);
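Note: lapic_timer_nop() is only ever called through the lapic_timer_off/
lapic_timer_on pointers above, which is the whole reason for the cf_check
annotation. With CET Indirect Branch Tracking active, every indirect-call
target must begin with an ENDBR64 instruction, and cf_check is how Xen asks
the compiler to emit one. A minimal sketch of the definition, assuming the
CONFIG_XEN_IBT symbol from the CET-IBT series:

#ifdef CONFIG_XEN_IBT
# define cf_check __attribute__((cf_check))
#else
# define cf_check
#endif

Under GCC's -mmanual-endbr, only functions carrying this attribute get an
ENDBR64 landing pad, so everything reached purely by direct call remains an
invalid indirect branch target.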
"HALT"
};
-static uint64_t get_stime_tick(void) { return (uint64_t)NOW(); }
-static uint64_t stime_ticks_elapsed(uint64_t t1, uint64_t t2) { return t2 - t1; }
-static uint64_t stime_tick_to_ns(uint64_t ticks) { return ticks; }
+static uint64_t cf_check get_stime_tick(void)
+{
+ return NOW();
+}
+
+static uint64_t cf_check stime_ticks_elapsed(uint64_t t1, uint64_t t2)
+{
+ return t2 - t1;
+}
+
+static uint64_t cf_check stime_tick_to_ns(uint64_t ticks)
+{
+ return ticks;
+}
+
-static uint64_t get_acpi_pm_tick(void) { return (uint64_t)inl(pmtmr_ioport); }
-static uint64_t acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2)
+static uint64_t cf_check get_acpi_pm_tick(void)
+{
+    return inl(pmtmr_ioport);
+}
+
+static uint64_t cf_check acpi_pm_ticks_elapsed(uint64_t t1, uint64_t t2)
{
if ( t2 >= t1 )
return (t2 - t1);
spin_unlock(&power->stat_lock);
}
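Note: the stime tick helpers above and the ACPI PM timer helpers are the two
interchangeable backends for C-state residency accounting, installed into a
common trio of function pointers, hence cf_check on all six. Roughly, from
memory of cpu_idle.c (pointer names are assumptions):

static uint64_t (*__read_mostly cpuidle_get_tick)(void);
static uint64_t (*__read_mostly ticks_elapsed)(uint64_t, uint64_t);
static uint64_t (*__read_mostly tick_to_ns)(uint64_t);

    /* chosen once, depending on whether the PM timer is usable */
    cpuidle_get_tick = get_acpi_pm_tick;      /* or get_stime_tick */
    ticks_elapsed    = acpi_pm_ticks_elapsed; /* or stime_ticks_elapsed */
    tick_to_ns       = acpi_pm_tick_to_ns;    /* or stime_tick_to_ns */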
-static void acpi_processor_idle(void)
+static void cf_check acpi_processor_idle(void)
{
unsigned int cpu = smp_processor_id();
struct acpi_processor_power *power = processor_powers[cpu];
cpuidle_current_governor->reflect(power);
}
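Note: acpi_processor_idle() itself is reached only via the pm_idle pointer,
roughly pm_idle = acpi_processor_idle; when cpuidle is set up, so it needs
its own ENDBR64 landing pad. The cpuidle_current_governor->reflect() call it
makes is likewise indirect, which is why the menu governor hooks below are
annotated too.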
-void acpi_dead_idle(void)
+void cf_check acpi_dead_idle(void)
{
struct acpi_processor_power *power;
struct acpi_processor_cx *cx;
return (us >> 32) ? (unsigned int)-2000 : (unsigned int)us;
}
-static int menu_select(struct acpi_processor_power *power)
+static int cf_check menu_select(struct acpi_processor_power *power)
{
struct menu_device *data = &this_cpu(menu_devices);
int i;
return data->last_state_idx;
}
-static void menu_reflect(struct acpi_processor_power *power)
+static void cf_check menu_reflect(struct acpi_processor_power *power)
{
struct menu_device *data = &this_cpu(menu_devices);
u64 new_factor;
data->correction_factor[data->bucket] = new_factor;
}
-static int menu_enable_device(struct acpi_processor_power *power)
+static int cf_check menu_enable_device(struct acpi_processor_power *power)
{
memset(&per_cpu(menu_devices, power->cpu), 0, sizeof(struct menu_device));
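Note: all three menu hooks are reachable only through the governor ops
structure; a sketch closely following cpuidle_menu.c:

struct cpuidle_governor menu_governor =
{
    .name    = "menu",
    .rating  = 20,
    .enable  = menu_enable_device, /* called via ->enable */
    .select  = menu_select,        /* called via ->select */
    .reflect = menu_reflect,       /* called via ->reflect */
};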
{}
};
-static void mwait_idle(void)
+static void cf_check mwait_idle(void)
{
unsigned int cpu = smp_processor_id();
struct acpi_processor_power *power = processor_powers[cpu];
DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
-static void default_idle(void);
+static void cf_check default_idle(void);
void (*pm_idle) (void) __read_mostly = default_idle;
void (*dead_idle) (void) __read_mostly = default_dead_idle;
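Note: pm_idle and dead_idle are the central dispatch points that make every
idle implementation in this patch (default_idle, mwait_idle,
acpi_processor_idle, acpi_dead_idle, default_dead_idle) an indirect branch
target. The idle loop calls through them roughly like this; a sketch, not
the verbatim domain.c code:

static void noreturn cf_check idle_loop(void)
{
    for ( ; ; )
    {
        /* ... scheduling and tasklet work elided ... */
        (*pm_idle)();   /* indirect call: target must start with ENDBR64 */
        do_softirq();
    }
}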
-static void default_idle(void)
+static void cf_check default_idle(void)
{
struct cpu_info *info = get_cpu_info();
local_irq_enable();
}
-void default_dead_idle(void)
+void cf_check default_dead_idle(void)
{
/*
* When going into S3, without flushing caches modified data may be
smp_send_event_check_mask(&cpu_online_map);
}
-void hpet_broadcast_enter(void)
+void cf_check hpet_broadcast_enter(void)
{
unsigned int cpu = smp_processor_id();
struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
spin_unlock(&ch->lock);
}
-void hpet_broadcast_exit(void)
+void cf_check hpet_broadcast_exit(void)
{
unsigned int cpu = smp_processor_id();
struct hpet_event_channel *ch = per_cpu(cpu_bc_channel, cpu);
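Note: the HPET broadcast hooks here, and the PIT ones further down, are the
remaining candidate values for the lapic_timer_off/lapic_timer_on pointers
from the first hunk. The selection in lapic_timer_init() looks roughly like
this (sketch from cpu_idle.c):

    if ( boot_cpu_has(X86_FEATURE_ARAT) )
    {
        /* Always Running APIC Timer: no deep C-state workaround needed */
        lapic_timer_off = lapic_timer_nop;
        lapic_timer_on  = lapic_timer_nop;
    }
    else if ( hpet_broadcast_is_available() )
    {
        lapic_timer_off = hpet_broadcast_enter;
        lapic_timer_on  = hpet_broadcast_exit;
    }
    else if ( pit_broadcast_is_available() )
    {
        lapic_timer_off = pit_broadcast_enter;
        lapic_timer_on  = pit_broadcast_exit;
    }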
int mwait_idle_init(struct notifier_block *);
int cpuidle_init_cpu(unsigned int cpu);
-void default_dead_idle(void);
-void acpi_dead_idle(void);
+void cf_check default_dead_idle(void);
+void cf_check acpi_dead_idle(void);
void play_dead(void);
void trace_exit_reason(u32 *irq_traced);
void update_idle_stats(struct acpi_processor_power *,
*/
void hpet_broadcast_init(void);
void hpet_broadcast_resume(void);
-void hpet_broadcast_enter(void);
-void hpet_broadcast_exit(void);
+void cf_check hpet_broadcast_enter(void);
+void cf_check hpet_broadcast_exit(void);
int hpet_broadcast_is_available(void);
void hpet_disable_legacy_broadcast(void);
int cpu_frequency_change(u64 freq);
-void pit_broadcast_enter(void);
-void pit_broadcast_exit(void);
+void cf_check pit_broadcast_enter(void);
+void cf_check pit_broadcast_exit(void);
int pit_broadcast_is_available(void);
-uint64_t acpi_pm_tick_to_ns(uint64_t ticks);
+uint64_t cf_check acpi_pm_tick_to_ns(uint64_t ticks);
uint64_t tsc_ticks2ns(uint64_t ticks);
}
__initcall(init_pmtmr_scale);
-uint64_t acpi_pm_tick_to_ns(uint64_t ticks)
+uint64_t cf_check acpi_pm_tick_to_ns(uint64_t ticks)
{
return scale_delta(ticks, &pmt_scale);
}
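Note: the ACPI PM timer runs at a fixed 3.579545 MHz, so the conversion is
ns = ticks * 10^9 / 3579545, i.e. about 279.4 ns per tick. init_pmtmr_scale()
precomputes that ratio as a multiply-and-shift pair in pmt_scale so that
scale_delta() avoids a division on this hot path. Quick sanity check,
hypothetical, not Xen code:

    uint64_t ns = acpi_pm_tick_to_ns(3579545);   /* ~= 1,000,000,000 */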
}
__initcall(disable_pit_irq);
-void pit_broadcast_enter(void)
+void cf_check pit_broadcast_enter(void)
{
cpumask_set_cpu(smp_processor_id(), &pit_broadcast_mask);
}
-void pit_broadcast_exit(void)
+void cf_check pit_broadcast_exit(void)
{
int cpu = smp_processor_id();
static unsigned int __read_mostly userspace_cmdline_freq;
static DEFINE_PER_CPU(unsigned int, cpu_set_freq);
-static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
- unsigned int event)
+static int cf_check cpufreq_governor_userspace(
+ struct cpufreq_policy *policy, unsigned int event)
{
int ret = 0;
unsigned int cpu;
return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
}
-static bool_t __init
+static bool __init cf_check
cpufreq_userspace_handle_option(const char *name, const char *val)
{
if (!strcmp(name, "speed") && val) {
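Note: both userspace-governor hooks are published via a cpufreq_governor
structure, which is the indirect-call site that motivates cf_check; roughly
(a sketch, member layout from memory):

struct cpufreq_governor cpufreq_gov_userspace = {
    .name          = "userspace",
    .governor      = cpufreq_governor_userspace,
    .handle_option = cpufreq_userspace_handle_option,
};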
/*
* cpufreq performance governor
*/
-static int cpufreq_governor_performance(struct cpufreq_policy *policy,
- unsigned int event)
+static int cf_check cpufreq_governor_performance(
+ struct cpufreq_policy *policy, unsigned int event)
{
int ret = 0;
/*
* cpufreq powersave governor
*/
-static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
- unsigned int event)
+static int cf_check cpufreq_governor_powersave(
+ struct cpufreq_policy *policy, unsigned int event)
{
int ret = 0;
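Note: the performance and powersave governors are wired up the same way;
nothing calls them directly (sketch):

struct cpufreq_governor cpufreq_gov_performance = {
    .name     = "performance",
    .governor = cpufreq_governor_performance,
};

struct cpufreq_governor cpufreq_gov_powersave = {
    .name     = "powersave",
    .governor = cpufreq_governor_powersave,
};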