static void __init wait_for_nmis(void *p)
{
- unsigned int cpu = smp_processor_id();
- unsigned int start_count = nmi_count(cpu);
+ unsigned int start_count = this_cpu(nmi_count);
unsigned long ticks = 10 * 1000 * cpu_khz / nmi_hz;
unsigned long s, e;
s = rdtsc();
do {
cpu_relax();
- if ( nmi_count(cpu) >= start_count + 2 )
+ if ( this_cpu(nmi_count) >= start_count + 2 )
break;
e = rdtsc();
} while( e - s < ticks );
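
For scale, the timeout above is ten watchdog periods expressed in TSC cycles. A standalone sketch of that arithmetic, using illustrative values for cpu_khz and nmi_hz rather than anything taken from the patch:

#include <stdio.h>

/* Standalone sketch of the timeout computed in wait_for_nmis() above.
 * cpu_khz and nmi_hz are assumed example values, not from the patch.
 */
int main(void)
{
    unsigned long long cpu_khz = 2000000;  /* assume a 2 GHz TSC */
    unsigned long long nmi_hz  = 1000;     /* assume a 1 kHz watchdog */
    unsigned long long ticks   = 10 * 1000 * cpu_khz / nmi_hz;

    /* ten 1 ms watchdog periods = 10 ms = 20,000,000 cycles at 2 GHz */
    printf("timeout: %llu TSC cycles\n", ticks);
    return 0;
}
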
printk("Testing NMI watchdog on all CPUs:");
for_each_online_cpu ( cpu )
- prev_nmi_count[cpu] = nmi_count(cpu);
+ prev_nmi_count[cpu] = per_cpu(nmi_count, cpu);
/*
 * Wait at most 10 ticks for 2 watchdog NMIs on each CPU.
 */
for_each_online_cpu ( cpu )
{
- if ( nmi_count(cpu) - prev_nmi_count[cpu] < 2 )
+ if ( per_cpu(nmi_count, cpu) - prev_nmi_count[cpu] < 2 )
{
printk(" %d", cpu);
ok = false;
printk("CPU\tNMI\n");
for_each_online_cpu ( cpu )
- printk("%3u\t%3u\n", cpu, nmi_count(cpu));
+ printk("%3u\t%3u\n", cpu, per_cpu(nmi_count, cpu));
if ( !hardware_domain || !(v = domain_vcpu(hardware_domain, 0)) )
return;
static nmi_callback_t *nmi_callback = dummy_nmi_callback;
+DEFINE_PER_CPU(unsigned int, nmi_count);
+
void do_nmi(const struct cpu_user_regs *regs)
{
unsigned int cpu = smp_processor_id();
unsigned char reason = 0;
bool handle_unknown = false;
- ++nmi_count(cpu);
+ this_cpu(nmi_count)++;
if ( nmi_callback(regs, cpu) )
return;
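
The counter is now a true per-CPU variable: DEFINE_PER_CPU(unsigned int, nmi_count) gives every CPU its own instance, this_cpu(nmi_count) names the executing CPU's copy (as in do_nmi() above), and per_cpu(nmi_count, cpu) names a specific CPU's copy (as in the watchdog check). A simplified, hypothetical model of those semantics, not the Xen implementation:

#include <stdio.h>

#define MODEL_NR_CPUS 4                         /* illustrative bound */

/* model of DEFINE_PER_CPU(unsigned int, nmi_count): one slot per CPU */
static unsigned int model_nmi_count[MODEL_NR_CPUS];

/* stand-in for smp_processor_id() */
static unsigned int model_current_cpu;

#define model_this_cpu(var)      ((var)[model_current_cpu])
#define model_per_cpu(var, cpu)  ((var)[(cpu)])

int main(void)
{
    model_current_cpu = 2;
    model_this_cpu(model_nmi_count)++;          /* like this_cpu(nmi_count)++ */

    for (unsigned int cpu = 0; cpu < MODEL_NR_CPUS; cpu++)
        printf("%3u\t%3u\n", cpu, model_per_cpu(model_nmi_count, cpu));

    return 0;
}
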
/* arch independent irq_stat fields */
#define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending)
#define local_irq_count(cpu) __IRQ_STAT((cpu), __local_irq_count)
-#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count)
#define mwait_wakeup(cpu) __IRQ_STAT((cpu), __mwait_wakeup)
#endif /* __irq_cpustat_h */