     return preempt_count() || in_irq() || !local_irq_is_enabled();
 }
-/* asm helper */
-void bug_if_in_atomic(void)
+#ifndef NDEBUG
+void ASSERT_NOT_IN_ATOMIC(void)
 {
-    BUG_ON(in_atomic());
+    ASSERT(!preempt_count());
+    ASSERT(!in_irq());
+    ASSERT(local_irq_is_enabled());
 }
+#endif
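
The helper replaces the old bug_if_in_atomic() and is compiled only into
debug builds. Splitting the check into three separate ASSERT()s, rather
than the single BUG_ON(in_atomic()), means a failing assertion names the
exact condition that was violated: a non-zero preemption count, IRQ
context, or disabled interrupts. Given the definition of in_atomic()
above, the decomposition fires in exactly the same situations as the
coarse check; a minimal sketch of the equivalence (helper name
hypothetical):

    /* Hypothetical coarse variant: trips whenever the three ASSERTs
     * above would, since in_atomic() is exactly their disjunction,
     * but reports nothing about which condition failed. */
    static void assert_not_in_atomic_coarse(void)
    {
        ASSERT(!in_atomic());
    }
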
     struct task_slice next_slice;
     int cpu = smp_processor_id();
-    ASSERT(!in_atomic());
+    ASSERT_NOT_IN_ATOMIC();
     SCHED_STAT_CRANK(sched_run);
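
In the scheduler's main loop (the function invoking
SCHED_STAT_CRANK(sched_run)), the open-coded assertion becomes a call to
the new helper; the softirq and waitqueue paths below get the same
substitution.
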
 asmlinkage void do_softirq(void)
 {
-    ASSERT(!in_atomic());
+    ASSERT_NOT_IN_ATOMIC();
     __do_softirq(0);
 }
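
Likewise in do_softirq(): softirq processing can end up invoking the
scheduler, so it must never be entered from atomic context.
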
     struct vcpu *curr = current;
     struct waitqueue_vcpu *wqv = curr->waitqueue_vcpu;
-    ASSERT(!in_atomic());
+    ASSERT_NOT_IN_ATOMIC();
     __prepare_to_wait(wqv);
     ASSERT(list_empty(&wqv->list));
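
The waitqueue's prepare-to-wait path gets the same treatment; a vCPU
about to sleep on a waitqueue cannot be in atomic context. The assembly
entry paths cannot use the C ASSERT() directly, so they wrap the new
function in a macro:
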
 #ifndef NDEBUG
 #define ASSERT_NOT_IN_ATOMIC                                            \
         sti; /* sometimes called with interrupts disabled: safe to enable */ \
-        call bug_if_in_atomic
+        call ASSERT_NOT_IN_ATOMIC
 #else
 #define ASSERT_NOT_IN_ATOMIC
 #endif
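
The object-like asm macro deliberately shares its name with the C
function: the preprocessor never re-expands a macro inside its own
replacement list, so "call ASSERT_NOT_IN_ATOMIC" assembles into a call
to the C symbol rather than recursing. The sti is needed because the C
function asserts local_irq_is_enabled(), which would fail at call sites
that legitimately run with interrupts disabled; per the comment,
enabling them at these points is safe. What a debug-build use of the
macro expands to (illustration only):

        sti                        /* enable interrupts: safe at these call sites */
        call ASSERT_NOT_IN_ATOMIC  /* the debug-only C function from the first hunk */

In release builds the macro expands to nothing, so the entry paths pay
no cost.
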
 bool_t in_atomic(void);
+#ifndef NDEBUG
+void ASSERT_NOT_IN_ATOMIC(void);
+#else
+#define ASSERT_NOT_IN_ATOMIC() ((void)0)
+#endif
+
 #endif /* __XEN_PREEMPT_H__ */
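
With the declaration and the release-build stub in the header, C call
sites need no NDEBUG guards of their own: in release builds the macro
compiles the call away. A minimal usage sketch (caller name
hypothetical; the include path follows from the __XEN_PREEMPT_H__
guard):

    #include <xen/preempt.h>

    /* Hypothetical caller: any path that may block or reschedule. */
    static void example_might_sleep(void)
    {
        ASSERT_NOT_IN_ATOMIC();  /* three ASSERTs in debug builds, ((void)0) otherwise */
        /* ... code that may block ... */
    }
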