x86: Assert !in_atomic() before exiting to guest context.

Returning to guest context while still in atomic context (non-zero
preempt count, inside an IRQ handler, or with interrupts disabled)
would indicate a bug, so catch it with an assertion on each of the
SVM, VMX and PV exit paths in debug builds.

Signed-off-by: Keir Fraser <keir@xen.org>
ENTRY(svm_asm_do_resume)
call svm_intr_assist
call_with_regs(nsvm_vcpu_switch)
+ ASSERT_NOT_IN_ATOMIC
get_current(bx)
CLGI
vmx_asm_do_vmentry:
call vmx_intr_assist
call nvmx_switch_guest
+ ASSERT_NOT_IN_ATOMIC
get_current(bx)
cli
/* %rbx: struct vcpu */
ENTRY(compat_test_all_events)
+ ASSERT_NOT_IN_ATOMIC
cli # tests must not race interrupts
/*compat_test_softirqs:*/
movl VCPU_processor(%rbx),%eax
/* %rbx: struct vcpu */
test_all_events:
+ ASSERT_NOT_IN_ATOMIC
cli # tests must not race interrupts
/*test_softirqs:*/
movl VCPU_processor(%rbx),%eax
bool in_atomic(void)
{
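/* Atomic context: non-zero preempt count, IRQ context, or IRQs disabled. */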
return preempt_count() || in_irq() || !local_irq_is_enabled();
}
+
+/* Helper called from assembly via the ASSERT_NOT_IN_ATOMIC macro. */
+void bug_if_in_atomic(void)
+{
+ BUG_ON(in_atomic());
+}
GET_STACK_BASE(reg); \
__GET_CURRENT(reg)
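+
+/*
+ * Assert, on an exit-to-guest path, that we are not in atomic context.
+ * Interrupts are enabled first: some callers arrive with them disabled,
+ * and in_atomic() treats disabled IRQs as being in atomic context.
+ */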
+#ifndef NDEBUG
+#define ASSERT_NOT_IN_ATOMIC \
+ sti; /* sometimes called with interrupts disabled: safe to enable */ \
+ call bug_if_in_atomic
+#else
+#define ASSERT_NOT_IN_ATOMIC
+#endif
+
#endif
#endif /* __X86_ASM_DEFNS_H__ */
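
For reference, with NDEBUG undefined the test_all_events path above
effectively becomes the following once the macro is expanded (shown
purely for illustration; it is not an additional hunk of this patch):

test_all_events:
        sti                   # macro: enable IRQs, so in_atomic()'s
                              # local_irq_is_enabled() check passes
        call bug_if_in_atomic # macro: BUG() if preempt_count() or in_irq()
                              # is non-zero; %rbx (struct vcpu) is callee-
                              # saved, so the call preserves it
        cli                   # original code: tests must not race interrupts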