if ( alert_counter[cpu] == 5*nmi_hz )
{
console_force_unlock();
- disable_criticalregion_checking();
die("NMI Watchdog detected LOCKUP on CPU", regs, cpu);
}
}
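
For context: this hunk sits in the standard NMI-watchdog tick. A per-CPU counter is bumped on every watchdog NMI and cleared whenever the CPU shows forward progress, so hitting 5*nmi_hz means roughly five seconds of silence. A minimal sketch of that surrounding shape, assuming Linux-style names (nmi_watchdog_tick and touch_nmi_watchdog are assumptions; only alert_counter and nmi_hz appear in the hunk above):

    void nmi_watchdog_tick(struct pt_regs *regs)
    {
        int cpu = smp_processor_id();

        /* No forward progress observed since the last tick: count it. */
        alert_counter[cpu]++;

        if ( alert_counter[cpu] == 5*nmi_hz )
        {
            /* ~5s without progress: free the console and report the hang. */
            console_force_unlock();
            die("NMI Watchdog detected LOCKUP on CPU", regs, cpu);
        }
    }

    /* Any sign of life (e.g. the timer interrupt) resets the count. */
    void touch_nmi_watchdog(int cpu)
    {
        alert_counter[cpu] = 0;
    }
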
perfc_incrc(page_faults);
- ASSERT_no_criticalregion();
-
-
if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
{
if ( unlikely(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) &&
trap_info_t *ti;
unsigned long fixup;
- ASSERT_no_criticalregion();
-
/* Badness if error in ring 0, or result of an interrupt. */
if ( !(regs->xcs & 3) || (error_code & 1) )
goto gp_in_kernel;
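
Two x86 facts make the test above read cleanly (general background, not introduced by this patch):

    /*
     * regs->xcs & 3   == 0  ->  the saved CS has RPL 0, i.e. the #GP was
     *                           raised while executing in ring 0, in the
     *                           hypervisor itself.
     * error_code & 1  != 0  ->  the EXT bit: the fault occurred while the
     *                           CPU was delivering an external event
     *                           (interrupt/NMI), so it cannot safely be
     *                           reflected to the guest.
     * Either way the fault must be handled in-kernel rather than bounced.
     */
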
asmlinkage void mem_parity_error(struct pt_regs *regs)
{
console_force_unlock();
- disable_criticalregion_checking();
printk("\n\n");
asmlinkage void io_check_error(struct pt_regs *regs)
{
console_force_unlock();
- disable_criticalregion_checking();
printk("\n\n");
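
Both of these handlers run in NMI context, which may have interrupted a CPU holding the console lock mid-printk; hence console_force_unlock() before any output. A plausible sketch of that helper (the name is from the hunks above; the body is an assumption):

    void console_force_unlock(void)
    {
        /* Assumption: reinitialise rather than unlock, since the
         * interrupted lock holder may never run again. */
        console_lock = SPIN_LOCK_UNLOCKED;
    }
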
/* No-op hypercall. */
return -ENOSYS;
}
-
-/*
- * Lock debugging
- */
-
-#ifndef NDEBUG
-
-static int crit_count[NR_CPUS], crit_checking_disabled[NR_CPUS];
-
-void disable_criticalregion_checking(void)
-{
- int cpu = smp_processor_id();
- crit_checking_disabled[cpu]++;
-}
-
-void criticalregion_enter(void)
-{
- int cpu = smp_processor_id();
- if ( crit_checking_disabled[cpu] )
- return;
- ASSERT(crit_count[cpu] >= 0);
- crit_count[cpu]++;
-}
-
-void criticalregion_exit(void)
-{
- int cpu = smp_processor_id();
- if ( crit_checking_disabled[cpu] )
- return;
- crit_count[cpu]--;
- ASSERT(crit_count[cpu] >= 0);
-}
-
-void ASSERT_no_criticalregion(void)
-{
- int cpu = smp_processor_id();
- if ( (crit_count[cpu] == 0) || crit_checking_disabled[cpu] )
- return;
- disable_criticalregion_checking();
- ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! */
- ASSERT(crit_count[cpu] == 0); /* we should definitely take this path */
- ASSERT(1); /* NEVER GET HERE! */
-}
-
-#endif /* !NDEBUG */
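
For readers of the removal above: the checker kept a per-CPU nesting count, incremented on every lock acquire and decremented on every release, and ASSERT_no_criticalregion() flagged code paths that may fault or touch user memory if they were reached with a lock still held. Intended usage, reconstructed from the deleted definitions (illustrative only):

    void example_path(void)
    {
        spin_lock(&some_lock);       /* debug build: criticalregion_enter() */
        /* ... must not fault or access user space while held ... */
        spin_unlock(&some_lock);     /* debug build: criticalregion_exit() */

        ASSERT_no_criticalregion();  /* would fire here if a lock leaked */
        /* ... now safe to fault or copy to/from user space ... */
    }
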
#define shadow_mode(_d) ((_d)->mm.shadow_mode)
#define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
-#define shadow_lock(_m) spin_lock_nochecking(&(_m)->shadow_lock)
-#define shadow_unlock(_m) spin_unlock_nochecking(&(_m)->shadow_lock)
+#define shadow_lock(_m) spin_lock(&(_m)->shadow_lock)
+#define shadow_unlock(_m) spin_unlock(&(_m)->shadow_lock)
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
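
Given the shadow_lock_init() definition above, the `_m` argument is the `mm` struct, so call sites keep the same shape with the plain primitives now that the `_nochecking` variants are gone (illustrative sketch):

    shadow_lock(&d->mm);
    /* ... read or update d->mm.shadow_mode state under the lock ... */
    shadow_unlock(&d->mm);
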
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- ASSERT_no_criticalregion(); \
__asm__ __volatile__( \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- ASSERT_no_criticalregion(); \
__asm__ __volatile__( \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
static always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- ASSERT_no_criticalregion();
if (__builtin_constant_p(n)) {
unsigned long ret;
static always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- ASSERT_no_criticalregion();
if (__builtin_constant_p(n)) {
unsigned long ret;
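
Typical call site for the double-underscore variants, which assume the caller has already validated the user pointer. Both return the number of bytes left uncopied, so nonzero means a fault (req and guest_ptr are made-up names):

    struct some_request req;

    if ( __copy_from_user(&req, guest_ptr, sizeof(req)) != 0 )
        return -EFAULT;  /* faulted partway through the copy */
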
#endif
-#ifndef NDEBUG
-
-extern void criticalregion_enter(void);
-extern void criticalregion_exit(void);
-extern void ASSERT_no_criticalregion(void);
-extern void disable_criticalregion_checking(void);
-
-#define spin_lock(_lock) \
- do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0)
-#define spin_unlock(_lock) \
- do { _raw_spin_unlock(_lock); criticalregion_exit(); } while (0)
-#define spin_lock_recursive(_lock) \
- do { criticalregion_enter(); _raw_spin_lock_recursive(_lock); } while (0)
-#define spin_unlock_recursive(_lock) \
- do { _raw_spin_unlock_recursive(_lock); criticalregion_exit(); } while (0)
-#define read_lock(_lock) \
- do { criticalregion_enter(); _raw_read_lock(_lock); } while (0)
-#define read_unlock(_lock) \
- do { _raw_read_unlock(_lock); criticalregion_exit(); } while (0)
-#define write_lock(_lock) \
- do { criticalregion_enter(); _raw_write_lock(_lock); } while (0)
-#define write_unlock(_lock) \
- do { _raw_write_unlock(_lock); criticalregion_exit(); } while (0)
-
-static inline int spin_trylock(spinlock_t *lock)
-{
- criticalregion_enter();
- if ( !_raw_spin_trylock(lock) )
- {
- criticalregion_exit();
- return 0;
- }
- return 1;
-}
-
-#else
-
-#define ASSERT_no_criticalregion() ((void)0)
-#define disable_criticalregion_checking() ((void)0)
-
#define spin_lock(_lock) _raw_spin_lock(_lock)
#define spin_trylock(_lock) _raw_spin_trylock(_lock)
#define spin_unlock(_lock) _raw_spin_unlock(_lock)
#define write_lock(_lock) _raw_write_lock(_lock)
#define write_unlock(_lock) _raw_write_unlock(_lock)
-#endif
-
-/*
- * Use these if you have taken special care to ensure that certain unsafe
- * things can occur in your critical region (e.g., faults, user-space
- * accesses).
- */
-#define spin_lock_nochecking(_lock) _raw_spin_lock(_lock)
-#define spin_trylock_nochecking(_lock) _raw_spin_trylock(_lock)
-#define spin_unlock_nochecking(_lock) _raw_spin_unlock(_lock)
-
#endif /* __SPINLOCK_H__ */
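
One subtlety in the deleted debug wrappers: spin_trylock had to take the instrumentation before the raw attempt and roll it back on failure, or an unsuccessful trylock would leave the per-CPU count permanently high. With everything reduced to the raw primitives, the `_nochecking` escape hatch becomes an exact alias and is dropped; surviving call sites migrate mechanically, as the shadow-lock hunk earlier in this patch does:

    /* With checking gone these were identities, hence their removal: */
    #define spin_lock_nochecking(_lock)    _raw_spin_lock(_lock)    /* == spin_lock    */
    #define spin_trylock_nochecking(_lock) _raw_spin_trylock(_lock) /* == spin_trylock */
    #define spin_unlock_nochecking(_lock)  _raw_spin_unlock(_lock)  /* == spin_unlock  */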