ia64/xen-unstable

view xen/include/xen/spinlock.h @ 19800:78962f85c562

IOMMU: Add two generic functions to vendor neutral interface

Add two generic functions to the vendor-neutral IOMMU interface. The
reason is that, as of changeset 19732, there is only one global flag,
"iommu_enabled", which controls IOMMU enablement for both VT-d and AMD
systems, so we need different code paths for VT-d and AMD IOMMU systems
when this flag is turned on. The early check of "iommu_enabled" in
iommu_setup() is also removed, to prevent IOMMU functionality from
being disabled on AMD systems.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 19 08:41:50 2009 +0100 (2009-06-19)
parents f210a633571c
children
#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__

#include <xen/config.h>
#include <asm/system.h>
#include <asm/spinlock.h>

#ifndef NDEBUG
struct lock_debug {
    int irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
};
#define _LOCK_DEBUG { -1 }
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
struct lock_debug { };
#define _LOCK_DEBUG { }
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
#endif

typedef struct {
    raw_spinlock_t raw;
    u16 recurse_cpu:12;
    u16 recurse_cnt:4;
    struct lock_debug debug;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, _LOCK_DEBUG }
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
#define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
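
/*
 * Illustrative sketch (not part of this header): locks are either defined
 * statically in the unlocked state, e.g.
 *     static DEFINE_SPINLOCK(example_lock);
 * or embedded in a structure and initialised at run time with
 * spin_lock_init().  "struct example_state" and "example_state_init" are
 * made-up names for the example.
 */
struct example_state {
    spinlock_t lock;
    unsigned long count;
};

static void example_state_init(struct example_state *s)
{
    spin_lock_init(&s->lock);   /* puts the lock into the unlocked state */
    s->count = 0;
}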
typedef struct {
    raw_rwlock_t raw;
    struct lock_debug debug;
} rwlock_t;

#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED, _LOCK_DEBUG }
#define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
#define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)

void _spin_lock(spinlock_t *lock);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);

void _spin_unlock(spinlock_t *lock);
void _spin_unlock_irq(spinlock_t *lock);
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);

int _spin_is_locked(spinlock_t *lock);
int _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);
void _spin_barrier_irq(spinlock_t *lock);

void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);

void _read_lock(rwlock_t *lock);
void _read_lock_irq(rwlock_t *lock);
unsigned long _read_lock_irqsave(rwlock_t *lock);

void _read_unlock(rwlock_t *lock);
void _read_unlock_irq(rwlock_t *lock);
void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);

void _write_lock(rwlock_t *lock);
void _write_lock_irq(rwlock_t *lock);
unsigned long _write_lock_irqsave(rwlock_t *lock);
int _write_trylock(rwlock_t *lock);

void _write_unlock(rwlock_t *lock);
void _write_unlock_irq(rwlock_t *lock);
void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags);

int _rw_is_locked(rwlock_t *lock);
int _rw_is_write_locked(rwlock_t *lock);
#define spin_lock(l)                  _spin_lock(l)
#define spin_lock_irq(l)              _spin_lock_irq(l)
#define spin_lock_irqsave(l, f)       ((f) = _spin_lock_irqsave(l))

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

#define spin_is_locked(l)             _spin_is_locked(l)
#define spin_trylock(l)               _spin_trylock(l)
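
/*
 * Illustrative sketch (not part of this header): the common calling patterns
 * for the wrappers above.  The function names are made up for the example.
 * spin_lock_irqsave() must be paired with spin_unlock_irqrestore() using the
 * same flags variable.
 */
static void example_plain(spinlock_t *lock, unsigned long *count)
{
    spin_lock(lock);            /* may spin; interrupt state is left alone */
    (*count)++;                 /* critical section */
    spin_unlock(lock);
}

static void example_irqsave(spinlock_t *lock, unsigned long *count)
{
    unsigned long flags;

    spin_lock_irqsave(lock, flags);   /* disables IRQs, remembers old state */
    (*count)++;
    spin_unlock_irqrestore(lock, flags);
}

static int example_try(spinlock_t *lock)
{
    if ( !spin_trylock(lock) )
        return 0;               /* lock was busy; protected data not touched */
    /* ... critical section ... */
    spin_unlock(lock);
    return 1;
}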
/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)
#define spin_barrier_irq(l)           _spin_barrier_irq(l)
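
/*
 * Illustrative sketch (not part of this header): spin_barrier() does not
 * acquire the lock for the caller; it only waits until the lock has been
 * observed quiescent, i.e. any critical section already in progress has
 * completed.  The name "example_quiesce" is made up for the example.
 */
static void example_quiesce(spinlock_t *lock)
{
    /* Wait for any CPU currently inside a lock-protected region to leave it. */
    spin_barrier(lock);
    /* Critical sections entered before this call have now all completed. */
}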
/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define spin_lock_recursive(l)        _spin_lock_recursive(l)
#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
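
/*
 * Illustrative sketch (not part of this header): a function that may be
 * reached both with and without the lock already held on this CPU can use
 * the recursive forms, provided every path that can nest uses them too.
 * Nesting depth is bounded by the 4-bit recurse_cnt field above.  The
 * function names are made up for the example.
 */
static void example_inner(spinlock_t *lock)
{
    spin_lock_recursive(lock);      /* safe even if this CPU already holds it */
    /* ... critical section ... */
    spin_unlock_recursive(lock);
}

static void example_outer(spinlock_t *lock)
{
    spin_lock_recursive(lock);
    example_inner(lock);            /* nested acquisition on the same CPU */
    spin_unlock_recursive(lock);
}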
#define read_lock(l)                  _read_lock(l)
#define read_lock_irq(l)              _read_lock_irq(l)
#define read_lock_irqsave(l, f)       ((f) = _read_lock_irqsave(l))

#define read_unlock(l)                _read_unlock(l)
#define read_unlock_irq(l)            _read_unlock_irq(l)
#define read_unlock_irqrestore(l, f)  _read_unlock_irqrestore(l, f)

#define write_lock(l)                 _write_lock(l)
#define write_lock_irq(l)             _write_lock_irq(l)
#define write_lock_irqsave(l, f)      ((f) = _write_lock_irqsave(l))
#define write_trylock(l)              _write_trylock(l)

#define write_unlock(l)               _write_unlock(l)
#define write_unlock_irq(l)           _write_unlock_irq(l)
#define write_unlock_irqrestore(l, f) _write_unlock_irqrestore(l, f)

#define rw_is_locked(l)               _rw_is_locked(l)
#define rw_is_write_locked(l)         _rw_is_write_locked(l)

#endif /* __SPINLOCK_H__ */
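
The following is an illustrative usage sketch of the read/write lock wrappers,
not part of the header above: readers that only inspect shared state take the
read side and can run concurrently, while a writer takes the exclusive write
side. The names "example_rwlock", "example_read_count" and
"example_write_count" are made up for the example.

static DEFINE_RWLOCK(example_rwlock);

static unsigned long example_read_count(const unsigned long *count)
{
    unsigned long v;

    read_lock(&example_rwlock);     /* multiple readers may hold this at once */
    v = *count;
    read_unlock(&example_rwlock);

    return v;
}

static void example_write_count(unsigned long *count, unsigned long v)
{
    write_lock(&example_rwlock);    /* excludes readers and other writers */
    *count = v;
    write_unlock(&example_rwlock);
}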