ia64/xen-unstable: xen/include/asm-x86/spinlock.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
Author: Tim Deegan <Tim.Deegan@xensource.com>
Date:   Fri Aug 31 11:06:22 2007 +0100
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <xen/config.h>
#include <xen/lib.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>

typedef struct {
    volatile s16 lock;  /* 1 == free; 0 or negative == held */
    s8 recurse_cpu;     /* CPU holding the lock recursively, or -1 */
    u8 recurse_cnt;     /* nesting depth for the _recursive variants */
} spinlock_t;

#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }

#define spin_lock_init(x) do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
static inline void _raw_spin_lock(spinlock_t *lock)
{
    /* Atomically decrement the lock byte: 1 -> 0 means we took the lock.
     * A negative result means someone else holds it, so spin (with
     * rep;nop, the pause hint) in an out-of-line .text.lock section
     * until the byte looks free again, then retry the decrement. */
    __asm__ __volatile__ (
        "1:  lock; decb %0         \n"
        "    js 2f                 \n"
        ".section .text.lock,\"ax\"\n"
        "2:  cmpb $0,%0            \n"
        "    rep; nop              \n"
        "    jle 2b                \n"
        "    jmp 1b                \n"
        ".previous"
        : "=m" (lock->lock) : : "memory" );
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
    ASSERT(spin_is_locked(lock));
    /* Release: a plain store of 1 (free) is sufficient on x86. */
    __asm__ __volatile__ (
        "movb $1,%0"
        : "=m" (lock->lock) : : "memory" );
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
    char oldval;
    /* Atomically exchange 0 (held) into the lock byte; if the old value
     * was positive (free) we now own the lock. */
    __asm__ __volatile__(
        "xchgb %b0,%1"
        :"=q" (oldval), "=m" (lock->lock)
        :"0" (0) : "memory");
    return oldval > 0;
}
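/*
 * Illustrative sketch, not part of the original header: typical use of the
 * primitives above through the generic spin_lock()/spin_unlock()/
 * spin_trylock() wrappers (defined in xen/spinlock.h), which expand to the
 * _raw_* operations here.  The names event_lock and pending_events are
 * hypothetical.
 */
static spinlock_t event_lock = SPIN_LOCK_UNLOCKED;
static unsigned int pending_events;

static void queue_event(void)
{
    spin_lock(&event_lock);    /* spins in _raw_spin_lock() if contended */
    pending_events++;
    spin_unlock(&event_lock);  /* _raw_spin_unlock(): store 1 (free) */
}

static void try_flush(void)
{
    /* Non-blocking attempt: xchg 0 into the lock byte; a positive old
     * value means we acquired it. */
    if ( spin_trylock(&event_lock) )
    {
        pending_events = 0;
        spin_unlock(&event_lock);
    }
}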
/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock(). (An illustrative usage sketch follows the two
 * macros below.)
 */
#define _raw_spin_lock_recursive(_lock)            \
    do {                                           \
        int cpu = smp_processor_id();              \
        if ( likely((_lock)->recurse_cpu != cpu) ) \
        {                                          \
            spin_lock(_lock);                      \
            (_lock)->recurse_cpu = cpu;            \
        }                                          \
        (_lock)->recurse_cnt++;                    \
    } while ( 0 )

#define _raw_spin_unlock_recursive(_lock)          \
    do {                                           \
        if ( likely(--(_lock)->recurse_cnt == 0) ) \
        {                                          \
            (_lock)->recurse_cpu = -1;             \
            spin_unlock(_lock);                    \
        }                                          \
    } while ( 0 )
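/*
 * Illustrative sketch, not part of the original header: the pattern the
 * comment above describes, using the generic spin_lock_recursive()/
 * spin_unlock_recursive() wrappers from xen/spinlock.h.  Both the outer
 * operation and the helper it calls take the same lock; a nested acquire
 * on the same CPU only bumps recurse_cnt instead of deadlocking.  The
 * names table_lock, update_entry() and rebuild_table() are hypothetical.
 */
static spinlock_t table_lock = SPIN_LOCK_UNLOCKED;

static void update_entry(void)
{
    spin_lock_recursive(&table_lock);   /* outer or nested acquire */
    /* ... modify one entry ... */
    spin_unlock_recursive(&table_lock); /* releases only when recurse_cnt hits 0 */
}

static void rebuild_table(void)
{
    spin_lock_recursive(&table_lock);
    update_entry();                     /* nested acquire on the same CPU is safe */
    spin_unlock_recursive(&table_lock);
}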
typedef struct {
    volatile unsigned int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }

#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 * (A worked example of the counter scheme follows the unlock macros below.)
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
    __build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
    __build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)                           \
    __asm__ __volatile__ (                             \
        "lock ; incl %0" :                             \
        "=m" ((rw)->lock) : : "memory" )
#define _raw_write_unlock(rw)                          \
    __asm__ __volatile__ (                             \
        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :       \
        "=m" ((rw)->lock) : : "memory" )
#endif /* __ASM_SPINLOCK_H */