
annotate xen/include/asm-x86/spinlock.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 79053138b35c
children 4e3316ed1af5
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <xen/config.h>
#include <xen/lib.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>

typedef struct {
    volatile s16 lock;
    s8 recurse_cpu;
    u8 recurse_cnt;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED /*(spinlock_t)*/ { 1, -1, 0 }

#define spin_lock_init(x) do { *(x) = (spinlock_t) SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
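/*
 * Lock-byte protocol used by the raw spinlock operations below:
 *   lock == 1   free
 *   lock <= 0   held (this is exactly what spin_is_locked() tests)
 * _raw_spin_lock() takes the lock with "lock; decb" and, if the result is
 * negative, spins in the out-of-line 2: path until the byte becomes positive
 * again; _raw_spin_unlock() releases it by storing 1; _raw_spin_trylock()
 * exchanges in 0 and succeeds only if the old value was positive.
 */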
static inline void _raw_spin_lock(spinlock_t *lock)
{
    __asm__ __volatile__ (
        "1:  lock; decb %0         \n"
        "    js 2f                 \n"
        ".section .text.lock,\"ax\"\n"
        "2:  cmpb $0,%0            \n"
        "    rep; nop              \n"
        "    jle 2b                \n"
        "    jmp 1b                \n"
        ".previous"
        : "=m" (lock->lock) : : "memory" );
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
    ASSERT(spin_is_locked(lock));
    __asm__ __volatile__ (
        "movb $1,%0"
        : "=m" (lock->lock) : : "memory" );
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
    char oldval;
    __asm__ __volatile__(
        "xchgb %b0,%1"
        :"=q" (oldval), "=m" (lock->lock)
        :"0" (0) : "memory");
    return oldval > 0;
}
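/*
 * Illustrative sketch, not part of the original header: typical use of the
 * plain spinlock through generic spin_lock()/spin_unlock()/spin_trylock()
 * wrappers, assumed here to come from xen/include/xen/spinlock.h and to map
 * onto the _raw_* operations above.  Wrapped in #if 0 so it is never built.
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static unsigned long example_counter;

static void example_increment(void)
{
    spin_lock(&example_lock);        /* spins until the lock is acquired */
    example_counter++;
    spin_unlock(&example_lock);      /* stores 1: lock byte is free again */
}

static int example_try_increment(void)
{
    if ( !spin_trylock(&example_lock) )  /* single xchg attempt, no spinning */
        return 0;
    example_counter++;
    spin_unlock(&example_lock);
    return 1;
}
#endif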
/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define _raw_spin_lock_recursive(_lock)            \
    do {                                           \
        int cpu = smp_processor_id();              \
        if ( likely((_lock)->recurse_cpu != cpu) ) \
        {                                          \
            spin_lock(_lock);                      \
            (_lock)->recurse_cpu = cpu;            \
        }                                          \
        (_lock)->recurse_cnt++;                    \
    } while ( 0 )

#define _raw_spin_unlock_recursive(_lock)          \
    do {                                           \
        if ( likely(--(_lock)->recurse_cnt == 0) ) \
        {                                          \
            (_lock)->recurse_cpu = -1;             \
            spin_unlock(_lock);                    \
        }                                          \
    } while ( 0 )
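/*
 * Illustrative sketch, not part of the original header: a call chain that
 * may re-enter the same lock on the same CPU.  It assumes generic
 * spin_lock_recursive()/spin_unlock_recursive() wrappers (expected in
 * xen/include/xen/spinlock.h) that expand to the _raw_* macros above.
 */
#if 0
static spinlock_t example_recursive_lock = SPIN_LOCK_UNLOCKED;

static void leaf_op(void)
{
    /* When called from top_op() on the same CPU, recurse_cpu already
     * matches, so only recurse_cnt is bumped; no second spin. */
    spin_lock_recursive(&example_recursive_lock);
    /* ... work that also needs the lock ... */
    spin_unlock_recursive(&example_recursive_lock);
}

static void top_op(void)
{
    /* First acquisition on this CPU: takes the underlying spinlock. */
    spin_lock_recursive(&example_recursive_lock);
    leaf_op();
    /* Count drops to zero here, so the spinlock is really released. */
    spin_unlock_recursive(&example_recursive_lock);
}
#endif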

typedef struct {
    volatile unsigned int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED /*(rwlock_t)*/ { RW_LOCK_BIAS }

#define rwlock_init(x) do { *(x) = (rwlock_t) RW_LOCK_UNLOCKED; } while(0)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
    __build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
    __build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)                       \
    __asm__ __volatile__ (                         \
        "lock ; incl %0" :                         \
        "=m" ((rw)->lock) : : "memory" )
#define _raw_write_unlock(rw)                      \
    __asm__ __volatile__ (                         \
        "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
        "=m" ((rw)->lock) : : "memory" )
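/*
 * Illustrative sketch, not part of the original header: how the biased
 * counter encodes the lock state.  RW_LOCK_BIAS and the locking helpers
 * (__build_read_lock/__build_write_lock) live in asm/rwlock.h; the exact
 * bias value is not shown here, but the unlock macros above imply the
 * arithmetic: a read lock subtracts 1 (undone by "incl"), a write lock
 * subtracts the whole bias (undone by "addl $RW_LOCK_BIAS").
 *
 *   lock == RW_LOCK_BIAS        unlocked
 *   lock == RW_LOCK_BIAS - N    held by N readers
 *   lock == 0                   held by one writer
 *   lock <  0                   contended (the sign bit the comment above
 *                               refers to); spin in the *_lock_failed paths
 *
 * The usage below assumes generic read_lock()/write_lock() wrappers
 * (expected in xen/include/xen/spinlock.h) over the _raw_* forms.
 */
#if 0
static rwlock_t example_rwlock = RW_LOCK_UNLOCKED;

static void reader(void)
{
    read_lock(&example_rwlock);      /* many readers may hold this at once */
    /* ... read-only access to the shared data ... */
    read_unlock(&example_rwlock);
}

static void writer(void)
{
    write_lock(&example_rwlock);     /* excludes readers and other writers */
    /* ... modify the shared data ... */
    write_unlock(&example_rwlock);
}
#endif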

#endif /* __ASM_SPINLOCK_H */