ia64/xen-unstable
changeset 1228:ee97ff34e170
bitkeeper revision 1.825.3.1 (4062f7e9e4Hjc12XFoN-wZ-bm0GL4w)
synch_bitops.h:
  new file
system.h, evtchn.h, evtchn.c, entry.S, console.c:
  Fix races in event-channel status checks and updates.
author    kaf24@scramble.cl.cam.ac.uk
date      Thu Mar 25 15:16:57 2004 +0000 (2004-03-25)
parents   f99bce03c4db
children  d1172efb8a1e b083c9b2e63b
files     .rootkeys
          xen/arch/i386/entry.S
          xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c
          xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
          xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c
          xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h
          xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
          xenolinux-2.4.25-sparse/include/asm-xen/system.h
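Background on the race being fixed: the guest kernel and Xen can both read-modify-write the same shared_info words from different CPUs, and the plain Linux bit operations give no atomicity guarantee in that situation. The sketch below is illustrative only (not part of the changeset); it assumes x86 with GCC inline assembly, and the helper names are invented:

```c
/* Illustrative only -- not part of the changeset. */

/* Non-atomic read-modify-write: if Xen (running on another CPU) updates
 * *word between the implicit load and store, its update is silently lost. */
static inline void plain_set_bit(int nr, volatile unsigned long *word)
{
    *word |= (1UL << nr);
}

/* LOCK-prefixed version, in the style of the new synch_set_bit(): the whole
 * read-modify-write is one indivisible operation on the cache line, so a
 * concurrent update by Xen or another CPU cannot be lost. */
static inline void locked_set_bit(int nr, volatile unsigned long *word)
{
    __asm__ __volatile__ ( "lock btsl %1,%0"
                           : "+m" (*word) : "Ir" (nr) : "memory" );
}
```

The new synch_*_bit() helpers and the lock-prefixed instructions in the assembly paths below apply this pattern to the event-channel fields of the shared-info page.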
line diff
--- a/.rootkeys  Thu Mar 25 13:24:43 2004 +0000
+++ b/.rootkeys  Thu Mar 25 15:16:57 2004 +0000
@@ -684,6 +684,7 @@ 3e5a4e67AJPjW-zL7p-xWuA6IVeH1g xenolinux
 3e5a4e68uJz-xI0IBVMD7xRLQKJDFg xenolinux-2.4.25-sparse/include/asm-xen/segment.h
 3e5a4e68Nfdh6QcOKUTGCaYkf2LmYA xenolinux-2.4.25-sparse/include/asm-xen/smp.h
 3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ xenolinux-2.4.25-sparse/include/asm-xen/suspend.h
+4062f7e2PzFOUGT0PaE7A0VprTU3JQ xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h
 3e5a4e68mTr0zcp9SXDbnd-XLrrfxw xenolinux-2.4.25-sparse/include/asm-xen/system.h
 3f1056a9L_kqHcFheV00KbKBzv9j5w xenolinux-2.4.25-sparse/include/asm-xen/vga.h
 3f689063nhrIRsMMZjZxMFk7iEINqQ xenolinux-2.4.25-sparse/include/asm-xen/xen_proc.h
--- a/xen/arch/i386/entry.S  Thu Mar 25 13:24:43 2004 +0000
+++ b/xen/arch/i386/entry.S  Thu Mar 25 15:16:57 2004 +0000
@@ -373,7 +373,7 @@ test_all_events:
         andl UPCALL_PENDING(%eax),%ecx  # ECX = pending & ~mask
         andl $1,%ecx                    # Is bit 0 pending and not masked?
         jz   restore_all_guest
-        orl  %ecx,UPCALL_MASK(%eax)     # Upcalls are masked during delivery
+        lock btsl $0,UPCALL_MASK(%eax)  # Upcalls are masked during delivery
 /*process_guest_events:*/
         movzwl PROCESSOR(%ebx),%edx
         shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
--- a/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c  Thu Mar 25 13:24:43 2004 +0000
+++ b/xenolinux-2.4.25-sparse/arch/xen/drivers/console/console.c  Thu Mar 25 15:16:57 2004 +0000
@@ -144,12 +144,7 @@ void xen_console_init(void)
 
     register_console(&kcons_info);
 
-    /*
-     * XXX This prevents a bogus 'VIRQ_ERROR' when interrupts are enabled
-     * for the first time. This works because by this point all important
-     * VIRQs (eg. timer) have been properly bound.
-     */
-    clear_bit(0, &HYPERVISOR_shared_info->evtchn_pending[0]);
+    evtchn_clear_error_virq();
 }
 
--- a/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S  Thu Mar 25 13:24:43 2004 +0000
+++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S  Thu Mar 25 15:16:57 2004 +0000
@@ -210,14 +210,14 @@ ENTRY(system_call)
         movl %eax,EAX(%esp)                    # save the return value
 ENTRY(ret_from_sys_call)
         movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
-        btsl $0,evtchn_upcall_mask(%esi)       # make tests atomic
+        lock btsl $0,evtchn_upcall_mask(%esi)  # make tests atomic
 ret_syscall_tests:
         cmpl $0,need_resched(%ebx)
         jne reschedule
         cmpl $0,sigpending(%ebx)
         je safesti                             # ensure need_resched updates are seen
 signal_return:
-        btrl $0,evtchn_upcall_mask(%esi)       # reenable event callbacks
+        lock btrl $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
         movl %esp,%eax
         xorl %edx,%edx
         call SYMBOL_NAME(do_signal)
@@ -254,7 +254,7 @@ ret_from_exception:
 
         ALIGN
 reschedule:
-        btrl $0,evtchn_upcall_mask(%esi)       # reenable event callbacks
+        lock btrl $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
         call SYMBOL_NAME(schedule)             # test
         jmp ret_from_sys_call
 
@@ -317,12 +317,12 @@ 11: push %esp
         movb CS(%esp),%cl
         test $2,%cl                            # slow return to ring 2 or 3
         jne ret_syscall_tests
-safesti:btrl $0,evtchn_upcall_mask(%esi)       # reenable event callbacks
+safesti:lock btrl $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
 scrit:  /**** START OF CRITICAL REGION ****/
         testb $1,evtchn_upcall_pending(%esi)
         jnz 14f                                # process more events if necessary...
         RESTORE_ALL
-14:     btsl $0,evtchn_upcall_mask(%esi)
+14:     lock btsl $0,evtchn_upcall_mask(%esi)
         jmp 11b
 ecrit:  /**** END OF CRITICAL REGION ****/
 # [How we do the fixup]. We want to merge the current stack frame with the
@@ -364,7 +364,7 @@ critical_fixup_table:
         .byte 0x20                          # pop %es
         .byte 0x24,0x24,0x24                # add $4,%esp
         .byte 0x28                          # iret
-        .byte 0x00,0x00,0x00,0x00,0x00      # btsl $0,4(%esi)
+        .byte 0x00,0x00,0x00,0x00,0x00,0x00 # lock btsl $0,4(%esi)
         .byte 0x00,0x00                     # jmp 11b
 
 # Hypervisor uses this for application faults while it executes.
--- a/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c  Thu Mar 25 13:24:43 2004 +0000
+++ b/xenolinux-2.4.25-sparse/arch/xen/kernel/evtchn.c  Thu Mar 25 15:16:57 2004 +0000
@@ -14,6 +14,7 @@
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
 #include <asm/hypervisor.h>
 #include <asm/hypervisor-ifs/event_channel.h>
 
@@ -84,7 +85,7 @@ static void evtchn_handle_exceptions(sha
         {
             printk(KERN_ALERT "Error on IRQ line %d!\n",
                    dynirq + DYNIRQ_BASE);
-            clear_bit(port, &s->evtchn_exception[0]);
+            synch_clear_bit(port, &s->evtchn_exception[0]);
         }
         else
             evtchn_device_upcall(port, 1);
@@ -99,7 +100,7 @@ void evtchn_do_upcall(struct pt_regs *re
 
     local_irq_save(flags);
 
-    while ( test_and_clear_bit(0, &s->evtchn_upcall_pending) )
+    while ( synch_test_and_clear_bit(0, &s->evtchn_upcall_pending) )
     {
         if ( s->evtchn_pending_sel != 0 )
             evtchn_handle_normal(s, regs);
--- a/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h  Thu Mar 25 13:24:43 2004 +0000
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/evtchn.h  Thu Mar 25 15:16:57 2004 +0000
@@ -13,6 +13,7 @@
 #include <linux/config.h>
 #include <asm/hypervisor.h>
 #include <asm/ptrace.h>
+#include <asm/synch_bitops.h>
 
 /*
  * LOW-LEVEL DEFINITIONS
@@ -27,21 +28,15 @@ void evtchn_device_upcall(int port, int
 static inline void mask_evtchn(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    set_bit(port, &s->evtchn_mask[0]);
+    synch_set_bit(port, &s->evtchn_mask[0]);
 }
 
-/*
- * I haven't thought too much about the synchronisation in here against
- * other CPUs, but all the bit-update operations are reorder barriers on
- * x86 so reordering concerns aren't a problem for now. Some mb() calls
- * would be required on weaker architectures I think. -- KAF (24/3/2004)
- */
 static inline void unmask_evtchn(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
     int need_upcall = 0;
 
-    clear_bit(port, &s->evtchn_mask[0]);
+    synch_clear_bit(port, &s->evtchn_mask[0]);
 
     /*
      * The following is basically the equivalent of 'hw_resend_irq'. Just like
@@ -49,34 +44,43 @@ static inline void unmask_evtchn(int por
      */
 
     /* Asserted a standard notification? */
-    if ( test_bit        (port,    &s->evtchn_pending[0]) &&
-         !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+    if ( synch_test_bit        (port,    &s->evtchn_pending[0]) &&
+         !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
         need_upcall = 1;
 
     /* Asserted an exceptional notification? */
-    if ( test_bit        (port,    &s->evtchn_exception[0]) &&
-         !test_and_set_bit(port>>5, &s->evtchn_exception_sel) )
+    if ( synch_test_bit        (port,    &s->evtchn_exception[0]) &&
+         !synch_test_and_set_bit(port>>5, &s->evtchn_exception_sel) )
         need_upcall = 1;
 
     /* If asserted either type of notification, check the master flags. */
     if ( need_upcall &&
-         !test_and_set_bit(0, &s->evtchn_upcall_pending) &&
-         !test_bit        (0, &s->evtchn_upcall_mask) )
+         !synch_test_and_set_bit(0, &s->evtchn_upcall_pending) &&
+         !synch_test_bit        (0, &s->evtchn_upcall_mask) )
         evtchn_do_upcall(NULL);
 }
 
 static inline void clear_evtchn(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    clear_bit(port, &s->evtchn_pending[0]);
+    synch_clear_bit(port, &s->evtchn_pending[0]);
 }
 
 static inline void clear_evtchn_exception(int port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    clear_bit(port, &s->evtchn_exception[0]);
+    synch_clear_bit(port, &s->evtchn_exception[0]);
 }
 
+static inline void evtchn_clear_error_virq(void)
+{
+    /*
+     * XXX This prevents a bogus 'VIRQ_ERROR' when interrupts are enabled
+     * for the first time. This works because by this point all important
+     * VIRQs (eg. timer) have been properly bound.
+     */
+    synch_clear_bit(0, &HYPERVISOR_shared_info->evtchn_pending[0]);
+}
 
 /*
  * CHARACTER-DEVICE DEFINITIONS
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/synch_bitops.h  Thu Mar 25 15:16:57 2004 +0000
@@ -0,0 +1,83 @@
+#ifndef __XEN_SYNCH_BITOPS_H__
+#define __XEN_SYNCH_BITOPS_H__
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ * Heavily modified to provide guaranteed strong synchronisation
+ * when communicating with Xen or other guest OSes running on other CPUs.
+ */
+
+#include <linux/config.h>
+
+#define ADDR (*(volatile long *) addr)
+
+static __inline__ void synch_set_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btsl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btrl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ void synch_change_bit(int nr, volatile void * addr)
+{
+    __asm__ __volatile__ (
+        "lock btcl %1,%0"
+        : "=m" (ADDR) : "Ir" (nr) : "memory" );
+}
+
+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btsl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "lock btrl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+
+    __asm__ __volatile__ (
+        "lock btcl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
+    return oldbit;
+}
+
+static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
+{
+    return ((1UL << (nr & 31)) &
+            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
+{
+    int oldbit;
+    __asm__ __volatile__ (
+        "btl %2,%1\n\tsbbl %0,%0"
+        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
+    return oldbit;
+}
+
+#define synch_test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ synch_const_test_bit((nr),(addr)) : \
+ synch_var_test_bit((nr),(addr)))
+
+#endif /* __XEN_SYNCH_BITOPS_H__ */
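As a usage illustration (hypothetical caller, not part of the changeset), a guest-side helper that needs a mask-then-check sequence on the shared-info page would now perform both steps with the synch_* operations so that neither step can race against Xen:

```c
#include <linux/kernel.h>
#include <asm/hypervisor.h>
#include <asm/synch_bitops.h>

/* Hypothetical example: atomically mask an event channel, then consume any
 * notification that raced with the masking. */
static void example_mask_and_drain(int port)
{
    shared_info_t *s = HYPERVISOR_shared_info;

    /* Atomically mask the channel... */
    synch_set_bit(port, &s->evtchn_mask[0]);

    /* ...and atomically pick up a notification that may have just landed. */
    if ( synch_test_and_clear_bit(port, &s->evtchn_pending[0]) )
        printk(KERN_DEBUG "event channel %d had a pending notification\n",
               port);
}
```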
--- a/xenolinux-2.4.25-sparse/include/asm-xen/system.h  Thu Mar 25 13:24:43 2004 +0000
+++ b/xenolinux-2.4.25-sparse/include/asm-xen/system.h  Thu Mar 25 15:16:57 2004 +0000
@@ -4,9 +4,10 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/bitops.h>
+#include <asm/synch_bitops.h>
 #include <asm/segment.h>
 #include <asm/hypervisor.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
 #include <asm/evtchn.h>
 
 #ifdef __KERNEL__
@@ -250,19 +251,19 @@ static inline unsigned long __cmpxchg(vo
     unsigned long prev;
     switch (size) {
     case 1:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+        __asm__ __volatile__("lock cmpxchgb %b1,%2"
                              : "=a"(prev)
                              : "q"(new), "m"(*__xg(ptr)), "0"(old)
                              : "memory");
         return prev;
     case 2:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+        __asm__ __volatile__("lock cmpxchgw %w1,%2"
                              : "=a"(prev)
                              : "q"(new), "m"(*__xg(ptr)), "0"(old)
                              : "memory");
         return prev;
     case 4:
-        __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+        __asm__ __volatile__("lock cmpxchgl %1,%2"
                              : "=a"(prev)
                              : "q"(new), "m"(*__xg(ptr)), "0"(old)
                              : "memory");
@@ -320,49 +321,47 @@ static inline unsigned long __cmpxchg(vo
 
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
+#define safe_halt() ((void)0)
+
 /*
- * NB. ALl the following routines are SMP-safe on x86, even where they look
- * possibly racy. For example, we must ensure that we clear the mask bit and
- * /then/ check teh pending bit. But this will happen because the bit-update
- * operations are ordering barriers.
- *
- * For this reason also, many uses of 'barrier' here are rather anal. But
- * they do no harm.
+ * Note the use of synch_*_bit() operations in the following. These operations
+ * ensure correct serialisation of checks and updates w.r.t. Xen executing on
+ * a different CPU.
  */
 
 #define __cli()                                                             \
 do {                                                                        \
-    set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                \
-    barrier();                                                              \
+    synch_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);          \
 } while (0)
 
 #define __sti()                                                             \
 do {                                                                        \
     shared_info_t *_shared = HYPERVISOR_shared_info;                        \
-    clear_bit(0, &_shared->evtchn_upcall_mask);                             \
-    barrier();                                                              \
-    if ( unlikely(test_bit(0, &_shared->evtchn_upcall_pending)) )           \
+    synch_clear_bit(0, &_shared->evtchn_upcall_mask);                       \
+    if ( unlikely(synch_test_bit(0, &_shared->evtchn_upcall_pending)) )     \
         evtchn_do_upcall(NULL);                                             \
 } while (0)
 
 #define __save_flags(x)                                                     \
 do {                                                                        \
-    (x) = test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);         \
-    barrier();                                                              \
+    (x) = synch_test_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask);   \
 } while (0)
 
-#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
+#define __restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
 
-#define safe_halt() ((void)0)
+#define __save_and_cli(x)                                                   \
+do {                                                                        \
+    (x) = synch_test_and_set_bit(                                           \
+        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                    \
+} while (0)
 
-#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0);
-#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0);
+#define __save_and_sti(x)                                                   \
+do {                                                                        \
+    (x) = synch_test_and_clear_bit(                                         \
+        0, &HYPERVISOR_shared_info->evtchn_upcall_mask);                    \
+} while (0)
 
-#define local_irq_save(x)                                                   \
-do {                                                                        \
-    (x) = test_and_set_bit(0, &HYPERVISOR_shared_info->evtchn_upcall_mask); \
-    barrier();                                                              \
-} while (0)
+#define local_irq_save(x)    __save_and_cli(x)
 #define local_irq_restore(x) __restore_flags(x)
 #define local_irq_disable()  __cli()
 #define local_irq_enable()   __sti()
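For reference, a typical guest-side critical section against the reworked macros would look as follows (sketch only; the surrounding driver code is hypothetical). local_irq_save() now expands to __save_and_cli(), i.e. a single atomic synch_test_and_set_bit() on the upcall-mask bit, and local_irq_restore()/__restore_flags() re-enables callbacks (delivering any upcall that became pending) only if they were enabled on entry:

```c
static void example_critical_section(void)
{
    unsigned long flags;

    local_irq_save(flags);    /* atomically mask upcalls, remember old state */

    /* ... manipulate state that evtchn_do_upcall() may also touch ... */

    local_irq_restore(flags); /* __sti() only if upcalls were enabled before */
}
```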