ia64/xen-unstable

changeset 7404:6cf6f8783282

Fix local_irq_save() and irqs_disabled() to be preemption-safe. The
flag-reading paths previously indexed HYPERVISOR_shared_info->vcpu_data[]
via __this_cpu() with preemption enabled, so a task preempted and migrated
between computing its vcpu number and reading evtchn_upcall_mask could
observe another vcpu's interrupt state.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Oct 16 11:45:51 2005 +0100
parents 475e2a8493b8
children f535708469ee
files linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h
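
The bug, in outline: on a CONFIG_PREEMPT kernel the old macros read the
per-vcpu upcall mask with preemption enabled. An illustrative
reconstruction of the pre-patch read follows (taken from the removed lines
below; vcpu_info_t, HYPERVISOR_shared_info and evtchn_upcall_mask are the
Xen interface names used throughout these headers, while irqs_disabled_old
is a stand-in name for this sketch only):

#define irqs_disabled_old()						\
	HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask

/*
 * The expansion is two separable steps, and nothing prevents a kernel
 * preemption between them:
 *
 *   cpu  = __this_cpu();                  // task running on vcpu 0
 *   <involuntary preemption: task migrates to vcpu 1>
 *   mask = vcpu_data[cpu].evtchn_upcall_mask;  // vcpu 0's mask, read on vcpu 1
 *
 * The caller can therefore see a stale vcpu's interrupt state. The
 * __this_cpu() wrapper existed only to bypass the smp_processor_id()
 * debug check ("debug builds will barf") that would have flagged
 * exactly this pattern.
 */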
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h	Sat Oct 15 17:19:43 2005 +0100
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/system.h	Sun Oct 16 11:45:51 2005 +0100
@@ -497,22 +497,11 @@ unsigned long __set_mb_temp;
  * includes these barriers, for example.
  */
 
-/*
- * Don't use smp_processor_id() in preemptible code: debug builds will barf.
- * It's okay in these cases as we only read the upcall mask in preemptible
- * regions, which is always safe.
- */
-#ifdef CONFIG_SMP
-#define __this_cpu()	__smp_processor_id()
-#else
-#define __this_cpu()	0
-#endif
-
 #define __cli()								\
 do {									\
 	vcpu_info_t *_vcpu;						\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	_vcpu->evtchn_upcall_mask = 1;					\
 	preempt_enable_no_resched();					\
 	barrier();							\
@@ -523,7 +512,7 @@ do {									\
 	vcpu_info_t *_vcpu;						\
 	barrier();							\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	_vcpu->evtchn_upcall_mask = 0;					\
 	barrier(); /* unmask then check (avoid races) */		\
 	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
@@ -534,8 +523,10 @@ do {									\
 #define __save_flags(x)							\
 do {									\
 	vcpu_info_t *_vcpu;						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	preempt_disable();						\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	(x) = _vcpu->evtchn_upcall_mask;				\
+	preempt_enable();						\
 } while (0)
 
 #define __restore_flags(x)						\
@@ -543,7 +534,7 @@ do {									\
 	vcpu_info_t *_vcpu;						\
 	barrier();							\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
 		barrier(); /* unmask then check (avoid races) */	\
 		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
@@ -559,7 +550,7 @@ do {									\
 do {									\
 	vcpu_info_t *_vcpu;						\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	(x) = _vcpu->evtchn_upcall_mask;				\
 	_vcpu->evtchn_upcall_mask = 1;					\
 	preempt_enable_no_resched();					\
@@ -572,8 +563,15 @@ do {									\
 #define local_irq_disable()	__cli()
 #define local_irq_enable()	__sti()
 
+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
 #define irqs_disabled()							\
-	HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
+({	int ___x;							\
+	vcpu_info_t *_vcpu;						\
+	preempt_disable();						\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
+	___x = (_vcpu->evtchn_upcall_mask != 0);			\
+	preempt_enable_no_resched();					\
+	___x; })
 
 /*
  * disable hlt during certain critical i/o operations
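
Once applied, the new irqs_disabled() from the hunk above reads as follows
(reconstructed from the diff, with explanatory comments added here).
preempt_enable() cannot be used on the exit path: it may call
preempt_schedule(), which itself tests irqs_disabled() before rescheduling,
giving the recursion the in-tree comment warns about — hence
preempt_enable_no_resched():

#define irqs_disabled()							\
({	int ___x;							\
	vcpu_info_t *_vcpu;						\
	preempt_disable();	/* pin the task to its current vcpu */	\
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
	___x = (_vcpu->evtchn_upcall_mask != 0);			\
	preempt_enable_no_resched();	/* no preempt_schedule() here */\
	___x; })

The x86_64 header below receives the identical change.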
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h	Sat Oct 15 17:19:43 2005 +0100
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/system.h	Sun Oct 16 11:45:51 2005 +0100
@@ -321,22 +321,11 @@ static inline unsigned long __cmpxchg(vo
  * includes these barriers, for example.
  */
 
-/*
- * Don't use smp_processor_id() in preemptible code: debug builds will barf.
- * It's okay in these cases as we only read the upcall mask in preemptible
- * regions, which is always safe.
- */
-#ifdef CONFIG_SMP
-#define __this_cpu()	__smp_processor_id()
-#else
-#define __this_cpu()	0
-#endif
-
 #define __cli()								\
 do {									\
 	vcpu_info_t *_vcpu;						\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	_vcpu->evtchn_upcall_mask = 1;					\
 	preempt_enable_no_resched();					\
 	barrier();							\
@@ -347,7 +336,7 @@ do {									\
 	vcpu_info_t *_vcpu;						\
 	barrier();							\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	_vcpu->evtchn_upcall_mask = 0;					\
 	barrier(); /* unmask then check (avoid races) */		\
 	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
@@ -358,8 +347,10 @@ do {									\
 #define __save_flags(x)							\
 do {									\
 	vcpu_info_t *_vcpu;						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	preempt_disable();						\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	(x) = _vcpu->evtchn_upcall_mask;				\
+	preempt_enable();						\
 } while (0)
 
 #define __restore_flags(x)						\
@@ -367,7 +358,7 @@ do {									\
 	vcpu_info_t *_vcpu;						\
 	barrier();							\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
 		barrier(); /* unmask then check (avoid races) */	\
 		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
@@ -383,7 +374,7 @@ do {									\
 do {									\
 	vcpu_info_t *_vcpu;						\
 	preempt_disable();						\
-	_vcpu = &HYPERVISOR_shared_info->vcpu_data[__this_cpu()];	\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
 	(x) = _vcpu->evtchn_upcall_mask;				\
 	_vcpu->evtchn_upcall_mask = 1;					\
 	preempt_enable_no_resched();					\
@@ -398,8 +389,15 @@ void cpu_idle_wait(void);
 #define local_irq_disable()	__cli()
 #define local_irq_enable()	__sti()
 
+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
 #define irqs_disabled()							\
-	HYPERVISOR_shared_info->vcpu_data[__this_cpu()].evtchn_upcall_mask
+({	int ___x;							\
+	vcpu_info_t *_vcpu;						\
+	preempt_disable();						\
+	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];	\
+	___x = (_vcpu->evtchn_upcall_mask != 0);			\
+	preempt_enable_no_resched();					\
+	___x; })
 
 /*
  * disable hlt during certain critical i/o operations
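
For context, the caller-visible contract the fix preserves: these macros
are routinely invoked from fully preemptible code, where the task can be
moved between vcpus at any instant, so each macro must pin itself to a
vcpu before touching the shared info page. A minimal usage sketch —
standard kernel idiom, not code from this changeset:

	unsigned long flags;

	/* Called with preemption enabled: each macro below must latch
	 * the vcpu it actually runs on before reading or writing the
	 * upcall mask. */
	WARN_ON(irqs_disabled());	/* must not read a stale vcpu's mask */
	local_irq_save(flags);		/* mask upcalls on the current vcpu */
	/* ... critical section: no event-channel upcalls delivered ... */
	local_irq_restore(flags);	/* unmask; force any pending upcall */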