From: Andrew Cooper
Date: Mon, 5 Oct 2020 17:47:09 +0000 (+0100)
Subject: barrier: Use LOCK ADD rather than MFENCE
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=5634432aa038da3bf057e231dbeddee81c191f24;p=people%2Fandrewcoop%2Fxen-test-framework.git

barrier: Use LOCK ADD rather than MFENCE

Signed-off-by: Andrew Cooper
---

diff --git a/arch/x86/include/arch/barrier.h b/arch/x86/include/arch/barrier.h
index 4c439af..3645a2e 100644
--- a/arch/x86/include/arch/barrier.h
+++ b/arch/x86/include/arch/barrier.h
@@ -15,8 +15,10 @@
  * the point of view of program order, reads may not be reordered with respect
  * to other reads, and writes may not be reordered with respect to other
  * writes, causing smp_rmb() and smp_wmb() to degrade to simple compiler
- * barriers.  smp_mb() however does need to be an mfence instruction, as reads
- * are permitted to be reordered ahead of non-aliasing writes.
+ * barriers.
+ *
+ * smp_mb() however does need to provide real ordering, as reads are permitted
+ * to be reordered ahead of non-aliasing writes.
  */
 
 #include 
 
@@ -25,7 +27,11 @@
 
 #define rmb()     __asm__ __volatile__ ("lfence" ::: "memory")
 #define wmb()     __asm__ __volatile__ ("sfence" ::: "memory")
 
-#define smp_mb()  mb()
+#ifdef __i386__
+#define smp_mb()  __asm__ __volatile__ ("lock addl $0, -4(%%esp)" ::: "memory")
+#else
+#define smp_mb()  __asm__ __volatile__ ("lock addl $0, -4(%%rsp)" ::: "memory")
+#endif
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
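
For illustration only, not part of the commit: a minimal, self-contained sketch of why smp_mb() must provide real StoreLoad ordering, using the same LOCK ADD idiom in a classic store-buffering test. It assumes a hosted POSIX threads build (e.g. gcc -pthread); the names my_smp_mb, cpu0/cpu1 and the variables are hypothetical and do not come from the XTF sources.

    /* Hypothetical standalone sketch; compile with: gcc -O2 -pthread sb.c */
    #include <pthread.h>
    #include <stdio.h>

    /*
     * Same idea as the patched smp_mb(): a LOCK'd read-modify-write to the
     * stack acts as a full memory barrier, without needing MFENCE.
     */
    #if defined(__i386__)
    #define my_smp_mb() __asm__ __volatile__ ("lock addl $0, -4(%%esp)" ::: "memory")
    #else
    #define my_smp_mb() __asm__ __volatile__ ("lock addl $0, -4(%%rsp)" ::: "memory")
    #endif

    static volatile int x, y;
    static volatile int r0, r1;

    static void *cpu0(void *arg)
    {
        x = 1;
        my_smp_mb();  /* Without this, the read of y may pass the write of x. */
        r0 = y;
        return NULL;
    }

    static void *cpu1(void *arg)
    {
        y = 1;
        my_smp_mb();  /* Likewise for the read of x and the write of y. */
        r1 = x;
        return NULL;
    }

    int main(void)
    {
        pthread_t t0, t1;

        pthread_create(&t0, NULL, cpu0, NULL);
        pthread_create(&t1, NULL, cpu1, NULL);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);

        /* With the full barriers in place, r0 == 0 && r1 == 0 cannot happen. */
        printf("r0=%d r1=%d\n", r0, r1);
        return 0;
    }

Without the barriers, x86 permits each CPU's load to complete ahead of its earlier non-aliasing store (store-buffer forwarding), so both r0 and r1 can observe 0; compiler-only barriers cannot prevent that.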