From 7ba7c7044e1ebbf806abc3ce526411f10fb94872 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Mon, 8 Jul 2019 12:57:50 -0700
Subject: Optimize i386 memory barriers

Use identical memory barrier implementations on 32 and 64 bit Intel
CPUs. In the past the Linux kernel supported 32 bit CPUs that violate
the x86 ordering standard. Since io_uring is not supported by these
older kernels, do not support these older CPUs in liburing. See also
Linux kernel commit 5927145efd5d ("x86/cpu: Remove the
CONFIG_X86_PPRO_FENCE=y quirk") # v4.16.

Cc: Roman Penyaev
Suggested-by: Roman Penyaev
Signed-off-by: Bart Van Assche
Signed-off-by: Jens Axboe
---
 src/barrier.h | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/src/barrier.h b/src/barrier.h
index eb8ee1e..e079cf6 100644
--- a/src/barrier.h
+++ b/src/barrier.h
@@ -32,25 +32,18 @@ after the acquire operation executes. This is implemented using
 
 #if defined(__x86_64__) || defined(__i386__)
-/* From tools/arch/x86/include/asm/barrier.h */
-#if defined(__i386__)
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#elif defined(__x86_64__)
+/* Adapted from arch/x86/include/asm/barrier.h */
 #define mb() asm volatile("mfence" ::: "memory")
 #define rmb() asm volatile("lfence" ::: "memory")
 #define wmb() asm volatile("sfence" ::: "memory")
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#if defined(__i386__)
+#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#else
 #define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
-#if defined(__x86_64__)
 #define smp_store_release(p, v) \
 do { \
 	barrier(); \
@@ -63,7 +56,6 @@ do { \
 	barrier(); \
 	___p1; \
 })
-#endif /* defined(__x86_64__) */
 #else /* defined(__x86_64__) || defined(__i386__) */
 /*
  * Add arch appropriate definitions. Be safe and use full barriers for
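
For context on how the macros touched by this patch are meant to be paired, below is a
minimal, self-contained sketch (not liburing code) of a producer/consumer ring built on
the same x86 approach: plain loads and stores plus a compiler barrier for
smp_load_acquire()/smp_store_release(). The macro bodies only mirror the idea shown in
the diff, and the ring, tail, producer and consumer names, as well as the pthread demo
and the "sketch.c" file name, are invented for illustration; liburing's real rings live
in memory mapped from the kernel.

/*
 * Sketch only: illustrates the smp_store_release()/smp_load_acquire()
 * pairing on x86.  Not liburing code; names below are invented.
 *
 * Build (assumption): cc -O2 -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

/* Compiler-only barrier, in the style of barrier() in barrier.h */
#define barrier()	__asm__ __volatile__("" ::: "memory")

#if defined(__x86_64__) || defined(__i386__)
/* On x86, a plain access plus a compiler barrier acts as release/acquire. */
#define smp_store_release(p, v)					\
do {								\
	barrier();						\
	*(volatile __typeof__(*(p)) *)(p) = (v);		\
} while (0)

#define smp_load_acquire(p)					\
({								\
	__typeof__(*(p)) ___v = *(volatile __typeof__(*(p)) *)(p); \
	barrier();						\
	___v;							\
})
#else
#error "this sketch only covers x86, like the hunk above"
#endif

#define RING_ENTRIES	16

static unsigned int ring[RING_ENTRIES];
static unsigned int tail;	/* producer publishes, consumer observes */

static void *producer(void *arg)
{
	unsigned int i;

	(void)arg;
	for (i = 0; i < RING_ENTRIES; i++) {
		ring[i] = i * i;			/* fill the slot... */
		smp_store_release(&tail, i + 1);	/* ...then publish it */
	}
	return NULL;
}

static void *consumer(void *arg)
{
	unsigned int head = 0;

	(void)arg;
	while (head < RING_ENTRIES) {
		/* Acquire: the tail read is ordered before the slot reads. */
		unsigned int t = smp_load_acquire(&tail);

		for (; head < t; head++)
			printf("entry %u = %u\n", head, ring[head]);
	}
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

Because x86 is a total-store-order architecture, the compiler barrier is all the
release/acquire pair and smp_rmb()/smp_wmb() need; only smp_mb(), which must also order
earlier stores against later loads, requires a real serializing instruction, which is
why the patch keeps the lock-prefixed add for it on both 32-bit and 64-bit builds.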