| author | Bart Van Assche <bvanassche@acm.org> | 2019-07-08 12:57:50 -0700 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2019-07-08 15:26:01 -0600 |
| commit | 7ba7c7044e1ebbf806abc3ce526411f10fb94872 | |
| tree | 19da3b7b27c8258b36716795ba74f1983953e6b0 /src | |
| parent | da457cd34d0d5aeed47223c6954a430bd5b4b1cb | |
Optimize i386 memory barriers
Use identical memory barrier implementations on 32-bit and 64-bit x86 CPUs.
In the past the Linux kernel supported 32-bit CPUs that violate the x86
memory-ordering model. Since io_uring is not available in the kernel versions
that still support those CPUs, liburing does not need to support them either.
See also Linux kernel commit
5927145efd5d ("x86/cpu: Remove the CONFIG_X86_PPRO_FENCE=y quirk") # v4.16.
Cc: Roman Penyaev <rpenyaev@suse.de>
Suggested-by: Roman Penyaev <rpenyaev@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
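
The change relies on the x86-TSO memory model: ordinary loads and stores already have acquire/release semantics, so smp_rmb() and smp_wmb() can stay plain compiler barriers and only smp_mb(), which must also prevent StoreLoad reordering, needs a locked instruction. The store-buffering litmus test below is a standalone sketch of that point, not part of the patch; it assumes x86-64 with GCC or Clang and borrows the smp_mb() definition the patch keeps.

```c
/*
 * Store-buffering litmus test (illustrative sketch, not from the patch).
 * Build with: gcc -O2 -pthread sb.c
 * With smp_mb() in place the outcome r0 == 0 && r1 == 0 is forbidden on
 * x86; remove the barriers and it eventually shows up, which is exactly
 * the StoreLoad reordering that the "lock; addl" full barrier prevents.
 */
#include <pthread.h>
#include <stdio.h>

#define barrier()	asm volatile("" ::: "memory")
/* Same full-barrier definition the patch uses on x86-64. */
#define smp_mb()	asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")

/* volatile mirrors the kernel-style plain accesses; not portable C11. */
static volatile int x, y, r0, r1;
static pthread_barrier_t start;

static void *t0(void *arg)
{
	(void)arg;
	pthread_barrier_wait(&start);
	x = 1;
	smp_mb();	/* order the store to x before the load of y */
	r0 = y;
	return NULL;
}

static void *t1(void *arg)
{
	(void)arg;
	pthread_barrier_wait(&start);
	y = 1;
	smp_mb();	/* order the store to y before the load of x */
	r1 = x;
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		pthread_t a, b;

		x = y = 0;
		pthread_barrier_init(&start, NULL, 2);
		pthread_create(&a, NULL, t0, NULL);
		pthread_create(&b, NULL, t1, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		pthread_barrier_destroy(&start);

		if (r0 == 0 && r1 == 0) {
			printf("StoreLoad reordering observed at iteration %d\n", i);
			return 1;
		}
	}
	printf("no StoreLoad reordering observed\n");
	return 0;
}
```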
Diffstat (limited to 'src')
-rw-r--r-- | src/barrier.h | 16 |
1 file changed, 4 insertions, 12 deletions
```diff
diff --git a/src/barrier.h b/src/barrier.h
index eb8ee1e..e079cf6 100644
--- a/src/barrier.h
+++ b/src/barrier.h
@@ -32,25 +32,18 @@ after the acquire operation executes. This is implemented using
 
 #if defined(__x86_64__) || defined(__i386__)
 
-/* From tools/arch/x86/include/asm/barrier.h */
-#if defined(__i386__)
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#elif defined(__x86_64__)
+/* Adapted from arch/x86/include/asm/barrier.h */
 #define mb() asm volatile("mfence" ::: "memory")
 #define rmb() asm volatile("lfence" ::: "memory")
 #define wmb() asm volatile("sfence" ::: "memory")
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#if defined(__i386__)
+#define smp_mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#else
 #define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
-#if defined(__x86_64__)
 #define smp_store_release(p, v) \
 do { \
 	barrier(); \
@@ -63,7 +56,6 @@ do { \
 	barrier(); \
 	___p1; \
 })
-#endif /* defined(__x86_64__) */
 #else /* defined(__x86_64__) || defined(__i386__) */
 /*
  * Add arch appropriate definitions. Be safe and use full barriers for
```
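
For context on how these macros get used, here is a minimal sketch of the single-producer pattern liburing follows when publishing a new submission queue tail: write the entry, then smp_store_release() the tail; the consumer smp_load_acquire()s the tail and can then safely read the entry. The `struct ring`, `ring_publish()` and `ring_consume()` names are illustrative rather than liburing's real API, the macro bodies are written out here so the example is self-contained, and it assumes GCC or Clang on x86.

```c
/*
 * Illustrative single-producer/single-consumer sketch (not liburing's real
 * API): the producer fills a slot and then publishes the new tail with
 * smp_store_release(); the consumer reads the tail with smp_load_acquire()
 * and can then safely read the slot. On x86 both macros reduce to a
 * compiler barrier plus a plain access.
 */
#include <stdio.h>

#define barrier()	asm volatile("" ::: "memory")
#define WRITE_ONCE(var, val)	(*(volatile typeof(var) *)&(var) = (val))
#define READ_ONCE(var)		(*(volatile typeof(var) *)&(var))

#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	WRITE_ONCE(*(p), (v));			\
} while (0)

#define smp_load_acquire(p)			\
({						\
	typeof(*(p)) ___p1 = READ_ONCE(*(p));	\
	barrier();				\
	___p1;					\
})

struct ring {
	unsigned int entries[16];	/* payload slots (illustrative) */
	unsigned int tail;		/* written by producer, read by consumer */
};

/* Producer: fill the slot first, then publish the new tail. */
static void ring_publish(struct ring *r, unsigned int value)
{
	unsigned int tail = r->tail;

	r->entries[tail & 15] = value;
	smp_store_release(&r->tail, tail + 1);
}

/*
 * Consumer: acquire the tail so the slot contents written before the
 * matching release are guaranteed to be visible.
 */
static int ring_consume(struct ring *r, unsigned int head, unsigned int *out)
{
	unsigned int tail = smp_load_acquire(&r->tail);

	if (head == tail)
		return 0;
	*out = r->entries[head & 15];
	return 1;
}

int main(void)
{
	struct ring r = { .tail = 0 };
	unsigned int v;

	ring_publish(&r, 42);
	if (ring_consume(&r, 0, &v))
		printf("consumed %u\n", v);
	return 0;
}
```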