[liburing,4/4] Optimize i386 memory barriers

Message ID 20190708195750.223103-5-bvanassche@acm.org (mailing list archive)
State New, archived
Series Optimize i386 memory barriers

Commit Message

Bart Van Assche July 8, 2019, 7:57 p.m. UTC
Use identical memory barrier implementations on 32-bit and 64-bit x86 CPUs.
In the past the Linux kernel supported 32-bit CPUs that violate the x86
memory ordering model. Since io_uring does not exist in the kernels that
still supported those CPUs, do not support them in liburing either. See
also Linux kernel commit 5927145efd5d ("x86/cpu: Remove the
CONFIG_X86_PPRO_FENCE=y quirk") # v4.16.

Cc: Roman Penyaev <rpenyaev@suse.de>
Suggested-by: Roman Penyaev <rpenyaev@suse.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 src/barrier.h | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)
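
[Editor's note, not part of the patch: a minimal, self-contained sketch of how
the unified x86 release/acquire macros might be exercised in a single-producer,
single-consumer ring, which is roughly the pattern liburing relies on when
publishing SQ/CQ ring indices. The smp_mb()/smp_store_release()/smp_load_acquire()
bodies are modeled on the x86-64 definitions visible in the diff below; barrier(),
struct ring, produce() and consume() are illustrative assumptions and do not exist
under these names in liburing. Assumes GCC/Clang extended asm and statement
expressions.]

/* sketch.c - illustrative only, x86-64, build with gcc sketch.c */
#include <stdio.h>

#define barrier()	asm volatile("" ::: "memory")

/* Modeled on the patched src/barrier.h (x86-64 variant). */
#define smp_mb()	asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()

#define smp_store_release(p, v)			\
do {						\
	barrier();				\
	*(p) = (v);				\
} while (0)

#define smp_load_acquire(p)			\
({						\
	__typeof__(*(p)) ___p1 = *(p);		\
	barrier();				\
	___p1;					\
})

struct ring {
	unsigned tail;		/* written by producer, read by consumer */
	unsigned entries[16];
};

/* Producer: fill the slot first, then publish the new tail with release order. */
static void produce(struct ring *r, unsigned value)
{
	unsigned tail = r->tail;

	r->entries[tail & 15] = value;
	smp_store_release(&r->tail, tail + 1);
}

/* Consumer: read the tail with acquire order, then any slot it covers. */
static int consume(struct ring *r, unsigned *head, unsigned *value)
{
	unsigned tail = smp_load_acquire(&r->tail);

	if (*head == tail)
		return 0;
	*value = r->entries[(*head)++ & 15];
	return 1;
}

int main(void)
{
	struct ring r = { 0 };
	unsigned head = 0, value;

	produce(&r, 42);
	if (consume(&r, &head, &value))
		printf("consumed %u\n", value);
	return 0;
}

Because x86 does not reorder stores with other stores or loads with other loads,
smp_rmb() and smp_wmb() can be pure compiler barriers and the release store and
acquire load above compile down to plain mov instructions; only smp_mb() needs a
lock-prefixed instruction. That is what makes the i386-specific lock-prefixed
rmb()/wmb() removed by this patch unnecessary on any CPU a liburing-capable kernel
still supports.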

Patch

diff --git a/src/barrier.h b/src/barrier.h
index eb8ee1ec9d34..e079cf609f26 100644
--- a/src/barrier.h
+++ b/src/barrier.h
@@ -32,25 +32,18 @@  after the acquire operation executes. This is implemented using
 
 
 #if defined(__x86_64__) || defined(__i386__)
-/* From tools/arch/x86/include/asm/barrier.h */
-#if defined(__i386__)
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb()	asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#elif defined(__x86_64__)
+/* Adapted from arch/x86/include/asm/barrier.h */
 #define mb()	asm volatile("mfence" ::: "memory")
 #define rmb()	asm volatile("lfence" ::: "memory")
 #define wmb()	asm volatile("sfence" ::: "memory")
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#if defined(__i386__)
+#define smp_mb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory", "cc")
+#else
 #define smp_mb()  asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
 #endif
 
-#if defined(__x86_64__)
 #define smp_store_release(p, v)			\
 do {						\
 	barrier();				\
@@ -63,7 +56,6 @@  do {						\
 	barrier();				\
 	___p1;					\
 })
-#endif /* defined(__x86_64__) */
 #else /* defined(__x86_64__) || defined(__i386__) */
 /*
  * Add arch appropriate definitions. Be safe and use full barriers for