[liburing,3/4] Change __x86_64 into __x86_64__
diff mbox series

Message ID 20190708195750.223103-4-bvanassche@acm.org
State New
Headers show
Series
  • Optimize i386 memory barriers
Related show

Commit Message

Bart Van Assche July 8, 2019, 7:57 p.m. UTC
Use the standard predefined macro __x86_64__ instead of the non-underscore-suffixed
__x86_64 variant. This patch improves consistency with the Linux kernel source code
and with the other preprocessor checks in this header, which already test __x86_64__.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 src/barrier.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Patch
diff mbox series

diff --git a/src/barrier.h b/src/barrier.h
index e1a407fccde2..eb8ee1ec9d34 100644
--- a/src/barrier.h
+++ b/src/barrier.h
@@ -31,7 +31,7 @@  after the acquire operation executes. This is implemented using
 #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
 
 
-#if defined(__x86_64) || defined(__i386__)
+#if defined(__x86_64__) || defined(__i386__)
 /* From tools/arch/x86/include/asm/barrier.h */
 #if defined(__i386__)
 /*
@@ -64,14 +64,14 @@  do {						\
 	___p1;					\
 })
 #endif /* defined(__x86_64__) */
-#else
+#else /* defined(__x86_64__) || defined(__i386__) */
 /*
  * Add arch appropriate definitions. Be safe and use full barriers for
  * archs we don't have support for.
  */
 #define smp_rmb()	__sync_synchronize()
 #define smp_wmb()	__sync_synchronize()
-#endif
+#endif /* defined(__x86_64__) || defined(__i386__) */
 
 /* From tools/include/asm/barrier.h */
 
@@ -92,4 +92,4 @@  do {						\
 })
 #endif
 
-#endif
+#endif /* defined(LIBURING_BARRIER_H) */