parisc: Define mb() and add memory barriers to assembler unlock sequences

Message ID d13565f8-d7f4-ff4d-2202-94e0404b5817@bell.net (mailing list archive)
State Accepted, archived
Series parisc: Define mb() and add memory barriers to assembler unlock sequences

Commit Message

John David Anglin Aug. 5, 2018, 5:30 p.m. UTC
For years I thought all parisc machines executed loads and stores in order.  However, Jeff Law recently indicated on gcc-patches that this is not correct.  There are various degrees of out-of-order execution all the way back to the PA7xxx processor series (hit-under-miss).  The PA8xxx series has full out-of-order execution for integer operations as well as for loads and stores.

This is described in the following article:
http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml

For this reason, we need to define mb() and to insert a memory barrier before the store that unlocks a spinlock.  This ensures that all memory accesses are complete prior to unlocking.  The ldcw instruction used to acquire the lock performs the same function on entry.
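
A minimal C-level sketch of the ordering this enforces (illustrative only; the function name and lock word below are assumptions, not the kernel's actual spinlock code):

	/* Illustrative sketch, not part of this patch. */
	static inline void example_spin_unlock(volatile unsigned int *lock_word)
	{
		mb();		/* expands to "sync" on SMP: all prior loads
				   and stores must complete here...          */
		*lock_word = 1;	/* ...before the store that releases the lock */
	}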

Signed-off-by: John David Anglin <dave.anglin@bell.net>

diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e95207c0565e..13b783ba5b0a 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -481,8 +481,10 @@
 	/* Release pa_tlb_lock lock without reloading lock address. */
 	.macro		tlb_unlock0	spc,tmp
 #ifdef CONFIG_SMP
-	or,COND(=)	%r0,\spc,%r0
+	cmpib,COND(=),n	0,\spc,1f
+	sync
 	stw             \spc,0(\tmp)
+1:
 #endif
 	.endm
 
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 22e6374ece44..b2bc244dca1a 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -352,6 +352,7 @@ ENDPROC_CFI(flush_data_cache_local)
 
 	.macro	tlb_unlock	la,flags,tmp
 #ifdef CONFIG_SMP
+	sync
 	ldi		1,\tmp
 	stw		\tmp,0(\la)
 	mtsm		\flags
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index e775f80ae28c..4886a6db42e9 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -633,6 +633,7 @@ cas_action:
 	sub,<>	%r28, %r25, %r0
 2:	stw,ma	%r24, 0(%r26)
 	/* Free lock */
+	sync
 	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
@@ -647,6 +648,7 @@ cas_action:
 3:		
 	/* Error occurred on load or store */
 	/* Free lock */
+	sync
 	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	stw	%r0, 4(%sr2,%r20)
@@ -848,6 +850,7 @@ cas2_action:
 
 cas2_end:
 	/* Free lock */
+	sync
 	stw,ma	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
@@ -858,6 +861,7 @@ cas2_end:
 22:
 	/* Error occurred on load or store */
 	/* Free lock */
+	sync
 	stw	%r20, 0(%sr2,%r20)
 	ssm	PSW_SM_I, %r0
 	ldo	1(%r0),%r28

Patch

--- /dev/null	2018-08-04 12:32:49.880000000 -0400
+++ arch/parisc/include/asm/barrier.h	2018-08-04 13:10:48.843495530 -0400
@@ -0,0 +1,32 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb()		do { synchronize_caches(); } while (0)
+#define rmb()		mb()
+#define wmb()		mb()
+#define dma_rmb()	mb()
+#define dma_wmb()	mb()
+#else
+#define mb()		barrier()
+#define rmb()		barrier()
+#define wmb()		barrier()
+#define dma_rmb()	barrier()
+#define dma_wmb()	barrier()
+#endif
+
+#define __smp_mb()	mb()
+#define __smp_rmb()	mb()
+#define __smp_wmb()	mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
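
For reference, a hedged usage sketch of the rmb()/wmb() pair defined above (all names below are made up for illustration; a real kernel user would also use READ_ONCE()/WRITE_ONCE() for compiler ordering):

	#include <asm/barrier.h>

	static int data_buf[16];
	static volatile unsigned int head_idx;

	/* Producer: write the payload first, then publish the index. */
	static void publish(int v)
	{
		data_buf[head_idx % 16] = v;
		wmb();			/* payload visible before the index */
		head_idx++;
	}

	/* Consumer: read the index first, then the payload it covers. */
	static int consume(unsigned int tail)
	{
		unsigned int h = head_idx;
		rmb();			/* index read before the payload    */
		return (h != tail) ? data_buf[tail % 16] : -1;
	}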