riscv: Optimize AMO acquire/release usage

Message ID 20220406120405.660354-1-guoren@kernel.org (mailing list archive)
State New, archived
Series riscv: Optimize AMO acquire/release usage

Commit Message

Guo Ren April 6, 2022, 12:04 p.m. UTC
From: Guo Ren <guoren@linux.alibaba.com>

Using RISCV_ACQUIRE/RELEASE_BARRIER for xchg/cmpxchg_acquire/release
is more expensive than the native .aq/.rl instruction annotations.
The patch switches to the annotated forms, following the RISC-V
Instruction Set Manual, Volume I: RISC-V User-Level ISA “A” Standard
Extension for Atomic Instructions, Version 2.1.
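
For example, the 32-bit xchg_acquire case goes from a plain AMO plus a
separate acquire barrier to a single annotated AMO. A sketch of the
change, taken from the diff below (RISCV_ACQUIRE_BARRIER expands to a
"fence r, rw"):

	/* Before: AMO followed by a separate acquire barrier */
	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (__ret), "+A" (*__ptr)
		: "r" (__new)
		: "memory");

	/* After: the AMO itself carries acquire semantics */
	__asm__ __volatile__ (
		"	amoswap.w.aq %0, %2, %1\n"
		: "=r" (__ret), "+A" (*__ptr)
		: "r" (__new)
		: "memory");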

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
---
 arch/riscv/include/asm/atomic.h  | 70 ++++++++++++++++++++++++++++++--
 arch/riscv/include/asm/cmpxchg.h | 30 +++++---------
 2 files changed, 76 insertions(+), 24 deletions(-)

Comments

Mark Rutland April 11, 2022, 9:34 a.m. UTC | #1
Hi Guo,

On Wed, Apr 06, 2022 at 08:04:05PM +0800, guoren@kernel.org wrote:
> From: Guo Ren <guoren@linux.alibaba.com>
> 
> Using RISCV_ACQUIRE/RELEASE_BARRIER for xchg/cmpxchg_acquire/release
> is more expensive than the native .aq/.rl instruction annotations.
> The patch switches to the annotated forms, following the RISC-V
> Instruction Set Manual, Volume I: RISC-V User-Level ISA “A” Standard
> Extension for Atomic Instructions, Version 2.1.
> 
> Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> Signed-off-by: Guo Ren <guoren@kernel.org>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>
> ---
>  arch/riscv/include/asm/atomic.h  | 70 ++++++++++++++++++++++++++++++--
>  arch/riscv/include/asm/cmpxchg.h | 30 +++++---------
>  2 files changed, 76 insertions(+), 24 deletions(-)

I'll leave the bulk of this to Palmer, but I spotted something below which
doesn't look right.

> @@ -315,12 +379,11 @@ static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
>         int prev, rc;
>  
>  	__asm__ __volatile__ (
> -		"0:	lr.w     %[p],  %[c]\n"
> +		"0:	lr.w.aq  %[p],  %[c]\n"
>  		"	sub      %[rc], %[p], %[o]\n"
>  		"	bltz     %[rc], 1f\n"
>  		"	sc.w.rl  %[rc], %[rc], %[c]\n"
>  		"	bnez     %[rc], 0b\n"
> -		"	fence    rw, rw\n"
>  		"1:\n"
>  		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
>  		: [o]"r" (offset)

I believe in this case the existing code here is correct, and this optimization
is broken.

I believe the existing code is using RELEASE + FULL-BARRIER to ensure full
ordering, since separate ACQUIRE+RELEASE cannot. For a description of the
problem, see the commit message for:

  8e86f0b409a44193 ("arm64: atomics: fix use of acquire + release for full barrier semantics")

The gist is that HW can re-order:

	ACCESS-A
	ACQUIRE
	RELEASE
	ACCESS-B

... to:

	ACQUIRE
	ACCESS-B
	ACCESS-A
	RELEASE

... violating FULL ordering semantics.
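
Concretely, for the LR/SC loop above once the trailing fence is
removed, each annotation only orders one side:

	ACCESS-A
	lr.w.aq   ...	# later accesses cannot move up before this,
			# but ACCESS-A can still sink down past it
	...
	sc.w.rl   ...	# earlier accesses cannot move down after this,
			# but ACCESS-B can still hoist up above it
	ACCESS-B

... so nothing prevents ACCESS-A and ACCESS-B from being reordered
with respect to one another.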

This will apply for *any* operation where FULL ordering is required, which I
suspect applies to some more cases below.

> @@ -337,12 +400,11 @@ static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offs
>         long rc;
>  
>  	__asm__ __volatile__ (
> -		"0:	lr.d     %[p],  %[c]\n"
> +		"0:	lr.d.aq  %[p],  %[c]\n"
>  		"	sub      %[rc], %[p], %[o]\n"
>  		"	bltz     %[rc], 1f\n"
>  		"	sc.d.rl  %[rc], %[rc], %[c]\n"
>  		"	bnez     %[rc], 0b\n"
> -		"	fence    rw, rw\n"
>  		"1:\n"
>  		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
>  		: [o]"r" (offset)

My comment for arch_atomic_sub_if_positive() applies here too.


[...]

> @@ -309,11 +301,10 @@
>  	switch (size) {							\
>  	case 4:								\
>  		__asm__ __volatile__ (					\
> -			"0:	lr.w %0, %2\n"				\
> +			"0:	lr.w.aq %0, %2\n"			\
>  			"	bne  %0, %z3, 1f\n"			\
>  			"	sc.w.rl %1, %z4, %2\n"			\
>  			"	bnez %1, 0b\n"				\
> -			"	fence rw, rw\n"				\
>  			"1:\n"						\
>  			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
>  			: "rJ" ((long)__old), "rJ" (__new)		\
> @@ -321,11 +312,10 @@
>  		break;							\
>  	case 8:								\
>  		__asm__ __volatile__ (					\
> -			"0:	lr.d %0, %2\n"				\
> +			"0:	lr.d.aq %0, %2\n"			\
>  			"	bne %0, %z3, 1f\n"			\
>  			"	sc.d.rl %1, %z4, %2\n"			\
>  			"	bnez %1, 0b\n"				\
> -			"	fence rw, rw\n"				\
>  			"1:\n"						\
>  			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
>  			: "rJ" (__old), "rJ" (__new)			\

I don't have enough context to say for sure, but I suspect these are expecting
FULL ordering too, and would be broken, as above.

Thanks,
Mark.
Guo Ren April 11, 2022, 1:20 p.m. UTC | #2
Hi Mark,

On Mon, Apr 11, 2022 at 5:35 PM Mark Rutland <mark.rutland@arm.com> wrote:
>
> Hi Guo,
>
> On Wed, Apr 06, 2022 at 08:04:05PM +0800, guoren@kernel.org wrote:
> > From: Guo Ren <guoren@linux.alibaba.com>
> >
> > Using RISCV_ACQUIRE/RELEASE_BARRIER for xchg/cmpxchg_acquire/release
> > is more expensive than the native .aq/.rl instruction annotations.
> > The patch switches to the annotated forms, following the RISC-V
> > Instruction Set Manual, Volume I: RISC-V User-Level ISA “A” Standard
> > Extension for Atomic Instructions, Version 2.1.
> >
> > Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
> > Signed-off-by: Guo Ren <guoren@kernel.org>
> > Cc: Palmer Dabbelt <palmer@dabbelt.com>
> > ---
> >  arch/riscv/include/asm/atomic.h  | 70 ++++++++++++++++++++++++++++++--
> >  arch/riscv/include/asm/cmpxchg.h | 30 +++++---------
> >  2 files changed, 76 insertions(+), 24 deletions(-)
>
> I'll leave the bulk of this to Palmer, but I spotted something below which
> doesn't look right.
>
> > @@ -315,12 +379,11 @@ static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
> >         int prev, rc;
> >
> >       __asm__ __volatile__ (
> > -             "0:     lr.w     %[p],  %[c]\n"
> > +             "0:     lr.w.aq  %[p],  %[c]\n"
> >               "       sub      %[rc], %[p], %[o]\n"
> >               "       bltz     %[rc], 1f\n"
> >               "       sc.w.rl  %[rc], %[rc], %[c]\n"
> >               "       bnez     %[rc], 0b\n"
> > -             "       fence    rw, rw\n"
> >               "1:\n"
> >               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> >               : [o]"r" (offset)
>
> I believe in this case the existing code here is correct, and this optimization
> is broken.
Yes, you are right. My patch would break RISC-V memory consistency
between acquire & release. Thanks for the correction.

>
> I believe the existing code is using RELEASE + FULL-BARRIER to ensure full
> ordering, since separate ACQUIRE+RELEASE cannot. For a description of the
> problem, see the commit message for:
I have another question: the RELEASE (preventing ACCESS-A from moving
after stlxr) + FULL-BARRIER approach is needed on arm64 because there
is no "stlaxr" instruction, right? On riscv we could use sc.w.aqrl
directly and drop the trailing fence.

New patch:
        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       sub      %[rc], %[p], %[o]\n"
                "       bltz     %[rc], 1f\n"
-               "       sc.w.rl  %[rc], %[rc], %[c]\n"
+               "       sc.w.aqrl %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
-               "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [o]"r" (offset)

(It surprises me that lr.w.aq seems to be useless in the real world.)
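
My understanding of why a single sc.w.aqrl could restore full ordering
where the split .aq/.rl pair cannot (a sketch, based on my reading of
the spec):

	ACCESS-A
	lr.w      ...
	sc.w.aqrl ...	# .rl: ACCESS-A cannot move after this
			# .aq: ACCESS-B cannot move before this
	ACCESS-B

Both annotations sit on the one instruction, so ACCESS-A and ACCESS-B
stay ordered through the SC instead of being free to swap around the
LR/SC pair.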

>
>   8e86f0b409a44193 ("arm64: atomics: fix use of acquire + release for full barrier semantics")
>
> The gist is that HW can re-order:
>
>         ACCESS-A
>         ACQUIRE
>         RELEASE
>         ACCESS-B
>
> ... to:
>
>         ACQUIRE
>         ACCESS-B
>         ACCESS-A
>         RELEASE
>
> ... violating FULL ordering semantics.
>
> This will apply for *any* operation where FULL ordering is required, which I
> suspect applies to some more cases below.


>
> > @@ -337,12 +400,11 @@ static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offs
> >         long rc;
> >
> >       __asm__ __volatile__ (
> > -             "0:     lr.d     %[p],  %[c]\n"
> > +             "0:     lr.d.aq  %[p],  %[c]\n"
> >               "       sub      %[rc], %[p], %[o]\n"
> >               "       bltz     %[rc], 1f\n"
> >               "       sc.d.rl  %[rc], %[rc], %[c]\n"
> >               "       bnez     %[rc], 0b\n"
> > -             "       fence    rw, rw\n"
> >               "1:\n"
> >               : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> >               : [o]"r" (offset)
>
> My comment for arch_atomic_sub_if_positive() applies here too.
>
>
> [...]
>
> > @@ -309,11 +301,10 @@
> >       switch (size) {                                                 \
> >       case 4:                                                         \
> >               __asm__ __volatile__ (                                  \
> > -                     "0:     lr.w %0, %2\n"                          \
> > +                     "0:     lr.w.aq %0, %2\n"                       \
> >                       "       bne  %0, %z3, 1f\n"                     \
> >                       "       sc.w.rl %1, %z4, %2\n"                  \
> >                       "       bnez %1, 0b\n"                          \
> > -                     "       fence rw, rw\n"                         \
> >                       "1:\n"                                          \
> >                       : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)    \
> >                       : "rJ" ((long)__old), "rJ" (__new)              \
> > @@ -321,11 +312,10 @@
> >               break;                                                  \
> >       case 8:                                                         \
> >               __asm__ __volatile__ (                                  \
> > -                     "0:     lr.d %0, %2\n"                          \
> > +                     "0:     lr.d.aq %0, %2\n"                       \
> >                       "       bne %0, %z3, 1f\n"                      \
> >                       "       sc.d.rl %1, %z4, %2\n"                  \
> >                       "       bnez %1, 0b\n"                          \
> > -                     "       fence rw, rw\n"                         \
> >                       "1:\n"                                          \
> >                       : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)    \
> >                       : "rJ" (__old), "rJ" (__new)                    \
>
> I don't have enough context to say for sure, but I suspect these are expecting
> FULL ordering too, and would be broken, as above.
>
> Thanks,
> Mark.

Patch

diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index ac9bdf4fc404..364df773a36a 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -99,6 +99,30 @@  c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,		\
 	return ret;							\
 }									\
 static __always_inline							\
+c_type arch_atomic##prefix##_fetch_##op##_acquire(c_type i,		\
+					     atomic##prefix##_t *v)	\
+{									\
+	register c_type ret;						\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type ".aq %1, %2, %0"	\
+		: "+A" (v->counter), "=r" (ret)				\
+		: "r" (I)						\
+		: "memory");						\
+	return ret;							\
+}									\
+static __always_inline							\
+c_type arch_atomic##prefix##_fetch_##op##_release(c_type i,		\
+					     atomic##prefix##_t *v)	\
+{									\
+	register c_type ret;						\
+	__asm__ __volatile__ (						\
+		"	amo" #asm_op "." #asm_type ".rl %1, %2, %0"	\
+		: "+A" (v->counter), "=r" (ret)				\
+		: "r" (I)						\
+		: "memory");						\
+	return ret;							\
+}									\
+static __always_inline							\
 c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
 {									\
 	register c_type ret;						\
@@ -118,6 +142,18 @@  c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,		\
         return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
 }									\
 static __always_inline							\
+c_type arch_atomic##prefix##_##op##_return_acquire(c_type i,		\
+					      atomic##prefix##_t *v)	\
+{									\
+        return arch_atomic##prefix##_fetch_##op##_acquire(i, v) c_op I;	\
+}									\
+static __always_inline							\
+c_type arch_atomic##prefix##_##op##_return_release(c_type i,		\
+					      atomic##prefix##_t *v)	\
+{									\
+        return arch_atomic##prefix##_fetch_##op##_release(i, v) c_op I;	\
+}									\
+static __always_inline							\
 c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
 {									\
         return arch_atomic##prefix##_fetch_##op(i, v) c_op I;		\
@@ -140,22 +176,38 @@  ATOMIC_OPS(sub, add, +, -i)
 
 #define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
 #define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return_acquire	arch_atomic_add_return_acquire
+#define arch_atomic_sub_return_acquire	arch_atomic_sub_return_acquire
+#define arch_atomic_add_return_release	arch_atomic_add_return_release
+#define arch_atomic_sub_return_release	arch_atomic_sub_return_release
 #define arch_atomic_add_return		arch_atomic_add_return
 #define arch_atomic_sub_return		arch_atomic_sub_return
 
 #define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
 #define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add_acquire
+#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub_acquire
+#define arch_atomic_fetch_add_release	arch_atomic_fetch_add_release
+#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub_release
 #define arch_atomic_fetch_add		arch_atomic_fetch_add
 #define arch_atomic_fetch_sub		arch_atomic_fetch_sub
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
 #define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return_acquire	arch_atomic64_add_return_acquire
+#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return_acquire
+#define arch_atomic64_add_return_release	arch_atomic64_add_return_release
+#define arch_atomic64_sub_return_release	arch_atomic64_sub_return_release
 #define arch_atomic64_add_return		arch_atomic64_add_return
 #define arch_atomic64_sub_return		arch_atomic64_sub_return
 
 #define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
 #define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add_acquire	arch_atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_sub_acquire	arch_atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_add_release	arch_atomic64_fetch_add_release
+#define arch_atomic64_fetch_sub_release	arch_atomic64_fetch_sub_release
 #define arch_atomic64_fetch_add		arch_atomic64_fetch_add
 #define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
 #endif
@@ -178,6 +230,12 @@  ATOMIC_OPS(xor, xor, i)
 #define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
 #define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
 #define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and_acquire
+#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or_acquire
+#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor_acquire
+#define arch_atomic_fetch_and_release	arch_atomic_fetch_and_release
+#define arch_atomic_fetch_or_release	arch_atomic_fetch_or_release
+#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor_release
 #define arch_atomic_fetch_and		arch_atomic_fetch_and
 #define arch_atomic_fetch_or		arch_atomic_fetch_or
 #define arch_atomic_fetch_xor		arch_atomic_fetch_xor
@@ -186,6 +244,12 @@  ATOMIC_OPS(xor, xor, i)
 #define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
 #define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
 #define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_acquire	arch_atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_or_acquire	arch_atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_xor_acquire	arch_atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_and_release	arch_atomic64_fetch_and_release
+#define arch_atomic64_fetch_or_release	arch_atomic64_fetch_or_release
+#define arch_atomic64_fetch_xor_release	arch_atomic64_fetch_xor_release
 #define arch_atomic64_fetch_and		arch_atomic64_fetch_and
 #define arch_atomic64_fetch_or		arch_atomic64_fetch_or
 #define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
@@ -315,12 +379,11 @@  static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
        int prev, rc;
 
 	__asm__ __volatile__ (
-		"0:	lr.w     %[p],  %[c]\n"
+		"0:	lr.w.aq  %[p],  %[c]\n"
 		"	sub      %[rc], %[p], %[o]\n"
 		"	bltz     %[rc], 1f\n"
 		"	sc.w.rl  %[rc], %[rc], %[c]\n"
 		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		: [o]"r" (offset)
@@ -337,12 +400,11 @@  static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offs
        long rc;
 
 	__asm__ __volatile__ (
-		"0:	lr.d     %[p],  %[c]\n"
+		"0:	lr.d.aq  %[p],  %[c]\n"
 		"	sub      %[rc], %[p], %[o]\n"
 		"	bltz     %[rc], 1f\n"
 		"	sc.d.rl  %[rc], %[rc], %[c]\n"
 		"	bnez     %[rc], 0b\n"
-		"	fence    rw, rw\n"
 		"1:\n"
 		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
 		: [o]"r" (offset)
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 36dc962f6343..8ff1cd8162ba 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -52,16 +52,14 @@ 
 	switch (size) {							\
 	case 4:								\
 		__asm__ __volatile__ (					\
-			"	amoswap.w %0, %2, %1\n"			\
-			RISCV_ACQUIRE_BARRIER				\
+			"	amoswap.w.aq %0, %2, %1\n"		\
 			: "=r" (__ret), "+A" (*__ptr)			\
 			: "r" (__new)					\
 			: "memory");					\
 		break;							\
 	case 8:								\
 		__asm__ __volatile__ (					\
-			"	amoswap.d %0, %2, %1\n"			\
-			RISCV_ACQUIRE_BARRIER				\
+			"	amoswap.d.aq %0, %2, %1\n"		\
 			: "=r" (__ret), "+A" (*__ptr)			\
 			: "r" (__new)					\
 			: "memory");					\
@@ -87,16 +85,14 @@ 
 	switch (size) {							\
 	case 4:								\
 		__asm__ __volatile__ (					\
-			RISCV_RELEASE_BARRIER				\
-			"	amoswap.w %0, %2, %1\n"			\
+			"	amoswap.w.rl %0, %2, %1\n"		\
 			: "=r" (__ret), "+A" (*__ptr)			\
 			: "r" (__new)					\
 			: "memory");					\
 		break;							\
 	case 8:								\
 		__asm__ __volatile__ (					\
-			RISCV_RELEASE_BARRIER				\
-			"	amoswap.d %0, %2, %1\n"			\
+			"	amoswap.d.rl %0, %2, %1\n"		\
 			: "=r" (__ret), "+A" (*__ptr)			\
 			: "r" (__new)					\
 			: "memory");					\
@@ -217,11 +213,10 @@ 
 	switch (size) {							\
 	case 4:								\
 		__asm__ __volatile__ (					\
-			"0:	lr.w %0, %2\n"				\
+			"0:	lr.w.aq %0, %2\n"			\
 			"	bne  %0, %z3, 1f\n"			\
 			"	sc.w %1, %z4, %2\n"			\
 			"	bnez %1, 0b\n"				\
-			RISCV_ACQUIRE_BARRIER				\
 			"1:\n"						\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
 			: "rJ" ((long)__old), "rJ" (__new)		\
@@ -229,11 +224,10 @@ 
 		break;							\
 	case 8:								\
 		__asm__ __volatile__ (					\
-			"0:	lr.d %0, %2\n"				\
+			"0:	lr.d.aq %0, %2\n"			\
 			"	bne %0, %z3, 1f\n"			\
 			"	sc.d %1, %z4, %2\n"			\
 			"	bnez %1, 0b\n"				\
-			RISCV_ACQUIRE_BARRIER				\
 			"1:\n"						\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
 			: "rJ" (__old), "rJ" (__new)			\
@@ -263,10 +257,9 @@ 
 	switch (size) {							\
 	case 4:								\
 		__asm__ __volatile__ (					\
-			RISCV_RELEASE_BARRIER				\
 			"0:	lr.w %0, %2\n"				\
 			"	bne  %0, %z3, 1f\n"			\
-			"	sc.w %1, %z4, %2\n"			\
+			"	sc.w.rl %1, %z4, %2\n"			\
 			"	bnez %1, 0b\n"				\
 			"1:\n"						\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
@@ -275,10 +268,9 @@ 
 		break;							\
 	case 8:								\
 		__asm__ __volatile__ (					\
-			RISCV_RELEASE_BARRIER				\
 			"0:	lr.d %0, %2\n"				\
 			"	bne %0, %z3, 1f\n"			\
-			"	sc.d %1, %z4, %2\n"			\
+			"	sc.d.rl %1, %z4, %2\n"			\
 			"	bnez %1, 0b\n"				\
 			"1:\n"						\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
@@ -309,11 +301,10 @@ 
 	switch (size) {							\
 	case 4:								\
 		__asm__ __volatile__ (					\
-			"0:	lr.w %0, %2\n"				\
+			"0:	lr.w.aq %0, %2\n"			\
 			"	bne  %0, %z3, 1f\n"			\
 			"	sc.w.rl %1, %z4, %2\n"			\
 			"	bnez %1, 0b\n"				\
-			"	fence rw, rw\n"				\
 			"1:\n"						\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
 			: "rJ" ((long)__old), "rJ" (__new)		\
@@ -321,11 +312,10 @@ 
 		break;							\
 	case 8:								\
 		__asm__ __volatile__ (					\
-			"0:	lr.d %0, %2\n"				\
+			"0:	lr.d.aq %0, %2\n"			\
 			"	bne %0, %z3, 1f\n"			\
 			"	sc.d.rl %1, %z4, %2\n"			\
 			"	bnez %1, 0b\n"				\
-			"	fence rw, rw\n"				\
 			"1:\n"						\
 			: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)	\
 			: "rJ" (__old), "rJ" (__new)			\