
[1/3] arm/arm64: spinlocks: fix memory barriers

Message ID 1435248739-25425-2-git-send-email-drjones@redhat.com (mailing list archive)
State New, archived

Commit Message

Andrew Jones June 25, 2015, 4:12 p.m. UTC
It shouldn't be necessary to use a barrier on the way into
spin_lock. We'll be focused on a single address until we get
it (exclusively) set, and then we'll do a barrier on the way
out. Also, it does make sense to do a barrier on the way into
spin_unlock, i.e. to ensure that what we did in the critical
section is ordered with respect to what we do outside it,
before we announce that we're outside.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/arm/spinlock.c   | 8 ++++----
 lib/arm64/spinlock.c | 5 ++---
 2 files changed, 6 insertions(+), 7 deletions(-)
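
For reference, the barrier discipline this patch adopts is the usual
acquire/release pattern: no barrier on the way into the lock loop, one
barrier once the lock is held, and one barrier before the releasing
store. Below is a minimal C11 sketch of the same idea; it is an
illustration only, not the lib/arm code. sketch_lock/sketch_unlock are
hypothetical names, and the seq_cst fences stand in for kvm-unit-tests'
smp_mb().

  #include <stdatomic.h>

  struct spinlock { atomic_uint v; };

  static void sketch_lock(struct spinlock *lock)
  {
          unsigned int zero = 0;

          /* Spin on the one address until we flip it 0 -> 1; no
           * barrier is needed on the way in. */
          while (!atomic_compare_exchange_weak_explicit(&lock->v,
                                                        &zero, 1,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed))
                  zero = 0;       /* the failed CAS overwrote 'zero' */

          /* Barrier on the way out: the critical section cannot be
           * observed before the lock acquisition. */
          atomic_thread_fence(memory_order_seq_cst);
  }

  static void sketch_unlock(struct spinlock *lock)
  {
          /* Barrier on the way in: what we did inside the critical
           * section is ordered before we announce we're outside. */
          atomic_thread_fence(memory_order_seq_cst);
          atomic_store_explicit(&lock->v, 0, memory_order_relaxed);
  }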

Comments

Christoffer Dall June 29, 2015, 10:27 a.m. UTC | #1
On Thu, Jun 25, 2015 at 06:12:17PM +0200, Andrew Jones wrote:
> [...]

looks good to me

-Christoffer
Paolo Bonzini July 3, 2015, 5:42 p.m. UTC | #2
On 25/06/2015 18:12, Andrew Jones wrote:
> [...]

Applied, thanks.

Paolo

Patch

diff --git a/lib/arm/spinlock.c b/lib/arm/spinlock.c
index 3b023ceaebf71..116ea5d7db930 100644
--- a/lib/arm/spinlock.c
+++ b/lib/arm/spinlock.c
@@ -7,10 +7,9 @@ void spin_lock(struct spinlock *lock)
 {
 	u32 val, fail;
 
-	dmb();
-
 	if (!mmu_enabled()) {
 		lock->v = 1;
+		smp_mb();
 		return;
 	}
 
@@ -25,11 +24,12 @@ void spin_lock(struct spinlock *lock)
 		: "r" (&lock->v)
 		: "cc" );
 	} while (fail);
-	dmb();
+
+	smp_mb();
 }
 
 void spin_unlock(struct spinlock *lock)
 {
+	smp_mb();
 	lock->v = 0;
-	dmb();
 }
diff --git a/lib/arm64/spinlock.c b/lib/arm64/spinlock.c
index 68b68b75ba60d..a3907f03cacda 100644
--- a/lib/arm64/spinlock.c
+++ b/lib/arm64/spinlock.c
@@ -13,10 +13,9 @@ void spin_lock(struct spinlock *lock)
 {
 	u32 val, fail;
 
-	smp_mb();
-
 	if (!mmu_enabled()) {
 		lock->v = 1;
+		smp_mb();
 		return;
 	}
 
@@ -35,9 +34,9 @@ void spin_lock(struct spinlock *lock)
 
 void spin_unlock(struct spinlock *lock)
 {
+	smp_mb();
 	if (mmu_enabled())
 		asm volatile("stlrh wzr, [%0]" :: "r" (&lock->v));
 	else
 		lock->v = 0;
-	smp_mb();
 }
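
A note on the arm64 unlock path: with the MMU enabled, "stlrh wzr, [%0]"
is a store-release, so that store already orders everything before it;
the smp_mb() added at the top of spin_unlock is presumably what covers
the plain store in the !mmu_enabled() fallback. In C11 terms the
release-store path is roughly equivalent to this sketch (not the actual
code):

  /* A store-release: all prior critical-section accesses are
   * ordered before the lock is seen as free. */
  atomic_store_explicit(&lock->v, 0, memory_order_release);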