[4/9] block: turn bdev_lock into a mutex

Message ID 20230505175132.2236632-5-hch@lst.de (mailing list archive)
State New, archived
Series [1/9] block: consolidate the shutdown logic in blk_mark_disk_dead and del_gendisk

Commit Message

Christoph Hellwig May 5, 2023, 5:51 p.m. UTC
There is no reason for this lock to spin, and being able to sleep under
it will come in handy soon.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/bdev.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)
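
For context, the practical difference is that a mutex may be held across operations that sleep, while a spinlock holder runs in atomic context and must not block. A minimal sketch of the two rules (illustrative only: the example lock and function names below are made up and are not part of this patch):

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_spinlock);	/* illustrative, not bdev_lock */
static DEFINE_MUTEX(example_mutex);		/* illustrative, not bdev_lock */

static void spinlock_rules(void)
{
	spin_lock(&example_spinlock);
	/*
	 * Atomic context: the holder must not sleep, so GFP_KERNEL
	 * allocations, msleep() or taking a mutex are all forbidden
	 * until spin_unlock().
	 */
	spin_unlock(&example_spinlock);
}

static void mutex_rules(void)
{
	void *p;

	mutex_lock(&example_mutex);
	/*
	 * Process context: the holder may sleep, so blocking calls
	 * are allowed while the lock is held.
	 */
	p = kzalloc(16, GFP_KERNEL);	/* may sleep for memory reclaim */
	msleep(1);			/* an explicit sleep is also fine */
	kfree(p);
	mutex_unlock(&example_mutex);
}

Turning bdev_lock into a mutex is what allows the later blocking work the commit message alludes to.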

Comments

Jan Kara May 7, 2023, 7:09 p.m. UTC | #1
On Fri 05-05-23 13:51:27, Christoph Hellwig wrote:
> There is no reason for this lock to spin, and being able to sleep under
> it will come in handy soon.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Makes sense. Feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

Christian Brauner May 16, 2023, 4:24 p.m. UTC | #2
On Fri, May 05, 2023 at 01:51:27PM -0400, Christoph Hellwig wrote:
> There is no reason for this lock to spin, and being able to sleep under
> it will come in handy soon.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---

Acked-by: Christian Brauner <brauner@kernel.org>

Patch

diff --git a/block/bdev.c b/block/bdev.c
index f2c7181b0bba7d..bad75f6cf8edcd 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -308,7 +308,7 @@  EXPORT_SYMBOL(thaw_bdev);
  * pseudo-fs
  */
 
-static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
+static  __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
 static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
@@ -457,15 +457,14 @@  long nr_blockdev_pages(void)
  *
  * Test whether @bdev can be claimed by @holder.
  *
- * CONTEXT:
- * spin_lock(&bdev_lock).
- *
  * RETURNS:
  * %true if @bdev can be claimed, %false otherwise.
  */
 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
 			 void *holder)
 {
+	lockdep_assert_held(&bdev_lock);
+
 	if (bdev->bd_holder == holder)
 		return true;	 /* already a holder */
 	else if (bdev->bd_holder != NULL)
@@ -500,10 +499,10 @@  int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 	if (WARN_ON_ONCE(!holder))
 		return -EINVAL;
 retry:
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	/* if someone else claimed, fail */
 	if (!bd_may_claim(bdev, whole, holder)) {
-		spin_unlock(&bdev_lock);
+		mutex_unlock(&bdev_lock);
 		return -EBUSY;
 	}
 
@@ -513,7 +512,7 @@  int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 		DEFINE_WAIT(wait);
 
 		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock(&bdev_lock);
+		mutex_unlock(&bdev_lock);
 		schedule();
 		finish_wait(wq, &wait);
 		goto retry;
@@ -521,7 +520,7 @@  int bd_prepare_to_claim(struct block_device *bdev, void *holder)
 
 	/* yay, all mine */
 	whole->bd_claiming = holder;
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
@@ -547,7 +546,7 @@  static void bd_finish_claiming(struct block_device *bdev, void *holder)
 {
 	struct block_device *whole = bdev_whole(bdev);
 
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	BUG_ON(!bd_may_claim(bdev, whole, holder));
 	/*
 	 * Note that for a whole device bd_holders will be incremented twice,
@@ -558,7 +557,7 @@  static void bd_finish_claiming(struct block_device *bdev, void *holder)
 	bdev->bd_holders++;
 	bdev->bd_holder = holder;
 	bd_clear_claiming(whole, holder);
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 }
 
 /**
@@ -572,9 +571,9 @@  static void bd_finish_claiming(struct block_device *bdev, void *holder)
  */
 void bd_abort_claiming(struct block_device *bdev, void *holder)
 {
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	bd_clear_claiming(bdev_whole(bdev), holder);
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 }
 EXPORT_SYMBOL(bd_abort_claiming);
 
@@ -587,7 +586,7 @@  static void bd_end_claim(struct block_device *bdev)
 	 * Release a claim on the device.  The holder fields are protected with
 	 * bdev_lock.  open_mutex is used to synchronize disk_holder unlinking.
 	 */
-	spin_lock(&bdev_lock);
+	mutex_lock(&bdev_lock);
 	WARN_ON_ONCE(--bdev->bd_holders < 0);
 	WARN_ON_ONCE(--whole->bd_holders < 0);
 	if (!bdev->bd_holders) {
@@ -597,7 +596,7 @@  static void bd_end_claim(struct block_device *bdev)
 	}
 	if (!whole->bd_holders)
 		whole->bd_holder = NULL;
-	spin_unlock(&bdev_lock);
+	mutex_unlock(&bdev_lock);
 
 	/*
 	 * If this was the last claim, remove holder link and unblock evpoll if
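
For readers coming to the claiming helpers cold: bd_prepare_to_claim() (exported only for the loop driver, per the comment above) and bd_abort_claiming() bracket work that must not race with a new exclusive claim, while the static bd_finish_claiming() commits a successful claim. A rough sketch of the prepare/abort pattern follows; example_reconfigure(), example_holder and the work in the middle are hypothetical placeholders, not code from this series:

#include <linux/blkdev.h>

/* Unique cookie identifying this claimer; only its address matters. */
static char example_holder;

/*
 * Illustrative only: hold off concurrent exclusive claims on @bdev while
 * some driver-private work runs, then drop the temporary claim.
 */
static int example_reconfigure(struct block_device *bdev)
{
	int error;

	/*
	 * Reserve the device: fails with -EBUSY if another holder has
	 * already claimed it, and sleeps through the retry loop shown
	 * in the patch while a competing claim is still in flight.
	 */
	error = bd_prepare_to_claim(bdev, &example_holder);
	if (error)
		return error;

	/* ... work that must not race with a new exclusive opener ... */

	/* Undo the reservation and wake any waiters in that retry loop. */
	bd_abort_claiming(bdev, &example_holder);
	return 0;
}

The design point of this patch is simply that such critical sections no longer have to be atomic, so later code in the series can sleep while bdev_lock is held.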