diff mbox series

[7/8] loop: only freeze the queue in __loop_clr_fd when needed

Message ID 20220126155040.1190842-8-hch@lst.de (mailing list archive)
State New, archived
Headers show
Series [1/8] loop: de-duplicate the idle worker freeing code | expand

Commit Message

Christoph Hellwig Jan. 26, 2022, 3:50 p.m. UTC
->release is only called after all outstanding I/O has completed, so only
freeze the queue when clearing the backing file of a live loop device.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/block/loop.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

Comments

Jan Kara Jan. 27, 2022, 11:01 a.m. UTC | #1
On Wed 26-01-22 16:50:39, Christoph Hellwig wrote:
> ->release is only called after all outstanding I/O has completed, so only
> freeze the queue when clearing the backing file of a live loop device.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Looks good. I was just wondering: Is there anything which prevents us from
moving blk_mq_freeze_queue() & blk_mq_unfreeze_queue() calls to
loop_clr_fd() around __loop_clr_fd() call?

Anyway feel free to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza

> ---
>  drivers/block/loop.c | 18 ++++++++++++------
>  1 file changed, 12 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/block/loop.c b/drivers/block/loop.c
> index d9a0e2205762f..fc0cdd1612b1d 100644
> --- a/drivers/block/loop.c
> +++ b/drivers/block/loop.c
> @@ -1110,7 +1110,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
>  	return error;
>  }
>  
> -static void __loop_clr_fd(struct loop_device *lo)
> +static void __loop_clr_fd(struct loop_device *lo, bool release)
>  {
>  	struct file *filp;
>  	gfp_t gfp = lo->old_gfp_mask;
> @@ -1139,8 +1139,13 @@ static void __loop_clr_fd(struct loop_device *lo)
>  	if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
>  		blk_queue_write_cache(lo->lo_queue, false, false);
>  
> -	/* freeze request queue during the transition */
> -	blk_mq_freeze_queue(lo->lo_queue);
> +	/*
> +	 * Freeze the request queue when unbinding on a live file descriptor and
> +	 * thus an open device.  When called from ->release we are guaranteed
> +	 * that there is no I/O in progress already.
> +	 */
> +	if (!release)
> +		blk_mq_freeze_queue(lo->lo_queue);
>  
>  	destroy_workqueue(lo->workqueue);
>  	loop_free_idle_workers(lo, true);
> @@ -1163,7 +1168,8 @@ static void __loop_clr_fd(struct loop_device *lo)
>  	/* let user-space know about this change */
>  	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
>  	mapping_set_gfp_mask(filp->f_mapping, gfp);
> -	blk_mq_unfreeze_queue(lo->lo_queue);
> +	if (!release)
> +		blk_mq_unfreeze_queue(lo->lo_queue);
>  
>  	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
>  
> @@ -1201,7 +1207,7 @@ static void loop_rundown_workfn(struct work_struct *work)
>  	struct block_device *bdev = lo->lo_device;
>  	struct gendisk *disk = lo->lo_disk;
>  
> -	__loop_clr_fd(lo);
> +	__loop_clr_fd(lo, true);
>  	kobject_put(&bdev->bd_device.kobj);
>  	module_put(disk->fops->owner);
>  	loop_rundown_completed(lo);
> @@ -1247,7 +1253,7 @@ static int loop_clr_fd(struct loop_device *lo)
>  	lo->lo_state = Lo_rundown;
>  	mutex_unlock(&lo->lo_mutex);
>  
> -	__loop_clr_fd(lo);
> +	__loop_clr_fd(lo, false);
>  	loop_rundown_completed(lo);
>  	return 0;
>  }
> -- 
> 2.30.2
>
Christoph Hellwig Jan. 28, 2022, 6:48 a.m. UTC | #2
On Thu, Jan 27, 2022 at 12:01:43PM +0100, Jan Kara wrote:
> On Wed 26-01-22 16:50:39, Christoph Hellwig wrote:
> > ->release is only called after all outstanding I/O has completed, so only
> > freeze the queue when clearing the backing file of a live loop device.
> > 
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> 
> Looks good. I was just wondering: Is there anything which prevents us from
> moving blk_mq_freeze_queue() & blk_mq_unfreeze_queue() calls to
> loop_clr_fd() around __loop_clr_fd() call?

That would move more code into the freeze protection.  Including lo_mutex
which elsewhere is taken outside blk_freeze_queue.  The lo_mutex
acquisition is only for an assert, so we could skip that, so maybe we
can get there.  But the next patch adds another check for the release
flag so it doesn't seem worth it at this moment.
diff mbox series

Patch

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d9a0e2205762f..fc0cdd1612b1d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1110,7 +1110,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
 {
 	struct file *filp;
 	gfp_t gfp = lo->old_gfp_mask;
@@ -1139,8 +1139,13 @@ static void __loop_clr_fd(struct loop_device *lo)
 	if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
 		blk_queue_write_cache(lo->lo_queue, false, false);
 
-	/* freeze request queue during the transition */
-	blk_mq_freeze_queue(lo->lo_queue);
+	/*
+	 * Freeze the request queue when unbinding on a live file descriptor and
+	 * thus an open device.  When called from ->release we are guaranteed
+	 * that there is no I/O in progress already.
+	 */
+	if (!release)
+		blk_mq_freeze_queue(lo->lo_queue);
 
 	destroy_workqueue(lo->workqueue);
 	loop_free_idle_workers(lo, true);
@@ -1163,7 +1168,8 @@ static void __loop_clr_fd(struct loop_device *lo)
 	/* let user-space know about this change */
 	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
-	blk_mq_unfreeze_queue(lo->lo_queue);
+	if (!release)
+		blk_mq_unfreeze_queue(lo->lo_queue);
 
 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
 
@@ -1201,7 +1207,7 @@ static void loop_rundown_workfn(struct work_struct *work)
 	struct block_device *bdev = lo->lo_device;
 	struct gendisk *disk = lo->lo_disk;
 
-	__loop_clr_fd(lo);
+	__loop_clr_fd(lo, true);
 	kobject_put(&bdev->bd_device.kobj);
 	module_put(disk->fops->owner);
 	loop_rundown_completed(lo);
@@ -1247,7 +1253,7 @@ static int loop_clr_fd(struct loop_device *lo)
 	lo->lo_state = Lo_rundown;
 	mutex_unlock(&lo->lo_mutex);
 
-	__loop_clr_fd(lo);
+	__loop_clr_fd(lo, false);
 	loop_rundown_completed(lo);
 	return 0;
 }