diff mbox series

[06/10] btrfs: don't use btrfs_bio_wq_end_io for compressed writes

Message ID 20220504122524.558088-7-hch@lst.de (mailing list archive)
State New, archived
Headers show
Series [01/10] btrfs: don't double-defer bio completions for compressed reads | expand

Commit Message

Christoph Hellwig May 4, 2022, 12:25 p.m. UTC
Compressed write bio completion is the only user of btrfs_bio_wq_end_io
for writes, and the use of btrfs_bio_wq_end_io is a little suboptimal
here as we only really need user context for the final completion of a
compressed_bio structure, and not every single bio completion.

Add a work_struct to struct compressed_bio instead and use that to call
finish_compressed_bio_write.  This allows removing all handling of
write bios in the btrfs_bio_wq_end_io infrastructure.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/compression.c | 45 +++++++++++++++++++++---------------------
 fs/btrfs/compression.h |  7 +++++--
 fs/btrfs/ctree.h       |  2 +-
 fs/btrfs/disk-io.c     | 30 +++++++++++-----------------
 fs/btrfs/super.c       |  2 --
 5 files changed, 41 insertions(+), 45 deletions(-)

Comments

Nikolay Borisov May 11, 2022, 7:20 p.m. UTC | #1
On 4.05.22 г. 15:25 ч., Christoph Hellwig wrote:
> Compressed write bio completion is the only user of btrfs_bio_wq_end_io
> for writes, and the use of btrfs_bio_wq_end_io is a little suboptimal
> here as we only real need user context for the final completion of a
> compressed_bio structure, and not every single bio completion.
> 
> Add a work_struct to struct compressed_bio instead and use that to call
> finish_compressed_bio_write.  This allows to remove all handling of
> write bios in the btrfs_bio_wq_end_io infrastructure.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   fs/btrfs/compression.c | 45 +++++++++++++++++++++---------------------
>   fs/btrfs/compression.h |  7 +++++--
>   fs/btrfs/ctree.h       |  2 +-
>   fs/btrfs/disk-io.c     | 30 +++++++++++-----------------
>   fs/btrfs/super.c       |  2 --
>   5 files changed, 41 insertions(+), 45 deletions(-)
> 
> diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
> index f4564f32f6d93..9d5986a30a4a2 100644
> --- a/fs/btrfs/compression.c
> +++ b/fs/btrfs/compression.c
> @@ -403,6 +403,14 @@ static void finish_compressed_bio_write(struct compressed_bio *cb)
>   	kfree(cb);
>   }
>   
> +static void btrfs_finish_compressed_write_work(struct work_struct *work)
> +{
> +	struct compressed_bio *cb =
> +		container_of(work, struct compressed_bio, write_end_work);
> +
> +	finish_compressed_bio_write(cb);
> +}
> +
>   /*
>    * Do the cleanup once all the compressed pages hit the disk.  This will clear
>    * writeback on the file pages and free the compressed pages.
> @@ -414,29 +422,16 @@ static void end_compressed_bio_write(struct bio *bio)
>   {
>   	struct compressed_bio *cb = bio->bi_private;
>   
> -	if (!dec_and_test_compressed_bio(cb, bio))
> -		goto out;
> -
> -	btrfs_record_physical_zoned(cb->inode, cb->start, bio);
> +	if (dec_and_test_compressed_bio(cb, bio)) {
> +		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
>   
> -	finish_compressed_bio_write(cb);
> -out:
> +		btrfs_record_physical_zoned(cb->inode, cb->start, bio);
> +		queue_work(fs_info->compressed_write_workers,
> +			   &cb->write_end_work);
> +	}
>   	bio_put(bio);
>   }
>   
> -static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
> -					  struct bio *bio, int mirror_num)
> -{
> -	blk_status_t ret;
> -
> -	ASSERT(bio->bi_iter.bi_size);
> -	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
> -	if (ret)
> -		return ret;
> -	ret = btrfs_map_bio(fs_info, bio, mirror_num);
> -	return ret;
> -}
> -
>   /*
>    * Allocate a compressed_bio, which will be used to read/write on-disk
>    * (aka, compressed) * data.
> @@ -533,7 +528,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
>   	cb->compressed_pages = compressed_pages;
>   	cb->compressed_len = compressed_len;
>   	cb->writeback = writeback;
> -	cb->orig_bio = NULL;
> +	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
>   	cb->nr_pages = nr_pages;
>   
>   	if (blkcg_css)
> @@ -603,7 +598,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
>   					goto finish_cb;
>   			}
>   
> -			ret = submit_compressed_bio(fs_info, bio, 0);
> +			ASSERT(bio->bi_iter.bi_size);
> +			ret = btrfs_map_bio(fs_info, bio, 0);
>   			if (ret)
>   				goto finish_cb;
>   			bio = NULL;
> @@ -941,7 +937,12 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
>   						  fs_info->sectorsize);
>   			sums += fs_info->csum_size * nr_sectors;
>   
> -			ret = submit_compressed_bio(fs_info, comp_bio, mirror_num);
> +			ASSERT(comp_bio->bi_iter.bi_size);
> +			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
> +						  BTRFS_WQ_ENDIO_DATA);
> +			if (ret)
> +				goto finish_cb;
> +			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
>   			if (ret)
>   				goto finish_cb;
>   			comp_bio = NULL;
> diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
> index 2707404389a5d..4a40725cbf1db 100644
> --- a/fs/btrfs/compression.h
> +++ b/fs/btrfs/compression.h
> @@ -61,8 +61,11 @@ struct compressed_bio {
>   	blk_status_t status;
>   	int mirror_num;
>   
> -	/* for reads, this is the bio we are copying the data into */
> -	struct bio *orig_bio;
> +	union {
> +		/* for reads, this is the bio we are copying the data into */
> +		struct bio *orig_bio;
> +		struct work_struct write_end_work;
> +	};
>   
>   	/*
>   	 * the start of a variable length array of checksums only
> diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
> index a3739143bf44c..6cf699959286d 100644
> --- a/fs/btrfs/ctree.h
> +++ b/fs/btrfs/ctree.h
> @@ -855,7 +855,7 @@ struct btrfs_fs_info {
>   	struct btrfs_workqueue *endio_meta_workers;
>   	struct workqueue_struct *endio_raid56_workers;
>   	struct workqueue_struct *rmw_workers;
> -	struct btrfs_workqueue *endio_meta_write_workers;
> +	struct workqueue_struct *compressed_write_workers;
>   	struct btrfs_workqueue *endio_write_workers;
>   	struct btrfs_workqueue *endio_freespace_worker;
>   	struct btrfs_workqueue *caching_workers;
> diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
> index ef117eaab468c..aa56ba9378e1f 100644
> --- a/fs/btrfs/disk-io.c
> +++ b/fs/btrfs/disk-io.c
> @@ -748,19 +748,10 @@ static void end_workqueue_bio(struct bio *bio)
>   	fs_info = end_io_wq->info;
>   	end_io_wq->status = bio->bi_status;
>   
> -	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
> -		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
> -			wq = fs_info->endio_meta_write_workers;
> -		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
> -			wq = fs_info->endio_freespace_worker;
> -		else
> -			wq = fs_info->endio_write_workers;
> -	} else {
> -		if (end_io_wq->metadata)
> -			wq = fs_info->endio_meta_workers;
> -		else
> -			wq = fs_info->endio_workers;
> -	}
> +	if (end_io_wq->metadata)
> +		wq = fs_info->endio_meta_workers;
> +	else
> +		wq = fs_info->endio_workers;
>   
>   	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
>   	btrfs_queue_work(wq, &end_io_wq->work);
> @@ -771,6 +762,9 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
>   {
>   	struct btrfs_end_io_wq *end_io_wq;
>   
> +	if (WARN_ON_ONCE(btrfs_op(bio) != BTRFS_MAP_WRITE))
> +		return BLK_STS_IOERR;

This is broken w.r.t. btrfs_submit_metadata_bio, since in
that function the code is:

  if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
                       /*
                        * called for a read, do the setup so that checksum validation
                        * can happen in the async kernel threads
                        */
                     ret = btrfs_bio_wq_end_io(fs_info, bio,
                                                 BTRFS_WQ_ENDIO_METADATA);
                       if (!ret)
                               ret = btrfs_map_bio(fs_info, bio, mirror_num);



So metadata reads will end up in a function which disallows reads.
This results in a failure to mount when reading various metadata.

I guess the correct fix is to include the following hunk from patch 8 in this series:

@@ -916,14 +839,7 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
  	bio->bi_opf |= REQ_META;
  
  	if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
-		/*
-		 * called for a read, do the setup so that checksum validation
-		 * can happen in the async kernel threads
-		 */
-		ret = btrfs_bio_wq_end_io(fs_info, bio,
-					  BTRFS_WQ_ENDIO_METADATA);
-		if (!ret)
-			ret = btrfs_map_bio(fs_info, bio, mirror_num);
+		ret = btrfs_map_bio(fs_info, bio, mirror_num);





> +
>   	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
>   	if (!end_io_wq)
>   		return BLK_STS_RESOURCE;
> @@ -2273,6 +2267,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
>   		destroy_workqueue(fs_info->endio_raid56_workers);
>   	if (fs_info->rmw_workers)
>   		destroy_workqueue(fs_info->rmw_workers);
> +	if (fs_info->compressed_write_workers)
> +		destroy_workqueue(fs_info->compressed_write_workers);
>   	btrfs_destroy_workqueue(fs_info->endio_write_workers);
>   	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
>   	btrfs_destroy_workqueue(fs_info->delayed_workers);
> @@ -2287,7 +2283,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
>   	 * queues can do metadata I/O operations.
>   	 */
>   	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
> -	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
>   }
>   
>   static void free_root_extent_buffers(struct btrfs_root *root)
> @@ -2469,15 +2464,14 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
>   	fs_info->endio_meta_workers =
>   		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
>   				      max_active, 4);
> -	fs_info->endio_meta_write_workers =
> -		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
> -				      max_active, 2);
>   	fs_info->endio_raid56_workers =
>   		alloc_workqueue("btrfs-endio-raid56", flags, max_active);
>   	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
>   	fs_info->endio_write_workers =
>   		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
>   				      max_active, 2);
> +	fs_info->compressed_write_workers =
> +		alloc_workqueue("btrfs-compressed-write", flags, max_active);
>   	fs_info->endio_freespace_worker =
>   		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
>   				      max_active, 0);
> @@ -2492,7 +2486,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
>   	if (!(fs_info->workers && fs_info->hipri_workers &&
>   	      fs_info->delalloc_workers && fs_info->flush_workers &&
>   	      fs_info->endio_workers && fs_info->endio_meta_workers &&
> -	      fs_info->endio_meta_write_workers &&
> +	      fs_info->compressed_write_workers &&
>   	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
>   	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
>   	      fs_info->caching_workers && fs_info->fixup_workers &&
> diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
> index b1fdc6a26c76e..9c683c466d585 100644
> --- a/fs/btrfs/super.c
> +++ b/fs/btrfs/super.c
> @@ -1908,8 +1908,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
>   	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
> -	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
> -				new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
>   	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
Nikolay Borisov May 11, 2022, 7:28 p.m. UTC | #2
On 11.05.22 г. 22:20 ч., Nikolay Borisov wrote:
> 
> 
> On 4.05.22 г. 15:25 ч., Christoph Hellwig wrote:
>> Compressed write bio completion is the only user of btrfs_bio_wq_end_io
>> for writes, and the use of btrfs_bio_wq_end_io is a little suboptimal
>> here as we only real need user context for the final completion of a
>> compressed_bio structure, and not every single bio completion.
>>
>> Add a work_struct to struct compressed_bio instead and use that to call
>> finish_compressed_bio_write.  This allows to remove all handling of
>> write bios in the btrfs_bio_wq_end_io infrastructure.
>>
>> Signed-off-by: Christoph Hellwig <hch@lst.de>

<snip>


>> @@ -771,6 +762,9 @@ blk_status_t btrfs_bio_wq_end_io(struct 
>> btrfs_fs_info *info, struct bio *bio,
>>   {
>>       struct btrfs_end_io_wq *end_io_wq;
>> +    if (WARN_ON_ONCE(btrfs_op(bio) != BTRFS_MAP_WRITE))
>> +        return BLK_STS_IOERR;
> 
> This is broken w.r.t to btrfs_submit_metadata_bio since in
> that function the code is:
> 
>   if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
>                        /*
>                         * called for a read, do the setup so that 
> checksum validation
>                         * can happen in the async kernel threads
>                         */
>                      ret = btrfs_bio_wq_end_io(fs_info, bio,
>                                                  BTRFS_WQ_ENDIO_METADATA);
>                        if (!ret)
>                                ret = btrfs_map_bio(fs_info, bio, 
> mirror_num);
> 
> 
> 
> So metadata reads  will end up in function which disallows reads..
> This results in failure to mount when reading various metadata.
> 
> I guess the correct fix is to include the following hunk from patch 8 in 
> this series:
> 
> @@ -916,14 +839,7 @@ void btrfs_submit_metadata_bio(struct inode *inode, 
> struct bio *bio, int mirror_
>       bio->bi_opf |= REQ_META;
> 
>       if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
> -        /*
> -         * called for a read, do the setup so that checksum validation
> -         * can happen in the async kernel threads
> -         */
> -        ret = btrfs_bio_wq_end_io(fs_info, bio,
> -                      BTRFS_WQ_ENDIO_METADATA);
> -        if (!ret)
> -            ret = btrfs_map_bio(fs_info, bio, mirror_num);
> +        ret = btrfs_map_bio(fs_info, bio, mirror_num);
> 
> 
> 
> 
> 

Even if I apply this hunk to patch 6 I get more I/O failures, this time 
from btrfs_submit_data_read_bio; again there is a relevant hunk in patch 8:

-	ret = btrfs_bio_wq_end_io(fs_info, bio,
-			btrfs_is_free_space_inode(BTRFS_I(inode)) ?
-			BTRFS_WQ_ENDIO_FREE_SPACE : BTRFS_WQ_ENDIO_DATA);
-	if (ret)
-		goto out;
-

And then there is another failure, due to DIO, in btrfs_submit_dio_bio's call of 
btrfs_bio_wq_end_io if we want to read.


Please fix those to ensure the series is actually bisectable.


<Snip>
Christoph Hellwig May 12, 2022, 5:56 a.m. UTC | #3
On Wed, May 11, 2022 at 10:28:02PM +0300, Nikolay Borisov wrote:
> Please fix those to ensure the series is actually bisectable.

Yeah.  Funny thing is, this is how I had it until the last repost,
and I reshuffled it for some reason.  I should have just left it as-is.
diff mbox series

Patch

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index f4564f32f6d93..9d5986a30a4a2 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -403,6 +403,14 @@  static void finish_compressed_bio_write(struct compressed_bio *cb)
 	kfree(cb);
 }
 
+static void btrfs_finish_compressed_write_work(struct work_struct *work)
+{
+	struct compressed_bio *cb =
+		container_of(work, struct compressed_bio, write_end_work);
+
+	finish_compressed_bio_write(cb);
+}
+
 /*
  * Do the cleanup once all the compressed pages hit the disk.  This will clear
  * writeback on the file pages and free the compressed pages.
@@ -414,29 +422,16 @@  static void end_compressed_bio_write(struct bio *bio)
 {
 	struct compressed_bio *cb = bio->bi_private;
 
-	if (!dec_and_test_compressed_bio(cb, bio))
-		goto out;
-
-	btrfs_record_physical_zoned(cb->inode, cb->start, bio);
+	if (dec_and_test_compressed_bio(cb, bio)) {
+		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
 
-	finish_compressed_bio_write(cb);
-out:
+		btrfs_record_physical_zoned(cb->inode, cb->start, bio);
+		queue_work(fs_info->compressed_write_workers,
+			   &cb->write_end_work);
+	}
 	bio_put(bio);
 }
 
-static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
-					  struct bio *bio, int mirror_num)
-{
-	blk_status_t ret;
-
-	ASSERT(bio->bi_iter.bi_size);
-	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
-	if (ret)
-		return ret;
-	ret = btrfs_map_bio(fs_info, bio, mirror_num);
-	return ret;
-}
-
 /*
  * Allocate a compressed_bio, which will be used to read/write on-disk
  * (aka, compressed) * data.
@@ -533,7 +528,7 @@  blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	cb->compressed_pages = compressed_pages;
 	cb->compressed_len = compressed_len;
 	cb->writeback = writeback;
-	cb->orig_bio = NULL;
+	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
 	cb->nr_pages = nr_pages;
 
 	if (blkcg_css)
@@ -603,7 +598,8 @@  blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 					goto finish_cb;
 			}
 
-			ret = submit_compressed_bio(fs_info, bio, 0);
+			ASSERT(bio->bi_iter.bi_size);
+			ret = btrfs_map_bio(fs_info, bio, 0);
 			if (ret)
 				goto finish_cb;
 			bio = NULL;
@@ -941,7 +937,12 @@  void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 						  fs_info->sectorsize);
 			sums += fs_info->csum_size * nr_sectors;
 
-			ret = submit_compressed_bio(fs_info, comp_bio, mirror_num);
+			ASSERT(comp_bio->bi_iter.bi_size);
+			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
+						  BTRFS_WQ_ENDIO_DATA);
+			if (ret)
+				goto finish_cb;
+			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
 			if (ret)
 				goto finish_cb;
 			comp_bio = NULL;
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 2707404389a5d..4a40725cbf1db 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -61,8 +61,11 @@  struct compressed_bio {
 	blk_status_t status;
 	int mirror_num;
 
-	/* for reads, this is the bio we are copying the data into */
-	struct bio *orig_bio;
+	union {
+		/* for reads, this is the bio we are copying the data into */
+		struct bio *orig_bio;
+		struct work_struct write_end_work;
+	};
 
 	/*
 	 * the start of a variable length array of checksums only
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a3739143bf44c..6cf699959286d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -855,7 +855,7 @@  struct btrfs_fs_info {
 	struct btrfs_workqueue *endio_meta_workers;
 	struct workqueue_struct *endio_raid56_workers;
 	struct workqueue_struct *rmw_workers;
-	struct btrfs_workqueue *endio_meta_write_workers;
+	struct workqueue_struct *compressed_write_workers;
 	struct btrfs_workqueue *endio_write_workers;
 	struct btrfs_workqueue *endio_freespace_worker;
 	struct btrfs_workqueue *caching_workers;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index ef117eaab468c..aa56ba9378e1f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -748,19 +748,10 @@  static void end_workqueue_bio(struct bio *bio)
 	fs_info = end_io_wq->info;
 	end_io_wq->status = bio->bi_status;
 
-	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
-		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-			wq = fs_info->endio_meta_write_workers;
-		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-			wq = fs_info->endio_freespace_worker;
-		else
-			wq = fs_info->endio_write_workers;
-	} else {
-		if (end_io_wq->metadata)
-			wq = fs_info->endio_meta_workers;
-		else
-			wq = fs_info->endio_workers;
-	}
+	if (end_io_wq->metadata)
+		wq = fs_info->endio_meta_workers;
+	else
+		wq = fs_info->endio_workers;
 
 	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
 	btrfs_queue_work(wq, &end_io_wq->work);
@@ -771,6 +762,9 @@  blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 {
 	struct btrfs_end_io_wq *end_io_wq;
 
+	if (WARN_ON_ONCE(btrfs_op(bio) != BTRFS_MAP_WRITE))
+		return BLK_STS_IOERR;
+
 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
 	if (!end_io_wq)
 		return BLK_STS_RESOURCE;
@@ -2273,6 +2267,8 @@  static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 		destroy_workqueue(fs_info->endio_raid56_workers);
 	if (fs_info->rmw_workers)
 		destroy_workqueue(fs_info->rmw_workers);
+	if (fs_info->compressed_write_workers)
+		destroy_workqueue(fs_info->compressed_write_workers);
 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
 	btrfs_destroy_workqueue(fs_info->delayed_workers);
@@ -2287,7 +2283,6 @@  static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 	 * queues can do metadata I/O operations.
 	 */
 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
-	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
 }
 
 static void free_root_extent_buffers(struct btrfs_root *root)
@@ -2469,15 +2464,14 @@  static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 	fs_info->endio_meta_workers =
 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
 				      max_active, 4);
-	fs_info->endio_meta_write_workers =
-		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
-				      max_active, 2);
 	fs_info->endio_raid56_workers =
 		alloc_workqueue("btrfs-endio-raid56", flags, max_active);
 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
 	fs_info->endio_write_workers =
 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
 				      max_active, 2);
+	fs_info->compressed_write_workers =
+		alloc_workqueue("btrfs-compressed-write", flags, max_active);
 	fs_info->endio_freespace_worker =
 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
 				      max_active, 0);
@@ -2492,7 +2486,7 @@  static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 	if (!(fs_info->workers && fs_info->hipri_workers &&
 	      fs_info->delalloc_workers && fs_info->flush_workers &&
 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
-	      fs_info->endio_meta_write_workers &&
+	      fs_info->compressed_write_workers &&
 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
 	      fs_info->caching_workers && fs_info->fixup_workers &&
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b1fdc6a26c76e..9c683c466d585 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1908,8 +1908,6 @@  static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
-	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
-				new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);