diff mbox series

[15/20] btrfs: remove the io_pages field in struct extent_buffer

Message ID 20230309090526.332550-16-hch@lst.de (mailing list archive)
State New, archived
Headers show
Series [01/20] btrfs: mark extent_buffer_under_io static | expand

Commit Message

Christoph Hellwig March 9, 2023, 9:05 a.m. UTC
No need to track the number of pages under I/O now that each
extent_buffer is read and written using a single bio.  For the
read side we need to grab an extra reference for the duration of
the I/O to prevent eviction, though.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/extent_io.c | 17 +++++------------
 fs/btrfs/extent_io.h |  1 -
 2 files changed, 5 insertions(+), 13 deletions(-)

Comments

Johannes Thumshirn March 9, 2023, 4:01 p.m. UTC | #1
Looks good,
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Qu Wenruo March 10, 2023, 8:53 a.m. UTC | #2
On 2023/3/9 17:05, Christoph Hellwig wrote:
> No need to track the number of pages under I/O now that each
> extent_buffer is read and written using a single bio.  For the
> read side we need to grab an extra reference for the duration of
> the I/O to prevent eviction, though.

But don't we already have an atomic_inc_not_zero(&eb->refs) call in the 
old submit_eb_pages() function?

And I didn't find that function got modified in the series.

Thanks,
Qu

> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   fs/btrfs/extent_io.c | 17 +++++------------
>   fs/btrfs/extent_io.h |  1 -
>   2 files changed, 5 insertions(+), 13 deletions(-)
> 
> diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
> index 16522bcf5d4a10..24df1247d81d88 100644
> --- a/fs/btrfs/extent_io.c
> +++ b/fs/btrfs/extent_io.c
> @@ -1770,8 +1770,6 @@ static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
>   	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
>   		struct page *page = bvec->bv_page;
>   
> -		atomic_dec(&eb->io_pages);
> -
>   		if (!uptodate) {
>   			ClearPageUptodate(page);
>   			btrfs_page_set_error(fs_info, page, eb->start, eb->len);
> @@ -1796,7 +1794,6 @@ static void prepare_eb_write(struct extent_buffer *eb)
>   	unsigned long end;
>   
>   	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
> -	atomic_set(&eb->io_pages, num_extent_pages(eb));
>   
>   	/* Set btree blocks beyond nritems with 0 to avoid stale content */
>   	nritems = btrfs_header_nritems(eb);
> @@ -3236,8 +3233,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
>   
>   static int extent_buffer_under_io(const struct extent_buffer *eb)
>   {
> -	return (atomic_read(&eb->io_pages) ||
> -		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
> +	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
>   		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
>   }
>   
> @@ -3374,7 +3370,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
>   
>   	spin_lock_init(&eb->refs_lock);
>   	atomic_set(&eb->refs, 1);
> -	atomic_set(&eb->io_pages, 0);
>   
>   	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
>   
> @@ -3491,9 +3486,9 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
>   	 * adequately protected by the refcount, but the TREE_REF bit and
>   	 * its corresponding reference are not. To protect against this
>   	 * class of races, we call check_buffer_tree_ref from the codepaths
> -	 * which trigger io after they set eb->io_pages. Note that once io is
> -	 * initiated, TREE_REF can no longer be cleared, so that is the
> -	 * moment at which any such race is best fixed.
> +	 * which trigger io. Note that once io is initiated, TREE_REF can no
> +	 * longer be cleared, so that is the moment at which any such race is
> +	 * best fixed.
>   	 */
>   	refs = atomic_read(&eb->refs);
>   	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
> @@ -4063,7 +4058,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
>   	struct bio_vec *bvec;
>   	u32 bio_offset = 0;
>   
> -	atomic_inc(&eb->refs);
>   	eb->read_mirror = bbio->mirror_num;
>   
>   	if (uptodate &&
> @@ -4078,7 +4072,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
>   	}
>   
>   	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
> -		atomic_dec(&eb->io_pages);
>   		end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset,
>   			      bvec->bv_len);
>   		bio_offset += bvec->bv_len;
> @@ -4101,8 +4094,8 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
>   
>   	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
>   	eb->read_mirror = 0;
> -	atomic_set(&eb->io_pages, num_pages);
>   	check_buffer_tree_ref(eb);
> +	atomic_inc(&eb->refs);
>   
>   	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
>   			       REQ_OP_READ | REQ_META,
> diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
> index 342412d37a7b4b..12854a2b48f060 100644
> --- a/fs/btrfs/extent_io.h
> +++ b/fs/btrfs/extent_io.h
> @@ -79,7 +79,6 @@ struct extent_buffer {
>   	struct btrfs_fs_info *fs_info;
>   	spinlock_t refs_lock;
>   	atomic_t refs;
> -	atomic_t io_pages;
>   	int read_mirror;
>   	struct rcu_head rcu_head;
>   	pid_t lock_owner;
Christoph Hellwig March 10, 2023, 11:50 a.m. UTC | #3
On Fri, Mar 10, 2023 at 04:53:14PM +0800, Qu Wenruo wrote:
>
>
> On 2023/3/9 17:05, Christoph Hellwig wrote:
>> No need to track the number of pages under I/O now that each
>> extent_buffer is read and written using a single bio.  For the
>> read side we need to grab an extra reference for the duration of
>> the I/O to prevent eviction, though.
>
> But don't we already have an atomic_inc_not_zero(&eb->refs) call in the old 
> submit_eb_pages() function?

I guess you mean submit_eb_page?  That deals with writeback, so doesn't
help with having a reference during reads, which this patch adds.

> And I didn't find that function got modified in the series.

It doesn't have to.
diff mbox series

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 16522bcf5d4a10..24df1247d81d88 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1770,8 +1770,6 @@  static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
 		struct page *page = bvec->bv_page;
 
-		atomic_dec(&eb->io_pages);
-
 		if (!uptodate) {
 			ClearPageUptodate(page);
 			btrfs_page_set_error(fs_info, page, eb->start, eb->len);
@@ -1796,7 +1794,6 @@  static void prepare_eb_write(struct extent_buffer *eb)
 	unsigned long end;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
-	atomic_set(&eb->io_pages, num_extent_pages(eb));
 
 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
 	nritems = btrfs_header_nritems(eb);
@@ -3236,8 +3233,7 @@  static void __free_extent_buffer(struct extent_buffer *eb)
 
 static int extent_buffer_under_io(const struct extent_buffer *eb)
 {
-	return (atomic_read(&eb->io_pages) ||
-		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 }
 
@@ -3374,7 +3370,6 @@  __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 
 	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
-	atomic_set(&eb->io_pages, 0);
 
 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
 
@@ -3491,9 +3486,9 @@  static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * adequately protected by the refcount, but the TREE_REF bit and
 	 * its corresponding reference are not. To protect against this
 	 * class of races, we call check_buffer_tree_ref from the codepaths
-	 * which trigger io after they set eb->io_pages. Note that once io is
-	 * initiated, TREE_REF can no longer be cleared, so that is the
-	 * moment at which any such race is best fixed.
+	 * which trigger io. Note that once io is initiated, TREE_REF can no
+	 * longer be cleared, so that is the moment at which any such race is
+	 * best fixed.
 	 */
 	refs = atomic_read(&eb->refs);
 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -4063,7 +4058,6 @@  static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
 	struct bio_vec *bvec;
 	u32 bio_offset = 0;
 
-	atomic_inc(&eb->refs);
 	eb->read_mirror = bbio->mirror_num;
 
 	if (uptodate &&
@@ -4078,7 +4072,6 @@  static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
 	}
 
 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
-		atomic_dec(&eb->io_pages);
 		end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset,
 			      bvec->bv_len);
 		bio_offset += bvec->bv_len;
@@ -4101,8 +4094,8 @@  static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
 
 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
 	eb->read_mirror = 0;
-	atomic_set(&eb->io_pages, num_pages);
 	check_buffer_tree_ref(eb);
+	atomic_inc(&eb->refs);
 
 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
 			       REQ_OP_READ | REQ_META,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 342412d37a7b4b..12854a2b48f060 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -79,7 +79,6 @@  struct extent_buffer {
 	struct btrfs_fs_info *fs_info;
 	spinlock_t refs_lock;
 	atomic_t refs;
-	atomic_t io_pages;
 	int read_mirror;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;