[3/4] btrfs: convert relocate_one_page() to relocate_one_folio()

Message ID b723970ca03542e6863442ded58651cfcdb8fe24.1705605787.git.rgoldwyn@suse.com (mailing list archive)
State New, archived
Series [1/4] btrfs: Use IS_ERR() instead of checking folio for NULL

Commit Message

Goldwyn Rodrigues Jan. 18, 2024, 7:46 p.m. UTC
From: Goldwyn Rodrigues <rgoldwyn@suse.com>

Convert page references to folios and call the respective folio
functions.

Since find_or_create_page() takes a gfp mask argument, call
__filemap_get_folio() instead of filemap_grab_folio(), which does not
take one.

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
---
 fs/btrfs/relocation.c | 87 ++++++++++++++++++++++---------------------
 1 file changed, 44 insertions(+), 43 deletions(-)
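For context, filemap_grab_folio() is (roughly) just a wrapper that supplies
the mapping's default gfp mask, which is why passing the btrfs write mask
needs the __filemap_get_folio() form:

	static inline struct folio *filemap_grab_folio(struct address_space *mapping,
						       pgoff_t index)
	{
		return __filemap_get_folio(mapping, index,
					   FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					   mapping_gfp_mask(mapping));
	}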

Comments

Boris Burkov Jan. 18, 2024, 9:43 p.m. UTC | #1
On Thu, Jan 18, 2024 at 01:46:39PM -0600, Goldwyn Rodrigues wrote:
> From: Goldwyn Rodrigues <rgoldwyn@suse.com>
> 
> Convert page references to folios and call the respective folio
> functions.
> 
> Since find_or_create_page() takes a gfp mask argument, call
> __filemap_get_folio() instead of filemap_grab_folio(), which does not
> take one.
> 
> Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
> ---
>  fs/btrfs/relocation.c | 87 ++++++++++++++++++++++---------------------
>  1 file changed, 44 insertions(+), 43 deletions(-)
> 
> diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
> index c365bfc60652..f4fd4257adae 100644
> --- a/fs/btrfs/relocation.c
> +++ b/fs/btrfs/relocation.c
> @@ -2849,7 +2849,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
>  	 * btrfs_do_readpage() call of previously relocated file cluster.
>  	 *
>  	 * If the current cluster starts in the above range, btrfs_do_readpage()
> -	 * will skip the read, and relocate_one_page() will later writeback
> +	 * will skip the read, and relocate_one_folio() will later writeback
>  	 * the padding zeros as new data, causing data corruption.
>  	 *
>  	 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
> @@ -2983,68 +2983,69 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
>  	return cluster->boundary[cluster_nr + 1] - 1;
>  }
>  
> -static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
> +static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
>  			     const struct file_extent_cluster *cluster,
> -			     int *cluster_nr, unsigned long page_index)
> +			     int *cluster_nr, unsigned long index)
>  {
>  	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
>  	u64 offset = BTRFS_I(inode)->index_cnt;
>  	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
>  	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
> -	struct page *page;
> -	u64 page_start;
> -	u64 page_end;
> +	struct folio *folio;
> +	u64 start;
> +	u64 end;

This patch throws out this function's labelling of the start/index/end
variables with 'page_', which I think was pretty useful given the other
starts/ends like 'extent_' and 'clamped_'. Namespacing the indices makes
the code easier to follow, IMO.

>  	u64 cur;
>  	int ret;
>  
> -	ASSERT(page_index <= last_index);
> -	page = find_lock_page(inode->i_mapping, page_index);
> -	if (!page) {
> +	ASSERT(index <= last_index);
> +	folio = filemap_lock_folio(inode->i_mapping, index);
> +	if (IS_ERR(folio)) {
>  		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
> -				page_index, last_index + 1 - page_index);
> -		page = find_or_create_page(inode->i_mapping, page_index, mask);
> -		if (!page)
> -			return -ENOMEM;
> +				index, last_index + 1 - index);
> +		folio = __filemap_get_folio(inode->i_mapping, index,
> +				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
> +		if (IS_ERR(folio))
> +			return PTR_ERR(folio);
>  	}
>  
> -	if (PageReadahead(page))
> +	if (folio_test_readahead(folio))
>  		page_cache_async_readahead(inode->i_mapping, ra, NULL,
> -				page_folio(page), page_index,
> -				last_index + 1 - page_index);
> +				folio, index,
> +				last_index + 1 - index);
>  
> -	if (!PageUptodate(page)) {
> -		btrfs_read_folio(NULL, page_folio(page));
> -		lock_page(page);
> -		if (!PageUptodate(page)) {
> +	if (!folio_test_uptodate(folio)) {
> +		btrfs_read_folio(NULL, folio);
> +		folio_lock(folio);
> +		if (!folio_test_uptodate(folio)) {
>  			ret = -EIO;
> -			goto release_page;
> +			goto release;
>  		}
>  	}
>  
>  	/*
> -	 * We could have lost page private when we dropped the lock to read the
> -	 * page above, make sure we set_page_extent_mapped here so we have any
> +	 * We could have lost folio private when we dropped the lock to read the
> +	 * folio above, make sure we set_page_extent_mapped here so we have any
>  	 * of the subpage blocksize stuff we need in place.
>  	 */
> -	ret = set_page_extent_mapped(page);
> +	ret = set_folio_extent_mapped(folio);
>  	if (ret < 0)
> -		goto release_page;
> +		goto release;
>  
> -	page_start = page_offset(page);
> -	page_end = page_start + PAGE_SIZE - 1;
> +	start = folio_pos(folio);
> +	end = start + PAGE_SIZE - 1;
>  
>  	/*
>  	 * Start from the cluster, as for subpage case, the cluster can start
> -	 * inside the page.
> +	 * inside the folio.
>  	 */
> -	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
> -	while (cur <= page_end) {
> +	cur = max(start, cluster->boundary[*cluster_nr] - offset);
> +	while (cur <= end) {
>  		struct extent_state *cached_state = NULL;
>  		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
>  		u64 extent_end = get_cluster_boundary_end(cluster,
>  						*cluster_nr) - offset;
> -		u64 clamped_start = max(page_start, extent_start);
> -		u64 clamped_end = min(page_end, extent_end);
> +		u64 clamped_start = max(start, extent_start);
> +		u64 clamped_end = min(end, extent_end);

e.g., I think these lines lose clarity from s/page_start/start/
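Something like this would keep the namespacing (folio_start/folio_end are
just suggested names here, not what the patch uses):

		u64 clamped_start = max(folio_start, extent_start);
		u64 clamped_end = min(folio_end, extent_end);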

>  		u32 clamped_len = clamped_end + 1 - clamped_start;
>  
>  		/* Reserve metadata for this range */
> @@ -3052,7 +3053,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
>  						      clamped_len, clamped_len,
>  						      false);
>  		if (ret)
> -			goto release_page;
> +			goto release;
>  
>  		/* Mark the range delalloc and dirty for later writeback */
>  		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
> @@ -3068,20 +3069,20 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
>  							clamped_len, true);
>  			btrfs_delalloc_release_extents(BTRFS_I(inode),
>  						       clamped_len);
> -			goto release_page;
> +			goto release;
>  		}
> -		btrfs_folio_set_dirty(fs_info, page_folio(page),
> +		btrfs_folio_set_dirty(fs_info, folio,
>  				      clamped_start, clamped_len);

Does this fit on one line now?
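i.e., presumably:

		btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);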

>  
>  		/*
> -		 * Set the boundary if it's inside the page.
> +		 * Set the boundary if it's inside the folio.
>  		 * Data relocation requires the destination extents to have the
>  		 * same size as the source.
>  		 * EXTENT_BOUNDARY bit prevents current extent from being merged
>  		 * with previous extent.
>  		 */
>  		if (in_range(cluster->boundary[*cluster_nr] - offset,
> -			     page_start, PAGE_SIZE)) {
> +			     start, PAGE_SIZE)) {
>  			u64 boundary_start = cluster->boundary[*cluster_nr] -
>  						offset;
>  			u64 boundary_end = boundary_start +
> @@ -3104,8 +3105,8 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
>  				break;
>  		}
>  	}
> -	unlock_page(page);
> -	put_page(page);
> +	folio_unlock(folio);
> +	folio_put(folio);
>  
>  	balance_dirty_pages_ratelimited(inode->i_mapping);
>  	btrfs_throttle(fs_info);
> @@ -3113,9 +3114,9 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
>  		ret = -ECANCELED;
>  	return ret;
>  
> -release_page:
> -	unlock_page(page);
> -	put_page(page);
> +release:
> +	folio_unlock(folio);
> +	folio_put(folio);
>  	return ret;
>  }
>  
> @@ -3150,7 +3151,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
>  	last_index = (cluster->end - offset) >> PAGE_SHIFT;
>  	for (index = (cluster->start - offset) >> PAGE_SHIFT;
>  	     index <= last_index && !ret; index++)
> -		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
> +		ret = relocate_one_folio(inode, ra, cluster, &cluster_nr, index);
>  	if (ret == 0)
>  		WARN_ON(cluster_nr != cluster->nr);
>  out:
> -- 
> 2.43.0
>
David Sterba Jan. 22, 2024, 8:52 p.m. UTC | #2
On Thu, Jan 18, 2024 at 01:46:39PM -0600, Goldwyn Rodrigues wrote:
> From: Goldwyn Rodrigues <rgoldwyn@suse.com>
> 
> Convert page references to folios and call the respective folio
> functions.
> 
> Since find_or_create_page() takes a gfp mask argument, call
> __filemap_get_folio() instead of filemap_grab_folio(), which does not
> take one.
> 
> Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
> ---
>  fs/btrfs/relocation.c | 87 ++++++++++++++++++++++---------------------
>  1 file changed, 44 insertions(+), 43 deletions(-)
> 
> diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
> index c365bfc60652..f4fd4257adae 100644
> --- a/fs/btrfs/relocation.c
> +++ b/fs/btrfs/relocation.c
> @@ -2849,7 +2849,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
>  	 * btrfs_do_readpage() call of previously relocated file cluster.
>  	 *
>  	 * If the current cluster starts in the above range, btrfs_do_readpage()
> -	 * will skip the read, and relocate_one_page() will later writeback
> +	 * will skip the read, and relocate_one_folio() will later writeback
>  	 * the padding zeros as new data, causing data corruption.
>  	 *
>  	 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
> @@ -2983,68 +2983,69 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
>  	return cluster->boundary[cluster_nr + 1] - 1;
>  }
>  
> -static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
> +static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
>  			     const struct file_extent_cluster *cluster,
> -			     int *cluster_nr, unsigned long page_index)
> +			     int *cluster_nr, unsigned long index)
>  {
>  	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
>  	u64 offset = BTRFS_I(inode)->index_cnt;
>  	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
>  	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
> -	struct page *page;
> -	u64 page_start;
> -	u64 page_end;
> +	struct folio *folio;
> +	u64 start;
> +	u64 end;
>  	u64 cur;
>  	int ret;
>  
> -	ASSERT(page_index <= last_index);
> -	page = find_lock_page(inode->i_mapping, page_index);
> -	if (!page) {
> +	ASSERT(index <= last_index);
> +	folio = filemap_lock_folio(inode->i_mapping, index);
> +	if (IS_ERR(folio)) {
>  		page_cache_sync_readahead(inode->i_mapping, ra, NULL,

How do page_cache_sync_readahead() and folios interact? We still have
page == folio, but large folios are on the way, so do we need something
to make it work? If there's an assumption about pages and folios, this
could be turned into an assertion so we don't forget about it later.
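Something as simple as, say,

	ASSERT(folio_order(folio) == 0);

right after the folio is locked would document the current page == folio
assumption.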

> -				page_index, last_index + 1 - page_index);
> -		page = find_or_create_page(inode->i_mapping, page_index, mask);
> -		if (!page)
> -			return -ENOMEM;
> +				index, last_index + 1 - index);
> +		folio = __filemap_get_folio(inode->i_mapping, index,
> +				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);

Please format the line continuation so the parameters start under the
opening '('. This has been the preferred style, although there's still a
lot of one/two-tab next-line indentation around.
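i.e. something like:

		folio = __filemap_get_folio(inode->i_mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					    mask);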

> +		if (IS_ERR(folio))
> +			return PTR_ERR(folio);
>  	}
>  
> -	if (PageReadahead(page))
> +	if (folio_test_readahead(folio))
>  		page_cache_async_readahead(inode->i_mapping, ra, NULL,
> -				page_folio(page), page_index,
> -				last_index + 1 - page_index);
> +				folio, index,
> +				last_index + 1 - index);

Same here
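i.e.:

		page_cache_async_readahead(inode->i_mapping, ra, NULL,
					   folio, index,
					   last_index + 1 - index);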

>  
> -	if (!PageUptodate(page)) {
> -		btrfs_read_folio(NULL, page_folio(page));
> -		lock_page(page);
> -		if (!PageUptodate(page)) {
> +	if (!folio_test_uptodate(folio)) {
> +		btrfs_read_folio(NULL, folio);
> +		folio_lock(folio);
> +		if (!folio_test_uptodate(folio)) {
>  			ret = -EIO;
> -			goto release_page;
> +			goto release;
>  		}
>  	}
>  
>  	/*
> -	 * We could have lost page private when we dropped the lock to read the
> -	 * page above, make sure we set_page_extent_mapped here so we have any
> +	 * We could have lost folio private when we dropped the lock to read the
> +	 * folio above, make sure we set_page_extent_mapped here so we have any
>  	 * of the subpage blocksize stuff we need in place.
>  	 */
> -	ret = set_page_extent_mapped(page);
> +	ret = set_folio_extent_mapped(folio);
>  	if (ret < 0)
> -		goto release_page;
> +		goto release;
>  
> -	page_start = page_offset(page);
> -	page_end = page_start + PAGE_SIZE - 1;
> +	start = folio_pos(folio);
> +	end = start + PAGE_SIZE - 1;
>  
>  	/*
>  	 * Start from the cluster, as for subpage case, the cluster can start
> -	 * inside the page.
> +	 * inside the folio.
>  	 */
> -	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
> -	while (cur <= page_end) {
> +	cur = max(start, cluster->boundary[*cluster_nr] - offset);
> +	while (cur <= end) {
>  		struct extent_state *cached_state = NULL;
>  		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
>  		u64 extent_end = get_cluster_boundary_end(cluster,
>  						*cluster_nr) - offset;
> -		u64 clamped_start = max(page_start, extent_start);
> -		u64 clamped_end = min(page_end, extent_end);
> +		u64 clamped_start = max(start, extent_start);
> +		u64 clamped_end = min(end, extent_end);
>  		u32 clamped_len = clamped_end + 1 - clamped_start;
>  
>  		/* Reserve metadata for this range */
> @@ -3052,7 +3053,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
>  						      clamped_len, clamped_len,
>  						      false);
>  		if (ret)
> -			goto release_page;
> +			goto release;
>  
>  		/* Mark the range delalloc and dirty for later writeback */
>  		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
> @@ -3068,20 +3069,20 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
>  							clamped_len, true);
>  			btrfs_delalloc_release_extents(BTRFS_I(inode),
>  						       clamped_len);
> -			goto release_page;
> +			goto release;
>  		}
> -		btrfs_folio_set_dirty(fs_info, page_folio(page),
> +		btrfs_folio_set_dirty(fs_info, folio,
>  				      clamped_start, clamped_len);

This can be joined with the line above
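i.e.:

		btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);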

>  
>  		/*
> -		 * Set the boundary if it's inside the page.
> +		 * Set the boundary if it's inside the folio.
>  		 * Data relocation requires the destination extents to have the
>  		 * same size as the source.
>  		 * EXTENT_BOUNDARY bit prevents current extent from being merged
>  		 * with previous extent.
>  		 */
>  		if (in_range(cluster->boundary[*cluster_nr] - offset,
> -			     page_start, PAGE_SIZE)) {
> +			     start, PAGE_SIZE)) {

Can be joined
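i.e., this now fits within the line-length limit:

		if (in_range(cluster->boundary[*cluster_nr] - offset, start, PAGE_SIZE)) {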

>  			u64 boundary_start = cluster->boundary[*cluster_nr] -
>  						offset;
>  			u64 boundary_end = boundary_start +
David Sterba Jan. 22, 2024, 8:53 p.m. UTC | #3
On Thu, Jan 18, 2024 at 01:43:47PM -0800, Boris Burkov wrote:
> On Thu, Jan 18, 2024 at 01:46:39PM -0600, Goldwyn Rodrigues wrote:
> > From: Goldwyn Rodrigues <rgoldwyn@suse.com>
> > -static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
> > +static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
> >  			     const struct file_extent_cluster *cluster,
> > -			     int *cluster_nr, unsigned long page_index)
> > +			     int *cluster_nr, unsigned long index)
> >  {
> >  	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
> >  	u64 offset = BTRFS_I(inode)->index_cnt;
> >  	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
> >  	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
> > -	struct page *page;
> > -	u64 page_start;
> > -	u64 page_end;
> > +	struct folio *folio;
> > +	u64 start;
> > +	u64 end;
> 
> This patch throws out this function's labelling of the start/index/end
> variables with 'page_', which I think was pretty useful given the other
> starts/ends like 'extent_' and 'clamped_'. Namespacing the indices makes
> the code easier to follow, IMO.

With all the other prefixes around, I agree that keeping folio_ (as a
replacement for page_) would be useful.
Goldwyn Rodrigues Jan. 23, 2024, 4:36 p.m. UTC | #4
On 21:52 22/01, David Sterba wrote:
> On Thu, Jan 18, 2024 at 01:46:39PM -0600, Goldwyn Rodrigues wrote:
> > From: Goldwyn Rodrigues <rgoldwyn@suse.com>
> > 
> > Convert page references to folios and call the respective folio
> > functions.
> > 
> > Since find_or_create_page() takes a gfp mask argument, call
> > __filemap_get_folio() instead of filemap_grab_folio(), which does not
> > take one.
> > 
> > Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
> > ---
> >  fs/btrfs/relocation.c | 87 ++++++++++++++++++++++---------------------
> >  1 file changed, 44 insertions(+), 43 deletions(-)
> > 
> > diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
> > index c365bfc60652..f4fd4257adae 100644
> > --- a/fs/btrfs/relocation.c
> > +++ b/fs/btrfs/relocation.c
> > @@ -2849,7 +2849,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
> >  	 * btrfs_do_readpage() call of previously relocated file cluster.
> >  	 *
> >  	 * If the current cluster starts in the above range, btrfs_do_readpage()
> > -	 * will skip the read, and relocate_one_page() will later writeback
> > +	 * will skip the read, and relocate_one_folio() will later writeback
> >  	 * the padding zeros as new data, causing data corruption.
> >  	 *
> >  	 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
> > @@ -2983,68 +2983,69 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
> >  	return cluster->boundary[cluster_nr + 1] - 1;
> >  }
> >  
> > -static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
> > +static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
> >  			     const struct file_extent_cluster *cluster,
> > -			     int *cluster_nr, unsigned long page_index)
> > +			     int *cluster_nr, unsigned long index)
> >  {
> >  	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
> >  	u64 offset = BTRFS_I(inode)->index_cnt;
> >  	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
> >  	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
> > -	struct page *page;
> > -	u64 page_start;
> > -	u64 page_end;
> > +	struct folio *folio;
> > +	u64 start;
> > +	u64 end;
> >  	u64 cur;
> >  	int ret;
> >  
> > -	ASSERT(page_index <= last_index);
> > -	page = find_lock_page(inode->i_mapping, page_index);
> > -	if (!page) {
> > +	ASSERT(index <= last_index);
> > +	folio = filemap_lock_folio(inode->i_mapping, index);
> > +	if (IS_ERR(folio)) {
> >  		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
> 
> How do page_cache_sync_readahead() and folios interact? We still have
> page == folio, but large folios are on the way, so do we need something
> to make it work? If there's an assumption about pages and folios, this
> could be turned into an assertion so we don't forget about it later.

For now a page and a folio are the same, and the assumption is that the
folio size is PAGE_SIZE. I am adding a WARN_ON(folio_order(folio)) after
an uptodate folio is obtained, to warn if the folio size ever changes.
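Roughly, as a sketch of the plan (not the final patch):

	if (!folio_test_uptodate(folio)) {
		btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (!folio_test_uptodate(folio)) {
			ret = -EIO;
			goto release;
		}
	}
	/* Catch large folios until this function handles them. */
	WARN_ON(folio_order(folio));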

Patch

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c365bfc60652..f4fd4257adae 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2849,7 +2849,7 @@  static noinline_for_stack int prealloc_file_extent_cluster(
 	 * btrfs_do_readpage() call of previously relocated file cluster.
 	 *
 	 * If the current cluster starts in the above range, btrfs_do_readpage()
-	 * will skip the read, and relocate_one_page() will later writeback
+	 * will skip the read, and relocate_one_folio() will later writeback
 	 * the padding zeros as new data, causing data corruption.
 	 *
 	 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
@@ -2983,68 +2983,69 @@  static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
 	return cluster->boundary[cluster_nr + 1] - 1;
 }
 
-static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
+static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
 			     const struct file_extent_cluster *cluster,
-			     int *cluster_nr, unsigned long page_index)
+			     int *cluster_nr, unsigned long index)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 offset = BTRFS_I(inode)->index_cnt;
 	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
 	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
-	struct page *page;
-	u64 page_start;
-	u64 page_end;
+	struct folio *folio;
+	u64 start;
+	u64 end;
 	u64 cur;
 	int ret;
 
-	ASSERT(page_index <= last_index);
-	page = find_lock_page(inode->i_mapping, page_index);
-	if (!page) {
+	ASSERT(index <= last_index);
+	folio = filemap_lock_folio(inode->i_mapping, index);
+	if (IS_ERR(folio)) {
 		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
-				page_index, last_index + 1 - page_index);
-		page = find_or_create_page(inode->i_mapping, page_index, mask);
-		if (!page)
-			return -ENOMEM;
+				index, last_index + 1 - index);
+		folio = __filemap_get_folio(inode->i_mapping, index,
+				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
 	}
 
-	if (PageReadahead(page))
+	if (folio_test_readahead(folio))
 		page_cache_async_readahead(inode->i_mapping, ra, NULL,
-				page_folio(page), page_index,
-				last_index + 1 - page_index);
+				folio, index,
+				last_index + 1 - index);
 
-	if (!PageUptodate(page)) {
-		btrfs_read_folio(NULL, page_folio(page));
-		lock_page(page);
-		if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
+		btrfs_read_folio(NULL, folio);
+		folio_lock(folio);
+		if (!folio_test_uptodate(folio)) {
 			ret = -EIO;
-			goto release_page;
+			goto release;
 		}
 	}
 
 	/*
-	 * We could have lost page private when we dropped the lock to read the
-	 * page above, make sure we set_page_extent_mapped here so we have any
+	 * We could have lost folio private when we dropped the lock to read the
+	 * folio above, make sure we set_page_extent_mapped here so we have any
 	 * of the subpage blocksize stuff we need in place.
 	 */
-	ret = set_page_extent_mapped(page);
+	ret = set_folio_extent_mapped(folio);
 	if (ret < 0)
-		goto release_page;
+		goto release;
 
-	page_start = page_offset(page);
-	page_end = page_start + PAGE_SIZE - 1;
+	start = folio_pos(folio);
+	end = start + PAGE_SIZE - 1;
 
 	/*
 	 * Start from the cluster, as for subpage case, the cluster can start
-	 * inside the page.
+	 * inside the folio.
 	 */
-	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
-	while (cur <= page_end) {
+	cur = max(start, cluster->boundary[*cluster_nr] - offset);
+	while (cur <= end) {
 		struct extent_state *cached_state = NULL;
 		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
 		u64 extent_end = get_cluster_boundary_end(cluster,
 						*cluster_nr) - offset;
-		u64 clamped_start = max(page_start, extent_start);
-		u64 clamped_end = min(page_end, extent_end);
+		u64 clamped_start = max(start, extent_start);
+		u64 clamped_end = min(end, extent_end);
 		u32 clamped_len = clamped_end + 1 - clamped_start;
 
 		/* Reserve metadata for this range */
@@ -3052,7 +3053,7 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 						      clamped_len, clamped_len,
 						      false);
 		if (ret)
-			goto release_page;
+			goto release;
 
 		/* Mark the range delalloc and dirty for later writeback */
 		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
@@ -3068,20 +3069,20 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 							clamped_len, true);
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
 						       clamped_len);
-			goto release_page;
+			goto release;
 		}
-		btrfs_folio_set_dirty(fs_info, page_folio(page),
+		btrfs_folio_set_dirty(fs_info, folio,
 				      clamped_start, clamped_len);
 
 		/*
-		 * Set the boundary if it's inside the page.
+		 * Set the boundary if it's inside the folio.
 		 * Data relocation requires the destination extents to have the
 		 * same size as the source.
 		 * EXTENT_BOUNDARY bit prevents current extent from being merged
 		 * with previous extent.
 		 */
 		if (in_range(cluster->boundary[*cluster_nr] - offset,
-			     page_start, PAGE_SIZE)) {
+			     start, PAGE_SIZE)) {
 			u64 boundary_start = cluster->boundary[*cluster_nr] -
 						offset;
 			u64 boundary_end = boundary_start +
@@ -3104,8 +3105,8 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 				break;
 		}
 	}
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 
 	balance_dirty_pages_ratelimited(inode->i_mapping);
 	btrfs_throttle(fs_info);
@@ -3113,9 +3114,9 @@  static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
 		ret = -ECANCELED;
 	return ret;
 
-release_page:
-	unlock_page(page);
-	put_page(page);
+release:
+	folio_unlock(folio);
+	folio_put(folio);
 	return ret;
 }
 
@@ -3150,7 +3151,7 @@  static int relocate_file_extent_cluster(struct inode *inode,
 	last_index = (cluster->end - offset) >> PAGE_SHIFT;
 	for (index = (cluster->start - offset) >> PAGE_SHIFT;
 	     index <= last_index && !ret; index++)
-		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
+		ret = relocate_one_folio(inode, ra, cluster, &cluster_nr, index);
 	if (ret == 0)
 		WARN_ON(cluster_nr != cluster->nr);
 out: