[v14,102/138] iomap: Convert iomap_write_begin and iomap_write_end to folios

Message ID 20210715033704.692967-103-willy@infradead.org (mailing list archive)
State New
Series Memory folios

Commit Message

Matthew Wilcox July 15, 2021, 3:36 a.m. UTC
These functions still only work in PAGE_SIZE chunks, but there are
fewer conversions from head to tail pages as a result of this patch.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 68 ++++++++++++++++++++++--------------------
 1 file changed, 36 insertions(+), 32 deletions(-)

Comments

kernel test robot July 15, 2021, 1:53 p.m. UTC | #1
Hi "Matthew,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on next-20210715]
[cannot apply to hnaz-linux-mm/master xfs-linux/for-next tip/perf/core v5.14-rc1]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Matthew-Wilcox-Oracle/Memory-folios/20210715-133101
base:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 8096acd7442e613fad0354fc8dfdb2003cceea0b
config: arm-randconfig-r014-20210715 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 0e49c54a8cbd3e779e5526a5888c683c01cc3c50)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm cross compiling tool for clang build
        # apt-get install binutils-arm-linux-gnueabi
        # https://github.com/0day-ci/linux/commit/fd265884da3f65758e8b5153d45537a4bbefbb70
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Matthew-Wilcox-Oracle/Memory-folios/20210715-133101
        git checkout fd265884da3f65758e8b5153d45537a4bbefbb70
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=arm 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> fs/iomap/buffered-io.c:645:2: error: implicit declaration of function 'flush_dcache_folio' [-Werror,-Wimplicit-function-declaration]
           flush_dcache_folio(folio);
           ^
   fs/iomap/buffered-io.c:645:2: note: did you mean 'flush_dcache_page'?
   arch/arm/include/asm/cacheflush.h:292:13: note: 'flush_dcache_page' declared here
   extern void flush_dcache_page(struct page *);
               ^
   1 error generated.


vim +/flush_dcache_folio +645 fs/iomap/buffered-io.c

   640	
   641	static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
   642			size_t copied, struct folio *folio)
   643	{
   644		struct iomap_page *iop = to_iomap_page(folio);
 > 645		flush_dcache_folio(folio);
   646	
   647		/*
   648		 * The blocks that were entirely written will now be uptodate, so we
   649		 * don't have to worry about a readpage reading them and overwriting a
   650		 * partial write.  However if we have encountered a short write and only
   651		 * partially written into a block, it will not be marked uptodate, so a
   652		 * readpage might come in and destroy our partial write.
   653		 *
   654		 * Do the simplest thing, and just treat any short write to a non
   655		 * uptodate page as a zero-length write, and force the caller to redo
   656		 * the whole thing.
   657		 */
   658		if (unlikely(copied < len && !folio_test_uptodate(folio)))
   659			return 0;
   660		iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
   661		filemap_dirty_folio(inode->i_mapping, folio);
   662		return copied;
   663	}
   664	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Matthew Wilcox July 15, 2021, 3:29 p.m. UTC | #2
On Thu, Jul 15, 2021 at 09:53:26PM +0800, kernel test robot wrote:
> >> fs/iomap/buffered-io.c:645:2: error: implicit declaration of function 'flush_dcache_folio' [-Werror,-Wimplicit-function-declaration]
>            flush_dcache_folio(folio);

Thanks.  ARM doesn't include asm-generic/cacheflush.h so it needs
flush_dcache_folio() declared.  Adding this:

+++ b/arch/arm/include/asm/cacheflush.h
@@ -290,6 +290,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  */
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);

 static inline void flush_kernel_vmap_range(void *addr, int size)
 {
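
For reference, the common-code fallback that this declaration pairs with can be
as simple as flushing each constituent page.  A minimal sketch under that
assumption (not the ARM-specific implementation; folio_nr_pages() and
folio_page() are helpers introduced earlier in this series):

	void flush_dcache_folio(struct folio *folio)
	{
		long i, nr = folio_nr_pages(folio);

		/* Flush every page backing the folio; a single-page folio
		 * degenerates to one flush_dcache_page() call. */
		for (i = 0; i < nr; i++)
			flush_dcache_page(folio_page(folio, i));
	}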
Darrick J. Wong July 15, 2021, 9:51 p.m. UTC | #3
On Thu, Jul 15, 2021 at 04:36:28AM +0100, Matthew Wilcox (Oracle) wrote:
> These functions still only work in PAGE_SIZE chunks, but there are
> fewer conversions from head to tail pages as a result of this patch.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/iomap/buffered-io.c | 68 ++++++++++++++++++++++--------------------
>  1 file changed, 36 insertions(+), 32 deletions(-)
> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index a3fe0d36c739..5e0aa23d4693 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -541,9 +541,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
>  
>  static int
>  __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
> -		struct page *page, struct iomap *srcmap)
> +		struct folio *folio, struct iomap *srcmap)
>  {
> -	struct folio *folio = page_folio(page);
>  	struct iomap_page *iop = iomap_page_create(inode, folio);
>  	loff_t block_size = i_blocksize(inode);
>  	loff_t block_start = round_down(pos, block_size);
> @@ -583,12 +582,14 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
>  	return 0;
>  }
>  
> -static int
> -iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
> -		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
> +static int iomap_write_begin(struct inode *inode, loff_t pos, size_t len,
> +		unsigned flags, struct folio **foliop, struct iomap *iomap,
> +		struct iomap *srcmap)
>  {
>  	const struct iomap_page_ops *page_ops = iomap->page_ops;
> +	struct folio *folio;
>  	struct page *page;
> +	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
>  	int status = 0;
>  
>  	BUG_ON(pos + len > iomap->offset + iomap->length);
> @@ -604,30 +605,31 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
>  			return status;
>  	}
>  
> -	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
> -			AOP_FLAG_NOFS);
> -	if (!page) {
> +	folio = __filemap_get_folio(inode->i_mapping, pos >> PAGE_SHIFT, fgp,

Ah, ok, so we're moving the file_get_pages flags up to iomap now.

> +			mapping_gfp_mask(inode->i_mapping));
> +	if (!folio) {
>  		status = -ENOMEM;
>  		goto out_no_page;
>  	}
>  
> +	page = folio_file_page(folio, pos >> PAGE_SHIFT);
>  	if (srcmap->type == IOMAP_INLINE)
>  		iomap_read_inline_data(inode, page, srcmap);
>  	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
>  		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
>  	else
> -		status = __iomap_write_begin(inode, pos, len, flags, page,
> +		status = __iomap_write_begin(inode, pos, len, flags, folio,
>  				srcmap);
>  
>  	if (unlikely(status))
>  		goto out_unlock;
>  
> -	*pagep = page;
> +	*foliop = folio;
>  	return 0;
>  
>  out_unlock:
> -	unlock_page(page);
> -	put_page(page);
> +	folio_unlock(folio);
> +	folio_put(folio);
>  	iomap_write_failed(inode, pos, len);
>  
>  out_no_page:
> @@ -637,11 +639,10 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
>  }
>  
>  static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
> -		size_t copied, struct page *page)
> +		size_t copied, struct folio *folio)
>  {
> -	struct folio *folio = page_folio(page);
>  	struct iomap_page *iop = to_iomap_page(folio);
> -	flush_dcache_page(page);
> +	flush_dcache_folio(folio);
>  
>  	/*
>  	 * The blocks that were entirely written will now be uptodate, so we
> @@ -654,10 +655,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  	 * uptodate page as a zero-length write, and force the caller to redo
>  	 * the whole thing.
>  	 */
> -	if (unlikely(copied < len && !PageUptodate(page)))
> +	if (unlikely(copied < len && !folio_test_uptodate(folio)))
>  		return 0;
>  	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
> -	__set_page_dirty_nobuffers(page);
> +	filemap_dirty_folio(inode->i_mapping, folio);
>  	return copied;
>  }
>  
> @@ -680,9 +681,10 @@ static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
>  
>  /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
>  static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
> -		size_t copied, struct page *page, struct iomap *iomap,
> +		size_t copied, struct folio *folio, struct iomap *iomap,
>  		struct iomap *srcmap)
>  {
> +	struct page *page = folio_file_page(folio, pos / PAGE_SIZE);

pos >> PAGE_SHIFT ?

(There's a few more of these elsewhere...)

--D

>  	const struct iomap_page_ops *page_ops = iomap->page_ops;
>  	loff_t old_size = inode->i_size;
>  	size_t ret;
> @@ -693,7 +695,7 @@ static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
>  				page, NULL);
>  	} else {
> -		ret = __iomap_write_end(inode, pos, len, copied, page);
> +		ret = __iomap_write_end(inode, pos, len, copied, folio);
>  	}
>  
>  	/*
> @@ -705,13 +707,13 @@ static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
>  		i_size_write(inode, pos + ret);
>  		iomap->flags |= IOMAP_F_SIZE_CHANGED;
>  	}
> -	unlock_page(page);
> +	folio_unlock(folio);
>  
>  	if (old_size < pos)
>  		pagecache_isize_extended(inode, old_size, pos);
>  	if (page_ops && page_ops->page_done)
>  		page_ops->page_done(inode, pos, ret, page, iomap);
> -	put_page(page);
> +	folio_put(folio);
>  
>  	if (ret < len)
>  		iomap_write_failed(inode, pos, len);
> @@ -727,6 +729,7 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  	ssize_t written = 0;
>  
>  	do {
> +		struct folio *folio;
>  		struct page *page;
>  		unsigned long offset;	/* Offset into pagecache page */
>  		unsigned long bytes;	/* Bytes to write to page */
> @@ -750,18 +753,19 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  			break;
>  		}
>  
> -		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
> +		status = iomap_write_begin(inode, pos, bytes, 0, &folio, iomap,
>  				srcmap);
>  		if (unlikely(status))
>  			break;
>  
> +		page = folio_file_page(folio, pos / PAGE_SIZE);
>  		if (mapping_writably_mapped(inode->i_mapping))
>  			flush_dcache_page(page);
>  
>  		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
>  
> -		status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
> -				srcmap);
> +		status = iomap_write_end(inode, pos, bytes, copied, folio,
> +				iomap, srcmap);
>  
>  		if (unlikely(copied != status))
>  			iov_iter_revert(i, copied - status);
> @@ -825,14 +829,14 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  	do {
>  		unsigned long offset = offset_in_page(pos);
>  		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
> -		struct page *page;
> +		struct folio *folio;
>  
>  		status = iomap_write_begin(inode, pos, bytes,
> -				IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
> +				IOMAP_WRITE_F_UNSHARE, &folio, iomap, srcmap);
>  		if (unlikely(status))
>  			return status;
>  
> -		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
> +		status = iomap_write_end(inode, pos, bytes, bytes, folio, iomap,
>  				srcmap);
>  		if (WARN_ON_ONCE(status == 0))
>  			return -EIO;
> @@ -871,19 +875,19 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
>  static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
>  		struct iomap *iomap, struct iomap *srcmap)
>  {
> -	struct page *page;
> +	struct folio *folio;
>  	int status;
>  	unsigned offset = offset_in_page(pos);
>  	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
>  
> -	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
> +	status = iomap_write_begin(inode, pos, bytes, 0, &folio, iomap, srcmap);
>  	if (status)
>  		return status;
>  
> -	zero_user(page, offset, bytes);
> -	mark_page_accessed(page);
> +	zero_user(folio_file_page(folio, pos / PAGE_SIZE), offset, bytes);
> +	folio_mark_accessed(folio);
>  
> -	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
> +	return iomap_write_end(inode, pos, bytes, bytes, folio, iomap, srcmap);
>  }
>  
>  static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
> -- 
> 2.30.2
>
Matthew Wilcox July 16, 2021, 3:11 a.m. UTC | #4
On Thu, Jul 15, 2021 at 02:51:05PM -0700, Darrick J. Wong wrote:
> On Thu, Jul 15, 2021 at 04:36:28AM +0100, Matthew Wilcox (Oracle) wrote:
> > +static int iomap_write_begin(struct inode *inode, loff_t pos, size_t len,
> > +		unsigned flags, struct folio **foliop, struct iomap *iomap,
> > +		struct iomap *srcmap)
> >  {
> >  	const struct iomap_page_ops *page_ops = iomap->page_ops;
> > +	struct folio *folio;
> >  	struct page *page;
> > +	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
> >  	int status = 0;
> >  
> >  	BUG_ON(pos + len > iomap->offset + iomap->length);
> > @@ -604,30 +605,31 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
> >  			return status;
> >  	}
> >  
> > -	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
> > -			AOP_FLAG_NOFS);
> > -	if (!page) {
> > +	folio = __filemap_get_folio(inode->i_mapping, pos >> PAGE_SHIFT, fgp,
> 
> Ah, ok, so we're moving the file_get_pages flags up to iomap now.

Right, saves us having a folio equivalent of
grab_cache_page_write_begin().  And lets us get rid of AOP_FLAG_NOFS
eventually (although that really should be obsoleted by scoped
allocations, but one windmill at a time).
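
For context, the helper being dropped is little more than a translation of
AOP_FLAG_NOFS into FGP flags; a simplified sketch of roughly what it does
(details such as the stable-page handling elided, so not verbatim mm/filemap.c):

	struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags)
	{
		unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

		if (flags & AOP_FLAG_NOFS)
			fgp_flags |= FGP_NOFS;
		/* iomap_write_begin() now builds these flags itself and calls
		 * __filemap_get_folio() directly, so no folio variant of this
		 * wrapper is needed. */
		return pagecache_get_page(mapping, index, fgp_flags,
				mapping_gfp_mask(mapping));
	}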

> > +	struct page *page = folio_file_page(folio, pos / PAGE_SIZE);
> 
> pos >> PAGE_SHIFT ?

mmm.  We're inconsistent:

willy@pepe:~/kernel/folio$ git grep '/ PAGE_SIZE' mm/ fs/ |wc
     92     720    6475
willy@pepe:~/kernel/folio$ git grep '>> PAGE_SHIFT' mm/ fs/ |wc
    635    4582   39394

That said, there's a clear preference.  It's just that we had a bug the
other day where somebody shifted by PAGE_SHIFT in the wrong direction ...
But again, this is your code, so I'll change to the shift.
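
For completeness, the two spellings compute the same index for non-negative
offsets, since PAGE_SIZE is 1UL << PAGE_SHIFT; a throwaway user-space check,
assuming 4KiB pages:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12			/* assumption: 4KiB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		uint64_t pos = 123456789;

		/* Unsigned division by a power of two and the right shift
		 * agree; the kernel convention prefers the shift. */
		assert(pos / PAGE_SIZE == (pos >> PAGE_SHIFT));
		return 0;
	}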

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index a3fe0d36c739..5e0aa23d4693 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -541,9 +541,8 @@  static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
 
 static int
 __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
-		struct page *page, struct iomap *srcmap)
+		struct folio *folio, struct iomap *srcmap)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = iomap_page_create(inode, folio);
 	loff_t block_size = i_blocksize(inode);
 	loff_t block_start = round_down(pos, block_size);
@@ -583,12 +582,14 @@  __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 	return 0;
 }
 
-static int
-iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
-		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
+static int iomap_write_begin(struct inode *inode, loff_t pos, size_t len,
+		unsigned flags, struct folio **foliop, struct iomap *iomap,
+		struct iomap *srcmap)
 {
 	const struct iomap_page_ops *page_ops = iomap->page_ops;
+	struct folio *folio;
 	struct page *page;
+	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
 	int status = 0;
 
 	BUG_ON(pos + len > iomap->offset + iomap->length);
@@ -604,30 +605,31 @@  iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 			return status;
 	}
 
-	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
-			AOP_FLAG_NOFS);
-	if (!page) {
+	folio = __filemap_get_folio(inode->i_mapping, pos >> PAGE_SHIFT, fgp,
+			mapping_gfp_mask(inode->i_mapping));
+	if (!folio) {
 		status = -ENOMEM;
 		goto out_no_page;
 	}
 
+	page = folio_file_page(folio, pos >> PAGE_SHIFT);
 	if (srcmap->type == IOMAP_INLINE)
 		iomap_read_inline_data(inode, page, srcmap);
 	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
 		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
 	else
-		status = __iomap_write_begin(inode, pos, len, flags, page,
+		status = __iomap_write_begin(inode, pos, len, flags, folio,
 				srcmap);
 
 	if (unlikely(status))
 		goto out_unlock;
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 
 out_unlock:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	iomap_write_failed(inode, pos, len);
 
 out_no_page:
@@ -637,11 +639,10 @@  iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 }
 
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-		size_t copied, struct page *page)
+		size_t copied, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 
 	/*
 	 * The blocks that were entirely written will now be uptodate, so we
@@ -654,10 +655,10 @@  static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 * uptodate page as a zero-length write, and force the caller to redo
 	 * the whole thing.
 	 */
-	if (unlikely(copied < len && !PageUptodate(page)))
+	if (unlikely(copied < len && !folio_test_uptodate(folio)))
 		return 0;
 	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
-	__set_page_dirty_nobuffers(page);
+	filemap_dirty_folio(inode->i_mapping, folio);
 	return copied;
 }
 
@@ -680,9 +681,10 @@  static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
 
 /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
 static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-		size_t copied, struct page *page, struct iomap *iomap,
+		size_t copied, struct folio *folio, struct iomap *iomap,
 		struct iomap *srcmap)
 {
+	struct page *page = folio_file_page(folio, pos / PAGE_SIZE);
 	const struct iomap_page_ops *page_ops = iomap->page_ops;
 	loff_t old_size = inode->i_size;
 	size_t ret;
@@ -693,7 +695,7 @@  static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
 				page, NULL);
 	} else {
-		ret = __iomap_write_end(inode, pos, len, copied, page);
+		ret = __iomap_write_end(inode, pos, len, copied, folio);
 	}
 
 	/*
@@ -705,13 +707,13 @@  static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		i_size_write(inode, pos + ret);
 		iomap->flags |= IOMAP_F_SIZE_CHANGED;
 	}
-	unlock_page(page);
+	folio_unlock(folio);
 
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
 	if (page_ops && page_ops->page_done)
 		page_ops->page_done(inode, pos, ret, page, iomap);
-	put_page(page);
+	folio_put(folio);
 
 	if (ret < len)
 		iomap_write_failed(inode, pos, len);
@@ -727,6 +729,7 @@  iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	ssize_t written = 0;
 
 	do {
+		struct folio *folio;
 		struct page *page;
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
@@ -750,18 +753,19 @@  iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 			break;
 		}
 
-		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
+		status = iomap_write_begin(inode, pos, bytes, 0, &folio, iomap,
 				srcmap);
 		if (unlikely(status))
 			break;
 
+		page = folio_file_page(folio, pos / PAGE_SIZE);
 		if (mapping_writably_mapped(inode->i_mapping))
 			flush_dcache_page(page);
 
 		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 
-		status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
-				srcmap);
+		status = iomap_write_end(inode, pos, bytes, copied, folio,
+				iomap, srcmap);
 
 		if (unlikely(copied != status))
 			iov_iter_revert(i, copied - status);
@@ -825,14 +829,14 @@  iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	do {
 		unsigned long offset = offset_in_page(pos);
 		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
-		struct page *page;
+		struct folio *folio;
 
 		status = iomap_write_begin(inode, pos, bytes,
-				IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
+				IOMAP_WRITE_F_UNSHARE, &folio, iomap, srcmap);
 		if (unlikely(status))
 			return status;
 
-		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
+		status = iomap_write_end(inode, pos, bytes, bytes, folio, iomap,
 				srcmap);
 		if (WARN_ON_ONCE(status == 0))
 			return -EIO;
@@ -871,19 +875,19 @@  EXPORT_SYMBOL_GPL(iomap_file_unshare);
 static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
 		struct iomap *iomap, struct iomap *srcmap)
 {
-	struct page *page;
+	struct folio *folio;
 	int status;
 	unsigned offset = offset_in_page(pos);
 	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
-	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
+	status = iomap_write_begin(inode, pos, bytes, 0, &folio, iomap, srcmap);
 	if (status)
 		return status;
 
-	zero_user(page, offset, bytes);
-	mark_page_accessed(page);
+	zero_user(folio_file_page(folio, pos / PAGE_SIZE), offset, bytes);
+	folio_mark_accessed(folio);
 
-	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
+	return iomap_write_end(inode, pos, bytes, bytes, folio, iomap, srcmap);
 }
 
 static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,