[08/31] ext4: Convert ext4_bio_write_page() to ext4_bio_write_folio()

Message ID: 20230126202415.1682629-9-willy@infradead.org
State: New, archived
Series: Convert most of ext4 to folios

Commit Message

Matthew Wilcox Jan. 26, 2023, 8:23 p.m. UTC
Both callers now have a folio, so pass it in directly and avoid the call
to page_folio() at the beginning of the function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/ext4/ext4.h    |  5 ++---
 fs/ext4/inode.c   | 18 +++++++++---------
 fs/ext4/page-io.c | 10 ++++------
 3 files changed, 15 insertions(+), 18 deletions(-)
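
For illustration (an editorial sketch, not part of the patch itself): a caller
that already holds a folio no longer has to pass its head page only for the
callee to re-derive the folio with page_folio(). Using the mpage_submit_folio()
call site from the diff below:

	/* Before: pass the head page; ext4_bio_write_page() immediately
	 * called page_folio() to get the folio back.
	 */
	err = ext4_bio_write_page(&mpd->io_submit, &folio->page, len);

	/* After: pass the folio straight through, so the page_folio()
	 * call at the top of the function can be dropped.
	 */
	err = ext4_bio_write_folio(&mpd->io_submit, folio, len);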

Comments

Theodore Ts'o March 14, 2023, 10:31 p.m. UTC | #1
On Thu, Jan 26, 2023 at 08:23:52PM +0000, Matthew Wilcox (Oracle) wrote:
> Both callers now have a folio, so pass it in directly and avoid the call
> to page_folio() at the beginning of the function.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

The ext4_writepage() changes will need to be dropped when you rebase,
but other than that....

Reviewed-by: Theodore Ts'o <tytso@mit.edu>

Patch

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 43e26e6f6e42..7a132e8648f4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3756,9 +3756,8 @@ extern void ext4_io_submit_init(struct ext4_io_submit *io,
 				struct writeback_control *wbc);
 extern void ext4_end_io_rsv_work(struct work_struct *work);
 extern void ext4_io_submit(struct ext4_io_submit *io);
-extern int ext4_bio_write_page(struct ext4_io_submit *io,
-			       struct page *page,
-			       int len);
+int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
+		size_t len);
 extern struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end);
 extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8b91e325492f..fcd904123384 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2014,9 +2014,9 @@ static int ext4_writepage(struct page *page,
 	struct folio *folio = page_folio(page);
 	int ret = 0;
 	loff_t size;
-	unsigned int len;
+	size_t len;
 	struct buffer_head *page_bufs = NULL;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ext4_io_submit io_submit;
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
@@ -2052,12 +2052,12 @@ static int ext4_writepage(struct page *page,
 	 * Also, if there is only one buffer per page (the fs block
 	 * size == the page size), if one buffer needs block
 	 * allocation or needs to modify the extent tree to clear the
-	 * unwritten flag, we know that the page can't be written at
+	 * unwritten flag, we know that the folio can't be written at
 	 * all, so we might as well refuse the write immediately.
 	 * Unfortunately if the block size != page size, we can't as
 	 * easily detect this case using ext4_walk_page_buffers(), but
 	 * for the extremely common case, this is an optimization that
-	 * skips a useless round trip through ext4_bio_write_page().
+	 * skips a useless round trip through ext4_bio_write_folio().
 	 */
 	if (ext4_walk_page_buffers(NULL, inode, page_bufs, 0, len, NULL,
 				   ext4_bh_delay_or_unwritten)) {
@@ -2079,7 +2079,7 @@ static int ext4_writepage(struct page *page,
 	if (folio_test_checked(folio) && ext4_should_journal_data(inode))
 		/*
 		 * It's mmapped pagecache.  Add buffers and journal it.  There
-		 * doesn't seem much point in redirtying the page here.
+		 * doesn't seem much point in redirtying the folio here.
 		 */
 		return __ext4_journalled_writepage(page, len);
 
@@ -2090,7 +2090,7 @@ static int ext4_writepage(struct page *page,
 		folio_unlock(folio);
 		return -ENOMEM;
 	}
-	ret = ext4_bio_write_page(&io_submit, page, len);
+	ret = ext4_bio_write_folio(&io_submit, folio, len);
 	ext4_io_submit(&io_submit);
 	/* Drop io_end reference we got from init */
 	ext4_put_io_end_defer(io_submit.io_end);
@@ -2113,8 +2113,8 @@ static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
 	 * write-protects our page in page tables and the page cannot get
 	 * written to again until we release folio lock. So only after
 	 * folio_clear_dirty_for_io() we are safe to sample i_size for
-	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
-	 * on the barrier provided by TestClearPageDirty in
+	 * ext4_bio_write_folio() to zero-out tail of the written page. We rely
+	 * on the barrier provided by folio_test_clear_dirty() in
 	 * folio_clear_dirty_for_io() to make sure i_size is really sampled only
 	 * after page tables are updated.
 	 */
@@ -2123,7 +2123,7 @@ static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
 	if (folio_pos(folio) + len > size &&
 	    !ext4_verity_in_progress(mpd->inode))
 		len = size & ~PAGE_MASK;
-	err = ext4_bio_write_page(&mpd->io_submit, &folio->page, len);
+	err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
 	if (!err)
 		mpd->wbc->nr_to_write--;
 	mpd->first_page++;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index fd6c0dca24b9..c6da8800a49f 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -425,11 +425,9 @@ static void io_submit_add_bh(struct ext4_io_submit *io,
 	io->io_next_block++;
 }
 
-int ext4_bio_write_page(struct ext4_io_submit *io,
-			struct page *page,
-			int len)
+int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
+		size_t len)
 {
-	struct folio *folio = page_folio(page);
 	struct folio *io_folio = folio;
 	struct inode *inode = folio->mapping->host;
 	unsigned block_start;
@@ -522,8 +520,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		if (io->io_bio)
 			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
 	retry_encrypt:
-		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
-							       0, gfp_flags);
+		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
+					enc_bytes, 0, gfp_flags);
 		if (IS_ERR(bounce_page)) {
 			ret = PTR_ERR(bounce_page);
 			if (ret == -ENOMEM &&