
[1/4] nilfs2: Remove nilfs_writepage

Message ID: 20241002150036.1339475-2-willy@infradead.org
State: New
Series: nilfs2: Finish folio conversion

Commit Message

Matthew Wilcox Oct. 2, 2024, 3 p.m. UTC
Since nilfs2 has a ->writepages operation already, ->writepage is only
called by the migration code.  If we add a ->migrate_folio operation,
it won't even be used for that and so it can be deleted.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/nilfs2/inode.c | 33 +--------------------------------
 1 file changed, 1 insertion(+), 32 deletions(-)
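
For context on why migration was the only remaining ->writepage caller here:
writeback goes through ->writepages, and once an address space also provides
->migrate_folio, the migration core no longer falls back to ->writepage
either.  Below is a condensed, paraphrased sketch of that fallback in
mm/migrate.c (the *_sketch name is invented for illustration; migrate_folio()
and fallback_migrate_folio() are the real helpers, but this is not verbatim
kernel code):

/* Paraphrase of move_to_new_folio() in mm/migrate.c, heavily condensed. */
static int move_to_new_folio_sketch(struct folio *dst, struct folio *src,
				    enum migrate_mode mode)
{
	struct address_space *mapping = folio_mapping(src);

	if (!mapping)
		return migrate_folio(mapping, dst, src, mode);

	if (mapping->a_ops->migrate_folio)
		/* With .migrate_folio set, nilfs2 always takes this path. */
		return mapping->a_ops->migrate_folio(mapping, dst, src, mode);

	/*
	 * No ->migrate_folio: dirty folios are written back through
	 * ->writepage inside fallback_migrate_folio()/writeout(), which
	 * was the only way nilfs_writepage() could still be reached.
	 */
	return fallback_migrate_folio(mapping, dst, src, mode);
}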

Comments

Ryusuke Konishi Oct. 3, 2024, 11:47 a.m. UTC | #1
On Thu, Oct 3, 2024 at 12:00 AM Matthew Wilcox (Oracle) wrote:
>
> Since nilfs2 has a ->writepages operation already, ->writepage is only
> called by the migration code.  If we add a ->migrate_folio operation,
> it won't even be used for that and so it can be deleted.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/nilfs2/inode.c | 33 +--------------------------------
>  1 file changed, 1 insertion(+), 32 deletions(-)
>
> diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
> index be6acf6e2bfc..f1b47b655672 100644
> --- a/fs/nilfs2/inode.c
> +++ b/fs/nilfs2/inode.c
> @@ -170,37 +170,6 @@ static int nilfs_writepages(struct address_space *mapping,
>         return err;
>  }
>
> -static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
> -{
> -       struct folio *folio = page_folio(page);
> -       struct inode *inode = folio->mapping->host;
> -       int err;
> -
> -       if (sb_rdonly(inode->i_sb)) {
> -               /*
> -                * It means that filesystem was remounted in read-only
> -                * mode because of error or metadata corruption. But we
> -                * have dirty pages that try to be flushed in background.
> -                * So, here we simply discard this dirty page.
> -                */
> -               nilfs_clear_folio_dirty(folio);
> -               folio_unlock(folio);
> -               return -EROFS;
> -       }
> -
> -       folio_redirty_for_writepage(wbc, folio);
> -       folio_unlock(folio);
> -
> -       if (wbc->sync_mode == WB_SYNC_ALL) {
> -               err = nilfs_construct_segment(inode->i_sb);
> -               if (unlikely(err))
> -                       return err;
> -       } else if (wbc->for_reclaim)
> -               nilfs_flush_segment(inode->i_sb, inode->i_ino);
> -
> -       return 0;
> -}
> -
>  static bool nilfs_dirty_folio(struct address_space *mapping,
>                 struct folio *folio)
>  {
> @@ -295,7 +264,6 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
>  }
>
>  const struct address_space_operations nilfs_aops = {
> -       .writepage              = nilfs_writepage,
>         .read_folio             = nilfs_read_folio,
>         .writepages             = nilfs_writepages,
>         .dirty_folio            = nilfs_dirty_folio,
> @@ -304,6 +272,7 @@ const struct address_space_operations nilfs_aops = {
>         .write_end              = nilfs_write_end,
>         .invalidate_folio       = block_invalidate_folio,
>         .direct_IO              = nilfs_direct_IO,
> +       .migrate_folio          = buffer_migrate_folio,
>         .is_partially_uptodate  = block_is_partially_uptodate,
>  };
>

After applying this patch, fsstress started causing kernel panics.

Looking at the patch, I realized that .migrate_folio needs to use
buffer_migrate_folio_norefs, which checks for outstanding buffer head
references before migrating.

I was able to eliminate the kernel panic by setting migrate_folio as follows:

+ .migrate_folio = buffer_migrate_folio_norefs,
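
(For reference, the two helpers differ only in whether buffer head reference
counts are checked before migrating; a paraphrased sketch of their
definitions in mm/migrate.c, not verbatim kernel code:)

/* Both wrappers share __buffer_migrate_folio(); only check_refs differs. */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/* Does not check whether the buffer heads are still referenced. */
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}

int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/*
	 * Bails out if any buffer head on the folio has an elevated
	 * reference count, which is why it is the safer choice here.
	 */
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}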

I would like to continue load testing for a while to check for side effects
on reclaim from completely eliminating nilfs_writepage (which called
nilfs_flush_segment). So far, no problems have occurred in tests with
different block sizes or architectures, as long as I make the above change.

Thanks,
Ryusuke Konishi

Patch

diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index be6acf6e2bfc..f1b47b655672 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -170,37 +170,6 @@  static int nilfs_writepages(struct address_space *mapping,
 	return err;
 }
 
-static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
-{
-	struct folio *folio = page_folio(page);
-	struct inode *inode = folio->mapping->host;
-	int err;
-
-	if (sb_rdonly(inode->i_sb)) {
-		/*
-		 * It means that filesystem was remounted in read-only
-		 * mode because of error or metadata corruption. But we
-		 * have dirty pages that try to be flushed in background.
-		 * So, here we simply discard this dirty page.
-		 */
-		nilfs_clear_folio_dirty(folio);
-		folio_unlock(folio);
-		return -EROFS;
-	}
-
-	folio_redirty_for_writepage(wbc, folio);
-	folio_unlock(folio);
-
-	if (wbc->sync_mode == WB_SYNC_ALL) {
-		err = nilfs_construct_segment(inode->i_sb);
-		if (unlikely(err))
-			return err;
-	} else if (wbc->for_reclaim)
-		nilfs_flush_segment(inode->i_sb, inode->i_ino);
-
-	return 0;
-}
-
 static bool nilfs_dirty_folio(struct address_space *mapping,
 		struct folio *folio)
 {
@@ -295,7 +264,6 @@  nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 const struct address_space_operations nilfs_aops = {
-	.writepage		= nilfs_writepage,
 	.read_folio		= nilfs_read_folio,
 	.writepages		= nilfs_writepages,
 	.dirty_folio		= nilfs_dirty_folio,
@@ -304,6 +272,7 @@  const struct address_space_operations nilfs_aops = {
 	.write_end		= nilfs_write_end,
 	.invalidate_folio	= block_invalidate_folio,
 	.direct_IO		= nilfs_direct_IO,
+	.migrate_folio		= buffer_migrate_folio,
 	.is_partially_uptodate  = block_is_partially_uptodate,
 };