
[v14,128/138] iomap: Support multi-page folios in invalidatepage

Message ID 20210715033704.692967-129-willy@infradead.org
State New, archived
Series Memory folios

Commit Message

Matthew Wilcox July 15, 2021, 3:36 a.m. UTC
If we're punching a hole in a multi-page folio, we need to remove the
per-page iomap data as the folio is about to be split and each page will
need its own.  This means that writepage can now come across a page with
no iop allocated, so remove the assertion that there is already one,
and just create one (with the uptodate bits set) if there isn't one.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
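
For context on the second hunk: it relies on iomap_page_create() being safe to
call on a folio that may already have an iop attached, and on a freshly
allocated iop inheriting the folio's uptodate state.  A sketch of roughly what
that helper does at this point in the series follows; the authoritative
version lives in fs/iomap/buffered-io.c in the folio tree, so treat names and
details here as approximate:

/*
 * Sketch only, not part of this patch.  Returns the existing iop if one
 * is attached, allocates one otherwise.  If the folio is already uptodate,
 * every per-block uptodate bit is set on allocation, which is what lets
 * writepage create an iop on demand without losing state.
 */
static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}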

Comments

Darrick J. Wong July 15, 2021, 10:10 p.m. UTC | #1
On Thu, Jul 15, 2021 at 04:36:54AM +0100, Matthew Wilcox (Oracle) wrote:
> If we're punching a hole in a multi-page folio, we need to remove the
> per-page iomap data as the folio is about to be split and each page will
> need its own.  This means that writepage can now come across a page with
> no iop allocated, so remove the assertion that there is already one,
> and just create one (with the uptodate bits set) if there isn't one.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Lol, Andreas already did the bottom half of the change for you.

Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

Matthew Wilcox July 16, 2021, 2:49 a.m. UTC | #2
On Thu, Jul 15, 2021 at 03:10:18PM -0700, Darrick J. Wong wrote:
> Lol, Andreas already did the bottom half of the change for you.

Heh, yes, I copy-and-pasted it from this patch ;-)  Thanks for
merging it!

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 48de198c5603..7f78256fc0ba 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -474,13 +474,17 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 	trace_iomap_invalidatepage(folio->mapping->host, offset, len);
 
 	/*
-	 * If we are invalidating the entire page, clear the dirty state from it
-	 * and release it to avoid unnecessary buildup of the LRU.
+	 * If we are invalidating the entire folio, clear the dirty state
+	 * from it and release it to avoid unnecessary buildup of the LRU.
 	 */
 	if (offset == 0 && len == folio_size(folio)) {
 		WARN_ON_ONCE(folio_test_writeback(folio));
 		folio_cancel_dirty(folio);
 		iomap_page_release(folio);
+	} else if (folio_multi(folio)) {
+		/* Must release the iop so the page can be split */
+		WARN_ON_ONCE(!folio_test_uptodate(folio) && folio_test_dirty(folio));
+		iomap_page_release(folio);
 	}
 }
 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
@@ -1300,7 +1304,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct inode *inode,
 		struct folio *folio, loff_t end_pos)
 {
-	struct iomap_page *iop = to_iomap_page(folio);
+	struct iomap_page *iop = iomap_page_create(inode, folio);
 	struct iomap_ioend *ioend, *next;
 	unsigned len = i_blocksize(inode);
 	unsigned nblocks = i_blocks_per_folio(inode, folio);
@@ -1308,7 +1312,6 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	int error = 0, count = 0, i;
 	LIST_HEAD(submit_list);
 
-	WARN_ON_ONCE(nblocks > 1 && !iop);
 	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
 
 	/*
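
To make the invalidate branches concrete, here is a standalone userspace toy
(illustrative only, not kernel code; the 4KiB page size and the folio sizes
below are assumptions) that mirrors the decision structure of the first hunk:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed page size */

/* Mirrors the branch structure of iomap_invalidatepage() above. */
static void invalidate(size_t folio_size, size_t offset, size_t len)
{
	bool multi = folio_size > PAGE_SIZE;	/* stand-in for folio_multi() */

	if (offset == 0 && len == folio_size)
		printf("%zu@%zu: full invalidate: cancel dirty, release iop\n",
				len, offset);
	else if (multi)
		printf("%zu@%zu: partial, multi-page: release iop so the folio can split\n",
				len, offset);
	else
		printf("%zu@%zu: partial, single page: iop kept\n", len, offset);
}

int main(void)
{
	invalidate(16 * PAGE_SIZE, 0, 16 * PAGE_SIZE);		/* whole folio truncated */
	invalidate(16 * PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE);	/* hole punched mid-folio */
	invalidate(PAGE_SIZE, 1024, 1024);			/* partial, single page */
	return 0;
}

The middle case is the one this patch adds: a partial invalidation of a
multi-page folio drops the iop so the upcoming split can give each page its
own private data.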