[v14,094/138] iomap: Convert iomap_page_release to take a folio

Message ID: 20210715033704.692967-95-willy@infradead.org
State: New
Series: Memory folios

Commit Message

Matthew Wilcox July 15, 2021, 3:36 a.m. UTC
iomap_page_release() was also assuming that it was being passed a
head page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
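
For context, a minimal sketch of the caller-side pattern this conversion relies on (not part of the patch; demo_release() is a hypothetical caller): page_folio() maps any page, head or tail, to its owning folio, so iomap_page_release() no longer has to assume it was handed a head page.

/* Hypothetical caller: correct whether 'page' is a head or a tail page. */
static void demo_release(struct page *page)
{
	struct folio *folio = page_folio(page);	/* always the head */

	/* The iomap_page state hangs off the folio's private pointer. */
	iomap_page_release(folio);
}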

Comments

Darrick J. Wong July 15, 2021, 9:20 p.m. UTC | #1
On Thu, Jul 15, 2021 at 04:36:20AM +0100, Matthew Wilcox (Oracle) wrote:
> iomap_page_release() was also assuming that it was being passed a
> head page.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Eh, looks pretty straightforward to me...
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c15a0ac52a32..251ec45426aa 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -59,18 +59,18 @@ iomap_page_create(struct inode *inode, struct folio *folio)
 	return iop;
 }
 
-static void
-iomap_page_release(struct page *page)
+static void iomap_page_release(struct folio *folio)
 {
-	struct iomap_page *iop = detach_page_private(page);
-	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
+	struct iomap_page *iop = folio_detach_private(folio);
+	unsigned int nr_blocks = i_blocks_per_folio(folio->mapping->host,
+							folio);
 
 	if (!iop)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
 	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
-			PageUptodate(page));
+			folio_test_uptodate(folio));
 	kfree(iop);
 }
 
@@ -456,6 +456,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 int
 iomap_releasepage(struct page *page, gfp_t gfp_mask)
 {
+	struct folio *folio = page_folio(page);
+
 	trace_iomap_releasepage(page->mapping->host, page_offset(page),
 			PAGE_SIZE);
 
@@ -466,7 +468,7 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
 	 */
 	if (PageDirty(page) || PageWriteback(page))
 		return 0;
-	iomap_page_release(page);
+	iomap_page_release(folio);
 	return 1;
 }
 EXPORT_SYMBOL_GPL(iomap_releasepage);
@@ -474,6 +476,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
 void
 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 {
+	struct folio *folio = page_folio(page);
+
 	trace_iomap_invalidatepage(page->mapping->host, offset, len);
 
 	/*
@@ -483,7 +487,7 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 	if (offset == 0 && len == PAGE_SIZE) {
 		WARN_ON_ONCE(PageWriteback(page));
 		cancel_dirty_page(page);
-		iomap_page_release(page);
+		iomap_page_release(folio);
 	}
 }
 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
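
The block count in iomap_page_release() now scales with the folio rather than with PAGE_SIZE. As a reference point, a minimal sketch of the helper it calls, mirroring the i_blocks_per_folio() definition in <linux/pagemap.h> at the time of this series (shown for illustration only):

static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	/* A multi-page folio covers folio_size() bytes, not just PAGE_SIZE. */
	return folio_size(folio) >> inode->i_blkbits;
}

With nr_blocks counting every block the folio covers, the bitmap_full() check against folio_test_uptodate() stays correct for multi-page folios.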