Message ID | 20210719184001.1750630-6-willy@infradead.org (mailing list archive)
---|---
State | New
Series | Folio support in block + iomap layers
On Mon, Jul 19, 2021 at 07:39:49PM +0100, Matthew Wilcox (Oracle) wrote:
> -static void
> -iomap_page_release(struct page *page)
> +static void iomap_page_release(struct folio *folio)
>  {
> -	struct iomap_page *iop = detach_page_private(page);
> -	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
> +	struct iomap_page *iop = folio_detach_private(folio);
> +	unsigned int nr_blocks = i_blocks_per_folio(folio->mapping->host,
> +			folio);

Nit: but I find this variant much easier to read:

	unsigned int nr_blocks =
		i_blocks_per_folio(folio->mapping->host, folio);

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
On Tue, Jul 20, 2021 at 08:52:43AM +0200, Christoph Hellwig wrote:
> On Mon, Jul 19, 2021 at 07:39:49PM +0100, Matthew Wilcox (Oracle) wrote:
> > -static void
> > -iomap_page_release(struct page *page)
> > +static void iomap_page_release(struct folio *folio)
> >  {
> > -	struct iomap_page *iop = detach_page_private(page);
> > -	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
> > +	struct iomap_page *iop = folio_detach_private(folio);
> > +	unsigned int nr_blocks = i_blocks_per_folio(folio->mapping->host,
> > +			folio);
>
> Nit: but I find this variant much easier to read:
>
> 	unsigned int nr_blocks =
> 		i_blocks_per_folio(folio->mapping->host, folio);

Probably even better ...

	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
On Tue, Jul 20, 2021 at 12:29:23PM +0100, Matthew Wilcox wrote:
> Probably even better ...
>
> 	struct inode *inode = folio->mapping->host;
> 	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

Fine with me.
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ec8bdc0c63df..83eb5fdcbe05 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -59,18 +59,18 @@ iomap_page_create(struct inode *inode, struct folio *folio)
 	return iop;
 }
 
-static void
-iomap_page_release(struct page *page)
+static void iomap_page_release(struct folio *folio)
 {
-	struct iomap_page *iop = detach_page_private(page);
-	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
+	struct iomap_page *iop = folio_detach_private(folio);
+	unsigned int nr_blocks = i_blocks_per_folio(folio->mapping->host,
+			folio);
 
 	if (!iop)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
 	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
-			PageUptodate(page));
+			folio_test_uptodate(folio));
 	kfree(iop);
 }
 
@@ -458,6 +458,8 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 int
 iomap_releasepage(struct page *page, gfp_t gfp_mask)
 {
+	struct folio *folio = page_folio(page);
+
 	trace_iomap_releasepage(page->mapping->host, page_offset(page),
 			PAGE_SIZE);
 
@@ -468,7 +470,7 @@ iomap_releasepage(struct page *page, gfp_t gfp_mask)
 	 */
 	if (PageDirty(page) || PageWriteback(page))
 		return 0;
-	iomap_page_release(page);
+	iomap_page_release(folio);
 	return 1;
 }
 EXPORT_SYMBOL_GPL(iomap_releasepage);
@@ -476,6 +478,8 @@ EXPORT_SYMBOL_GPL(iomap_releasepage);
 void
 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 {
+	struct folio *folio = page_folio(page);
+
 	trace_iomap_invalidatepage(page->mapping->host, offset, len);
 
 	/*
@@ -485,7 +489,7 @@ iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
 	if (offset == 0 && len == PAGE_SIZE) {
 		WARN_ON_ONCE(PageWriteback(page));
 		cancel_dirty_page(page);
-		iomap_page_release(page);
+		iomap_page_release(folio);
 	}
 }
 EXPORT_SYMBOL_GPL(iomap_invalidatepage);
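For reference, a sketch of how iomap_page_release() might read with the local
inode variable suggested in the thread folded in. This is illustrative only,
not the hunk that was posted above, and it assumes the struct iomap_page and
folio helpers from fs/iomap/buffered-io.c:

/*
 * Sketch only, not the posted patch: iomap_page_release() with the
 * struct inode local suggested during review. Behaviour is unchanged;
 * hoisting folio->mapping->host keeps the i_blocks_per_folio() call on
 * a single line.
 */
static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

This addresses the readability nit directly: the continuation line that split
the i_blocks_per_folio() arguments in the posted hunk is no longer needed.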