Message ID | 20230814170350.756488-1-willy@infradead.org (mailing list archive) |
---|---|
State | New, archived |
Series | [1/2] btrfs: Convert defrag_prepare_one_page() to use a folio |
On Mon, Aug 14, 2023 at 06:03:49PM +0100, Matthew Wilcox (Oracle) wrote:
> Use a folio throughout defrag_prepare_one_page() to remove dozens of
> hidden calls to compound_head(). There is no support here for large
> folios; indeed, turn the existing check for PageCompound into a check
> for large folios.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/btrfs/defrag.c | 53 +++++++++++++++++++++++---------------------
>  1 file changed, 27 insertions(+), 26 deletions(-)
>
> diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
> index f2ff4cbe8656..4392a09d2bb1 100644
> --- a/fs/btrfs/defrag.c
> +++ b/fs/btrfs/defrag.c
> @@ -724,13 +724,14 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
>  	u64 page_start = (u64)index << PAGE_SHIFT;
>  	u64 page_end = page_start + PAGE_SIZE - 1;
>  	struct extent_state *cached_state = NULL;
> -	struct page *page;
> +	struct folio *folio;
>  	int ret;
>  
>  again:
> -	page = find_or_create_page(mapping, index, mask);
> -	if (!page)
> -		return ERR_PTR(-ENOMEM);
> +	folio = __filemap_get_folio(mapping, index,
> +			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
> +	if (IS_ERR(folio))
> +		return &folio->page;
>  
>  	/*
>  	 * Since we can defragment files opened read-only, we can encounter
> @@ -740,16 +741,16 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
>  	 * executables that explicitly enable them, so this isn't very
>  	 * restrictive.
>  	 */
> -	if (PageCompound(page)) {
> -		unlock_page(page);
> -		put_page(page);
> +	if (folio_test_large(folio)) {
> +		folio_unlock(folio);
> +		folio_put(folio);
>  		return ERR_PTR(-ETXTBSY);
>  	}
>  
> -	ret = set_page_extent_mapped(page);
> +	ret = set_page_extent_mapped(&folio->page);
>  	if (ret < 0) {
> -		unlock_page(page);
> -		put_page(page);
> +		folio_unlock(folio);
> +		folio_put(folio);
>  		return ERR_PTR(ret);
>  	}
>  
> @@ -764,17 +765,17 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
>  		if (!ordered)
>  			break;
>  
> -		unlock_page(page);
> +		folio_unlock(folio);
>  		btrfs_start_ordered_extent(ordered);
>  		btrfs_put_ordered_extent(ordered);
> -		lock_page(page);
> +		folio_lock(folio);
>  		/*
> -		 * We unlocked the page above, so we need check if it was
> +		 * We unlocked the folio above, so we need check if it was
>  		 * released or not.
>  		 */
> -		if (page->mapping != mapping || !PagePrivate(page)) {
> -			unlock_page(page);
> -			put_page(page);
> +		if (folio->mapping != mapping || !folio->private) {

Handling the private bit is probably the only thing that's not a direct
API conversion. I'd assume that PagePrivate should be folio_test_private()
or the private pointer should be read via folio_get_private(), not
directly.
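For reference, a minimal sketch of the variant the review is pointing at, using the folio private helpers instead of reading folio->private directly. This is not the applied patch, only an illustration: folio_test_private() tests the PG_private flag (the same thing the old PagePrivate() check tested), and folio_get_private() returns the attached pointer.

```c
/*
 * Sketch only, not the applied hunk: the "was the folio released while
 * unlocked?" check written with the private-flag helper rather than a
 * direct field access.
 */
if (folio->mapping != mapping || !folio_test_private(folio)) {
	folio_unlock(folio);
	folio_put(folio);
	goto again;
}
```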
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index f2ff4cbe8656..4392a09d2bb1 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -724,13 +724,14 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
 	u64 page_start = (u64)index << PAGE_SHIFT;
 	u64 page_end = page_start + PAGE_SIZE - 1;
 	struct extent_state *cached_state = NULL;
-	struct page *page;
+	struct folio *folio;
 	int ret;
 
 again:
-	page = find_or_create_page(mapping, index, mask);
-	if (!page)
-		return ERR_PTR(-ENOMEM);
+	folio = __filemap_get_folio(mapping, index,
+			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+	if (IS_ERR(folio))
+		return &folio->page;
 
 	/*
 	 * Since we can defragment files opened read-only, we can encounter
@@ -740,16 +741,16 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
 	 * executables that explicitly enable them, so this isn't very
 	 * restrictive.
 	 */
-	if (PageCompound(page)) {
-		unlock_page(page);
-		put_page(page);
+	if (folio_test_large(folio)) {
+		folio_unlock(folio);
+		folio_put(folio);
 		return ERR_PTR(-ETXTBSY);
 	}
 
-	ret = set_page_extent_mapped(page);
+	ret = set_page_extent_mapped(&folio->page);
 	if (ret < 0) {
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);
 		return ERR_PTR(ret);
 	}
 
@@ -764,17 +765,17 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
 		if (!ordered)
 			break;
 
-		unlock_page(page);
+		folio_unlock(folio);
 		btrfs_start_ordered_extent(ordered);
 		btrfs_put_ordered_extent(ordered);
-		lock_page(page);
+		folio_lock(folio);
 		/*
-		 * We unlocked the page above, so we need check if it was
+		 * We unlocked the folio above, so we need check if it was
 		 * released or not.
 		 */
-		if (page->mapping != mapping || !PagePrivate(page)) {
-			unlock_page(page);
-			put_page(page);
+		if (folio->mapping != mapping || !folio->private) {
+			folio_unlock(folio);
+			folio_put(folio);
 			goto again;
 		}
 	}
@@ -783,21 +784,21 @@ static struct page *defrag_prepare_one_page(struct btrfs_inode *inode, pgoff_t i
 	 * Now the page range has no ordered extent any more. Read the page to
 	 * make it uptodate.
 	 */
-	if (!PageUptodate(page)) {
-		btrfs_read_folio(NULL, page_folio(page));
-		lock_page(page);
-		if (page->mapping != mapping || !PagePrivate(page)) {
-			unlock_page(page);
-			put_page(page);
+	if (!folio_test_uptodate(folio)) {
+		btrfs_read_folio(NULL, folio);
+		folio_lock(folio);
+		if (folio->mapping != mapping || !folio->private) {
+			folio_unlock(folio);
+			folio_put(folio);
 			goto again;
 		}
-		if (!PageUptodate(page)) {
-			unlock_page(page);
-			put_page(page);
+		if (!folio_test_uptodate(folio)) {
+			folio_unlock(folio);
+			folio_put(folio);
 			return ERR_PTR(-EIO);
 		}
 	}
-	return page;
+	return &folio->page;
 }
 
 struct defrag_target_range {
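One detail of the lookup conversion is worth spelling out. find_or_create_page() is itself a thin wrapper that passes FGP_LOCK | FGP_ACCESSED | FGP_CREAT, so the flags above preserve the old behaviour; the difference is that __filemap_get_folio() reports failure as an ERR_PTR() rather than NULL. The sketch below (an illustration, not part of the patch) notes why returning &folio->page is still correct on that error path: the embedded struct page is the first member of struct folio, so the address is bit-for-bit the folio pointer, ERR_PTR encoding included.

```c
/* Illustrative sketch of the error path, not part of the patch. */
folio = __filemap_get_folio(mapping, index,
			    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
if (IS_ERR(folio))
	/*
	 * &folio->page == (struct page *)folio because the embedded
	 * struct page sits at offset 0, so callers doing IS_ERR() or
	 * PTR_ERR() on the returned struct page * still see the errno.
	 */
	return &folio->page;
```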
Use a folio throughout defrag_prepare_one_page() to remove dozens of
hidden calls to compound_head(). There is no support here for large
folios; indeed, turn the existing check for PageCompound into a check
for large folios.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/btrfs/defrag.c | 53 +++++++++++++++++++++++++++--------------------------
 1 file changed, 27 insertions(+), 26 deletions(-)
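As background for the "hidden calls to compound_head()" wording: the legacy page-based helpers are now wrappers that resolve the folio first, and that resolution is where compound_head() hides. Below is a rough paraphrase of one such wrapper from include/linux/pagemap.h; treat the exact definition as an assumption, the point is only the extra page_folio() step.

```c
/*
 * Simplified paraphrase of the page-based wrapper: every call pays for a
 * page_folio() lookup (i.e. compound_head()) before doing the real work,
 * which calling folio_unlock() on an already-known folio avoids.
 */
static inline void unlock_page(struct page *page)
{
	return folio_unlock(page_folio(page));
}
```

Likewise, a folio never refers to a tail page, so the only compound case left to reject is a large folio, which is why the PageCompound() check becomes folio_test_large().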