
[RFC,v3,2/7] iomap: Add iomap_folio_done helper

Message ID 20221216150626.670312-3-agruenba@redhat.com (mailing list archive)
State New, archived
Series Turn iomap_page_ops into iomap_folio_ops

Commit Message

Andreas Gruenbacher Dec. 16, 2022, 3:06 p.m. UTC
Add an iomap_folio_done() helper to encapsulate unlocking the folio,
calling ->page_done(), and putting the folio.  This doesn't change the
functionality.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/iomap/buffered-io.c | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

Comments

Christoph Hellwig Dec. 23, 2022, 3:02 p.m. UTC | #1
On Fri, Dec 16, 2022 at 04:06:21PM +0100, Andreas Gruenbacher wrote:
> +static void iomap_folio_done(struct iomap_iter *iter, loff_t pos, size_t ret,
> +		struct folio *folio)
> +{
> +	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
> +
> +	if (folio)
> +		folio_unlock(folio);
> +	if (page_ops && page_ops->page_done)
> +		page_ops->page_done(iter->inode, pos, ret, &folio->page);
> +	if (folio)
> +		folio_put(folio);
> +}

How is the folio dereference going to work if folio is NULL?

That being said, I really wonder if the current API is the right way to
go.  Can't we just have a ->get_folio method with the same signature as
__filemap_get_folio, and then do the __filemap_get_folio from the file
system and avoid the page/folio == NULL cleanup path entirely?  Then on
the done side move the unlock and put into the done method as well.
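
For illustration, a rough sketch of what that could look like, assuming the
hook literally mirrors the __filemap_get_folio() signature (the names and the
fallback below are hypothetical, not code from this series):

	/* Hypothetical sketch of the suggestion above, not part of this patch. */
	struct folio *(*get_folio)(struct address_space *mapping, pgoff_t index,
			int fgp_flags, gfp_t gfp);

	/* iomap_write_begin() could then let the filesystem own the lookup: */
	if (page_ops && page_ops->get_folio)
		folio = page_ops->get_folio(iter->inode->i_mapping,
				pos >> PAGE_SHIFT, fgp,
				mapping_gfp_mask(iter->inode->i_mapping));
	else
		folio = __filemap_get_folio(iter->inode->i_mapping,
				pos >> PAGE_SHIFT, fgp,
				mapping_gfp_mask(iter->inode->i_mapping));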

>  	if (!folio) {
>  		status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
> -		goto out_no_page;
> +		iomap_folio_done(iter, pos, 0, NULL);
> +		return status;
>  	}
>  
>  	/*
> @@ -656,13 +670,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
>  	return 0;
>  
>  out_unlock:
> -	folio_unlock(folio);
> -	folio_put(folio);
> +	iomap_folio_done(iter, pos, 0, folio);
>  	iomap_write_failed(iter->inode, pos, len);
>  
> -out_no_page:
> -	if (page_ops && page_ops->page_done)
> -		page_ops->page_done(iter->inode, pos, 0, NULL);
>  	return status;

But for the current version I don't really understand why the error
unwinding changes here.
Andreas Grünbacher Dec. 23, 2022, 8:54 p.m. UTC | #2
Am Fr., 23. Dez. 2022 um 16:12 Uhr schrieb Christoph Hellwig
<hch@infradead.org>:
> On Fri, Dec 16, 2022 at 04:06:21PM +0100, Andreas Gruenbacher wrote:
> > +static void iomap_folio_done(struct iomap_iter *iter, loff_t pos, size_t ret,
> > +             struct folio *folio)
> > +{
> > +     const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
> > +
> > +     if (folio)
> > +             folio_unlock(folio);
> > +     if (page_ops && page_ops->page_done)
> > +             page_ops->page_done(iter->inode, pos, ret, &folio->page);
> > +     if (folio)
> > +             folio_put(folio);
> > +}
>
> How is the folio dereference going to work if folio is NULL?

'&folio->page' is effectively a type cast, not a dereference. I
realize iomap_folio_done() as introduced here is not pretty, but it's
only an intermediate step, and the ugliness goes away later in this
series.
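
A minimal userspace sketch of that point, with simplified stand-in structs
(in the kernel, struct page sits at offset 0 inside struct folio, so the
same reasoning applies):

	#include <stddef.h>
	#include <stdio.h>

	struct page { unsigned long flags; };		/* stand-in */
	struct folio { struct page page; };		/* stand-in */

	int main(void)
	{
		struct folio *folio = NULL;

		/* Taking the address of the member at offset 0 is pointer
		 * arithmetic only; nothing is read through folio, so a
		 * NULL folio simply yields a NULL page pointer. */
		struct page *page = &folio->page;

		printf("offsetof(struct folio, page) = %zu\n",
		       offsetof(struct folio, page));
		printf("page = %p\n", (void *)page);
		return 0;
	}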

> That being said, I really wonder if the current API is the right way to
> go.  Can't we just have a ->get_folio method with the same signature as
> __filemap_get_folio, and then do the __filemap_get_folio from the file
> system and avoid the page/folio == NULL cleanup path entirely?  Then on
> the done side move the unlock and put into the done method as well.

Yes, this is what happens later in this series (as you've seen by now).

> >       if (!folio) {
> >               status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
> > -             goto out_no_page;
> > +             iomap_folio_done(iter, pos, 0, NULL);
> > +             return status;
> >       }
> >
> >       /*
> > @@ -656,13 +670,9 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
> >       return 0;
> >
> >  out_unlock:
> > -     folio_unlock(folio);
> > -     folio_put(folio);
> > +     iomap_folio_done(iter, pos, 0, folio);
> >       iomap_write_failed(iter->inode, pos, len);
> >
> > -out_no_page:
> > -     if (page_ops && page_ops->page_done)
> > -             page_ops->page_done(iter->inode, pos, 0, NULL);
> >       return status;
>
> But for the current version I don't really understand why the error
> unwinding changes here.

Currently, we have this order of operations in iomap_write_begin():

  folio_unlock() // folio_put() // iomap_write_failed() // ->page_done()

and this order in iomap_write_end():

  folio_unlock() // ->page_done() // folio_put() // iomap_write_failed()

The unwinding in iomap_write_begin() works because this is the trivial
case in which nothing happens to the page. We might just as well use
the same order of operations there as in iomap_write_end() though, and
when you switch to that, this is what you get.
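
Concretely, the error path in iomap_write_begin() after this patch boils
down to (simplified excerpt of the hunk below):

	out_unlock:
		/* Same relative order as iomap_write_end():
		 * unlock, ->page_done(), put -- then report the failure. */
		iomap_folio_done(iter, pos, 0, folio);
		iomap_write_failed(iter->inode, pos, len);

		return status;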

Thank you for the review.

Andreas
Christoph Hellwig Dec. 24, 2022, 7:22 a.m. UTC | #3
On Fri, Dec 23, 2022 at 09:54:34PM +0100, Andreas Grünbacher wrote:
> > But for the current version I don't really understand why the error
> > unwinding changes here.
> 
> Currently, we have this order of operations in iomap_write_begin():
> 
>   folio_unlock() // folio_put() // iomap_write_failed() // ->page_done()
> 
> and this order in iomap_write_end():
> 
>   folio_unlock() // ->page_done() // folio_put() // iomap_write_failed()
> 
> The unwinding in iomap_write_begin() works because this is the trivial
> case in which nothing happens to the page. We might just as well use
> the same order of operations there as in iomap_write_end() though, and
> when you switch to that, this is what you get.

Please document this in the commit message.

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 347010c6a652..8ce9abb29d46 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -575,6 +575,19 @@  static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	return 0;
 }
 
+static void iomap_folio_done(struct iomap_iter *iter, loff_t pos, size_t ret,
+		struct folio *folio)
+{
+	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
+
+	if (folio)
+		folio_unlock(folio);
+	if (page_ops && page_ops->page_done)
+		page_ops->page_done(iter->inode, pos, ret, &folio->page);
+	if (folio)
+		folio_put(folio);
+}
+
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
 		struct folio *folio)
 {
@@ -616,7 +629,8 @@  static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 			fgp, mapping_gfp_mask(iter->inode->i_mapping));
 	if (!folio) {
 		status = (iter->flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOMEM;
-		goto out_no_page;
+		iomap_folio_done(iter, pos, 0, NULL);
+		return status;
 	}
 
 	/*
@@ -656,13 +670,9 @@  static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
 	return 0;
 
 out_unlock:
-	folio_unlock(folio);
-	folio_put(folio);
+	iomap_folio_done(iter, pos, 0, folio);
 	iomap_write_failed(iter->inode, pos, len);
 
-out_no_page:
-	if (page_ops && page_ops->page_done)
-		page_ops->page_done(iter->inode, pos, 0, NULL);
 	return status;
 }
 
@@ -712,7 +722,6 @@  static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		size_t copied, struct folio *folio)
 {
-	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	loff_t old_size = iter->inode->i_size;
 	size_t ret;
@@ -736,11 +745,8 @@  static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
 		folio_may_straddle_isize(iter->inode, folio, old_size, pos);
 	}
-	folio_unlock(folio);
 
-	if (page_ops && page_ops->page_done)
-		page_ops->page_done(iter->inode, pos, ret, &folio->page);
-	folio_put(folio);
+	iomap_folio_done(iter, pos, ret, folio);
 
 	if (ret < len)
 		iomap_write_failed(iter->inode, pos + ret, len - ret);