diff mbox series

iomap-folio & nvdimm merge

Message ID YcIIbtKhOulAL4s4@casper.infradead.org (mailing list archive)
State New, archived
Headers show
Series iomap-folio & nvdimm merge | expand

Commit Message

Matthew Wilcox Dec. 21, 2021, 5:01 p.m. UTC
On Thu, Dec 16, 2021 at 09:07:06PM +0000, Matthew Wilcox (Oracle) wrote:
> The zero iterator can work in folio-sized chunks instead of page-sized
> chunks.  This will save a lot of page cache lookups if the file is cached
> in large folios.

This patch (and a few others) end up conflicting with what Christoph did
that's now in the nvdimm tree.  In an effort to make the merge cleaner,
I took the next-20211220 tag and did the following:

Revert de291b590286
Apply: https://lore.kernel.org/linux-xfs/20211221044450.517558-1-willy@infradead.org/
(these two things are likely to happen in the nvdimm tree imminently)

I then checked out iomap-folio-5.17e and added this patch:

    iomap: Inline __iomap_zero_iter into its caller

    To make the merge easier, replicate the inlining of __iomap_zero_iter()
    into iomap_zero_iter() that is currently in the nvdimm tree.

    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

+++ b/fs/iomap/buffered-io.c
@@@ -888,19 -908,32 +907,23 @@@ static loff_t iomap_zero_iter(struct io
                return length;

        do {
-               unsigned offset = offset_in_page(pos);
-               size_t bytes = min_t(u64, PAGE_SIZE - offset, length);
-               struct page *page;
+               struct folio *folio;
                int status;
+               size_t offset;
+               size_t bytes = min_t(u64, SIZE_MAX, length);

-               status = iomap_write_begin(iter, pos, bytes, &page);
 -              if (IS_DAX(iter->inode)) {
 -                      s64 tmp = dax_iomap_zero(pos, bytes, iomap);
 -                      if (tmp < 0)
 -                              return tmp;
 -                      bytes = tmp;
 -                      goto good;
 -              }
 -
+               status = iomap_write_begin(iter, pos, bytes, &folio);
                if (status)
                        return status;

-               zero_user(page, offset, bytes);
-               mark_page_accessed(page);
+               offset = offset_in_folio(folio, pos);
+               if (bytes > folio_size(folio) - offset)
+                       bytes = folio_size(folio) - offset;
+
+               folio_zero_range(folio, offset, bytes);
+               folio_mark_accessed(folio);

-               bytes = iomap_write_end(iter, pos, bytes, bytes, page);
+               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
 -good:
                if (WARN_ON_ONCE(bytes == 0))
                        return -EIO;



Shall I push out a version of this patch series which includes the
"iomap: Inline __iomap_zero_iter into its caller" patch I pasted above?

Comments

Darrick J. Wong Dec. 21, 2021, 6:41 p.m. UTC | #1
On Tue, Dec 21, 2021 at 05:01:34PM +0000, Matthew Wilcox wrote:
> On Thu, Dec 16, 2021 at 09:07:06PM +0000, Matthew Wilcox (Oracle) wrote:
> > The zero iterator can work in folio-sized chunks instead of page-sized
> > chunks.  This will save a lot of page cache lookups if the file is cached
> > in large folios.
> 
> This patch (and a few others) end up conflicting with what Christoph did
> that's now in the nvdimm tree.  In an effort to make the merge cleaner,
> I took the next-20211220 tag and did the following:
> 
> Revert de291b590286
> Apply: https://lore.kernel.org/linux-xfs/20211221044450.517558-1-willy@infradead.org/
> (these two things are likely to happen in the nvdimm tree imminently)
> 
> I then checked out iomap-folio-5.17e and added this patch:
> 
>     iomap: Inline __iomap_zero_iter into its caller
> 
>     To make the merge easier, replicate the inlining of __iomap_zero_iter()
>     into iomap_zero_iter() that is currently in the nvdimm tree.
> 
>     Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Looks like a reasonable function promotion to me...
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

> 
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index ba80bedd9590..c6b3a148e898 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -895,27 +895,6 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
>  }
>  EXPORT_SYMBOL_GPL(iomap_file_unshare);
>  
> -static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
> -{
> -       struct folio *folio;
> -       int status;
> -       size_t offset;
> -       size_t bytes = min_t(u64, SIZE_MAX, length);
> -
> -       status = iomap_write_begin(iter, pos, bytes, &folio);
> -       if (status)
> -               return status;
> -
> -       offset = offset_in_folio(folio, pos);
> -       if (bytes > folio_size(folio) - offset)
> -               bytes = folio_size(folio) - offset;
> -
> -       folio_zero_range(folio, offset, bytes);
> -       folio_mark_accessed(folio);
> -
> -       return iomap_write_end(iter, pos, bytes, bytes, folio);
> -}
> -
>  static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
>  {
>         struct iomap *iomap = &iter->iomap;
> @@ -929,14 +908,34 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
>                 return length;
>  
>         do {
> -               s64 bytes;
> +               struct folio *folio;
> +               int status;
> +               size_t offset;
> +               size_t bytes = min_t(u64, SIZE_MAX, length);
> +
> +               if (IS_DAX(iter->inode)) {
> +                       s64 tmp = dax_iomap_zero(pos, bytes, iomap);
> +                       if (tmp < 0)
> +                               return tmp;
> +                       bytes = tmp;
> +                       goto good;
> +               }
>  
> -               if (IS_DAX(iter->inode))
> -                       bytes = dax_iomap_zero(pos, length, iomap);
> -               else
> -                       bytes = __iomap_zero_iter(iter, pos, length);
> -               if (bytes < 0)
> -                       return bytes;
> +               status = iomap_write_begin(iter, pos, bytes, &folio);
> +               if (status)
> +                       return status;
> +
> +               offset = offset_in_folio(folio, pos);
> +               if (bytes > folio_size(folio) - offset)
> +                       bytes = folio_size(folio) - offset;
> +
> +               folio_zero_range(folio, offset, bytes);
> +               folio_mark_accessed(folio);
> +
> +               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
> +good:
> +               if (WARN_ON_ONCE(bytes == 0))
> +                       return -EIO;
>  
>                 pos += bytes;
>                 length -= bytes;
> 
> 
> 
> Then I did the merge, and the merge commit looks pretty sensible
> afterwards:
> 
>     Merge branch 'iomap-folio-5.17f' into fixup
> 
> diff --cc fs/iomap/buffered-io.c
> index 955f51f94b3f,c6b3a148e898..c938bbad075e
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@@ -888,19 -908,32 +907,23 @@@ static loff_t iomap_zero_iter(struct io
>                 return length;
> 
>         do {
> -               unsigned offset = offset_in_page(pos);
> -               size_t bytes = min_t(u64, PAGE_SIZE - offset, length);
> -               struct page *page;
> +               struct folio *folio;
>                 int status;
> +               size_t offset;
> +               size_t bytes = min_t(u64, SIZE_MAX, length);
> 
> -               status = iomap_write_begin(iter, pos, bytes, &page);
>  -              if (IS_DAX(iter->inode)) {
>  -                      s64 tmp = dax_iomap_zero(pos, bytes, iomap);
>  -                      if (tmp < 0)
>  -                              return tmp;
>  -                      bytes = tmp;
>  -                      goto good;
>  -              }
>  -
> +               status = iomap_write_begin(iter, pos, bytes, &folio);
>                 if (status)
>                         return status;
> 
> -               zero_user(page, offset, bytes);
> -               mark_page_accessed(page);
> +               offset = offset_in_folio(folio, pos);
> +               if (bytes > folio_size(folio) - offset)
> +                       bytes = folio_size(folio) - offset;
> +
> +               folio_zero_range(folio, offset, bytes);
> +               folio_mark_accessed(folio);
> 
> -               bytes = iomap_write_end(iter, pos, bytes, bytes, page);
> +               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
>  -good:

Assuming I'm reading the metadiff properly, I think this merge
resolution looks correct given what both patchsets are trying to do.

>                 if (WARN_ON_ONCE(bytes == 0))
>                         return -EIO;
> 
> 
> 
> Shall I push out a version of this patch series which includes the
> "iomap: Inline __iomap_zero_iter into its caller" patch I pasted above?

Yes.

I've been distracted for months with first a Huge Customer Escalation
and now a <embargoed>, which means that I've been and continue to be
very distracted.  I /think/ there are no other iomap patches being
proposed for inclusion -- Andreas' patches were applied as fixes during
5.16-rc, Christoph's DAX refactoring is now in the nvdimm tree, and that
leaves Matthew's folios refactoring.

So seeing as (I think?) there are no other iomap patches for 5.17, if
Matthew wants to add his branch to for-next and push directly to Linus
(rather than pushing to me to push the exact same branch to Linus) I
think that would be ... better than letting it block on me.  IIRC I've
RVB'd everything in the folios branch. :(

FWIW I ran the 5.17e branch through my fstests cloud and nothing fell
out, so I think it's in good enough shape to merge to for-next.

--D
Matthew Wilcox Dec. 21, 2021, 6:53 p.m. UTC | #2
On Tue, Dec 21, 2021 at 10:41:15AM -0800, Darrick J. Wong wrote:
> >     iomap: Inline __iomap_zero_iter into its caller
> > 
> >     To make the merge easier, replicate the inlining of __iomap_zero_iter()
> >     into iomap_zero_iter() that is currently in the nvdimm tree.
> > 
> >     Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> 
> Looks like a reasonable function promotion to me...
> Reviewed-by: Darrick J. Wong <djwong@kernel.org>

Thanks, applied that to the commit.

> > Shall I push out a version of this patch series which includes the
> > "iomap: Inline __iomap_zero_iter into its caller" patch I pasted above?
> 
> Yes.
> 
> I've been distracted for months with first a Huge Customer Escalation
> and now a <embargoed>, which means that I've been and continue to be
> very distracted.  I /think/ there are no other iomap patches being
> proposed for inclusion -- Andreas' patches were applied as fixes during
> 5.16-rc, Christoph's DAX refactoring is now in the nvdimm tree, and that
> leaves Matthew's folios refactoring.
> 
> So seeing as (I think?) there are no other iomap patches for 5.17, if
> Matthew wants to add his branch to for-next and push directly to Linus
> (rather than pushing to me to push the exact same branch to Linus) I
> think that would be ... better than letting it block on me.  IIRC I've
> RVB'd everything in the folios branch. :(
> 
> FWIW I ran the 5.17e branch through my fstests cloud and nothing fell
> out, so I think it's in good enough shape to merge to for-next.

Glad to hear it passed that thorough testing.  Stephen, please pick
up a new tree (hopefully just temporarily until Darrick can swim to
the surface):

 git://git.infradead.org/users/willy/linux.git folio-iomap

Hopefully the previous message will give you enough context for
the merge conflict resolution.
Stephen Rothwell Dec. 21, 2021, 10:46 p.m. UTC | #3
Hi Matthew,

On Tue, 21 Dec 2021 18:53:25 +0000 Matthew Wilcox <willy@infradead.org> wrote:
>
> Glad to hear it passed that thorough testing.  Stephen, please pick
> up a new tree (hopefully just temporarily until Darrick can swim to
> the surface):
> 
>  git://git.infradead.org/users/willy/linux.git folio-iomap
> 
> Hopefully the previous message will give you enough context for
> the merge conflict resolution.

I have added that after the folio tree today.

Thanks for adding your subsystem tree as a participant of linux-next.  As
you may know, this is not a judgement of your code.  The purpose of
linux-next is to provide integration testing and to lower the impact of
conflicts between subsystems in the next merge window. 

You will need to ensure that the patches/commits in your tree/series have
been:
     * submitted under GPL v2 (or later) and include the Contributor's
        Signed-off-by,
     * posted to the relevant mailing list,
     * reviewed by you (or another maintainer of your subsystem tree),
     * successfully unit tested, and 
     * destined for the current or next Linux merge window.

Basically, this should be just what you would send to Linus (or ask him
to fetch).  It is allowed to be rebased if you deem it necessary.
diff mbox series

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ba80bedd9590..c6b3a148e898 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -895,27 +895,6 @@  iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 }
 EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
-static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
-{
-       struct folio *folio;
-       int status;
-       size_t offset;
-       size_t bytes = min_t(u64, SIZE_MAX, length);
-
-       status = iomap_write_begin(iter, pos, bytes, &folio);
-       if (status)
-               return status;
-
-       offset = offset_in_folio(folio, pos);
-       if (bytes > folio_size(folio) - offset)
-               bytes = folio_size(folio) - offset;
-
-       folio_zero_range(folio, offset, bytes);
-       folio_mark_accessed(folio);
-
-       return iomap_write_end(iter, pos, bytes, bytes, folio);
-}
-
 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
 {
        struct iomap *iomap = &iter->iomap;
@@ -929,14 +908,34 @@  static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
                return length;
 
        do {
-               s64 bytes;
+               struct folio *folio;
+               int status;
+               size_t offset;
+               size_t bytes = min_t(u64, SIZE_MAX, length);
+
+               if (IS_DAX(iter->inode)) {
+                       s64 tmp = dax_iomap_zero(pos, bytes, iomap);
+                       if (tmp < 0)
+                               return tmp;
+                       bytes = tmp;
+                       goto good;
+               }
 
-               if (IS_DAX(iter->inode))
-                       bytes = dax_iomap_zero(pos, length, iomap);
-               else
-                       bytes = __iomap_zero_iter(iter, pos, length);
-               if (bytes < 0)
-                       return bytes;
+               status = iomap_write_begin(iter, pos, bytes, &folio);
+               if (status)
+                       return status;
+
+               offset = offset_in_folio(folio, pos);
+               if (bytes > folio_size(folio) - offset)
+                       bytes = folio_size(folio) - offset;
+
+               folio_zero_range(folio, offset, bytes);
+               folio_mark_accessed(folio);
+
+               bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+good:
+               if (WARN_ON_ONCE(bytes == 0))
+                       return -EIO;
 
                pos += bytes;
                length -= bytes;



Then I did the merge, and the merge commit looks pretty sensible
afterwards:

    Merge branch 'iomap-folio-5.17f' into fixup

diff --cc fs/iomap/buffered-io.c
index 955f51f94b3f,c6b3a148e898..c938bbad075e
--- a/fs/iomap/buffered-io.c