Message ID | 20241111234842.2024180-14-axboe@kernel.dk (mailing list archive) |
---|---|
State | New |
Series | Uncached buffered IO |
On Mon, Nov 11, 2024 at 04:37:40PM -0700, Jens Axboe wrote:
> Add iomap buffered write support for RWF_UNCACHED. If RWF_UNCACHED is
> set for a write, mark the folios being written with drop_writeback. Then
> writeback completion will drop the pages. The write_iter handler simply
> kicks off writeback for the pages, and writeback completion will take
> care of the rest.
>
> This still needs the user of the iomap buffered write helpers to call
> iocb_uncached_write() upon successful issue of the writes.
>
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> [...]
> @@ -64,6 +64,7 @@ struct vm_fault;
>  #define IOMAP_F_BUFFER_HEAD	0
>  #endif /* CONFIG_BUFFER_HEAD */
>  #define IOMAP_F_XATTR		(1U << 5)
> +#define IOMAP_F_UNCACHED	(1U << 6)

This value ^^^ is set only by the core iomap code, right?

>  /*
>   * Flags set by the core iomap code during operations:

...in which case it should be set down here. It probably ought to have
a description of what it does, too:

"IOMAP_F_UNCACHED is set to indicate that writes to the page cache (and
hence writeback) will result in folios being evicted as soon as the
updated bytes are written back to the storage."

If the writeback fails, does that mean that the dirty data will /not/ be
retained in the page cache?  IIRC we finally got to the point where the
major filesystems leave pagecache alone after writeback EIO.

The rest of the mechanics looks nifty to me; there's plenty of places
where this could be useful to me personally. :)

--D
On 11/11/24 6:01 PM, Darrick J. Wong wrote:
> On Mon, Nov 11, 2024 at 04:37:40PM -0700, Jens Axboe wrote:
>> Add iomap buffered write support for RWF_UNCACHED. If RWF_UNCACHED is
>> set for a write, mark the folios being written with drop_writeback. Then
>> writeback completion will drop the pages. The write_iter handler simply
>> kicks off writeback for the pages, and writeback completion will take
>> care of the rest.
>>
>> This still needs the user of the iomap buffered write helpers to call
>> iocb_uncached_write() upon successful issue of the writes.
>>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
>> [...]
>>  #define IOMAP_F_XATTR		(1U << 5)
>> +#define IOMAP_F_UNCACHED	(1U << 6)
>
> This value ^^^ is set only by the core iomap code, right?

Correct

>>  /*
>>   * Flags set by the core iomap code during operations:
>
> ...in which case it should be set down here. It probably ought to have
> a description of what it does, too:

Ah yes indeed, good point. I'll move it and add a description.

> "IOMAP_F_UNCACHED is set to indicate that writes to the page cache (and
> hence writeback) will result in folios being evicted as soon as the
> updated bytes are written back to the storage."

Excellent, I'll go with that.

> If the writeback fails, does that mean that the dirty data will /not/ be
> retained in the page cache?  IIRC we finally got to the point where the
> major filesystems leave pagecache alone after writeback EIO.

Good question - didn't change any of those bits. It currently relies on
writeback completion to prune the ranges. So if an EIO completion
triggers writeback completion, then it'll get pruned. But for that case,
I suspect the range is still dirty, and hence the pruning would not
succeed, for obvious reasons. So you'd need further things on top of
that, I'm afraid.

> The rest of the mechanics looks nifty to me; there's plenty of places
> where this could be useful to me personally. :)

For sure, I think there are tons of use cases for this as well. Thanks
for taking a look!
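For reference, a minimal userspace sketch of how this series is meant to be
driven. This is not part of the patch: it assumes a kernel with the series
applied and a uapi header that defines RWF_UNCACHED; the flag value below is
only a placeholder, and the EOPNOTSUPP fallback is how an unsupported RWF
flag is expected to surface.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_UNCACHED
#define RWF_UNCACHED	0x00000080	/* placeholder; use the value from linux/fs.h */
#endif

int main(void)
{
	static char buf[64 * 1024];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t ret;

	if (fd < 0)
		return 1;
	memset(buf, 0xab, sizeof(buf));

	/* Buffered write whose folios should be dropped once writeback completes */
	ret = pwritev2(fd, &iov, 1, 0, RWF_UNCACHED);
	if (ret < 0 && errno == EOPNOTSUPP) {
		/* Kernel without the series: fall back to a plain buffered write */
		ret = pwritev2(fd, &iov, 1, 0, 0);
	}
	printf("wrote %zd bytes\n", ret);
	close(fd);
	return ret == sizeof(buf) ? 0 : 1;
}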
On Mon, Nov 11, 2024 at 04:37:40PM -0700, Jens Axboe wrote:
> Add iomap buffered write support for RWF_UNCACHED. If RWF_UNCACHED is
> set for a write, mark the folios being written with drop_writeback. Then

s/drop_writeback/uncached/ ?

BTW, this might be getting into wonky "don't care that much" territory,
but something else to be aware of is that certain writes can potentially
change pagecache state as a side effect outside of the actual buffered
write itself.

For example, xfs calls iomap_zero_range() on write extension (i.e. pos >
isize), which uses buffered writes and thus could populate a pagecache
folio without setting it uncached, even if done on behalf of an uncached
write.

I've only made a first pass and could be missing some details, but IIUC
I _think_ this means something like writing out a stream of small,
sparse and file extending uncached writes could actually end up behaving
more like sync I/O. Again, not saying that's something we really care
about, just raising it in case it's worth considering or documenting..

Brian

> writeback completion will drop the pages. The write_iter handler simply
> kicks off writeback for the pages, and writeback completion will take
> care of the rest.
>
> This still needs the user of the iomap buffered write helpers to call
> iocb_uncached_write() upon successful issue of the writes.
>
> Signed-off-by: Jens Axboe <axboe@kernel.dk>
> [...]
On 11/12/24 9:37 AM, Brian Foster wrote:
> On Mon, Nov 11, 2024 at 04:37:40PM -0700, Jens Axboe wrote:
>> Add iomap buffered write support for RWF_UNCACHED. If RWF_UNCACHED is
>> set for a write, mark the folios being written with drop_writeback. Then
>
> s/drop_writeback/uncached/ ?

Ah indeed, guess that never got changed. Thanks, will fix that in the
commit message.

> BTW, this might be getting into wonky "don't care that much" territory,
> but something else to be aware of is that certain writes can potentially
> change pagecache state as a side effect outside of the actual buffered
> write itself.
>
> For example, xfs calls iomap_zero_range() on write extension (i.e. pos >
> isize), which uses buffered writes and thus could populate a pagecache
> folio without setting it uncached, even if done on behalf of an uncached
> write.
>
> I've only made a first pass and could be missing some details, but IIUC
> I _think_ this means something like writing out a stream of small,
> sparse and file extending uncached writes could actually end up behaving
> more like sync I/O. Again, not saying that's something we really care
> about, just raising it in case it's worth considering or documenting..

No that's useful info, I'm not really surprised that there would still
be cases where UNCACHED goes unnoticed. In other words, I'd be surprised
if the current patches for eg xfs/ext4 cover all the cases where new
folios are created and should be marked as UNCACHED if IOCB_UNCACHED is
set in the iocb.

I think those can be sorted out or documented as we move forward.
UNCACHED is really just a hint - the kernel should do its best to not
have permanent folios for this IO, but there are certainly cases where
it won't be honored if you're racing with regular buffered IO or mmap.
For the case above, sounds like we could cover that, however, and
probably should.
On Tue, Nov 12, 2024 at 10:16:10AM -0700, Jens Axboe wrote:
> On 11/12/24 9:37 AM, Brian Foster wrote:
> > For example, xfs calls iomap_zero_range() on write extension (i.e. pos >
> > isize), which uses buffered writes and thus could populate a pagecache
> > folio without setting it uncached, even if done on behalf of an uncached
> > write.
>
> No that's useful info, I'm not really surprised that there would still
> be cases where UNCACHED goes unnoticed. In other words, I'd be surprised
> if the current patches for eg xfs/ext4 cover all the cases where new
> folios are created and should be marked as UNCACHED if IOCB_UNCACHED is
> set in the iocb.
>
> I think those can be sorted out or documented as we move forward.
> UNCACHED is really just a hint - the kernel should do its best to not
> have permanent folios for this IO, but there are certainly cases where
> it won't be honored if you're racing with regular buffered IO or mmap.
> For the case above, sounds like we could cover that, however, and
> probably should.
>

Ok. I suppose you could plumb the iocb state through the zero range call
as well, but given the description/semantics I wouldn't blame you if you
wanted to leave that for a followon improvement. Thanks for the
explanation.

Brian

> --
> Jens Axboe
>
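To make the follow-on Brian describes a little more concrete, here is a
purely hypothetical sketch. iomap_zero_range() in this series does not take
iter flags, so iomap_zero_range_flags() below is an invented helper used only
to illustrate carrying the uncached hint into the zeroing of a file-extending
write; it is not an API proposed by the series.

/*
 * Hypothetical sketch only: iomap_zero_range_flags() does not exist.
 * The point is that zeroing between the old and new EOF, done on
 * behalf of an uncached write, could pass IOMAP_UNCACHED so that any
 * folios instantiated there are also marked for drop-on-writeback.
 */
static int example_zero_eof_uncached(struct kiocb *iocb, struct inode *inode,
				     loff_t old_isize,
				     const struct iomap_ops *ops)
{
	unsigned int iter_flags = 0;
	bool did_zero = false;

	/* Only a write that extends the file needs EOF zeroing */
	if (iocb->ki_pos <= old_isize)
		return 0;

	/* Carry the uncached hint from the iocb into the iomap iter flags */
	if (iocb->ki_flags & IOCB_UNCACHED)
		iter_flags |= IOMAP_UNCACHED;

	/* Invented variant of iomap_zero_range() that accepts iter flags */
	return iomap_zero_range_flags(inode, old_isize,
				      iocb->ki_pos - old_isize,
				      &did_zero, ops, iter_flags);
}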
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ef0b68bccbb6..2f2a5db04a68 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -603,6 +603,8 @@ struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
 
 	if (iter->flags & IOMAP_NOWAIT)
 		fgp |= FGP_NOWAIT;
+	if (iter->flags & IOMAP_UNCACHED)
+		fgp |= FGP_UNCACHED;
 	fgp |= fgf_set_order(len);
 
 	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
@@ -1023,8 +1025,9 @@ ssize_t
 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 		const struct iomap_ops *ops, void *private)
 {
+	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct iomap_iter iter = {
-		.inode		= iocb->ki_filp->f_mapping->host,
+		.inode		= mapping->host,
 		.pos		= iocb->ki_pos,
 		.len		= iov_iter_count(i),
 		.flags		= IOMAP_WRITE,
@@ -1034,9 +1037,14 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		iter.flags |= IOMAP_NOWAIT;
+	if (iocb->ki_flags & IOCB_UNCACHED)
+		iter.flags |= IOMAP_UNCACHED;
 
-	while ((ret = iomap_iter(&iter, ops)) > 0)
+	while ((ret = iomap_iter(&iter, ops)) > 0) {
+		if (iocb->ki_flags & IOCB_UNCACHED)
+			iter.iomap.flags |= IOMAP_F_UNCACHED;
 		iter.processed = iomap_write_iter(&iter, i);
+	}
 
 	if (unlikely(iter.pos == iocb->ki_pos))
 		return ret;
@@ -1770,6 +1778,9 @@ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
 	size_t poff = offset_in_folio(folio, pos);
 	int error;
 
+	if (folio_test_uncached(folio))
+		wpc->iomap.flags |= IOMAP_F_UNCACHED;
+
 	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
 new_ioend:
 		error = iomap_submit_ioend(wpc, 0);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index f61407e3b121..2efc72df19a2 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -64,6 +64,7 @@ struct vm_fault;
 #define IOMAP_F_BUFFER_HEAD	0
 #endif /* CONFIG_BUFFER_HEAD */
 #define IOMAP_F_XATTR		(1U << 5)
+#define IOMAP_F_UNCACHED	(1U << 6)
 
 /*
  * Flags set by the core iomap code during operations:
@@ -173,8 +174,9 @@ struct iomap_folio_ops {
 #define IOMAP_NOWAIT		(1 << 5) /* do not block */
 #define IOMAP_OVERWRITE_ONLY	(1 << 6) /* only pure overwrites allowed */
 #define IOMAP_UNSHARE		(1 << 7) /* unshare_file_range */
+#define IOMAP_UNCACHED		(1 << 8) /* uncached IO */
 #ifdef CONFIG_FS_DAX
-#define IOMAP_DAX		(1 << 8) /* DAX mapping */
+#define IOMAP_DAX		(1 << 9) /* DAX mapping */
 #else
 #define IOMAP_DAX		0
 #endif /* CONFIG_FS_DAX */
Add iomap buffered write support for RWF_UNCACHED. If RWF_UNCACHED is
set for a write, mark the folios being written with drop_writeback. Then
writeback completion will drop the pages. The write_iter handler simply
kicks off writeback for the pages, and writeback completion will take
care of the rest.

This still needs the user of the iomap buffered write helpers to call
iocb_uncached_write() upon successful issue of the writes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/iomap/buffered-io.c | 15 +++++++++++++--
 include/linux/iomap.h  |  4 +++-
 2 files changed, 16 insertions(+), 3 deletions(-)
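As a caller-side illustration of the note above, here is a rough sketch of
what a filesystem's buffered write path might do on top of this patch. The
exact signature of iocb_uncached_write() (introduced elsewhere in this
series) is assumed here, and locking, O_DSYNC handling, and error paths that
a real filesystem needs are omitted.

/*
 * Rough sketch of the caller-side obligation described in the commit
 * message: issue the buffered write through the iomap helper, then
 * kick writeback for the uncached range so that writeback completion
 * can drop the folios again. The iocb_uncached_write() signature is
 * assumed for illustration.
 */
static ssize_t example_buffered_write(struct kiocb *iocb, struct iov_iter *from,
				      const struct iomap_ops *ops)
{
	ssize_t ret;

	ret = iomap_file_buffered_write(iocb, from, ops, NULL);
	if (ret > 0 && (iocb->ki_flags & IOCB_UNCACHED)) {
		/* Start writeback so completion can prune the written range */
		iocb_uncached_write(iocb, ret);
	}
	return ret;
}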