Message ID | 20230710130253.3484695-10-willy@infradead.org (mailing list archive) |
---|---|
State | Deferred, archived |
Series | Create large folios in iomap buffered write path |
On Mon, Jul 10, 2023 at 02:02:53PM +0100, Matthew Wilcox (Oracle) wrote:
> If we have a large folio, we can copy in larger chunks than PAGE_SIZE.
> Start at the maximum page cache size and shrink by half every time we
> hit the "we are short on memory" problem.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Christoph Hellwig <hch@lst.de>

Looks good!
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

> ---
>  fs/iomap/buffered-io.c | 32 +++++++++++++++++---------------
>  1 file changed, 17 insertions(+), 15 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index 2d3e90f4d16e..f21f1f641c4a 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
> @@ -769,6 +769,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
>  static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>  {
>  	loff_t length = iomap_length(iter);
> +	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
>  	loff_t pos = iter->pos;
>  	ssize_t written = 0;
>  	long status = 0;
> @@ -777,15 +778,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>
>  	do {
>  		struct folio *folio;
> -		struct page *page;
> -		unsigned long offset;	/* Offset into pagecache page */
> -		unsigned long bytes;	/* Bytes to write to page */
> +		size_t offset;		/* Offset into folio */
> +		size_t bytes;		/* Bytes to write to folio */
>  		size_t copied;		/* Bytes copied from user */
>
> -		offset = offset_in_page(pos);
> -		bytes = min_t(unsigned long, PAGE_SIZE - offset,
> -						iov_iter_count(i));
> -again:
> +		offset = pos & (chunk - 1);
> +		bytes = min(chunk - offset, iov_iter_count(i));
>  		status = balance_dirty_pages_ratelimited_flags(mapping,
>  							       bdp_flags);
>  		if (unlikely(status))
> @@ -815,12 +813,14 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>  		if (iter->iomap.flags & IOMAP_F_STALE)
>  			break;
>
> -		page = folio_file_page(folio, pos >> PAGE_SHIFT);
> -		if (mapping_writably_mapped(mapping))
> -			flush_dcache_page(page);
> +		offset = offset_in_folio(folio, pos);
> +		if (bytes > folio_size(folio) - offset)
> +			bytes = folio_size(folio) - offset;
>
> -		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
> +		if (mapping_writably_mapped(mapping))
> +			flush_dcache_folio(folio);
>
> +		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
>  		status = iomap_write_end(iter, pos, bytes, copied, folio);
>
>  		if (unlikely(copied != status))
> @@ -836,11 +836,13 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
>  		 */
>  		if (copied)
>  			bytes = copied;
> -			goto again;
> +			if (chunk > PAGE_SIZE)
> +				chunk /= 2;
> +		} else {
> +			pos += status;
> +			written += status;
> +			length -= status;
>  		}
> -		pos += status;
> -		written += status;
> -		length -= status;
>  	} while (iov_iter_count(i) && length);
>
>  	if (status == -EAGAIN) {
> --
> 2.39.2
>
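The backoff heuristic is easy to see in isolation. Below is a minimal userspace sketch of the same shape, not the kernel code: `flaky_copy` and `copy_with_backoff` are hypothetical names, and the 64KiB failure threshold is invented for the demo. The loop starts at the largest chunk and halves it whenever a copy makes no progress, mirroring the `chunk /= 2` path in the patch.

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_MAX_ORDER	9	/* stand-in for MAX_PAGECACHE_ORDER */

/*
 * Hypothetical copy primitive that may make no progress on large
 * requests, standing in for copy_folio_from_iter_atomic() hitting the
 * "we are short on memory" problem.
 */
static size_t flaky_copy(char *dst, const char *src, size_t len)
{
	size_t copied = len > 65536 ? 0 : len;	/* invented threshold */

	memcpy(dst, src, copied);
	return copied;
}

/*
 * Copy `count` bytes, starting with the largest chunk and halving the
 * chunk size whenever a copy makes no progress, down to one page.
 */
static size_t copy_with_backoff(char *dst, const char *src, size_t count)
{
	size_t chunk = SKETCH_PAGE_SIZE << SKETCH_MAX_ORDER;
	size_t done = 0;

	while (done < count) {
		size_t bytes = count - done < chunk ? count - done : chunk;
		size_t copied = flaky_copy(dst + done, src + done, bytes);

		if (copied == 0) {
			if (chunk > SKETCH_PAGE_SIZE)
				chunk /= 2;	/* retry with half the chunk */
			else
				break;		/* one page failed: give up */
		}
		done += copied;
	}
	return done;
}

int main(void)
{
	static char src[1 << 21], dst[1 << 21];	/* 2MiB each */
	size_t n = copy_with_backoff(dst, src, sizeof(src));

	printf("copied %zu of %zu bytes\n", n, sizeof(src));
	return 0;
}
```

Note that the sketch, like the patch, never grows the chunk back: once memory pressure forced a smaller copy size, it stays small for the rest of the write.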
```diff
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 2d3e90f4d16e..f21f1f641c4a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -769,6 +769,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 {
 	loff_t length = iomap_length(iter);
+	size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
 	loff_t pos = iter->pos;
 	ssize_t written = 0;
 	long status = 0;
@@ -777,15 +778,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 
 	do {
 		struct folio *folio;
-		struct page *page;
-		unsigned long offset;	/* Offset into pagecache page */
-		unsigned long bytes;	/* Bytes to write to page */
+		size_t offset;		/* Offset into folio */
+		size_t bytes;		/* Bytes to write to folio */
 		size_t copied;		/* Bytes copied from user */
 
-		offset = offset_in_page(pos);
-		bytes = min_t(unsigned long, PAGE_SIZE - offset,
-						iov_iter_count(i));
-again:
+		offset = pos & (chunk - 1);
+		bytes = min(chunk - offset, iov_iter_count(i));
 		status = balance_dirty_pages_ratelimited_flags(mapping,
 							       bdp_flags);
 		if (unlikely(status))
@@ -815,12 +813,14 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 		if (iter->iomap.flags & IOMAP_F_STALE)
 			break;
 
-		page = folio_file_page(folio, pos >> PAGE_SHIFT);
-		if (mapping_writably_mapped(mapping))
-			flush_dcache_page(page);
+		offset = offset_in_folio(folio, pos);
+		if (bytes > folio_size(folio) - offset)
+			bytes = folio_size(folio) - offset;
 
-		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_folio(folio);
 
+		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
 		status = iomap_write_end(iter, pos, bytes, copied, folio);
 
 		if (unlikely(copied != status))
@@ -836,11 +836,13 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 		 */
 		if (copied)
 			bytes = copied;
-			goto again;
+			if (chunk > PAGE_SIZE)
+				chunk /= 2;
+		} else {
+			pos += status;
+			written += status;
+			length -= status;
 		}
-		pos += status;
-		written += status;
-		length -= status;
 	} while (iov_iter_count(i) && length);
 
 	if (status == -EAGAIN) {
```
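A side note on the arithmetic in the first hunk: `offset = pos & (chunk - 1)` is equivalent to `pos % chunk` only because `chunk` begins as `PAGE_SIZE << MAX_PAGECACHE_ORDER` and is only ever halved, so it remains a power of two. A standalone check of that identity (plain C with made-up values, not kernel code):

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	/*
	 * chunk stays a power of two: it starts at a page size shifted
	 * by an order and is only ever divided by two, so masking with
	 * (chunk - 1) is a valid modulo.
	 */
	unsigned long chunk = 4096UL << 9;	/* 2MiB, power of two */
	unsigned long pos;

	for (pos = 0; pos < 8 * chunk; pos += 123457)
		assert((pos & (chunk - 1)) == pos % chunk);

	printf("offset of pos %#lx within a %#lx chunk: %#lx\n",
	       0x345678UL, chunk, 0x345678UL & (chunk - 1));
	return 0;
}
```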