[v2,16/25] iomap: Support large pages in read paths

Message ID 20200212041845.25879-17-willy@infradead.org
State New, archived
Series Large pages in the page cache

Commit Message

Matthew Wilcox Feb. 12, 2020, 4:18 a.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Use thp_size() instead of PAGE_SIZE, use offset_in_this_page(), and
abstract away how to access the list of readahead pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)
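
For reference, this patch uses helpers introduced earlier in the
series rather than defined here.  A rough sketch of their likely
shape, for context only (approximations reconstructed from how the
patch uses them; the authoritative definitions are in the earlier
patches of the series):

	/* Approximate sketches -- not taken from this posting. */
	static inline unsigned long thp_size(struct page *page)
	{
		/* Bytes covered by this (possibly compound) page. */
		return PAGE_SIZE << compound_order(page);
	}

	static inline unsigned int page_shift(struct page *page)
	{
		/* log2 of thp_size(page). */
		return PAGE_SHIFT + compound_order(page);
	}

	/* Byte offset of p within this (possibly compound) page. */
	#define offset_in_this_page(page, p) \
		((unsigned long)(p) & (thp_size(page) - 1))

	/* Byte offset of the page in its file; head pages keep their
	 * index in PAGE_SIZE units, so this matches page_offset(). */
	static inline loff_t file_offset_of_page(struct page *page)
	{
		return (loff_t)page->index << PAGE_SHIFT;
	}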

Comments

Christoph Hellwig Feb. 12, 2020, 8:13 a.m. UTC | #1
> +/*
> + * Estimate the number of vectors we need based on the current page size;
> + * if we're wrong we'll end up doing an overly large allocation or needing
> + * to do a second allocation, neither of which is a big deal.
> + */
> +static unsigned int iomap_nr_vecs(struct page *page, loff_t length)
> +{
> +	return (length + thp_size(page) - 1) >> page_shift(page);
> +}

With multipage bvecs, a huge page (or any physically contiguous piece
of memory) will always use at most one bio_vec, or none if it is merged
into the previous one.  So this can be simplified and optimized.
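
The merge Christoph refers to happens as the bio is filled.  Roughly,
bio_add_page() in this era of the block layer first tries to extend
the last bio_vec before consuming a new one (a sketch of block/bio.c
for context, not part of this patch):

	int bio_add_page(struct bio *bio, struct page *page,
			 unsigned int len, unsigned int offset)
	{
		bool same_page = false;

		/* Extend the previous bio_vec if this range is
		 * physically contiguous with its end ... */
		if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (bio_full(bio, len))
				return 0;
			/* ... otherwise start a new bio_vec. */
			__bio_add_page(bio, page, len, offset);
		}
		return len;
	}

A THP is physically contiguous by construction, so it adds at most one
bio_vec, and none at all if it happens to abut the previous segment.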
Matthew Wilcox Feb. 12, 2020, 5:45 p.m. UTC | #2
On Wed, Feb 12, 2020 at 12:13:40AM -0800, Christoph Hellwig wrote:
> > +/*
> > + * Estimate the number of vectors we need based on the current page size;
> > + * if we're wrong we'll end up doing an overly large allocation or needing
> > + * to do a second allocation, neither of which is a big deal.
> > + */
> > +static unsigned int iomap_nr_vecs(struct page *page, loff_t length)
> > +{
> > +	return (length + thp_size(page) - 1) >> page_shift(page);
> > +}
> 
> > With multipage bvecs, a huge page (or any physically contiguous piece
> > of memory) will always use at most one bio_vec, or none if it is merged
> > into the previous one.  So this can be simplified and optimized.

Oh, hm, right.  So what you really want here is for me to pass in the
number of thp pages allocated by the readahead operation.  rac->nr_pages
is the number of PAGE_SIZE pages, but we could have an rac->nr_segs
element as well.
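
A sketch of where that reply points (hypothetical -- rac->nr_segs does
not exist in this posting): the readahead control would count allocated
pages of whatever size alongside the PAGE_SIZE count, and the actor
could size its bio from that directly instead of estimating from the
length:

	struct readahead_control {
		/* ... existing fields ... */
		unsigned int nr_pages;	/* remaining, in PAGE_SIZE units */
		unsigned int nr_segs;	/* remaining pages, of any size */
	};

	/* In iomap_readpage_actor(), something like: */
	int nr_vecs = ctx->rac ? ctx->rac->nr_segs
			       : iomap_nr_vecs(page, length);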

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e522039f627f..68f8903ecd6d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -179,14 +179,16 @@ iomap_read_finish(struct iomap_page *iop, struct page *page)
 static void
 iomap_read_page_end_io(struct bio_vec *bvec, int error)
 {
-	struct page *page = bvec->bv_page;
+	struct page *page = compound_head(bvec->bv_page);
 	struct iomap_page *iop = to_iomap_page(page);
+	unsigned offset = bvec->bv_offset +
+				PAGE_SIZE * (bvec->bv_page - page);
 
 	if (unlikely(error)) {
 		ClearPageUptodate(page);
 		SetPageError(page);
 	} else {
-		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+		iomap_set_range_uptodate(page, offset, bvec->bv_len);
 	}
 
 	iomap_read_finish(iop, page);
@@ -239,6 +241,16 @@ static inline bool iomap_block_needs_zeroing(struct inode *inode,
 		pos >= i_size_read(inode);
 }
 
+/*
+ * Estimate the number of vectors we need based on the current page size;
+ * if we're wrong we'll end up doing an overly large allocation or needing
+ * to do a second allocation, neither of which is a big deal.
+ */
+static unsigned int iomap_nr_vecs(struct page *page, loff_t length)
+{
+	return (length + thp_size(page) - 1) >> page_shift(page);
+}
+
 static loff_t
 iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap, struct iomap *srcmap)
@@ -263,7 +275,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		goto done;
 
 	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
-		zero_user(page, poff, plen);
+		zero_user_large(page, poff, plen);
 		iomap_set_range_uptodate(page, poff, plen);
 		goto done;
 	}
@@ -294,7 +306,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 
 	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
-		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		int nr_vecs = iomap_nr_vecs(page, length);
 
 		if (ctx->bio)
 			submit_bio(ctx->bio);
@@ -331,9 +343,9 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
 
 	trace_iomap_readpage(page->mapping->host, 1);
 
-	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
-		ret = iomap_apply(inode, page_offset(page) + poff,
-				PAGE_SIZE - poff, 0, ops, &ctx,
+	for (poff = 0; poff < thp_size(page); poff += ret) {
+		ret = iomap_apply(inode, file_offset_of_page(page) + poff,
+				thp_size(page) - poff, 0, ops, &ctx,
 				iomap_readpage_actor);
 		if (ret <= 0) {
 			WARN_ON_ONCE(ret == 0);
@@ -376,7 +388,7 @@ iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
 		if (WARN_ON(ret == 0))
 			break;
 		done += ret;
-		if (offset_in_page(pos + done) == 0) {
+		if (offset_in_this_page(ctx->cur_page, pos + done) == 0) {
 			ctx->rac->nr_pages -= ctx->rac->batch_count;
 			if (!ctx->cur_page_in_bio)
 				unlock_page(ctx->cur_page);
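
One subtlety worth spelling out in the end_io change above:
bvec->bv_page may point at a tail page of a THP while the iomap_page
state hangs off the head page, so the offset has to be rebased.  The
same computation, annotated (a restatement of the hunk above, not new
code):

	struct page *head = compound_head(bvec->bv_page);
	/*
	 * bv_page - head is the tail page's index within the compound
	 * page, so this turns (tail page, bv_offset) into a byte offset
	 * relative to the head page, which is what
	 * iomap_set_range_uptodate() expects here.
	 */
	unsigned offset = bvec->bv_offset +
				PAGE_SIZE * (bvec->bv_page - head);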