
[03/14] iomap: Support THPs in BIO completion path

Message ID: 20201014030357.21898-4-willy@infradead.org
State: Deferred, archived
Series: Transparent Huge Page support for XFS

Commit Message

Matthew Wilcox Oct. 14, 2020, 3:03 a.m. UTC
bio_for_each_segment_all() iterates once per regular sized page.
Use bio_for_each_bvec_all() to iterate once per bvec and handle
merged THPs ourselves, instead of teaching the block layer about THPs.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 62 ++++++++++++++++++++++++++++++------------
 1 file changed, 44 insertions(+), 18 deletions(-)
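
For readers unfamiliar with the two iterators: bio_for_each_segment_all() yields one PAGE_SIZE segment per iteration, whereas bio_for_each_bvec_all() yields the raw (possibly multi-page) bvecs, so the completion handlers must split each bvec at THP boundaries themselves. The standalone sketch below is not kernel code; struct page, thp_size() and the read/write completion helpers are mocked with plain integers purely to illustrate the loop that iomap_finish_bvec_read()/iomap_finish_bvec_write() add in this patch.

/*
 * Userspace sketch of the completion-path split added by this patch:
 * a single bvec may cover several base pages (or whole THPs), so the
 * handler walks it in thp_size() chunks and ends IO on each compound
 * page separately.  All kernel types and helpers are mocked here.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define THP_SIZE	(512 * PAGE_SIZE)	/* pretend every compound page is a 2MB THP */

/* stand-in for the kernel's thp_size(page) */
static size_t thp_size(unsigned long page_index)
{
	(void)page_index;
	return THP_SIZE;
}

/* stand-in for iomap_finish_page_read()/iomap_finish_page_write() */
static void finish_page(unsigned long page_index, size_t offset, size_t length)
{
	printf("page %lu: complete bytes [%zu, %zu)\n",
	       page_index, offset, offset + length);
}

/* mirrors the loop in iomap_finish_bvec_read()/iomap_finish_bvec_write() */
static void finish_bvec(unsigned long page_index, size_t offset, size_t length)
{
	while (length > 0) {
		size_t count = thp_size(page_index) - offset;

		if (count > length)
			count = length;
		finish_page(page_index, offset, count);

		/* advance to the next compound page covered by this bvec */
		page_index += (offset + count) / PAGE_SIZE;
		offset = 0;
		length -= count;
	}
}

int main(void)
{
	/* a single bvec starting 1MB into one THP and spilling 2MB into the next */
	finish_bvec(0, 1024 * 1024, 3 * 1024 * 1024);
	return 0;
}

Run against the example in main(), this reports the first compound page completing bytes [1MB, 2MB) and the next completing [0, 2MB): one completion call per THP touched by the bvec, which is the behaviour the new helpers implement instead of relying on per-PAGE_SIZE segments from the block layer.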

Comments

Christoph Hellwig Oct. 15, 2020, 9:50 a.m. UTC | #1
On Wed, Oct 14, 2020 at 04:03:46AM +0100, Matthew Wilcox (Oracle) wrote:
> bio_for_each_segment_all() iterates once per regular sized page.
> Use bio_for_each_bvec_all() to iterate once per bvec and handle
> merged THPs ourselves, instead of teaching the block layer about THPs.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

This seems to conflict with your synchronous readpage series..

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 3e1eb40a73fd..935468d79d9d 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -167,32 +167,45 @@  iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 		SetPageUptodate(page);
 }
 
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
+static void iomap_finish_page_read(struct page *page, size_t offset,
+		size_t length, int error)
 {
-	struct page *page = bvec->bv_page;
 	struct iomap_page *iop = to_iomap_page(page);
 
 	if (unlikely(error)) {
 		ClearPageUptodate(page);
 		SetPageError(page);
 	} else {
-		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+		iomap_set_range_uptodate(page, offset, length);
 	}
 
-	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
+	if (!iop || atomic_sub_and_test(length, &iop->read_bytes_pending))
 		unlock_page(page);
 }
 
-static void
-iomap_read_end_io(struct bio *bio)
+static void iomap_finish_bvec_read(struct page *page, size_t offset,
+		size_t length, int error)
+{
+	while (length > 0) {
+		size_t count = min(thp_size(page) - offset, length);
+
+		iomap_finish_page_read(page, offset, count, error);
+
+		page += (offset + count) / PAGE_SIZE;
+		offset = 0;
+		length -= count;
+	}
+}
+
+static void iomap_read_end_io(struct bio *bio)
 {
-	int error = blk_status_to_errno(bio->bi_status);
+	int i, error = blk_status_to_errno(bio->bi_status);
 	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
 
-	bio_for_each_segment_all(bvec, bio, iter_all)
-		iomap_read_page_end_io(bvec, error);
+	bio_for_each_bvec_all(bvec, bio, i)
+		iomap_finish_bvec_read(bvec->bv_page, bvec->bv_offset,
+				bvec->bv_len, error);
+
 	bio_put(bio);
 }
 
@@ -1035,9 +1048,8 @@  vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
 
-static void
-iomap_finish_page_writeback(struct inode *inode, struct page *page,
-		int error, unsigned int len)
+static void iomap_finish_page_write(struct inode *inode, struct page *page,
+		unsigned int len, int error)
 {
 	struct iomap_page *iop = to_iomap_page(page);
 
@@ -1053,6 +1065,20 @@  iomap_finish_page_writeback(struct inode *inode, struct page *page,
 		end_page_writeback(page);
 }
 
+static void iomap_finish_bvec_write(struct inode *inode, struct page *page,
+		size_t offset, size_t length, int error)
+{
+	while (length > 0) {
+		size_t count = min(thp_size(page) - offset, length);
+
+		iomap_finish_page_write(inode, page, count, error);
+
+		page += (offset + count) / PAGE_SIZE;
+		offset = 0;
+		length -= count;
+	}
+}
+
 /*
  * We're now finished for good with this ioend structure.  Update the page
  * state, release holds on bios, and finally free up memory.  Do not use the
@@ -1070,7 +1096,7 @@  iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 
 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
 		struct bio_vec *bv;
-		struct bvec_iter_all iter_all;
+		int i;
 
 		/*
 		 * For the last bio, bi_private points to the ioend, so we
@@ -1082,9 +1108,9 @@  iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 			next = bio->bi_private;
 
 		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bv, bio, iter_all)
-			iomap_finish_page_writeback(inode, bv->bv_page, error,
-					bv->bv_len);
+		bio_for_each_bvec_all(bv, bio, i)
+			iomap_finish_bvec_write(inode, bv->bv_page,
+					bv->bv_offset, bv->bv_len, error);
 		bio_put(bio);
 	}
 	/* The ioend has been freed by bio_put() */