
[01/14] iomap: add wrapper to pass readpage_ctx to read path

Message ID 20241229133836.1194272-2-aalbersh@kernel.org (mailing list archive)
State New
Series Direct mapped extended attribute data

Commit Message

Andrey Albershteyn Dec. 29, 2024, 1:38 p.m. UTC
Allow filesystems to create the readpage context themselves, similar
to iomap_writepage_ctx in the write path. This will let a filesystem
pass _ops to iomap for ioend configuration (->prepare_ioend), which in
turn can be used to set the bio completion callback (bio->bi_end_io).

Signed-off-by: Andrey Albershteyn <aalbersh@kernel.org>
---
 fs/iomap/buffered-io.c | 76 ++++++++++++++++++++++++------------------
 include/linux/iomap.h  | 12 +++++++
 2 files changed, 55 insertions(+), 33 deletions(-)
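
As a rough illustration of how a filesystem could adopt the new entry
point, a ->read_folio implementation might build the context itself
instead of calling iomap_read_folio(). This is only a sketch;
myfs_read_folio and myfs_iomap_ops are made-up names and not part of
this patch:

/* Hypothetical example: construct the readpage context in the
 * filesystem and hand it to iomap explicitly. */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};

	return iomap_read_folio_ctx(&ctx, &myfs_iomap_ops);
}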

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 0f33ac975209..0d9291719d75 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -320,14 +320,6 @@  static void iomap_read_end_io(struct bio *bio)
 	bio_put(bio);
 }
 
-struct iomap_readpage_ctx {
-	struct folio		*cur_folio;
-	bool			cur_folio_in_bio;
-	struct bio		*bio;
-	struct readahead_control *rac;
-	int			flags;
-};
-
 /**
  * iomap_read_inline_data - copy inline data into the page cache
  * @iter: iteration structure
@@ -461,28 +453,27 @@  static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
 	return done;
 }
 
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
+int iomap_read_folio_ctx(struct iomap_readpage_ctx *ctx,
+		const struct iomap_ops *ops)
 {
+	struct folio *folio = ctx->cur_folio;
 	struct iomap_iter iter = {
 		.inode		= folio->mapping->host,
 		.pos		= folio_pos(folio),
 		.len		= folio_size(folio),
 	};
-	struct iomap_readpage_ctx ctx = {
-		.cur_folio	= folio,
-	};
 	int ret;
 
 	trace_iomap_readpage(iter.inode, 1);
 
 	while ((ret = iomap_iter(&iter, ops)) > 0)
-		iter.processed = iomap_read_folio_iter(&iter, &ctx);
+		iter.processed = iomap_read_folio_iter(&iter, ctx);
 
-	if (ctx.bio) {
-		submit_bio(ctx.bio);
-		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
+	if (ctx->bio) {
+		submit_bio(ctx->bio);
+		WARN_ON_ONCE(!ctx->cur_folio_in_bio);
 	} else {
-		WARN_ON_ONCE(ctx.cur_folio_in_bio);
+		WARN_ON_ONCE(ctx->cur_folio_in_bio);
 		folio_unlock(folio);
 	}
 
@@ -493,6 +484,16 @@  int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
 	 */
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iomap_read_folio_ctx);
+
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
+{
+	struct iomap_readpage_ctx ctx = {
+		.cur_folio	= folio,
+	};
+
+	return iomap_read_folio_ctx(&ctx, ops);
+}
 EXPORT_SYMBOL_GPL(iomap_read_folio);
 
 static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
@@ -520,6 +521,30 @@  static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
 	return done;
 }
 
+void iomap_readahead_ctx(struct iomap_readpage_ctx *ctx,
+		const struct iomap_ops *ops)
+{
+	struct readahead_control *rac = ctx->rac;
+	struct iomap_iter iter = {
+		.inode	= rac->mapping->host,
+		.pos	= readahead_pos(rac),
+		.len	= readahead_length(rac),
+	};
+
+	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
+
+	while (iomap_iter(&iter, ops) > 0)
+		iter.processed = iomap_readahead_iter(&iter, ctx);
+
+	if (ctx->bio)
+		submit_bio(ctx->bio);
+	if (ctx->cur_folio) {
+		if (!ctx->cur_folio_in_bio)
+			folio_unlock(ctx->cur_folio);
+	}
+}
+EXPORT_SYMBOL_GPL(iomap_readahead_ctx);
+
 /**
  * iomap_readahead - Attempt to read pages from a file.
  * @rac: Describes the pages to be read.
@@ -537,26 +562,11 @@  static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
  */
 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
 {
-	struct iomap_iter iter = {
-		.inode	= rac->mapping->host,
-		.pos	= readahead_pos(rac),
-		.len	= readahead_length(rac),
-	};
 	struct iomap_readpage_ctx ctx = {
 		.rac	= rac,
 	};
 
-	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
-
-	while (iomap_iter(&iter, ops) > 0)
-		iter.processed = iomap_readahead_iter(&iter, &ctx);
-
-	if (ctx.bio)
-		submit_bio(ctx.bio);
-	if (ctx.cur_folio) {
-		if (!ctx.cur_folio_in_bio)
-			folio_unlock(ctx.cur_folio);
-	}
+	iomap_readahead_ctx(&ctx, ops);
 }
 EXPORT_SYMBOL_GPL(iomap_readahead);
 
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3297ed36c26b..b5ae08955c87 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -296,9 +296,21 @@  static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
 		iter->srcmap.type == IOMAP_MAPPED;
 }
 
+struct iomap_readpage_ctx {
+	struct folio			*cur_folio;
+	bool				cur_folio_in_bio;
+	struct bio			*bio;
+	struct readahead_control	*rac;
+	int				flags;
+};
+
 ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
 		const struct iomap_ops *ops, void *private);
+int iomap_read_folio_ctx(struct iomap_readpage_ctx *ctx,
+		const struct iomap_ops *ops);
 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
+void iomap_readahead_ctx(struct iomap_readpage_ctx *ctx,
+		const struct iomap_ops *ops);
 void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
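
The readahead side mirrors this. Again purely illustrative, with
myfs_readahead and myfs_iomap_ops as placeholder names:

/* Hypothetical example: a filesystem ->readahead that supplies its own
 * context to iomap_readahead_ctx(). */
static void myfs_readahead(struct readahead_control *rac)
{
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	iomap_readahead_ctx(&ctx, &myfs_iomap_ops);
}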