@@ -505,12 +505,12 @@ const struct address_space_operations def_blk_aops = {
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
- return iomap_read_folio(folio, &blkdev_iomap_ops);
+ return iomap_read_folio(folio, &blkdev_iomap_ops, NULL);
}
static void blkdev_readahead(struct readahead_control *rac)
{
- iomap_readahead(rac, &blkdev_iomap_ops);
+ iomap_readahead(rac, &blkdev_iomap_ops, NULL);
}
static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
@@ -370,12 +370,12 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
static int erofs_read_folio(struct file *file, struct folio *folio)
{
- return iomap_read_folio(folio, &erofs_iomap_ops);
+ return iomap_read_folio(folio, &erofs_iomap_ops, NULL);
}
static void erofs_readahead(struct readahead_control *rac)
{
- return iomap_readahead(rac, &erofs_iomap_ops);
+ return iomap_readahead(rac, &erofs_iomap_ops, NULL);
}
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
@@ -422,7 +422,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
if (!gfs2_is_jdata(ip) ||
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
- error = iomap_read_folio(folio, &gfs2_iomap_ops);
+ error = iomap_read_folio(folio, &gfs2_iomap_ops, NULL);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_read_folio(ip, folio);
} else {
@@ -497,7 +497,7 @@ static void gfs2_readahead(struct readahead_control *rac)
else if (gfs2_is_jdata(ip))
mpage_readahead(rac, gfs2_block_map);
else
- iomap_readahead(rac, &gfs2_iomap_ops);
+ iomap_readahead(rac, &gfs2_iomap_ops, NULL);
}
/**
@@ -320,7 +320,9 @@ struct iomap_readpage_ctx {
struct folio *cur_folio;
bool cur_folio_in_bio;
struct bio *bio;
+ loff_t bio_start_pos;
struct readahead_control *rac;
+ const struct iomap_read_folio_ops *ops;
};
/**
@@ -362,6 +364,15 @@ static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
pos >= i_size_read(iter->inode);
}
+static void iomap_read_submit_bio(const struct iomap_iter *iter,
+ struct iomap_readpage_ctx *ctx)
+{
+ if (ctx->ops && ctx->ops->submit_io)
+ ctx->ops->submit_io(iter->inode, ctx->bio, ctx->bio_start_pos);
+ else
+ submit_bio(ctx->bio);
+}
+
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx, loff_t offset)
{
@@ -405,8 +416,9 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
if (ctx->bio)
- submit_bio(ctx->bio);
+ iomap_read_submit_bio(iter, ctx);
+ ctx->bio_start_pos = offset;
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
@@ -455,7 +467,8 @@ static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
return done;
}
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *read_folio_ops)
{
struct iomap_iter iter = {
.inode = folio->mapping->host,
@@ -464,6 +477,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
};
struct iomap_readpage_ctx ctx = {
.cur_folio = folio,
+ .ops = read_folio_ops,
};
int ret;
@@ -473,7 +487,7 @@ int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
iter.processed = iomap_read_folio_iter(&iter, &ctx);
if (ctx.bio) {
- submit_bio(ctx.bio);
+ iomap_read_submit_bio(&iter, &ctx);
WARN_ON_ONCE(!ctx.cur_folio_in_bio);
} else {
WARN_ON_ONCE(ctx.cur_folio_in_bio);
@@ -518,6 +532,7 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
* iomap_readahead - Attempt to read pages from a file.
* @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
+ * @read_folio_ops: Optional hooks for filesystems that need custom bio submission
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
@@ -529,7 +544,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
-void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
+void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *read_folio_ops)
{
struct iomap_iter iter = {
.inode = rac->mapping->host,
@@ -538,6 +554,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
};
struct iomap_readpage_ctx ctx = {
.rac = rac,
+ .ops = read_folio_ops,
};
trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
@@ -546,7 +563,7 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
iter.processed = iomap_readahead_iter(&iter, &ctx);
if (ctx.bio)
- submit_bio(ctx.bio);
+ iomap_read_submit_bio(&iter, &ctx);
if (ctx.cur_folio) {
if (!ctx.cur_folio_in_bio)
folio_unlock(ctx.cur_folio);
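Taken together, the iomap changes record the file position of the first byte of each read bio in ctx->bio_start_pos and route every submission through iomap_read_submit_bio(), which calls the filesystem's submit_io hook when one is supplied and falls back to plain submit_bio() otherwise. As a hedged sketch of what such a hook might look like (every "my_"-prefixed name below is hypothetical, not part of this patch), a filesystem that verifies checksums at read completion could wrap the bio's end_io before submitting:

#include <linux/bio.h>
#include <linux/slab.h>

/* Hypothetical helper that checks the bio's data against stored csums. */
extern void my_verify_read_bio(struct inode *inode, struct bio *bio,
		loff_t file_offset);

struct my_read_bio_private {
	struct inode	*inode;
	loff_t		file_offset;	/* file position of the bio's first byte */
	bio_end_io_t	*orig_end_io;
	void		*orig_private;
};

/* Completion: verify the data, then hand the bio back to iomap. */
static void my_read_end_io(struct bio *bio)
{
	struct my_read_bio_private *priv = bio->bi_private;

	my_verify_read_bio(priv->inode, bio, priv->file_offset);

	bio->bi_end_io = priv->orig_end_io;
	bio->bi_private = priv->orig_private;
	kfree(priv);
	bio->bi_end_io(bio);	/* iomap's completion finishes the folios */
}

static void my_submit_io(struct inode *inode, struct bio *bio,
		loff_t file_offset)
{
	struct my_read_bio_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_NOFS);
	if (priv) {
		priv->inode = inode;
		priv->file_offset = file_offset;
		priv->orig_end_io = bio->bi_end_io;
		priv->orig_private = bio->bi_private;
		bio->bi_private = priv;
		bio->bi_end_io = my_read_end_io;
	}
	submit_bio(bio);
}

If the allocation fails, the sketch simply submits the bio unwrapped; a real filesystem would need its own policy for that case.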
@@ -522,14 +522,14 @@ xfs_vm_read_folio(
struct file *unused,
struct folio *folio)
{
- return iomap_read_folio(folio, &xfs_read_iomap_ops);
+ return iomap_read_folio(folio, &xfs_read_iomap_ops, NULL);
}
STATIC void
xfs_vm_readahead(
struct readahead_control *rac)
{
- iomap_readahead(rac, &xfs_read_iomap_ops);
+ iomap_readahead(rac, &xfs_read_iomap_ops, NULL);
}
static int
@@ -112,12 +112,12 @@ static const struct iomap_ops zonefs_write_iomap_ops = {
static int zonefs_read_folio(struct file *unused, struct folio *folio)
{
- return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+ return iomap_read_folio(folio, &zonefs_read_iomap_ops, NULL);
}
static void zonefs_readahead(struct readahead_control *rac)
{
- iomap_readahead(rac, &zonefs_read_iomap_ops);
+ iomap_readahead(rac, &zonefs_read_iomap_ops, NULL);
}
/*
@@ -303,8 +303,20 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops, void *private);
-int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
+
+struct iomap_read_folio_ops {
+ /*
+ * Optional, allows the filesystem to perform a custom submission of
+ * the bio, e.g. to verify checksums or to split the bio across
+ * multiple devices.
+ */
+ void (*submit_io)(struct inode *inode, struct bio *bio,
+ loff_t file_offset);
+};
+
+int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *);
+void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops,
+ const struct iomap_read_folio_ops *);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
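Existing callers opt out by passing NULL, as the conversions above show, which preserves the plain submit_bio() behaviour. A filesystem that wants the hook wires it up roughly like this (a minimal sketch; my_iomap_ops, my_submit_io and the aops wrappers are hypothetical names, not part of this patch):

/* Hypothetical wiring, for illustration only. */
static const struct iomap_read_folio_ops my_read_folio_ops = {
	.submit_io	= my_submit_io,
};

static int my_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &my_iomap_ops, &my_read_folio_ops);
}

static void my_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &my_iomap_ops, &my_read_folio_ops);
}

Because the ops structure is passed per call rather than stored in the iomap_ops, the same filesystem can use different read hooks for different address spaces without touching its iomap mapping callbacks.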