@@ -41,6 +41,7 @@ struct iomap_folio_state {
};
static struct bio_set iomap_ioend_bioset;
+static struct bio_set iomap_read_ioend_bioset;
static inline bool ifs_is_fully_uptodate(struct folio *folio,
struct iomap_folio_state *ifs)
@@ -310,7 +311,7 @@ static void iomap_finish_folio_read(struct folio *folio, size_t off,
folio_end_read(folio, uptodate);
}
-static void iomap_read_end_io(struct bio *bio)
+void iomap_read_end_io(struct bio *bio)
{
int error = blk_status_to_errno(bio->bi_status);
struct folio_iter fi;
@@ -319,6 +320,7 @@ static void iomap_read_end_io(struct bio *bio)
iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
bio_put(bio);
}
+EXPORT_SYMBOL_GPL(iomap_read_end_io);
/**
* iomap_read_inline_data - copy inline data into the page cache
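The export matters because a filesystem that overrides bi_end_io (via the prepare_ioend hook added to the header below) still needs the common iomap completion once its own work is done. A minimal sketch, assuming a hypothetical myfs_verify_bio() check; none of the myfs_* names are part of this patch:

/*
 * Hypothetical example: a filesystem bi_end_io that runs a quick check,
 * then hands the bio back to the exported iomap_read_end_io() so the
 * folios are completed the usual way.
 */
static bool myfs_verify_bio(struct bio *bio);	/* fs-specific, illustrative */

static void myfs_read_end_io(struct bio *bio)
{
	if (!bio->bi_status && !myfs_verify_bio(bio))
		bio->bi_status = BLK_STS_IOERR;
	iomap_read_end_io(bio);
}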
@@ -371,6 +373,8 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
loff_t orig_pos = pos;
size_t poff, plen;
sector_t sector;
+ struct iomap_read_ioend *ioend;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
if (iomap->type == IOMAP_INLINE)
return iomap_read_inline_data(iter, folio);
@@ -407,21 +411,29 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
- REQ_OP_READ, gfp);
+ ctx->bio = bio_alloc_bioset(iomap->bdev, bio_max_segs(nr_vecs),
+ REQ_OP_READ, gfp, &iomap_read_ioend_bioset);
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_read_folio does.
*/
if (!ctx->bio) {
- ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
- orig_gfp);
+ ctx->bio = bio_alloc_bioset(iomap->bdev, 1,
+ REQ_OP_READ, orig_gfp, &iomap_read_ioend_bioset);
}
if (ctx->rac)
ctx->bio->bi_opf |= REQ_RAHEAD;
ctx->bio->bi_iter.bi_sector = sector;
ctx->bio->bi_end_io = iomap_read_end_io;
+ ioend = container_of(ctx->bio, struct iomap_read_ioend,
+ io_bio);
+ ioend->io_inode = iter->inode;
+ ioend->io_flags = srcmap->flags;
+ ioend->io_offset = pos;
+ ioend->io_size = plen;
+ if (ctx->ops && ctx->ops->prepare_ioend)
+ ctx->ops->prepare_ioend(ioend);
bio_add_folio_nofail(ctx->bio, folio, plen, poff);
}
@@ -2157,6 +2169,18 @@ EXPORT_SYMBOL_GPL(iomap_write_region);
static int __init iomap_buffered_init(void)
{
+ int error;
+
+ error = bioset_init(&iomap_read_ioend_bioset,
+ 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_read_ioend, io_bio),
+ BIOSET_NEED_BVECS);
+ if (error)
+ return error;
+
- return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
- offsetof(struct iomap_ioend, io_bio),
- BIOSET_NEED_BVECS);
+ error = bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ offsetof(struct iomap_ioend, io_bio),
+ BIOSET_NEED_BVECS);
+ if (error)
+ bioset_exit(&iomap_read_ioend_bioset);
+ return error;
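This front padding is what makes the container_of() in iomap_readpage_iter() above legal: every bio allocated from iomap_read_ioend_bioset is embedded at offsetof(struct iomap_read_ioend, io_bio) inside an iomap_read_ioend. An illustrative helper, not part of the patch, spells the relationship out:

/*
 * Illustrative only: valid solely for bios allocated from
 * iomap_read_ioend_bioset, whose front_pad embeds each bio at the tail
 * of a struct iomap_read_ioend.
 */
static inline struct iomap_read_ioend *
iomap_read_bio_to_ioend(struct bio *bio)
{
	return container_of(bio, struct iomap_read_ioend, io_bio);
}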
@@ -296,14 +296,34 @@ static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
iter->srcmap.type == IOMAP_MAPPED;
}
+struct iomap_read_ioend {
+ struct inode *io_inode; /* file being read from */
+ u16 io_flags; /* IOMAP_F_* */
+ size_t io_size; /* size of the read in bytes */
+ loff_t io_offset; /* offset in the file */
+ struct work_struct io_work; /* post read work (e.g. fs-verity) */
+ struct bio io_bio; /* MUST BE LAST! */
+};
+
+struct iomap_readpage_ops {
+ /*
+ * Optional. Allows the filesystem to perform actions just before
+ * submitting the bio and/or to override the bio's bi_end_io handler
+ * for additional verification after the bio is processed.
+ */
+ void (*prepare_ioend)(struct iomap_read_ioend *ioend);
+};
+
struct iomap_readpage_ctx {
struct folio *cur_folio;
bool cur_folio_in_bio;
struct bio *bio;
struct readahead_control *rac;
int flags;
+ const struct iomap_readpage_ops *ops;
};
+void iomap_read_end_io(struct bio *bio);
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops, void *private);
int iomap_read_folio_ctx(struct iomap_readpage_ctx *ctx,
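Putting it together, a filesystem that wants deferred post-read processing (the fs-verity style use case the io_work field anticipates) could wire up the new hook roughly as below. This is a sketch under stated assumptions: all example_* names, the workqueue, and the verification predicate are hypothetical, not code from this patch.

static struct workqueue_struct *example_read_wq;		/* hypothetical */
static bool example_inode_needs_verify(struct inode *inode);	/* hypothetical */

static void example_read_work_fn(struct work_struct *work)
{
	struct iomap_read_ioend *ioend =
		container_of(work, struct iomap_read_ioend, io_work);

	/*
	 * Expensive verification of the ioend->io_offset/io_size range
	 * runs here in process context; on failure, set the error before
	 * completing: ioend->io_bio.bi_status = BLK_STS_IOERR;
	 */
	iomap_read_end_io(&ioend->io_bio);
}

static void example_read_bio_end_io(struct bio *bio)
{
	struct iomap_read_ioend *ioend =
		container_of(bio, struct iomap_read_ioend, io_bio);

	if (bio->bi_status) {
		/* nothing to verify if the read itself failed */
		iomap_read_end_io(bio);
		return;
	}
	INIT_WORK(&ioend->io_work, example_read_work_fn);
	queue_work(example_read_wq, &ioend->io_work);
}

static void example_prepare_ioend(struct iomap_read_ioend *ioend)
{
	/* divert completion only for inodes that need verification */
	if (example_inode_needs_verify(ioend->io_inode))
		ioend->io_bio.bi_end_io = example_read_bio_end_io;
}

static const struct iomap_readpage_ops example_readpage_ops = {
	.prepare_ioend	= example_prepare_ioend,
};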