@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
+#include <linux/fsverity.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
@@ -23,6 +24,8 @@
#define IOEND_BATCH_SIZE 4096
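+/* Mempool size shared by the ioend and fs-verity biosets. */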
+#define IOMAP_POOL_SIZE (4 * (PAGE_SIZE / SECTOR_SIZE))
+
typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
* Structure allocated for each folio to track per-block uptodate, dirty state
@@ -368,6 +371,111 @@ static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
pos >= i_size_read(iter->inode);
}
+#ifdef CONFIG_FS_VERITY
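+/*
+ * Read bios for verity files are wrapped so that completion handling can
+ * punt verification to a workqueue. The bio must stay the last member:
+ * the bioset's front pad covers everything that precedes it.
+ */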
+struct iomap_fsverity_bio {
+ struct work_struct work;
+ struct bio bio;
+};
+static struct bio_set *iomap_fsverity_bioset;
+
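+/*
+ * Lazily set up the bioset that fs-verity read bios are allocated from;
+ * it is created on first use and shared by all filesystems.
+ */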
+static int iomap_fsverity_init_bioset(void)
+{
+ struct bio_set *bs, *old;
+ int error;
+
+ bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+ if (!bs)
+ return -ENOMEM;
+
+ error = bioset_init(bs, IOMAP_POOL_SIZE,
+ offsetof(struct iomap_fsverity_bio, bio),
+ BIOSET_NEED_BVECS);
+ if (error) {
+ kfree(bs);
+ return error;
+ }
+
+ /*
+ * This has to be atomic as readaheads can race to create the
+ * bioset. If someone set the pointer before us, we drop ours.
+ */
+ old = cmpxchg(&iomap_fsverity_bioset, NULL, bs);
+ if (old) {
+ bioset_exit(bs);
+ kfree(bs);
+ }
+
+ return 0;
+}
+
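+/*
+ * Filesystems using iomap buffered reads on fs-verity files call this,
+ * typically at mount time, to set up the shared read bioset and the
+ * per-superblock verification workqueue.
+ */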
+int iomap_init_fsverity(struct super_block *sb, unsigned int wq_flags,
+ int max_active)
+{
+ int ret;
+
+ if (!iomap_fsverity_bioset) {
+ ret = iomap_fsverity_init_bioset();
+ if (ret)
+ return ret;
+ }
+
+ return fsverity_init_wq(sb, wq_flags, max_active);
+}
+EXPORT_SYMBOL_GPL(iomap_init_fsverity);
+
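+/*
+ * Verification is deferred to process context: fsverity may have to
+ * read and hash Merkle tree blocks, which can sleep, while bio
+ * completions run in a context that cannot.
+ */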
+static void
+iomap_read_fsverity_end_io_work(struct work_struct *work)
+{
+ struct iomap_fsverity_bio *fbio =
+ container_of(work, struct iomap_fsverity_bio, work);
+
+ fsverity_verify_bio(&fbio->bio);
+ iomap_read_end_io(&fbio->bio);
+}
+
+static void
+iomap_read_fsverity_end_io(struct bio *bio)
+{
+ struct iomap_fsverity_bio *fbio =
+ container_of(bio, struct iomap_fsverity_bio, bio);
+
+ INIT_WORK(&fbio->work, iomap_read_fsverity_end_io_work);
+ queue_work(bio->bi_private, &fbio->work);
+}
+
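+/*
+ * Allocate a read bio from the fs-verity bioset, stashing the
+ * superblock's verification workqueue in ->bi_private for the
+ * completion handler to use.
+ */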
+static struct bio *
+iomap_fsverity_read_bio_alloc(struct inode *inode, struct block_device *bdev,
+ int nr_vecs, gfp_t gfp)
+{
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_READ, gfp,
+ iomap_fsverity_bioset);
+ if (bio) {
+ bio->bi_private = inode->i_sb->s_verity_wq;
+ bio->bi_end_io = iomap_read_fsverity_end_io;
+ }
+ return bio;
+}
+#else
+# define iomap_fsverity_read_bio_alloc(...) (NULL)
+# define iomap_fsverity_init_bioset(...) (-EOPNOTSUPP)
+#endif /* CONFIG_FS_VERITY */
+
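+/*
+ * Allocate a read bio, routing verity inodes through the fs-verity
+ * bioset so that their contents are verified at completion time.
+ */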
+static struct bio *iomap_read_bio_alloc(struct inode *inode,
+ struct block_device *bdev, int nr_vecs, gfp_t gfp)
+{
+ struct bio *bio;
+
+ if (fsverity_active(inode))
+ return iomap_fsverity_read_bio_alloc(inode, bdev, nr_vecs, gfp);
+
+ bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, gfp);
+ if (bio)
+ bio->bi_end_io = iomap_read_end_io;
+ return bio;
+}
+
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx, loff_t offset)
{
@@ -380,6 +488,10 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
size_t poff, plen;
sector_t sector;
+ /* Fail reads from verity files whose verity info was never loaded. */
+ if (IS_VERITY(iter->inode) && !fsverity_active(iter->inode))
+ return -EIO;
+
if (iomap->type == IOMAP_INLINE)
return iomap_read_inline_data(iter, folio);
@@ -391,6 +503,12 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
if (iomap_block_needs_zeroing(iter, pos)) {
folio_zero_range(folio, poff, plen);
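+ /*
+ * Zeroed ranges of a verity file are still covered by the Merkle
+ * tree, so they must pass verification like any other data read.
+ */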
+ if (fsverity_active(iter->inode) &&
+ !fsverity_verify_blocks(folio, plen, poff)) {
+ folio_set_error(folio);
+ goto done;
+ }
+
iomap_set_range_uptodate(folio, poff, plen);
goto done;
}
@@ -408,28 +526,29 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
!bio_add_folio(ctx->bio, folio, plen, poff)) {
gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
- unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
if (ctx->bio)
submit_bio(ctx->bio);
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
- REQ_OP_READ, gfp);
+
+ ctx->bio = iomap_read_bio_alloc(iter->inode, iomap->bdev,
+ bio_max_segs(DIV_ROUND_UP(length, PAGE_SIZE)),
+ gfp);
+
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_read_folio does.
*/
if (!ctx->bio) {
- ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
- orig_gfp);
+ ctx->bio = iomap_read_bio_alloc(iter->inode,
+ iomap->bdev, 1, orig_gfp);
}
if (ctx->rac)
ctx->bio->bi_opf |= REQ_RAHEAD;
ctx->bio->bi_iter.bi_sector = sector;
- ctx->bio->bi_end_io = iomap_read_end_io;
bio_add_folio_nofail(ctx->bio, folio, plen, poff);
}
@@ -1987,7 +2106,7 @@ EXPORT_SYMBOL_GPL(iomap_writepages);
static int __init iomap_init(void)
{
- return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
+ return bioset_init(&iomap_ioend_bioset, IOMAP_POOL_SIZE,
offsetof(struct iomap_ioend, io_bio),
BIOSET_NEED_BVECS);
}
@@ -256,6 +256,11 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
return &i->iomap;
}
+#ifdef CONFIG_FS_VERITY
+int iomap_init_fsverity(struct super_block *sb, unsigned int wq_flags,
+ int max_active);
+#endif
+
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
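
A caller-side sketch, for illustration only: a filesystem opting in would
call iomap_init_fsverity() once during mount, before any verity file can be
read. The myfs_fill_super() name and the workqueue parameters below are
hypothetical, not part of this patch:

	static int myfs_fill_super(struct super_block *sb, void *data, int silent)
	{
		int error;

		/* WQ_UNBOUND and max_active = 0 (the default) are illustrative. */
		error = iomap_init_fsverity(sb, WQ_UNBOUND, 0);
		if (error)
			return error;

		/* ... the rest of the normal mount path ... */
		return 0;
	}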