Message ID | 20250203094322.1809766-4-hch@lst.de (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | [1/7] block: support integrity generation and verification from file systems | expand |
On Mon, Feb 03, 2025 at 10:43:07AM +0100, Christoph Hellwig wrote: > From: Goldwyn Rodrigues <rgoldwyn@suse.com> > > Allocate the bio from the bioset provided in iomap_read_folio_ops. > If no bioset is provided, fs_bio_set is used which is the standard > bioset for filesystems. > > Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com> I feel like I've seen this patch and the last one floating around for quite a while; would you and/or Goldwyn like to merge it for 6.15? --D > [hch: factor out two helpers] > Signed-off-by: Christoph Hellwig <hch@lst.de> > --- > fs/iomap/buffered-io.c | 51 ++++++++++++++++++++++++++++-------------- > include/linux/iomap.h | 6 +++++ > 2 files changed, 40 insertions(+), 17 deletions(-) > > diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c > index 804527dcc9ba..eaffa23eb8e4 100644 > --- a/fs/iomap/buffered-io.c > +++ b/fs/iomap/buffered-io.c > @@ -364,6 +364,39 @@ static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, > pos >= i_size_read(iter->inode); > } > > +static struct bio_set *iomap_read_bio_set(struct iomap_readpage_ctx *ctx) > +{ > + if (ctx->ops && ctx->ops->bio_set) > + return ctx->ops->bio_set; > + return &fs_bio_set; > +} > + > +static struct bio *iomap_read_alloc_bio(const struct iomap_iter *iter, > + struct iomap_readpage_ctx *ctx, loff_t length) > +{ > + unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); > + struct block_device *bdev = iter->iomap.bdev; > + struct bio_set *bio_set = iomap_read_bio_set(ctx); > + gfp_t gfp = mapping_gfp_constraint(iter->inode->i_mapping, GFP_KERNEL); > + gfp_t orig_gfp = gfp; > + struct bio *bio; > + > + if (ctx->rac) /* same as readahead_gfp_mask */ > + gfp |= __GFP_NORETRY | __GFP_NOWARN; > + > + bio = bio_alloc_bioset(bdev, bio_max_segs(nr_vecs), REQ_OP_READ, gfp, > + bio_set); > + > + /* > + * If the bio_alloc fails, try it again for a single page to avoid > + * having to deal with partial page reads. This emulates what > + * do_mpage_read_folio does. 
> + */ > + if (!bio) > + bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, orig_gfp, bio_set); > + return bio; > +} > + > static void iomap_read_submit_bio(const struct iomap_iter *iter, > struct iomap_readpage_ctx *ctx) > { > @@ -411,27 +444,11 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, > if (!ctx->bio || > bio_end_sector(ctx->bio) != sector || > !bio_add_folio(ctx->bio, folio, plen, poff)) { > - gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); > - gfp_t orig_gfp = gfp; > - unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); > - > if (ctx->bio) > iomap_read_submit_bio(iter, ctx); > > ctx->bio_start_pos = offset; > - if (ctx->rac) /* same as readahead_gfp_mask */ > - gfp |= __GFP_NORETRY | __GFP_NOWARN; > - ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), > - REQ_OP_READ, gfp); > - /* > - * If the bio_alloc fails, try it again for a single page to > - * avoid having to deal with partial page reads. This emulates > - * what do_mpage_read_folio does. > - */ > - if (!ctx->bio) { > - ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, > - orig_gfp); > - } > + ctx->bio = iomap_read_alloc_bio(iter, ctx, length); > if (ctx->rac) > ctx->bio->bi_opf |= REQ_RAHEAD; > ctx->bio->bi_iter.bi_sector = sector; > diff --git a/include/linux/iomap.h b/include/linux/iomap.h > index 2930861d1ef1..304be88ecd23 100644 > --- a/include/linux/iomap.h > +++ b/include/linux/iomap.h > @@ -311,6 +311,12 @@ struct iomap_read_folio_ops { > */ > void (*submit_io)(struct inode *inode, struct bio *bio, > loff_t file_offset); > + > + /* > + * Optional, allows filesystem to specify own bio_set, so new bio's > + * can be allocated from the provided bio_set. > + */ > + struct bio_set *bio_set; > }; > > int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops, > -- > 2.45.2 > >
On Mon, Feb 03, 2025 at 02:23:27PM -0800, Darrick J. Wong wrote: > > Allocate the bio from the bioset provided in iomap_read_folio_ops. > > If no bioset is provided, fs_bio_set is used which is the standard > > bioset for filesystems. > > > > Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.com> > > I feel like I've seen this patch and the last one floating around for > quite a while; would you and/or Goldwyn like to merge it for 6.15? I think Goldwyn posted it once or twice and this is my first take on it (I had a similar one in a local tree, but I don't think that ever made it out to the public). But until we actually grow a user I'd rather not have it queue up as dead code. I'm not sure what the timeline of iomap in btrfs is, but I think 6.15 is the absolute earliest that the PI support for XFS could make it.
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 804527dcc9ba..eaffa23eb8e4 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -364,6 +364,39 @@ static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, pos >= i_size_read(iter->inode); } +static struct bio_set *iomap_read_bio_set(struct iomap_readpage_ctx *ctx) +{ + if (ctx->ops && ctx->ops->bio_set) + return ctx->ops->bio_set; + return &fs_bio_set; +} + +static struct bio *iomap_read_alloc_bio(const struct iomap_iter *iter, + struct iomap_readpage_ctx *ctx, loff_t length) +{ + unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); + struct block_device *bdev = iter->iomap.bdev; + struct bio_set *bio_set = iomap_read_bio_set(ctx); + gfp_t gfp = mapping_gfp_constraint(iter->inode->i_mapping, GFP_KERNEL); + gfp_t orig_gfp = gfp; + struct bio *bio; + + if (ctx->rac) /* same as readahead_gfp_mask */ + gfp |= __GFP_NORETRY | __GFP_NOWARN; + + bio = bio_alloc_bioset(bdev, bio_max_segs(nr_vecs), REQ_OP_READ, gfp, + bio_set); + + /* + * If the bio_alloc fails, try it again for a single page to avoid + * having to deal with partial page reads. This emulates what + * do_mpage_read_folio does. 
+ */ + if (!bio) + bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, orig_gfp, bio_set); + return bio; +} + static void iomap_read_submit_bio(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) { @@ -411,27 +444,11 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, if (!ctx->bio || bio_end_sector(ctx->bio) != sector || !bio_add_folio(ctx->bio, folio, plen, poff)) { - gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); - gfp_t orig_gfp = gfp; - unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); - if (ctx->bio) iomap_read_submit_bio(iter, ctx); ctx->bio_start_pos = offset; - if (ctx->rac) /* same as readahead_gfp_mask */ - gfp |= __GFP_NORETRY | __GFP_NOWARN; - ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), - REQ_OP_READ, gfp); - /* - * If the bio_alloc fails, try it again for a single page to - * avoid having to deal with partial page reads. This emulates - * what do_mpage_read_folio does. - */ - if (!ctx->bio) { - ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, - orig_gfp); - } + ctx->bio = iomap_read_alloc_bio(iter, ctx, length); if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 2930861d1ef1..304be88ecd23 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -311,6 +311,12 @@ struct iomap_read_folio_ops { */ void (*submit_io)(struct inode *inode, struct bio *bio, loff_t file_offset); + + /* + * Optional, allows filesystem to specify own bio_set, so new bio's + * can be allocated from the provided bio_set. + */ + struct bio_set *bio_set; }; int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops,