Message ID | 20230121065031.1139353-24-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Series | [01/34] block: export bio_split_rw |
On Sat, Jan 21, 2023 at 07:50:20AM +0100, Christoph Hellwig wrote:
> Currently the I/O submitters have to split bios according to the
> chunk stripe boundaries.  This leads to extra lookups in the extent
> trees and a lot of boilerplate code.
>
> To drop this requirement, split the bio when __btrfs_map_block
> returns a mapping that is smaller than the requested size and
> keep a count of pending bios in the original btrfs_bio so that
> the upper level completion is only invoked when all clones have
> completed.
>
> Based on a patch from Qu Wenruo.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> Reviewed-by: Josef Bacik <josef@toxicpanda.com>
> Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> Reviewed-by: Qu Wenruo <wqu@suse.com>
> ---
>  fs/btrfs/bio.c | 108 ++++++++++++++++++++++++++++++++++++++++---------
>  fs/btrfs/bio.h |   1 +
>  2 files changed, 91 insertions(+), 18 deletions(-)
>
> diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
> index c7522ac7e0e71c..ff42b783902140 100644
> --- a/fs/btrfs/bio.c
> +++ b/fs/btrfs/bio.c
> @@ -17,6 +17,7 @@
>  #include "file-item.h"
>
>  static struct bio_set btrfs_bioset;
> +static struct bio_set btrfs_clone_bioset;
>  static struct bio_set btrfs_repair_bioset;
>  static mempool_t btrfs_failed_bio_pool;
>
> @@ -38,6 +39,7 @@ static inline void btrfs_bio_init(struct btrfs_bio *bbio,
>          bbio->inode = inode;
>          bbio->end_io = end_io;
>          bbio->private = private;
> +        atomic_set(&bbio->pending_ios, 1);
>  }
>
>  /*
> @@ -75,6 +77,58 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
>          return bio;
>  }
>
> +static struct bio *btrfs_split_bio(struct bio *orig, u64 map_length)
> +{
> +        struct btrfs_bio *orig_bbio = btrfs_bio(orig);
> +        struct bio *bio;
> +
> +        bio = bio_split(orig, map_length >> SECTOR_SHIFT, GFP_NOFS,
> +                        &btrfs_clone_bioset);
> +        btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode, NULL, orig_bbio);
> +
> +        btrfs_bio(bio)->file_offset = orig_bbio->file_offset;
> +        if (!(orig->bi_opf & REQ_BTRFS_ONE_ORDERED))
> +                orig_bbio->file_offset += map_length;
> +
> +        atomic_inc(&orig_bbio->pending_ios);
> +        return bio;
> +}
> +
> +static void btrfs_orig_write_end_io(struct bio *bio);
> +static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
> +                                       struct btrfs_bio *orig_bbio)
> +{
> +        /*
> +         * For writes btrfs tolerates nr_mirrors - 1 write failures, so we
> +         * can't just blindly propagate a write failure here.
> +         * Instead increment the error count in the original I/O context so
> +         * that it is guaranteed to be larger than the error tolerance.
> +         */
> +        if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
> +                struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
> +                struct btrfs_io_context *orig_bioc = orig_stripe->bioc;
> +
> +                atomic_add(orig_bioc->max_errors, &orig_bioc->error);
> +        } else {
> +                orig_bbio->bio.bi_status = bbio->bio.bi_status;
> +        }
> +}
> +
> +static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
> +{
> +        if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
> +                struct btrfs_bio *orig_bbio = bbio->private;
> +
> +                if (bbio->bio.bi_status)
> +                        btrfs_bbio_propagate_error(bbio, orig_bbio);
> +                bio_put(&bbio->bio);
> +                bbio = orig_bbio;
> +        }
> +
> +        if (atomic_dec_and_test(&bbio->pending_ios))
> +                bbio->end_io(bbio);
> +}
> +
>  static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
>  {
>          if (cur_mirror == fbio->num_copies)
> @@ -92,7 +146,7 @@ static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
>  static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
>  {
>          if (atomic_dec_and_test(&fbio->repair_count)) {
> -                fbio->bbio->end_io(fbio->bbio);
> +                btrfs_orig_bbio_end_io(fbio->bbio);
>                  mempool_free(fbio, &btrfs_failed_bio_pool);
>          }
>  }
> @@ -232,7 +286,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio,
>          if (unlikely(fbio))
>                  btrfs_repair_done(fbio);
>          else
> -                bbio->end_io(bbio);
> +                btrfs_orig_bbio_end_io(bbio);
>  }
>
>  static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
> @@ -286,7 +340,7 @@ static void btrfs_simple_end_io(struct bio *bio)
>          } else {
>                  if (bio_op(bio) == REQ_OP_ZONE_APPEND)
>                          btrfs_record_physical_zoned(bbio);
> -                bbio->end_io(bbio);
> +                btrfs_orig_bbio_end_io(bbio);
>          }
>  }
>
> @@ -300,7 +354,7 @@ static void btrfs_raid56_end_io(struct bio *bio)
>          if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
>                  btrfs_check_read_bio(bbio, NULL);
>          else
> -                bbio->end_io(bbio);
> +                btrfs_orig_bbio_end_io(bbio);
>
>          btrfs_put_bioc(bioc);
>  }
> @@ -327,7 +381,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
>          else
>                  bio->bi_status = BLK_STS_OK;
>
> -        bbio->end_io(bbio);
> +        btrfs_orig_bbio_end_io(bbio);
>          btrfs_put_bioc(bioc);
>  }
>
> @@ -492,7 +546,7 @@ static void run_one_async_done(struct btrfs_work *work)
>
>          /* If an error occurred we just want to clean up the bio and move on */
>          if (bio->bi_status) {
> -                btrfs_bio_end_io(async->bbio, bio->bi_status);
> +                btrfs_orig_bbio_end_io(async->bbio);
>                  return;
>          }
>
> @@ -567,8 +621,8 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
>          return true;
>  }
>
> -void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
> -                      int mirror_num)
> +static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
> +                               int mirror_num)
>  {
>          struct btrfs_bio *bbio = btrfs_bio(bio);
>          u64 logical = bio->bi_iter.bi_sector << 9;
> @@ -587,11 +641,10 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
>                  goto fail;
>          }
>
> +        map_length = min(map_length, length);
>          if (map_length < length) {
> -                btrfs_crit(fs_info,
> -                           "mapping failed logical %llu bio len %llu len %llu",
> -                           logical, length, map_length);
> -                BUG();
> +                bio = btrfs_split_bio(bio, map_length);
> +                bbio = btrfs_bio(bio);
>          }
>
>          /*
> @@ -602,14 +655,14 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
>                  bbio->saved_iter = bio->bi_iter;
>                  ret = btrfs_lookup_bio_sums(bbio);
>                  if (ret)
> -                        goto fail;
> +                        goto fail_put_bio;
>          }
>
>          if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
>                  if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
>                          ret = btrfs_extract_ordered_extent(btrfs_bio(bio));
>                          if (ret)
> -                                goto fail;
> +                                goto fail_put_bio;
>                  }
>
>                  /*
> @@ -621,20 +674,33 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
>                      !btrfs_is_data_reloc_root(bbio->inode->root)) {
>                          if (should_async_write(bbio) &&
>                              btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
> -                                return;
> +                                goto done;
>
>                          ret = btrfs_bio_csum(bbio);
>                          if (ret)
> -                                goto fail;
> +                                goto fail_put_bio;
>                  }
>          }
>
>          __btrfs_submit_bio(bio, bioc, &smap, mirror_num);
> -        return;
> +done:
> +        return map_length == length;
>
> +fail_put_bio:
> +        if (map_length < length)
> +                bio_put(bio);

This is causing a panic in btrfs/125 because you set bbio to
btrfs_bio(split_bio), which has a NULL end_io.  You need something like
the following so that we're ending the correct bbio.  Thanks,

Josef

diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 5d4b67fc44f4..f3a357c48e69 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -607,6 +607,7 @@ static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
                                int mirror_num)
 {
         struct btrfs_bio *bbio = btrfs_bio(bio);
+        struct btrfs_bio *orig_bbio = bbio;
         u64 logical = bio->bi_iter.bi_sector << 9;
         u64 length = bio->bi_iter.bi_size;
         u64 map_length = length;
@@ -673,7 +674,7 @@ static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
         bio_put(bio);
 fail:
         btrfs_bio_counter_dec(fs_info);
-        btrfs_bio_end_io(bbio, ret);
+        btrfs_bio_end_io(orig_bbio, ret);
         /* Do not submit another chunk */
         return true;
 }
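The failure mode here generalizes beyond btrfs: a split clone's end_io is
deliberately left NULL (its completion is funneled to the original through
btrfs_orig_bbio_end_io()), so once the local bbio variable is reassigned to
the clone, any error path that ends bbio dereferences a NULL function
pointer.  A minimal userspace sketch of the pattern and of the fix above;
all fake_* names are hypothetical stand-ins, not btrfs code:

#include <stdio.h>
#include <stdlib.h>

struct fake_bbio {
        void (*end_io)(struct fake_bbio *);     /* NULL on split clones */
        struct fake_bbio *parent;               /* original, set on clones */
};

static void upper_end_io(struct fake_bbio *b)
{
        printf("upper-level completion for the original bbio\n");
}

/* Split helper: the clone deliberately gets no end_io of its own. */
static struct fake_bbio *fake_split(struct fake_bbio *orig)
{
        struct fake_bbio *clone = calloc(1, sizeof(*clone));

        clone->parent = orig;
        return clone;
}

/* Demonstrates only the failing path of the submit helper. */
static void fake_submit_chunk(struct fake_bbio *bbio, int need_split)
{
        struct fake_bbio *orig_bbio = bbio;     /* the fix: saved up front */

        if (need_split)
                bbio = fake_split(bbio);        /* bbio now is the clone */

        /*
         * The buggy version did bbio->end_io(bbio), which is a NULL
         * pointer dereference whenever we split above.  Ending the saved
         * original instead is always safe.
         */
        if (bbio != orig_bbio)
                free(bbio);
        orig_bbio->end_io(orig_bbio);
}

int main(void)
{
        struct fake_bbio orig = { .end_io = upper_end_io };

        fake_submit_chunk(&orig, 1);
        return 0;
}

The essential point is that the error path must always complete the bbio
the caller handed in, never the internal clone.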
On Wed, Jan 25, 2023 at 04:51:16PM -0500, Josef Bacik wrote:
> This is causing a panic in btrfs/125 because you set bbio to
> btrfs_bio(split_bio), which has a NULL end_io.  You need something like the
> following so that we're ending the correct bbio.  Thanks,

Just curious, what are the other configuration details as I've never been
able to hit it?

The fix itself looks good.
On Thu, Jan 26, 2023 at 06:21:43AM +0100, Christoph Hellwig wrote:
> On Wed, Jan 25, 2023 at 04:51:16PM -0500, Josef Bacik wrote:
> > This is causing a panic in btrfs/125 because you set bbio to
> > btrfs_bio(split_bio), which has a NULL end_io.  You need something like the
> > following so that we're ending the correct bbio.  Thanks,
>
> Just curious, what are the other configuration details as I've never been
> able to hit it?
>
> The fix itself looks good.

I reproduced it on the CI setup I've got for us, this was the config

[btrfs_normal_freespacetree]
TEST_DIR=/mnt/test
TEST_DEV=/dev/mapper/vg0-lv0
SCRATCH_DEV_POOL="/dev/mapper/vg0-lv7 /dev/mapper/vg0-lv6 /dev/mapper/vg0-lv5 /dev/mapper/vg0-lv4 /dev/mapper/vg0-lv3 /dev/mapper/vg0-lv2 /dev/mapper/vg0-lv1 "
SCRATCH_MNT=/mnt/scratch
LOGWRITES_DEV=/dev/mapper/vg0-lv8
PERF_CONFIGNAME=jbacik
MKFS_OPTIONS="-K -f -O ^no-holes"
MOUNT_OPTIONS="-o space_cache=v2"
FSTYP=btrfs

I actually hadn't been running 125 because it wasn't in the auto group, Dave
noticed it, I just tried it on this VM and hit it right away.  No worries,
that's why we have the CI stuff, sometimes it just doesn't trigger for us but
will trigger with the CI setup.  Thanks,

Josef
On Thu, Jan 26, 2023 at 12:43:01PM -0500, Josef Bacik wrote:
> I actually hadn't been running 125 because it wasn't in the auto group, Dave
> noticed it, I just tried it on this VM and hit it right away.  No worries,
> that's why we have the CI stuff, sometimes it just doesn't trigger for us but
> will trigger with the CI setup.  Thanks,

Oh, I guess the lack of auto group means I've never tested it.  But it's a
fairly bad bug, and I'm surprised nothing in auto hits an error after a bio
split.  I'll need to see if I can find a simpler reproducer, as this warrants
a regression test.
On Thu, Jan 26, 2023 at 06:46:11PM +0100, Christoph Hellwig wrote:
> On Thu, Jan 26, 2023 at 12:43:01PM -0500, Josef Bacik wrote:
> > I actually hadn't been running 125 because it wasn't in the auto group, Dave
> > noticed it, I just tried it on this VM and hit it right away.  No worries,
> > that's why we have the CI stuff, sometimes it just doesn't trigger for us but
> > will trigger with the CI setup.  Thanks,
>
> Oh, I guess the lack of auto group means I've never tested it.  But it's a
> fairly bad bug, and I'm surprised nothing in auto hits an error after a bio
> split.  I'll need to see if I can find a simpler reproducer, as this warrants
> a regression test.

The 'auto' group is good for first tests; I'm running 'check -g all' on my VM
setups.  If this is enough to trigger errors then we probably don't need a
separate regression test.
On Thu, Jan 26, 2023 at 07:33:04PM +0100, David Sterba wrote:
> > Oh, I guess the lack of auto group means I've never tested it.  But it's a
> > fairly bad bug, and I'm surprised nothing in auto hits an error after a bio
> > split.  I'll need to see if I can find a simpler reproducer, as this warrants
> > a regression test.
>
> The 'auto' group is good for first tests; I'm running 'check -g all' on my VM
> setups.  If this is enough to trigger errors then we probably don't need a
> separate regression test.

Hmm.  The xfstests README says:

  "By default the tests suite will run all the tests in the auto group.
   These are the tests that are expected to function correctly as
   regression tests, and it excludes tests that exercise conditions known
   to cause machine failures (i.e. the "dangerous" tests)."

and my assumption over decades of xfstests use has been that only tests that
are broken, non-deterministic, or cause recent upstream kernels to crash are
not in auto.

Is there some kind of different rule for btrfs?  E.g. btrfs/125 seems to
complete quickly and does not actually seem to be dangerous.  Besides that
there's btrfs/185, which is a very quick fuzzer, and btrfs/198, which is a
fairly normal test as far as I can tell.

The generic tests also have a few !auto tests that look like they should
mostly be in the auto group as well, in addition to a few broken and
dangerous ones, and the blockdev ones from Darrick that should probably move
to blktests.  XFS mostly seems to have dangerous fuzzer tests in the !auto
category.
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index c7522ac7e0e71c..ff42b783902140 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -17,6 +17,7 @@
 #include "file-item.h"

 static struct bio_set btrfs_bioset;
+static struct bio_set btrfs_clone_bioset;
 static struct bio_set btrfs_repair_bioset;
 static mempool_t btrfs_failed_bio_pool;

@@ -38,6 +39,7 @@ static inline void btrfs_bio_init(struct btrfs_bio *bbio,
         bbio->inode = inode;
         bbio->end_io = end_io;
         bbio->private = private;
+        atomic_set(&bbio->pending_ios, 1);
 }

 /*
@@ -75,6 +77,58 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
         return bio;
 }

+static struct bio *btrfs_split_bio(struct bio *orig, u64 map_length)
+{
+        struct btrfs_bio *orig_bbio = btrfs_bio(orig);
+        struct bio *bio;
+
+        bio = bio_split(orig, map_length >> SECTOR_SHIFT, GFP_NOFS,
+                        &btrfs_clone_bioset);
+        btrfs_bio_init(btrfs_bio(bio), orig_bbio->inode, NULL, orig_bbio);
+
+        btrfs_bio(bio)->file_offset = orig_bbio->file_offset;
+        if (!(orig->bi_opf & REQ_BTRFS_ONE_ORDERED))
+                orig_bbio->file_offset += map_length;
+
+        atomic_inc(&orig_bbio->pending_ios);
+        return bio;
+}
+
+static void btrfs_orig_write_end_io(struct bio *bio);
+static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio,
+                                       struct btrfs_bio *orig_bbio)
+{
+        /*
+         * For writes btrfs tolerates nr_mirrors - 1 write failures, so we
+         * can't just blindly propagate a write failure here.
+         * Instead increment the error count in the original I/O context so
+         * that it is guaranteed to be larger than the error tolerance.
+         */
+        if (bbio->bio.bi_end_io == &btrfs_orig_write_end_io) {
+                struct btrfs_io_stripe *orig_stripe = orig_bbio->bio.bi_private;
+                struct btrfs_io_context *orig_bioc = orig_stripe->bioc;
+
+                atomic_add(orig_bioc->max_errors, &orig_bioc->error);
+        } else {
+                orig_bbio->bio.bi_status = bbio->bio.bi_status;
+        }
+}
+
+static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio)
+{
+        if (bbio->bio.bi_pool == &btrfs_clone_bioset) {
+                struct btrfs_bio *orig_bbio = bbio->private;
+
+                if (bbio->bio.bi_status)
+                        btrfs_bbio_propagate_error(bbio, orig_bbio);
+                bio_put(&bbio->bio);
+                bbio = orig_bbio;
+        }
+
+        if (atomic_dec_and_test(&bbio->pending_ios))
+                bbio->end_io(bbio);
+}
+
 static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
 {
         if (cur_mirror == fbio->num_copies)
@@ -92,7 +146,7 @@ static int prev_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror)
 static void btrfs_repair_done(struct btrfs_failed_bio *fbio)
 {
         if (atomic_dec_and_test(&fbio->repair_count)) {
-                fbio->bbio->end_io(fbio->bbio);
+                btrfs_orig_bbio_end_io(fbio->bbio);
                 mempool_free(fbio, &btrfs_failed_bio_pool);
         }
 }
@@ -232,7 +286,7 @@ static void btrfs_check_read_bio(struct btrfs_bio *bbio,
         if (unlikely(fbio))
                 btrfs_repair_done(fbio);
         else
-                bbio->end_io(bbio);
+                btrfs_orig_bbio_end_io(bbio);
 }

 static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
@@ -286,7 +340,7 @@ static void btrfs_simple_end_io(struct bio *bio)
         } else {
                 if (bio_op(bio) == REQ_OP_ZONE_APPEND)
                         btrfs_record_physical_zoned(bbio);
-                bbio->end_io(bbio);
+                btrfs_orig_bbio_end_io(bbio);
         }
 }

@@ -300,7 +354,7 @@ static void btrfs_raid56_end_io(struct bio *bio)
         if (bio_op(bio) == REQ_OP_READ && !(bbio->bio.bi_opf & REQ_META))
                 btrfs_check_read_bio(bbio, NULL);
         else
-                bbio->end_io(bbio);
+                btrfs_orig_bbio_end_io(bbio);

         btrfs_put_bioc(bioc);
 }
@@ -327,7 +381,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
         else
                 bio->bi_status = BLK_STS_OK;

-        bbio->end_io(bbio);
+        btrfs_orig_bbio_end_io(bbio);
         btrfs_put_bioc(bioc);
 }

@@ -492,7 +546,7 @@ static void run_one_async_done(struct btrfs_work *work)

         /* If an error occurred we just want to clean up the bio and move on */
         if (bio->bi_status) {
-                btrfs_bio_end_io(async->bbio, bio->bi_status);
+                btrfs_orig_bbio_end_io(async->bbio);
                 return;
         }

@@ -567,8 +621,8 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
         return true;
 }

-void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
-                      int mirror_num)
+static bool btrfs_submit_chunk(struct btrfs_fs_info *fs_info, struct bio *bio,
+                               int mirror_num)
 {
         struct btrfs_bio *bbio = btrfs_bio(bio);
         u64 logical = bio->bi_iter.bi_sector << 9;
@@ -587,11 +641,10 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                 goto fail;
         }

+        map_length = min(map_length, length);
         if (map_length < length) {
-                btrfs_crit(fs_info,
-                           "mapping failed logical %llu bio len %llu len %llu",
-                           logical, length, map_length);
-                BUG();
+                bio = btrfs_split_bio(bio, map_length);
+                bbio = btrfs_bio(bio);
         }

         /*
@@ -602,14 +655,14 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                 bbio->saved_iter = bio->bi_iter;
                 ret = btrfs_lookup_bio_sums(bbio);
                 if (ret)
-                        goto fail;
+                        goto fail_put_bio;
         }

         if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
                 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
                         ret = btrfs_extract_ordered_extent(btrfs_bio(bio));
                         if (ret)
-                                goto fail;
+                                goto fail_put_bio;
                 }

                 /*
@@ -621,20 +674,33 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                     !btrfs_is_data_reloc_root(bbio->inode->root)) {
                         if (should_async_write(bbio) &&
                             btrfs_wq_submit_bio(bbio, bioc, &smap, mirror_num))
-                                return;
+                                goto done;

                         ret = btrfs_bio_csum(bbio);
                         if (ret)
-                                goto fail;
+                                goto fail_put_bio;
                 }
         }

         __btrfs_submit_bio(bio, bioc, &smap, mirror_num);
-        return;
+done:
+        return map_length == length;

+fail_put_bio:
+        if (map_length < length)
+                bio_put(bio);
 fail:
         btrfs_bio_counter_dec(fs_info);
         btrfs_bio_end_io(bbio, ret);
+        /* Do not submit another chunk */
+        return true;
+}
+
+void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
+                      int mirror_num)
+{
+        while (!btrfs_submit_chunk(fs_info, bio, mirror_num))
+                ;
 }

 /*
@@ -742,10 +808,13 @@ int __init btrfs_bioset_init(void)
                         offsetof(struct btrfs_bio, bio),
                         BIOSET_NEED_BVECS))
                 return -ENOMEM;
+        if (bioset_init(&btrfs_clone_bioset, BIO_POOL_SIZE,
+                        offsetof(struct btrfs_bio, bio), 0))
+                goto out_free_bioset;
         if (bioset_init(&btrfs_repair_bioset, BIO_POOL_SIZE,
                         offsetof(struct btrfs_bio, bio),
                         BIOSET_NEED_BVECS))
-                goto out_free_bioset;
+                goto out_free_clone_bioset;
         if (mempool_init_kmalloc_pool(&btrfs_failed_bio_pool, BIO_POOL_SIZE,
                                       sizeof(struct btrfs_failed_bio)))
                 goto out_free_repair_bioset;
@@ -753,6 +822,8 @@
 out_free_repair_bioset:
         bioset_exit(&btrfs_repair_bioset);
+out_free_clone_bioset:
+        bioset_exit(&btrfs_clone_bioset);
 out_free_bioset:
         bioset_exit(&btrfs_bioset);
         return -ENOMEM;
@@ -762,5 +833,6 @@ void __cold btrfs_bioset_exit(void)
 {
         mempool_exit(&btrfs_failed_bio_pool);
         bioset_exit(&btrfs_repair_bioset);
+        bioset_exit(&btrfs_clone_bioset);
         bioset_exit(&btrfs_bioset);
 }
diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h
index 334dcc3d5feb95..7c50f757cf5106 100644
--- a/fs/btrfs/bio.h
+++ b/fs/btrfs/bio.h
@@ -55,6 +55,7 @@ struct btrfs_bio {
         /* For internal use in read end I/O handling */
         unsigned int mirror_num;
+        atomic_t pending_ios;
         struct work_struct end_io_work;

         /*
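For readers who want to exercise the completion counting in isolation, here
is a minimal userspace sketch of the same pattern; the demo_* names are
hypothetical, not btrfs APIs.  pending_ios starts at 1 for the submitter's
own reference, each split clone takes another, and all completions funnel
through one helper that redirects clones to the original and invokes the
upper-level end_io only on the final decrement.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for btrfs_bio: just the completion plumbing. */
struct demo_bio {
        atomic_int pending_ios;
        void (*end_io)(struct demo_bio *);
        struct demo_bio *orig;          /* set on clones, NULL on the original */
};

static void demo_bio_init(struct demo_bio *b, void (*end_io)(struct demo_bio *))
{
        /* Starts at 1: the submitter itself holds a reference. */
        atomic_init(&b->pending_ios, 1);
        b->end_io = end_io;
        b->orig = NULL;
}

/* Split off a clone covering the mapped front part of the I/O. */
static struct demo_bio *demo_split(struct demo_bio *orig)
{
        struct demo_bio *clone = calloc(1, sizeof(*clone));

        clone->orig = orig;
        atomic_fetch_add(&orig->pending_ios, 1);
        return clone;
}

/* All completions land here: clones redirect to the original. */
static void demo_orig_end_io(struct demo_bio *b)
{
        if (b->orig) {
                struct demo_bio *orig = b->orig;

                free(b);
                b = orig;
        }
        if (atomic_fetch_sub(&b->pending_ios, 1) == 1)
                b->end_io(b);           /* last reference dropped */
}

static void upper_end_io(struct demo_bio *b)
{
        printf("all chunks done; upper-level completion runs exactly once\n");
}

int main(void)
{
        struct demo_bio orig;
        struct demo_bio *clone;

        demo_bio_init(&orig, upper_end_io);
        clone = demo_split(&orig);      /* first mapped chunk */
        demo_orig_end_io(clone);        /* clone completes: 2 -> 1, no end_io */
        demo_orig_end_io(&orig);        /* remainder completes: 1 -> 0, end_io */
        return 0;
}

This is also why btrfs_submit_chunk() returns whether the whole bio was
consumed: btrfs_submit_bio() keeps splitting off one mapped chunk per
iteration until map_length == length.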
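The write-error propagation in btrfs_bbio_propagate_error() deserves a
similar toy model.  For writes, btrfs tolerates max_errors (i.e.
nr_mirrors - 1) failed stripes, so a failed clone cannot merely set a status
bit; it adds max_errors to the shared error counter so that, together with
the failure already counted, the error > max_errors check is guaranteed to
trip.  A loose sketch under that reading (demo_* names again hypothetical,
not btrfs code):

#include <stdatomic.h>
#include <stdio.h>

struct demo_ioc {
        atomic_int error;
        int max_errors;                 /* tolerated stripe failures */
};

/* Per-stripe completion: count one individual stripe failure. */
static void demo_stripe_failed(struct demo_ioc *ioc)
{
        atomic_fetch_add(&ioc->error, 1);
}

/*
 * A whole split clone failed: we cannot tell which mirrors it hit, so
 * add the full tolerance on top of the stripe's own increment, which
 * guarantees error > max_errors and thus a failed write overall.
 */
static void demo_propagate_clone_error(struct demo_ioc *ioc)
{
        atomic_fetch_add(&ioc->error, ioc->max_errors);
}

static int demo_write_ok(struct demo_ioc *ioc)
{
        return atomic_load(&ioc->error) <= ioc->max_errors;
}

int main(void)
{
        struct demo_ioc ioc = { .max_errors = 1 };      /* e.g. two mirrors */

        atomic_init(&ioc.error, 0);
        demo_stripe_failed(&ioc);               /* one mirror failed: still OK */
        printf("ok after one stripe failure: %d\n", demo_write_ok(&ioc));
        demo_propagate_clone_error(&ioc);       /* clone failed: must fail */
        printf("ok after clone failure: %d\n", demo_write_ok(&ioc));
        return 0;
}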