Message ID | 20241031095918.99964-7-john.g.garry@oracle.com (mailing list archive)
---|---
State | New
Series | bio_split() error handling rework
Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return; except for discard, where we end the bio
directly.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid10.c | 47 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 46 insertions(+), 1 deletion(-)
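Context for the check that now follows every bio_split() call in the diff below: with this series, bio_split() reports failure through the kernel's error-pointer convention, so callers test the return value with IS_ERR() and decode it with PTR_ERR(). A minimal userspace sketch of that convention, with ERR_PTR()/IS_ERR()/PTR_ERR() reimplemented locally to mirror include/linux/err.h; fake_bio and fake_split are illustrative stand-ins, not the kernel API:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* Encode a negative errno value as an unmappable top-of-address-space pointer. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct fake_bio {
	unsigned int sectors;
};

/* Stand-in for bio_split(): reject a zero-sized or whole-bio split. */
static struct fake_bio *fake_split(struct fake_bio *bio, unsigned int sectors)
{
	static struct fake_bio front;

	if (!sectors || sectors >= bio->sectors)
		return ERR_PTR(-EINVAL);
	front.sectors = sectors;
	bio->sectors -= sectors;
	return &front;
}

int main(void)
{
	struct fake_bio bio = { .sectors = 8 };
	struct fake_bio *split = fake_split(&bio, 0);

	/* The same check the patch adds after each bio_split() call. */
	if (IS_ERR(split)) {
		int error = PTR_ERR(split);

		printf("split failed: %d\n", error);	/* prints -22 (-EINVAL) */
		return 1;
	}
	return 0;
}
```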
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f3bf1116794a..ccd95459b192 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1159,6 +1159,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	int slot = r10_bio->read_slot;
 	struct md_rdev *err_rdev = NULL;
 	gfp_t gfp = GFP_NOIO;
+	int error;
 
 	if (slot >= 0 && r10_bio->devs[slot].rdev) {
 		/*
@@ -1206,6 +1207,10 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
+		if (IS_ERR(split)) {
+			error = PTR_ERR(split);
+			goto err_handle;
+		}
 		bio_chain(split, bio);
 		allow_barrier(conf);
 		submit_bio_noacct(bio);
@@ -1236,6 +1241,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	mddev_trace_remap(mddev, read_bio, r10_bio->sector);
 	submit_bio_noacct(read_bio);
 	return;
+err_handle:
+	atomic_dec(&rdev->nr_pending);
+	bio->bi_status = errno_to_blk_status(error);
+	set_bit(R10BIO_Uptodate, &r10_bio->state);
+	raid_end_bio_io(r10_bio);
 }
 
 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
@@ -1347,9 +1357,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 				 struct r10bio *r10_bio)
 {
 	struct r10conf *conf = mddev->private;
-	int i;
+	int i, k;
 	sector_t sectors;
 	int max_sectors;
+	int error;
 
 	if ((mddev_is_clustered(mddev) &&
 	     md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1482,6 +1493,10 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	if (r10_bio->sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, r10_bio->sectors,
 					      GFP_NOIO, &conf->bio_split);
+		if (IS_ERR(split)) {
+			error = PTR_ERR(split);
+			goto err_handle;
+		}
 		bio_chain(split, bio);
 		allow_barrier(conf);
 		submit_bio_noacct(bio);
@@ -1503,6 +1518,26 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 			raid10_write_one_disk(mddev, r10_bio, bio, true, i);
 	}
 	one_write_done(r10_bio);
+	return;
+err_handle:
+	for (k = 0; k < i; k++) {
+		int d = r10_bio->devs[k].devnum;
+		struct md_rdev *rdev = conf->mirrors[d].rdev;
+		struct md_rdev *rrdev = conf->mirrors[d].replacement;
+
+		if (r10_bio->devs[k].bio) {
+			rdev_dec_pending(rdev, mddev);
+			r10_bio->devs[k].bio = NULL;
+		}
+		if (r10_bio->devs[k].repl_bio) {
+			rdev_dec_pending(rrdev, mddev);
+			r10_bio->devs[k].repl_bio = NULL;
+		}
+	}
+
+	bio->bi_status = errno_to_blk_status(error);
+	set_bit(R10BIO_Uptodate, &r10_bio->state);
+	raid_end_bio_io(r10_bio);
 }
 
 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
@@ -1644,6 +1679,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	if (remainder) {
 		split_size = stripe_size - remainder;
 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+		if (IS_ERR(split)) {
+			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+			bio_endio(bio);
+			return 0;
+		}
 		bio_chain(split, bio);
 		allow_barrier(conf);
 		/* Resend the fist split part */
@@ -1654,6 +1694,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	if (remainder) {
 		split_size = bio_sectors(bio) - remainder;
 		split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+		if (IS_ERR(split)) {
+			bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+			bio_endio(bio);
+			return 0;
+		}
 		bio_chain(split, bio);
 		allow_barrier(conf);
 		/* Resend the second split part */
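The subtlest piece above is the new err_handle unwind in raid10_write_request(): the k < i loop drops only the rdev references that were actually taken before the parent bio is failed. A minimal sketch of that acquire-then-unwind pattern, assuming hypothetical claim_dev()/release_dev() helpers in place of the nr_pending accounting:

```c
#include <stdbool.h>
#include <stdio.h>

#define NDEVS 4

static int pending[NDEVS];	/* stand-in for rdev->nr_pending */

/* Simulate taking a reference; device 2 refuses, forcing the error path. */
static bool claim_dev(int d)
{
	if (d == 2)
		return false;
	pending[d]++;
	return true;
}

/* Stand-in for rdev_dec_pending(). */
static void release_dev(int d)
{
	pending[d]--;
}

int main(void)
{
	int i, k;

	for (i = 0; i < NDEVS; i++) {
		if (!claim_dev(i))
			goto err_handle;
	}
	return 0;

err_handle:
	/* Undo only the k < i references taken so far, as the patch does. */
	for (k = 0; k < i; k++)
		release_dev(k);
	printf("failed at dev %d; all claimed references dropped\n", i);
	return 1;
}
```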
On 2024/10/31 17:59, John Garry wrote:
> Add proper bio_split() error handling. For any error, call
> raid_end_bio_io() and return; except for discard, where we end the bio
> directly.
>
> Signed-off-by: John Garry <john.g.garry@oracle.com>
> ---
>  drivers/md/raid10.c | 47 ++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 46 insertions(+), 1 deletion(-)

LGTM

Reviewed-by: Yu Kuai <yukuai3@huawei.com>