Message ID | 20221212070611.5209-4-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [1/3] btrfs: cleanup raid56_parity_write | expand |
On 2022/12/12 15:06, Christoph Hellwig wrote: > Both callers of rmv_rbio call rbio_orig_end_io right after it, so > move the call into the shared function. > > Signed-off-by: Christoph Hellwig <hch@lst.de> This changed the schema, as all work functions, rmw_rbio(), recover_rbio(), scrub_rbio() all follows the same return error, and let the caller to call rbio_orig_end_io(). I'm not against the change, but it's better to change all *_rbio() functions to follow the same behavior instead. Thanks, Qu > --- > fs/btrfs/raid56.c | 30 ++++++++++-------------------- > 1 file changed, 10 insertions(+), 20 deletions(-) > > diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c > index 5035e2b20a5e02..14d71076a222f9 100644 > --- a/fs/btrfs/raid56.c > +++ b/fs/btrfs/raid56.c > @@ -2275,7 +2275,7 @@ static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio) > return false; > } > > -static int rmw_rbio(struct btrfs_raid_bio *rbio) > +static void rmw_rbio(struct btrfs_raid_bio *rbio) > { > struct bio_list bio_list; > int sectornr; > @@ -2287,7 +2287,7 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) > */ > ret = alloc_rbio_parity_pages(rbio); > if (ret < 0) > - return ret; > + goto out; > > /* > * Either full stripe write, or we have every data sector already > @@ -2300,13 +2300,13 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) > */ > ret = alloc_rbio_data_pages(rbio); > if (ret < 0) > - return ret; > + goto out; > > index_rbio_pages(rbio); > > ret = rmw_read_wait_recover(rbio); > if (ret < 0) > - return ret; > + goto out; > } > > /* > @@ -2339,7 +2339,7 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) > bio_list_init(&bio_list); > ret = rmw_assemble_write_bios(rbio, &bio_list); > if (ret < 0) > - return ret; > + goto out; > > /* We should have at least one bio assembled. 
*/ > ASSERT(bio_list_size(&bio_list)); > @@ -2356,32 +2356,22 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) > break; > } > } > - return ret; > +out: > + rbio_orig_end_io(rbio, errno_to_blk_status(ret)); > } > > static void rmw_rbio_work(struct work_struct *work) > { > struct btrfs_raid_bio *rbio; > - int ret; > > rbio = container_of(work, struct btrfs_raid_bio, work); > - > - ret = lock_stripe_add(rbio); > - if (ret == 0) { > - ret = rmw_rbio(rbio); > - rbio_orig_end_io(rbio, errno_to_blk_status(ret)); > - } > + if (lock_stripe_add(rbio) == 0) > + rmw_rbio(rbio); > } > > static void rmw_rbio_work_locked(struct work_struct *work) > { > - struct btrfs_raid_bio *rbio; > - int ret; > - > - rbio = container_of(work, struct btrfs_raid_bio, work); > - > - ret = rmw_rbio(rbio); > - rbio_orig_end_io(rbio, errno_to_blk_status(ret)); > + rmw_rbio(container_of(work, struct btrfs_raid_bio, work)); > } > > /*
On Mon, Dec 12, 2022 at 03:39:57PM +0800, Qu Wenruo wrote: > This changed the schema, as all work functions, rmw_rbio(), recover_rbio(), > scrub_rbio() all follows the same return error, and let the caller to call > rbio_orig_end_io(). > > I'm not against the change, but it's better to change all *_rbio() > functions to follow the same behavior instead. I hadn't looked at the other work items, but yes it seems like they would benefit from a similar change.
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 5035e2b20a5e02..14d71076a222f9 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -2275,7 +2275,7 @@ static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio) return false; } -static int rmw_rbio(struct btrfs_raid_bio *rbio) +static void rmw_rbio(struct btrfs_raid_bio *rbio) { struct bio_list bio_list; int sectornr; @@ -2287,7 +2287,7 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) */ ret = alloc_rbio_parity_pages(rbio); if (ret < 0) - return ret; + goto out; /* * Either full stripe write, or we have every data sector already @@ -2300,13 +2300,13 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) */ ret = alloc_rbio_data_pages(rbio); if (ret < 0) - return ret; + goto out; index_rbio_pages(rbio); ret = rmw_read_wait_recover(rbio); if (ret < 0) - return ret; + goto out; } /* @@ -2339,7 +2339,7 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) bio_list_init(&bio_list); ret = rmw_assemble_write_bios(rbio, &bio_list); if (ret < 0) - return ret; + goto out; /* We should have at least one bio assembled. */ ASSERT(bio_list_size(&bio_list)); @@ -2356,32 +2356,22 @@ static int rmw_rbio(struct btrfs_raid_bio *rbio) break; } } - return ret; +out: + rbio_orig_end_io(rbio, errno_to_blk_status(ret)); } static void rmw_rbio_work(struct work_struct *work) { struct btrfs_raid_bio *rbio; - int ret; rbio = container_of(work, struct btrfs_raid_bio, work); - - ret = lock_stripe_add(rbio); - if (ret == 0) { - ret = rmw_rbio(rbio); - rbio_orig_end_io(rbio, errno_to_blk_status(ret)); - } + if (lock_stripe_add(rbio) == 0) + rmw_rbio(rbio); } static void rmw_rbio_work_locked(struct work_struct *work) { - struct btrfs_raid_bio *rbio; - int ret; - - rbio = container_of(work, struct btrfs_raid_bio, work); - - ret = rmw_rbio(rbio); - rbio_orig_end_io(rbio, errno_to_blk_status(ret)); + rmw_rbio(container_of(work, struct btrfs_raid_bio, work)); } /*
Both callers of rmw_rbio call rbio_orig_end_io right after it, so move the call into the shared function. Signed-off-by: Christoph Hellwig <hch@lst.de> --- fs/btrfs/raid56.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-)