Message ID | 91e9ffb668376e4c67753ca92c8ca8737c07258c.1741306938.git.boris@bur.io (mailing list archive) |
---|---|
State | New |
Series | btrfs: block_group refcounting fixes |
On Fri, Mar 7, 2025 at 12:31 AM Boris Burkov <boris@bur.io> wrote:
>
> Similar to mark_bg_unused and mark_bg_to_reclaim, we have a few places
> that use bg_list with refcounting, mostly for retrying failures to
> reclaim/delete unused.
>
> These have custom logic for handling locking and refcounting the bg_list
> properly, but they actually all want to do the same thing, so pull that
> logic out into a helper. Unfortunately, mark_bg_unused does still need
> the NEW flag to avoid prematurely marking stuff unused (even if refcount
> is fine, we don't want to mess with bg creation), so it cannot use the
> new helper.
>
> Signed-off-by: Boris Burkov <boris@bur.io>
> ---
>  fs/btrfs/block-group.c | 54 +++++++++++++++++++++++-------------------
>  1 file changed, 30 insertions(+), 24 deletions(-)
>
> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
> index e4071897c9a8..a570d89ff0c3 100644
> --- a/fs/btrfs/block-group.c
> +++ b/fs/btrfs/block-group.c
> @@ -1455,6 +1455,31 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
>         return ret == 0;
>  }
>
> +/*
> + * link the block_group to l via bg_list

Please make the style consistent with the rest, first word capitalized
and ending the sentence with punctuation.

> + *
> + * Use this rather than list_add_tail directly to ensure proper respect
> + * to locking and refcounting.
> + *
> + * @bg: the block_group to link to the list.
> + * @l: the list to link it to.
> + * Returns: true if the bg was linked with a refcount bump and false otherwise.
> + */
> +static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *l)

Don't use single letter variable names, it's discouraged, except for
loop index variables like 'i' or 'j' for inner loops.
Use something else like 'list' for example.

Otherwise it looks fine. With that:

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Thanks.

> +{
> +        struct btrfs_fs_info *fs_info = bg->fs_info;
> +        bool added = false;
> +
> +        spin_lock(&fs_info->unused_bgs_lock);
> +        if (list_empty(&bg->bg_list)) {
> +                btrfs_get_block_group(bg);
> +                list_add_tail(&bg->bg_list, l);
> +                added = true;
> +        }
> +        spin_unlock(&fs_info->unused_bgs_lock);
> +        return added;
> +}
> +
>  /*
>   * Process the unused_bgs list and remove any that don't have any allocated
>   * space inside of them.
> @@ -1570,8 +1595,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
>                          * drop under the "next" label for the
>                          * fs_info->unused_bgs list.
>                          */
> -                        btrfs_get_block_group(block_group);
> -                        list_add_tail(&block_group->bg_list, &retry_list);
> +                        btrfs_link_bg_list(block_group, &retry_list);
>
>                          trace_btrfs_skip_unused_block_group(block_group);
>                          spin_unlock(&block_group->lock);
> @@ -1968,20 +1992,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
>                  spin_unlock(&space_info->lock);
>
>  next:
> -                if (ret && !READ_ONCE(space_info->periodic_reclaim)) {
> -                        /* Refcount held by the reclaim_bgs list after splice. */
> -                        spin_lock(&fs_info->unused_bgs_lock);
> -                        /*
> -                         * This block group might be added to the unused list
> -                         * during the above process. Move it back to the
> -                         * reclaim list otherwise.
> -                         */
> -                        if (list_empty(&bg->bg_list)) {
> -                                btrfs_get_block_group(bg);
> -                                list_add_tail(&bg->bg_list, &retry_list);
> -                        }
> -                        spin_unlock(&fs_info->unused_bgs_lock);
> -                }
> +                if (ret && !READ_ONCE(space_info->periodic_reclaim))
> +                        btrfs_link_bg_list(bg, &retry_list);
>                  btrfs_put_block_group(bg);
>
>                  mutex_unlock(&fs_info->reclaim_bgs_lock);
> @@ -2021,13 +2033,8 @@ void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
>  {
>          struct btrfs_fs_info *fs_info = bg->fs_info;
>
> -        spin_lock(&fs_info->unused_bgs_lock);
> -        if (list_empty(&bg->bg_list)) {
> -                btrfs_get_block_group(bg);
> +        if (btrfs_link_bg_list(bg, &fs_info->reclaim_bgs))
>                  trace_btrfs_add_reclaim_block_group(bg);
> -                list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
> -        }
> -        spin_unlock(&fs_info->unused_bgs_lock);
>  }
>
>  static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
> @@ -2940,8 +2947,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
>          }
>  #endif
>
> -        btrfs_get_block_group(cache);
> -        list_add_tail(&cache->bg_list, &trans->new_bgs);
> +        btrfs_link_bg_list(cache, &trans->new_bgs);
>          btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
>
>          set_avail_alloc_bits(fs_info, type);
> --
> 2.48.1
>
>
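For reference, a minimal sketch of how the helper might read once Filipe's two comments are folded in, i.e. with the opening comment line capitalized and punctuated and the single-letter parameter renamed to 'list'. This is only an illustration of the suggested changes, not the actual follow-up revision:

/* Illustrative sketch only: the helper as posted, with the review suggestions applied. */

/*
 * Link the block group to the given list via bg_list.
 *
 * Use this rather than list_add_tail() directly to ensure proper respect
 * to locking and refcounting.
 *
 * @bg:   the block_group to link to the list.
 * @list: the list to link it to.
 *
 * Returns: true if the bg was linked with a refcount bump and false otherwise.
 */
static bool btrfs_link_bg_list(struct btrfs_block_group *bg, struct list_head *list)
{
        struct btrfs_fs_info *fs_info = bg->fs_info;
        bool added = false;

        spin_lock(&fs_info->unused_bgs_lock);
        if (list_empty(&bg->bg_list)) {
                /* The list takes its own reference on the block group. */
                btrfs_get_block_group(bg);
                list_add_tail(&bg->bg_list, list);
                added = true;
        }
        spin_unlock(&fs_info->unused_bgs_lock);
        return added;
}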