diff mbox series

[v2,2/5] btrfs: harden bg->bg_list against list_del races

Message ID 35f34b992427ea8a8c888d3e183b9ea024d1dfcc.1741636986.git.boris@bur.io (mailing list archive)
State New
Headers show
Series btrfs: block_group refcounting fixes | expand

Commit Message

Boris Burkov March 10, 2025, 8:07 p.m. UTC
As far as I can tell, these calls to list_del_init on bg_list cannot
run concurrently with btrfs_mark_bg_unused or btrfs_mark_bg_to_reclaim,
as they are in transaction error paths and situations where the block
group is read-only.

However, if there is any chance at all of racing with btrfs_mark_bg_unused,
or a different future user of bg_list, it is better to be safe than sorry.

Otherwise we risk the following interleaving (bg_list refcount in parens)

T1 (some random op)                       T2 (btrfs_mark_bg_unused)
                                        !list_empty(&bg->bg_list); (1)
list_del_init(&bg->bg_list); (1)
                                        list_move_tail (1)
btrfs_put_block_group (0)
                                        btrfs_delete_unused_bgs
                                             bg = list_first_entry
                                             list_del_init(&bg->bg_list);
                                             btrfs_put_block_group(bg); (-1)

Ultimately, this results in a broken refcount that hits zero one put too
early, so the real final put underflows the refcount, resulting in a WARNING.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Boris Burkov <boris@bur.io>
---
 fs/btrfs/extent-tree.c |  8 ++++++++
 fs/btrfs/transaction.c | 12 ++++++++++++
 2 files changed, 20 insertions(+)

Comments

Filipe Manana March 11, 2025, 11:35 a.m. UTC | #1
On Mon, Mar 10, 2025 at 8:06 PM Boris Burkov <boris@bur.io> wrote:
>
> As far as I can tell, these calls of list_del_init on bg_list can not
> run concurrently with btrfs_mark_bg_unused or btrfs_mark_bg_to_reclaim,
> as they are in transaction error paths and situations where the block
> group is readonly.
>
> However, if there is any chance at all of racing with mark_bg_unused,
> or a different future user of bg_list, better to be safe than sorry.
>
> Otherwise we risk the following interleaving (bg_list refcount in parens)
>
> T1 (some random op)                       T2 (btrfs_mark_bg_unused)
>                                         !list_empty(&bg->bg_list); (1)
> list_del_init(&bg->bg_list); (1)
>                                         list_move_tail (1)
> btrfs_put_block_group (0)
>                                         btrfs_delete_unused_bgs
>                                              bg = list_first_entry
>                                              list_del_init(&bg->bg_list);
>                                              btrfs_put_block_group(bg); (-1)
>
> Ultimately, this results in a broken ref count that hits zero one deref
> early and the real final deref underflows the refcount, resulting in a WARNING.
>
> Reviewed-by: Qu Wenruo <wqu@suse.com>
> Signed-off-by: Boris Burkov <boris@bur.io>

Reviewed-by: Filipe Manana <fdmanana@suse.com>

Looks good now, thanks.

> ---
>  fs/btrfs/extent-tree.c |  8 ++++++++
>  fs/btrfs/transaction.c | 12 ++++++++++++
>  2 files changed, 20 insertions(+)
>
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index 5de1a1293c93..5ead2f4976e4 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -2868,7 +2868,15 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
>                                                    block_group->length,
>                                                    &trimmed);
>
> +               /*
> +                * Not strictly necessary to lock, as the block_group should be
> +                * read-only from btrfs_delete_unused_bgs.
> +                */
> +               ASSERT(block_group->ro);
> +               spin_lock(&fs_info->unused_bgs_lock);
>                 list_del_init(&block_group->bg_list);
> +               spin_unlock(&fs_info->unused_bgs_lock);
> +
>                 btrfs_unfreeze_block_group(block_group);
>                 btrfs_put_block_group(block_group);
>
> diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
> index db8fe291d010..470dfc3a1a5c 100644
> --- a/fs/btrfs/transaction.c
> +++ b/fs/btrfs/transaction.c
> @@ -160,7 +160,13 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
>                         cache = list_first_entry(&transaction->deleted_bgs,
>                                                  struct btrfs_block_group,
>                                                  bg_list);
> +                       /*
> +                        * Not strictly necessary to lock, as no other task will be using a
> +                        * block_group on the deleted_bgs list during a transaction abort.
> +                        */
> +                       spin_lock(&transaction->fs_info->unused_bgs_lock);
>                         list_del_init(&cache->bg_list);
> +                       spin_unlock(&transaction->fs_info->unused_bgs_lock);
>                         btrfs_unfreeze_block_group(cache);
>                         btrfs_put_block_group(cache);
>                 }
> @@ -2096,7 +2102,13 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
>
>         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
>                 btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
> +               /*
> +               * Not strictly necessary to lock, as no other task will be using a
> +               * block_group on the new_bgs list during a transaction abort.
> +               */
> +              spin_lock(&fs_info->unused_bgs_lock);
>                 list_del_init(&block_group->bg_list);
> +              spin_unlock(&fs_info->unused_bgs_lock);
>         }
>  }
>
> --
> 2.48.1
>
>
diff mbox series

Patch

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5de1a1293c93..5ead2f4976e4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2868,7 +2868,15 @@  int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
 						   block_group->length,
 						   &trimmed);
 
+		/*
+		 * Not strictly necessary to lock, as the block_group should be
+		 * read-only from btrfs_delete_unused_bgs.
+		 */
+		ASSERT(block_group->ro);
+		spin_lock(&fs_info->unused_bgs_lock);
 		list_del_init(&block_group->bg_list);
+		spin_unlock(&fs_info->unused_bgs_lock);
+
 		btrfs_unfreeze_block_group(block_group);
 		btrfs_put_block_group(block_group);
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index db8fe291d010..470dfc3a1a5c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -160,7 +160,13 @@  void btrfs_put_transaction(struct btrfs_transaction *transaction)
 			cache = list_first_entry(&transaction->deleted_bgs,
 						 struct btrfs_block_group,
 						 bg_list);
+			/*
+			 * Not strictly necessary to lock, as no other task will be using a
+			 * block_group on the deleted_bgs list during a transaction abort.
+			 */
+			spin_lock(&transaction->fs_info->unused_bgs_lock);
 			list_del_init(&cache->bg_list);
+			spin_unlock(&transaction->fs_info->unused_bgs_lock);
 			btrfs_unfreeze_block_group(cache);
 			btrfs_put_block_group(cache);
 		}
@@ -2096,7 +2102,13 @@  static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
 
        list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
                btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
+		/*
+		* Not strictly necessary to lock, as no other task will be using a
+		* block_group on the new_bgs list during a transaction abort.
+		*/
+	       spin_lock(&fs_info->unused_bgs_lock);
                list_del_init(&block_group->bg_list);
+	       spin_unlock(&fs_info->unused_bgs_lock);
        }
 }