
btrfs: qgroup: fix sleep from invalid context bug in update_qgroup_limit_item()

Message ID 20221110141342.2129475-1-chenxiaosong2@huawei.com (mailing list archive)
State New, archived
Series btrfs: qgroup: fix sleep from invalid context bug in update_qgroup_limit_item()

Commit Message

ChenXiaoSong Nov. 10, 2022, 2:13 p.m. UTC
Syzkaller reported BUG as follows:

  BUG: sleeping function called from invalid context at
       include/linux/sched/mm.h:274
  Call Trace:
   <TASK>
   dump_stack_lvl+0xcd/0x134
   __might_resched.cold+0x222/0x26b
   kmem_cache_alloc+0x2e7/0x3c0
   update_qgroup_limit_item+0xe1/0x390
   btrfs_qgroup_inherit+0x147b/0x1ee0
   create_subvol+0x4eb/0x1710
   btrfs_mksubvol+0xfe5/0x13f0
   __btrfs_ioctl_snap_create+0x2b0/0x430
   btrfs_ioctl_snap_create_v2+0x25a/0x520
   btrfs_ioctl+0x2a1c/0x5ce0
   __x64_sys_ioctl+0x193/0x200
   do_syscall_64+0x35/0x80

Fix this by introducing a __update_qgroup_limit_item() helper and
allocating the memory outside of the spinlock.

Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
---
 fs/btrfs/qgroup.c | 35 ++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)
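
For illustration, a minimal sketch of the pattern the warning is about and of
the approach this patch takes (simplified, not the literal hunks; GFP_NOFS and
GFP_KERNEL allocations may sleep, so they must not happen under a spinlock):

    /* Before: allocation happens while fs_info->qgroup_lock is held. */
    spin_lock(&fs_info->qgroup_lock);
    ...
    path = btrfs_alloc_path();  /* may sleep -> "sleeping function called ..." */
    ...
    spin_unlock(&fs_info->qgroup_lock);

    /* After: pre-allocate the path before taking the spinlock. */
    path = btrfs_alloc_path();
    if (!path)
            return -ENOMEM;     /* the patch itself uses goto out here */
    spin_lock(&fs_info->qgroup_lock);
    ...
    spin_unlock(&fs_info->qgroup_lock);
    btrfs_free_path(path);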

Comments

David Sterba Nov. 10, 2022, 2:46 p.m. UTC | #1
On Thu, Nov 10, 2022 at 10:13:42PM +0800, ChenXiaoSong wrote:
> Syzkaller reported BUG as follows:

Do you have link to the report? Or at least the identifier of the
report, there's some automation that recognizes Reported-by: syzbot-...
to close it once the patch is merged.
ChenXiaoSong Nov. 10, 2022, 2:59 p.m. UTC | #2
I have _no_ link to the report; I just reproduced it in my own qemu VM,
and there is _no_ C or syz reproducer.

On 2022/11/10 22:46, David Sterba wrote:
> On Thu, Nov 10, 2022 at 10:13:42PM +0800, ChenXiaoSong wrote:
>> Syzkaller reported BUG as follows:
> 
> Do you have link to the report? Or at least the identifier of the
> report, there's some automation that recognizes Reported-by: syzbot-...
> to close it once the patch is merged.
> .
>
David Sterba Nov. 10, 2022, 3:35 p.m. UTC | #3
On Thu, Nov 10, 2022 at 10:59:56PM +0800, ChenXiaoSong wrote:
> I have _no_ link to the report, I just reproduce it in my own qemu vm, 
> and _no_ c or syz repro.

I see, we've got several reports from syzbot; I thought you were fixing
one of them.
David Sterba Nov. 10, 2022, 8:54 p.m. UTC | #4
On Thu, Nov 10, 2022 at 10:13:42PM +0800, ChenXiaoSong wrote:
> Syzkaller reported BUG as follows:
> 
>   BUG: sleeping function called from invalid context at
>        include/linux/sched/mm.h:274
>   Call Trace:
>    <TASK>
>    dump_stack_lvl+0xcd/0x134
>    __might_resched.cold+0x222/0x26b
>    kmem_cache_alloc+0x2e7/0x3c0
>    update_qgroup_limit_item+0xe1/0x390
>    btrfs_qgroup_inherit+0x147b/0x1ee0
>    create_subvol+0x4eb/0x1710
>    btrfs_mksubvol+0xfe5/0x13f0
>    __btrfs_ioctl_snap_create+0x2b0/0x430
>    btrfs_ioctl_snap_create_v2+0x25a/0x520
>    btrfs_ioctl+0x2a1c/0x5ce0
>    __x64_sys_ioctl+0x193/0x200
>    do_syscall_64+0x35/0x80
> 
> Fix this by introducing __update_qgroup_limit_item() helper, allocate
> memory outside of the spin lock.
> 
> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>

Added to misc-next, thanks.

> +	path = btrfs_alloc_path();

btrfs_alloc_path() uses a fixed GFP_NOFS flag for kmem_cache_alloc(), but
that does not try to detect whether it could sleep or not.
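
For illustration, btrfs_alloc_path() is roughly a one-line wrapper; GFP_NOFS
only forbids recursing into filesystem reclaim, it can still block, so it is
no substitute for an "atomic context" hint:

    struct btrfs_path *btrfs_alloc_path(void)
    {
            /* GFP_NOFS can still sleep on reclaim; it is not GFP_ATOMIC. */
            return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
    }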
Qu Wenruo Nov. 10, 2022, 10:58 p.m. UTC | #5
On 2022/11/10 22:13, ChenXiaoSong wrote:
> Syzkaller reported BUG as follows:
> 
>    BUG: sleeping function called from invalid context at
>         include/linux/sched/mm.h:274
>    Call Trace:
>     <TASK>
>     dump_stack_lvl+0xcd/0x134
>     __might_resched.cold+0x222/0x26b
>     kmem_cache_alloc+0x2e7/0x3c0
>     update_qgroup_limit_item+0xe1/0x390
>     btrfs_qgroup_inherit+0x147b/0x1ee0
>     create_subvol+0x4eb/0x1710
>     btrfs_mksubvol+0xfe5/0x13f0
>     __btrfs_ioctl_snap_create+0x2b0/0x430
>     btrfs_ioctl_snap_create_v2+0x25a/0x520
>     btrfs_ioctl+0x2a1c/0x5ce0
>     __x64_sys_ioctl+0x193/0x200
>     do_syscall_64+0x35/0x80
> 
> Fix this by introducing __update_qgroup_limit_item() helper, allocate
> memory outside of the spin lock.
> 
> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>

Unfortunately, __update_qgroup_limit_item() can still sleep, as it calls
btrfs_search_slot(), which can lead to disk IO if the qgroup tree is not
cached.


I believe the proper way is to either unlock the spinlock inside
btrfs_qgroup_inherit() (which needs extra scrutiny of the qgroup lock),
or delay the limit item update until we have unlocked the spinlock.

To me, the latter seems more reasonable, as it's just one qgroup
(@dstgroup), and we're already doing the same delayed work for sysfs
interface creation.

Thanks,
Qu

> ---
>   fs/btrfs/qgroup.c | 35 ++++++++++++++++++++++++++---------
>   1 file changed, 26 insertions(+), 9 deletions(-)
> 
> diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
> index 9334c3157c22..99a61cc04b68 100644
> --- a/fs/btrfs/qgroup.c
> +++ b/fs/btrfs/qgroup.c
> @@ -768,11 +768,11 @@ static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
>   	return ret;
>   }
>   
> -static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
> -				    struct btrfs_qgroup *qgroup)
> +static int __update_qgroup_limit_item(struct btrfs_trans_handle *trans,
> +				      struct btrfs_qgroup *qgroup,
> +				      struct btrfs_path *path)
>   {
>   	struct btrfs_root *quota_root = trans->fs_info->quota_root;
> -	struct btrfs_path *path;
>   	struct btrfs_key key;
>   	struct extent_buffer *l;
>   	struct btrfs_qgroup_limit_item *qgroup_limit;
> @@ -783,10 +783,6 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
>   	key.type = BTRFS_QGROUP_LIMIT_KEY;
>   	key.offset = qgroup->qgroupid;
>   
> -	path = btrfs_alloc_path();
> -	if (!path)
> -		return -ENOMEM;
> -
>   	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
>   	if (ret > 0)
>   		ret = -ENOENT;
> @@ -806,6 +802,21 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
>   	btrfs_mark_buffer_dirty(l);
>   
>   out:
> +	return ret;
> +}
> +
> +static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
> +				    struct btrfs_qgroup *qgroup)
> +{
> +	struct btrfs_path *path;
> +	int ret;
> +
> +	path = btrfs_alloc_path();
> +	if (!path)
> +		return -ENOMEM;
> +
> +	ret = __update_qgroup_limit_item(trans, qgroup, path);
> +
>   	btrfs_free_path(path);
>   	return ret;
>   }
> @@ -2860,6 +2871,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
>   	bool need_rescan = false;
>   	u32 level_size = 0;
>   	u64 nums;
> +	struct btrfs_path *path;
>   
>   	/*
>   	 * There are only two callers of this function.
> @@ -2935,6 +2947,11 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
>   		ret = 0;
>   	}
>   
> +	path = btrfs_alloc_path();
> +	if (!path) {
> +		ret = -ENOMEM;
> +		goto out;
> +	}
>   
>   	spin_lock(&fs_info->qgroup_lock);
>   
> @@ -2950,8 +2967,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
>   		dstgroup->max_excl = inherit->lim.max_excl;
>   		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
>   		dstgroup->rsv_excl = inherit->lim.rsv_excl;
> -
> -		ret = update_qgroup_limit_item(trans, dstgroup);
> +		ret = __update_qgroup_limit_item(trans, dstgroup, path);
>   		if (ret) {
>   			qgroup_mark_inconsistent(fs_info);
>   			btrfs_info(fs_info,
> @@ -3053,6 +3069,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
>   
>   unlock:
>   	spin_unlock(&fs_info->qgroup_lock);
> +	btrfs_free_path(path);
>   	if (!ret)
>   		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
>   out:
Qu Wenruo Nov. 10, 2022, 11:31 p.m. UTC | #6
On 2022/11/11 04:54, David Sterba wrote:
> On Thu, Nov 10, 2022 at 10:13:42PM +0800, ChenXiaoSong wrote:
>> Syzkaller reported BUG as follows:
>>
>>    BUG: sleeping function called from invalid context at
>>         include/linux/sched/mm.h:274
>>    Call Trace:
>>     <TASK>
>>     dump_stack_lvl+0xcd/0x134
>>     __might_resched.cold+0x222/0x26b
>>     kmem_cache_alloc+0x2e7/0x3c0
>>     update_qgroup_limit_item+0xe1/0x390
>>     btrfs_qgroup_inherit+0x147b/0x1ee0
>>     create_subvol+0x4eb/0x1710
>>     btrfs_mksubvol+0xfe5/0x13f0
>>     __btrfs_ioctl_snap_create+0x2b0/0x430
>>     btrfs_ioctl_snap_create_v2+0x25a/0x520
>>     btrfs_ioctl+0x2a1c/0x5ce0
>>     __x64_sys_ioctl+0x193/0x200
>>     do_syscall_64+0x35/0x80
>>
>> Fix this by introducing __update_qgroup_limit_item() helper, allocate
>> memory outside of the spin lock.
>>
>> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
> 
> Added to misc-next, thanks.

Please remove it for now. The patch only addresses what the MM layer
reports; it doesn't really solve the root cause, which is that we're doing
a tree modification (btrfs_search_slot()) under a spinlock.

I'm pretty sure there will be a v2 version to properly fix it.

Thanks,
Qu
> 
>> +	path = btrfs_alloc_path();
> 
> btrfs_alloc_path uses fixed GFP_NOFS flags for kmem_cache_alloc but that
> does not try to detect if it could sleep or not.
ChenXiaoSong Nov. 11, 2022, 1:50 a.m. UTC | #7
Yes, at least two places will sleep in btrfs_search_slot():

   update_qgroup_limit_item
     btrfs_search_slot
       setup_nodes_for_search
         reada_for_balance
           btrfs_readahead_node_child
             btrfs_readahead_tree_block
               btrfs_find_create_tree_block
                 alloc_extent_buffer
                   kmem_cache_zalloc
                     /* will sleep */
                     kmem_cache_alloc(GFP_NOFS|__GFP_NOFAIL|__GFP_ZERO)
               read_extent_buffer_pages
                 submit_extent_page
                   submit_one_bio /* disk IO, will sleep */

On 2022/11/11 6:58, Qu Wenruo wrote:
> 
> 
> On 2022/11/10 22:13, ChenXiaoSong wrote:
>> Syzkaller reported BUG as follows:
>>
>>    BUG: sleeping function called from invalid context at
>>         include/linux/sched/mm.h:274
>>    Call Trace:
>>     <TASK>
>>     dump_stack_lvl+0xcd/0x134
>>     __might_resched.cold+0x222/0x26b
>>     kmem_cache_alloc+0x2e7/0x3c0
>>     update_qgroup_limit_item+0xe1/0x390
>>     btrfs_qgroup_inherit+0x147b/0x1ee0
>>     create_subvol+0x4eb/0x1710
>>     btrfs_mksubvol+0xfe5/0x13f0
>>     __btrfs_ioctl_snap_create+0x2b0/0x430
>>     btrfs_ioctl_snap_create_v2+0x25a/0x520
>>     btrfs_ioctl+0x2a1c/0x5ce0
>>     __x64_sys_ioctl+0x193/0x200
>>     do_syscall_64+0x35/0x80
>>
>> Fix this by introducing __update_qgroup_limit_item() helper, allocate
>> memory outside of the spin lock.
>>
>> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
> 
> Unfortunately, __update_qgroup_limit_item() can still sleep.
> 
> As it calls btrfs_search_slot(), which can lead to disk IO if the qgroup 
> tree is not cached.
> 
> 
> I believe the proper way is to either unlock the spinlock inside 
> btrfs_qgroup_inherit() (which needs extra scrutiny on the qgroup lock), 
> or delayed the limit item updates until we have unlocked the spinlock.
> 
> To me, the latter one seems more reasonable, as it's just one qgroup 
> (@dstgroup), and we're doing the same delayed work for sysfs interface 
> creation.
> 
> Thanks,
> Qu
> 
>> ---
>>   fs/btrfs/qgroup.c | 35 ++++++++++++++++++++++++++---------
>>   1 file changed, 26 insertions(+), 9 deletions(-)
>>
>> diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
>> index 9334c3157c22..99a61cc04b68 100644
>> --- a/fs/btrfs/qgroup.c
>> +++ b/fs/btrfs/qgroup.c
>> @@ -768,11 +768,11 @@ static int del_qgroup_item(struct 
>> btrfs_trans_handle *trans, u64 qgroupid)
>>       return ret;
>>   }
>> -static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
>> -                    struct btrfs_qgroup *qgroup)
>> +static int __update_qgroup_limit_item(struct btrfs_trans_handle *trans,
>> +                      struct btrfs_qgroup *qgroup,
>> +                      struct btrfs_path *path)
>>   {
>>       struct btrfs_root *quota_root = trans->fs_info->quota_root;
>> -    struct btrfs_path *path;
>>       struct btrfs_key key;
>>       struct extent_buffer *l;
>>       struct btrfs_qgroup_limit_item *qgroup_limit;
>> @@ -783,10 +783,6 @@ static int update_qgroup_limit_item(struct 
>> btrfs_trans_handle *trans,
>>       key.type = BTRFS_QGROUP_LIMIT_KEY;
>>       key.offset = qgroup->qgroupid;
>> -    path = btrfs_alloc_path();
>> -    if (!path)
>> -        return -ENOMEM;
>> -
>>       ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
>>       if (ret > 0)
>>           ret = -ENOENT;
>> @@ -806,6 +802,21 @@ static int update_qgroup_limit_item(struct 
>> btrfs_trans_handle *trans,
>>       btrfs_mark_buffer_dirty(l);
>>   out:
>> +    return ret;
>> +}
>> +
>> +static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
>> +                    struct btrfs_qgroup *qgroup)
>> +{
>> +    struct btrfs_path *path;
>> +    int ret;
>> +
>> +    path = btrfs_alloc_path();
>> +    if (!path)
>> +        return -ENOMEM;
>> +
>> +    ret = __update_qgroup_limit_item(trans, qgroup, path);
>> +
>>       btrfs_free_path(path);
>>       return ret;
>>   }
>> @@ -2860,6 +2871,7 @@ int btrfs_qgroup_inherit(struct 
>> btrfs_trans_handle *trans, u64 srcid,
>>       bool need_rescan = false;
>>       u32 level_size = 0;
>>       u64 nums;
>> +    struct btrfs_path *path;
>>       /*
>>        * There are only two callers of this function.
>> @@ -2935,6 +2947,11 @@ int btrfs_qgroup_inherit(struct 
>> btrfs_trans_handle *trans, u64 srcid,
>>           ret = 0;
>>       }
>> +    path = btrfs_alloc_path();
>> +    if (!path) {
>> +        ret = -ENOMEM;
>> +        goto out;
>> +    }
>>       spin_lock(&fs_info->qgroup_lock);
>> @@ -2950,8 +2967,7 @@ int btrfs_qgroup_inherit(struct 
>> btrfs_trans_handle *trans, u64 srcid,
>>           dstgroup->max_excl = inherit->lim.max_excl;
>>           dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
>>           dstgroup->rsv_excl = inherit->lim.rsv_excl;
>> -
>> -        ret = update_qgroup_limit_item(trans, dstgroup);
>> +        ret = __update_qgroup_limit_item(trans, dstgroup, path);
>>           if (ret) {
>>               qgroup_mark_inconsistent(fs_info);
>>               btrfs_info(fs_info,
>> @@ -3053,6 +3069,7 @@ int btrfs_qgroup_inherit(struct 
>> btrfs_trans_handle *trans, u64 srcid,
>>   unlock:
>>       spin_unlock(&fs_info->qgroup_lock);
>> +    btrfs_free_path(path);
>>       if (!ret)
>>           ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
>>   out:
> 
> .
David Sterba Nov. 11, 2022, 11:44 a.m. UTC | #8
On Fri, Nov 11, 2022 at 07:31:22AM +0800, Qu Wenruo wrote:
> 
> 
> On 2022/11/11 04:54, David Sterba wrote:
> > On Thu, Nov 10, 2022 at 10:13:42PM +0800, ChenXiaoSong wrote:
> >> Syzkaller reported BUG as follows:
> >>
> >>    BUG: sleeping function called from invalid context at
> >>         include/linux/sched/mm.h:274
> >>    Call Trace:
> >>     <TASK>
> >>     dump_stack_lvl+0xcd/0x134
> >>     __might_resched.cold+0x222/0x26b
> >>     kmem_cache_alloc+0x2e7/0x3c0
> >>     update_qgroup_limit_item+0xe1/0x390
> >>     btrfs_qgroup_inherit+0x147b/0x1ee0
> >>     create_subvol+0x4eb/0x1710
> >>     btrfs_mksubvol+0xfe5/0x13f0
> >>     __btrfs_ioctl_snap_create+0x2b0/0x430
> >>     btrfs_ioctl_snap_create_v2+0x25a/0x520
> >>     btrfs_ioctl+0x2a1c/0x5ce0
> >>     __x64_sys_ioctl+0x193/0x200
> >>     do_syscall_64+0x35/0x80
> >>
> >> Fix this by introducing __update_qgroup_limit_item() helper, allocate
> >> memory outside of the spin lock.
> >>
> >> Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
> > 
> > Added to misc-next, thanks.
> 
> Please remove it for now, the patch only addressed what MM layer 
> reports, it doesn't really solve the root cause, we're doing a tree 
> modification (btrfs_search_slot()), under a spinlock.

Removed. As potential sleeping under a spinlock is hard to spot, we
should add might_sleep() to some places.
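
For illustration, one way such an annotation might look, e.g. at the top of
update_qgroup_limit_item(); the exact placement here is a guess, not what
was merged:

    static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
                                        struct btrfs_qgroup *qgroup)
    {
            /*
             * The path allocation and tree search below may block, so make
             * the debug check complain immediately if this is ever called
             * from atomic context, even when nothing actually sleeps.
             */
            might_sleep();
            ...
    }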

Patch

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9334c3157c22..99a61cc04b68 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -768,11 +768,11 @@  static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
 	return ret;
 }
 
-static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
-				    struct btrfs_qgroup *qgroup)
+static int __update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+				      struct btrfs_qgroup *qgroup,
+				      struct btrfs_path *path)
 {
 	struct btrfs_root *quota_root = trans->fs_info->quota_root;
-	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct extent_buffer *l;
 	struct btrfs_qgroup_limit_item *qgroup_limit;
@@ -783,10 +783,6 @@  static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 	key.type = BTRFS_QGROUP_LIMIT_KEY;
 	key.offset = qgroup->qgroupid;
 
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
 	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
 	if (ret > 0)
 		ret = -ENOENT;
@@ -806,6 +802,21 @@  static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(l);
 
 out:
+	return ret;
+}
+
+static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+				    struct btrfs_qgroup *qgroup)
+{
+	struct btrfs_path *path;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	ret = __update_qgroup_limit_item(trans, qgroup, path);
+
 	btrfs_free_path(path);
 	return ret;
 }
@@ -2860,6 +2871,7 @@  int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 	bool need_rescan = false;
 	u32 level_size = 0;
 	u64 nums;
+	struct btrfs_path *path;
 
 	/*
 	 * There are only two callers of this function.
@@ -2935,6 +2947,11 @@  int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 		ret = 0;
 	}
 
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	spin_lock(&fs_info->qgroup_lock);
 
@@ -2950,8 +2967,7 @@  int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 		dstgroup->max_excl = inherit->lim.max_excl;
 		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
 		dstgroup->rsv_excl = inherit->lim.rsv_excl;
-
-		ret = update_qgroup_limit_item(trans, dstgroup);
+		ret = __update_qgroup_limit_item(trans, dstgroup, path);
 		if (ret) {
 			qgroup_mark_inconsistent(fs_info);
 			btrfs_info(fs_info,
@@ -3053,6 +3069,7 @@  int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 
 unlock:
 	spin_unlock(&fs_info->qgroup_lock);
+	btrfs_free_path(path);
 	if (!ret)
 		ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
 out: