diff mbox series

[-next,RFC] block: fix null-deref in percpu_ref_put

Message ID 20220729105036.2202791-1-zhangwensheng@huaweicloud.com (mailing list archive)
State Not Applicable
Delegated to: Netdev Maintainers
Headers show
Series [-next,RFC] block: fix null-deref in percpu_ref_put | expand

Checks

Context Check Description
netdev/tree_selection success Not a local patch

Commit Message

zhangwensheng (E) July 29, 2022, 10:50 a.m. UTC
From: Zhang Wensheng <zhangwensheng5@huawei.com>

A problem was found in stable 5.10 and its root cause is described below.

In the use of q_usage_counter of request_queue, blk_cleanup_queue uses
"wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter))"
to wait for q_usage_counter to become zero. However, if the q_usage_counter
becomes zero quickly, percpu_ref_exit will execute and ref->data
will be freed; then another process may cause a null-deref problem
like below:

	CPU0                             CPU1
blk_cleanup_queue
 blk_freeze_queue
  blk_mq_freeze_queue_wait
				scsi_end_request
				 percpu_ref_get
				 ...
				 percpu_ref_put
				  atomic_long_sub_and_test
  percpu_ref_exit
   ref->data -> NULL
   				   ref->data->release(ref) -> null-deref

Fix it by setting a flag (QUEUE_FLAG_USAGE_COUNT_SYNC) to add a
synchronization mechanism: when ref->data->release is called, the flag
is set, and the "wait_event" in blk_mq_freeze_queue_wait must also wait
for the flag to become true, which prevents percpu_ref_exit from
executing ahead of time.

Although the problem was not reproduced in mainline, it may also occur
with passthrough IO, which goes directly to blk_cleanup_queue and can
cause the problem as well.

Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>
---
 block/blk-core.c       | 4 +++-
 block/blk-mq.c         | 7 +++++++
 include/linux/blk-mq.h | 1 +
 include/linux/blkdev.h | 2 ++
 4 files changed, 13 insertions(+), 1 deletion(-)

Comments

Greg KH July 29, 2022, 11:16 a.m. UTC | #1
On Fri, Jul 29, 2022 at 06:50:36PM +0800, Zhang Wensheng wrote:
> From: Zhang Wensheng <zhangwensheng5@huawei.com>
> 
> A problem was find in stable 5.10 and the root cause of it like below.

5.10 is very old, is this still an issue in Linus's tree?

> 
> In the use of q_usage_counter of request_queue, blk_cleanup_queue using
> "wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter))"
> to wait q_usage_counter becoming zero. however, if the q_usage_counter
> becoming zero quickly, and percpu_ref_exit will execute and ref->data
> will be freed, maybe another process will cause a null-defef problem
> like below:
> 
> 	CPU0                             CPU1
> blk_cleanup_queue
>  blk_freeze_queue
>   blk_mq_freeze_queue_wait
> 				scsi_end_request
> 				 percpu_ref_get
> 				 ...
> 				 percpu_ref_put
> 				  atomic_long_sub_and_test
>   percpu_ref_exit
>    ref->data -> NULL
>    				   ref->data->release(ref) -> null-deref
> 
> Fix it by setting flag(QUEUE_FLAG_USAGE_COUNT_SYNC) to add synchronization
> mechanism, when ref->data->release is called, the flag will be setted,
> and the "wait_event" in blk_mq_freeze_queue_wait must wait flag becoming
> true as well, which will limit percpu_ref_exit to execute ahead of time.
> 
> Although the problem was not reproduced in mainline, it may also has
> problem when the passthrough IO which will go directly to
> blk_cleanup_queue and cause the problem as well.
> 
> Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>

As the documentation said, this is not how you mark things for stable
backports.

> ---
>  block/blk-core.c       | 4 +++-
>  block/blk-mq.c         | 7 +++++++
>  include/linux/blk-mq.h | 1 +
>  include/linux/blkdev.h | 2 ++
>  4 files changed, 13 insertions(+), 1 deletion(-)
> 
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 27fb1357ad4b..4b73f46e62ec 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -312,7 +312,8 @@ void blk_cleanup_queue(struct request_queue *q)
>  	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
>  	 * after draining finished.
>  	 */
> -	blk_freeze_queue(q);
> +	blk_freeze_queue_start(q);
> +	blk_mq_freeze_queue_wait_sync(q);
>  
>  	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
>  
> @@ -403,6 +404,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
>  	struct request_queue *q =
>  		container_of(ref, struct request_queue, q_usage_counter);
>  
> +	blk_queue_flag_set(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
>  	wake_up_all(&q->mq_freeze_wq);
>  }
>  
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 93d9d60980fb..44e764257511 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -165,6 +165,7 @@ void blk_freeze_queue_start(struct request_queue *q)
>  {
>  	mutex_lock(&q->mq_freeze_lock);
>  	if (++q->mq_freeze_depth == 1) {
> +		blk_queue_flag_clear(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
>  		percpu_ref_kill(&q->q_usage_counter);
>  		mutex_unlock(&q->mq_freeze_lock);
>  		if (queue_is_mq(q))
> @@ -175,6 +176,12 @@ void blk_freeze_queue_start(struct request_queue *q)
>  }
>  EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
>  
> +void blk_mq_freeze_queue_wait_sync(struct request_queue *q)
> +{
> +	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter) &&
> +			test_bit(QUEUE_FLAG_USAGE_COUNT_SYNC, &q->queue_flags));

No timeout ever?


> +}
> +
>  void blk_mq_freeze_queue_wait(struct request_queue *q)
>  {
>  	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index e2d9daf7e8dd..50fd56f85b31 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -868,6 +868,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
>  void blk_mq_unfreeze_queue(struct request_queue *q);
>  void blk_freeze_queue_start(struct request_queue *q);
>  void blk_mq_freeze_queue_wait(struct request_queue *q);
> +void blk_mq_freeze_queue_wait_sync(struct request_queue *q);
>  int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
>  				     unsigned long timeout);
>  
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index 2f7b43444c5f..93ed8b166d66 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -575,6 +575,8 @@ struct request_queue {
>  #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
>  #define QUEUE_FLAG_NOWAIT       29	/* device supports NOWAIT */
>  #define QUEUE_FLAG_SQ_SCHED     30	/* single queue style io dispatch */
> +/* sync for q_usage_counter */
> +#define QUEUE_FLAG_USAGE_COUNT_SYNC    31

Why not put the comment a the end of the line like everything else in
this list?

And why not use tabs?

thanks,

greg k-h
Ming Lei July 29, 2022, 1:58 p.m. UTC | #2
On Fri, Jul 29, 2022 at 06:50:36PM +0800, Zhang Wensheng wrote:
> From: Zhang Wensheng <zhangwensheng5@huawei.com>
> 
> A problem was find in stable 5.10 and the root cause of it like below.
> 
> In the use of q_usage_counter of request_queue, blk_cleanup_queue using
> "wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter))"
> to wait q_usage_counter becoming zero. however, if the q_usage_counter
> becoming zero quickly, and percpu_ref_exit will execute and ref->data
> will be freed, maybe another process will cause a null-defef problem
> like below:
> 
> 	CPU0                             CPU1
> blk_cleanup_queue
>  blk_freeze_queue
>   blk_mq_freeze_queue_wait
> 				scsi_end_request
> 				 percpu_ref_get
> 				 ...
> 				 percpu_ref_put
> 				  atomic_long_sub_and_test
>   percpu_ref_exit
>    ref->data -> NULL
>    				   ref->data->release(ref) -> null-deref
> 

Looks it is one generic issue in percpu_ref, I think the following patch
should address it.


diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d73a1c08c3e3..07308bd36d83 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -331,8 +331,12 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
 
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_sub(*percpu_count, nr);
-	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
-		ref->data->release(ref);
+	else {
+		percpu_ref_func_t	*release = ref->data->release;
+
+		if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+			release(ref);
+	}
 
 	rcu_read_unlock();
 }


Thanks,
Ming
zhangwensheng (E) July 30, 2022, 2:15 a.m. UTC | #3
Hi, Ming

I don't think this is a generic issue in percpu_ref. I examined some
users of percpu_ref such as "part->ref", "blkg->refcnt" and
"ctx->reqs/ctx->users"; they all call percpu_ref_exit only after
"release" is done, which does not cause the problem. So I think the
API (percpu_ref_put_many) should not be changed, and the user should
guarantee it instead.

thanks!
Wensheng

在 2022/7/29 21:58, Ming Lei 写道:
> On Fri, Jul 29, 2022 at 06:50:36PM +0800, Zhang Wensheng wrote:
>> From: Zhang Wensheng <zhangwensheng5@huawei.com>
>>
>> A problem was find in stable 5.10 and the root cause of it like below.
>>
>> In the use of q_usage_counter of request_queue, blk_cleanup_queue using
>> "wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter))"
>> to wait q_usage_counter becoming zero. however, if the q_usage_counter
>> becoming zero quickly, and percpu_ref_exit will execute and ref->data
>> will be freed, maybe another process will cause a null-defef problem
>> like below:
>>
>> 	CPU0                             CPU1
>> blk_cleanup_queue
>>   blk_freeze_queue
>>    blk_mq_freeze_queue_wait
>> 				scsi_end_request
>> 				 percpu_ref_get
>> 				 ...
>> 				 percpu_ref_put
>> 				  atomic_long_sub_and_test
>>    percpu_ref_exit
>>     ref->data -> NULL
>>     				   ref->data->release(ref) -> null-deref
>>
> Looks it is one generic issue in percpu_ref, I think the following patch
> should address it.
>
>
> diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
> index d73a1c08c3e3..07308bd36d83 100644
> --- a/include/linux/percpu-refcount.h
> +++ b/include/linux/percpu-refcount.h
> @@ -331,8 +331,12 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
>   
>   	if (__ref_is_percpu(ref, &percpu_count))
>   		this_cpu_sub(*percpu_count, nr);
> -	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
> -		ref->data->release(ref);
> +	else {
> +		percpu_ref_func_t	*release = ref->data->release;
> +
> +		if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
> +			release(ref);
> +	}
>   
>   	rcu_read_unlock();
>   }
>
>
> Thanks,
> Ming
diff mbox series

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 27fb1357ad4b..4b73f46e62ec 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -312,7 +312,8 @@  void blk_cleanup_queue(struct request_queue *q)
 	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
 	 * after draining finished.
 	 */
-	blk_freeze_queue(q);
+	blk_freeze_queue_start(q);
+	blk_mq_freeze_queue_wait_sync(q);
 
 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
@@ -403,6 +404,7 @@  static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 	struct request_queue *q =
 		container_of(ref, struct request_queue, q_usage_counter);
 
+	blk_queue_flag_set(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
 	wake_up_all(&q->mq_freeze_wq);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 93d9d60980fb..44e764257511 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -165,6 +165,7 @@  void blk_freeze_queue_start(struct request_queue *q)
 {
 	mutex_lock(&q->mq_freeze_lock);
 	if (++q->mq_freeze_depth == 1) {
+		blk_queue_flag_clear(QUEUE_FLAG_USAGE_COUNT_SYNC, q);
 		percpu_ref_kill(&q->q_usage_counter);
 		mutex_unlock(&q->mq_freeze_lock);
 		if (queue_is_mq(q))
@@ -175,6 +176,12 @@  void blk_freeze_queue_start(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
 
+void blk_mq_freeze_queue_wait_sync(struct request_queue *q)
+{
+	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter) &&
+			test_bit(QUEUE_FLAG_USAGE_COUNT_SYNC, &q->queue_flags));
+}
+
 void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e2d9daf7e8dd..50fd56f85b31 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -868,6 +868,7 @@  void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
+void blk_mq_freeze_queue_wait_sync(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 				     unsigned long timeout);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2f7b43444c5f..93ed8b166d66 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -575,6 +575,8 @@  struct request_queue {
 #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
 #define QUEUE_FLAG_NOWAIT       29	/* device supports NOWAIT */
 #define QUEUE_FLAG_SQ_SCHED     30	/* single queue style io dispatch */
+/* sync for q_usage_counter */
+#define QUEUE_FLAG_USAGE_COUNT_SYNC    31
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP) |		\