
[PATCHv5,1/2] RDMA/rxe: Fix a deadlock problem

Message ID 20220417024343.568777-1-yanjun.zhu@linux.dev (mailing list archive)
State Changes Requested
Series [PATCHv5,1/2] RDMA/rxe: Fix a deadlock problem

Commit Message

Zhu Yanjun April 17, 2022, 2:43 a.m. UTC
From: Zhu Yanjun <yanjun.zhu@linux.dev>

This patch fixes a deadlock.
The ah_pool xa_lock is first acquired in process context:

{SOFTIRQ-ON-W} state was registered at:

  lock_acquire+0x1d2/0x5a0
  _raw_spin_lock+0x33/0x80
  __rxe_add_to_pool+0x183/0x230 [rdma_rxe]

Then the same xa_lock is acquired in softirq context:

{IN-SOFTIRQ-W}:

Call Trace:
 <TASK>
  dump_stack_lvl+0x44/0x57
  mark_lock.part.52.cold.79+0x3c/0x46
  __lock_acquire+0x1565/0x34a0
  lock_acquire+0x1d2/0x5a0
  _raw_spin_lock_irqsave+0x42/0x90
  rxe_pool_get_index+0x72/0x1d0 [rdma_rxe]
  rxe_get_av+0x168/0x2a0 [rdma_rxe]
</TASK>

From the above, __rxe_add_to_pool acquires xa_lock without
disabling softirqs. While the lock is held, a softirq interrupts
the function and calls rxe_pool_get_index, which tries to take
the same xa_lock. The CPU then spins on a lock it already holds,
and the deadlock appears.

[  296.806097]        CPU0
[  296.808550]        ----
[  296.811003]   lock(&xa->xa_lock#15);  <----- __rxe_add_to_pool
[  296.814583]   <Interrupt>
[  296.817209]     lock(&xa->xa_lock#15); <---- rxe_pool_get_index
[  296.820961]
                 *** DEADLOCK ***
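
In code terms, a minimal sketch of the inversion (paraphrasing the
two call sites from the traces above, not a literal excerpt):

        /* Process context: __rxe_add_to_pool() */
        xa_lock(&pool->xa);                  /* softirqs stay enabled */

        /* A softirq fires on the same CPU before the unlock and
         * reaches rxe_pool_get_index():
         */
        xa_lock_irqsave(&pool->xa, flags);   /* spins on the held lock */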

Fixes: 3225717f6dfa ("RDMA/rxe: Replace red-black trees by xarrays")
Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
---
V4->V5: Commit logs are changed to avoid confusion.
V3->V4: xa_lock_irq locks are used.
V2->V3: __rxe_add_to_pool is between spin_lock and spin_unlock, so
        GFP_ATOMIC is used in __rxe_add_to_pool.
V1->V2: Replace GFP_KERNEL with GFP_ATOMIC
---
 drivers/infiniband/sw/rxe/rxe_pool.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

Comments

Leon Romanovsky April 20, 2022, 6:42 a.m. UTC | #1
On Sat, Apr 16, 2022 at 10:43:42PM -0400, yanjun.zhu@linux.dev wrote:
> From: Zhu Yanjun <yanjun.zhu@linux.dev>
> 
> This patch fixes a deadlock.
> The ah_pool xa_lock is first acquired in process context:
> 
> {SOFTIRQ-ON-W} state was registered at:
> 
>   lock_acquire+0x1d2/0x5a0
>   _raw_spin_lock+0x33/0x80
>   __rxe_add_to_pool+0x183/0x230 [rdma_rxe]
> 
> Then the same xa_lock is acquired in softirq context:
> 
> {IN-SOFTIRQ-W}:
> 
> Call Trace:
>  <TASK>
>   dump_stack_lvl+0x44/0x57
>   mark_lock.part.52.cold.79+0x3c/0x46
>   __lock_acquire+0x1565/0x34a0
>   lock_acquire+0x1d2/0x5a0
>   _raw_spin_lock_irqsave+0x42/0x90
>   rxe_pool_get_index+0x72/0x1d0 [rdma_rxe]
>   rxe_get_av+0x168/0x2a0 [rdma_rxe]
> </TASK>
> 
> From the above, __rxe_add_to_pool acquires xa_lock without
> disabling softirqs. While the lock is held, a softirq interrupts
> the function and calls rxe_pool_get_index, which tries to take
> the same xa_lock. The CPU then spins on a lock it already holds,
> and the deadlock appears.
> 
> [  296.806097]        CPU0
> [  296.808550]        ----
> [  296.811003]   lock(&xa->xa_lock#15);  <----- __rxe_add_to_pool
> [  296.814583]   <Interrupt>
> [  296.817209]     lock(&xa->xa_lock#15); <---- rxe_pool_get_index
> [  296.820961]
>                  *** DEADLOCK ***
> 
> Fixes: 3225717f6dfa ("RDMA/rxe: Replace red-black trees by xarrays")
> Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> ---
> V4->V5: Commit logs are changed to avoid confusion.
> V3->V4: xa_lock_irq locks are used.
> V2->V3: __rxe_add_to_pool is between spin_lock and spin_unlock, so
>         GFP_ATOMIC is used in __rxe_add_to_pool.
> V1->V2: Replace GFP_KERNEL with GFP_ATOMIC
> ---
>  drivers/infiniband/sw/rxe/rxe_pool.c | 20 ++++++++++++++------
>  1 file changed, 14 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
> index 87066d04ed18..f1f06dc7e64f 100644
> --- a/drivers/infiniband/sw/rxe/rxe_pool.c
> +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
> @@ -106,7 +106,7 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
>  
>  	atomic_set(&pool->num_elem, 0);
>  
> -	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
> +	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
>  	pool->limit.min = info->min_index;
>  	pool->limit.max = info->max_index;
>  }
> @@ -138,8 +138,10 @@ void *rxe_alloc(struct rxe_pool *pool)
>  	elem->obj = obj;
>  	kref_init(&elem->ref_cnt);
>  
> -	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
> -			      &pool->next, GFP_KERNEL);
> +	xa_lock_irq(&pool->xa);
> +	err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
> +				&pool->next, GFP_KERNEL);
> +	xa_unlock_irq(&pool->xa);

I must admit that I didn't follow your previous discussions, so maybe you
already explained it. But why do you need xa_lock_irq() here?

Thanks

>  	if (err)
>  		goto err_free;
>  
> @@ -155,6 +157,7 @@ void *rxe_alloc(struct rxe_pool *pool)
>  int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
>  {
>  	int err;
> +	unsigned long flags;
>  
>  	if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
>  		return -EINVAL;
> @@ -166,8 +169,10 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
>  	elem->obj = (u8 *)elem - pool->elem_offset;
>  	kref_init(&elem->ref_cnt);
>  
> -	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
> -			      &pool->next, GFP_KERNEL);
> +	xa_lock_irqsave(&pool->xa, flags);
> +	err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
> +				&pool->next, GFP_ATOMIC);
> +	xa_unlock_irqrestore(&pool->xa, flags);
>  	if (err)
>  		goto err_cnt;
>  
> @@ -200,8 +205,11 @@ static void rxe_elem_release(struct kref *kref)
>  {
>  	struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
>  	struct rxe_pool *pool = elem->pool;
> +	unsigned long flags;
>  
> -	xa_erase(&pool->xa, elem->index);
> +	xa_lock_irqsave(&pool->xa, flags);
> +	__xa_erase(&pool->xa, elem->index);
> +	xa_unlock_irqrestore(&pool->xa, flags);
>  
>  	if (pool->cleanup)
>  		pool->cleanup(elem);
> -- 
> 2.27.0
>
Jason Gunthorpe April 20, 2022, 4:32 p.m. UTC | #2
On Wed, Apr 20, 2022 at 09:42:23AM +0300, Leon Romanovsky wrote:
> On Sat, Apr 16, 2022 at 10:43:42PM -0400, yanjun.zhu@linux.dev wrote:
> > From: Zhu Yanjun <yanjun.zhu@linux.dev>
> > 
> > This patch fixes a deadlock.
> > The ah_pool xa_lock is first acquired in process context:
> > 
> > {SOFTIRQ-ON-W} state was registered at:
> > 
> >   lock_acquire+0x1d2/0x5a0
> >   _raw_spin_lock+0x33/0x80
> >   __rxe_add_to_pool+0x183/0x230 [rdma_rxe]
> > 
> > Then the same xa_lock is acquired in softirq context:
> > 
> > {IN-SOFTIRQ-W}:
> > 
> > Call Trace:
> >  <TASK>
> >   dump_stack_lvl+0x44/0x57
> >   mark_lock.part.52.cold.79+0x3c/0x46
> >   __lock_acquire+0x1565/0x34a0
> >   lock_acquire+0x1d2/0x5a0
> >   _raw_spin_lock_irqsave+0x42/0x90
> >   rxe_pool_get_index+0x72/0x1d0 [rdma_rxe]
> >   rxe_get_av+0x168/0x2a0 [rdma_rxe]
> > </TASK>
> > 
> > From the above, __rxe_add_to_pool acquires xa_lock without
> > disabling softirqs. While the lock is held, a softirq interrupts
> > the function and calls rxe_pool_get_index, which tries to take
> > the same xa_lock. The CPU then spins on a lock it already holds,
> > and the deadlock appears.
> > 
> > [  296.806097]        CPU0
> > [  296.808550]        ----
> > [  296.811003]   lock(&xa->xa_lock#15);  <----- __rxe_add_to_pool
> > [  296.814583]   <Interrupt>
> > [  296.817209]     lock(&xa->xa_lock#15); <---- rxe_pool_get_index
> > [  296.820961]
> >                  *** DEADLOCK ***
> > 
> > Fixes: 3225717f6dfa ("RDMA/rxe: Replace red-black trees by xarrays")
> > Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
> > Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
> > V4->V5: Commit logs are changed to avoid confusion.
> > V3->V4: xa_lock_irq locks are used.
> > V2->V3: __rxe_add_to_pool is between spin_lock and spin_unlock, so
> >         GFP_ATOMIC is used in __rxe_add_to_pool.
> > V1->V2: Replace GFP_KERNEL with GFP_ATOMIC
> >  drivers/infiniband/sw/rxe/rxe_pool.c | 20 ++++++++++++++------
> >  1 file changed, 14 insertions(+), 6 deletions(-)
> > 
> > diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
> > index 87066d04ed18..f1f06dc7e64f 100644
> > +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
> > @@ -106,7 +106,7 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
> >  
> >  	atomic_set(&pool->num_elem, 0);
> >  
> > -	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
> > +	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
> >  	pool->limit.min = info->min_index;
> >  	pool->limit.max = info->max_index;
> >  }
> > @@ -138,8 +138,10 @@ void *rxe_alloc(struct rxe_pool *pool)
> >  	elem->obj = obj;
> >  	kref_init(&elem->ref_cnt);
> >  
> > -	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
> > -			      &pool->next, GFP_KERNEL);
> > +	xa_lock_irq(&pool->xa);
> > +	err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
> > +				&pool->next, GFP_KERNEL);
> > +	xa_unlock_irq(&pool->xa);

It should just use xa_alloc_cyclic_irq() and xa_erase_irq(). Don't
open code the lock.
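
For instance, an untested sketch of the two call sites with those
wrappers (same arguments as in this patch; whether GFP_KERNEL can
stay is an assumption, based on the xarray dropping the lock around
sleeping allocations when XA_FLAGS_LOCK_IRQ is set):

        /* rxe_alloc() / __rxe_add_to_pool(), process context assumed */
        err = xa_alloc_cyclic_irq(&pool->xa, &elem->index, elem,
                                  pool->limit, &pool->next, GFP_KERNEL);

        /* rxe_elem_release() */
        xa_erase_irq(&pool->xa, elem->index);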

> I must admit that I didn't follow your previous discussions, so maybe you
> already explained it. But why do you need xa_lock_irq() here?

The spinlock type needs to be consistent in all users.

You can only use the naked version if the spinlock is always obtained
from a process context.

You can only use the bh version if the spinlock is always obtained from a
process context or bh/softirq

You can always use the irq version
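
As an illustrative shorthand (a hypothetical lock, not rxe code):

        spin_lock(&lock);                /* all users in process context */
        spin_lock_bh(&lock);             /* process-context or softirq users */
        spin_lock_irqsave(&lock, flags); /* safe from any context */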

What I don't understand is why IRQ and not BH? AFAIK there is no case
where rxe is called from a real IRQ, right? Or is it because you can't
nest BH under the IRQ spinlock from CM?

Jason
Zhu Yanjun April 21, 2022, 12:49 p.m. UTC | #3
On 2022/4/21 0:32, Jason Gunthorpe wrote:
> On Wed, Apr 20, 2022 at 09:42:23AM +0300, Leon Romanovsky wrote:
>> On Sat, Apr 16, 2022 at 10:43:42PM -0400, yanjun.zhu@linux.dev wrote:
>>> From: Zhu Yanjun <yanjun.zhu@linux.dev>
>>>
>>> This patch fixes a deadlock.
>>> The ah_pool xa_lock is first acquired in process context:
>>>
>>> {SOFTIRQ-ON-W} state was registered at:
>>>
>>>    lock_acquire+0x1d2/0x5a0
>>>    _raw_spin_lock+0x33/0x80
>>>    __rxe_add_to_pool+0x183/0x230 [rdma_rxe]
>>>
>>> Then the same xa_lock is acquired in softirq context:
>>>
>>> {IN-SOFTIRQ-W}:
>>>
>>> Call Trace:
>>>   <TASK>
>>>    dump_stack_lvl+0x44/0x57
>>>    mark_lock.part.52.cold.79+0x3c/0x46
>>>    __lock_acquire+0x1565/0x34a0
>>>    lock_acquire+0x1d2/0x5a0
>>>    _raw_spin_lock_irqsave+0x42/0x90
>>>    rxe_pool_get_index+0x72/0x1d0 [rdma_rxe]
>>>    rxe_get_av+0x168/0x2a0 [rdma_rxe]
>>> </TASK>
>>>
>>> From the above, __rxe_add_to_pool acquires xa_lock without
>>> disabling softirqs. While the lock is held, a softirq interrupts
>>> the function and calls rxe_pool_get_index, which tries to take
>>> the same xa_lock. The CPU then spins on a lock it already holds,
>>> and the deadlock appears.
>>>
>>> [  296.806097]        CPU0
>>> [  296.808550]        ----
>>> [  296.811003]   lock(&xa->xa_lock#15);  <----- __rxe_add_to_pool
>>> [  296.814583]   <Interrupt>
>>> [  296.817209]     lock(&xa->xa_lock#15); <---- rxe_pool_get_index
>>> [  296.820961]
>>>                   *** DEADLOCK ***
>>>
>>> Fixes: 3225717f6dfa ("RDMA/rxe: Replace red-black trees by xarrays")
>>> Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
>>> Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
>>> V4->V5: Commit logs are changed to avoid confusion.
>>> V3->V4: xa_lock_irq locks are used.
>>> V2->V3: __rxe_add_to_pool is between spin_lock and spin_unlock, so
>>>          GFP_ATOMIC is used in __rxe_add_to_pool.
>>> V1->V2: Replace GFP_KERNEL with GFP_ATOMIC
>>>   drivers/infiniband/sw/rxe/rxe_pool.c | 20 ++++++++++++++------
>>>   1 file changed, 14 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
>>> index 87066d04ed18..f1f06dc7e64f 100644
>>> +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
>>> @@ -106,7 +106,7 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
>>>   
>>>   	atomic_set(&pool->num_elem, 0);
>>>   
>>> -	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
>>> +	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
>>>   	pool->limit.min = info->min_index;
>>>   	pool->limit.max = info->max_index;
>>>   }
>>> @@ -138,8 +138,10 @@ void *rxe_alloc(struct rxe_pool *pool)
>>>   	elem->obj = obj;
>>>   	kref_init(&elem->ref_cnt);
>>>   
>>> -	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
>>> -			      &pool->next, GFP_KERNEL);
>>> +	xa_lock_irq(&pool->xa);
>>> +	err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
>>> +				&pool->next, GFP_KERNEL);
>>> +	xa_unlock_irq(&pool->xa);
> 
> It should just use xa_alloc_cyclic_irq() and xa_erase_irq(). Don't
> open code the lock.

Got it. I will use the above functions.

> 
>> I must admit that I didn't follow your previous discussions, so maybe you
>> already explained it. But why do you need xa_lock_irq() here?
> 
> The spinlock type needs to be consistent in all users.
> 
> You can only use the naked version if the spinlock is always obtained
> from a process context.
> 
> You can only use the bh version if the spinlock is always obtained from a
> process context or bh/softirq
> 
> You can always use the irq version
> 
> What I don't understand is why IRQ and not BH? AFAIK there is no case
> where rxe is called from a real IRQ, right? Or is it because you can't
> nest BH under the IRQ spinlock from CM?

Sure. I will use the IRQ spinlock. The reason is as below:

1. https://patchwork.kernel.org/project/linux-rdma/patch/20220210073655.42281-2-guoqing.jiang@linux.dev/

2. https://patchwork.kernel.org/project/linux-rdma/patch/20220215194448.44369-1-rpearsonhpe@gmail.com/

The above two links are why I used IRQ rather than BH.

Zhu Yanjun

> 
> Jason

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 87066d04ed18..f1f06dc7e64f 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -106,7 +106,7 @@  void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
 
 	atomic_set(&pool->num_elem, 0);
 
-	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC);
+	xa_init_flags(&pool->xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
 	pool->limit.min = info->min_index;
 	pool->limit.max = info->max_index;
 }
@@ -138,8 +138,10 @@  void *rxe_alloc(struct rxe_pool *pool)
 	elem->obj = obj;
 	kref_init(&elem->ref_cnt);
 
-	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
-			      &pool->next, GFP_KERNEL);
+	xa_lock_irq(&pool->xa);
+	err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+				&pool->next, GFP_KERNEL);
+	xa_unlock_irq(&pool->xa);
 	if (err)
 		goto err_free;
 
@@ -155,6 +157,7 @@  void *rxe_alloc(struct rxe_pool *pool)
 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 {
 	int err;
+	unsigned long flags;
 
 	if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
 		return -EINVAL;
@@ -166,8 +169,10 @@  int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 	elem->obj = (u8 *)elem - pool->elem_offset;
 	kref_init(&elem->ref_cnt);
 
-	err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
-			      &pool->next, GFP_KERNEL);
+	xa_lock_irqsave(&pool->xa, flags);
+	err = __xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+				&pool->next, GFP_ATOMIC);
+	xa_unlock_irqrestore(&pool->xa, flags);
 	if (err)
 		goto err_cnt;
 
@@ -200,8 +205,11 @@  static void rxe_elem_release(struct kref *kref)
 {
 	struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
 	struct rxe_pool *pool = elem->pool;
+	unsigned long flags;
 
-	xa_erase(&pool->xa, elem->index);
+	xa_lock_irqsave(&pool->xa, flags);
+	__xa_erase(&pool->xa, elem->index);
+	xa_unlock_irqrestore(&pool->xa, flags);
 
 	if (pool->cleanup)
 		pool->cleanup(elem);