
[RFC,2/2] dmaengine: altera: fix spinlock usage

Message ID 1505732881-7484-3-git-send-email-lesne@alse-fr.com (mailing list archive)
State Accepted

Commit Message

Sylvain Lesne Sept. 18, 2017, 11:08 a.m. UTC
Since this lock is acquired in both process and IRQ context, failing to
disable IRQs when trying to acquire the lock in process context can
lead to deadlocks.

Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
---
I'm not sure that this is the "smartest" fix, but I think something 
should be done to fix the spinlock usage in this driver (lockdep 
agrees!).
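
For reference, lockdep reports this kind of process-context vs. IRQ-context
locking inconsistency (typically as an "inconsistent lock state" splat) when
the kernel is built with lock debugging enabled; a hedged example of such a
debug configuration (real Kconfig symbols, shown only as a sketch):

    CONFIG_DEBUG_SPINLOCK=y
    CONFIG_DEBUG_LOCK_ALLOC=y
    CONFIG_PROVE_LOCKING=y
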
---
 drivers/dma/altera-msgdma.c | 35 +++++++++++++++++++++--------------
 1 file changed, 21 insertions(+), 14 deletions(-)
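
To make the failure mode concrete, here is a minimal, hypothetical C sketch.
It is not code from the driver; it only assumes, as the discussion below
implies, that the hard-IRQ path takes the same per-device lock
(example_lock stands in for mdev->lock):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);   /* stands in for mdev->lock */

    /* Process context before this patch: spin_lock_bh() disables softirqs
     * but leaves hard IRQs enabled on this CPU. */
    static void process_context_before(void)
    {
            spin_lock_bh(&example_lock);
            /* If the device interrupt fires here and its handler does
             * spin_lock(&example_lock), the handler spins on a lock held
             * by the context it just interrupted -> deadlock on this CPU. */
            spin_unlock_bh(&example_lock);
    }

    /* Process context with this patch: hard IRQs are disabled locally while
     * the lock is held, so the handler cannot run on this CPU. */
    static void process_context_after(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... manipulate the descriptor lists safely ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }
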

Comments

Stefan Roese Sept. 25, 2017, 2:38 p.m. UTC | #1
Hi Sylvain,

On 18.09.2017 13:08, Sylvain Lesne wrote:
> Since this lock is acquired in both process and IRQ context, failing to
> disable IRQs when trying to acquire the lock in process context can
> lead to deadlocks.
> 
> Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
> ---
> I'm not sure that this is the "smartest" fix, but I think something
> should be done to fix the spinlock usage in this driver (lockdep
> agrees!).

I'm a bit unsure here as well, as introducing the irqsave spinlocks
here will be a bit costly time-wise.

Perhaps it's better to move the addition of new descriptors in the IDLE
state from the ISR back to the IRQ tasklet, as I initially proposed in
v2 of this patch:

https://patchwork.kernel.org/patch/9805967/

This way, the lock will not be used from IRQ context any more and
the "normal" spinlock calls should be enough.

What do you think?
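
As a rough sketch of that alternative (hypothetical code, names invented for
illustration): if the lock is only ever taken in process context and in the
tasklet, the _bh variants already provide the needed exclusion and hard IRQs
never have to be disabled:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(desc_lock);      /* stands in for mdev->lock */

    /* Softirq (tasklet) context: a plain spin_lock() is sufficient here,
     * because every process-context user disables bottom halves while
     * holding the lock, so the tasklet can never interrupt such a holder. */
    static void descriptor_tasklet(unsigned long data)
    {
            spin_lock(&desc_lock);
            /* ... complete finished descriptors, start pending ones ... */
            spin_unlock(&desc_lock);
    }

    /* Process context: _bh keeps the tasklet off this CPU while held. */
    static void queue_descriptor(void)
    {
            spin_lock_bh(&desc_lock);
            /* ... add a new descriptor to the pending list ... */
            spin_unlock_bh(&desc_lock);
    }
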

Thanks,
Stefan

Sylvain Lesne Sept. 26, 2017, 8:05 a.m. UTC | #2
On 09/25/2017 04:38 PM, Stefan Roese wrote:
> Hi Sylvain,
> 
> On 18.09.2017 13:08, Sylvain Lesne wrote:
>> Since this lock is acquired in both process and IRQ context, failing to
>> disable IRQs when trying to acquire the lock in process context can
>> lead to deadlocks.
>>
>> Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
>> ---
>> I'm not sure that this is the "smartest" fix, but I think something
>> should be done to fix the spinlock usage in this driver (lockdep
>> agrees!).
> 
> I'm a bit unsure here as well, as introducing the irqsave spinlocks
> here will be a bit costly time-wise.
> 
> Perhaps it's better to move the addition of new descriptors in the IDLE
> state from the ISR back to the IRQ tasklet, as I initially proposed in
> v2 of this patch:
> 
> https://patchwork.kernel.org/patch/9805967/
> 
> This way, the lock will not be used from IRQ context any more and
> the "normal" spinlock calls should be enough.
> 
> What do you think?

Thanks for the feedback!
I agree that simplifying the locks is probably the best idea.

Since Vinod suggested that you move this into the IRQ handler for performance
reasons, moving it back to the tasklet could cause a performance hit,
but that is probably better than adding irqsave everywhere.

Thanks,
Sylvain

Stefan Roese Sept. 26, 2017, 9:29 a.m. UTC | #3
Hi Sylvain,

On 26.09.2017 10:05, Sylvain Lesne wrote:
> On 09/25/2017 04:38 PM, Stefan Roese wrote:
>> Hi Sylvain,
>>
>> On 18.09.2017 13:08, Sylvain Lesne wrote:
>>> Since this lock is acquired in both process and IRQ context, failing to
>>> disable IRQs when trying to acquire the lock in process context can
>>> lead to deadlocks.
>>>
>>> Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
>>> ---
>>> I'm not sure that this is the "smartest" fix, but I think something
>>> should be done to fix the spinlock usage in this driver (lockdep
>>> agrees!).
>>
>> I'm a bit unsure here as well, as introducing the irqsave spinlocks
>> here will be a bit costly time-wise.
>>
>> Perhaps it's better to move the addition of new descriptors in the IDLE
>> state from the ISR back to the IRQ tasklet, as I initially proposed in
>> v2 of this patch:
>>
>> https://patchwork.kernel.org/patch/9805967/
>>
>> This way, the lock will not be used from IRQ context any more and
>> the "normal" spinlock calls should be enough.
>>
>> What do you think?
> 
> Thanks for the feedback!
> I agree that simplifying the locks is probably the best idea.
> 
> Since Vinod suggested that you move this into the IRQ handler for performance
> reasons, moving it back to the tasklet could cause a performance hit,
> but that is probably better than adding irqsave everywhere.

Yes, I would prefer this alternative as well. Perhaps Vinod has
some comments as well?

Otherwise, would you "volunteer" to send a patch for this suggested
change or should I add this to my list?

Thanks,
Stefan
Vinod Koul Sept. 26, 2017, 4:40 p.m. UTC | #4
On Tue, Sep 26, 2017 at 11:29:40AM +0200, Stefan Roese wrote:
> Hi Sylvain,
> 
> On 26.09.2017 10:05, Sylvain Lesne wrote:
> >On 09/25/2017 04:38 PM, Stefan Roese wrote:
> >>Hi Sylvain,
> >>
> >>On 18.09.2017 13:08, Sylvain Lesne wrote:
> >>>Since this lock is acquired in both process and IRQ context, failing to
> >>>disable IRQs when trying to acquire the lock in process context can
> >>>lead to deadlocks.
> >>>
> >>>Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
> >>>---
> >>>I'm not sure that this is the "smartest" fix, but I think something
> >>>should be done to fix the spinlock usage in this driver (lockdep
> >>>agrees!).
> >>
> >>I'm a bit unsure here as well, as introducing the irqsave spinlocks
> >>here will be a bit costly time-wise.
> >>
> >>Perhaps it's better to move the addition of new descriptors in the IDLE
> >>state from the ISR back to the IRQ tasklet, as I initially proposed in
> >>v2 of this patch:

Keeping the DMAengine idle is not really very smart! We would like to get
maximum throughput from it.

> >>
> >>https://patchwork.kernel.org/patch/9805967/
> >>
> >>This way, the lock will not be used from IRQ context any more and
> >>the "normal" spinlock calls should be enough.
> >>
> >>What do you think?
> >
> >Thanks for the feedback!
> >I agree that simplifying the locks is probably the best idea.
> >
> >Since Vinod suggested that you move this into the IRQ handler for performance
> >reasons, moving it back to the tasklet could cause a performance hit,
> >but that is probably better than adding irqsave everywhere.
> 
> Yes, I would prefer this alternative as well. Perhaps Vinod has
> some comments as well?

Since the list is modified in both IRQ and non-IRQ context, using the irqsave
variant of the spinlock makes sense. Disabling IRQs here is okay for getting better
throughput.
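
A hedged sketch of the resulting convention (the interrupt handler is not
part of this diff, so its shape here is assumed and the names are invented):
hard-IRQ context may keep a plain spin_lock(), since interrupts are already
disabled there, while every other context uses the irqsave variant as this
patch does:

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(chan_lock);      /* stands in for mdev->lock */

    /* Hard-IRQ context: local interrupts are already off here, so a plain
     * spin_lock() is enough as long as all other users disable IRQs. */
    static irqreturn_t chan_irq_handler(int irq, void *data)
    {
            spin_lock(&chan_lock);
            /* ... push new descriptors to the now-idle engine ... */
            spin_unlock(&chan_lock);
            return IRQ_HANDLED;
    }

    /* Process or tasklet context: disable IRQs while holding the lock so
     * the handler above can never spin on a lock its own CPU holds. */
    static void other_context_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&chan_lock, flags);
            /* ... modify the shared descriptor lists ... */
            spin_unlock_irqrestore(&chan_lock, flags);
    }
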

> 
> Otherwise, would you "volunteer" to send a patch for this suggested
> change or should I add this to my list?
> 
> Thanks,
> Stefan
Stefan Roese Sept. 26, 2017, 5:07 p.m. UTC | #5
Hi Vinod,

On 26.09.2017 18:40, Vinod Koul wrote:
> On Tue, Sep 26, 2017 at 11:29:40AM +0200, Stefan Roese wrote:
>> Hi Sylvain,
>>
>> On 26.09.2017 10:05, Sylvain Lesne wrote:
>>> On 09/25/2017 04:38 PM, Stefan Roese wrote:
>>>> Hi Sylvain,
>>>>
>>>> On 18.09.2017 13:08, Sylvain Lesne wrote:
>>>>> Since this lock is acquired in both process and IRQ context, failing to
>>>>> disable IRQs when trying to acquire the lock in process context can
>>>>> lead to deadlocks.
>>>>>
>>>>> Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
>>>>> ---
>>>>> I'm not sure that this is the "smartest" fix, but I think something
>>>>> should be done to fix the spinlock usage in this driver (lockdep
>>>>> agrees!).
>>>>
>>>> I'm a bit unsure here as well, as introducing the irqsave spinlocks
>>>> here will be a bit costly time-wise.
>>>>
>>>> Perhaps it's better to move the addition of new descriptors in the IDLE
>>>> state from the ISR back to the IRQ tasklet, as I initially proposed in
>>>> v2 of this patch:
> 
> Keeping the DMAengine idle is not really very smart! We would like to get
> maximum throughput from it.
> 
>>>>
>>>> https://patchwork.kernel.org/patch/9805967/
>>>>
>>>> This way, the lock will not be used from IRQ context any more and
>>>> the "normal" spinlock calls should be enough.
>>>>
>>>> What do you think?
>>>
>>> Thanks for the feedback!
>>> I agree that simplifying the locks is probably the best idea.
>>>
>>> Since Vinod suggested that you move this into the IRQ handler for performance
>>> reasons, moving it back to the tasklet could cause a performance hit,
>>> but that is probably better than adding irqsave everywhere.
>>
>> Yes, I would prefer this alternative as well. Perhaps Vinod has
>> some comments as well?
> 
> Since the list is modified in both IRQ and non-IRQ context, using the irqsave
> variant of the spinlock makes sense. Disabling IRQs here is okay for getting better
> throughput.

Fine with me. Let's go with Sylvain's spinlock patch then.

Reviewed-by: Stefan Roese <sr@denx.de>

Thanks,
Stefan

Patch

diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 35cbf2365f68..339186f25a2a 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@  struct msgdma_device {
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
 	struct msgdma_sw_desc *desc;
+	unsigned long flags;
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
 	list_del(&desc->node);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 
 	INIT_LIST_HEAD(&desc->tx_list);
 
@@ -306,13 +307,14 @@  static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct msgdma_device *mdev = to_mdev(tx->chan);
 	struct msgdma_sw_desc *new;
 	dma_cookie_t cookie;
+	unsigned long flags;
 
 	new = tx_to_desc(tx);
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	cookie = dma_cookie_assign(tx);
 
 	list_add_tail(&new->node, &mdev->pending_list);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 
 	return cookie;
 }
@@ -336,17 +338,18 @@  msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct msgdma_extended_desc *desc;
 	size_t copy;
 	u32 desc_cnt;
+	unsigned long irqflags;
 
 	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
 		spin_unlock_bh(&mdev->lock);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);
 
 	do {
 		/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@  msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
 	u32 desc_cnt = 0, i;
 	struct scatterlist *sg;
 	u32 stride;
+	unsigned long irqflags;
 
 	for_each_sg(sgl, sg, sg_len, i)
 		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
 	if (desc_cnt > mdev->desc_free_cnt) {
 		spin_unlock_bh(&mdev->lock);
 		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
 		return NULL;
 	}
 	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);
 
 	avail = sg_dma_len(sgl);
 
@@ -566,10 +570,11 @@  static void msgdma_start_transfer(struct msgdma_device *mdev)
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
 	struct msgdma_device *mdev = to_mdev(chan);
+	unsigned long flags;
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_start_transfer(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
@@ -634,10 +639,11 @@  static void msgdma_free_descriptors(struct msgdma_device *mdev)
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct msgdma_device *mdev = to_mdev(dchan);
+	unsigned long flags;
 
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 	msgdma_free_descriptors(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 	kfree(mdev->sw_desq);
 }
 
@@ -682,8 +688,9 @@  static void msgdma_tasklet(unsigned long data)
 	u32 count;
 	u32 __maybe_unused size;
 	u32 __maybe_unused status;
+	unsigned long flags;
 
-	spin_lock(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
 
 	/* Read number of responses that are available */
 	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -704,7 +711,7 @@  static void msgdma_tasklet(unsigned long data)
 		msgdma_chan_desc_cleanup(mdev);
 	}
 
-	spin_unlock(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**