
dmaengine: pch_dma: Fix potential deadlock on &pd_chan->lock

Message ID: 20230726062313.77121-1-dg573847474@gmail.com (mailing list archive)
State: Changes Requested
Series: dmaengine: pch_dma: Fix potential deadlock on &pd_chan->lock

Commit Message

Chengfeng Ye July 26, 2023, 6:23 a.m. UTC
As &pd_chan->lock is acquired by pdc_tasklet() in softirq context, scheduled
from pd_irq(), any acquisition of the same lock in process context should
disable interrupts; otherwise a deadlock can occur if the softirq preempts
the process-context code on the same CPU while the lock is held.

pd_issue_pending(), pd_tx_submit(), pdc_desc_put() and pdc_desc_get() are
callback functions, or are executed by callback functions, that can run
without interrupts disabled.

Possible deadlock scenario:
pd_prep_slave_sg()
    -> pdc_desc_put()
    -> spin_lock(&pd_chan->lock)
        <preempted by the tasklet softirq>
        -> pdc_tasklet()
        -> spin_lock_irqsave(&pd_chan->lock, flags); (deadlock here)
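
A minimal sketch of the locking pattern in question (illustrative only, not
taken from the driver; my_lock, my_tasklet_fn and my_process_ctx_path are
made-up names):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(my_lock);

/* Runs in softirq context, like pdc_tasklet() scheduled from the IRQ handler. */
static void my_tasklet_fn(struct tasklet_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	/* ... update shared channel state ... */
	spin_unlock_irqrestore(&my_lock, flags);
}

/* Runs in process context, like pd_prep_slave_sg() -> pdc_desc_put(). */
static void my_process_ctx_path(void)
{
	unsigned long flags;

	/*
	 * Plain spin_lock() is not enough here: if the tasklet softirq runs
	 * on this CPU while the lock is held, my_tasklet_fn() spins on the
	 * lock forever. Disabling local interrupts keeps the softirq from
	 * preempting this section, closing the deadlock window.
	 */
	spin_lock_irqsave(&my_lock, flags);
	/* ... update shared channel state ... */
	spin_unlock_irqrestore(&my_lock, flags);
}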

This flaw was found by an experimental static analysis tool I am developing
for irq-related deadlocks.

This tentative patch fixes the potential deadlock by using spin_lock_irqsave()
to disable interrupts while the lock is held.

Signed-off-by: Chengfeng Ye <dg573847474@gmail.com>
---
 drivers/dma/pch_dma.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

Comments

Chengfeng Ye Aug. 16, 2023, 5:03 p.m. UTC | #1
Hi maintainers,

May I ask if anyone would like to review the patch?

Thanks,
Chengfeng

Patch

diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index c359decc07a3..ad9ac4f64961 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -409,8 +409,9 @@  static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 {
 	struct pch_dma_desc *desc = to_pd_desc(txd);
 	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
+	unsigned long flags;
 
-	spin_lock(&pd_chan->lock);
+	spin_lock_irqsave(&pd_chan->lock, flags);
 
 	if (list_empty(&pd_chan->active_list)) {
 		list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -419,7 +420,7 @@  static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 		list_add_tail(&desc->desc_node, &pd_chan->queue);
 	}
 
-	spin_unlock(&pd_chan->lock);
+	spin_unlock_irqrestore(&pd_chan->lock, flags);
 	return 0;
 }
 
@@ -445,9 +446,10 @@  static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 {
 	struct pch_dma_desc *desc, *_d;
 	struct pch_dma_desc *ret = NULL;
+	unsigned long flags;
 	int i = 0;
 
-	spin_lock(&pd_chan->lock);
+	spin_lock_irqsave(&pd_chan->lock, flags);
 	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
 		i++;
 		if (async_tx_test_ack(&desc->txd)) {
@@ -457,15 +459,15 @@  static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 		}
 		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
 	}
-	spin_unlock(&pd_chan->lock);
+	spin_unlock_irqrestore(&pd_chan->lock, flags);
 	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 
 	if (!ret) {
 		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
 		if (ret) {
-			spin_lock(&pd_chan->lock);
+			spin_lock_irqsave(&pd_chan->lock, flags);
 			pd_chan->descs_allocated++;
-			spin_unlock(&pd_chan->lock);
+			spin_unlock_irqrestore(&pd_chan->lock, flags);
 		} else {
 			dev_err(chan2dev(&pd_chan->chan),
 				"failed to alloc desc\n");
@@ -478,11 +480,13 @@  static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 static void pdc_desc_put(struct pch_dma_chan *pd_chan,
 			 struct pch_dma_desc *desc)
 {
+	unsigned long flags;
+
 	if (desc) {
-		spin_lock(&pd_chan->lock);
+		spin_lock_irqsave(&pd_chan->lock, flags);
 		list_splice_init(&desc->tx_list, &pd_chan->free_list);
 		list_add(&desc->desc_node, &pd_chan->free_list);
-		spin_unlock(&pd_chan->lock);
+		spin_unlock_irqrestore(&pd_chan->lock, flags);
 	}
 }
 
@@ -555,11 +559,12 @@  static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 static void pd_issue_pending(struct dma_chan *chan)
 {
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
+	unsigned long flags;
 
 	if (pdc_is_idle(pd_chan)) {
-		spin_lock(&pd_chan->lock);
+		spin_lock_irqsave(&pd_chan->lock, flags);
 		pdc_advance_work(pd_chan);
-		spin_unlock(&pd_chan->lock);
+		spin_unlock_irqrestore(&pd_chan->lock, flags);
 	}
 }