
dmaengine: cppi41: Fix oops in cppi41_runtime_resume

Message ID aafe7afb-6d8a-2fef-acdd-bd331151d16a@ti.com (mailing list archive)
State Superseded

Commit Message

Grygorii Strashko Jan. 12, 2017, 11:42 p.m. UTC
On 01/12/2017 05:04 PM, Tony Lindgren wrote:
> * Grygorii Strashko <grygorii.strashko@ti.com> [170112 14:53]:
>>
>>
>> On 01/12/2017 04:19 PM, Tony Lindgren wrote:
>>> * Grygorii Strashko <grygorii.strashko@ti.com> [170112 13:54]:
>>>> On 01/12/2017 03:30 PM, Tony Lindgren wrote:
>>>>
>>>> Sorry, but even if it looks better it might still race with PM runtime :(
>>>>
>>>>> -	if (likely(pm_runtime_active(cdd->ddev.dev)))
>>>>> +	if (likely(atomic_read(&cdd->active)))
>>>>>  		push_desc_queue(c);
>>>>>  	else
>>>>
>>>>
>>>> - CPU is here (-EINPROGRESS and active == 0) and is then preempted
>>>> - PM runtime will finish cppi41_runtime_resume and clean up the pending descs
>>>> - CPU returns here and adds the desc to the pending queue
>>>> - oops
>>>>
>>>> Am I wrong?
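
In code terms the window is roughly this (a simplified sketch of the variant
quoted above, not actual driver code):

        error = pm_runtime_get(cdd->ddev.dev);  /* returns -EINPROGRESS */
        if (likely(atomic_read(&cdd->active)))  /* reads 0 */
                push_desc_queue(c);
        else
                /*
                 * ... preempted here; cppi41_runtime_resume() finishes
                 * and flushes cdd->pending ...
                 */
                list_add_tail(&c->node, &cdd->pending); /* stale entry -> oops */
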
>>>
>>> We had cppi41_dma_issue_pending() getting called from atomic context and
>>> cppi41_runtime_resume() getting preempted where cppi41_dma_issue_pending()
>>> would add to the queue.
>>
>> Again, I may be mistaken, but cppi41_configure_channel() does not seem to be atomic.
>> cppi41_configure_channel()->dma_async_issue_pending()
>> + documentation says "This function can be called in an interrupt context"
>>
>> And it will definitely be preemptible on RT :(
> 
> Hmm OK. So are you thinking we should add a spinlock around the
> test in cppi41_dma_issue_pending() and when modifying cdd->active?

in general yes.
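
The suspend/resume side would then need the same lock around the flag change
and the queue flush, roughly (untested sketch; with the lock held,
cdd->active can stay a plain int since it is only touched under cdd->lock):

static int cppi41_runtime_resume(struct device *dev)
{
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
        struct cppi41_channel *c, *_c;
        unsigned long flags;

        spin_lock_irqsave(&cdd->lock, flags);
        cdd->active = 1;                /* 0 in runtime_suspend */
        list_for_each_entry_safe(c, _c, &cdd->pending, node) {
                push_desc_queue(c);
                list_del(&c->node);
        }
        spin_unlock_irqrestore(&cdd->lock, flags);

        return 0;
}
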

> 
> Something like:
> 
> spin_lock_irqsave(&cdd->lock, flags);
> if (likely(cdd->active))
> 	push_desc_queue(c);
> else
> 	list_add_tail(&c->node, &cdd->pending);
> spin_unlock_irqrestore(&cdd->lock, flags);
> 
> Or do you have something better in mind?

I've come up with something like below. *Not tested.*
But maybe it can be done more simply.
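
The core of it, stripped of the pm_runtime handling (full patch at the
bottom): always queue under cdd->lock, and flush in place when not
suspended, with the new flag only changing under the same lock.

        spin_lock_irqsave(&cdd->lock, flags);
        list_add_tail(&c->node, &cdd->pending); /* always queue first */
        if (!cdd->is_suspended) {               /* flag only flips under cdd->lock */
                list_for_each_entry_safe(c, _c, &cdd->pending, node) {
                        push_desc_queue(c);
                        list_del(&c->node);
                }
        }
        spin_unlock_irqrestore(&cdd->lock, flags);
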

Comments

Tony Lindgren Jan. 13, 2017, 12:03 a.m. UTC | #1
* Grygorii Strashko <grygorii.strashko@ti.com> [170112 15:43]:
> @@ -457,38 +449,36 @@ static void push_desc_queue(struct cppi41_channel *c)
>         cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
>  }
>  
> -static void pending_desc(struct cppi41_channel *c)
> +static void cppi41_dma_issue_pending(struct dma_chan *chan)
>  {
> +       struct cppi41_channel *c = to_cpp41_chan(chan);
>         struct cppi41_dd *cdd = c->cdd;
> +       int error;
> +       struct cppi41_channel *_c;
>         unsigned long flags;
>  
>         spin_lock_irqsave(&cdd->lock, flags);
>         list_add_tail(&c->node, &cdd->pending);
> -       spin_unlock_irqrestore(&cdd->lock, flags);
> -}
> -
> -static void cppi41_dma_issue_pending(struct dma_chan *chan)
> -{
> -       struct cppi41_channel *c = to_cpp41_chan(chan);
> -       struct cppi41_dd *cdd = c->cdd;
> -       int error;
>  
>         error = pm_runtime_get(cdd->ddev.dev);
> -       if ((error != -EINPROGRESS) && error < 0) {
> +       if (error < 0) {
>                 pm_runtime_put_noidle(cdd->ddev.dev);
>                 dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
>                         error);
> -
> +               spin_unlock_irqrestore(&cdd->lock, flags);
>                 return;
>         }
>  
> -       if (likely(pm_runtime_active(cdd->ddev.dev)))
> -               push_desc_queue(c);
> -       else
> -               pending_desc(c);
> +       if (!cdd->is_suspended) {
> +               list_for_each_entry_safe(c, _c, &cdd->pending, node) {
> +                       push_desc_queue(c);
> +                       list_del(&c->node);
> +               }
> +       }
>  
>         pm_runtime_mark_last_busy(cdd->ddev.dev);
>         pm_runtime_put_autosuspend(cdd->ddev.dev);
> +       spin_unlock_irqrestore(&cdd->lock, flags);
>  }

So always add to the queue no matter what, then always flush the queue
directly if active? Yeah, that might work :)

Don't we need spinlock in the list_for_each_entry_safe() to prevent
it racing with runtime_resume() though?

Tony
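
For reference, in the patch below both cppi41_dma_issue_pending() and
cppi41_runtime_resume() walk cdd->pending with cdd->lock held; the shared
walk could be factored into a small helper (hypothetical name, sketch only):

        /* hypothetical helper; caller must hold cdd->lock */
        static void cppi41_run_queue(struct cppi41_dd *cdd)
        {
                struct cppi41_channel *c, *_c;

                list_for_each_entry_safe(c, _c, &cdd->pending, node) {
                        push_desc_queue(c);
                        list_del(&c->node);
                }
        }
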

Patch

diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d5ba43a..41a8768 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,7 @@  struct cppi41_dd {
 
        /* context for suspend/resume */
        unsigned int dma_tdfdq;
+       int is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE  93
@@ -317,12 +318,6 @@  static irqreturn_t cppi41_irq(int irq, void *data)
 
                while (val) {
                        u32 desc, len;
-                       int error;
-
-                       error = pm_runtime_get(cdd->ddev.dev);
-                       if (error < 0)
-                               dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-                                       __func__, error);
 
                        q_num = __fls(val);
                        val &= ~(1 << q_num);
@@ -343,9 +338,6 @@  static irqreturn_t cppi41_irq(int irq, void *data)
                        c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-                       pm_runtime_mark_last_busy(cdd->ddev.dev);
-                       pm_runtime_put_autosuspend(cdd->ddev.dev);
                }
        }
        return IRQ_HANDLED;
@@ -457,38 +449,36 @@  static void push_desc_queue(struct cppi41_channel *c)
        cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
+       struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
+       int error;
+       struct cppi41_channel *_c;
        unsigned long flags;
 
        spin_lock_irqsave(&cdd->lock, flags);
        list_add_tail(&c->node, &cdd->pending);
-       spin_unlock_irqrestore(&cdd->lock, flags);
-}
-
-static void cppi41_dma_issue_pending(struct dma_chan *chan)
-{
-       struct cppi41_channel *c = to_cpp41_chan(chan);
-       struct cppi41_dd *cdd = c->cdd;
-       int error;
 
        error = pm_runtime_get(cdd->ddev.dev);
-       if ((error != -EINPROGRESS) && error < 0) {
+       if (error < 0) {
                pm_runtime_put_noidle(cdd->ddev.dev);
                dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
                        error);
-
+               spin_unlock_irqrestore(&cdd->lock, flags);
                return;
        }
 
-       if (likely(pm_runtime_active(cdd->ddev.dev)))
-               push_desc_queue(c);
-       else
-               pending_desc(c);
+       if (!cdd->is_suspended) {
+               list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+                       push_desc_queue(c);
+                       list_del(&c->node);
+               }
+       }
 
        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);
+       spin_unlock_irqrestore(&cdd->lock, flags);
 }
 
 static u32 get_host_pd0(u32 length)
@@ -1150,10 +1140,20 @@  static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       int ret = 0;
+       unsigned long flags;
 
-       WARN_ON(!list_empty(&cdd->pending));
+       spin_lock_irqsave(&cdd->lock, flags);
+       if (!list_empty(&cdd->pending)) {
+               WARN_ON(!list_empty(&cdd->pending));
+               ret = -EBUSY;
+       } else {
+               /* ... suspend the device ... */
+               cdd->is_suspended = 1;
+       }
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
-       return 0;
+       return ret;
 }
 
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
@@ -1163,6 +1163,8 @@  static int __maybe_unused cppi41_runtime_resume(struct device *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&cdd->lock, flags);
+       cdd->is_suspended = 0;
+
        list_for_each_entry_safe(c, _c, &cdd->pending, node) {
                push_desc_queue(c);
                list_del(&c->node);