[v1,2/3] mailbox: mediatek: implement flush function

Message ID 20200217090532.16019-3-bibby.hsieh@mediatek.com (mailing list archive)
State New, archived
Series Remove atomic_exec

Commit Message

Bibby Hsieh Feb. 17, 2020, 9:05 a.m. UTC
A client driver that needs to reorganize its command buffer can use
this function to flush the commands it has already sent to the channel.
If the channel has not started executing yet (usually because it is
waiting for an event), the pending commands are aborted directly;
otherwise the function waits for the channel to finish, up to the given
timeout.

Signed-off-by: Bibby Hsieh <bibby.hsieh@mediatek.com>
---
 drivers/mailbox/mtk-cmdq-mailbox.c | 52 ++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)
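
For reference, the new .flush callback is reached through the mailbox
framework's mbox_flush() client API. The fragment below is only an
illustrative sketch (the channel setup, the packet and the 2000 us
timeout are assumptions, not part of this patch); it shows how a client
that wants to reorganize its command buffer could submit a packet and
then flush the channel:

#include <linux/mailbox_client.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

static int example_reorganize(struct mbox_chan *chan, struct cmdq_pkt *pkt)
{
	int ret;

	/* Queue the packet; cmdq_mbox_send_data() starts the GCE thread. */
	ret = mbox_send_message(chan, pkt);
	if (ret < 0)
		return ret;

	/*
	 * Before rebuilding the command buffer, make sure nothing is still
	 * queued: mbox_flush() ends up in cmdq_mbox_flush(), which aborts
	 * tasks still waiting for an event or polls (here up to 2000 us)
	 * for the GCE thread to drain.
	 */
	return mbox_flush(chan, 2000);
}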

Comments

CK Hu (胡俊光) Feb. 17, 2020, 9:29 a.m. UTC | #1
Hi, Bibby:

On Mon, 2020-02-17 at 17:05 +0800, Bibby Hsieh wrote:
> A client driver that needs to reorganize its command buffer can use
> this function to flush the commands it has already sent to the channel.
> If the channel has not started executing yet (usually because it is
> waiting for an event), the pending commands are aborted directly;
> otherwise the function waits for the channel to finish, up to the given
> timeout.
> 

Reviewed-by: CK Hu <ck.hu@mediatek.com>

> [snip]
Bibby Hsieh March 6, 2020, 3:55 a.m. UTC | #2
Hi, Jassi,

https://patchwork.kernel.org/cover/11385839/

I have posted a series of patches that removes the atomic_exec feature
from the Mediatek CMDQ driver. Could you please review them when you
have time?

Thanks

On Mon, 2020-02-17 at 17:29 +0800, CK Hu wrote:
> Hi, Bibby:
> 
> On Mon, 2020-02-17 at 17:05 +0800, Bibby Hsieh wrote:
> > A client driver that needs to reorganize its command buffer can use
> > this function to flush the commands it has already sent to the channel.
> > If the channel has not started executing yet (usually because it is
> > waiting for an event), the pending commands are aborted directly;
> > otherwise the function waits for the channel to finish, up to the given
> > timeout.
> > 
> 
> Reviewed-by: CK Hu <ck.hu@mediatek.com>
> 
> > [snip]

Patch

diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 9a6ce9f5a7db..0da5e2dc2c0e 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -432,10 +432,62 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
 {
 }
 
+static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
+	struct cmdq_task_cb *cb;
+	struct cmdq_cb_data data;
+	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
+	struct cmdq_task *task, *tmp;
+	unsigned long flags;
+	u32 enable;
+
+	spin_lock_irqsave(&thread->chan->lock, flags);
+	if (list_empty(&thread->task_busy_list))
+		goto out;
+
+	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
+	if (!cmdq_thread_is_in_wfe(thread))
+		goto wait;
+
+	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
+				 list_entry) {
+		cb = &task->pkt->async_cb;
+		if (cb->cb) {
+			data.sta = CMDQ_CB_ERROR;
+			data.data = cb->data;
+			cb->cb(data);
+		}
+		list_del(&task->list_entry);
+		kfree(task);
+	}
+
+	cmdq_thread_resume(thread);
+	cmdq_thread_disable(cmdq, thread);
+	clk_disable(cmdq->clock);
+
+out:
+	spin_unlock_irqrestore(&thread->chan->lock, flags);
+	return 0;
+
+wait:
+	cmdq_thread_resume(thread);
+	spin_unlock_irqrestore(&thread->chan->lock, flags);
+	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
+				      enable, enable == 0, 1, timeout)) {
+		dev_err(cmdq->mbox.dev, "Fail to wait GCE thread 0x%x done\n",
+			(u32)(thread->base - cmdq->base));
+
+		return -EFAULT;
+	}
+	return 0;
+}
+
 static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
 	.send_data = cmdq_mbox_send_data,
 	.startup = cmdq_mbox_startup,
 	.shutdown = cmdq_mbox_shutdown,
+	.flush = cmdq_mbox_flush,
 };
 
 static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
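
A side note on the abort path above: when a task is flushed before the
GCE thread has started on it, its async callback is invoked with
CMDQ_CB_ERROR (while the channel lock is held), so the client has to
treat that status as "packet dropped". A hypothetical client callback
could look like the sketch below; struct example_ctx and the completion
are illustrative only and not part of this patch:

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>

struct example_ctx {
	bool pending;
	struct completion done;
};

/* Example callback; the patch invokes pkt->async_cb.cb with
 * CMDQ_CB_ERROR when a still-pending packet is aborted. */
static void example_pkt_done(struct cmdq_cb_data data)
{
	struct example_ctx *ctx = data.data;

	if (data.sta == CMDQ_CB_ERROR) {
		/* Packet was flushed before it ever executed. */
		ctx->pending = false;
		return;
	}

	/* Normal completion path. */
	ctx->pending = false;
	complete(&ctx->done);
}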