
[2/3] mailbox: mediatek: remove implementation related to atomic_exec

Message ID 20200214043325.16618-3-bibby.hsieh@mediatek.com (mailing list archive)
State New, archived
Series Remove atomic_exec

Commit Message

Bibby Hsieh Feb. 14, 2020, 4:33 a.m. UTC
Now that flush is implemented, the client can flush the command
buffer that is currently executing, or abort a command buffer that
is still waiting for an event, so the controller no longer needs
the atomic_exec feature. Remove it.
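
For reference, a minimal sketch of the client-side pattern that
replaces atomic_exec, assuming the mbox_flush() core API and the
current cmdq helper signatures (example_drain and the timeout value
are illustrative, not part of this patch):

#include <linux/mailbox_client.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

/* Illustrative only: queue a packet, then drain or abort it. */
static void example_drain(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
	/* Queue the command buffer on the GCE thread as usual. */
	cmdq_pkt_flush_async(pkt, NULL, NULL);

	/*
	 * Force the channel to completion: mbox_flush() invokes the
	 * controller's .flush() callback, which runs the pending
	 * commands to the end or aborts a task that is still waiting
	 * for an event.
	 */
	mbox_flush(client->chan, 2000 /* ms */);
}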

Signed-off-by: Bibby Hsieh <bibby.hsieh@mediatek.com>
---
 drivers/mailbox/mtk-cmdq-mailbox.c | 76 ++++--------------------------
 1 file changed, 8 insertions(+), 68 deletions(-)

Comments

CK Hu (胡俊光) Feb. 14, 2020, 5:54 a.m. UTC | #1
Hi, Bibby:

On Fri, 2020-02-14 at 12:33 +0800, Bibby Hsieh wrote:
> Now that flush is implemented, the client can flush the command
> buffer that is currently executing, or abort a command buffer that
> is still waiting for an event, so the controller no longer needs
> the atomic_exec feature. Remove it.
> 

Reviewed-by: CK Hu <ck.hu@mediatek.com>


Patch

diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 03e58ff62007..3ce777001aa5 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -57,7 +57,6 @@ struct cmdq_thread {
 	void __iomem		*base;
 	struct list_head	task_busy_list;
 	u32			priority;
-	bool			atomic_exec;
 };
 
 struct cmdq_task {
@@ -163,48 +162,11 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
 	cmdq_thread_invalidate_fetched_data(thread);
 }
 
-static bool cmdq_command_is_wfe(u64 cmd)
-{
-	u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
-	u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
-	u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
-
-	return ((cmd & wfe_mask) == (wfe_op | wfe_option));
-}
-
-/* we assume tasks in the same display GCE thread are waiting the same event. */
-static void cmdq_task_remove_wfe(struct cmdq_task *task)
-{
-	struct device *dev = task->cmdq->mbox.dev;
-	u64 *base = task->pkt->va_base;
-	int i;
-
-	dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
-				DMA_TO_DEVICE);
-	for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
-		if (cmdq_command_is_wfe(base[i]))
-			base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
-				  CMDQ_JUMP_PASS;
-	dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
-				   DMA_TO_DEVICE);
-}
-
 static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
 {
 	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
 }
 
-static void cmdq_thread_wait_end(struct cmdq_thread *thread,
-				 unsigned long end_pa)
-{
-	struct device *dev = thread->chan->mbox->dev;
-	unsigned long curr_pa;
-
-	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
-			curr_pa, curr_pa == end_pa, 1, 20))
-		dev_err(dev, "GCE thread cannot run to end.\n");
-}
-
 static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
 {
 	struct cmdq_task_cb *cb = &task->pkt->async_cb;
@@ -384,36 +346,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
 		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
 		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
 		end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
-
-		/*
-		 * Atomic execution should remove the following wfe, i.e. only
-		 * wait event at first task, and prevent to pause when running.
-		 */
-		if (thread->atomic_exec) {
-			/* GCE is executing if command is not WFE */
-			if (!cmdq_thread_is_in_wfe(thread)) {
-				cmdq_thread_resume(thread);
-				cmdq_thread_wait_end(thread, end_pa);
-				WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
-				/* set to this task directly */
-				writel(task->pa_base,
-				       thread->base + CMDQ_THR_CURR_ADDR);
-			} else {
-				cmdq_task_insert_into_thread(task);
-				cmdq_task_remove_wfe(task);
-				smp_mb(); /* modify jump before enable thread */
-			}
+		/* check boundary */
+		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
+		    curr_pa == end_pa) {
+			/* set to this task directly */
+			writel(task->pa_base,
+			       thread->base + CMDQ_THR_CURR_ADDR);
 		} else {
-			/* check boundary */
-			if (curr_pa == end_pa - CMDQ_INST_SIZE ||
-			    curr_pa == end_pa) {
-				/* set to this task directly */
-				writel(task->pa_base,
-				       thread->base + CMDQ_THR_CURR_ADDR);
-			} else {
-				cmdq_task_insert_into_thread(task);
-				smp_mb(); /* modify jump before enable thread */
-			}
+			cmdq_task_insert_into_thread(task);
+			smp_mb(); /* modify jump before enable thread */
 		}
 		writel(task->pa_base + pkt->cmd_buf_size,
 		       thread->base + CMDQ_THR_END_ADDR);
@@ -495,7 +436,6 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
 
 	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
 	thread->priority = sp->args[1];
-	thread->atomic_exec = (sp->args[2] != 0);
 	thread->chan = &mbox->chans[ind];
 
 	return &mbox->chans[ind];