@@ -19,6 +19,7 @@
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+#define CMDQ_GCE_NUM_MAX (2)
#define CMDQ_CURR_IRQ_STATUS 0x10
#define CMDQ_SYNC_TOKEN_UPDATE 0x68
@@ -73,14 +74,16 @@ struct cmdq {
u32 thread_nr;
u32 irq_mask;
struct cmdq_thread *thread;
- struct clk *clock;
+ struct clk *clock[CMDQ_GCE_NUM_MAX];
bool suspended;
u8 shift_pa;
+ u32 gce_num;
};
struct gce_plat {
u32 thread_nr;
u8 shift;
+ u32 gce_num;
};
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
@@ -120,11 +123,15 @@ static void cmdq_init(struct cmdq *cmdq)
{
int i;
- WARN_ON(clk_enable(cmdq->clock) < 0);
+ for (i = 0; i < cmdq->gce_num; i++)
+ WARN_ON(clk_enable(cmdq->clock[i]) < 0);
+
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
- clk_disable(cmdq->clock);
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ clk_disable(cmdq->clock[i]);
}
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
@@ -257,8 +264,12 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
}
if (list_empty(&thread->task_busy_list)) {
+ int i;
+
cmdq_thread_disable(cmdq, thread);
- clk_disable(cmdq->clock);
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ clk_disable(cmdq->clock[i]);
}
}
@@ -303,7 +314,8 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- clk_unprepare(cmdq->clock);
+ for (i = 0; i < cmdq->gce_num; i++)
+ clk_unprepare(cmdq->clock[i]);
return 0;
}
@@ -311,8 +323,11 @@ static int cmdq_suspend(struct device *dev)
static int cmdq_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ WARN_ON(clk_prepare(cmdq->clock[i]) < 0);
- WARN_ON(clk_prepare(cmdq->clock) < 0);
cmdq->suspended = false;
return 0;
}
@@ -320,8 +335,10 @@ static int cmdq_resume(struct device *dev)
static int cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
+ int i;
- clk_unprepare(cmdq->clock);
+ for (i = 0; i < cmdq->gce_num; i++)
+ clk_unprepare(cmdq->clock[i]);
return 0;
}
@@ -348,7 +365,11 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
- WARN_ON(clk_enable(cmdq->clock) < 0);
+ int i;
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ WARN_ON(clk_enable(cmdq->clock[i]) < 0);
+
/*
* The thread reset will clear thread related register to 0,
* including pc, end, priority, irq, suspend and enable. Thus
@@ -401,6 +422,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task, *tmp;
unsigned long flags;
+ int i;
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
@@ -420,7 +442,9 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
- clk_disable(cmdq->clock);
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ clk_disable(cmdq->clock[i]);
done:
/*
* The thread->task_busy_list empty means thread already disable. The
@@ -440,6 +464,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
struct cmdq_task *task, *tmp;
unsigned long flags;
u32 enable;
+ int i;
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
@@ -463,7 +488,9 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
- clk_disable(cmdq->clock);
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ clk_disable(cmdq->clock[i]);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
@@ -512,6 +539,9 @@ static int cmdq_probe(struct platform_device *pdev)
struct cmdq *cmdq;
int err, i;
struct gce_plat *plat_data;
+ struct device_node *phandle = dev->of_node;
+ struct device_node *node;
+ int alias_id = 0;
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -536,6 +566,7 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->thread_nr = plat_data->thread_nr;
cmdq->shift_pa = plat_data->shift;
+ cmdq->gce_num = plat_data->gce_num;
cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
"mtk_cmdq", cmdq);
@@ -547,10 +578,24 @@ static int cmdq_probe(struct platform_device *pdev)
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
- cmdq->clock = devm_clk_get(dev, "gce");
- if (IS_ERR(cmdq->clock)) {
- dev_err(dev, "failed to get gce clk\n");
- return PTR_ERR(cmdq->clock);
+	if (cmdq->gce_num > 1) {
+		for_each_child_of_node(phandle->parent, node) {
+			alias_id = of_alias_get_id(node, "gce");
+			if (alias_id >= 0 && alias_id < cmdq->gce_num) {
+				cmdq->clock[alias_id] = of_clk_get(node, 0);
+				if (IS_ERR(cmdq->clock[alias_id])) {
+					dev_err(dev, "failed to get gce clk: %d\n",
+						alias_id);
+					return PTR_ERR(cmdq->clock[alias_id]);
+				}
+			}
+		}
+	} else {
+		cmdq->clock[alias_id] = devm_clk_get(&pdev->dev, "gce");
+		if (IS_ERR(cmdq->clock[alias_id])) {
+			dev_err(dev, "failed to get gce clk\n");
+			return PTR_ERR(cmdq->clock[alias_id]);
+		}
}
cmdq->mbox.dev = dev;
@@ -586,7 +631,9 @@ static int cmdq_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, cmdq);
- WARN_ON(clk_prepare(cmdq->clock) < 0);
+
+ for (i = 0; i < cmdq->gce_num; i++)
+ WARN_ON(clk_prepare(cmdq->clock[i]) < 0);
cmdq_init(cmdq);
@@ -598,15 +645,32 @@ static const struct dev_pm_ops cmdq_pm_ops = {
.resume = cmdq_resume,
};
-static const struct gce_plat gce_plat_v2 = {.thread_nr = 16};
-static const struct gce_plat gce_plat_v3 = {.thread_nr = 24};
-static const struct gce_plat gce_plat_v4 = {.thread_nr = 24, .shift = 3};
+static const struct gce_plat gce_plat_v2 = {
+ .thread_nr = 16,
+ .shift = 0,
+ .gce_num = 1
+};
+static const struct gce_plat gce_plat_v3 = {
+ .thread_nr = 24,
+ .shift = 0,
+ .gce_num = 1
+};
+static const struct gce_plat gce_plat_v4 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .gce_num = 1
+};
+static const struct gce_plat gce_plat_v5 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .gce_num = 2
+};
static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
- {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v4},
+ {.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v5},
{}
};
For the design of GCE hardware event signal transportation, event rx will send the event signal to all GCE event merges after receiving the event signal from the other hardware. Because GCE event merges need to respond to event rx, their clocks must be enabled at that time. To make sure all the GCE clocks are enabled while receiving the hardware event, each cmdq mailbox should enable or disable the other GCE clocks at the same time. Signed-off-by: jason-jh.lin <jason-jh.lin@mediatek.com> --- drivers/mailbox/mtk-cmdq-mailbox.c | 102 +++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 19 deletions(-)