@@ -243,9 +243,6 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
- pm_runtime_allow(fsl_chan->pd_dev);
-
return 0;
}
@@ -805,8 +802,12 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
int ret;
- if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
- clk_prepare_enable(fsl_chan->clk);
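+ /* Runtime-resume the channel device: powers up its domain (via the device link) and re-enables the channel clock */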
+ ret = pm_runtime_get_sync(&fsl_chan->vchan.chan.dev->device);
+ if (ret < 0) {
+ dev_err(&fsl_chan->vchan.chan.dev->device, "pm_runtime_get_sync() failed: %d\n", ret);
+ pm_runtime_put_noidle(&fsl_chan->vchan.chan.dev->device);
+ return ret;
+ }
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
@@ -819,6 +820,7 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
if (ret) {
dma_pool_destroy(fsl_chan->tcd_pool);
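+ /* Balance the pm_runtime_get_sync() taken above */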
+ pm_runtime_put_sync_suspend(&fsl_chan->vchan.chan.dev->device);
return ret;
}
}
@@ -851,8 +853,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
fsl_chan->is_remote = false;
- if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
- clk_disable_unprepare(fsl_chan->clk);
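+ /* Drop the reference taken in fsl_edma_alloc_chan_resources() */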
+ pm_runtime_put_sync_suspend(&fsl_chan->vchan.chan.dev->device);
}
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
@@ -420,7 +420,6 @@ MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
struct fsl_edma_chan *fsl_chan;
- struct device_link *link;
struct device *pd_chan;
struct device *dev;
int i;
@@ -439,24 +438,35 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
return -EINVAL;
}
- link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
- if (!link) {
- dev_err(dev, "Failed to add device_link to %d\n", i);
- return -EINVAL;
- }
-
fsl_chan->pd_dev = pd_chan;
-
- pm_runtime_use_autosuspend(fsl_chan->pd_dev);
- pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
- pm_runtime_set_active(fsl_chan->pd_dev);
}
return 0;
}
+/* Per-channel power domain: runtime PM gates the channel clock */
+static int fsl_edma_chan_runtime_suspend(struct device *dev)
+{
+ struct fsl_edma_chan *fsl_chan = dev_get_drvdata(dev);
+
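+ /* fsl_chan->clk is NULL on variants without FSL_EDMA_DRV_HAS_CHCLK; the clk API ignores NULL */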
+ clk_disable_unprepare(fsl_chan->clk);
+
+ return 0;
+}
+
+static int fsl_edma_chan_runtime_resume(struct device *dev)
+{
+ struct fsl_edma_chan *fsl_chan = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(fsl_chan->clk);
+}
+
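+/* Installed on each channel device with dev_pm_domain_set() in probe */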
+static struct dev_pm_domain fsl_edma_chan_pm_domain = {
+ .ops = {
+ RUNTIME_PM_OPS(fsl_edma_chan_runtime_suspend, fsl_edma_chan_runtime_resume, NULL)
+ }
+};
+
static int fsl_edma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -583,10 +593,15 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
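+ /* Power the channel's domain up while its registers are initialized below */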
+ if (fsl_chan->pd_dev)
+ pm_runtime_get_sync(fsl_chan->pd_dev);
+
edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
clk_disable_unprepare(fsl_chan->clk);
+ if (fsl_chan->pd_dev)
+ pm_runtime_put_sync_suspend(fsl_chan->pd_dev);
}
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
@@ -645,6 +660,34 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
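+ /* Enable runtime PM on the controller, then on each unmasked channel device */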
+ pm_runtime_enable(&pdev->dev);
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+ struct device *chan_dev;
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ chan_dev = &fsl_chan->vchan.chan.dev->device;
+ dev_set_drvdata(chan_dev, fsl_chan);
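+ /* Route the channel device's runtime PM callbacks to the clock handlers above */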
+ dev_pm_domain_set(chan_dev, &fsl_edma_chan_pm_domain);
+
+ if (fsl_chan->pd_dev) {
+ struct device_link *link;
+
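+ /* Link the channel device to its power domain so runtime PM propagates to it */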
+ link = device_link_add(chan_dev, fsl_chan->pd_dev, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!link)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to add device_link to %d\n", i);
+ pm_runtime_put_sync_suspend(fsl_chan->pd_dev);
+ }
+
+ pm_runtime_enable(chan_dev);
+ }
+
ret = of_dma_controller_register(np,
drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
fsl_edma);
@@ -685,6 +728,13 @@ static int fsl_edma_suspend_late(struct device *dev)
fsl_chan = &fsl_edma->chans[i];
if (fsl_edma->chan_masked & BIT(i))
continue;
+
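+ /* Skip channels that are already runtime suspended and, on split-register variants without a power domain, channels that never got a srcid */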
+ if (pm_runtime_status_suspended(&fsl_chan->vchan.chan.dev->device) ||
+ (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_PD) &&
+ (fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) &&
+ !fsl_chan->srcid))
+ continue;
+
spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */
if (unlikely(fsl_chan->status == DMA_IN_PROGRESS)) {
@@ -711,6 +761,13 @@ static int fsl_edma_resume_early(struct device *dev)
fsl_chan = &fsl_edma->chans[i];
if (fsl_edma->chan_masked & BIT(i))
continue;
+
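+ /* Mirror the skip logic in fsl_edma_suspend_late() */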
+ if (pm_runtime_status_suspended(&fsl_chan->vchan.chan.dev->device) ||
+ (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_PD) &&
+ (fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) &&
+ !fsl_chan->srcid))
+ continue;
+
fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->srcid != 0)
@@ -723,6 +780,33 @@ static int fsl_edma_resume_early(struct device *dev)
return 0;
}
+/* eDMA engine runtime suspend/resume */
+static int fsl_edma_runtime_suspend(struct device *dev)
+{
+ struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
+ int i;
+
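+ /* Gate the mux clocks, then the core eDMA clock */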
+ for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+
+ clk_disable_unprepare(fsl_edma->dmaclk);
+
+ return 0;
+}
+
+static int fsl_edma_runtime_resume(struct device *dev)
+{
+ struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ /* Reverse of runtime suspend: ungate the core clock first, then the muxes */
+ ret = clk_prepare_enable(fsl_edma->dmaclk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
+ ret = clk_prepare_enable(fsl_edma->muxclk[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+ clk_disable_unprepare(fsl_edma->dmaclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/*
* eDMA provides the service to others, so it should be suspend late
* and resume early. When eDMA suspend, all of the clients should stop
@@ -731,6 +815,7 @@ static int fsl_edma_resume_early(struct device *dev)
static const struct dev_pm_ops fsl_edma_pm_ops = {
.suspend_late = fsl_edma_suspend_late,
.resume_early = fsl_edma_resume_early,
+ RUNTIME_PM_OPS(fsl_edma_runtime_suspend, fsl_edma_runtime_resume, NULL)
};
static struct platform_driver fsl_edma_driver = {