@@ -415,6 +415,38 @@ void mmci_dma_release(struct mmci_host *host)
host->ops->dma_release(host);
}
 
+int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
+{
+ struct mmc_data *data = host->data;
+ int ret;
+
+ ret = mmci_prep_data(host, data, false);
+ if (ret)
+ return ret;
+
+ if (!host->ops || !host->ops->dma_start)
+ return -EINVAL;
+
+ /* Okay, go for it. */
+ dev_vdbg(mmc_dev(host->mmc),
+ "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
+ data->sg_len, data->blksz, data->blocks, data->flags);
+
+ host->ops->dma_start(host, &datactrl);
+
+ /* Trigger the DMA transfer */
+ mmci_write_datactrlreg(host, datactrl);
+
+ /*
+ * Let the MMCI say when the data is ended and it's time
+ * to fire next DMA request. When that happens, MMCI will
+ * call mmci_data_end()
+ */
+ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
+ host->base + MMCIMASK0);
+ return 0;
+}
+
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
@@ -721,20 +753,11 @@ int mmci_dmae_prep_data(struct mmci_host *host,
&dmae->desc_current);
}
 
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
struct mmci_dmae_priv *dmae = host->dma_priv;
struct mmc_data *data = host->data;
- int ret;
-
- ret = mmci_dmae_prep_data(host, host->data, false);
- if (ret)
- return ret;
- /* Okay, go for it. */
- dev_vdbg(mmc_dev(host->mmc),
- "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
- data->sg_len, data->blksz, data->blocks, data->flags);
 
dmae->in_progress = true;
dmaengine_submit(dmae->desc_current);
dma_async_issue_pending(dmae->cur);
@@ -742,18 +765,8 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
if (host->variant->qcom_dml)
dml_start_xfer(host, data);
 
- datactrl |= MCI_DPSM_DMAENABLE;
-
- /* Trigger the DMA transfer */
- mmci_write_datactrlreg(host, datactrl);
+ *datactrl |= MCI_DPSM_DMAENABLE;
 
- /*
- * Let the MMCI say when the data is ended and it's time
- * to fire next DMA request. When that happens, MMCI will
- * call mmci_data_end()
- */
- writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
- host->base + MMCIMASK0);
 
return 0;
}
@@ -806,6 +819,7 @@ static struct mmci_host_ops mmci_variant_ops = {
.get_next_data = mmci_dmae_get_next_data,
.dma_setup = mmci_dmae_setup,
.dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
};
#else
/* Blank functions if the DMA engine is not available */
@@ -818,11 +832,6 @@ static inline void mmci_dma_data_error(struct mmci_host *host)
{
}
 
-static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
-{
- return -ENOSYS;
-}
-
static struct mmci_host_ops mmci_variant_ops = {};
#endif
 
@@ -925,7 +934,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
- if (!mmci_dma_start_data(host, datactrl))
+ if (!mmci_dma_start(host, datactrl))
return;
 
/* IRQ mode, map the SG list for CPU reading/writing */
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -280,6 +280,7 @@ struct mmci_host_ops {
void (*get_next_data)(struct mmci_host *host, struct mmc_data *data);
int (*dma_setup)(struct mmci_host *host);
void (*dma_release)(struct mmci_host *host);
+ int (*dma_start)(struct mmci_host *host, unsigned int *datactrl);
};
 
struct mmci_host {
@@ -338,4 +339,5 @@ void mmci_dmae_unprep_data(struct mmci_host *host, struct mmc_data *data,
void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data);
int mmci_dmae_setup(struct mmci_host *host);
void mmci_dmae_release(struct mmci_host *host);
+int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl);
--- a/drivers/mmc/host/mmci_qcom_dml.c
+++ b/drivers/mmc/host/mmci_qcom_dml.c
@@ -187,6 +187,7 @@ static struct mmci_host_ops qcom_variant_ops = {
.get_next_data = mmci_dmae_get_next_data,
.dma_setup = qcom_dma_setup,
.dma_release = mmci_dmae_release,
+ .dma_start = mmci_dmae_start,
};
 
void qcom_variant_init(struct mmci_host *host)