@@ -417,6 +417,42 @@ static int dw_mci_idmac_init(struct dw_mci *host)
return 0;
}
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+ struct mmc_data *data, struct dw_mci_next *next)
+{
+ int sg_len;
+
+ /* pre_req must never hand us data that is already prepared */
+ BUG_ON(next && data->host_cookie);
+
+ /* Drop a stale cookie left by a request that was prepared but
+ * never reached the DMA submit path. */
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ data->host_cookie = 0;
+ }
+
+ /* Map now unless pre_req already mapped this request, in which
+ * case its cookie matches the one stashed in next_data. */
+ if (next || data->host_cookie != host->next_data.cookie) {
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg,
+ data->sg_len, ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ } else {
+ sg_len = host->next_data.sg_len;
+ host->next_data.sg_len = 0;
+ }
+
+ if (sg_len == 0)
+ return -EINVAL;
+
+ if (next) {
+ /* Stash the mapped length and tag the request with a
+ * matching, always-positive cookie (0 means unprepared).
+ * data->sg_len is left untouched: dma_unmap_sg() needs the
+ * original nents, and callers use the return value anyway. */
+ next->sg_len = sg_len;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ }
+
+ return sg_len;
+}
+
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
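
For the cookie comparison in dw_mci_pre_dma_transfer() to behave on the very first request, host->next_data.cookie has to start out non-zero: a fresh mmc_data carries host_cookie == 0, and a zero-initialized cookie would make it look already prepared, sending the submit path to an empty next_data mapping. A minimal sketch of the probe-time initialization this implies (the starting value 1 is an assumption; any non-zero value works):

	/* in dw_mci_probe(): reserve 0 to mean "not prepared" */
	host->next_data.cookie = 1;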
@@ -451,13 +487,9 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
return -EINVAL;
}
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
- direction);
+ sg_len = dw_mci_pre_dma_transfer(host, data, NULL);
+ if (sg_len < 0)
+ return sg_len;
dev_vdbg(&host->pdev->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
@@ -643,6 +675,42 @@ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
spin_unlock_bh(&host->lock);
}
+static void dw_mci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ if (slot->host->use_dma) {
+ /* Only unmap what pre_req mapped; a zero cookie means the
+ * transfer went through the synchronous submit path and is
+ * cleaned up by the existing DMA cleanup hook. */
+ if (data->host_cookie)
+ dma_unmap_sg(&slot->host->pdev->dev, data->sg,
+ data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ data->host_cookie = 0;
+ }
+}
+
+static void dw_mci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ BUG_ON(data->host_cookie);
+
+ if (slot->host->use_dma) {
+ /* dw_mci_pre_dma_transfer() returns the mapped length on
+ * success; only a negative return means failure. */
+ if (dw_mci_pre_dma_transfer(slot->host, data,
+ &slot->host->next_data) < 0)
+ data->host_cookie = 0;
+ }
+}
+
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -748,6 +816,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
+ .pre_req = dw_mci_pre_request,
+ .post_req = dw_mci_post_request,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
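
One interaction worth calling out: the core calls pre_req on the next request while the current one is still in flight, and post_req once a request completes (or, with a negative err, when a prepared request is abandoned). Since dw_mci_post_request() now unmaps cookie-carrying transfers, any unmap in the driver's existing DMA cleanup path must skip data that pre_req mapped, or those buffers get unmapped twice. A hedged sketch, assuming the pre-patch dw_mci_dma_cleanup() helper is where the synchronous path unmaps:

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	/* A non-zero host_cookie means dw_mci_post_request() owns the
	 * unmap; only clean up transfers mapped by the submit path. */
	if (data && !data->host_cookie)
		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
			     ((data->flags & MMC_DATA_WRITE)
			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}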
@@ -35,6 +35,11 @@ enum {
struct mmc_data;
+struct dw_mci_next {
+ unsigned int sg_len; /* mapped entries for the prepared request */
+ s32 cookie; /* pairs with mmc_data->host_cookie; 0 = unprepared */
+};
+
/**
* struct dw_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
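
The s32 cookie pairs with mmc_data->host_cookie: 0 is reserved for "not prepared", and dw_mci_pre_dma_transfer() keeps handed-out values strictly positive by restarting at 1 when the increment wraps negative. One subtlety of the in-line "++next->cookie < 0 ? 1 : next->cookie" idiom is that on wrap the stored next_data.cookie stays negative while the request receives 1, so the two drift apart until the counter climbs back. A hypothetical helper (dw_mci_next_cookie is not part of the patch) that also stores the clamped value avoids that:

/* Hypothetical wrap-safe cookie generator: never returns 0 or a
 * negative value, and keeps next->cookie equal to what was handed out. */
static inline s32 dw_mci_next_cookie(struct dw_mci_next *next)
{
	if (++next->cookie <= 0)
		next->cookie = 1;
	return next->cookie;
}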
@@ -154,6 +159,8 @@ struct dw_mci {
u32 quirks;
struct regulator *vmmc; /* Power regulator */
+
+ struct dw_mci_next next_data;
};
/* DMA ops for Internal/External DMAC interface */