@@ -187,10 +187,20 @@ static int qce_check_version(struct qce_device *qce)
 	return 0;
 }
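 
+/* devm action: undo the dma_map_resource() call made in probe */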
+static void qce_crypto_unmap_dma(void *data)
+{
+	struct qce_device *qce = data;
+
+	dma_unmap_resource(qce->dev, qce->base_dma, qce->dma_size,
+			   DMA_BIDIRECTIONAL, 0);
+}
+
 static int qce_crypto_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct qce_device *qce;
+	struct resource *res;
 	int ret;
 
 	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
@@ -200,7 +210,7 @@ static int qce_crypto_probe(struct platform_device *pdev)
 	qce->dev = dev;
 	platform_set_drvdata(pdev, qce);
 
-	qce->base = devm_platform_ioremap_resource(pdev, 0);
+	qce->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
 	if (IS_ERR(qce->base))
 		return PTR_ERR(qce->base);
@@ -246,7 +256,20 @@ static int qce_crypto_probe(struct platform_device *pdev)
 	qce->async_req_enqueue = qce_async_request_enqueue;
 	qce->async_req_done = qce_async_request_done;
-	return devm_qce_register_algs(qce);
+	/* Map the register region for DMA before any algorithm can use it. */
+	qce->dma_size = resource_size(res);
+	qce->base_dma = dma_map_resource(dev, res->start, qce->dma_size,
+					 DMA_BIDIRECTIONAL, 0);
+	ret = dma_mapping_error(dev, qce->base_dma);
+	if (ret)
+		return ret;
+
+	ret = devm_add_action_or_reset(dev, qce_crypto_unmap_dma, qce);
+	if (ret)
+		return ret;
+
+	/* Register algorithms last; devm then unmaps only after they're gone. */
+	return devm_qce_register_algs(qce);
 }
 
 static const struct of_device_id qce_crypto_of_match[] = {
@@ -26,6 +26,8 @@
  * @dma: pointer to dma data
  * @burst_size: the crypto burst size
  * @pipe_pair_id: which pipe pair id the device using
+ * @base_dma: base DMA address of the mapped register region
+ * @dma_size: size of the region mapped for DMA
  * @async_req_enqueue: invoked by every algorithm to enqueue a request
  * @async_req_done: invoked by every algorithm to finish its request
  */
@@ -42,6 +44,8 @@ struct qce_device {
 	struct qce_dma_data dma;
 	int burst_size;
 	unsigned int pipe_pair_id;
+	dma_addr_t base_dma;
+	size_t dma_size;
 	int (*async_req_enqueue)(struct qce_device *qce,
 				 struct crypto_async_request *req);
 	void (*async_req_done)(struct qce_device *qce, int ret);