@@ -800,7 +800,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
return -EINVAL;
ret = qat_alg_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
- &qat_req->buf, f);
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -844,7 +844,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
return -EINVAL;
ret = qat_alg_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
- &qat_req->buf, f);
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -1030,7 +1030,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
return 0;
ret = qat_alg_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
- &qat_req->buf, f);
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
@@ -1097,7 +1097,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
return 0;
ret = qat_alg_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
- &qat_req->buf, f);
+ &qat_req->buf, NULL, f);
if (unlikely(ret))
return ret;
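
The four hunks above only thread the new argument through the existing AEAD and skcipher paths; passing NULL keeps the old behaviour. A short sketch of that equivalence, using only names already visible in this patch:

        /* With a NULL params pointer the wrapper in the qat_bl changes below
         * leaves both extra_dst_buff and sz_extra_dst_buff at 0, so no extra
         * descriptor is appended and the call behaves exactly like the previous
         * five-argument qat_alg_sgl_to_bufl().
         */
        ret = qat_alg_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
                                  &qat_req->buf, NULL, f);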
@@ -32,10 +32,7 @@ void qat_alg_free_bufl(struct adf_accel_dev *accel_dev,
kfree(bl);
if (blp != blpout) {
- /* If out of place operation dma unmap only data */
- int bufless = blout->num_bufs - blout->num_mapped_bufs;
-
- for (i = bufless; i < blout->num_bufs; i++) {
+ for (i = 0; i < blout->num_mapped_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
DMA_BIDIRECTIONAL);
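
The unmap loop no longer needs the old "bufless" offset: with the reworked __qat_alg_sgl_to_bufl() further down, the entries mapped from the destination scatterlist always occupy the first num_mapped_bufs slots, while the optional extra buffer, mapped and owned by the caller, sits after them and is counted only in num_bufs. A comment-style sketch of the layout this loop relies on:

        /*
         * Destination buffer list layout after __qat_alg_sgl_to_bufl():
         *
         *   bufers[0 .. num_mapped_bufs - 1]   mapped from sglout, unmapped here
         *   bufers[num_mapped_bufs]            optional extra_dst_buff (mapped by
         *                                      the caller, never unmapped here)
         *
         *   num_bufs == num_mapped_bufs, or num_mapped_bufs + 1 when the extra
         *   destination buffer was appended.
         */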
@@ -47,11 +44,13 @@ void qat_alg_free_bufl(struct adf_accel_dev *accel_dev,
}
}
-int qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
- struct scatterlist *sgl,
- struct scatterlist *sglout,
- struct qat_request_buffs *buf,
- gfp_t flags)
+static int __qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ dma_addr_t extra_dst_buff,
+ size_t sz_extra_dst_buff,
+ gfp_t flags)
{
struct device *dev = &GET_DEV(accel_dev);
int i, sg_nctr = 0;
@@ -107,9 +106,10 @@ int qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
/* Handle out of place operation */
if (sgl != sglout) {
struct qat_alg_buf *bufers;
+ int extra_buff = extra_dst_buff ? 1 : 0;
n = sg_nents(sglout);
- sz_out = struct_size(buflout, bufers, n);
+ sz_out = struct_size(buflout, bufers, n + extra_buff);
sg_nctr = 0;
if (n > QAT_MAX_BUFF_DESC) {
@@ -140,7 +140,13 @@ int qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
bufers[y].len = sg->length;
sg_nctr++;
}
+ if (extra_buff) {
+ bufers[sg_nctr].addr = extra_dst_buff;
+ bufers[sg_nctr].len = sz_extra_dst_buff;
+ }
+
buflout->num_bufs = sg_nctr;
+ buflout->num_bufs += extra_buff;
buflout->num_mapped_bufs = sg_nctr;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
@@ -186,3 +192,23 @@ int qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
}
+
+int qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
+ struct scatterlist *sgl,
+ struct scatterlist *sglout,
+ struct qat_request_buffs *buf,
+ struct qat_sgl_to_bufl_params *params,
+ gfp_t flags)
+{
+ dma_addr_t extra_dst_buff = 0;
+ size_t sz_extra_dst_buff = 0;
+
+ if (params) {
+ extra_dst_buff = params->extra_dst_buff;
+ sz_extra_dst_buff = params->sz_extra_dst_buff;
+ }
+
+ return __qat_alg_sgl_to_bufl(accel_dev, sgl, sglout, buf,
+ extra_dst_buff, sz_extra_dst_buff,
+ flags);
+}
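
A hedged usage sketch of the new parameter, modelled on the wrapper above. The caller, its scratch buffer and their names (my_submit, my_scratch, my_scratch_sz) are illustrative only and not part of this patch; only qat_alg_sgl_to_bufl(), qat_alg_free_bufl() and struct qat_sgl_to_bufl_params come from the change itself. The extra destination buffer must already be DMA mapped by the caller and remains owned by it: qat_alg_free_bufl() unmaps only the num_mapped_bufs scatterlist entries.

        /* Illustrative only: a request path that lets the device overflow into a
         * private, pre-mapped scratch region appended after the destination
         * scatterlist. Assumes the usual qat_bl.h and dma-mapping includes.
         */
        static int my_submit(struct adf_accel_dev *accel_dev,
                             struct scatterlist *src, struct scatterlist *dst,
                             struct qat_request_buffs *buf,
                             void *my_scratch, size_t my_scratch_sz)
        {
                struct device *dev = &GET_DEV(accel_dev);
                struct qat_sgl_to_bufl_params params;
                dma_addr_t scratch_paddr;
                int ret;

                /* The extra buffer is mapped (and later unmapped) by the caller;
                 * qat_alg_free_bufl() will not touch it. DMA_FROM_DEVICE is an
                 * illustrative choice for a destination-only region.
                 */
                scratch_paddr = dma_map_single(dev, my_scratch, my_scratch_sz,
                                               DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev, scratch_paddr)))
                        return -ENOMEM;

                params.extra_dst_buff = scratch_paddr;
                params.sz_extra_dst_buff = my_scratch_sz;

                ret = qat_alg_sgl_to_bufl(accel_dev, src, dst, buf, &params,
                                          GFP_KERNEL);
                if (unlikely(ret))
                        goto unmap;

                /* ... submit the request to the device, wait for completion ... */

                qat_alg_free_bufl(accel_dev, buf);
        unmap:
                dma_unmap_single(dev, scratch_paddr, my_scratch_sz,
                                 DMA_FROM_DEVICE);
                return ret;
        }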
@@ -38,12 +38,18 @@ struct qat_request_buffs {
struct qat_alg_fixed_buf_list sgl_dst;
};
+struct qat_sgl_to_bufl_params {
+ dma_addr_t extra_dst_buff;
+ size_t sz_extra_dst_buff;
+};
+
void qat_alg_free_bufl(struct adf_accel_dev *accel_dev,
struct qat_request_buffs *buf);
int qat_alg_sgl_to_bufl(struct adf_accel_dev *accel_dev,
struct scatterlist *sgl,
struct scatterlist *sglout,
struct qat_request_buffs *buf,
+ struct qat_sgl_to_bufl_params *params,
gfp_t flags);
#endif