From patchwork Thu Mar 5 02:06:21 2020
X-Patchwork-Id: 11421067
Subject: [PATCH v3 1/5] crypto: hisilicon - Use one workqueue per qm instead of per qp
From: Zaibo Xu
Date: Thu, 5 Mar 2020 10:06:21 +0800
Message-ID: <1583373985-718-2-git-send-email-xuzaibo@huawei.com>
X-Mailing-List: linux-crypto@vger.kernel.org

From: Shukun Tan

Our tests show that SEC does not need so many workqueues (one per QP).
Use a single workqueue per QM instead, created by the QM's device driver
when necessary; this also reduces wasted CPU without any decrease in
throughput.
Signed-off-by: Shukun Tan
Signed-off-by: Zaibo Xu
Reviewed-by: Jonathan Cameron
---
 drivers/crypto/hisilicon/qm.c | 39 ++++++++++++++++-----------------------
 drivers/crypto/hisilicon/qm.h |  5 +++--
 2 files changed, 19 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad7146a..2fa1d1a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -494,17 +494,9 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
     }
 }

-static void qm_qp_work_func(struct work_struct *work)
+static void qm_work_process(struct work_struct *work)
 {
-    struct hisi_qp *qp;
-
-    qp = container_of(work, struct hisi_qp, work);
-    qm_poll_qp(qp, qp->qm);
-}
-
-static irqreturn_t qm_irq_handler(int irq, void *data)
-{
-    struct hisi_qm *qm = data;
+    struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
     struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
     struct hisi_qp *qp;
     int eqe_num = 0;
@@ -513,7 +505,7 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
         eqe_num++;
         qp = qm_to_hisi_qp(qm, eqe);
         if (qp)
-            queue_work(qp->wq, &qp->work);
+            qm_poll_qp(qp, qm);

         if (qm->status.eq_head == QM_Q_DEPTH - 1) {
             qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -531,6 +523,17 @@ static irqreturn_t qm_irq_handler(int irq, void *data)
     }

     qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+}
+
+static irqreturn_t do_qm_irq(int irq, void *data)
+{
+    struct hisi_qm *qm = (struct hisi_qm *)data;
+
+    /* use the workqueue created by the QM's device driver, if any */
+    if (qm->wq)
+        queue_work(qm->wq, &qm->work);
+    else
+        schedule_work(&qm->work);

     return IRQ_HANDLED;
 }
@@ -540,7 +543,7 @@ static irqreturn_t qm_irq(int irq, void *data)
     struct hisi_qm *qm = data;

     if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
-        return qm_irq_handler(irq, data);
+        return do_qm_irq(irq, data);

     dev_err(&qm->pdev->dev, "invalid int source\n");
     qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
@@ -1159,20 +1162,9 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
     qp->qp_id = qp_id;
     qp->alg_type = alg_type;
-    INIT_WORK(&qp->work, qm_qp_work_func);
-    qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
-                 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0);
-    if (!qp->wq) {
-        ret = -EFAULT;
-        goto err_free_qp_mem;
-    }

     return qp;

-err_free_qp_mem:
-    if (qm->use_dma_api)
-        dma_free_coherent(dev, qp->qdma.size, qp->qdma.va,
-                  qp->qdma.dma);
 err_clear_bit:
     write_lock(&qm->qps_lock);
     qm->qp_array[qp_id] = NULL;
@@ -1704,6 +1696,7 @@ int hisi_qm_init(struct hisi_qm *qm)
     qm->qp_in_used = 0;
     mutex_init(&qm->mailbox_lock);
     rwlock_init(&qm->qps_lock);
+    INIT_WORK(&qm->work, qm_work_process);

     dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf",
         qm->use_dma_api ? "dma api" : "iommu api");
"dma api" : "iommu api"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 1a4f208..c72c2e6 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -183,6 +183,9 @@ struct hisi_qm { u32 error_mask; u32 msi_mask; + struct workqueue_struct *wq; + struct work_struct work; + const char *algs; bool use_dma_api; bool use_sva; @@ -219,8 +222,6 @@ struct hisi_qp { void *qp_ctx; void (*req_cb)(struct hisi_qp *qp, void *data); void (*event_cb)(struct hisi_qp *qp); - struct work_struct work; - struct workqueue_struct *wq; struct hisi_qm *qm; u16 pasid; From patchwork Thu Mar 5 02:06:22 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Xu Zaibo X-Patchwork-Id: 11421077 X-Patchwork-Delegate: herbert@gondor.apana.org.au Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 2B0BA1580 for ; Thu, 5 Mar 2020 02:10:16 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 168DA2084E for ; Thu, 5 Mar 2020 02:10:16 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726007AbgCECKP (ORCPT ); Wed, 4 Mar 2020 21:10:15 -0500 Received: from szxga05-in.huawei.com ([45.249.212.191]:11163 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1725777AbgCECKN (ORCPT ); Wed, 4 Mar 2020 21:10:13 -0500 Received: from DGGEMS411-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id 1BB5237181449ABE785D; Thu, 5 Mar 2020 10:10:10 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS411-HUB.china.huawei.com (10.3.19.211) with Microsoft SMTP Server id 14.3.439.0; Thu, 5 Mar 2020 10:10:01 +0800 From: Zaibo Xu To: , CC: , , , , , , , , , , Subject: [PATCH v3 2/5] crypto: hisilicon/sec2 - Add workqueue for SEC driver. Date: Thu, 5 Mar 2020 10:06:22 +0800 Message-ID: <1583373985-718-3-git-send-email-xuzaibo@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1583373985-718-1-git-send-email-xuzaibo@huawei.com> References: <1583373985-718-1-git-send-email-xuzaibo@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-crypto-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-crypto@vger.kernel.org From: Ye Kai Allocate one workqueue for each QM instead of one for all QMs, we found the throughput of SEC engine can be increased to the hardware limit throughput during testing sec2 performance. so we added this scheme. Signed-off-by: Ye Kai Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Reviewed-by: Jonathan Cameron --- drivers/crypto/hisilicon/sec2/sec_main.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 3767fdb..1fe2558 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -774,12 +774,30 @@ static void sec_qm_uninit(struct hisi_qm *qm) static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) { + int ret; + + /* + * WQ_HIGHPRI: SEC request must be low delayed, + * so need a high priority workqueue. + * WQ_UNBOUND: SEC task is likely with long + * running CPU intensive workloads. 
From patchwork Thu Mar 5 02:06:22 2020
X-Patchwork-Id: 11421077
Subject: [PATCH v3 2/5] crypto: hisilicon/sec2 - Add workqueue for SEC driver
From: Zaibo Xu
Date: Thu, 5 Mar 2020 10:06:22 +0800
Message-ID: <1583373985-718-3-git-send-email-xuzaibo@huawei.com>
X-Mailing-List: linux-crypto@vger.kernel.org

From: Ye Kai

Allocate one workqueue for each QM instead of sharing one across all
QMs. During sec2 performance testing we found that this lets the SEC
engine's throughput reach the hardware limit, so this scheme was
adopted.

Signed-off-by: Ye Kai
Signed-off-by: Longfang Liu
Signed-off-by: Zaibo Xu
Reviewed-by: Jonathan Cameron
---
 drivers/crypto/hisilicon/sec2/sec_main.c | 32 +++++++++++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 3767fdb..1fe2558 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -774,12 +774,30 @@ static void sec_qm_uninit(struct hisi_qm *qm)

 static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
 {
+    int ret;
+
+    /*
+     * WQ_HIGHPRI: SEC requests must have low latency,
+     * so a high-priority workqueue is needed.
+     * WQ_UNBOUND: SEC tasks are likely to be long-running,
+     * CPU-intensive workloads.
+     */
+    qm->wq = alloc_workqueue("%s", WQ_HIGHPRI |
+        WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+        pci_name(qm->pdev));
+    if (!qm->wq) {
+        pci_err(qm->pdev, "fail to alloc workqueue\n");
+        return -ENOMEM;
+    }
+
     if (qm->fun_type == QM_HW_PF) {
         qm->qp_base = SEC_PF_DEF_Q_BASE;
         qm->qp_num = pf_q_num;
         qm->debug.curr_qm_qp_num = pf_q_num;

-        return sec_pf_probe_init(sec);
+        ret = sec_pf_probe_init(sec);
+        if (ret)
+            goto err_probe_uninit;
     } else if (qm->fun_type == QM_HW_VF) {
         /*
          * have no way to get qm configure in VM in v1 hardware,
@@ -792,18 +810,26 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec)
             qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
         } else if (qm->ver == QM_HW_V2) {
             /* v2 starts to support get vft by mailbox */
-            return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+            ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+            if (ret)
+                goto err_probe_uninit;
         }
     } else {
-        return -ENODEV;
+        ret = -ENODEV;
+        goto err_probe_uninit;
     }

     return 0;
+err_probe_uninit:
+    destroy_workqueue(qm->wq);
+    return ret;
 }

 static void sec_probe_uninit(struct hisi_qm *qm)
 {
     hisi_qm_dev_err_uninit(qm);
+
+    destroy_workqueue(qm->wq);
 }

 static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
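A brief note on the API used above: alloc_workqueue() takes a printf-style name format, the flags, and max_active, followed by the format arguments, so the queue here is named after the PCI device. A minimal sketch of the same allocate/teardown pairing, with hypothetical names (my_dev, my_probe_init) standing in for the driver's types and entry points:

#include <linux/pci.h>
#include <linux/workqueue.h>

struct my_dev {
    struct pci_dev *pdev;
    struct workqueue_struct *wq;
};

static int my_probe_init(struct my_dev *md)
{
    int ret;

    /* printf-style name: the queue is named after the PCI device */
    md->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
                 WQ_UNBOUND, num_online_cpus(),
                 pci_name(md->pdev));
    if (!md->wq)
        return -ENOMEM;

    ret = 0;    /* stand-in for the remaining init steps */
    if (ret)
        destroy_workqueue(md->wq);    /* unwind on any failure */

    return ret;
}

The key design point the patch follows is that every error path after the allocation must reach destroy_workqueue(), mirrored by the err_probe_uninit label above.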
From patchwork Thu Mar 5 02:06:23 2020
X-Patchwork-Id: 11421075
Subject: [PATCH v3 3/5] crypto: hisilicon/sec2 - Add iommu status check
From: Zaibo Xu
Date: Thu, 5 Mar 2020 10:06:23 +0800
Message-ID: <1583373985-718-4-git-send-email-xuzaibo@huawei.com>
X-Mailing-List: linux-crypto@vger.kernel.org

From: Longfang Liu

To improve the performance of small packets (<512 bytes) in the SMMU
translation scenario, identify the IOMMU type during SEC probe so that
small packets can be processed by a different method.

Signed-off-by: Longfang Liu
Signed-off-by: Zaibo Xu
Reviewed-by: Jonathan Cameron
---
 drivers/crypto/hisilicon/sec2/sec.h      |  1 +
 drivers/crypto/hisilicon/sec2/sec_main.c | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 13e2d8d..eab0d22 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -165,6 +165,7 @@ struct sec_dev {
     struct list_head list;
     struct sec_debug debug;
     u32 ctx_q_num;
+    bool iommu_used;
     u32 num_vfs;
     unsigned long status;
 };
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 1fe2558..4f354d7 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -7,6 +7,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <linux/iommu.h>
 #include <...>
 #include <...>
 #include <...>
@@ -832,6 +833,23 @@ static void sec_probe_uninit(struct hisi_qm *qm)
     destroy_workqueue(qm->wq);
 }

+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+    struct iommu_domain *domain;
+    struct device *dev = &sec->qm.pdev->dev;
+
+    domain = iommu_get_domain_for_dev(dev);
+
+    /* Check if iommu is used */
+    sec->iommu_used = false;
+    if (domain) {
+        if (domain->type & __IOMMU_DOMAIN_PAGING)
+            sec->iommu_used = true;
+        dev_info(dev, "SMMU Opened, the iommu type = %u\n",
+             domain->type);
+    }
+}
+
 static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
     struct sec_dev *sec;
@@ -845,6 +863,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     pci_set_drvdata(pdev, sec);

     sec->ctx_q_num = ctx_q_num;
+    sec_iommu_used_check(sec);

     qm = &sec->qm;
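The domain-type test above distinguishes translating domains from passthrough ones. A condensed, self-contained form of the same check, with a hypothetical helper name:

/*
 * Condensed form of the check sec_iommu_used_check() performs.
 * __IOMMU_DOMAIN_PAGING is set for DMA and unmanaged domains that
 * translate through IOMMU page tables, but not for an identity
 * (passthrough) domain, so this answers "does DMA for this device go
 * through translation?".
 */
#include <linux/iommu.h>

static bool dev_uses_iommu_paging(struct device *dev)
{
    struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

    return domain && (domain->type & __IOMMU_DOMAIN_PAGING);
}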
From patchwork Thu Mar 5 02:06:24 2020
X-Patchwork-Id: 11421071
Subject: [PATCH v3 4/5] crypto: hisilicon/sec2 - Update IV and MAC operation
From: Zaibo Xu
Date: Thu, 5 Mar 2020 10:06:24 +0800
Message-ID: <1583373985-718-5-git-send-email-xuzaibo@huawei.com>
X-Mailing-List: linux-crypto@vger.kernel.org

From: Longfang Liu

Update the way the IV and MAC addresses are handled, to prepare for the
pbuf patch, and fix coding style issues.

Signed-off-by: Longfang Liu
Signed-off-by: Zaibo Xu
Reviewed-by: Jonathan Cameron
---
 drivers/crypto/hisilicon/sec2/sec.h        |  2 +
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 73 +++++++++++++++++-------------
 2 files changed, 43 insertions(+), 32 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index eab0d22..e67b416 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -23,6 +23,8 @@ struct sec_cipher_req {
     dma_addr_t c_in_dma;
     struct hisi_acc_hw_sgl *c_out;
     dma_addr_t c_out_dma;
+    u8 *c_ivin;
+    dma_addr_t c_ivin_dma;
     struct skcipher_request *sk_req;
     u32 c_len;
     bool encrypt;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index acd1550..1eeaa74 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -46,6 +46,7 @@
 #define SEC_CIPHER_AUTH        0xfe
 #define SEC_AUTH_CIPHER        0x1
 #define SEC_MAX_MAC_LEN        64
+#define SEC_MAX_AAD_LEN        65535
 #define SEC_TOTAL_MAC_SZ    (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
 #define SEC_SQE_LEN_RATE    4
 #define SEC_SQE_CFLAG        2
@@ -110,12 +111,12 @@ static void sec_free_req_id(struct sec_req *req)
     mutex_unlock(&qp_ctx->req_lock);
 }

-static int sec_aead_verify(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
+static int sec_aead_verify(struct sec_req *req)
 {
     struct aead_request *aead_req = req->aead_req.aead_req;
     struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
-    u8 *mac_out = qp_ctx->res[req->req_id].out_mac;
     size_t authsize = crypto_aead_authsize(tfm);
+    u8 *mac_out = req->aead_req.out_mac;
     u8 *mac = mac_out + SEC_MAX_MAC_LEN;
     struct scatterlist *sgl = aead_req->src;
     size_t sz;
@@ -163,7 +164,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
     }

     if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
-        err = sec_aead_verify(req, qp_ctx);
+        err = sec_aead_verify(req);

     atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);

@@ -259,11 +260,11 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
     if (ctx->alg_type == SEC_AEAD) {
         ret = sec_alloc_mac_resource(dev, res);
         if (ret)
-            goto get_fail;
+            goto alloc_fail;
     }

     return 0;
-get_fail:
+alloc_fail:
     sec_free_civ_resource(dev, res);

     return ret;
@@ -590,11 +591,21 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

-static int sec_cipher_map(struct device *dev, struct sec_req *req,
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
               struct scatterlist *src, struct scatterlist *dst)
 {
     struct sec_cipher_req *c_req = &req->c_req;
+    struct sec_aead_req *a_req = &req->aead_req;
     struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+    struct sec_alg_res *res = &qp_ctx->res[req->req_id];
+    struct device *dev = SEC_CTX_DEV(ctx);
+
+    c_req->c_ivin = res->c_ivin;
+    c_req->c_ivin_dma = res->c_ivin_dma;
+    if (ctx->alg_type == SEC_AEAD) {
+        a_req->out_mac = res->out_mac;
+        a_req->out_mac_dma = res->out_mac_dma;
+    }

     c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
                             qp_ctx->c_in_pool,
@@ -625,29 +636,30 @@ static int sec_cipher_map(struct device *dev, struct sec_req *req,
     return 0;
 }

-static void sec_cipher_unmap(struct device *dev, struct sec_cipher_req *req,
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
                  struct scatterlist *src, struct scatterlist *dst)
 {
+    struct sec_cipher_req *c_req = &req->c_req;
+    struct device *dev = SEC_CTX_DEV(ctx);
+
     if (dst != src)
-        hisi_acc_sg_buf_unmap(dev, src, req->c_in);
+        hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

-    hisi_acc_sg_buf_unmap(dev, dst, req->c_out);
+    hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
 }

 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
 {
     struct skcipher_request *sq = req->c_req.sk_req;

-    return sec_cipher_map(SEC_CTX_DEV(ctx), req, sq->src, sq->dst);
+    return sec_cipher_map(ctx, req, sq->src, sq->dst);
 }

 static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
 {
-    struct device *dev = SEC_CTX_DEV(ctx);
-    struct sec_cipher_req *c_req = &req->c_req;
-    struct skcipher_request *sk_req = c_req->sk_req;
+    struct skcipher_request *sq = req->c_req.sk_req;

-    sec_cipher_unmap(dev, c_req, sk_req->src, sk_req->dst);
+    sec_cipher_unmap(ctx, req, sq->src, sq->dst);
 }

 static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
@@ -758,16 +770,14 @@ static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
 {
     struct aead_request *aq = req->aead_req.aead_req;

-    return sec_cipher_map(SEC_CTX_DEV(ctx), req, aq->src, aq->dst);
+    return sec_cipher_map(ctx, req, aq->src, aq->dst);
 }

 static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
 {
-    struct device *dev = SEC_CTX_DEV(ctx);
-    struct sec_cipher_req *cq = &req->c_req;
     struct aead_request *aq = req->aead_req.aead_req;

-    sec_cipher_unmap(dev, cq, aq->src, aq->dst);
+    sec_cipher_unmap(ctx, req, aq->src, aq->dst);
 }

 static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
@@ -800,9 +810,9 @@ static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
 static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
     struct skcipher_request *sk_req = req->c_req.sk_req;
-    u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+    struct sec_cipher_req *c_req = &req->c_req;

-    memcpy(c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
+    memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
 }

 static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -817,8 +827,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
     memset(sec_sqe, 0, sizeof(struct sec_sqe));

     sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
-    sec_sqe->type2.c_ivin_addr =
-        cpu_to_le64(req->qp_ctx->res[req->req_id].c_ivin_dma);
+    sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
     sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
     sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

@@ -903,9 +912,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
     struct aead_request *aead_req = req->aead_req.aead_req;
-    u8 *c_ivin = req->qp_ctx->res[req->req_id].c_ivin;
+    struct sec_cipher_req *c_req = &req->c_req;

-    memcpy(c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+    memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
 }

 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
@@ -938,8 +947,7 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
     sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

-    sec_sqe->type2.mac_addr =
-        cpu_to_le64(req->qp_ctx->res[req->req_id].out_mac_dma);
+    sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
 }

 static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
@@ -963,6 +971,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 {
     struct aead_request *a_req = req->aead_req.aead_req;
     struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
+    struct sec_aead_req *aead_req = &req->aead_req;
     struct sec_cipher_req *c_req = &req->c_req;
     size_t authsize = crypto_aead_authsize(tfm);
     struct sec_qp_ctx *qp_ctx = req->qp_ctx;
@@ -978,7 +987,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
         struct scatterlist *sgl = a_req->dst;

         sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
-                      qp_ctx->res[req->req_id].out_mac,
+                      aead_req->out_mac,
                       authsize, a_req->cryptlen +
                       a_req->assoclen);
@@ -1030,6 +1039,7 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)

 static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
 {
+    struct sec_cipher_req *c_req = &req->c_req;
     int ret;

     ret = sec_request_init(ctx, req);
@@ -1056,12 +1066,10 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
     /* On failure, restore the IV from the user request */
     if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
         if (ctx->alg_type == SEC_SKCIPHER)
-            memcpy(req->c_req.sk_req->iv,
-                   req->qp_ctx->res[req->req_id].c_ivin,
+            memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
                    ctx->c_ctx.ivsize);
         else
-            memcpy(req->aead_req.aead_req->iv,
-                   req->qp_ctx->res[req->req_id].c_ivin,
+            memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
                    ctx->c_ctx.ivsize);
     }

@@ -1320,7 +1328,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
     struct crypto_aead *tfm = crypto_aead_reqtfm(req);
     size_t authsize = crypto_aead_authsize(tfm);

-    if (unlikely(!req->src || !req->dst || !req->cryptlen)) {
+    if (unlikely(!req->src || !req->dst || !req->cryptlen ||
+        req->assoclen > SEC_MAX_AAD_LEN)) {
         dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
         return -EINVAL;
     }
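The net effect of this patch is an indirection: buffer pointers are resolved once in sec_cipher_map() and cached in the request, so consumers such as the BD-fill and callback paths no longer reach into qp_ctx->res[req_id] directly. A toy illustration of the same pattern in plain C; all names here are hypothetical, not the driver's:

/*
 * Toy sketch of the pointer-caching pattern: resolve the buffer once at
 * map time, consume it blindly later. This separation is what lets the
 * following pbuf patch substitute a differently-backed buffer without
 * touching any consumer.
 */
#include <stdint.h>

struct toy_res { uint8_t *iv; uint64_t iv_dma; };

struct toy_req {
    uint8_t *ivin;      /* cached at map time */
    uint64_t ivin_dma;
};

static void toy_map(struct toy_req *r, struct toy_res *res)
{
    r->ivin = res->iv;  /* could equally point into a pbuf slot */
    r->ivin_dma = res->iv_dma;
}

static void toy_fill_bd(const struct toy_req *r, uint64_t *bd_iv_addr)
{
    *bd_iv_addr = r->ivin_dma;  /* no knowledge of the backing store */
}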
From patchwork Thu Mar 5 02:06:25 2020
X-Patchwork-Id: 11421073
Subject: [PATCH v3 5/5] crypto: hisilicon/sec2 - Add pbuffer mode for SEC driver
From: Zaibo Xu
Date: Thu, 5 Mar 2020 10:06:25 +0800
Message-ID: <1583373985-718-6-git-send-email-xuzaibo@huawei.com>
X-Mailing-List: linux-crypto@vger.kernel.org

From: Longfang Liu

In the SMMU translation scenario, SEC performance on short messages
(<512 bytes) does not meet our expectations. To avoid this, reserve
plat buffer (pbuf) memory for small packets when the TFM is created.

Signed-off-by: Longfang Liu
Signed-off-by: Zaibo Xu
Reviewed-by: Jonathan Cameron
---
 drivers/crypto/hisilicon/sec2/sec.h        |   4 +
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 173 ++++++++++++++++++++++++++++-
 2 files changed, 172 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index e67b416..a73d82c 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -11,6 +11,8 @@

 /* Algorithm resource per hardware SEC queue */
 struct sec_alg_res {
+    u8 *pbuf;
+    dma_addr_t pbuf_dma;
     u8 *c_ivin;
     dma_addr_t c_ivin_dma;
     u8 *out_mac;
@@ -50,6 +52,7 @@ struct sec_req {

     /* Status of the SEC request */
     bool fake_busy;
+    bool use_pbuf;
 };

 /**
@@ -130,6 +133,7 @@ struct sec_ctx {
     atomic_t dec_qcyclic;

     enum sec_alg_type alg_type;
+    bool pbuf_supported;
     struct sec_cipher_ctx c_ctx;
     struct sec_auth_ctx a_ctx;
 };
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 1eeaa74..b2557431 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -48,6 +48,19 @@
 #define SEC_MAX_MAC_LEN        64
 #define SEC_MAX_AAD_LEN        65535
 #define SEC_TOTAL_MAC_SZ    (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ        512
+#define SEC_PBUF_IV_OFFSET    SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET    (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG        (SEC_PBUF_SZ + SEC_IV_SIZE +    \
+                 SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM        (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM    (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ    (SEC_PBUF_PKG * (QM_Q_DEPTH -    \
+                 SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ    (PAGE_SIZE * SEC_PBUF_PAGE_NUM +    \
+                 SEC_PBUF_LEFT_SZ)
+
 #define SEC_SQE_LEN_RATE    4
 #define SEC_SQE_CFLAG        2
 #define SEC_SQE_AEAD_FLAG    3
@@ -246,6 +259,50 @@ static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
               res->out_mac, res->out_mac_dma);
 }

+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+    if (res->pbuf)
+        dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+                  res->pbuf, res->pbuf_dma);
+}
+
+/*
+ * To improve performance, a pbuffer is used for
+ * small packets (< 512 bytes) when IOMMU translation is in use.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+    int pbuf_page_offset;
+    int i, j, k;
+
+    res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+                       &res->pbuf_dma, GFP_KERNEL);
+    if (!res->pbuf)
+        return -ENOMEM;
+
+    /*
+     * Each SEC_PBUF_PKG contains a data pbuf, an IV and an out_mac.
+     * Every page holds SEC_PBUF_NUM (six) SEC_PBUF_PKGs, and the
+     * sec_qp_ctx needs QM_Q_DEPTH of them, so SEC_PBUF_PAGE_NUM
+     * pages (plus the leftover) make up SEC_TOTAL_PBUF_SZ.
+     */
+    for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+        pbuf_page_offset = PAGE_SIZE * i;
+        for (j = 0; j < SEC_PBUF_NUM; j++) {
+            k = i * SEC_PBUF_NUM + j;
+            if (k == QM_Q_DEPTH)
+                break;
+            res[k].pbuf = res->pbuf +
+                j * SEC_PBUF_PKG + pbuf_page_offset;
+            res[k].pbuf_dma = res->pbuf_dma +
+                j * SEC_PBUF_PKG + pbuf_page_offset;
+        }
+    }
+
+    return 0;
+}
+
 static int sec_alg_resource_alloc(struct sec_ctx *ctx,
                   struct sec_qp_ctx *qp_ctx)
 {
@@ -262,6 +319,13 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
         if (ret)
             goto alloc_fail;
     }
+    if (ctx->pbuf_supported) {
+        ret = sec_alloc_pbuf_resource(dev, res);
+        if (ret) {
+            dev_err(dev, "fail to alloc pbuf dma resource!\n");
+            goto alloc_fail;
+        }
+    }

     return 0;
 alloc_fail:
@@ -277,6 +341,8 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,

     sec_free_civ_resource(dev, qp_ctx->res);

+    if (ctx->pbuf_supported)
+        sec_free_pbuf_resource(dev, qp_ctx->res);
     if (ctx->alg_type == SEC_AEAD)
         sec_free_mac_resource(dev, qp_ctx->res);
 }
@@ -369,6 +435,8 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
     ctx->sec = sec;
     ctx->hlf_q_num = sec->ctx_q_num >> 1;

+    ctx->pbuf_supported = ctx->sec->iommu_used;
+
     /* Half of queue depth is taken as fake requests limit in the queue. */
     ctx->fake_req_limit = QM_Q_DEPTH >> 1;
     ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
@@ -591,6 +659,66 @@ GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
 GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
 GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+                   struct scatterlist *src)
+{
+    struct aead_request *aead_req = req->aead_req.aead_req;
+    struct sec_cipher_req *c_req = &req->c_req;
+    struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+    struct device *dev = SEC_CTX_DEV(ctx);
+    int copy_size, pbuf_length;
+    int req_id = req->req_id;
+
+    if (ctx->alg_type == SEC_AEAD)
+        copy_size = aead_req->cryptlen + aead_req->assoclen;
+    else
+        copy_size = c_req->c_len;
+
+    pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+                    qp_ctx->res[req_id].pbuf,
+                    copy_size);
+
+    if (unlikely(pbuf_length != copy_size)) {
+        dev_err(dev, "copy src data to pbuf error!\n");
+        return -EINVAL;
+    }
+
+    c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+
+    if (!c_req->c_in_dma) {
+        dev_err(dev, "fail to set pbuffer address!\n");
+        return -ENOMEM;
+    }
+
+    c_req->c_out_dma = c_req->c_in_dma;
+
+    return 0;
+}
+
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+                  struct scatterlist *dst)
+{
+    struct aead_request *aead_req = req->aead_req.aead_req;
+    struct sec_cipher_req *c_req = &req->c_req;
+    struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+    struct device *dev = SEC_CTX_DEV(ctx);
+    int copy_size, pbuf_length;
+    int req_id = req->req_id;
+
+    if (ctx->alg_type == SEC_AEAD)
+        copy_size = c_req->c_len + aead_req->assoclen;
+    else
+        copy_size = c_req->c_len;
+
+    pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+                      qp_ctx->res[req_id].pbuf,
+                      copy_size);
+
+    if (unlikely(pbuf_length != copy_size))
+        dev_err(dev, "copy pbuf data to dst error!\n");
+}
+
 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
               struct scatterlist *src, struct scatterlist *dst)
 {
@@ -599,7 +727,20 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
     struct sec_qp_ctx *qp_ctx = req->qp_ctx;
     struct sec_alg_res *res = &qp_ctx->res[req->req_id];
     struct device *dev = SEC_CTX_DEV(ctx);
+    int ret;
+
+    if (req->use_pbuf) {
+        ret = sec_cipher_pbuf_map(ctx, req, src);
+        c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+        c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+        if (ctx->alg_type == SEC_AEAD) {
+            a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
+            a_req->out_mac_dma = res->pbuf_dma +
+                    SEC_PBUF_MAC_OFFSET;
+        }
+
+        return ret;
+    }
     c_req->c_ivin = res->c_ivin;
     c_req->c_ivin_dma = res->c_ivin_dma;
     if (ctx->alg_type == SEC_AEAD) {
@@ -642,10 +783,14 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
     struct sec_cipher_req *c_req = &req->c_req;
     struct device *dev = SEC_CTX_DEV(ctx);

-    if (dst != src)
-        hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+    if (req->use_pbuf) {
+        sec_cipher_pbuf_unmap(ctx, req, dst);
+    } else {
+        if (dst != src)
+            hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);

-    hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+        hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+    }
 }

 static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
@@ -844,7 +989,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
         cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
     sec_sqe->type_cipher_auth = bd_type | cipher;

-    sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+    if (req->use_pbuf)
+        sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
+    else
+        sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
     scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
     if (c_req->c_in_dma != c_req->c_out_dma)
         de = 0x1 << SEC_DE_OFFSET;
@@ -852,7 +1000,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
     sec_sqe->sds_sa_type = (de | scene | sa_type);

     /* Just set DST address type */
-    da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+    if (req->use_pbuf)
+        da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+    else
+        da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
     sec_sqe->sdm_addr_type |= da_type;

     sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
@@ -1215,6 +1366,12 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
         return -EINVAL;
     }
     sreq->c_req.c_len = sk_req->cryptlen;
+
+    if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+        sreq->use_pbuf = true;
+    else
+        sreq->use_pbuf = false;
+
     if (c_alg == SEC_CALG_3DES) {
         if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
             dev_err(dev, "skcipher 3des input length error!\n");
@@ -1334,6 +1491,12 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
         return -EINVAL;
     }

+    if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
+        SEC_PBUF_SZ)
+        sreq->use_pbuf = true;
+    else
+        sreq->use_pbuf = false;
+
     /* Support AES only */
     if (unlikely(c_alg != SEC_CALG_AES)) {
         dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
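To make the pbuf sizing concrete, the macro arithmetic can be checked in userspace. The values SEC_IV_SIZE = 24, QM_Q_DEPTH = 1024 and a 4 KB PAGE_SIZE are assumptions taken from this generation of the driver and should be confirmed against the tree:

/* Userspace sanity check of the pbuf layout macros (assumed values). */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE          4096    /* assumption: 4 KB pages */
#define QM_Q_DEPTH         1024    /* assumption: queue depth */
#define SEC_IV_SIZE        24      /* assumption: from sec_crypto.h */
#define SEC_MAX_MAC_LEN    64
#define SEC_PBUF_SZ        512
#define SEC_PBUF_PKG       (SEC_PBUF_SZ + SEC_IV_SIZE + SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM       (PAGE_SIZE / SEC_PBUF_PKG)
#define SEC_PBUF_PAGE_NUM  (QM_Q_DEPTH / SEC_PBUF_NUM)
#define SEC_PBUF_LEFT_SZ   (SEC_PBUF_PKG * (QM_Q_DEPTH - \
                            SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
#define SEC_TOTAL_PBUF_SZ  (PAGE_SIZE * SEC_PBUF_PAGE_NUM + SEC_PBUF_LEFT_SZ)

int main(void)
{
    assert(SEC_PBUF_PKG == 664);       /* 512 data + 24 IV + 128 MAC */
    assert(SEC_PBUF_NUM == 6);         /* six packages fit per page */
    assert(SEC_PBUF_PAGE_NUM == 170);  /* full pages for 1020 slots */
    assert(SEC_PBUF_LEFT_SZ == 4 * SEC_PBUF_PKG); /* 4 leftover slots */
    printf("total pbuf: %d bytes for %d queue slots\n",
           SEC_TOTAL_PBUF_SZ, QM_Q_DEPTH);
    return 0;
}

This also explains the loop bound in sec_alloc_pbuf_resource(): it iterates one page past SEC_PBUF_PAGE_NUM to cover the leftover slots, relying on the k == QM_Q_DEPTH break to stop.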