From patchwork Thu Jul 30 12:11:06 2015
From: Rameshwar Prasad Sahu <rsahu@apm.com>
To: vinod.koul@intel.com, dan.j.williams@intel.com, herbert@gondor.apana.org.au,
	davem@davemloft.net
Cc: linux-crypto@vger.kernel.org, dmaengine@vger.kernel.org, arnd@arndb.de,
	linux-kernel@vger.kernel.org, devicetree@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, jcm@redhat.com, patches@apm.com,
	Rameshwar Prasad Sahu <rsahu@apm.com>
Subject: [PATCH v1 2/4] dmaengine: xgene-dma: Add support for CRC32C calculation via DMA engine
Date: Thu, 30 Jul 2015 17:41:06 +0530
Message-Id: <1438258268-3099-3-git-send-email-rsahu@apm.com>
In-Reply-To: <1438258268-3099-1-git-send-email-rsahu@apm.com>
References: <1438258268-3099-1-git-send-email-rsahu@apm.com>

This patch adds CRC32C support to the APM X-Gene SoC DMA engine driver.
The DMA engine in the SoC can compute CRC32C in hardware, so the driver
exposes this capability through the dmaengine framework.
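For reviewers, here is a minimal sketch of how a dmaengine client could
drive this capability. It assumes the DMA_CRC32C transaction type and the
device_prep_dma_crc32c hook introduced elsewhere in this series; the
helper name example_crc32c and the polling-based completion are
illustrative only and not part of this patch:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Compute CRC32C over one buffer; result must point to 4 bytes */
static int example_crc32c(void *buf, size_t len, u32 seed, u8 *result)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret = 0;

	/* Find a channel advertising CRC32C (the X-Gene flyby channel) */
	dma_cap_zero(mask);
	dma_cap_set(DMA_CRC32C, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Hand the source buffer to the engine as a one-entry sg list */
	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE)) {
		ret = -ENOMEM;
		goto release;
	}

	tx = chan->device->device_prep_dma_crc32c(chan, &sg, len, seed,
						  result, DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto unmap;
	}
	dma_async_issue_pending(chan);

	/* Busy-wait for brevity; a real client would use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	/* On completion the driver has stored the 32-bit CRC in *result */
unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
release:
	dma_release_channel(chan);
	return ret;
}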
Signed-off-by: Rameshwar Prasad Sahu <rsahu@apm.com>
---
 drivers/dma/xgene-dma.c | 317 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 302 insertions(+), 15 deletions(-)

diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index d0a148d..b7c8813 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -22,6 +22,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/bitrev.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -41,6 +42,7 @@
 #define XGENE_DMA_RING_ID_SETUP(v)		((v) | BIT(31))
 #define XGENE_DMA_RING_ID_BUF			0x0C
 #define XGENE_DMA_RING_ID_BUF_SETUP(v)		(((v) << 9) | BIT(21))
+#define XGENE_DMA_RING_IS_BUFPOOL		BIT(20)
 #define XGENE_DMA_RING_THRESLD0_SET1		0x30
 #define XGENE_DMA_RING_THRESLD0_SET1_VAL	0X64
 #define XGENE_DMA_RING_THRESLD1_SET1		0x34
@@ -70,6 +72,8 @@
 	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
 #define XGENE_DMA_RING_ADDRH_SET(m, v) \
 	(((u32 *)(m))[3] |= ((v) >> 35))
+#define XGENE_DMA_RING_BUFMODE_SET(m) \
+	(((u32 *)(m))[3] |= ((0x3) << 20))
 #define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
 	(((u32 *)(m))[3] |= BIT(19))
 #define XGENE_DMA_RING_SIZE_SET(m, v) \
@@ -107,6 +111,7 @@
 #define XGENE_DMA_RING_INT2_MASK		0x90B0
 #define XGENE_DMA_RING_INT3_MASK		0x90B8
 #define XGENE_DMA_RING_INT4_MASK		0x90C0
+#define XGENE_DMA_CFG_RING_FQ_ASSOC		0x90DC
 #define XGENE_DMA_CFG_RING_WQ_ASSOC		0x90E0
 #define XGENE_DMA_ASSOC_RING_MNGR1		0xFFFFFFFF
 #define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
@@ -128,6 +133,10 @@
 #define XGENE_DMA_DESC_LERR_POS			60
 #define XGENE_DMA_DESC_BUFLEN_POS		48
 #define XGENE_DMA_DESC_HOENQ_NUM_POS		48
+#define XGENE_DMA_DESC_BD_BIT			BIT(0)
+#define XGENE_DMA_DESC_SD_BIT			BIT(1)
+#define XGENE_DMA_DESC_CRCSEED_POS		8
+#define XGENE_DMA_DESC_FPQ_NUM_POS		32
 #define XGENE_DMA_DESC_ELERR_RD(m)		\
 	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
 #define XGENE_DMA_DESC_LERR_RD(m)		\
@@ -141,21 +150,26 @@
 
 /* X-Gene DMA configurable parameters defines */
 #define XGENE_DMA_RING_NUM		512
 #define XGENE_DMA_BUFNUM		0x0
+#define XGENE_DMA_BUFPOOL_BUFNUM	0x20
 #define XGENE_DMA_CPU_BUFNUM		0x18
 #define XGENE_DMA_RING_OWNER_DMA	0x03
 #define XGENE_DMA_RING_OWNER_CPU	0x0F
 #define XGENE_DMA_RING_TYPE_REGULAR	0x01
+#define XGENE_DMA_RING_TYPE_BUFPOOL	0x02
 #define XGENE_DMA_RING_WQ_DESC_SIZE	32	/* 32 Bytes */
+#define XGENE_DMA_BUFPOOL_DESC_SIZE	16	/* 16 Bytes */
 #define XGENE_DMA_RING_NUM_CONFIG	5
 #define XGENE_DMA_MAX_CHANNEL		4
 #define XGENE_DMA_XOR_CHANNEL		0
 #define XGENE_DMA_PQ_CHANNEL		1
+#define XGENE_DMA_FLYBY_CHANNEL		2
 #define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
 #define XGENE_DMA_XOR_ALIGNMENT		6	/* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC		5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
 #define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
+#define XGENE_DMA_MAX_FLYBY_BYTE_CNT	0x7FFF	/* (32 KB - 1) */
 
 /* X-Gene DMA descriptor error codes */
 #define ERR_DESC_AXI			0x01
@@ -189,9 +203,14 @@
 #define FLYBY_3SRC_XOR			0x90
 #define FLYBY_4SRC_XOR			0xA0
 #define FLYBY_5SRC_XOR			0xB0
+#define FLYBY_CRC16			0x10
+#define FLYBY_ISCSI_CRC32C		0x20
+#define FLYBY_CRC32			0x30
+#define FLYBY_CHECKSUM			0x40
 
 /* X-Gene DMA SW descriptor flags */
 #define XGENE_DMA_FLAG_64B_DESC		BIT(0)
+#define XGENE_DMA_FLAG_FLYBY_ACTIVE	BIT(1)
 
 /* Define to dump X-Gene DMA descriptor */
 #define XGENE_DMA_DESC_DUMP(desc, m)	\
@@ -208,6 +227,11 @@
 #define chan_err(chan, fmt, arg...) \
 	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
 
+struct xgene_dma_desc16 {
+	__le64 m0;
+	__le64 m1;
+};
+
 struct xgene_dma_desc_hw {
 	__le64 m0;
 	__le64 m1;
@@ -234,6 +258,7 @@ struct xgene_dma_ring {
 	u16 slots;
 	u16 dst_ring_num;
 	u32 size;
+	bool is_bufpool;
 	void __iomem *cmd;
 	void __iomem *cmd_base;
 	dma_addr_t desc_paddr;
@@ -241,6 +266,7 @@ struct xgene_dma_ring {
 	enum xgene_dma_ring_cfgsize cfgsize;
 	union {
 		void *desc_vaddr;
+		struct xgene_dma_desc16 *desc16;
 		struct xgene_dma_desc_hw *desc_hw;
 	};
 };
@@ -249,6 +275,7 @@ struct xgene_dma_desc_sw {
 	struct xgene_dma_desc_hw desc1;
 	struct xgene_dma_desc_hw desc2;
 	u32 flags;
+	u8 *flyby_result;
 	struct list_head node;
 	struct list_head tx_list;
 	struct dma_async_tx_descriptor tx;
@@ -278,6 +305,8 @@ struct xgene_dma_desc_sw {
  *	descriptors for further executions
  * @rx_ring: receive ring descriptor that we use to get completed DMA
  *	descriptors during cleanup time
+ * @bufpool: queue which maintains the list of buffers allocated for flyby
+ *	operations needed by the DMA engine
  */
 struct xgene_dma_chan {
 	struct dma_chan dma_chan;
@@ -296,6 +325,7 @@ struct xgene_dma_chan {
 	struct tasklet_struct tasklet;
 	struct xgene_dma_ring tx_ring;
 	struct xgene_dma_ring rx_ring;
+	struct xgene_dma_ring bufpool;
 };
 
 /**
@@ -519,6 +549,102 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
 	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
 }
 
+static u32 xgene_dma_set_flyby_src(__le64 *ext8, struct scatterlist *sg,
+				   dma_addr_t *paddr, u32 *nbytes, u32 offset)
+{
+	u32 len;
+
+	/* Fetch physical address from sg */
+	if (*paddr == 0)
+		*paddr = sg_dma_address(sg);
+
+	len = sg_dma_len(sg) - offset;
+
+	*ext8 |= cpu_to_le64(*paddr);
+	*ext8 |= cpu_to_le64(xgene_dma_encode_len(len));
+
+	if (len <= XGENE_DMA_MAX_BYTE_CNT) {
+		*nbytes -= len;
+		*paddr = 0;
+		return len;
+	}
+
+	*nbytes -= XGENE_DMA_MAX_BYTE_CNT;
+	*paddr += XGENE_DMA_MAX_BYTE_CNT;
+
+	return XGENE_DMA_MAX_BYTE_CNT;
+}
+
+static int xgene_dma_prep_flyby_desc(struct xgene_dma_chan *chan,
+				     struct xgene_dma_desc_sw *desc_sw,
+				     struct scatterlist *sg, u32 nbytes,
+				     u32 seed, u8 opcode)
+{
+	struct xgene_dma_desc_hw *desc1, *desc2;
+	dma_addr_t paddr = 0;
+	u32 len = nbytes;
+	u32 offset = 0;
+	int i;
+
+	/* Get 1st descriptor */
+	desc1 = &desc_sw->desc1;
+	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
+
+	/* Set 1st source address */
+	offset += xgene_dma_set_flyby_src(&desc1->m1, sg, &paddr,
+					  &nbytes, offset);
+
+	if (!nbytes) {
+		desc2 = NULL;
+		goto skip_additional_src;
+	}
+
+	/*
+	 * We still have request length remaining, so we
+	 * need to prepare a 64B descriptor
+	 */
+	desc2 = &desc_sw->desc2;
+	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
+
+	/* Set 2nd to 5th source address */
+	for (i = 0; i < 4 && nbytes; i++) {
+		/* Fetch next sg element */
+		if (!paddr) {
+			sg = sg_next(sg);
+			if (!sg)
+				break;
+			offset = 0;
+		}
+		offset += xgene_dma_set_flyby_src(
+				xgene_dma_lookup_ext8(desc2, i),
+				sg, &paddr, &nbytes, offset);
+	}
+
+	/* Invalidate unused source address fields */
+	for (; i < 4; i++)
+		xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
+
+	/* Check whether the requested buffer was fully processed */
+	if (nbytes) {
+		chan_err(chan, "Src count crossed maximum limit\n");
+		return -EINVAL;
+	}
+
+	/* Update flag that we have prepared a 64B descriptor */
+	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
+
+skip_additional_src:
+	/* Set descriptor parameters for flyby operation */
+	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_BD_BIT);
+	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_SD_BIT);
+	desc1->m2 |= cpu_to_le64(opcode);
+	desc1->m2 |= cpu_to_le64((u64)bitrev32(seed) <<
+				 XGENE_DMA_DESC_CRCSEED_POS);
+	desc1->m3 |= cpu_to_le64(len);
+
+	return 0;
+}
+
 static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct xgene_dma_desc_sw *desc;
@@ -764,7 +890,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 {
 	struct xgene_dma_ring *ring = &chan->rx_ring;
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-	struct xgene_dma_desc_hw *desc_hw;
+	struct xgene_dma_desc_hw *desc_hw1, *desc_hw2;
+	u32 command;
 	u8 status;
 
 	/* Clean already completed and acked descriptors */
@@ -773,22 +900,35 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 	/* Run the callback for each descriptor, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
 		/* Get subsequent hw descriptor from DMA rx ring */
-		desc_hw = &ring->desc_hw[ring->head];
+		desc_hw1 = &ring->desc_hw[ring->head];
 
 		/* Check if this descriptor has been completed */
-		if (unlikely(le64_to_cpu(desc_hw->m0) ==
+		if (unlikely(le64_to_cpu(desc_hw1->m0) ==
 			     XGENE_DMA_DESC_EMPTY_SIGNATURE))
 			break;
 
 		if (++ring->head == ring->slots)
 			ring->head = 0;
 
+		if (le64_to_cpu(desc_hw1->m0) & XGENE_DMA_DESC_NV_BIT) {
+			/* 64B Rx descriptor */
+			desc_hw2 = &ring->desc_hw[ring->head];
+
+			if (++ring->head == ring->slots)
+				ring->head = 0;
+
+			command = 2;
+		} else {
+			desc_hw2 = NULL;
+			command = 1;
+		}
+
 		/* Check if we have any error with DMA transactions */
 		status = XGENE_DMA_DESC_STATUS(
 				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
-					desc_hw->m0)),
+					desc_hw1->m0)),
 				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
-					desc_hw->m0)));
+					desc_hw1->m0)));
 		if (status) {
 			/* Print the DMA error type */
 			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
@@ -803,15 +943,23 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 			XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
 					    "X-Gene DMA TX DESC2: ");
 
-			XGENE_DMA_DESC_DUMP(desc_hw,
+			XGENE_DMA_DESC_DUMP(desc_hw1,
 					    "X-Gene DMA RX ERR DESC: ");
 		}
 
 		/* Notify the hw about this completed descriptor */
-		iowrite32(-1, ring->cmd);
+		iowrite32(-command, ring->cmd);
 
 		/* Mark this hw descriptor as processed */
-		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
+		desc_hw1->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
+		if (desc_hw2)
+			desc_hw2->m0 = cpu_to_le64(
+					XGENE_DMA_DESC_EMPTY_SIGNATURE);
+
+		if (desc_sw->flags & XGENE_DMA_FLAG_FLYBY_ACTIVE) {
+			iowrite32(command, chan->bufpool.cmd);
+			*(__le32 *)desc_sw->flyby_result = (__le32)desc_hw1->m3;
+		}
 
 		xgene_dma_run_tx_complete_actions(chan, desc_sw);
@@ -1182,6 +1330,54 @@ fail:
 	return NULL;
 }
 
+struct dma_async_tx_descriptor *xgene_dma_prep_flyby(
+	struct xgene_dma_chan *chan, struct scatterlist *src_sg,
+	size_t len, u32 seed, u8 *result, unsigned long flags, u8 opcode)
+{
+	struct xgene_dma_desc_sw *desc;
+	int ret;
+
+	if (len > XGENE_DMA_MAX_FLYBY_BYTE_CNT) {
+		chan_err(chan, "Source length is too long 0x%zX\n", len);
+		return NULL;
+	}
+
+	/* Allocate the link descriptor from DMA pool */
+	desc = xgene_dma_alloc_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	/* Prepare DMA flyby descriptor */
+	ret = xgene_dma_prep_flyby_desc(chan, desc, src_sg, len, seed, opcode);
+	if (ret) {
+		xgene_dma_clean_descriptor(chan, desc);
+		return NULL;
+	}
+
+	desc->flags |= XGENE_DMA_FLAG_FLYBY_ACTIVE;
+	desc->tx.flags = flags;
+	desc->flyby_result = result;
+
+	list_add_tail(&desc->node, &desc->tx_list);
+
+	return &desc->tx;
+}
+
+struct dma_async_tx_descriptor *xgene_dma_prep_crc32c(
+	struct dma_chan *dchan, struct scatterlist *src_sg,
+	size_t len, u32 seed, u8 *result, unsigned long flags)
+{
+	struct xgene_dma_chan *chan;
+
+	if (unlikely(!dchan))
+		return NULL;
+
+	chan = to_dma_chan(dchan);
+
+	return xgene_dma_prep_flyby(chan, src_sg, len, seed,
+				    result, flags, FLYBY_ISCSI_CRC32C);
+}
+
 static void xgene_dma_issue_pending(struct dma_chan *dchan)
 {
 	struct xgene_dma_chan *chan = to_dma_chan(dchan);
@@ -1276,15 +1472,22 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
 {
 	void *ring_cfg = ring->state;
 	u64 addr = ring->desc_paddr;
-	u32 i, val;
+	u32 ring_id_buf, i, val;
 
-	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
+	ring->slots = ring->size / (ring->is_bufpool ?
+				    XGENE_DMA_BUFPOOL_DESC_SIZE :
+				    XGENE_DMA_RING_WQ_DESC_SIZE);
 
 	/* Clear DMA ring state */
 	xgene_dma_clr_ring_state(ring);
 
 	/* Set DMA ring type */
-	XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
+	XGENE_DMA_RING_TYPE_SET(ring_cfg, ring->is_bufpool ?
+				XGENE_DMA_RING_TYPE_BUFPOOL :
+				XGENE_DMA_RING_TYPE_REGULAR);
+
+	if (ring->is_bufpool)
+		XGENE_DMA_RING_BUFMODE_SET(ring_cfg);
 
 	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
 		/* Set recombination buffer and timeout */
@@ -1309,8 +1512,13 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
 		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);
 
 	/* Set DMA ring buffer */
-	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
-		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
+	ring_id_buf = XGENE_DMA_RING_ID_BUF_SETUP(ring->num);
+
+	if (ring->is_bufpool)
+		ring_id_buf |= XGENE_DMA_RING_IS_BUFPOOL;
+
+	iowrite32(ring_id_buf, ring->pdma->csr_ring +
+		  XGENE_DMA_RING_ID_BUF);
 
 	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
 		return;
@@ -1405,6 +1613,9 @@ static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
 {
 	xgene_dma_delete_ring_one(&chan->rx_ring);
 	xgene_dma_delete_ring_one(&chan->tx_ring);
+
+	if (chan->id == XGENE_DMA_FLYBY_CHANNEL)
+		xgene_dma_delete_ring_one(&chan->bufpool);
 }
 
 static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
@@ -1436,6 +1647,60 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
 	return 0;
 }
 
+static int xgene_dma_init_bufpool(struct xgene_dma_chan *chan)
+{
+	struct xgene_dma_ring *bufpool = &chan->bufpool;
+	struct xgene_dma_desc16 *desc16;
+	dma_addr_t buf_addr;
+	void *buf;
+	int ret, i;
+
+	/* Create DMA buffer pool */
+	bufpool->owner = XGENE_DMA_RING_OWNER_DMA;
+	bufpool->is_bufpool = true;
+	bufpool->buf_num = XGENE_DMA_BUFPOOL_BUFNUM;
+
+	ret = xgene_dma_create_ring_one(chan, bufpool,
+					XGENE_DMA_RING_CFG_SIZE_64KB);
+	if (ret)
+		return ret;
+
+	bufpool->dst_ring_num = XGENE_DMA_RING_DST_ID(bufpool->num);
+
+	dev_dbg(chan->dev,
+		"Bufpool ring id 0x%X num %d desc 0x%p\n",
+		bufpool->id, bufpool->num, bufpool->desc_vaddr);
+
+	for (i = 0; i < bufpool->slots; i++) {
+		desc16 = &bufpool->desc16[i];
+		memset(desc16, 0, sizeof(struct xgene_dma_desc16));
+		buf = devm_kzalloc(chan->dev,
+				   XGENE_DMA_MAX_BYTE_CNT, GFP_KERNEL);
+		if (!buf) {
+			xgene_dma_delete_ring_one(bufpool);
+			return -ENOMEM;
+		}
+
+		buf_addr = dma_map_single(chan->dev, buf,
+					  XGENE_DMA_MAX_BYTE_CNT,
+					  DMA_TO_DEVICE);
+
+		desc16->m0 |= cpu_to_le64((u64)bufpool->dst_ring_num <<
+					  XGENE_DMA_DESC_FPQ_NUM_POS);
+		desc16->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
+		desc16->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
+					  XGENE_DMA_DESC_RTYPE_POS);
+		desc16->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
+		desc16->m1 |= cpu_to_le64(buf_addr);
+		desc16->m1 |= cpu_to_le64(xgene_dma_encode_len(
+					  XGENE_DMA_MAX_BYTE_CNT));
+	}
+
+	iowrite32(bufpool->slots, bufpool->cmd);
+
+	return 0;
+}
+
 static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 {
 	struct xgene_dma_ring *rx_ring = &chan->rx_ring;
@@ -1444,6 +1709,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 
 	/* Create DMA Rx ring descriptor */
 	rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
+	rx_ring->is_bufpool = false;
 	rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
 
 	ret = xgene_dma_create_ring_one(chan, rx_ring,
@@ -1456,6 +1722,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 
 	/* Create DMA Tx ring descriptor */
 	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
+	tx_ring->is_bufpool = false;
 	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
 
 	ret = xgene_dma_create_ring_one(chan, tx_ring,
@@ -1474,6 +1741,14 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
 	/* Set the max outstanding request possible to this channel */
 	chan->max_outstanding = rx_ring->slots;
 
+	if (chan->id == XGENE_DMA_FLYBY_CHANNEL) {
+		ret = xgene_dma_init_bufpool(chan);
+		if (ret) {
+			xgene_dma_delete_ring_one(rx_ring);
+			xgene_dma_delete_ring_one(tx_ring);
+		}
+	}
+
 	return ret;
 }
 
@@ -1562,6 +1837,8 @@ static void xgene_dma_init_hw(struct xgene_dma *pdma)
 
 	/* Associate DMA ring to corresponding ring HW */
 	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
+		  pdma->csr_dma + XGENE_DMA_CFG_RING_FQ_ASSOC);
+	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
 		  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
 
 	/* Configure RAID6 polynomial control setting */
@@ -1730,6 +2007,9 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
 	}
 
+	if (chan->id == XGENE_DMA_FLYBY_CHANNEL)
+		dma_cap_set(DMA_CRC32C, dma_dev->cap_mask);
+
 	/* Set base and prep routines */
 	dma_dev->dev = chan->dev;
 	dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
@@ -1750,6 +2030,9 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
 		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
 		dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
 	}
+
+	if (dma_has_cap(DMA_CRC32C, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_crc32c = xgene_dma_prep_crc32c;
 }
 
 static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
@@ -1789,11 +2072,13 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
 
 	/* DMA capability info */
 	dev_info(pdma->dev,
-		 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
+		 "%s: CAPABILITY ( %s%s%s%s%s)\n",
+		 dma_chan_name(&chan->dma_chan),
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
 		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
-		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
+		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "",
+		 dma_has_cap(DMA_CRC32C, dma_dev->cap_mask) ? "CRC32C " : "");
 
 	return 0;
 }

--
1.8.2.1