From patchwork Thu Aug 1 14:35:03 2024
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Cc: nbd@nbd.name, lorenzo.bianconi83@gmail.com, davem@davemloft.net,
    edumazet@google.com, kuba@kernel.org, pabeni@redhat.com,
    linux-arm-kernel@lists.infradead.org, upstream@airoha.com,
    angelogioacchino.delregno@collabora.com, benjamin.larsson@genexis.eu,
    rkannoth@marvell.com, sgoutham@marvell.com, andrew@lunn.ch,
    arnd@arndb.de, horms@kernel.org
Subject: [PATCH v2 net-next 1/8] net: airoha: Introduce airoha_qdma struct
Date: Thu, 1 Aug 2024 16:35:03 +0200
Message-ID: <7df163bdc72ee29c3d27a0cbf54522ffeeafe53c.1722522582.git.lorenzo@kernel.org>

Introduce the airoha_qdma struct and move the QDMA I/O register mapping
into it. This is a preliminary patch to enable both QDMA controllers
available on the EN7581 SoC.
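To make the refactor easier to follow, here is a minimal, hypothetical
sketch of the pattern this patch introduces (the example_* names are
illustrative, not the driver's): the MMIO base moves out of the top-level
device structure into a small per-controller structure, and the register
helpers take that structure, so each call site names the controller it
touches.

/* Illustrative sketch only, not the driver's actual code. */
#include <linux/io.h>

struct example_qdma {
    void __iomem *regs;             /* per-controller MMIO base */
};

struct example_eth {
    struct example_qdma qdma[2];    /* one slot per QDMA controller */
};

static u32 example_qdma_rr(struct example_qdma *qdma, u32 offset)
{
    return readl(qdma->regs + offset);      /* controller-relative read */
}

static void example_qdma_wr(struct example_qdma *qdma, u32 offset, u32 val)
{
    writel(val, qdma->regs + offset);       /* controller-relative write */
}

With this shape, supporting a second controller is mostly a matter of
growing the array and passing &eth->qdma[1] where needed, which is what the
rest of the series builds toward.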
Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 197 ++++++++++++---------
 1 file changed, 112 insertions(+), 85 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 1c5b85a86df1..7add08bac8cf 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -18,6 +18,7 @@
 #include

 #define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_QDMA 1
 #define AIROHA_MAX_NUM_RSTS 3
 #define AIROHA_MAX_NUM_XSI_RSTS 5
 #define AIROHA_MAX_MTU 2000
@@ -782,6 +783,10 @@ struct airoha_hw_stats {
     u64 rx_len[7];
 };

+struct airoha_qdma {
+    void __iomem *regs;
+};
+
 struct airoha_gdm_port {
     struct net_device *dev;
     struct airoha_eth *eth;
@@ -794,8 +799,6 @@ struct airoha_eth {
     struct device *dev;

     unsigned long state;
-
-    void __iomem *qdma_regs;
     void __iomem *fe_regs;

     /* protect concurrent irqmask accesses */
@@ -806,6 +809,7 @@ struct airoha_eth {
     struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
     struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];

+    struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
     struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];

     struct net_device *napi_dev;
@@ -850,16 +854,16 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
 #define airoha_fe_clear(eth, offset, val) \
     airoha_rmw((eth)->fe_regs, (offset), (val), 0)

-#define airoha_qdma_rr(eth, offset) \
-    airoha_rr((eth)->qdma_regs, (offset))
-#define airoha_qdma_wr(eth, offset, val) \
-    airoha_wr((eth)->qdma_regs, (offset), (val))
-#define airoha_qdma_rmw(eth, offset, mask, val) \
-    airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
-#define airoha_qdma_set(eth, offset, val) \
-    airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
-#define airoha_qdma_clear(eth, offset, val) \
-    airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
+#define airoha_qdma_rr(qdma, offset) \
+    airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+    airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+    airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+    airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+    airoha_rmw((qdma)->regs, (offset), (val), 0)

 static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
                                     u32 clear, u32 set)
@@ -873,11 +877,12 @@ static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
     eth->irqmask[index] &= ~clear;
     eth->irqmask[index] |= set;
-    airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+    airoha_qdma_wr(&eth->qdma[0], REG_INT_ENABLE(index),
+                   eth->irqmask[index]);
     /* Read irq_enable register in order to guarantee the update above
      * completes in the spinlock critical section.
      */
-    airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+    airoha_qdma_rr(&eth->qdma[0], REG_INT_ENABLE(index));

     spin_unlock_irqrestore(&eth->irq_lock, flags);
 }
@@ -1383,6 +1388,7 @@ static int airoha_fe_init(struct airoha_eth *eth)
 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
 {
     enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+    struct airoha_qdma *qdma = &q->eth->qdma[0];
     struct airoha_eth *eth = q->eth;
     int qid = q - &eth->q_rx[0];
     int nframes = 0;
@@ -1420,7 +1426,8 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
         WRITE_ONCE(desc->msg2, 0);
         WRITE_ONCE(desc->msg3, 0);

-        airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+        airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid),
+                        RX_RING_CPU_IDX_MASK,
                         FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
     }
@@ -1529,7 +1536,8 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
 }

 static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
-                                     struct airoha_queue *q, int ndesc)
+                                     struct airoha_queue *q,
+                                     struct airoha_qdma *qdma, int ndesc)
 {
     const struct page_pool_params pp_params = {
         .order = 0,
@@ -1568,14 +1576,15 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
     netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);

-    airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
-    airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+    airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
+    airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
+                    RX_RING_SIZE_MASK,
                     FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

     thr = clamp(ndesc >> 3, 1, 32);
-    airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+    airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
                     FIELD_PREP(RX_RING_THR_MASK, thr));
-    airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+    airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
                     FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));

     airoha_qdma_fill_rx_queue(q);
@@ -1599,7 +1608,8 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
     }
 }

-static int airoha_qdma_init_rx(struct airoha_eth *eth)
+static int airoha_qdma_init_rx(struct airoha_eth *eth,
+                               struct airoha_qdma *qdma)
 {
     int i;
@@ -1612,7 +1622,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth)
         }

         err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
-                                        RX_DSCP_NUM(i));
+                                        qdma, RX_DSCP_NUM(i));
         if (err)
             return err;
     }
@@ -1623,11 +1633,13 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
 {
     struct airoha_tx_irq_queue *irq_q;
+    struct airoha_qdma *qdma;
     struct airoha_eth *eth;
     int id, done = 0;

     irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
     eth = irq_q->eth;
+    qdma = &eth->qdma[0];
     id = irq_q - &eth->q_tx_irq[0];

     while (irq_q->queued > 0 && done < budget) {
@@ -1697,9 +1709,9 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
         int i, len = done >> 7;

         for (i = 0; i < len; i++)
-            airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+            airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
                             IRQ_CLEAR_LEN_MASK, 0x80);
-        airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+        airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(id),
                         IRQ_CLEAR_LEN_MASK, (done & 0x7f));
     }
@@ -1711,7 +1723,8 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
 }

 static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
-                                     struct airoha_queue *q, int size)
+                                     struct airoha_queue *q,
+                                     struct airoha_qdma *qdma, int size)
 {
     int i, qid = q - &eth->q_tx[0];
     dma_addr_t dma_addr;
@@ -1738,10 +1751,10 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
         WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
     }

-    airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
-    airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+    airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
+    airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
                     FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
-    airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+    airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
                     FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));

     return 0;
@@ -1749,7 +1762,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,

 static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
                                    struct airoha_tx_irq_queue *irq_q,
-                                   int size)
+                                   struct airoha_qdma *qdma, int size)
 {
     int id = irq_q - &eth->q_tx_irq[0];
     dma_addr_t dma_addr;
@@ -1765,29 +1778,30 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
     irq_q->size = size;
     irq_q->eth = eth;

-    airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
-    airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+    airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
+    airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
                     FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
-    airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+    airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
                     FIELD_PREP(TX_IRQ_THR_MASK, 1));

     return 0;
 }

-static int airoha_qdma_init_tx(struct airoha_eth *eth)
+static int airoha_qdma_init_tx(struct airoha_eth *eth,
+                               struct airoha_qdma *qdma)
 {
     int i, err;

     for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
         err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
-                                      IRQ_QUEUE_LEN(i));
+                                      qdma, IRQ_QUEUE_LEN(i));
         if (err)
             return err;
     }

     for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
         err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
-                                        TX_DSCP_NUM);
+                                        qdma, TX_DSCP_NUM);
         if (err)
             return err;
     }
@@ -1814,7 +1828,8 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
     spin_unlock_bh(&q->lock);
 }

-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
+                                        struct airoha_qdma *qdma)
 {
     dma_addr_t dma_addr;
     u32 status;
@@ -1826,7 +1841,7 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
     if (!eth->hfwd.desc)
         return -ENOMEM;

-    airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+    airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);

     size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
     eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
@@ -1834,14 +1849,14 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
     if (!eth->hfwd.q)
         return -ENOMEM;

-    airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+    airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

-    airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+    airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
                     HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
                     FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
-    airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+    airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
                     FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
-    airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+    airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
                     LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
                     HW_FWD_DESC_NUM_MASK,
                     FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
@@ -1849,67 +1864,69 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)

     return read_poll_timeout(airoha_qdma_rr, status,
                              !(status & LMGR_INIT_START), USEC_PER_MSEC,
-                             30 * USEC_PER_MSEC, true, eth,
+                             30 * USEC_PER_MSEC, true, qdma,
                              REG_LMGR_INIT_CFG);
 }

-static void airoha_qdma_init_qos(struct airoha_eth *eth)
+static void airoha_qdma_init_qos(struct airoha_eth *eth,
+                                 struct airoha_qdma *qdma)
 {
-    airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
-    airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+    airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+    airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);

-    airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+    airoha_qdma_clear(qdma, REG_PSE_BUF_USAGE_CFG,
                       PSE_BUF_ESTIMATE_EN_MASK);

-    airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+    airoha_qdma_set(qdma, REG_EGRESS_RATE_METER_CFG,
                     EGRESS_RATE_METER_EN_MASK |
                     EGRESS_RATE_METER_EQ_RATE_EN_MASK);
     /* 2047us x 31 = 63.457ms */
-    airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+    airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
                     EGRESS_RATE_METER_WINDOW_SZ_MASK,
                     FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
-    airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+    airoha_qdma_rmw(qdma, REG_EGRESS_RATE_METER_CFG,
                     EGRESS_RATE_METER_TIMESLICE_MASK,
                     FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));

     /* ratelimit init */
-    airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+    airoha_qdma_set(qdma, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
     /* fast-tick 25us */
-    airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+    airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
                     FIELD_PREP(GLB_FAST_TICK_MASK, 25));
-    airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+    airoha_qdma_rmw(qdma, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
                     FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));

-    airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
-    airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+    airoha_qdma_set(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+    airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
                     FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
-    airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+    airoha_qdma_rmw(qdma, REG_EGRESS_TRTCM_CFG,
                     EGRESS_SLOW_TICK_RATIO_MASK,
                     FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));

-    airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
-    airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+    airoha_qdma_set(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+    airoha_qdma_clear(qdma, REG_INGRESS_TRTCM_CFG,
                       INGRESS_TRTCM_MODE_MASK);
-    airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+    airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
                     FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
-    airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+    airoha_qdma_rmw(qdma, REG_INGRESS_TRTCM_CFG,
                     INGRESS_SLOW_TICK_RATIO_MASK,
                     FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));

-    airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
-    airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+    airoha_qdma_set(qdma, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+    airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
                     FIELD_PREP(SLA_FAST_TICK_MASK, 25));
-    airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+    airoha_qdma_rmw(qdma, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
                     FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
 }

-static int airoha_qdma_hw_init(struct airoha_eth *eth)
+static int airoha_qdma_hw_init(struct airoha_eth *eth,
+                               struct airoha_qdma *qdma)
 {
     int i;

     /* clear pending irqs */
     for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
-        airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+        airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);

     /* setup irqs */
     airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
@@ -1922,14 +1939,14 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
             continue;

         if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
-            airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+            airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
                             TX_RING_IRQ_BLOCKING_CFG_MASK);
         else
-            airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+            airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
                               TX_RING_IRQ_BLOCKING_CFG_MASK);
     }

-    airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+    airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
                    GLOBAL_CFG_RX_2B_OFFSET_MASK |
                    FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
                    GLOBAL_CFG_CPU_TXR_RR_MASK |
@@ -1940,18 +1957,18 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth)
                    GLOBAL_CFG_TX_WB_DONE_MASK |
                    FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

-    airoha_qdma_init_qos(eth);
+    airoha_qdma_init_qos(eth, qdma);

     /* disable qdma rx delay interrupt */
     for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
         if (!eth->q_rx[i].ndesc)
             continue;

-        airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+        airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
                           RX_DELAY_INT_MASK);
     }

-    airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+    airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
                     TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);

     return 0;
@@ -1961,12 +1978,14 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 {
     struct airoha_eth *eth = dev_instance;
     u32 intr[ARRAY_SIZE(eth->irqmask)];
+    struct airoha_qdma *qdma;
     int i;

+    qdma = &eth->qdma[0];
     for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
-        intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
+        intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
         intr[i] &= eth->irqmask[i];
-        airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+        airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
     }

     if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
@@ -1996,7 +2015,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
             airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
                                     TX_DONE_INT_MASK(i));

-            status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+            status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
             head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
             irq_q->head = head % irq_q->size;
             irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
@@ -2010,6 +2029,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)

 static int airoha_qdma_init(struct airoha_eth *eth)
 {
+    struct airoha_qdma *qdma = &eth->qdma[0];
     int err;

     err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
@@ -2017,19 +2037,19 @@ static int airoha_qdma_init(struct airoha_eth *eth)
     if (err)
         return err;

-    err = airoha_qdma_init_rx(eth);
+    err = airoha_qdma_init_rx(eth, qdma);
     if (err)
         return err;

-    err = airoha_qdma_init_tx(eth);
+    err = airoha_qdma_init_tx(eth, qdma);
     if (err)
         return err;

-    err = airoha_qdma_init_hfwd_queues(eth);
+    err = airoha_qdma_init_hfwd_queues(eth, qdma);
     if (err)
         return err;

-    err = airoha_qdma_hw_init(eth);
+    err = airoha_qdma_hw_init(eth, qdma);
     if (err)
         return err;
@@ -2262,8 +2282,9 @@ static int airoha_dev_open(struct net_device *dev)
     airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
                     GDM_STAG_EN_MASK);

-    airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
-    airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+    airoha_qdma_set(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
+                    GLOBAL_CFG_TX_DMA_EN_MASK |
+                    GLOBAL_CFG_RX_DMA_EN_MASK);

     return 0;
 }
@@ -2279,8 +2300,9 @@ static int airoha_dev_stop(struct net_device *dev)
     if (err)
         return err;

-    airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
-    airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+    airoha_qdma_clear(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
+                      GLOBAL_CFG_TX_DMA_EN_MASK |
+                      GLOBAL_CFG_RX_DMA_EN_MASK);

     return 0;
 }
@@ -2340,6 +2362,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
     struct airoha_eth *eth = port->eth;
     u32 nr_frags = 1 + sinfo->nr_frags;
     struct netdev_queue *txq;
+    struct airoha_qdma *qdma;
     struct airoha_queue *q;
     void *data = skb->data;
     u16 index;
@@ -2367,6 +2390,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
     msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
            FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

+    qdma = &eth->qdma[0];
     q = &eth->q_tx[qid];
     if (WARN_ON_ONCE(!q->ndesc))
         goto error;
@@ -2411,7 +2435,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
         e->dma_addr = addr;
         e->dma_len = len;

-        airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+        airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid),
+                        TX_RING_CPU_IDX_MASK,
                         FIELD_PREP(TX_RING_CPU_IDX_MASK, index));

         data = skb_frag_address(frag);
@@ -2613,9 +2638,11 @@ static int airoha_probe(struct platform_device *pdev)
         return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
                              "failed to iomap fe regs\n");

-    eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
-    if (IS_ERR(eth->qdma_regs))
-        return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
+    eth->qdma[0].regs = devm_platform_ioremap_resource_byname(pdev,
+                                                              "qdma0");
+    if (IS_ERR(eth->qdma[0].regs))
+        return dev_err_probe(eth->dev,
+                             PTR_ERR(eth->qdma[0].regs),
                              "failed to iomap qdma regs\n");

     eth->rsts[0].id = "fe";

From patchwork Thu Aug 1 14:35:04 2024
From: Lorenzo Bianconi
Subject: [PATCH v2 net-next 2/8] net: airoha: Move airoha_queues in airoha_qdma
Date: Thu, 1 Aug 2024 16:35:04 +0200
Message-ID: <795fc4797bffbf7f0a1351308aa9bf0e65b5126e.1722522582.git.lorenzo@kernel.org>

The QDMA controllers available on the EN7581 SoC have independent tx/rx
hw queues, so move those queues into the airoha_qdma structure.
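A side note for readers: once the rings are embedded in the controller
structure, a queue's index is recovered by pointer arithmetic against the
embedded array, which is the idiom behind the qid computations in the diff
below. A hypothetical sketch (example_* names are illustrative):

struct example_queue {
    int ndesc;
};

struct example_qdma {
    struct example_queue q_rx[16];
};

/* Index of q within the controller's embedded rx ring array. */
static int example_rx_qid(struct example_qdma *qdma, struct example_queue *q)
{
    return q - &qdma->q_rx[0];
}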
Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 126 +++++++++++----------
 1 file changed, 65 insertions(+), 61 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 7add08bac8cf..fc6712216c47 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -785,6 +785,17 @@ struct airoha_hw_stats {
 struct airoha_qdma {
     void __iomem *regs;
+
+    struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+    struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+    struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+    /* descriptor and packet buffers for qdma hw forward */
+    struct {
+        void *desc;
+        void *q;
+    } hfwd;
 };

 struct airoha_gdm_port {
@@ -809,20 +820,10 @@ struct airoha_eth {
     struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
     struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];

-    struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
-    struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-
     struct net_device *napi_dev;
-    struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
-    struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
-    struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];

-    /* descriptor and packet buffers for qdma hw forward */
-    struct {
-        void *desc;
-        void *q;
-    } hfwd;
+    struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+    struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
 };

 static u32 airoha_rr(void __iomem *base, u32 offset)
@@ -1390,7 +1391,7 @@ static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
     enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
     struct airoha_qdma *qdma = &q->eth->qdma[0];
     struct airoha_eth *eth = q->eth;
-    int qid = q - &eth->q_rx[0];
+    int qid = q - &qdma->q_rx[0];
     int nframes = 0;

     while (q->queued < q->ndesc - 1) {
@@ -1457,8 +1458,9 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 {
     enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+    struct airoha_qdma *qdma = &q->eth->qdma[0];
     struct airoha_eth *eth = q->eth;
-    int qid = q - &eth->q_rx[0];
+    int qid = q - &qdma->q_rx[0];
     int done = 0;

     while (done < budget) {
@@ -1549,7 +1551,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
         .dev = eth->dev,
         .napi = &q->napi,
     };
-    int qid = q - &eth->q_rx[0], thr;
+    int qid = q - &qdma->q_rx[0], thr;
     dma_addr_t dma_addr;

     q->buf_size = PAGE_SIZE / 2;
@@ -1613,7 +1615,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth,
 {
     int i;

-    for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+    for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
         int err;

         if (!(RX_DONE_INT_MASK & BIT(i))) {
@@ -1621,7 +1623,7 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth,
             continue;
         }

-        err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+        err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
                                         qdma, RX_DSCP_NUM(i));
         if (err)
             return err;
@@ -1640,7 +1642,7 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
     irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
     eth = irq_q->eth;
     qdma = &eth->qdma[0];
-    id = irq_q - &eth->q_tx_irq[0];
+    id = irq_q - &qdma->q_tx_irq[0];

     while (irq_q->queued > 0 && done < budget) {
         u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1657,10 +1659,10 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
         last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
         qid = FIELD_GET(IRQ_RING_IDX_MASK, val);

-        if (qid >= ARRAY_SIZE(eth->q_tx))
+        if (qid >= ARRAY_SIZE(qdma->q_tx))
             continue;

-        q = &eth->q_tx[qid];
+        q = &qdma->q_tx[qid];
         if (!q->ndesc)
             continue;
@@ -1726,7 +1728,7 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
                                      struct airoha_queue *q,
                                      struct airoha_qdma *qdma, int size)
 {
-    int i, qid = q - &eth->q_tx[0];
+    int i, qid = q - &qdma->q_tx[0];
     dma_addr_t dma_addr;

     spin_lock_init(&q->lock);
@@ -1764,7 +1766,7 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
                                    struct airoha_tx_irq_queue *irq_q,
                                    struct airoha_qdma *qdma, int size)
 {
-    int id = irq_q - &eth->q_tx_irq[0];
+    int id = irq_q - &qdma->q_tx_irq[0];
     dma_addr_t dma_addr;

     netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1792,15 +1794,15 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth,
 {
     int i, err;

-    for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-        err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+    for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+        err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
                                       qdma, IRQ_QUEUE_LEN(i));
         if (err)
             return err;
     }

-    for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-        err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+    for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+        err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
                                         qdma, TX_DSCP_NUM);
         if (err)
             return err;
@@ -1836,17 +1838,17 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
     int size;

     size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
-    eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-                                         GFP_KERNEL);
-    if (!eth->hfwd.desc)
+    qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+                                          GFP_KERNEL);
+    if (!qdma->hfwd.desc)
         return -ENOMEM;

     airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);

     size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
-    eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
-                                      GFP_KERNEL);
-    if (!eth->hfwd.q)
+    qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+                                       GFP_KERNEL);
+    if (!qdma->hfwd.q)
         return -ENOMEM;

     airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
@@ -1934,8 +1936,8 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
     airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);

     /* setup irq binding */
-    for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-        if (!eth->q_tx[i].ndesc)
+    for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+        if (!qdma->q_tx[i].ndesc)
             continue;

         if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
@@ -1960,8 +1962,8 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
     airoha_qdma_init_qos(eth, qdma);

     /* disable qdma rx delay interrupt */
-    for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-        if (!eth->q_rx[i].ndesc)
+    for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+        if (!qdma->q_rx[i].ndesc)
             continue;

         airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
@@ -1995,18 +1997,18 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
         airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
                                 RX_DONE_INT_MASK);

-        for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-            if (!eth->q_rx[i].ndesc)
+        for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+            if (!qdma->q_rx[i].ndesc)
                 continue;

             if (intr[1] & BIT(i))
-                napi_schedule(&eth->q_rx[i].napi);
+                napi_schedule(&qdma->q_rx[i].napi);
         }
     }

     if (intr[0] & INT_TX_MASK) {
-        for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-            struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+        for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+            struct airoha_tx_irq_queue *irq_q = &qdma->q_tx_irq[i];
             u32 status, head;

             if (!(intr[0] & TX_DONE_INT_MASK(i)))
@@ -2020,7 +2022,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
             irq_q->head = head % irq_q->size;
             irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);

-            napi_schedule(&eth->q_tx_irq[i].napi);
+            napi_schedule(&qdma->q_tx_irq[i].napi);
         }
     }
@@ -2079,44 +2081,46 @@ static int airoha_hw_init(struct airoha_eth *eth)

 static void airoha_hw_cleanup(struct airoha_eth *eth)
 {
+    struct airoha_qdma *qdma = &eth->qdma[0];
     int i;

-    for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-        if (!eth->q_rx[i].ndesc)
+    for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+        if (!qdma->q_rx[i].ndesc)
             continue;

-        napi_disable(&eth->q_rx[i].napi);
-        netif_napi_del(&eth->q_rx[i].napi);
-        airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
-        if (eth->q_rx[i].page_pool)
-            page_pool_destroy(eth->q_rx[i].page_pool);
+        napi_disable(&qdma->q_rx[i].napi);
+        netif_napi_del(&qdma->q_rx[i].napi);
+        airoha_qdma_cleanup_rx_queue(&qdma->q_rx[i]);
+        if (qdma->q_rx[i].page_pool)
+            page_pool_destroy(qdma->q_rx[i].page_pool);
     }

-    for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
-        napi_disable(&eth->q_tx_irq[i].napi);
-        netif_napi_del(&eth->q_tx_irq[i].napi);
+    for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
+        napi_disable(&qdma->q_tx_irq[i].napi);
+        netif_napi_del(&qdma->q_tx_irq[i].napi);
     }

-    for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
-        if (!eth->q_tx[i].ndesc)
+    for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+        if (!qdma->q_tx[i].ndesc)
             continue;

-        airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+        airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
     }
 }

 static void airoha_qdma_start_napi(struct airoha_eth *eth)
 {
+    struct airoha_qdma *qdma = &eth->qdma[0];
     int i;

-    for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
-        napi_enable(&eth->q_tx_irq[i].napi);
+    for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
+        napi_enable(&qdma->q_tx_irq[i].napi);

-    for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
-        if (!eth->q_rx[i].ndesc)
+    for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+        if (!qdma->q_rx[i].ndesc)
             continue;

-        napi_enable(&eth->q_rx[i].napi);
+        napi_enable(&qdma->q_rx[i].napi);
     }
 }
@@ -2391,7 +2395,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
         FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

     qdma = &eth->qdma[0];
-    q = &eth->q_tx[qid];
+    q = &qdma->q_tx[qid];
     if (WARN_ON_ONCE(!q->ndesc))
         goto error;

From patchwork Thu Aug 1 14:35:05 2024
From: Lorenzo Bianconi
Subject: [PATCH v2 net-next 3/8] net: airoha: Move irq_mask in airoha_qdma structure
Date: Thu, 1 Aug 2024 16:35:05 +0200
Message-ID: <1c8a06e8be605278a7b2f3cd8ac06e74bf5ebf2b.1722522582.git.lorenzo@kernel.org>

The QDMA controllers have independent irq lines, so move the irqmask
into the airoha_qdma structure. This is a preliminary patch to support
multiple QDMA controllers.
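The per-controller state being duplicated here is the classic cached
irq-mask pattern: keep a software copy of the enable mask under a
controller-local spinlock and read the register back so the posted write
completes inside the critical section. A hedged sketch under assumed names
(example_qdma and EXAMPLE_REG_INT_ENABLE are illustrative, including the
made-up register offset):

#include <linux/io.h>
#include <linux/spinlock.h>

#define EXAMPLE_REG_INT_ENABLE(idx)    (0x0010 + (idx) * 4) /* made-up offset */

struct example_qdma {
    void __iomem *regs;
    spinlock_t irq_lock;    /* protects irqmask */
    u32 irqmask[4];
};

static void example_set_irqmask(struct example_qdma *qdma, int idx,
                                u32 clear, u32 set)
{
    unsigned long flags;

    spin_lock_irqsave(&qdma->irq_lock, flags);
    qdma->irqmask[idx] &= ~clear;
    qdma->irqmask[idx] |= set;
    writel(qdma->irqmask[idx], qdma->regs + EXAMPLE_REG_INT_ENABLE(idx));
    /* read back so the posted write is observed before unlocking */
    readl(qdma->regs + EXAMPLE_REG_INT_ENABLE(idx));
    spin_unlock_irqrestore(&qdma->irq_lock, flags);
}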
Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
 1 file changed, 42 insertions(+), 42 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index fc6712216c47..48d0b44e6d92 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -786,6 +786,11 @@ struct airoha_hw_stats {
 struct airoha_qdma {
     void __iomem *regs;

+    /* protect concurrent irqmask accesses */
+    spinlock_t irq_lock;
+    u32 irqmask[QDMA_INT_REG_MAX];
+    int irq;
+
     struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];

     struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
@@ -812,11 +817,6 @@ struct airoha_eth {
     unsigned long state;
     void __iomem *fe_regs;

-    /* protect concurrent irqmask accesses */
-    spinlock_t irq_lock;
-    u32 irqmask[QDMA_INT_REG_MAX];
-    int irq;
-
     struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
     struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
@@ -866,38 +866,37 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
 #define airoha_qdma_clear(qdma, offset, val) \
     airoha_rmw((qdma)->regs, (offset), (val), 0)

-static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
                                     u32 clear, u32 set)
 {
     unsigned long flags;

-    if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+    if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
         return;

-    spin_lock_irqsave(&eth->irq_lock, flags);
+    spin_lock_irqsave(&qdma->irq_lock, flags);

-    eth->irqmask[index] &= ~clear;
-    eth->irqmask[index] |= set;
-    airoha_qdma_wr(&eth->qdma[0], REG_INT_ENABLE(index),
-                   eth->irqmask[index]);
+    qdma->irqmask[index] &= ~clear;
+    qdma->irqmask[index] |= set;
+    airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
     /* Read irq_enable register in order to guarantee the update above
      * completes in the spinlock critical section.
      */
-    airoha_qdma_rr(&eth->qdma[0], REG_INT_ENABLE(index));
+    airoha_qdma_rr(qdma, REG_INT_ENABLE(index));

-    spin_unlock_irqrestore(&eth->irq_lock, flags);
+    spin_unlock_irqrestore(&qdma->irq_lock, flags);
 }

-static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
                                    u32 mask)
 {
-    airoha_qdma_set_irqmask(eth, index, 0, mask);
+    airoha_qdma_set_irqmask(qdma, index, 0, mask);
 }

-static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
                                     u32 mask)
 {
-    airoha_qdma_set_irqmask(eth, index, mask, 0);
+    airoha_qdma_set_irqmask(qdma, index, mask, 0);
 }

 static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
@@ -1522,7 +1521,7 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
 {
     struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-    struct airoha_eth *eth = q->eth;
+    struct airoha_qdma *qdma = &q->eth->qdma[0];
     int cur, done = 0;

     do {
@@ -1531,7 +1530,7 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
     } while (cur && done < budget);

     if (done < budget && napi_complete(napi))
-        airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+        airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
                                RX_DONE_INT_MASK);

     return done;
@@ -1718,7 +1717,7 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
     }

     if (done < budget && napi_complete(napi))
-        airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+        airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
                                TX_DONE_INT_MASK(id));

     return done;
@@ -1927,13 +1926,13 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
     int i;

     /* clear pending irqs */
-    for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
+    for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
         airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);

     /* setup irqs */
-    airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
-    airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
-    airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+    airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+    airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+    airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);

     /* setup irq binding */
     for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1979,14 +1978,13 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
 static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 {
     struct airoha_eth *eth = dev_instance;
-    u32 intr[ARRAY_SIZE(eth->irqmask)];
-    struct airoha_qdma *qdma;
+    struct airoha_qdma *qdma = &eth->qdma[0];
+    u32 intr[ARRAY_SIZE(qdma->irqmask)];
     int i;

-    qdma = &eth->qdma[0];
-    for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
+    for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
         intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
-        intr[i] &= eth->irqmask[i];
+        intr[i] &= qdma->irqmask[i];
         airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
     }
@@ -1994,7 +1992,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
         return IRQ_NONE;

     if (intr[1] & RX_DONE_INT_MASK) {
-        airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+        airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
                                 RX_DONE_INT_MASK);

         for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
@@ -2014,7 +2012,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
             if (!(intr[0] & TX_DONE_INT_MASK(i)))
                 continue;

-            airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+            airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
                                     TX_DONE_INT_MASK(i));

             status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
@@ -2029,12 +2027,18 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
     return IRQ_HANDLED;
 }

-static int airoha_qdma_init(struct airoha_eth *eth)
+static int airoha_qdma_init(struct platform_device *pdev,
+                            struct airoha_eth *eth)
 {
     struct airoha_qdma *qdma = &eth->qdma[0];
     int err;

-    err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
+    spin_lock_init(&qdma->irq_lock);
+    qdma->irq = platform_get_irq(pdev, 0);
+    if (qdma->irq < 0)
+        return qdma->irq;
+
+    err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
                            IRQF_SHARED, KBUILD_MODNAME, eth);
     if (err)
         return err;
@@ -2060,7 +2064,8 @@ static int airoha_qdma_init(struct airoha_eth *eth)
     return 0;
 }

-static int airoha_hw_init(struct airoha_eth *eth)
+static int airoha_hw_init(struct platform_device *pdev,
+                          struct airoha_eth *eth)
 {
     int err;
@@ -2076,7 +2081,7 @@ static int airoha_hw_init(struct airoha_eth *eth)
     if (err)
         return err;

-    return airoha_qdma_init(eth);
+    return airoha_qdma_init(pdev, eth);
 }

 static void airoha_hw_cleanup(struct airoha_eth *eth)
@@ -2673,11 +2678,6 @@ static int airoha_probe(struct platform_device *pdev)
         return err;
     }

-    spin_lock_init(&eth->irq_lock);
-    eth->irq = platform_get_irq(pdev, 0);
-    if (eth->irq < 0)
-        return eth->irq;
-
     eth->napi_dev = alloc_netdev_dummy(0);
     if (!eth->napi_dev)
         return -ENOMEM;
@@ -2687,7 +2687,7 @@ static int airoha_probe(struct platform_device *pdev)
     strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
     platform_set_drvdata(pdev, eth);

-    err = airoha_hw_init(eth);
+    err = airoha_hw_init(pdev, eth);
     if (err)
         goto error;

From patchwork Thu Aug 1 14:35:06 2024
From: Lorenzo Bianconi
Subject: [PATCH v2 net-next 4/8] net: airoha: Add airoha_qdma pointer in airoha_tx_irq_queue/airoha_queue structures
Date: Thu, 1 Aug 2024 16:35:06 +0200
Message-ID: <074565b82fd0ceefe66e186f21133d825dbd48eb.1722522582.git.lorenzo@kernel.org>

Replace the airoha_eth pointer in the airoha_tx_irq_queue/airoha_queue
structures with an airoha_qdma one, and add an airoha_eth back-pointer to
airoha_qdma. This is a preliminary patch to introduce support for the
multi-QDMA controllers available on EN7581.
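The inversion in this patch boils down to turning the queue's device
back-pointer into a controller back-pointer, with the device reached
through the controller. A minimal sketch under assumed names (example_*
types are illustrative, not the driver's):

struct example_eth;

struct example_qdma {
    struct example_eth *eth;        /* controller -> device */
};

struct example_queue {
    struct example_qdma *qdma;      /* queue -> controller
                                     * (was: queue -> device)
                                     */
};

/* The device stays reachable, one hop further away. */
static struct example_eth *example_queue_to_eth(struct example_queue *q)
{
    return q->qdma->eth;
}

The extra hop is the price for making every queue controller-aware, which
is what later patches in the series need.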
Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
 1 file changed, 41 insertions(+), 43 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 48d0b44e6d92..54515c8a8b03 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -728,7 +728,7 @@ struct airoha_queue_entry {
 };

 struct airoha_queue {
-    struct airoha_eth *eth;
+    struct airoha_qdma *qdma;

     /* protect concurrent queue accesses */
     spinlock_t lock;
@@ -747,7 +747,7 @@ struct airoha_queue {
 };

 struct airoha_tx_irq_queue {
-    struct airoha_eth *eth;
+    struct airoha_qdma *qdma;

     struct napi_struct napi;
     u32 *q;
@@ -784,6 +784,7 @@ struct airoha_hw_stats {
 };

 struct airoha_qdma {
+    struct airoha_eth *eth;
     void __iomem *regs;

     /* protect concurrent irqmask accesses */
@@ -1389,8 +1389,8 @@ static int airoha_fe_init(struct airoha_eth *eth)
 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
 {
     enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-    struct airoha_qdma *qdma = &q->eth->qdma[0];
-    struct airoha_eth *eth = q->eth;
+    struct airoha_qdma *qdma = q->qdma;
+    struct airoha_eth *eth = qdma->eth;
     int qid = q - &qdma->q_rx[0];
     int nframes = 0;
@@ -1457,8 +1458,8 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 {
     enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-    struct airoha_qdma *qdma = &q->eth->qdma[0];
-    struct airoha_eth *eth = q->eth;
+    struct airoha_qdma *qdma = q->qdma;
+    struct airoha_eth *eth = qdma->eth;
     int qid = q - &qdma->q_rx[0];
     int done = 0;
@@ -1521,7 +1522,6 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
 {
     struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-    struct airoha_qdma *qdma = &q->eth->qdma[0];
     int cur, done = 0;

     do {
@@ -1530,14 +1530,13 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
     } while (cur && done < budget);

     if (done < budget && napi_complete(napi))
-        airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
+        airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
                                RX_DONE_INT_MASK);

     return done;
 }

-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
-                                     struct airoha_queue *q,
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
                                      struct airoha_qdma *qdma, int ndesc)
 {
     const struct page_pool_params pp_params = {
@@ -1547,15 +1546,16 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
         .dma_dir = DMA_FROM_DEVICE,
         .max_len = PAGE_SIZE,
         .nid = NUMA_NO_NODE,
-        .dev = eth->dev,
+        .dev = qdma->eth->dev,
         .napi = &q->napi,
     };
+    struct airoha_eth *eth = qdma->eth;
     int qid = q - &qdma->q_rx[0], thr;
     dma_addr_t dma_addr;

     q->buf_size = PAGE_SIZE / 2;
     q->ndesc = ndesc;
-    q->eth = eth;
+    q->qdma = qdma;

     q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
                             GFP_KERNEL);
@@ -1595,7 +1595,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,

 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
 {
-    struct airoha_eth *eth = q->eth;
+    struct airoha_eth *eth = q->qdma->eth;

     while (q->queued) {
         struct airoha_queue_entry *e = &q->entry[q->tail];
@@ -1609,8 +1609,7 @@ static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
     }
 }

-static int airoha_qdma_init_rx(struct airoha_eth *eth,
-                               struct airoha_qdma *qdma)
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
 {
     int i;
@@ -1622,8 +1621,8 @@ static int airoha_qdma_init_rx(struct airoha_eth *eth,
             continue;
         }

-        err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
-                                        qdma, RX_DSCP_NUM(i));
+        err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
+                                        RX_DSCP_NUM(i));
         if (err)
             return err;
     }
@@ -1639,9 +1638,9 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
     int id, done = 0;

     irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-    eth = irq_q->eth;
-    qdma = &eth->qdma[0];
+    qdma = irq_q->qdma;
     id = irq_q - &qdma->q_tx_irq[0];
+    eth = qdma->eth;

     while (irq_q->queued > 0 && done < budget) {
         u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1723,16 +1722,16 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
     return done;
 }

-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
-                                     struct airoha_queue *q,
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
                                      struct airoha_qdma *qdma, int size)
 {
+    struct airoha_eth *eth = qdma->eth;
     int i, qid = q - &qdma->q_tx[0];
     dma_addr_t dma_addr;

     spin_lock_init(&q->lock);
     q->ndesc = size;
-    q->eth = eth;
+    q->qdma = qdma;
     q->free_thr = 1 + MAX_SKB_FRAGS;

     q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
@@ -1761,11 +1760,11 @@ static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
     return 0;
 }

-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
-                                   struct airoha_tx_irq_queue *irq_q,
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
                                    struct airoha_qdma *qdma, int size)
 {
     int id = irq_q - &qdma->q_tx_irq[0];
+    struct airoha_eth *eth = qdma->eth;
     dma_addr_t dma_addr;

     netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1777,7 +1776,7 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
     memset(irq_q->q, 0xff, size * sizeof(u32));
     irq_q->size = size;
-    irq_q->eth = eth;
+    irq_q->qdma = qdma;

     airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
     airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
@@ -1788,21 +1787,20 @@ static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
     return 0;
 }

-static int airoha_qdma_init_tx(struct airoha_eth *eth,
-                               struct airoha_qdma *qdma)
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
 {
     int i, err;

     for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-        err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
-                                      qdma, IRQ_QUEUE_LEN(i));
+        err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
+                                      IRQ_QUEUE_LEN(i));
         if (err)
             return err;
     }

     for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-        err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
-                                        qdma, TX_DSCP_NUM);
+        err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
+                                        TX_DSCP_NUM);
         if (err)
             return err;
     }
@@ -1812,7 +1810,7 @@ static int airoha_qdma_init_tx(struct airoha_eth *eth,

 static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
 {
-    struct airoha_eth *eth = q->eth;
+    struct airoha_eth *eth = q->qdma->eth;

     spin_lock_bh(&q->lock);
     while (q->queued) {
@@ -1829,9 +1827,9 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
     spin_unlock_bh(&q->lock);
 }

-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
-                                        struct airoha_qdma *qdma)
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 {
+    struct airoha_eth *eth = qdma->eth;
     dma_addr_t dma_addr;
     u32 status;
     int size;
@@ -1869,8 +1867,7 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
                              REG_LMGR_INIT_CFG);
 }

-static void airoha_qdma_init_qos(struct airoha_eth *eth,
-                                 struct airoha_qdma *qdma)
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
 {
     airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
     airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
@@ -1920,8 +1917,7 @@ static void airoha_qdma_init_qos(struct airoha_eth *eth,
                     FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
 }

-static int airoha_qdma_hw_init(struct airoha_eth *eth,
-                               struct airoha_qdma *qdma)
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
 {
     int i;
@@ -1958,7 +1954,7 @@ static int airoha_qdma_hw_init(struct airoha_eth *eth,
                    GLOBAL_CFG_TX_WB_DONE_MASK |
                    FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));

-    airoha_qdma_init_qos(eth, qdma);
+    airoha_qdma_init_qos(qdma);

     /* disable qdma rx delay interrupt */
     for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
@@ -2034,6 +2030,8 @@ static int airoha_qdma_init(struct platform_device *pdev,
     int err;

     spin_lock_init(&qdma->irq_lock);
+    qdma->eth = eth;
+
     qdma->irq = platform_get_irq(pdev, 0);
     if (qdma->irq < 0)
         return qdma->irq;
@@ -2043,19 +2041,19 @@ static int airoha_qdma_init(struct platform_device *pdev,
     if (err)
         return err;

-    err = airoha_qdma_init_rx(eth, qdma);
+    err = airoha_qdma_init_rx(qdma);
     if (err)
         return err;

-    err = airoha_qdma_init_tx(eth, qdma);
+    err = airoha_qdma_init_tx(qdma);
     if (err)
         return err;

-    err = airoha_qdma_init_hfwd_queues(eth, qdma);
+    err = airoha_qdma_init_hfwd_queues(qdma);
     if (err)
         return err;

-    err = airoha_qdma_hw_init(eth, qdma);
+    err = airoha_qdma_hw_init(qdma);
     if (err)
         return err;

From patchwork Thu Aug 1 14:35:07 2024
From patchwork Thu Aug 1 14:35:07 2024
X-Patchwork-Id: 13750586
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Subject: [PATCH v2 net-next 5/8] net: airoha: Use qdma pointer as private structure in airoha_irq_handler routine
Date: Thu, 1 Aug 2024 16:35:07 +0200
Message-ID: <1e40c3cb973881c0eb3c3c247c78550da62054ab.1722522582.git.lorenzo@kernel.org>

This is a preliminary patch to support multi-QDMA controllers.
Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 54515c8a8b03..2ded99434a17 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -1973,8 +1973,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
 
 static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 {
-	struct airoha_eth *eth = dev_instance;
-	struct airoha_qdma *qdma = &eth->qdma[0];
+	struct airoha_qdma *qdma = dev_instance;
 	u32 intr[ARRAY_SIZE(qdma->irqmask)];
 	int i;
 
@@ -1984,7 +1983,7 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 		airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
 	}
 
-	if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+	if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
 		return IRQ_NONE;
 
 	if (intr[1] & RX_DONE_INT_MASK) {
@@ -2037,7 +2036,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
 		return qdma->irq;
 
 	err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
-			       IRQF_SHARED, KBUILD_MODNAME, eth);
+			       IRQF_SHARED, KBUILD_MODNAME, qdma);
 	if (err)
 		return err;
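The key idea in this patch is that the dev_id cookie handed to devm_request_irq() is returned verbatim to the handler as dev_instance, so passing the per-controller struct instead of the whole device lets one shared handler service several QDMA instances. A rough sketch of the pattern under hypothetical names (demo_ctrl and DEMO_REG_INT_STATUS are illustrative, not driver symbols):

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    #define DEMO_REG_INT_STATUS	0x00	/* hypothetical register offset */

    struct demo_ctrl {
            void __iomem *regs;
            int irq;
    };

    static irqreturn_t demo_irq_handler(int irq, void *dev_instance)
    {
            /* the cookie identifies the instance, not the device */
            struct demo_ctrl *ctrl = dev_instance;
            u32 status = readl(ctrl->regs + DEMO_REG_INT_STATUS);

            if (!status)
                    return IRQ_NONE;	/* not ours (IRQF_SHARED) */

            /* ack only this instance's pending bits */
            writel(status, ctrl->regs + DEMO_REG_INT_STATUS);
            return IRQ_HANDLED;
    }

    static int demo_init_one(struct platform_device *pdev,
                             struct demo_ctrl *ctrl)
    {
            return devm_request_irq(&pdev->dev, ctrl->irq, demo_irq_handler,
                                    IRQF_SHARED, KBUILD_MODNAME, ctrl);
    }

With the cookie scoped to the instance, the handler never has to guess which register block raised the interrupt.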
From patchwork Thu Aug 1 14:35:08 2024
X-Patchwork-Id: 13750587
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Subject: [PATCH v2 net-next 6/8] net: airoha: Allow mapping IO region for multiple qdma controllers
Date: Thu, 1 Aug 2024 16:35:08 +0200

Map the MMIO regions of both QDMA controllers available on the EN7581
SoC. Run the airoha_hw_cleanup routine for both QDMA controllers when
removing the airoha_eth module or on the airoha_probe error path.
This is a preliminary patch to support multi-QDMA controllers.

Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 56 ++++++++++++----------
 1 file changed, 32 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 2ded99434a17..5286afbc6f3f 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -2023,15 +2023,25 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
 }
 
 static int airoha_qdma_init(struct platform_device *pdev,
-			    struct airoha_eth *eth)
+			    struct airoha_eth *eth,
+			    struct airoha_qdma *qdma)
 {
-	struct airoha_qdma *qdma = &eth->qdma[0];
-	int err;
+	int err, id = qdma - &eth->qdma[0];
+	const char *res;
 
 	spin_lock_init(&qdma->irq_lock);
 	qdma->eth = eth;
 
-	qdma->irq = platform_get_irq(pdev, 0);
+	res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+	if (!res)
+		return -ENOMEM;
+
+	qdma->regs = devm_platform_ioremap_resource_byname(pdev, res);
+	if (IS_ERR(qdma->regs))
+		return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+				     "failed to iomap qdma%d regs\n", id);
+
+	qdma->irq = platform_get_irq(pdev, 4 * id);
 	if (qdma->irq < 0)
 		return qdma->irq;
 
@@ -2052,19 +2062,13 @@ static int airoha_qdma_init(struct platform_device *pdev,
 	if (err)
 		return err;
 
-	err = airoha_qdma_hw_init(qdma);
-	if (err)
-		return err;
-
-	set_bit(DEV_STATE_INITIALIZED, &eth->state);
-
-	return 0;
+	return airoha_qdma_hw_init(qdma);
 }
 
 static int airoha_hw_init(struct platform_device *pdev,
 			  struct airoha_eth *eth)
 {
-	int err;
+	int err, i;
 
 	/* disable xsi */
 	reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
@@ -2078,12 +2082,19 @@ static int airoha_hw_init(struct platform_device *pdev,
 	if (err)
 		return err;
 
-	return airoha_qdma_init(pdev, eth);
+	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
+		err = airoha_qdma_init(pdev, eth, &eth->qdma[i]);
+		if (err)
+			return err;
+	}
+
+	set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+	return 0;
 }
 
-static void airoha_hw_cleanup(struct airoha_eth *eth)
+static void airoha_hw_cleanup(struct airoha_qdma *qdma)
 {
-	struct airoha_qdma *qdma = &eth->qdma[0];
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
regs\n"); - eth->rsts[0].id = "fe"; eth->rsts[1].id = "pdma"; eth->rsts[2].id = "qdma"; @@ -2706,7 +2710,9 @@ static int airoha_probe(struct platform_device *pdev) return 0; error: - airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + airoha_hw_cleanup(ð->qdma[i]); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; @@ -2724,7 +2730,9 @@ static void airoha_remove(struct platform_device *pdev) struct airoha_eth *eth = platform_get_drvdata(pdev); int i; - airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) + airoha_hw_cleanup(ð->qdma[i]); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; From patchwork Thu Aug 1 14:35:09 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Lorenzo Bianconi X-Patchwork-Id: 13750588 X-Patchwork-Delegate: kuba@kernel.org Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CF4DC1A4885 for ; Thu, 1 Aug 2024 14:36:00 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1722522960; cv=none; b=S9yRPGFWXX1nw8ey6aEVwa3Jr1//MzyMwc15YlTCndsvKKjT5BxXlUhiJWomzZxyZ4DoMQXM2H7obpxgWDMQU9X7FBsxyHYawokrO6SznBG56S49FC5bPe88wZMkpCDpXhryBth/XJVKXCyPpwSC6FxJP5UOK5+8ueWG7GwjCJ0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1722522960; c=relaxed/simple; bh=0J7qgF3J/Fu2KCiXu2zERepCTnGXd9JVJxEDHx0sl0Q=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=XS4NoS6/m5j8PKsakLlPFuUK9xJcLUprSdD0524fvdoK9RedCIUe4C58TrK+X089TLU1VeoujQKnSZy0GC+yRpIeaSNeAXdtaZTwWhxMUE5s7YN9PS8f5vqwdC1Ddf2nce8lw5Scn10HzYGpj1vpgDNFeMGW/mZnpGIvb9aAGm8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=n739wqvl; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="n739wqvl" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 51C86C32786; Thu, 1 Aug 2024 14:36:00 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1722522960; bh=0J7qgF3J/Fu2KCiXu2zERepCTnGXd9JVJxEDHx0sl0Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=n739wqvlFLTS8ibT4Add4Tsv0hQH0YvlV5q42u9xjVEafKhwMkDnAGDfocbs7111S qBriEocVjnigEKmsBMDD8jxC61Rs1gaGE/9BCxhJrIqosL7JgjRSRqKhu1nVMACE4+ ZE/7Efz7PcxS1b6YBSazFkAmJ2Z8hLTUtzq905K7pNzXkFGyNQIFyEOoGku+m2aXCU r46oNwbUDSngVOUcjk0XMkrAqRgrBhZFGg0tP47ZMeUXgzDaRYTwbBp0zIDRgY4IW5 obt+B1M4NXL2ZowqQZHnRB+6QICVI0OrHTV1BseKB1Qj+MVdMoKW6vzf2ZjnAitB/X zc90E3av9w2fQ== From: Lorenzo Bianconi To: netdev@vger.kernel.org Cc: nbd@nbd.name, lorenzo.bianconi83@gmail.com, davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com, linux-arm-kernel@lists.infradead.org, upstream@airoha.com, angelogioacchino.delregno@collabora.com, benjamin.larsson@genexis.eu, rkannoth@marvell.com, sgoutham@marvell.com, andrew@lunn.ch, arnd@arndb.de, horms@kernel.org Subject: [PATCH v2 net-next 7/8] net: airoha: Start all qdma NAPIs in airoha_probe() Date: Thu, 1 Aug 2024 16:35:09 +0200 Message-ID: X-Mailer: git-send-email 
From patchwork Thu Aug 1 14:35:09 2024
X-Patchwork-Id: 13750588
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Subject: [PATCH v2 net-next 7/8] net: airoha: Start all qdma NAPIs in airoha_probe()
Date: Thu, 1 Aug 2024 16:35:09 +0200

This is a preliminary patch to support multi-QDMA controllers.

Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 5286afbc6f3f..13c72ab6d87a 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -2121,9 +2121,8 @@ static void airoha_hw_cleanup(struct airoha_qdma *qdma)
 	}
 }
 
-static void airoha_qdma_start_napi(struct airoha_eth *eth)
+static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
 {
-	struct airoha_qdma *qdma = &eth->qdma[0];
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++)
@@ -2692,7 +2691,9 @@ static int airoha_probe(struct platform_device *pdev)
 	if (err)
 		goto error;
 
-	airoha_qdma_start_napi(eth);
+	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
+		airoha_qdma_start_napi(&eth->qdma[i]);
+
 	for_each_child_of_node(pdev->dev.of_node, np) {
 		if (!of_device_is_compatible(np, "airoha,eth-mac"))
 			continue;
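Once airoha_qdma_start_napi() takes a controller pointer, probe can bring up NAPI for every instance with a plain loop. A minimal sketch of the shape (hypothetical demo_* names, with the napi_structs assumed already registered via netif_napi_add() during queue init):

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    struct demo_queue {
            struct napi_struct napi;	/* registered at queue init */
    };

    struct demo_ctrl {
            struct demo_queue q_rx[4];
    };

    static void demo_start_napi(struct demo_ctrl *ctrl)
    {
            int i;

            /* enable every polling context owned by this controller */
            for (i = 0; i < ARRAY_SIZE(ctrl->q_rx); i++)
                    napi_enable(&ctrl->q_rx[i].napi);
    }

Probe then iterates demo_start_napi() over the controller array, exactly as the loop in the diff above does for eth->qdma.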
From patchwork Thu Aug 1 14:35:10 2024
X-Patchwork-Id: 13750589
From: Lorenzo Bianconi
To: netdev@vger.kernel.org
Subject: [PATCH v2 net-next 8/8] net: airoha: Link the gdm port to the selected qdma controller
Date: Thu, 1 Aug 2024 16:35:10 +0200
Message-ID: <95b515df34ba4727f7ae5b14a1d0462cceec84ff.1722522582.git.lorenzo@kernel.org>

Link the running gdm port to the qdma controller used to connect with
the CPU. Moreover, load all the QDMA controllers available on the
EN7581 SoC.
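The binding the diff below introduces boils down to a modulo pick plus a stored back-pointer: with two controllers, taking the port index modulo the controller count spreads GDM ports across the available QDMA blocks. Roughly, with hypothetical names:

    #define DEMO_NUM_QDMA	2

    struct demo_ctrl {
            int irq;
    };

    struct demo_port {
            struct demo_ctrl *qdma;	/* controller serving this port */
            int id;
    };

    static void demo_bind_port(struct demo_ctrl ctrls[DEMO_NUM_QDMA],
                               struct demo_port *port, int index)
    {
            /* round-robin assignment, as in
             * "qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA]" below
             */
            port->qdma = &ctrls[index % DEMO_NUM_QDMA];
    }

After this, the datapath reaches everything through port->qdma, which is why the diff can drop the port->eth field entirely.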
Signed-off-by: Lorenzo Bianconi
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 37 +++++++++++-----------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
index 13c72ab6d87a..db4267225fa4 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -18,7 +18,7 @@
 #include
 
 #define AIROHA_MAX_NUM_GDM_PORTS	1
-#define AIROHA_MAX_NUM_QDMA		1
+#define AIROHA_MAX_NUM_QDMA		2
 #define AIROHA_MAX_NUM_RSTS		3
 #define AIROHA_MAX_NUM_XSI_RSTS		5
 #define AIROHA_MAX_MTU			2000
@@ -805,8 +805,8 @@ struct airoha_qdma {
 };
 
 struct airoha_gdm_port {
+	struct airoha_qdma *qdma;
 	struct net_device *dev;
-	struct airoha_eth *eth;
 	int id;
 
 	struct airoha_hw_stats stats;
@@ -2138,7 +2138,7 @@ static void airoha_qdma_start_napi(struct airoha_qdma *qdma)
 
 static void airoha_update_hw_stats(struct airoha_gdm_port *port)
 {
-	struct airoha_eth *eth = port->eth;
+	struct airoha_eth *eth = port->qdma->eth;
 	u32 val, i = 0;
 
 	spin_lock(&port->stats.lock);
@@ -2283,22 +2283,22 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
 static int airoha_dev_open(struct net_device *dev)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_qdma *qdma = port->qdma;
 	int err;
 
 	netif_tx_start_all_queues(dev);
-	err = airoha_set_gdm_ports(eth, true);
+	err = airoha_set_gdm_ports(qdma->eth, true);
 	if (err)
 		return err;
 
 	if (netdev_uses_dsa(dev))
-		airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+		airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
 			      GDM_STAG_EN_MASK);
 	else
-		airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+		airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
 				GDM_STAG_EN_MASK);
 
-	airoha_qdma_set(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
+	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
 			GLOBAL_CFG_TX_DMA_EN_MASK |
 			GLOBAL_CFG_RX_DMA_EN_MASK);
 
@@ -2308,15 +2308,15 @@ static int airoha_dev_open(struct net_device *dev)
 static int airoha_dev_stop(struct net_device *dev)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_qdma *qdma = port->qdma;
 	int err;
 
 	netif_tx_disable(dev);
-	err = airoha_set_gdm_ports(eth, false);
+	err = airoha_set_gdm_ports(qdma->eth, false);
 	if (err)
 		return err;
 
-	airoha_qdma_clear(&eth->qdma[0], REG_QDMA_GLOBAL_CFG,
+	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
 			  GLOBAL_CFG_TX_DMA_EN_MASK |
 			  GLOBAL_CFG_RX_DMA_EN_MASK);
 
@@ -2332,7 +2332,7 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
 	if (err)
 		return err;
 
-	airoha_set_macaddr(port->eth, dev->dev_addr);
+	airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
 
 	return 0;
 }
@@ -2341,7 +2341,7 @@ static int airoha_dev_init(struct net_device *dev)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
 
-	airoha_set_macaddr(port->eth, dev->dev_addr);
+	airoha_set_macaddr(port->qdma->eth, dev->dev_addr);
 
 	return 0;
 }
@@ -2375,10 +2375,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
 	struct airoha_gdm_port *port = netdev_priv(dev);
 	u32 msg0 = 0, msg1, len = skb_headlen(skb);
 	int i, qid = skb_get_queue_mapping(skb);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_qdma *qdma = port->qdma;
 	u32 nr_frags = 1 + sinfo->nr_frags;
 	struct netdev_queue *txq;
-	struct airoha_qdma *qdma;
 	struct airoha_queue *q;
 	void *data = skb->data;
 	u16 index;
@@ -2406,7 +2405,6 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
 	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
 	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
 
-	qdma = &eth->qdma[0];
 	q = &qdma->q_tx[qid];
 	if (WARN_ON_ONCE(!q->ndesc))
 		goto error;
@@ -2489,7 +2487,7 @@ static void airoha_ethtool_get_drvinfo(struct net_device *dev,
 				       struct ethtool_drvinfo *info)
 {
 	struct airoha_gdm_port *port = netdev_priv(dev);
-	struct airoha_eth *eth = port->eth;
+	struct airoha_eth *eth = port->qdma->eth;
 
 	strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
 	strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
@@ -2570,6 +2568,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
 {
 	const __be32 *id_ptr = of_get_property(np, "reg", NULL);
 	struct airoha_gdm_port *port;
+	struct airoha_qdma *qdma;
 	struct net_device *dev;
 	int err, index;
 	u32 id;
@@ -2599,6 +2598,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
 		return -ENOMEM;
 	}
 
+	qdma = &eth->qdma[index % AIROHA_MAX_NUM_QDMA];
 	dev->netdev_ops = &airoha_netdev_ops;
 	dev->ethtool_ops = &airoha_ethtool_ops;
 	dev->max_mtu = AIROHA_MAX_MTU;
@@ -2608,6 +2608,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
 			   NETIF_F_SG | NETIF_F_TSO;
 	dev->features |= dev->hw_features;
 	dev->dev.of_node = np;
+	dev->irq = qdma->irq;
 	SET_NETDEV_DEV(dev, eth->dev);
 
 	err = of_get_ethdev_address(np, dev);
@@ -2623,8 +2624,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
 	port = netdev_priv(dev);
 	u64_stats_init(&port->stats.syncp);
 	spin_lock_init(&port->stats.lock);
+	port->qdma = qdma;
 	port->dev = dev;
-	port->eth = eth;
 	port->id = id;
 
 	eth->ports[index] = port;