From patchwork Tue Aug 20 14:01:50 2024
X-Patchwork-Submitter: Jijie Shao
X-Patchwork-Id: 13770173
X-Patchwork-Delegate: kuba@kernel.org
From: Jijie Shao
Subject: [PATCH V2 net-next 07/11] net: hibmcge: Implement rx_poll function to receive packets
Date: Tue, 20 Aug 2024 22:01:50 +0800
Message-ID: <20240820140154.137876-8-shaojijie@huawei.com>
In-Reply-To: <20240820140154.137876-1-shaojijie@huawei.com>
References: <20240820140154.137876-1-shaojijie@huawei.com>
X-Mailer: git-send-email 2.30.0
X-Mailing-List: netdev@vger.kernel.org

Implement the rx_poll function to read the rx descriptor after the rx
interrupt is received, and adjust the skb based on the descriptor to
complete reception of the packet.
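In outline, the receive path works as follows (a simplified restatement of
the hbg_napi_rx_poll() flow from the hbg_txrx.c hunk below, not additional
code; the not-open check, empty-queue check and error paths are trimmed):

  /* Simplified sketch; see hbg_napi_rx_poll() in the diff for the real code. */
  static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
  {
          struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
          struct hbg_priv *priv = ring->priv;
          struct hbg_rx_desc *rx_desc;
          struct hbg_buffer *buffer;
          u32 packet_done = 0, pkt_len;

          while (packet_done < budget) {
                  /* oldest buffer previously handed to the hardware */
                  buffer = &ring->queue[ring->ntc];

                  /* descriptor not written back yet: stop polling */
                  if (!hbg_sync_data_from_hw(priv, buffer))
                          break;

                  hbg_dma_unmap(buffer);

                  /* skip the descriptor/head area; the packet length
                   * comes from word2 of the rx descriptor
                   */
                  skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
                  rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
                  pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
                  skb_put(buffer->skb, pkt_len);
                  buffer->skb->protocol = eth_type_trans(buffer->skb, priv->netdev);
                  netif_receive_skb(buffer->skb);

                  /* hand a fresh buffer to the hardware and advance */
                  hbg_rx_fill_one_buffer(priv);
                  hbg_queue_move_next(ntc, ring);
                  packet_done++;
          }

          /* top up the rx ring, then re-enable the RX interrupt */
          hbg_rx_fill_buffers(priv);
          if (napi_complete_done(napi, packet_done))
                  hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);

          return packet_done;
  }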
Signed-off-by: Jijie Shao
---
 .../ethernet/hisilicon/hibmcge/hbg_common.h   |   5 +
 .../net/ethernet/hisilicon/hibmcge/hbg_hw.c   |  10 ++
 .../net/ethernet/hisilicon/hibmcge/hbg_hw.h   |   1 +
 .../net/ethernet/hisilicon/hibmcge/hbg_irq.c  |   8 +-
 .../net/ethernet/hisilicon/hibmcge/hbg_main.c |   2 +
 .../net/ethernet/hisilicon/hibmcge/hbg_reg.h  |  13 ++
 .../net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 159 +++++++++++++++++-
 7 files changed, 196 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index b04c7a4bdd73..5767a825e252 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -16,6 +16,10 @@
 #define HBG_VECTOR_NUM			4
 #define HBG_PCU_CACHE_LINE_SIZE		32
 #define HBG_TX_TIMEOUT_BUF_LEN		1024
+#define HBG_RX_DESCR			0x01
+
+#define HBG_PACKET_HEAD_SIZE	((HBG_RX_SKIP1 + HBG_RX_SKIP2 + HBG_RX_DESCR) * \
+				 HBG_PCU_CACHE_LINE_SIZE)
 
 enum hbg_dir {
 	HBG_DIR_TX = 1 << 0,
@@ -128,6 +132,7 @@ struct hbg_priv {
 	struct hbg_mac mac;
 	struct hbg_vector vectors;
 	struct hbg_ring tx_ring;
+	struct hbg_ring rx_ring;
 };
 
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 27c5e19707e2..54f7c84ba8aa 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -68,6 +68,7 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
 		return -EADDRNOTAVAIL;
 
 	dev_specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + dev_specs->max_mtu;
+	dev_specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + dev_specs->max_frame_len;
 
 	return 0;
 }
@@ -172,6 +173,10 @@ u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
 		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
 					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
 
+	if (dir & HBG_DIR_RX)
+		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
+
 	return 0;
 }
 
@@ -183,6 +188,11 @@ void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
 	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
 }
 
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
+{
+	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
+}
+
 void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
 {
 	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 15bf16a50f14..18c6ccbe1b28 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -52,5 +52,6 @@ void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
 void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
 u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
 void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
 
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index da53e905ea9e..7ec5a451185c 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -19,12 +19,18 @@ static void hbg_irq_handle_tx(struct hbg_priv *priv,
 	napi_schedule(&priv->tx_ring.napi);
 }
 
+static void hbg_irq_handle_rx(struct hbg_priv *priv,
+			      struct hbg_irq_info *irq_info)
+{
+	napi_schedule(&priv->rx_ring.napi);
+}
+
 #define HBG_TXRX_IRQ_I(name, mask, handle) {name, mask, false, false, 0, handle}
 #define HBG_ERR_IRQ_I(name, mask, need_print) \
 	{name, mask, true, need_print, 0, hbg_irq_handle_err}
 
 static struct hbg_irq_info hbg_irqs[] = {
-	HBG_TXRX_IRQ_I("RX", HBG_INT_MSK_RX_B, NULL),
+	HBG_TXRX_IRQ_I("RX", HBG_INT_MSK_RX_B, hbg_irq_handle_rx),
 	HBG_TXRX_IRQ_I("TX", HBG_INT_MSK_TX_B, hbg_irq_handle_tx),
 	HBG_ERR_IRQ_I("MAC_MII_FIFO_ERR", HBG_INT_MSK_MAC_MII_FF_ERR_B, true),
 	HBG_ERR_IRQ_I("MAC_PCS_RX_FIFO_ERR", HBG_INT_MSK_MAC_PCS_RXFF_ERR_B, true),
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index bcd0f20f7385..599edbbdc219 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -30,6 +30,7 @@ static int hbg_net_open(struct net_device *dev)
 		return 0;
 
 	netif_carrier_off(dev);
+	napi_enable(&priv->rx_ring.napi);
 	napi_enable(&priv->tx_ring.napi);
 	hbg_all_irq_enable(priv, true);
 	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
@@ -53,6 +54,7 @@ static int hbg_net_stop(struct net_device *dev)
 	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
 	hbg_all_irq_enable(priv, false);
 	napi_disable(&priv->tx_ring.napi);
+	napi_disable(&priv->rx_ring.napi);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 0294eb875fde..4920e2bddf1f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -82,10 +82,12 @@
 #define HBG_REG_MAX_FRAME_LEN_M			GENMASK(15, 0)
 #define HBG_REG_CF_CFF_DATA_NUM_ADDR		(HBG_REG_SGMII_BASE + 0x045C)
 #define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M	GENMASK(8, 0)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M	GENMASK(24, 16)
 #define HBG_REG_TX_CFF_ADDR_0_ADDR		(HBG_REG_SGMII_BASE + 0x0488)
 #define HBG_REG_TX_CFF_ADDR_1_ADDR		(HBG_REG_SGMII_BASE + 0x048C)
 #define HBG_REG_TX_CFF_ADDR_2_ADDR		(HBG_REG_SGMII_BASE + 0x0490)
 #define HBG_REG_TX_CFF_ADDR_3_ADDR		(HBG_REG_SGMII_BASE + 0x0494)
+#define HBG_REG_RX_CFF_ADDR_ADDR		(HBG_REG_SGMII_BASE + 0x04A0)
 #define HBG_REG_RX_BUF_SIZE_ADDR		(HBG_REG_SGMII_BASE + 0x04E4)
 #define HBG_REG_RX_BUF_SIZE_M			GENMASK(15, 0)
 #define HBG_REG_BUS_CTRL_ADDR			(HBG_REG_SGMII_BASE + 0x04E8)
@@ -127,4 +129,15 @@ struct hbg_tx_desc {
 #define HBG_TX_DESC_W0_l4_CS_B			BIT(0)
 #define HBG_TX_DESC_W1_SEND_LEN_M		GENMASK(19, 4)
 
+struct hbg_rx_desc {
+	u32 word0;
+	u32 word1; /* tag */
+	u32 word2;
+	u32 word3;
+	u32 word4;
+	u32 word5;
+};
+
+#define HBG_RX_DESC_W2_PKT_LEN_M		GENMASK(31, 16)
+
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index 76e93d265ec1..8006a551719b 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -14,6 +14,9 @@
 #define hbg_queue_is_full(head, tail, ring) ((head) == ((tail) + 1) % (ring)->len)
 #define hbg_queue_is_empty(head, tail) ((head) == (tail))
 #define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
+#define hbg_queue_move_next(p, ring) ({ \
+	typeof(ring) _ring = (ring); \
+	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })
 
 static int hbg_dma_map(struct hbg_buffer *buffer)
 {
@@ -118,6 +121,20 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
 	buffer->skb = NULL;
 }
 
+static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
+{
+	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
+	struct hbg_priv *priv = buffer->priv;
+
+	buffer->skb = netdev_alloc_skb(priv->netdev, len);
+	if (unlikely(!buffer->skb))
+		return -ENOMEM;
+
+	buffer->skb_len = len;
+	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
+	return 0;
+}
+
 static void hbg_buffer_free(struct hbg_buffer *buffer)
 {
 	hbg_dma_unmap(buffer);
@@ -164,6 +181,110 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
 	return packet_done;
 }
 
+static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
+{
+	struct hbg_ring *ring = &priv->rx_ring;
+	struct hbg_buffer *buffer;
+	int ret;
+
+	buffer = &ring->queue[ring->ntu];
+	ret = hbg_buffer_alloc_skb(buffer);
+	if (unlikely(ret))
+		return ret;
+
+	ret = hbg_dma_map(buffer);
+	if (unlikely(ret)) {
+		hbg_buffer_free_skb(buffer);
+		return ret;
+	}
+
+	hbg_hw_fill_buffer(priv, buffer->skb_dma);
+	hbg_queue_move_next(ntu, ring);
+	return 0;
+}
+
+static int hbg_rx_fill_buffers(struct hbg_priv *priv)
+{
+	struct hbg_ring *ring = &priv->rx_ring;
+	int ret;
+
+	while (!(hbg_fifo_is_full(priv, ring->dir) ||
+		 hbg_queue_is_full(ring->ntc, ring->ntu, ring))) {
+		ret = hbg_rx_fill_one_buffer(priv);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
+				  struct hbg_buffer *buffer)
+{
+	struct hbg_rx_desc *rx_desc;
+
+	/* make sure HW write desc complete */
+	dma_rmb();
+
+	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
+				buffer->skb_len, DMA_FROM_DEVICE);
+
+	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
+}
+
+static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+	struct hbg_priv *priv = ring->priv;
+	struct hbg_rx_desc *rx_desc;
+	struct hbg_buffer *buffer;
+	u32 packet_done = 0;
+	u32 pkt_len;
+
+	if (unlikely(!hbg_nic_is_open(priv))) {
+		napi_complete(napi);
+		return 0;
+	}
+
+	while (packet_done < budget) {
+		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu)))
+			break;
+
+		buffer = &ring->queue[ring->ntc];
+		if (unlikely(!buffer->skb))
+			goto next_buffer;
+
+		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
+			break;
+
+		hbg_dma_unmap(buffer);
+
+		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
+
+		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+		skb_put(buffer->skb, pkt_len);
+		buffer->skb->protocol = eth_type_trans(buffer->skb, priv->netdev);
+
+		priv->netdev->stats.rx_bytes += pkt_len;
+		priv->netdev->stats.rx_packets++;
+		netif_receive_skb(buffer->skb);
+		buffer->skb = NULL;
+		hbg_rx_fill_one_buffer(priv);
+
+next_buffer:
+		hbg_queue_move_next(ntc, ring);
+		packet_done++;
+	}
+
+	hbg_rx_fill_buffers(priv);
+	if (likely(napi_complete_done(napi, packet_done)))
+		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
+
+	return packet_done;
+}
+
 static void hbg_ring_uninit(struct hbg_ring *ring)
 {
 	struct hbg_buffer *buffer;
@@ -241,15 +362,50 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
 	return 0;
 }
 
+static int hbg_rx_ring_init(struct hbg_priv *priv)
+{
+	struct hbg_ring *rx_ring = &priv->rx_ring;
+	int ret;
+
+	ret = hbg_ring_init(priv, rx_ring, HBG_DIR_RX);
+	if (ret)
+		return ret;
+
+	netif_napi_add(priv->netdev, &priv->rx_ring.napi, hbg_napi_rx_poll);
+	return 0;
+}
+
 int hbg_txrx_init(struct hbg_priv *priv)
 {
 	int ret;
 
 	ret = hbg_tx_ring_init(priv);
-	if (ret)
+	if (ret) {
 		dev_err(&priv->pdev->dev,
 			"failed to init tx ring, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hbg_rx_ring_init(priv);
+	if (ret) {
+		dev_err(&priv->pdev->dev,
+			"failed to init rx ring, ret = %d\n", ret);
+		goto err_uninit_tx;
+	}
+
+	ret = hbg_rx_fill_buffers(priv);
+	if (ret) {
+		dev_err(&priv->pdev->dev,
+			"failed to fill rx buffers, ret = %d\n", ret);
+		goto err_uninit_rx;
+	}
 
+	return 0;
+
+err_uninit_rx:
+	hbg_ring_uninit(&priv->rx_ring);
+err_uninit_tx:
+	hbg_ring_uninit(&priv->tx_ring);
 	return ret;
 }
 
@@ -258,4 +414,5 @@ void hbg_txrx_uninit(void *data)
 	struct hbg_priv *priv = data;
 
 	hbg_ring_uninit(&priv->tx_ring);
+	hbg_ring_uninit(&priv->rx_ring);
 }