From patchwork Thu Aug 22 09:33:30 2024
X-Patchwork-Submitter: Jijie Shao
X-Patchwork-Id: 13773144
X-Patchwork-Delegate: kuba@kernel.org
X-Mailing-List: netdev@vger.kernel.org
From: Jijie Shao
Subject: [PATCH V3 net-next 07/11] net: hibmcge: Implement rx_poll function to receive packets
Date: Thu, 22 Aug 2024 17:33:30 +0800
Message-ID: <20240822093334.1687011-8-shaojijie@huawei.com>
In-Reply-To: <20240822093334.1687011-1-shaojijie@huawei.com>
References: <20240822093334.1687011-1-shaojijie@huawei.com>

Implement the rx_poll function to read the rx descriptor after the rx
interrupt is received, and adjust the skb according to the descriptor to
complete packet reception.
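The receive path added here follows the usual kernel NAPI pattern: the rx
interrupt schedules NAPI, the poll function consumes up to "budget"
descriptors, hands each completed skb to the stack, refills the ring, and
re-enables the rx interrupt once polling is done. The sketch below only
illustrates that generic pattern and is not code from this driver; the
struct my_ring type and every my_*() helper are hypothetical placeholders
for driver-specific ring handling.

  #include <linux/etherdevice.h>	/* eth_type_trans() */
  #include <linux/netdevice.h>		/* napi_*, netif_receive_skb() */

  /* hypothetical per-ring context */
  struct my_ring {
  	struct napi_struct napi;
  	struct net_device *netdev;
  	/* ... descriptor/buffer bookkeeping ... */
  };

  /* Generic NAPI rx poll skeleton (illustrative only, placeholder names):
   * consume up to "budget" ready descriptors, pass each skb to the stack,
   * refill the ring, then re-enable the rx interrupt when all work is done.
   */
  static int my_napi_rx_poll(struct napi_struct *napi, int budget)
  {
  	struct my_ring *ring = container_of(napi, struct my_ring, napi);
  	int done = 0;

  	while (done < budget && my_desc_ready(ring)) {
  		struct sk_buff *skb = my_ring_next_skb(ring);

  		skb->protocol = eth_type_trans(skb, ring->netdev);
  		netif_receive_skb(skb);
  		done++;
  	}

  	my_ring_refill(ring);

  	/* re-enable the rx interrupt only if the budget was not exhausted */
  	if (done < budget && napi_complete_done(napi, done))
  		my_enable_rx_irq(ring);

  	return done;
  }

In this patch the same steps are implemented by hbg_napi_rx_poll(),
hbg_rx_fill_buffers() and hbg_hw_irq_enable().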
Signed-off-by: Jijie Shao
---
 .../ethernet/hisilicon/hibmcge/hbg_common.h   |   5 +
 .../net/ethernet/hisilicon/hibmcge/hbg_hw.c   |  10 ++
 .../net/ethernet/hisilicon/hibmcge/hbg_hw.h   |   1 +
 .../net/ethernet/hisilicon/hibmcge/hbg_irq.c  |   8 +-
 .../net/ethernet/hisilicon/hibmcge/hbg_main.c |   2 +
 .../net/ethernet/hisilicon/hibmcge/hbg_reg.h  |  13 ++
 .../net/ethernet/hisilicon/hibmcge/hbg_txrx.c | 159 +++++++++++++++++-
 7 files changed, 196 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index b04c7a4bdd73..5767a825e252 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -16,6 +16,10 @@
 #define HBG_VECTOR_NUM			4
 #define HBG_PCU_CACHE_LINE_SIZE		32
 #define HBG_TX_TIMEOUT_BUF_LEN		1024
+#define HBG_RX_DESCR			0x01
+
+#define HBG_PACKET_HEAD_SIZE	((HBG_RX_SKIP1 + HBG_RX_SKIP2 + HBG_RX_DESCR) * \
+				 HBG_PCU_CACHE_LINE_SIZE)
 
 enum hbg_dir {
 	HBG_DIR_TX = 1 << 0,
@@ -128,6 +132,7 @@ struct hbg_priv {
 	struct hbg_mac mac;
 	struct hbg_vector vectors;
 	struct hbg_ring tx_ring;
+	struct hbg_ring rx_ring;
 };
 
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 4f60bfdc3249..02aa6428e327 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -68,6 +68,7 @@ static int hbg_hw_dev_specs_init(struct hbg_priv *priv)
 		return -EADDRNOTAVAIL;
 
 	dev_specs->max_frame_len = HBG_PCU_CACHE_LINE_SIZE + dev_specs->max_mtu;
+	dev_specs->rx_buf_size = HBG_PACKET_HEAD_SIZE + dev_specs->max_frame_len;
 	return 0;
 }
 
@@ -172,6 +173,10 @@ u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir)
 		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
 					  HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M);
 
+	if (dir & HBG_DIR_RX)
+		return hbg_reg_read_field(priv, HBG_REG_CF_CFF_DATA_NUM_ADDR,
+					  HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M);
+
 	return 0;
 }
 
@@ -183,6 +188,11 @@ void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc)
 	hbg_reg_write(priv, HBG_REG_TX_CFF_ADDR_3_ADDR, tx_desc->word3);
 }
 
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
+{
+	hbg_reg_write(priv, HBG_REG_RX_CFF_ADDR_ADDR, buffer_dma_addr);
+}
+
 void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
 {
 	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
index 15bf16a50f14..18c6ccbe1b28 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.h
@@ -52,5 +52,6 @@ void hbg_hw_mac_enable(struct hbg_priv *priv, u32 enable);
 void hbg_hw_set_uc_addr(struct hbg_priv *priv, u64 mac_addr);
 u32 hbg_hw_get_fifo_used_num(struct hbg_priv *priv, enum hbg_dir dir);
 void hbg_hw_set_tx_desc(struct hbg_priv *priv, struct hbg_tx_desc *tx_desc);
+void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr);
 
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index da53e905ea9e..7ec5a451185c 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -19,12 +19,18 @@ static void hbg_irq_handle_tx(struct hbg_priv *priv,
 	napi_schedule(&priv->tx_ring.napi);
 }
 
+static void hbg_irq_handle_rx(struct hbg_priv *priv,
+			      struct hbg_irq_info *irq_info)
+{
+	napi_schedule(&priv->rx_ring.napi);
+}
+
 #define HBG_TXRX_IRQ_I(name, mask, handle) {name, mask, false, false, 0, handle}
 #define HBG_ERR_IRQ_I(name, mask, need_print) \
 	{name, mask, true, need_print, 0, hbg_irq_handle_err}
 
 static struct hbg_irq_info hbg_irqs[] = {
-	HBG_TXRX_IRQ_I("RX", HBG_INT_MSK_RX_B, NULL),
+	HBG_TXRX_IRQ_I("RX", HBG_INT_MSK_RX_B, hbg_irq_handle_rx),
 	HBG_TXRX_IRQ_I("TX", HBG_INT_MSK_TX_B, hbg_irq_handle_tx),
 	HBG_ERR_IRQ_I("MAC_MII_FIFO_ERR", HBG_INT_MSK_MAC_MII_FF_ERR_B, true),
 	HBG_ERR_IRQ_I("MAC_PCS_RX_FIFO_ERR", HBG_INT_MSK_MAC_PCS_RXFF_ERR_B, true),
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 23d3fbc63d0d..cb42b7f6596d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -30,6 +30,7 @@ static int hbg_net_open(struct net_device *dev)
 		return 0;
 
 	netif_carrier_off(dev);
+	napi_enable(&priv->rx_ring.napi);
 	napi_enable(&priv->tx_ring.napi);
 	hbg_all_irq_enable(priv, true);
 	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
@@ -53,6 +54,7 @@ static int hbg_net_stop(struct net_device *dev)
 	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
 	hbg_all_irq_enable(priv, false);
 	napi_disable(&priv->tx_ring.napi);
+	napi_disable(&priv->rx_ring.napi);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index 0294eb875fde..4920e2bddf1f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -82,10 +82,12 @@
 #define HBG_REG_MAX_FRAME_LEN_M			GENMASK(15, 0)
 #define HBG_REG_CF_CFF_DATA_NUM_ADDR		(HBG_REG_SGMII_BASE + 0x045C)
 #define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M	GENMASK(8, 0)
+#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M	GENMASK(24, 16)
 #define HBG_REG_TX_CFF_ADDR_0_ADDR		(HBG_REG_SGMII_BASE + 0x0488)
 #define HBG_REG_TX_CFF_ADDR_1_ADDR		(HBG_REG_SGMII_BASE + 0x048C)
 #define HBG_REG_TX_CFF_ADDR_2_ADDR		(HBG_REG_SGMII_BASE + 0x0490)
 #define HBG_REG_TX_CFF_ADDR_3_ADDR		(HBG_REG_SGMII_BASE + 0x0494)
+#define HBG_REG_RX_CFF_ADDR_ADDR		(HBG_REG_SGMII_BASE + 0x04A0)
 #define HBG_REG_RX_BUF_SIZE_ADDR		(HBG_REG_SGMII_BASE + 0x04E4)
 #define HBG_REG_RX_BUF_SIZE_M			GENMASK(15, 0)
 #define HBG_REG_BUS_CTRL_ADDR			(HBG_REG_SGMII_BASE + 0x04E8)
@@ -127,4 +129,15 @@ struct hbg_tx_desc {
 #define HBG_TX_DESC_W0_l4_CS_B			BIT(0)
 #define HBG_TX_DESC_W1_SEND_LEN_M		GENMASK(19, 4)
 
+struct hbg_rx_desc {
+	u32 word0;
+	u32 word1; /* tag */
+	u32 word2;
+	u32 word3;
+	u32 word4;
+	u32 word5;
+};
+
+#define HBG_RX_DESC_W2_PKT_LEN_M		GENMASK(31, 16)
+
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index 76e93d265ec1..8006a551719b 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -14,6 +14,9 @@
 #define hbg_queue_is_full(head, tail, ring) ((head) == ((tail) + 1) % (ring)->len)
 #define hbg_queue_is_empty(head, tail) ((head) == (tail))
 #define hbg_queue_next_prt(p, ring) (((p) + 1) % (ring)->len)
+#define hbg_queue_move_next(p, ring) ({ \
+	typeof(ring) _ring = (ring); \
+	_ring->p = hbg_queue_next_prt(_ring->p, _ring); })
 
 static int hbg_dma_map(struct hbg_buffer *buffer)
 {
@@ -118,6 +121,20 @@ static void hbg_buffer_free_skb(struct hbg_buffer *buffer)
 	buffer->skb = NULL;
 }
 
+static int hbg_buffer_alloc_skb(struct hbg_buffer *buffer)
+{
+	u32 len = hbg_spec_max_frame_len(buffer->priv, buffer->dir);
+	struct hbg_priv *priv = buffer->priv;
+
+	buffer->skb = netdev_alloc_skb(priv->netdev, len);
+	if (unlikely(!buffer->skb))
+		return -ENOMEM;
+
+	buffer->skb_len = len;
+	memset(buffer->skb->data, 0, HBG_PACKET_HEAD_SIZE);
+	return 0;
+}
+
 static void hbg_buffer_free(struct hbg_buffer *buffer)
 {
 	hbg_dma_unmap(buffer);
@@ -164,6 +181,110 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
 	return packet_done;
 }
 
+static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
+{
+	struct hbg_ring *ring = &priv->rx_ring;
+	struct hbg_buffer *buffer;
+	int ret;
+
+	buffer = &ring->queue[ring->ntu];
+	ret = hbg_buffer_alloc_skb(buffer);
+	if (unlikely(ret))
+		return ret;
+
+	ret = hbg_dma_map(buffer);
+	if (unlikely(ret)) {
+		hbg_buffer_free_skb(buffer);
+		return ret;
+	}
+
+	hbg_hw_fill_buffer(priv, buffer->skb_dma);
+	hbg_queue_move_next(ntu, ring);
+	return 0;
+}
+
+static int hbg_rx_fill_buffers(struct hbg_priv *priv)
+{
+	struct hbg_ring *ring = &priv->rx_ring;
+	int ret;
+
+	while (!(hbg_fifo_is_full(priv, ring->dir) ||
+		 hbg_queue_is_full(ring->ntc, ring->ntu, ring))) {
+		ret = hbg_rx_fill_one_buffer(priv);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool hbg_sync_data_from_hw(struct hbg_priv *priv,
+				  struct hbg_buffer *buffer)
+{
+	struct hbg_rx_desc *rx_desc;
+
+	/* make sure HW write desc complete */
+	dma_rmb();
+
+	dma_sync_single_for_cpu(&priv->pdev->dev, buffer->skb_dma,
+				buffer->skb_len, DMA_FROM_DEVICE);
+
+	rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+	return FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2) != 0;
+}
+
+static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
+	struct hbg_priv *priv = ring->priv;
+	struct hbg_rx_desc *rx_desc;
+	struct hbg_buffer *buffer;
+	u32 packet_done = 0;
+	u32 pkt_len;
+
+	if (unlikely(!hbg_nic_is_open(priv))) {
+		napi_complete(napi);
+		return 0;
+	}
+
+	while (packet_done < budget) {
+		if (unlikely(hbg_queue_is_empty(ring->ntc, ring->ntu)))
+			break;
+
+		buffer = &ring->queue[ring->ntc];
+		if (unlikely(!buffer->skb))
+			goto next_buffer;
+
+		if (unlikely(!hbg_sync_data_from_hw(priv, buffer)))
+			break;
+
+		hbg_dma_unmap(buffer);
+
+		skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
+
+		rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
+		pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
+		skb_put(buffer->skb, pkt_len);
+		buffer->skb->protocol = eth_type_trans(buffer->skb, priv->netdev);
+
+		priv->netdev->stats.rx_bytes += pkt_len;
+		priv->netdev->stats.rx_packets++;
+		netif_receive_skb(buffer->skb);
+		buffer->skb = NULL;
+		hbg_rx_fill_one_buffer(priv);
+
+next_buffer:
+		hbg_queue_move_next(ntc, ring);
+		packet_done++;
+	}
+
+	hbg_rx_fill_buffers(priv);
+	if (likely(napi_complete_done(napi, packet_done)))
+		hbg_hw_irq_enable(priv, HBG_INT_MSK_RX_B, true);
+
+	return packet_done;
+}
+
 static void hbg_ring_uninit(struct hbg_ring *ring)
 {
 	struct hbg_buffer *buffer;
@@ -241,15 +362,50 @@ static int hbg_tx_ring_init(struct hbg_priv *priv)
 	return 0;
 }
 
+static int hbg_rx_ring_init(struct hbg_priv *priv)
+{
+	struct hbg_ring *rx_ring = &priv->rx_ring;
+	int ret;
+
+	ret = hbg_ring_init(priv, rx_ring, HBG_DIR_RX);
+	if (ret)
+		return ret;
+
+	netif_napi_add(priv->netdev, &priv->rx_ring.napi, hbg_napi_rx_poll);
+	return 0;
+}
+
 int hbg_txrx_init(struct hbg_priv *priv)
 {
 	int ret;
 
 	ret = hbg_tx_ring_init(priv);
-	if (ret)
+	if (ret) {
 		dev_err(&priv->pdev->dev,
 			"failed to init tx ring, ret = %d\n", ret);
+		return ret;
+	}
+
+	ret = hbg_rx_ring_init(priv);
+	if (ret) {
+		dev_err(&priv->pdev->dev,
+			"failed to init rx ring, ret = %d\n", ret);
+		goto err_uninit_tx;
+	}
+
+	ret = hbg_rx_fill_buffers(priv);
+	if (ret) {
+		dev_err(&priv->pdev->dev,
+			"failed to fill rx buffers, ret = %d\n", ret);
+		goto err_uninit_rx;
+	}
 
+	return 0;
+
+err_uninit_rx:
+	hbg_ring_uninit(&priv->rx_ring);
+err_uninit_tx:
+	hbg_ring_uninit(&priv->tx_ring);
 	return ret;
 }
 
@@ -258,4 +414,5 @@ void hbg_txrx_uninit(void *data)
 	struct hbg_priv *priv = data;
 
 	hbg_ring_uninit(&priv->tx_ring);
+	hbg_ring_uninit(&priv->rx_ring);
 }