From patchwork Wed Feb 8 23:57:18 2023
X-Patchwork-Submitter: Yoshihiro Shimoda
X-Patchwork-Id: 13133815
X-Patchwork-Delegate: kuba@kernel.org
From: Yoshihiro Shimoda
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com
Cc: netdev@vger.kernel.org, linux-renesas-soc@vger.kernel.org, Yoshihiro Shimoda
Subject: [PATCH net-next v2 1/4] net: renesas: rswitch: Rename rings in struct rswitch_gwca_queue
Date: Thu, 9 Feb 2023 08:57:18 +0900
Message-Id: <20230208235721.2336249-2-yoshihiro.shimoda.uh@renesas.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>
References: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>

To add a new ring that really is related to timestamps (ts_ring) in
the future, rename the following members to improve readability:

    ring    --> tx_ring
    ts_ring --> rx_ring

Signed-off-by: Yoshihiro Shimoda
---
 drivers/net/ethernet/renesas/rswitch.c | 64 +++++++++++++-------------
 drivers/net/ethernet/renesas/rswitch.h |  4 +-
 2 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 50b61a0a7f53..6207692f9c56 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -240,7 +240,7 @@ static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
 static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
 {
-	struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
+	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
 	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
 		return true;
@@ -283,13 +283,13 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 	if (gq->gptp) {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_ts_desc) *
-				  (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
-		gq->ts_ring = NULL;
+				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
+		gq->rx_ring = NULL;
 	} else {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_desc) *
-				  (gq->ring_size + 1), gq->ring, gq->ring_dma);
-		gq->ring = NULL;
+				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
+		gq->tx_ring = NULL;
 	}
 	if (!gq->dir_tx) {
@@ -321,14 +321,14 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
 	if (gptp)
-		gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
+		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
 				sizeof(struct rswitch_ext_ts_desc) *
 				(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
 	else
-		gq->ring = dma_alloc_coherent(ndev->dev.parent,
-				sizeof(struct rswitch_ext_desc) *
-				(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
-	if (!gq->ts_ring && !gq->ring)
+		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+				sizeof(struct rswitch_ext_desc) *
+				(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+	if (!gq->rx_ring && !gq->tx_ring)
 		goto out;
 	i = gq->index / 32;
@@ -361,14 +361,14 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 				     struct rswitch_private *priv,
 				     struct rswitch_gwca_queue *gq)
 {
-	int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
 	struct rswitch_ext_desc *desc;
 	struct rswitch_desc *linkfix;
 	dma_addr_t dma_addr;
 	int i;
-	memset(gq->ring, 0, tx_ring_size);
-	for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
+	memset(gq->tx_ring, 0, ring_size);
+	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
 						  gq->skbs[i]->data, PKT_BUF_SZ,
@@ -397,7 +397,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 err:
 	if (!gq->dir_tx) {
-		for (i--, desc = gq->ring; i >= 0; i--, desc++) {
+		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
 			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
 					 DMA_FROM_DEVICE);
@@ -407,9 +407,9 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	return -ENOMEM;
 }
-static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
-				      struct rswitch_gwca_queue *gq,
-				      int start_index, int num)
+static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
+					  struct rswitch_gwca_queue *gq,
+					  int start_index, int num)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
 	struct rswitch_ext_ts_desc *desc;
@@ -418,7 +418,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
 	for (i = 0; i < num; i++) {
 		index = (i + start_index) % gq->ring_size;
-		desc = &gq->ts_ring[index];
+		desc = &gq->rx_ring[index];
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
 						  gq->skbs[index]->data, PKT_BUF_SZ,
@@ -442,7 +442,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
 	if (!gq->dir_tx) {
 		for (i--; i >= 0; i--) {
 			index = (i + start_index) % gq->ring_size;
-			desc = &gq->ts_ring[index];
+			desc = &gq->rx_ring[index];
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
 			dma_unmap_single(ndev->dev.parent, dma_addr,
 					 PKT_BUF_SZ, DMA_FROM_DEVICE);
@@ -452,21 +452,21 @@
-static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
-					struct rswitch_private *priv,
-					struct rswitch_gwca_queue *gq)
+static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
+					    struct rswitch_private *priv,
+					    struct rswitch_gwca_queue *gq)
 {
-	int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
 	struct rswitch_ext_ts_desc *desc;
 	struct rswitch_desc *linkfix;
 	int err;
-	memset(gq->ts_ring, 0, tx_ts_ring_size);
-	err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
+	memset(gq->rx_ring, 0, ring_size);
+	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
 	if (err < 0)
 		return err;
-	desc = &gq->ts_ring[gq->ring_size]; /* Last */
+	desc = &gq->rx_ring[gq->ring_size]; /* Last */
 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
 	desc->desc.die_dt = DT_LINKFIX;
@@ -594,7 +594,7 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
 	struct rswitch_device *rdev = priv->rdev[index];
 	struct net_device *ndev = rdev->ndev;
-	return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
+	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
 }
 static int rswitch_gwca_hw_init(struct rswitch_private *priv)
@@ -675,7 +675,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	boguscnt = min_t(int, gq->ring_size, *quota);
 	limit = boguscnt;
-	desc = &gq->ts_ring[gq->cur];
+	desc = &gq->rx_ring[gq->cur];
 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
 		if (--boguscnt < 0)
 			break;
@@ -703,14 +703,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 		rdev->ndev->stats.rx_bytes += pkt_len;
 		gq->cur = rswitch_next_queue_index(gq, true, 1);
-		desc = &gq->ts_ring[gq->cur];
+		desc = &gq->rx_ring[gq->cur];
 	}
 	num = rswitch_get_num_cur_queues(gq);
 	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
-	ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
+	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
 	gq->dirty = rswitch_next_queue_index(gq, false, num);
@@ -737,7 +737,7 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
 	for (; rswitch_get_num_cur_queues(gq) > 0;
 	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
-		desc = &gq->ring[gq->dirty];
+		desc = &gq->tx_ring[gq->dirty];
 		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
 			break;
@@ -1390,7 +1390,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
 	}
 	gq->skbs[gq->cur] = skb;
-	desc = &gq->ring[gq->cur];
+	desc = &gq->tx_ring[gq->cur];
 	rswitch_desc_set_dptr(&desc->desc, dma_addr);
 	desc->desc.info_ds = cpu_to_le16(skb->len);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 59830ab91a69..390ec242ed69 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -915,8 +915,8 @@ struct rswitch_gwca_queue {
 	bool dir_tx;
 	bool gptp;
 	union {
-		struct rswitch_ext_desc *ring;
-		struct rswitch_ext_ts_desc *ts_ring;
+		struct rswitch_ext_desc *tx_ring;
+		struct rswitch_ext_ts_desc *rx_ring;
 	};
 	dma_addr_t ring_dma;
 	int ring_size;

From patchwork Wed Feb 8 23:57:19 2023
X-Patchwork-Submitter: Yoshihiro Shimoda
X-Patchwork-Id: 13133816
X-Patchwork-Delegate: kuba@kernel.org
From: Yoshihiro Shimoda
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com
Cc: netdev@vger.kernel.org, linux-renesas-soc@vger.kernel.org, Yoshihiro Shimoda
Subject: [PATCH net-next v2 2/4] net: renesas: rswitch: Move linkfix variables to rswitch_gwca
Date: Thu, 9 Feb 2023 08:57:19 +0900
Message-Id: <20230208235721.2336249-3-yoshihiro.shimoda.uh@renesas.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>
References: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>

To improve readability, move the linkfix related variables into struct
rswitch_gwca. Also, rename "desc" to "linkfix" in the related function
names.

Signed-off-by: Yoshihiro Shimoda
---
 drivers/net/ethernet/renesas/rswitch.c | 39 ++++++++++++++------------
 drivers/net/ethernet/renesas/rswitch.h |  6 ++--
 2 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 6207692f9c56..b256dadada1d 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -386,7 +386,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
 	desc->desc.die_dt = DT_LINKFIX;
-	linkfix = &priv->linkfix_table[gq->index];
+	linkfix = &priv->gwca.linkfix_table[gq->index];
 	linkfix->die_dt = DT_LINKFIX;
 	rswitch_desc_set_dptr(linkfix, gq->ring_dma);
@@ -470,7 +470,7 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
 	desc->desc.die_dt = DT_LINKFIX;
-	linkfix = &priv->linkfix_table[gq->index];
+	linkfix = &priv->gwca.linkfix_table[gq->index];
 	linkfix->die_dt = DT_LINKFIX;
 	rswitch_desc_set_dptr(linkfix, gq->ring_dma);
@@ -480,28 +480,31 @@ static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
 	return 0;
 }
-static int rswitch_gwca_desc_alloc(struct rswitch_private *priv)
+static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
 {
 	int i, num_queues = priv->gwca.num_queues;
+	struct rswitch_gwca *gwca = &priv->gwca;
 	struct device *dev = &priv->pdev->dev;
-	priv->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
-	priv->linkfix_table = dma_alloc_coherent(dev, priv->linkfix_table_size,
-						 &priv->linkfix_table_dma, GFP_KERNEL);
-	if (!priv->linkfix_table)
+	gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
+	gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
+						 &gwca->linkfix_table_dma, GFP_KERNEL);
+	if (!gwca->linkfix_table)
 		return -ENOMEM;
 	for (i = 0; i < num_queues; i++)
-		priv->linkfix_table[i].die_dt = DT_EOS;
+		gwca->linkfix_table[i].die_dt = DT_EOS;
 	return 0;
 }
-static void rswitch_gwca_desc_free(struct rswitch_private *priv)
+static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
 {
-	if (priv->linkfix_table)
-		dma_free_coherent(&priv->pdev->dev, priv->linkfix_table_size,
-				  priv->linkfix_table, priv->linkfix_table_dma);
-	priv->linkfix_table = NULL;
+	struct rswitch_gwca *gwca = &priv->gwca;
+
+	if (gwca->linkfix_table)
+		dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
+				  gwca->linkfix_table, gwca->linkfix_table_dma);
+	gwca->linkfix_table = NULL;
 }
 static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
@@ -617,8 +620,8 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
 	iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
 	iowrite32(0, priv->addr + GWTTFC);
-	iowrite32(lower_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC1);
-	iowrite32(upper_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC0);
+	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
+	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
 	rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);
 	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
@@ -1650,7 +1653,7 @@ static int rswitch_init(struct rswitch_private *priv)
 	if (err < 0)
 		return err;
-	err = rswitch_gwca_desc_alloc(priv);
+	err = rswitch_gwca_linkfix_alloc(priv);
 	if (err < 0)
 		return -ENOMEM;
@@ -1712,7 +1715,7 @@ static int rswitch_init(struct rswitch_private *priv)
 		rswitch_device_free(priv, i);
 err_device_alloc:
-	rswitch_gwca_desc_free(priv);
+	rswitch_gwca_linkfix_free(priv);
 	return err;
 }
@@ -1791,7 +1794,7 @@ static void rswitch_deinit(struct rswitch_private *priv)
 		rswitch_device_free(priv, i);
 	}
-	rswitch_gwca_desc_free(priv);
+	rswitch_gwca_linkfix_free(priv);
 	rswitch_clock_disable(priv);
 }
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 390ec242ed69..79c8ff01021c 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -930,6 +930,9 @@ struct rswitch_gwca_queue {
 #define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
 struct rswitch_gwca {
 	int index;
+	struct rswitch_desc *linkfix_table;
+	dma_addr_t linkfix_table_dma;
+	u32 linkfix_table_size;
 	struct rswitch_gwca_queue *queues;
 	int num_queues;
 	DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
@@ -969,9 +972,6 @@ struct rswitch_private {
 	struct platform_device *pdev;
 	void __iomem *addr;
 	struct rcar_gen4_ptp_private *ptp_priv;
-	struct rswitch_desc *linkfix_table;
-	dma_addr_t linkfix_table_dma;
-	u32 linkfix_table_size;
 	struct rswitch_device *rdev[RSWITCH_NUM_PORTS];

From patchwork Wed Feb 8 23:57:20 2023
X-Patchwork-Submitter: Yoshihiro Shimoda
X-Patchwork-Id: 13133817
X-Patchwork-Delegate: kuba@kernel.org
From: Yoshihiro Shimoda
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com
Cc: netdev@vger.kernel.org, linux-renesas-soc@vger.kernel.org, Yoshihiro Shimoda
Subject: [PATCH net-next v2 3/4] net: renesas: rswitch: Remove gptp flag from rswitch_gwca_queue
Date: Thu, 9 Feb 2023 08:57:20 +0900
Message-Id: <20230208235721.2336249-4-yoshihiro.shimoda.uh@renesas.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>
References: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>

In the previous code, the gptp flag was completely tied to !dir_tx in
struct rswitch_gwca_queue because rswitch_gwca_queue_alloc() was called
as follows:

< In rswitch_txdmac_alloc() >
	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
				       TX_RING_SIZE);
So, dir_tx = true and gptp = false.

< In rswitch_rxdmac_alloc() >
	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
				       RX_RING_SIZE);
So, dir_tx = false and gptp = true.

In the future, new queue handling for timestamps will be implemented,
and this gptp flag would be confusing. So, remove the gptp flag.

Signed-off-by: Yoshihiro Shimoda
---
 drivers/net/ethernet/renesas/rswitch.c | 26 +++++++++++---------------
 drivers/net/ethernet/renesas/rswitch.h |  1 -
 2 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index b256dadada1d..e408d10184e8 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -280,11 +280,14 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 {
 	int i;
-	if (gq->gptp) {
+	if (!gq->dir_tx) {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_ts_desc) *
 				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
 		gq->rx_ring = NULL;
+
+		for (i = 0; i < gq->ring_size; i++)
+			dev_kfree_skb(gq->skbs[i]);
 	} else {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_desc) *
@@ -292,11 +295,6 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 		gq->tx_ring = NULL;
 	}
-	if (!gq->dir_tx) {
-		for (i = 0; i < gq->ring_size; i++)
-			dev_kfree_skb(gq->skbs[i]);
-	}
-
 	kfree(gq->skbs);
 	gq->skbs = NULL;
 }
@@ -304,12 +302,11 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 				    struct rswitch_private *priv,
 				    struct rswitch_gwca_queue *gq,
-				    bool dir_tx, bool gptp, int ring_size)
+				    bool dir_tx, int ring_size)
 {
 	int i, bit;
 	gq->dir_tx = dir_tx;
-	gq->gptp = gptp;
 	gq->ring_size = ring_size;
 	gq->ndev = ndev;
@@ -317,17 +314,18 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 	if (!gq->skbs)
 		return -ENOMEM;
-	if (!dir_tx)
+	if (!dir_tx) {
 		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
-	if (gptp)
 		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
 				sizeof(struct rswitch_ext_ts_desc) *
 				(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
-	else
+	} else {
 		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
 				sizeof(struct rswitch_ext_desc) *
 				(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+	}
+
 	if (!gq->rx_ring && !gq->tx_ring)
 		goto out;
@@ -539,8 +537,7 @@ static int rswitch_txdmac_alloc(struct net_device *ndev)
 	if (!rdev->tx_queue)
 		return -EBUSY;
-	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
-				       TX_RING_SIZE);
+	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
 	if (err < 0) {
 		rswitch_gwca_put(priv, rdev->tx_queue);
 		return err;
@@ -574,8 +571,7 @@ static int rswitch_rxdmac_alloc(struct net_device *ndev)
 	if (!rdev->rx_queue)
 		return -EBUSY;
-	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
-				       RX_RING_SIZE);
+	err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
 	if (err < 0) {
 		rswitch_gwca_put(priv, rdev->rx_queue);
 		return err;
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 79c8ff01021c..ee36e8e896d2 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -913,7 +913,6 @@ struct rswitch_etha {
 struct rswitch_gwca_queue {
 	int index;
 	bool dir_tx;
-	bool gptp;
 	union {
 		struct rswitch_ext_desc *tx_ring;
 		struct rswitch_ext_ts_desc *rx_ring;

From patchwork Wed Feb 8 23:57:21 2023
X-Patchwork-Submitter: Yoshihiro Shimoda
X-Patchwork-Id: 13133818
X-Patchwork-Delegate: kuba@kernel.org
From: Yoshihiro Shimoda
To: davem@davemloft.net, edumazet@google.com, kuba@kernel.org, pabeni@redhat.com
Cc: netdev@vger.kernel.org, linux-renesas-soc@vger.kernel.org, Yoshihiro Shimoda
Subject: [PATCH net-next v2 4/4] net: renesas: rswitch: Improve TX timestamp accuracy
Date: Thu, 9 Feb 2023 08:57:21 +0900
Message-Id: <20230208235721.2336249-5-yoshihiro.shimoda.uh@renesas.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>
References: <20230208235721.2336249-1-yoshihiro.shimoda.uh@renesas.com>

In the previous code, TX timestamp accuracy was poor because the irq
handler read the current time from the timestamp register only when it
handled the interrupt. This hardware has a "Timestamp capture" feature
which can store each TX timestamp into the timestamp descriptors. To
improve TX timestamp accuracy, implement handling of these timestamp
descriptors.

Signed-off-by: Yoshihiro Shimoda
---
 drivers/net/ethernet/renesas/rswitch.c | 166 ++++++++++++++++++++++---
 drivers/net/ethernet/renesas/rswitch.h |  35 +++++-
 2 files changed, 179 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index e408d10184e8..4fba647835f2 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -123,13 +123,6 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
 	iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
 }
-/* gPTP timer (gPTP) */
-static void rswitch_get_timestamp(struct rswitch_private *priv,
-				  struct timespec64 *ts)
-{
-	priv->ptp_priv->info.gettime64(&priv->ptp_priv->info, ts);
-}
-
 /* Gateway CPU agent block (GWCA) */
 static int rswitch_gwca_change_mode(struct rswitch_private *priv,
 				    enum rswitch_gwca_mode mode)
@@ -299,6 +292,16 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 	gq->skbs = NULL;
 }
+static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
+{
+	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+
+	dma_free_coherent(&priv->pdev->dev,
+			  sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
+			  gq->ts_ring, gq->ring_dma);
+	gq->ts_ring = NULL;
+}
+
 static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 				    struct rswitch_private *priv,
 				    struct rswitch_gwca_queue *gq,
@@ -344,6 +347,17 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 	return -ENOMEM;
 }
+static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+{
+	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+
+	gq->ring_size = TS_RING_SIZE;
+	gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
+					 sizeof(struct rswitch_ts_desc) *
+					 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+	return !gq->ts_ring ? -ENOMEM : 0;
+}
+
 static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
 {
 	desc->dptrl = cpu_to_le32(lower_32_bits(addr));
@@ -405,6 +419,20 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	return -ENOMEM;
 }
+static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
+				       int start_index, int num)
+{
+	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+	struct rswitch_ts_desc *desc;
+	int i, index;
+
+	for (i = 0; i < num; i++) {
+		index = (i + start_index) % gq->ring_size;
+		desc = &gq->ts_ring[index];
+		desc->desc.die_dt = DT_FEMPTY_ND | DIE;
+	}
+}
+
 static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 					  struct rswitch_gwca_queue *gq,
 					  int start_index, int num)
@@ -618,6 +646,9 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
 	iowrite32(0, priv->addr + GWTTFC);
 	iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
 	iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
+	iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
+	iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
+	iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
 	rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);
 	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
@@ -744,15 +775,6 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
 		size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
 		skb = gq->skbs[gq->dirty];
 		if (skb) {
-			if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-				struct skb_shared_hwtstamps shhwtstamps;
-				struct timespec64 ts;
-
-				rswitch_get_timestamp(rdev->priv, &ts);
-				memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-				shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
-				skb_tstamp_tx(skb, &shhwtstamps);
-			}
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
 			dma_unmap_single(ndev->dev.parent, dma_addr,
 					 size, DMA_TO_DEVICE);
@@ -878,6 +900,73 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
 	return 0;
 }
+static void rswitch_ts(struct rswitch_private *priv)
+{
+	struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+	struct skb_shared_hwtstamps shhwtstamps;
+	struct rswitch_ts_desc *desc;
+	struct timespec64 ts;
+	u32 tag, port;
+	int num;
+
+	desc = &gq->ts_ring[gq->cur];
+	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
+		dma_rmb();
+
+		port = TS_DESC_DPN(desc->desc.dptrl);
+		tag = TS_DESC_TSUN(desc->desc.dptrl);
+
+		list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
+			if (!(ts_info->port == port && ts_info->tag == tag))
+				continue;
+
+			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+			ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+			shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+			skb_tstamp_tx(ts_info->skb, &shhwtstamps);
+			dev_consume_skb_irq(ts_info->skb);
+			list_del(&ts_info->list);
+			kfree(ts_info);
+			break;
+		}
+
+		gq->cur = rswitch_next_queue_index(gq, true, 1);
+		desc = &gq->ts_ring[gq->cur];
+	}
+
+	num = rswitch_get_num_cur_queues(gq);
+	rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
+	gq->dirty = rswitch_next_queue_index(gq, false, num);
+}
+
+static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
+{
+	struct rswitch_private *priv = dev_id;
+
+	if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
+		iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
+		rswitch_ts(priv);
+
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
+{
+	int irq;
+
+	irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
+	if (irq < 0)
+		return irq;
+
+	return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
+				0, GWCA_TS_IRQ_NAME, priv);
+}
+
 /* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
 static int rswitch_etha_change_mode(struct rswitch_etha *etha,
 				    enum rswitch_etha_mode mode)
@@ -1348,15 +1437,28 @@ static int rswitch_open(struct net_device *ndev)
 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+	iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+
 	return 0;
 };
 static int rswitch_stop(struct net_device *ndev)
 {
 	struct rswitch_device *rdev = netdev_priv(ndev);
+	struct rswitch_gwca_ts_info *ts_info, *ts_info2;
 	netif_tx_stop_all_queues(ndev);
+	iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+	list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
+		if (ts_info->port != rdev->port)
+			continue;
+		dev_kfree_skb_irq(ts_info->skb);
+		list_del(&ts_info->list);
+		kfree(ts_info);
+	}
+
 	rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
 	rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
@@ -1395,11 +1497,25 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
 	desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		struct rswitch_gwca_ts_info *ts_info;
+
+		ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+		if (!ts_info) {
+			dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+			return -ENOMEM;
+		}
+
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		rdev->ts_tag++;
 		desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
+
+		ts_info->skb = skb_get(skb);
+		ts_info->port = rdev->port;
+		ts_info->tag = rdev->ts_tag;
+		list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+
+		skb_tx_timestamp(skb);
 	}
-	skb_tx_timestamp(skb);
 	dma_wmb();
@@ -1653,6 +1769,13 @@ static int rswitch_init(struct rswitch_private *priv)
 	if (err < 0)
 		return -ENOMEM;
+	err = rswitch_gwca_ts_queue_alloc(priv);
+	if (err < 0)
+		goto err_ts_queue_alloc;
+
+	rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
+	INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+
 	for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
 		err = rswitch_device_alloc(priv, i);
 		if (err < 0) {
@@ -1673,6 +1796,10 @@ static int rswitch_init(struct rswitch_private *priv)
 	if (err < 0)
 		goto err_gwca_request_irq;
+	err = rswitch_gwca_ts_request_irqs(priv);
+	if (err < 0)
+		goto err_gwca_ts_request_irq;
+
 	err = rswitch_gwca_hw_init(priv);
 	if (err < 0)
 		goto err_gwca_hw_init;
@@ -1703,6 +1830,7 @@ static int rswitch_init(struct rswitch_private *priv)
 	rswitch_gwca_hw_deinit(priv);
 err_gwca_hw_init:
+err_gwca_ts_request_irq:
 err_gwca_request_irq:
 	rcar_gen4_ptp_unregister(priv->ptp_priv);
@@ -1711,6 +1839,9 @@ static int rswitch_init(struct rswitch_private *priv)
 		rswitch_device_free(priv, i);
 err_device_alloc:
+	rswitch_gwca_ts_queue_free(priv);
+
+err_ts_queue_alloc:
 	rswitch_gwca_linkfix_free(priv);
 	return err;
@@ -1790,6 +1921,7 @@ static void rswitch_deinit(struct rswitch_private *priv)
 		rswitch_device_free(priv, i);
 	}
+	rswitch_gwca_ts_queue_free(priv);
 	rswitch_gwca_linkfix_free(priv);
 	rswitch_clock_disable(priv);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index ee36e8e896d2..27d3d38c055f 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -27,6 +27,7 @@
 #define TX_RING_SIZE 1024
 #define RX_RING_SIZE 1024
+#define TS_RING_SIZE (TX_RING_SIZE * RSWITCH_NUM_PORTS)
 #define PKT_BUF_SZ 1584
 #define RSWITCH_ALIGN 128
@@ -49,6 +50,10 @@
 #define AGENT_INDEX_GWCA 3
 #define GWRO RSWITCH_GWCA0_OFFSET
+#define GWCA_TS_IRQ_RESOURCE_NAME "gwca0_rxts0"
+#define GWCA_TS_IRQ_NAME "rswitch: gwca0_rxts0"
+#define GWCA_TS_IRQ_BIT BIT(0)
+
 #define FWRO 0
 #define TPRO RSWITCH_TOP_OFFSET
 #define CARO RSWITCH_COMA_OFFSET
@@ -831,7 +836,7 @@ enum DIE_DT {
 	DT_FSINGLE = 0x80,
 	DT_FSTART = 0x90,
 	DT_FMID = 0xa0,
-	DT_FEND = 0xb8,
+	DT_FEND = 0xb0,
 	/* Chain control */
 	DT_LEMPTY = 0xc0,
@@ -843,7 +848,7 @@ enum DIE_DT {
 	DT_FEMPTY = 0x40,
 	DT_FEMPTY_IS = 0x10,
 	DT_FEMPTY_IC = 0x20,
-	DT_FEMPTY_ND = 0x38,
+	DT_FEMPTY_ND = 0x30,
 	DT_FEMPTY_START = 0x50,
 	DT_FEMPTY_MID = 0x60,
 	DT_FEMPTY_END = 0x70,
@@ -865,6 +870,12 @@ enum DIE_DT {
 /* For reception */
 #define INFO1_SPN(port) ((u64)(port) << 36ULL)
+/* For timestamp descriptor in dptrl (Byte 4 to 7) */
+#define TS_DESC_TSUN(dptrl) ((dptrl) & GENMASK(7, 0))
+#define TS_DESC_SPN(dptrl) (((dptrl) & GENMASK(10, 8)) >> 8)
+#define TS_DESC_DPN(dptrl) (((dptrl) & GENMASK(17, 16)) >> 16)
+#define TS_DESC_TN(dptrl) ((dptrl) & BIT(24))
+
 struct rswitch_desc {
 	__le16 info_ds; /* Descriptor size */
 	u8 die_dt; /* Descriptor interrupt enable and type */
@@ -911,21 +922,33 @@ struct rswitch_etha {
  * name, this driver calls "queue".
  */
 struct rswitch_gwca_queue {
-	int index;
-	bool dir_tx;
 	union {
 		struct rswitch_ext_desc *tx_ring;
 		struct rswitch_ext_ts_desc *rx_ring;
+		struct rswitch_ts_desc *ts_ring;
 	};
+
+	/* Common */
 	dma_addr_t ring_dma;
 	int ring_size;
 	int cur;
 	int dirty;
-	struct sk_buff **skbs;
+	/* For [rt]_ring */
+	int index;
+	bool dir_tx;
+	struct sk_buff **skbs;
 	struct net_device *ndev; /* queue to ndev for irq */
 };
+struct rswitch_gwca_ts_info {
+	struct sk_buff *skb;
+	struct list_head list;
+
+	int port;
+	u8 tag;
+};
+
 #define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
 struct rswitch_gwca {
 	int index;
@@ -934,6 +957,8 @@ struct rswitch_gwca {
 	u32 linkfix_table_size;
 	struct rswitch_gwca_queue *queues;
 	int num_queues;
+	struct rswitch_gwca_queue ts_queue;
+	struct list_head ts_info_list;
 	DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
 	u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
 	u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
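
[Editor's note, not part of the series] For readers who want to exercise the
improved TX timestamping from user space, below is a minimal sketch using the
generic SIOCSHWTSTAMP and SO_TIMESTAMPING interfaces that drivers like this
one plug into. The interface name "tsn0", the destination address, and the
UDP port are placeholders; error handling and proper polling of the socket
error queue are trimmed for brevity.

/* Minimal sketch: enable hardware TX timestamps on one port and read the
 * timestamp of a transmitted UDP packet back from the socket error queue.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <linux/errqueue.h>

int main(void)
{
	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
				       .rx_filter = HWTSTAMP_FILTER_NONE };
	struct ifreq ifr = { .ifr_name = "tsn0" };		/* placeholder */
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(12345) };	/* placeholder */
	int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	/* Ask the driver to timestamp transmitted frames. */
	ifr.ifr_data = (char *)&cfg;
	ioctl(fd, SIOCSHWTSTAMP, &ifr);

	/* Ask the stack to report raw hardware timestamps on this socket. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);		/* placeholder */
	sendto(fd, "ts", 2, 0, (struct sockaddr *)&dst, sizeof(dst));
	usleep(10000);	/* a real application would poll() the error queue */

	/* The timestamp comes back as SCM_TIMESTAMPING on the error queue. */
	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
		struct cmsghdr *cm;

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SCM_TIMESTAMPING) {
				struct scm_timestamping *tss =
					(struct scm_timestamping *)CMSG_DATA(cm);

				/* ts[2] holds the raw hardware timestamp. */
				printf("hw tx ts: %lld.%09ld\n",
				       (long long)tss->ts[2].tv_sec,
				       tss->ts[2].tv_nsec);
			}
		}
	}

	close(fd);
	return 0;
}

The series above only changes where the reported hardware timestamp comes
from (the timestamp descriptor ring instead of a register read in the irq
handler), so this user-visible flow is unchanged.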