[net-next,v2,7/8] net: stmmac: xgmac: Complete FPE support

Message ID: 1776606b2eda8430077551ca117b035f987b5b70.1729233020.git.0x1207@gmail.com
State: New
Series: net: stmmac: Refactor FPE as a separate module

Commit Message

Furong Xu Oct. 18, 2024, 6:39 a.m. UTC
Implement the necessary stmmac_fpe_ops function callbacks for xgmac.

Signed-off-by: Furong Xu <0x1207@gmail.com>
---
 .../net/ethernet/stmicro/stmmac/stmmac_fpe.c  | 77 +++++++++++++++++++
 1 file changed, 77 insertions(+)

Comments

Vladimir Oltean Oct. 18, 2024, 9:13 a.m. UTC | #1
On Fri, Oct 18, 2024 at 02:39:13PM +0800, Furong Xu wrote:
> Implement the necessary stmmac_fpe_ops function callbacks for xgmac.
> 
> Signed-off-by: Furong Xu <0x1207@gmail.com>
> ---
>  .../net/ethernet/stmicro/stmmac/stmmac_fpe.c  | 77 +++++++++++++++++++
>  1 file changed, 77 insertions(+)
> 
> diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
> index dfe911b3f486..c90ed7c1279d 100644
> --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
> +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c
> @@ -373,6 +373,78 @@ static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
>  			     &dwxgmac3_fpe_info);
>  }
>  
> +static int dwxgmac3_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
> +{
> +	return common_fpe_irq_status(ioaddr + XGMAC_MAC_FPE_CTRL_STS, dev);
> +}
> +
> +static void dwxgmac3_fpe_send_mpacket(void __iomem *ioaddr,
> +				      struct stmmac_fpe_cfg *cfg,
> +				      enum stmmac_mpacket_type type)
> +{
> +	common_fpe_send_mpacket(ioaddr + XGMAC_MAC_FPE_CTRL_STS, cfg, type);
> +}
> +
> +static int dwxgmac3_fpe_get_add_frag_size(const void __iomem *ioaddr)
> +{
> +	return FIELD_GET(FPE_MTL_ADD_FRAG_SZ,
> +			 readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS));
> +}
> +
> +static void dwxgmac3_fpe_set_add_frag_size(void __iomem *ioaddr,
> +					   u32 add_frag_size)
> +{
> +	u32 value;
> +
> +	value = readl(ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> +	writel(u32_replace_bits(value, add_frag_size, FPE_MTL_ADD_FRAG_SZ),
> +	       ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> +}
> +
> +static int dwxgmac3_fpe_map_preemption_class(struct net_device *ndev,
> +					     struct netlink_ext_ack *extack,
> +					     u32 pclass)
> +{
> +	u32 val, offset, count, preemptible_txqs = 0;
> +	struct stmmac_priv *priv = netdev_priv(ndev);
> +	u32 num_tc = ndev->num_tc;
> +
> +	if (!num_tc) {
> +		/* Restore default TC:Queue mapping */
> +		for (u32 i = 0; i < priv->plat->tx_queues_to_use; i++) {
> +			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
> +			writel(u32_replace_bits(val, i, XGMAC_Q2TCMAP),
> +			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(i));
> +		}
> +	}
> +
> +	/* Synopsys Databook:
> +	 * "All Queues within a traffic class are selected in a round robin
> +	 * fashion (when packets are available) when the traffic class is
> +	 * selected by the scheduler for packet transmission. This is true for
> +	 * any of the scheduling algorithms."
> +	 */
> +	for (u32 tc = 0; tc < num_tc; tc++) {
> +		count = ndev->tc_to_txq[tc].count;
> +		offset = ndev->tc_to_txq[tc].offset;
> +
> +		if (pclass & BIT(tc))
> +			preemptible_txqs |= GENMASK(offset + count - 1, offset);
> +
> +		for (u32 i = 0; i < count; i++) {
> +			val = readl(priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
> +			writel(u32_replace_bits(val, tc, XGMAC_Q2TCMAP),
> +			       priv->ioaddr + XGMAC_MTL_TXQ_OPMODE(offset + i));
> +		}
> +	}
> +
> +	val = readl(priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> +	writel(u32_replace_bits(val, preemptible_txqs, FPE_MTL_PREEMPTION_CLASS),
> +	       priv->ioaddr + XGMAC_MTL_FPE_CTRL_STS);
> +
> +	return 0;
> +}
> +
>  const struct stmmac_fpe_ops dwmac5_fpe_ops = {
>  	.fpe_configure = dwmac5_fpe_configure,
>  	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
> @@ -384,4 +456,9 @@ const struct stmmac_fpe_ops dwmac5_fpe_ops = {
>  
>  const struct stmmac_fpe_ops dwxgmac_fpe_ops = {
>  	.fpe_configure = dwxgmac3_fpe_configure,
> +	.fpe_send_mpacket = dwxgmac3_fpe_send_mpacket,
> +	.fpe_irq_status = dwxgmac3_fpe_irq_status,
> +	.fpe_get_add_frag_size = dwxgmac3_fpe_get_add_frag_size,
> +	.fpe_set_add_frag_size = dwxgmac3_fpe_set_add_frag_size,
> +	.fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
>  };

This is much better in terms of visibility into the change.

Though I cannot stop thinking that this implementation design:

stmmac_fpe_configure()
-> stmmac_do_void_callback()
   -> fpe_ops->fpe_configure()
      /                    \
     /                      \
    v                        v
dwmac5_fpe_configure   dwxgmac3_fpe_configure
     \                      /
      \                    /
       v                  v
       common_fpe_configure()

is, pardon the expression, stuffy.

If you aren't very opposed to the idea of having struct stmmac_fpe_ops
contain a mix of function pointers and integer constants, I would
suggest removing:

	.fpe_configure()
	.fpe_send_mpacket()
	.fpe_irq_status()
	.fpe_get_add_frag_size()
	.fpe_set_add_frag_size()

and just keeping a single function pointer, .fpe_map_preemption_class(),
inside stmmac_fpe_ops. Only that is sufficiently different to warrant a
completely separate implementation. Then move all current struct
stmmac_fpe_configure_info to struct stmmac_fpe_ops, and reimplement
stmmac_fpe_configure() directly like common_fpe_configure(),
stmmac_fpe_send_mpacket() directly like common_fpe_send_mpacket(), etc etc.
This lets us avoid the antipattern of calling a function pointer (hidden
by an opaque macro) from common code, only to gather some parameters to
call again a common implementation.
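
A minimal sketch of the suggested layout, purely for illustration (the
mac_fpe_reg/mtl_fpe_reg field names, the fpe_cfg.ops member and the
wrapper signature are assumptions, not code from this series; the
register macros, FPE_MTL_ADD_FRAG_SZ and the callback prototype are
taken from the patch above):

struct stmmac_fpe_ops {
	/* Only this operation differs enough to keep a callback */
	int (*fpe_map_preemption_class)(struct net_device *ndev,
					struct netlink_ext_ack *extack,
					u32 pclass);
	/* Per-core differences expressed as plain constants */
	u32 mac_fpe_reg;	/* MAC-side FPE control/status offset */
	u32 mtl_fpe_reg;	/* MTL-side FPE control/status offset */
};

const struct stmmac_fpe_ops dwxgmac_fpe_ops = {
	.fpe_map_preemption_class = dwxgmac3_fpe_map_preemption_class,
	.mac_fpe_reg = XGMAC_MAC_FPE_CTRL_STS,
	.mtl_fpe_reg = XGMAC_MTL_FPE_CTRL_STS,
};

/* Common code then reads the constant directly, with no indirection: */
static int stmmac_fpe_get_add_frag_size(struct stmmac_priv *priv)
{
	const struct stmmac_fpe_ops *ops = priv->fpe_cfg.ops;	/* hypothetical member */

	return FIELD_GET(FPE_MTL_ADD_FRAG_SZ,
			 readl(priv->ioaddr + ops->mtl_fpe_reg));
}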

I know this is a preposterous and heretical thing to suggest, but a person
who isn't knee-deep in stmmac has a very hard time locating himself in
space due to the unnecessarily complex layering. If that isn't something
that is important, feel free to ignore.
Russell King (Oracle) Oct. 18, 2024, 9:31 a.m. UTC | #2
On Fri, Oct 18, 2024 at 12:13:21PM +0300, Vladimir Oltean wrote:
> I know this is a preposterous and heretic thing to suggest, but a person
> who isn't knee-deep in stmmac has a very hard time locating himself in
> space due to the unnecessarily complex layering. If that isn't something
> that is important, feel free to ignore.

That is driven by Serge, who seems to want the smallest code possible,
with function pointers to abstract the minutest detail.
Like you, I disagree with this approach because it makes following the
code incredibly difficult unless one is constantly looking at it.

Serge wanted to do that to my PCS patches, and when I disagreed, he
stated basically that he'd convert the code to his style after my patches
were merged. So I deleted the patches from my tree. I've also asked that
stmmac remove its use of phylink, because stmmac abuses phylink and, with
Serge's attitude, that is effectively unfixable.
Furong Xu Oct. 18, 2024, 10 a.m. UTC | #3
On Fri, 18 Oct 2024 12:13:21 +0300, Vladimir Oltean <olteanv@gmail.com> wrote:

> This is much better in terms of visibility into the change.
> 
> Though I cannot stop thinking that this implementation design:
> 
> stmmac_fpe_configure()
> -> stmmac_do_void_callback()
>    -> fpe_ops->fpe_configure()  
>       /                    \
>      /                      \
>     v                        v
> dwmac5_fpe_configure   dwxgmac3_fpe_configure
>      \                      /
>       \                    /
>        v                  v
>        common_fpe_configure()
> 
> is, pardon the expression, stuffy.
> 
> If you aren't very opposed to the idea of having struct stmmac_fpe_ops
> contain a mix of function pointers and integer constants, I would
> suggest removing:
> 
> 	.fpe_configure()
> 	.fpe_send_mpacket()
> 	.fpe_irq_status()
> 	.fpe_get_add_frag_size()
> 	.fpe_set_add_frag_size()
> 
> and just keeping a single function pointer, .fpe_map_preemption_class(),
> inside stmmac_fpe_ops. Only that is sufficiently different to warrant a
> completely separate implementation. Then move all current struct
> stmmac_fpe_configure_info to struct stmmac_fpe_ops, and reimplement
> stmmac_fpe_configure() directly like common_fpe_configure(),
> stmmac_fpe_send_mpacket() directly like common_fpe_send_mpacket(), etc etc.
> This lets us avoid the antipattern of calling a function pointer (hidden
> by an opaque macro) from common code, only to gather some parameters to
> call again a common implementation.
> 
> I know this is a preposterous and heretical thing to suggest, but a person
> who isn't knee-deep in stmmac has a very hard time locating himself in
> space due to the unnecessarily complex layering. If that isn't something
> that is important, feel free to ignore.

In fact, I can drop stmmac_fpe_ops altogether and avoid the antipattern of
calling a function pointer for good.
Since this is a new module, we can try something new ;)
Thanks.
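
One possible shape of dropping the ops structure entirely, sketched under
the assumption that the core type can be distinguished via
priv->plat->has_xgmac and that the GMAC4/5 register macro is named
MAC_FPE_CTRL_STS; the wrapper signatures are illustrative, not code from
this series:

static void __iomem *stmmac_fpe_mac_ctrl_sts(struct stmmac_priv *priv)
{
	/* The two cores only differ in where the FPE registers live */
	if (priv->plat->has_xgmac)
		return priv->ioaddr + XGMAC_MAC_FPE_CTRL_STS;

	return priv->ioaddr + MAC_FPE_CTRL_STS;
}

int stmmac_fpe_irq_status(struct stmmac_priv *priv, struct net_device *dev)
{
	/* Single implementation; the only per-core input is the offset */
	return common_fpe_irq_status(stmmac_fpe_mac_ctrl_sts(priv), dev);
}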