[net-next,v1] igc: offload queue max SDU from tc-taprio

Message ID 20221214144514.15931-1-muhammad.husaini.zulkifli@intel.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series: [net-next,v1] igc: offload queue max SDU from tc-taprio

Checks

Context                  Check      Description
netdev/tree_selection    success    Clearly marked for net-next
netdev/apply             fail       Patch does not apply to net-next

Commit Message

Zulkifli, Muhammad Husaini Dec. 14, 2022, 2:45 p.m. UTC
From: Tan Tee Min <tee.min.tan@linux.intel.com>

Add support for configuring the max SDU for each Tx queue.
If not specified, keep the default.
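
From user space, the per-queue limit is configured through tc-taprio's
max-sdu option, for example (illustrative values only; interface name,
schedule and mapping are placeholders, offload is enabled via flags 0x2):

  tc qdisc replace dev eth0 parent root handle 100 taprio \
      num_tc 3 \
      map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
      queues 1@0 1@1 2@2 \
      base-time 1000000000 \
      sched-entry S 01 300000 \
      sched-entry S 02 300000 \
      sched-entry S 04 400000 \
      max-sdu 0 0 200 \
      flags 0x2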

Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
---
 drivers/net/ethernet/intel/igc/igc.h      |  1 +
 drivers/net/ethernet/intel/igc/igc_main.c | 45 +++++++++++++++++++++++
 include/net/pkt_sched.h                   |  1 +
 net/sched/sch_taprio.c                    |  4 +-
 4 files changed, 50 insertions(+), 1 deletion(-)

Comments

Vinicius Costa Gomes Dec. 14, 2022, 5:17 p.m. UTC | #1
Hi,

Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com> writes:

> From: Tan Tee Min <tee.min.tan@linux.intel.com>
>
> Add support for configuring the max SDU for each Tx queue.
> If not specified, keep the default.
>
> Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
> Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
> ---
>  drivers/net/ethernet/intel/igc/igc.h      |  1 +
>  drivers/net/ethernet/intel/igc/igc_main.c | 45 +++++++++++++++++++++++
>  include/net/pkt_sched.h                   |  1 +
>  net/sched/sch_taprio.c                    |  4 +-
>  4 files changed, 50 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
> index 5da8d162cd38..ce9e88687d8c 100644
> --- a/drivers/net/ethernet/intel/igc/igc.h
> +++ b/drivers/net/ethernet/intel/igc/igc.h
> @@ -99,6 +99,7 @@ struct igc_ring {
>  
>  	u32 start_time;
>  	u32 end_time;
> +	u32 max_sdu;
>  
>  	/* CBS parameters */
>  	bool cbs_enable;                /* indicates if CBS is enabled */
> diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
> index e07287e05862..7ce05c31e371 100644
> --- a/drivers/net/ethernet/intel/igc/igc_main.c
> +++ b/drivers/net/ethernet/intel/igc/igc_main.c
> @@ -1508,6 +1508,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
>  	__le32 launch_time = 0;
>  	u32 tx_flags = 0;
>  	unsigned short f;
> +	u32 max_sdu = 0;
>  	ktime_t txtime;
>  	u8 hdr_len = 0;
>  	int tso = 0;
> @@ -1527,6 +1528,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
>  		return NETDEV_TX_BUSY;
>  	}
>  
> +	if (tx_ring->max_sdu > 0) {
> +		if (skb_vlan_tagged(skb))
> +			max_sdu = tx_ring->max_sdu + VLAN_HLEN;
> +		else
> +			max_sdu = tx_ring->max_sdu;

perhaps this?
    max_sdu = tx_ring->max_sdu + (skb_vlan_tagged(skb) ? VLAN_HLEN : 0);

Totally optional.
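
With the suggestion applied, the whole check would read (just a sketch,
declaring max_sdu locally instead of at function scope):

	if (tx_ring->max_sdu > 0) {
		u32 max_sdu = tx_ring->max_sdu +
			      (skb_vlan_tagged(skb) ? VLAN_HLEN : 0);

		if (skb->len > max_sdu)
			goto skb_drop;
	}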

> +
> +		if (skb->len > max_sdu)
> +			goto skb_drop;
> +	}
> +

I don't think the overhead would be measurable for the pkt/s rates that
a 2.5G link can handle. But a test and a note in the commit message
confirming that would be nice.

>  	if (!tx_ring->launchtime_enable)
>  		goto done;
>  
> @@ -1606,6 +1617,12 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
>  	dev_kfree_skb_any(first->skb);
>  	first->skb = NULL;
>  
> +	return NETDEV_TX_OK;
> +
> +skb_drop:
> +	dev_kfree_skb_any(skb);
> +	skb = NULL;
> +
>  	return NETDEV_TX_OK;
>  }
>  
> @@ -6015,6 +6032,7 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
>  
>  		ring->start_time = 0;
>  		ring->end_time = NSEC_PER_SEC;
> +		ring->max_sdu = 0;
>  	}
>  
>  	return 0;
> @@ -6097,6 +6115,15 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
>  		}
>  	}
>  
> +	for (i = 0; i < adapter->num_tx_queues; i++) {
> +		struct igc_ring *ring = adapter->tx_ring[i];
> +
> +		if (qopt->max_frm_len[i] == U32_MAX)
> +			ring->max_sdu = 0;
> +		else
> +			ring->max_sdu = qopt->max_frm_len[i];
> +	}
> +
>  	return 0;
>  }
>  
> @@ -6184,12 +6211,30 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
>  	return igc_tsn_offload_apply(adapter);
>  }
>  
> +static int igc_tsn_query_caps(struct tc_query_caps_base *base)
> +{
> +	switch (base->type) {
> +	case TC_SETUP_QDISC_TAPRIO: {
> +		struct tc_taprio_caps *caps = base->caps;
> +
> +		caps->supports_queue_max_sdu = true;
> +
> +		return 0;
> +	}
> +	default:
> +		return -EOPNOTSUPP;
> +	}
> +}
> +
>  static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
>  			void *type_data)
>  {
>  	struct igc_adapter *adapter = netdev_priv(dev);
>  
>  	switch (type) {
> +	case TC_QUERY_CAPS:
> +		return igc_tsn_query_caps(type_data);
> +
>  	case TC_SETUP_QDISC_TAPRIO:
>  		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
>  
> diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
> index 38207873eda6..d2539b1f6529 100644
> --- a/include/net/pkt_sched.h
> +++ b/include/net/pkt_sched.h
> @@ -178,6 +178,7 @@ struct tc_taprio_qopt_offload {
>  	u64 cycle_time;
>  	u64 cycle_time_extension;
>  	u32 max_sdu[TC_MAX_QUEUE];
> +	u32 max_frm_len[TC_MAX_QUEUE];
>

'max_frm_len' is an internal taprio optimization, to simplify the code
where the underlying HW doesn't support offload.

For offloading, only 'max_sdu' should be used. Unless you have a strong
reason. If you have that reason, it should be a separate commit.

>  	size_t num_entries;
>  	struct tc_taprio_sched_entry entries[];
> diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
> index 570389f6cdd7..d39164074756 100644
> --- a/net/sched/sch_taprio.c
> +++ b/net/sched/sch_taprio.c
> @@ -1263,8 +1263,10 @@ static int taprio_enable_offload(struct net_device *dev,
>  	offload->enable = 1;
>  	taprio_sched_to_offload(dev, sched, offload);
>  
> -	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
> +	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
>  		offload->max_sdu[tc] = q->max_sdu[tc];
> +		offload->max_frm_len[tc] = q->max_frm_len[tc];
> +	}
>  
>  	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
>  	if (err < 0) {
> -- 
> 2.17.1
>
Zulkifli, Muhammad Husaini Dec. 15, 2022, 6:05 a.m. UTC | #2
Hi Vinicius,

> -----Original Message-----
> From: Gomes, Vinicius <vinicius.gomes@intel.com>
> Sent: Thursday, 15 December, 2022 1:17 AM
> To: Zulkifli, Muhammad Husaini <muhammad.husaini.zulkifli@intel.com>;
> intel-wired-lan@osuosl.org
> Cc: tee.min.tan@linux.intel.com; davem@davemloft.net; kuba@kernel.org;
> netdev@vger.kernel.org; Zulkifli, Muhammad Husaini
> <muhammad.husaini.zulkifli@intel.com>; naamax.meir@linux.intel.com;
> Nguyen, Anthony L <anthony.l.nguyen@intel.com>
> Subject: Re: [PATCH net-next v1] igc: offload queue max SDU from tc-taprio
> 
> Hi,
> 
> Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com> writes:
> 
> > From: Tan Tee Min <tee.min.tan@linux.intel.com>
> >
> > Add support for configuring the max SDU for each Tx queue.
> > If not specified, keep the default.
> >
> > Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
> > Signed-off-by: Muhammad Husaini Zulkifli
> > <muhammad.husaini.zulkifli@intel.com>
> > ---
> >  drivers/net/ethernet/intel/igc/igc.h      |  1 +
> >  drivers/net/ethernet/intel/igc/igc_main.c | 45
> +++++++++++++++++++++++
> >  include/net/pkt_sched.h                   |  1 +
> >  net/sched/sch_taprio.c                    |  4 +-
> >  4 files changed, 50 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/ethernet/intel/igc/igc.h
> > b/drivers/net/ethernet/intel/igc/igc.h
> > index 5da8d162cd38..ce9e88687d8c 100644
> > --- a/drivers/net/ethernet/intel/igc/igc.h
> > +++ b/drivers/net/ethernet/intel/igc/igc.h
> > @@ -99,6 +99,7 @@ struct igc_ring {
> >
> >  	u32 start_time;
> >  	u32 end_time;
> > +	u32 max_sdu;
> >
> >  	/* CBS parameters */
> >  	bool cbs_enable;                /* indicates if CBS is enabled */
> > diff --git a/drivers/net/ethernet/intel/igc/igc_main.c
> > b/drivers/net/ethernet/intel/igc/igc_main.c
> > index e07287e05862..7ce05c31e371 100644
> > --- a/drivers/net/ethernet/intel/igc/igc_main.c
> > +++ b/drivers/net/ethernet/intel/igc/igc_main.c
> > @@ -1508,6 +1508,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct
> sk_buff *skb,
> >  	__le32 launch_time = 0;
> >  	u32 tx_flags = 0;
> >  	unsigned short f;
> > +	u32 max_sdu = 0;
> >  	ktime_t txtime;
> >  	u8 hdr_len = 0;
> >  	int tso = 0;
> > @@ -1527,6 +1528,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct
> sk_buff *skb,
> >  		return NETDEV_TX_BUSY;
> >  	}
> >
> > +	if (tx_ring->max_sdu > 0) {
> > +		if (skb_vlan_tagged(skb))
> > +			max_sdu = tx_ring->max_sdu + VLAN_HLEN;
> > +		else
> > +			max_sdu = tx_ring->max_sdu;
> 
> perhaps this?
>     max_sdu = tx_ring->max_sdu + (skb_vlan_tagged(skb) ? VLAN_HLEN : 0);
> 
> Totally optional.

Sure. We can change it to the above suggestion.

> 
> > +
> > +		if (skb->len > max_sdu)
> > +			goto skb_drop;
> > +	}
> > +
> 
> I don't think the overhead would be measurable for the pkt/s rates that a
> 2.5G link can handle. But a test and a note in the commit message confirming
> that would be nice.

IMHO, it should not depend on the link speed, only on the packet size.
If we detect a packet larger than max_sdu, we will just drop it.

> 
> >  	if (!tx_ring->launchtime_enable)
> >  		goto done;
> >
> > @@ -1606,6 +1617,12 @@ static netdev_tx_t igc_xmit_frame_ring(struct
> sk_buff *skb,
> >  	dev_kfree_skb_any(first->skb);
> >  	first->skb = NULL;
> >
> > +	return NETDEV_TX_OK;
> > +
> > +skb_drop:
> > +	dev_kfree_skb_any(skb);
> > +	skb = NULL;
> > +
> >  	return NETDEV_TX_OK;
> >  }
> >
> > @@ -6015,6 +6032,7 @@ static int igc_tsn_clear_schedule(struct
> > igc_adapter *adapter)
> >
> >  		ring->start_time = 0;
> >  		ring->end_time = NSEC_PER_SEC;
> > +		ring->max_sdu = 0;
> >  	}
> >
> >  	return 0;
> > @@ -6097,6 +6115,15 @@ static int igc_save_qbv_schedule(struct
> igc_adapter *adapter,
> >  		}
> >  	}
> >
> > +	for (i = 0; i < adapter->num_tx_queues; i++) {
> > +		struct igc_ring *ring = adapter->tx_ring[i];
> > +
> > +		if (qopt->max_frm_len[i] == U32_MAX)
> > +			ring->max_sdu = 0;
> > +		else
> > +			ring->max_sdu = qopt->max_frm_len[i];
> > +	}
> > +
> >  	return 0;
> >  }
> >
> > @@ -6184,12 +6211,30 @@ static int igc_tsn_enable_cbs(struct igc_adapter
> *adapter,
> >  	return igc_tsn_offload_apply(adapter);  }
> >
> > +static int igc_tsn_query_caps(struct tc_query_caps_base *base) {
> > +	switch (base->type) {
> > +	case TC_SETUP_QDISC_TAPRIO: {
> > +		struct tc_taprio_caps *caps = base->caps;
> > +
> > +		caps->supports_queue_max_sdu = true;
> > +
> > +		return 0;
> > +	}
> > +	default:
> > +		return -EOPNOTSUPP;
> > +	}
> > +}
> > +
> >  static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
> >  			void *type_data)
> >  {
> >  	struct igc_adapter *adapter = netdev_priv(dev);
> >
> >  	switch (type) {
> > +	case TC_QUERY_CAPS:
> > +		return igc_tsn_query_caps(type_data);
> > +
> >  	case TC_SETUP_QDISC_TAPRIO:
> >  		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
> >
> > diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index
> > 38207873eda6..d2539b1f6529 100644
> > --- a/include/net/pkt_sched.h
> > +++ b/include/net/pkt_sched.h
> > @@ -178,6 +178,7 @@ struct tc_taprio_qopt_offload {
> >  	u64 cycle_time;
> >  	u64 cycle_time_extension;
> >  	u32 max_sdu[TC_MAX_QUEUE];
> > +	u32 max_frm_len[TC_MAX_QUEUE];
> >
> 
> 'max_frm_len' is an internal taprio optimization, to simplify the code where
> the underlying HW doesn't support offload.

The max_sdu covers only the MTU payload size. The reason we are using
max_frm_len here is to get the header + MTU size together.

We can instead use max_sdu + header length in igc_save_qbv_schedule() and
remove this change from pkt_sched.h.
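
Something like this (untested sketch; it assumes ETH_HLEN accounts for
the L2 header, with VLAN still handled at transmit time as in the patch):

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		if (qopt->max_sdu[i])
			ring->max_sdu = qopt->max_sdu[i] + ETH_HLEN;
		else
			ring->max_sdu = 0;
	}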

> 
> For offloading, only 'max_sdu' should be used. Unless you have a strong
> reason. If you have that reason, it should be a separate commit.
> 
> >  	size_t num_entries;
> >  	struct tc_taprio_sched_entry entries[]; diff --git
> > a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index
> > 570389f6cdd7..d39164074756 100644
> > --- a/net/sched/sch_taprio.c
> > +++ b/net/sched/sch_taprio.c
> > @@ -1263,8 +1263,10 @@ static int taprio_enable_offload(struct
> net_device *dev,
> >  	offload->enable = 1;
> >  	taprio_sched_to_offload(dev, sched, offload);
> >
> > -	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
> > +	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
> >  		offload->max_sdu[tc] = q->max_sdu[tc];
> > +		offload->max_frm_len[tc] = q->max_frm_len[tc];
> > +	}
> >
> >  	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
> >  	if (err < 0) {
> > --
> > 2.17.1
> >
> 
> --
> Vinicius
Vinicius Costa Gomes Dec. 15, 2022, 4:04 p.m. UTC | #3
Hi Husaini,

"Zulkifli, Muhammad Husaini" <muhammad.husaini.zulkifli@intel.com>
writes:

> Hi Vinicius,
>
>> -----Original Message-----
>> From: Gomes, Vinicius <vinicius.gomes@intel.com>
>> Sent: Thursday, 15 December, 2022 1:17 AM
>> To: Zulkifli, Muhammad Husaini <muhammad.husaini.zulkifli@intel.com>;
>> intel-wired-lan@osuosl.org
>> Cc: tee.min.tan@linux.intel.com; davem@davemloft.net; kuba@kernel.org;
>> netdev@vger.kernel.org; Zulkifli, Muhammad Husaini
>> <muhammad.husaini.zulkifli@intel.com>; naamax.meir@linux.intel.com;
>> Nguyen, Anthony L <anthony.l.nguyen@intel.com>
>> Subject: Re: [PATCH net-next v1] igc: offload queue max SDU from tc-taprio
>> 
>> Hi,
>> 
>> Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com> writes:
>> 
>> > From: Tan Tee Min <tee.min.tan@linux.intel.com>
>> >
>> > Add support for configuring the max SDU for each Tx queue.
>> > If not specified, keep the default.
>> >
>> > Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
>> > Signed-off-by: Muhammad Husaini Zulkifli
>> > <muhammad.husaini.zulkifli@intel.com>
>> > ---
>> >  drivers/net/ethernet/intel/igc/igc.h      |  1 +
>> >  drivers/net/ethernet/intel/igc/igc_main.c | 45
>> +++++++++++++++++++++++
>> >  include/net/pkt_sched.h                   |  1 +
>> >  net/sched/sch_taprio.c                    |  4 +-
>> >  4 files changed, 50 insertions(+), 1 deletion(-)
>> >
>> > diff --git a/drivers/net/ethernet/intel/igc/igc.h
>> > b/drivers/net/ethernet/intel/igc/igc.h
>> > index 5da8d162cd38..ce9e88687d8c 100644
>> > --- a/drivers/net/ethernet/intel/igc/igc.h
>> > +++ b/drivers/net/ethernet/intel/igc/igc.h
>> > @@ -99,6 +99,7 @@ struct igc_ring {
>> >
>> >  	u32 start_time;
>> >  	u32 end_time;
>> > +	u32 max_sdu;
>> >
>> >  	/* CBS parameters */
>> >  	bool cbs_enable;                /* indicates if CBS is enabled */
>> > diff --git a/drivers/net/ethernet/intel/igc/igc_main.c
>> > b/drivers/net/ethernet/intel/igc/igc_main.c
>> > index e07287e05862..7ce05c31e371 100644
>> > --- a/drivers/net/ethernet/intel/igc/igc_main.c
>> > +++ b/drivers/net/ethernet/intel/igc/igc_main.c
>> > @@ -1508,6 +1508,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct
>> sk_buff *skb,
>> >  	__le32 launch_time = 0;
>> >  	u32 tx_flags = 0;
>> >  	unsigned short f;
>> > +	u32 max_sdu = 0;
>> >  	ktime_t txtime;
>> >  	u8 hdr_len = 0;
>> >  	int tso = 0;
>> > @@ -1527,6 +1528,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct
>> sk_buff *skb,
>> >  		return NETDEV_TX_BUSY;
>> >  	}
>> >
>> > +	if (tx_ring->max_sdu > 0) {
>> > +		if (skb_vlan_tagged(skb))
>> > +			max_sdu = tx_ring->max_sdu + VLAN_HLEN;
>> > +		else
>> > +			max_sdu = tx_ring->max_sdu;
>> 
>> perhaps this?
>>     max_sdu = tx_ring->max_sdu + (skb_vlan_tagged(skb) ? VLAN_HLEN : 0);
>> 
>> Totally optional.
>
> Sure. We can change it to the above suggestion.
>
>> 
>> > +
>> > +		if (skb->len > max_sdu)
>> > +			goto skb_drop;
>> > +	}
>> > +
>> 
>> I don't think the overhead would be measurable for the pkt/s rates that a
>> 2.5G link can handle. But a test and a note in the commit message confirming
>> that would be nice.
>
> IMHO, it should not depend on the link speed, only on the packet size.
> If we detect a packet larger than max_sdu, we will just drop it.
>

I was thinking more about the added conditional on the hot path, i.e.
whether it has any measurable impact in the case where packets are not
dropped. I really don't think there will be any, but it would be nice to
have some numbers confirming that.
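
If you want an easy way to get those numbers, the in-tree pktgen
scripts should do (illustrative invocation; interface, addresses and
sizes are placeholders):

	# from the kernel source tree
	cd samples/pktgen
	./pktgen_sample03_burst_single_flow.sh -i enp3s0 \
		-d 192.168.1.2 -m aa:bb:cc:dd:ee:ff -s 64 -b 32

Comparing pkt/s with and without a queue max-sdu configured should be
enough to show whether the extra branch matters.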

>> 
>> >  	if (!tx_ring->launchtime_enable)
>> >  		goto done;
>> >
>> > @@ -1606,6 +1617,12 @@ static netdev_tx_t igc_xmit_frame_ring(struct
>> sk_buff *skb,
>> >  	dev_kfree_skb_any(first->skb);
>> >  	first->skb = NULL;
>> >
>> > +	return NETDEV_TX_OK;
>> > +
>> > +skb_drop:
>> > +	dev_kfree_skb_any(skb);
>> > +	skb = NULL;
>> > +
>> >  	return NETDEV_TX_OK;
>> >  }
>> >
>> > @@ -6015,6 +6032,7 @@ static int igc_tsn_clear_schedule(struct
>> > igc_adapter *adapter)
>> >
>> >  		ring->start_time = 0;
>> >  		ring->end_time = NSEC_PER_SEC;
>> > +		ring->max_sdu = 0;
>> >  	}
>> >
>> >  	return 0;
>> > @@ -6097,6 +6115,15 @@ static int igc_save_qbv_schedule(struct
>> igc_adapter *adapter,
>> >  		}
>> >  	}
>> >
>> > +	for (i = 0; i < adapter->num_tx_queues; i++) {
>> > +		struct igc_ring *ring = adapter->tx_ring[i];
>> > +
>> > +		if (qopt->max_frm_len[i] == U32_MAX)
>> > +			ring->max_sdu = 0;
>> > +		else
>> > +			ring->max_sdu = qopt->max_frm_len[i];
>> > +	}
>> > +
>> >  	return 0;
>> >  }
>> >
>> > @@ -6184,12 +6211,30 @@ static int igc_tsn_enable_cbs(struct igc_adapter
>> *adapter,
>> >  	return igc_tsn_offload_apply(adapter);  }
>> >
>> > +static int igc_tsn_query_caps(struct tc_query_caps_base *base) {
>> > +	switch (base->type) {
>> > +	case TC_SETUP_QDISC_TAPRIO: {
>> > +		struct tc_taprio_caps *caps = base->caps;
>> > +
>> > +		caps->supports_queue_max_sdu = true;
>> > +
>> > +		return 0;
>> > +	}
>> > +	default:
>> > +		return -EOPNOTSUPP;
>> > +	}
>> > +}
>> > +
>> >  static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
>> >  			void *type_data)
>> >  {
>> >  	struct igc_adapter *adapter = netdev_priv(dev);
>> >
>> >  	switch (type) {
>> > +	case TC_QUERY_CAPS:
>> > +		return igc_tsn_query_caps(type_data);
>> > +
>> >  	case TC_SETUP_QDISC_TAPRIO:
>> >  		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
>> >
>> > diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index
>> > 38207873eda6..d2539b1f6529 100644
>> > --- a/include/net/pkt_sched.h
>> > +++ b/include/net/pkt_sched.h
>> > @@ -178,6 +178,7 @@ struct tc_taprio_qopt_offload {
>> >  	u64 cycle_time;
>> >  	u64 cycle_time_extension;
>> >  	u32 max_sdu[TC_MAX_QUEUE];
>> > +	u32 max_frm_len[TC_MAX_QUEUE];
>> >
>> 
>> 'max_frm_len' is an internal taprio optimization, to simplify the code where
>> the underlying HW doesn't support offload.
>
> The max_sdu covers only the MTU payload size. The reason we are using
> max_frm_len here is to get the header + MTU size together.
>
> We can instead use max_sdu + header length in igc_save_qbv_schedule() and
> remove this change from pkt_sched.h.

This sounds better, only exposing max_sdu to the drivers, even if it
causes a bit of duplicated code.

>
>> 
>> For offloading, only 'max_sdu' should be used. Unless you have a strong
>> reason. If you have that reason, it should be a separate commit.
>> 
>> >  	size_t num_entries;
>> >  	struct tc_taprio_sched_entry entries[]; diff --git
>> > a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index
>> > 570389f6cdd7..d39164074756 100644
>> > --- a/net/sched/sch_taprio.c
>> > +++ b/net/sched/sch_taprio.c
>> > @@ -1263,8 +1263,10 @@ static int taprio_enable_offload(struct
>> net_device *dev,
>> >  	offload->enable = 1;
>> >  	taprio_sched_to_offload(dev, sched, offload);
>> >
>> > -	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
>> > +	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
>> >  		offload->max_sdu[tc] = q->max_sdu[tc];
>> > +		offload->max_frm_len[tc] = q->max_frm_len[tc];
>> > +	}
>> >
>> >  	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
>> >  	if (err < 0) {
>> > --
>> > 2.17.1
>> >
>> 
>> --
>> Vinicius
naamax.meir Jan. 1, 2023, 11:13 a.m. UTC | #4
On 12/14/2022 16:45, Muhammad Husaini Zulkifli wrote:
> From: Tan Tee Min <tee.min.tan@linux.intel.com>
> 
> Add support for configuring the max SDU for each Tx queue.
> If not specified, keep the default.
> 
> Signed-off-by: Tan Tee Min <tee.min.tan@linux.intel.com>
> Signed-off-by: Muhammad Husaini Zulkifli <muhammad.husaini.zulkifli@intel.com>
> ---
>   drivers/net/ethernet/intel/igc/igc.h      |  1 +
>   drivers/net/ethernet/intel/igc/igc_main.c | 45 +++++++++++++++++++++++
>   include/net/pkt_sched.h                   |  1 +
>   net/sched/sch_taprio.c                    |  4 +-
>   4 files changed, 50 insertions(+), 1 deletion(-)
Tested-by: Naama Meir <naamax.meir@linux.intel.com>

Patch

diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 5da8d162cd38..ce9e88687d8c 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -99,6 +99,7 @@  struct igc_ring {
 
 	u32 start_time;
 	u32 end_time;
+	u32 max_sdu;
 
 	/* CBS parameters */
 	bool cbs_enable;                /* indicates if CBS is enabled */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e07287e05862..7ce05c31e371 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1508,6 +1508,7 @@  static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	__le32 launch_time = 0;
 	u32 tx_flags = 0;
 	unsigned short f;
+	u32 max_sdu = 0;
 	ktime_t txtime;
 	u8 hdr_len = 0;
 	int tso = 0;
@@ -1527,6 +1528,16 @@  static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
+	if (tx_ring->max_sdu > 0) {
+		if (skb_vlan_tagged(skb))
+			max_sdu = tx_ring->max_sdu + VLAN_HLEN;
+		else
+			max_sdu = tx_ring->max_sdu;
+
+		if (skb->len > max_sdu)
+			goto skb_drop;
+	}
+
 	if (!tx_ring->launchtime_enable)
 		goto done;
 
@@ -1606,6 +1617,12 @@  static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 	dev_kfree_skb_any(first->skb);
 	first->skb = NULL;
 
+	return NETDEV_TX_OK;
+
+skb_drop:
+	dev_kfree_skb_any(skb);
+	skb = NULL;
+
 	return NETDEV_TX_OK;
 }
 
@@ -6015,6 +6032,7 @@  static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
 
 		ring->start_time = 0;
 		ring->end_time = NSEC_PER_SEC;
+		ring->max_sdu = 0;
 	}
 
 	return 0;
@@ -6097,6 +6115,15 @@  static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 		}
 	}
 
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igc_ring *ring = adapter->tx_ring[i];
+
+		if (qopt->max_frm_len[i] == U32_MAX)
+			ring->max_sdu = 0;
+		else
+			ring->max_sdu = qopt->max_frm_len[i];
+	}
+
 	return 0;
 }
 
@@ -6184,12 +6211,30 @@  static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
 	return igc_tsn_offload_apply(adapter);
 }
 
+static int igc_tsn_query_caps(struct tc_query_caps_base *base)
+{
+	switch (base->type) {
+	case TC_SETUP_QDISC_TAPRIO: {
+		struct tc_taprio_caps *caps = base->caps;
+
+		caps->supports_queue_max_sdu = true;
+
+		return 0;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
 			void *type_data)
 {
 	struct igc_adapter *adapter = netdev_priv(dev);
 
 	switch (type) {
+	case TC_QUERY_CAPS:
+		return igc_tsn_query_caps(type_data);
+
 	case TC_SETUP_QDISC_TAPRIO:
 		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
 
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 38207873eda6..d2539b1f6529 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -178,6 +178,7 @@  struct tc_taprio_qopt_offload {
 	u64 cycle_time;
 	u64 cycle_time_extension;
 	u32 max_sdu[TC_MAX_QUEUE];
+	u32 max_frm_len[TC_MAX_QUEUE];
 
 	size_t num_entries;
 	struct tc_taprio_sched_entry entries[];
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 570389f6cdd7..d39164074756 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1263,8 +1263,10 @@  static int taprio_enable_offload(struct net_device *dev,
 	offload->enable = 1;
 	taprio_sched_to_offload(dev, sched, offload);
 
-	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
 		offload->max_sdu[tc] = q->max_sdu[tc];
+		offload->max_frm_len[tc] = q->max_frm_len[tc];
+	}
 
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
 	if (err < 0) {