Message ID | 20240808183556.386397-2-anthony.l.nguyen@intel.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | Netdev Maintainers |
Series | igb: Add support for AF_XDP zero-copy |
On Thu, Aug 08, 2024 at 11:35:51AM -0700, Tony Nguyen wrote:
> From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
>
> Always call igb_xdp_ring_update_tail under __netif_tx_lock, add a
> comment to indicate that. This is needed to share the same TX ring
> between XDP, XSK and slow paths.

standalone commit

>
> Remove static qualifiers on the following functions to be able to call
> from XSK specific file that is added in the later patches
> - igb_xdp_tx_queue_mapping
> - igb_xdp_ring_update_tail
> - igb_clean_tx_ring
> - igb_clean_rx_ring
> - igb_run_xdp
> - igb_process_skb_fields

ditto

>
> Introduce igb_xdp_is_enabled() to check if an XDP program is assigned to
> the device.

ditto

>
> Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> Signed-off-by: Kurt Kanzenbach <kurt@linutronix.de>
> Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
> Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
> ---
> drivers/net/ethernet/intel/igb/igb.h | 15 ++++++++++++
> drivers/net/ethernet/intel/igb/igb_main.c | 29 +++++++++++------------
> 2 files changed, 29 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
> index 3c2dc7bdebb5..0de71ec324ed 100644
> --- a/drivers/net/ethernet/intel/igb/igb.h
> +++ b/drivers/net/ethernet/intel/igb/igb.h
> @@ -718,6 +718,8 @@ extern char igb_driver_name[];
> int igb_xmit_xdp_ring(struct igb_adapter *adapter,
> struct igb_ring *ring,
> struct xdp_frame *xdpf);
> +struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter);
> +void igb_xdp_ring_update_tail(struct igb_ring *ring);
> int igb_open(struct net_device *netdev);
> int igb_close(struct net_device *netdev);
> int igb_up(struct igb_adapter *);
> @@ -731,12 +733,20 @@ int igb_setup_tx_resources(struct igb_ring *);
> int igb_setup_rx_resources(struct igb_ring *);
> void igb_free_tx_resources(struct igb_ring *);
> void igb_free_rx_resources(struct igb_ring *);
> +void igb_clean_tx_ring(struct igb_ring *tx_ring);
> +void igb_clean_rx_ring(struct igb_ring *rx_ring);
> void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
> void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
> void igb_setup_tctl(struct igb_adapter *);
> void igb_setup_rctl(struct igb_adapter *);
> void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
> netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
> +struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
> + struct igb_ring *rx_ring,
> + struct xdp_buff *xdp);
> +void igb_process_skb_fields(struct igb_ring *rx_ring,
> + union e1000_adv_rx_desc *rx_desc,
> + struct sk_buff *skb);
> void igb_alloc_rx_buffers(struct igb_ring *, u16);
> void igb_update_stats(struct igb_adapter *);
> bool igb_has_link(struct igb_adapter *adapter);
> @@ -797,6 +807,11 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
> return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
> }
>
> +static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
> +{
> + return !!adapter->xdp_prog;

READ_ONCE() plus use this everywhere else where prog is read.

> +}
> +
> int igb_add_filter(struct igb_adapter *adapter,
> struct igb_nfc_filter *input);
> int igb_erase_filter(struct igb_adapter *adapter,
> diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
> index 11be39f435f3..bdb7637559b8 100644
> --- a/drivers/net/ethernet/intel/igb/igb_main.c
> +++ b/drivers/net/ethernet/intel/igb/igb_main.c
> @@ -115,8 +115,6 @@ static void igb_configure_tx(struct igb_adapter *);
> static void igb_configure_rx(struct igb_adapter *);
> static void igb_clean_all_tx_rings(struct igb_adapter *);
> static void igb_clean_all_rx_rings(struct igb_adapter *);
> -static void igb_clean_tx_ring(struct igb_ring *);
> -static void igb_clean_rx_ring(struct igb_ring *);
> static void igb_set_rx_mode(struct net_device *);
> static void igb_update_phy_info(struct timer_list *);
> static void igb_watchdog(struct timer_list *);
> @@ -2914,7 +2912,8 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
> }
> }
>
> -static void igb_xdp_ring_update_tail(struct igb_ring *ring)
> +/* This function assumes __netif_tx_lock is held by the caller. */
> +void igb_xdp_ring_update_tail(struct igb_ring *ring)
> {
> /* Force memory writes to complete before letting h/w know there
> * are new descriptors to fetch.
> @@ -2923,7 +2922,7 @@ static void igb_xdp_ring_update_tail(struct igb_ring *ring)
> writel(ring->next_to_use, ring->tail);
> }
>
> -static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
> +struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
> {
> unsigned int r_idx = smp_processor_id();
>
> @@ -3000,11 +2999,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
> nxmit++;
> }
>
> - __netif_tx_unlock(nq);
> -
> if (unlikely(flags & XDP_XMIT_FLUSH))
> igb_xdp_ring_update_tail(tx_ring);
>
> + __netif_tx_unlock(nq);
> +
> return nxmit;
> }
>
> @@ -4879,7 +4878,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
> * igb_clean_tx_ring - Free Tx Buffers
> * @tx_ring: ring to be cleaned
> **/
> -static void igb_clean_tx_ring(struct igb_ring *tx_ring)
> +void igb_clean_tx_ring(struct igb_ring *tx_ring)
> {
> u16 i = tx_ring->next_to_clean;
> struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
> @@ -4998,7 +4997,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
> * igb_clean_rx_ring - Free Rx Buffers per Queue
> * @rx_ring: ring to free buffers from
> **/
> -static void igb_clean_rx_ring(struct igb_ring *rx_ring)
> +void igb_clean_rx_ring(struct igb_ring *rx_ring)
> {
> u16 i = rx_ring->next_to_clean;
>
> @@ -6613,7 +6612,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
> struct igb_adapter *adapter = netdev_priv(netdev);
> int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
>
> - if (adapter->xdp_prog) {
> + if (igb_xdp_is_enabled(adapter)) {
> int i;
>
> for (i = 0; i < adapter->num_rx_queues; i++) {
> @@ -8569,9 +8568,9 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
> return skb;
> }
>
> -static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
> - struct igb_ring *rx_ring,
> - struct xdp_buff *xdp)
> +struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
> + struct igb_ring *rx_ring,
> + struct xdp_buff *xdp)
> {
> int err, result = IGB_XDP_PASS;
> struct bpf_prog *xdp_prog;
> @@ -8767,9 +8766,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
> * order to populate the hash, checksum, VLAN, timestamp, protocol, and
> * other fields within the skb.
> **/
> -static void igb_process_skb_fields(struct igb_ring *rx_ring,
> - union e1000_adv_rx_desc *rx_desc,
> - struct sk_buff *skb)
> +void igb_process_skb_fields(struct igb_ring *rx_ring,
> + union e1000_adv_rx_desc *rx_desc,
> + struct sk_buff *skb)
> {
> struct net_device *dev = rx_ring->netdev;
>
> --
> 2.42.0
>
>
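[Editor's note] Condensed from the igb_xdp_xmit() hunk quoted above, the post-patch flow keeps the tail write inside the __netif_tx_lock()ed section. This is only a sketch: the "_sketch" name is invented here, and the carrier/ring validation and the per-frame transmit loop of the real function are elided.

/* Sketch of igb_xdp_xmit() after the reordering above. Only the locking
 * shape is shown; the checks and per-frame loop are elided as comments.
 * Not the function as it appears in igb_main.c.
 */
static int igb_xdp_xmit_sketch(struct net_device *dev, int n,
			       struct xdp_frame **frames, u32 flags)
{
	struct igb_adapter *adapter = netdev_priv(dev);
	unsigned int cpu = smp_processor_id();
	struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
	struct netdev_queue *nq = txring_txq(tx_ring);
	int nxmit = 0;

	__netif_tx_lock(nq, cpu);

	/* ... igb_xmit_xdp_ring() is called here for each of the n frames,
	 * incrementing nxmit on success ...
	 */

	if (unlikely(flags & XDP_XMIT_FLUSH))
		igb_xdp_ring_update_tail(tx_ring);	/* tail bump while the lock is held */

	__netif_tx_unlock(nq);

	return nxmit;
}

Before the patch the unlock happened above the flush check, so the doorbell write could race with other users of the same queue; moving the unlock below it is what makes the "must hold __netif_tx_lock" comment true for every caller.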
On Thu Aug 08 2024, Maciej Fijalkowski wrote:
> On Thu, Aug 08, 2024 at 11:35:51AM -0700, Tony Nguyen wrote:
>> From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
>>
>> Always call igb_xdp_ring_update_tail under __netif_tx_lock, add a
>> comment to indicate that. This is needed to share the same TX ring
>> between XDP, XSK and slow paths.
>
> standalone commit

Ok.

>> +static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
>> +{
>> + return !!adapter->xdp_prog;
>
> READ_ONCE() plus use this everywhere else where prog is read.

Sure. I'll send v6 to iwl then.

Thanks,
Kurt
>
> On Thu Aug 08 2024, Maciej Fijalkowski wrote:
> > On Thu, Aug 08, 2024 at 11:35:51AM -0700, Tony Nguyen wrote:
> >> From: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> >>
> >> Always call igb_xdp_ring_update_tail under __netif_tx_lock, add a
> >> comment to indicate that. This is needed to share the same TX ring
> >> between XDP, XSK and slow paths.
> >
> > standalone commit
>
> Ok.
>
> >> +static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
> >> +{
> >> + return !!adapter->xdp_prog;
> >
> > READ_ONCE() plus use this everywhere else where prog is read.
>
> Sure. I'll send v6 to iwl then.

I'm in the middle of going through rest of the set, will finish today.

>
> Thanks,
> Kurt
On Fri Aug 09 2024, Fijalkowski, Maciej wrote:
>> >> +static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
>> >> +{
>> >> + return !!adapter->xdp_prog;
>> >
>> > READ_ONCE() plus use this everywhere else where prog is read.
>>
>> Sure. I'll send v6 to iwl then.
>
> I'm in the middle of going through rest of the set, will finish today.

Perfect, thanks!
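[Editor's note] For reference, one way the helper could look once Maciej's READ_ONCE() request is applied. This is an illustration of the review comment, not the actual v6 change:

/* Sketch of igb_xdp_is_enabled() with READ_ONCE(), as requested in the
 * review above. READ_ONCE() makes the single load of adapter->xdp_prog
 * explicit so the check cannot be torn or silently re-read while the
 * program pointer is being swapped. Illustration only, not the v6 patch.
 */
static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
{
	return !!READ_ONCE(adapter->xdp_prog);
}

The second half of the request is to route every check through this helper (as igb_change_mtu() already does in this patch) instead of reading adapter->xdp_prog directly, so that each read of the program pointer carries the same annotation.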
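[Editor's note] A purely hypothetical sketch of why the exported helpers and the locking comment matter for the rest of the series: a later XSK transmit path could take the same queue lock before touching the shared TX ring. Everything below except igb_xdp_tx_queue_mapping(), igb_xmit_xdp_ring(), igb_xdp_ring_update_tail() and txring_txq() is invented for illustration; the real XSK file arrives in later patches and is not shown here.

/* Hypothetical XSK-side transmit helper (invented name and flow) showing
 * how the helpers made non-static by this patch would be shared with the
 * XDP and slow paths under __netif_tx_lock.
 */
static void igb_xsk_xmit_sketch(struct igb_adapter *adapter,
				struct xdp_frame *xdpf)
{
	struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
	struct netdev_queue *nq = txring_txq(tx_ring);

	__netif_tx_lock(nq, smp_processor_id());

	/* IGB_XDP_TX is the driver's existing "frame queued" return value;
	 * anything else is treated as a drop in this sketch.
	 */
	if (igb_xmit_xdp_ring(adapter, tx_ring, xdpf) == IGB_XDP_TX)
		igb_xdp_ring_update_tail(tx_ring);	/* per the new comment, only under the lock */

	__netif_tx_unlock(nq);
}

Because XDP redirect, a future XSK path and the regular stack can all pick the same tx_ring, keeping the tail update inside the locked region is what prevents two producers from ringing the doorbell against a half-written descriptor chain.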