| Message ID | 20250305162132.1106080-8-aleksander.lobakin@intel.com (mailing list archive) |
|---|---|
| State | Changes Requested |
| Delegated to | Netdev Maintainers |
| Series | idpf: add XDP support |
On Wed, Mar 5, 2025 at 5:22 PM Alexander Lobakin <aleksander.lobakin@intel.com> wrote:
>
> Add the missing linking of NAPIs to netdev queues when enabling
> interrupt vectors in order to support NAPI configuration and
> interfaces requiring get_rx_queue()->napi to be set (like XSk
> busy polling).
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++++++++++++++
>  1 file changed, 30 insertions(+)
>
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> index 2f221c0abad8..a3f6e8cff7a0 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> @@ -3560,8 +3560,11 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
>  static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>  {
>  	struct idpf_adapter *adapter = vport->adapter;
> +	bool unlock;
>  	int vector;
>
> +	unlock = rtnl_trylock();

This is probably not what you want here?

If another thread is holding RTNL, then rtnl_trylock() will not add
any protection.
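To make Eric's point concrete: rtnl_trylock() returns false exactly when
another task already holds RTNL, and the code above then proceeds without
any exclusion. Worse, ASSERT_RTNL()-style checks only verify that the
mutex is held by *someone*, so a failed trylock under contention also
masks the very warning the lock was meant to silence. A minimal
illustrative sketch (not driver code; the function name and queue index
are made up):

static void example_teardown(struct net_device *netdev)
{
	bool unlock = rtnl_trylock();	/* false if another task holds RTNL */

	/*
	 * If unlock is false here, we race with the current RTNL holder,
	 * yet assertions that merely check rtnl_is_locked() stay quiet,
	 * because the mutex is locked -- just not by us.
	 */
	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);

	if (unlock)
		rtnl_unlock();
}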
On Wed, Mar 05, 2025 at 05:21:23PM +0100, Alexander Lobakin wrote:
> Add the missing linking of NAPIs to netdev queues when enabling
> interrupt vectors in order to support NAPI configuration and
> interfaces requiring get_rx_queue()->napi to be set (like XSk
> busy polling).
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++++++++++++++
>  1 file changed, 30 insertions(+)
>
> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> index 2f221c0abad8..a3f6e8cff7a0 100644
> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
> @@ -3560,8 +3560,11 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
>  static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>  {
>  	struct idpf_adapter *adapter = vport->adapter;
> +	bool unlock;
>  	int vector;
>
> +	unlock = rtnl_trylock();
> +
>  	for (vector = 0; vector < vport->num_q_vectors; vector++) {
>  		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
>  		int irq_num, vidx;
> @@ -3573,8 +3576,23 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>  		vidx = vport->q_vector_idxs[vector];
>  		irq_num = adapter->msix_entries[vidx].vector;
>
> +		for (u32 i = 0; i < q_vector->num_rxq; i++)
> +			netif_queue_set_napi(vport->netdev,
> +					     q_vector->rx[i]->idx,
> +					     NETDEV_QUEUE_TYPE_RX,
> +					     NULL);
> +
> +		for (u32 i = 0; i < q_vector->num_txq; i++)
> +			netif_queue_set_napi(vport->netdev,
> +					     q_vector->tx[i]->idx,
> +					     NETDEV_QUEUE_TYPE_TX,
> +					     NULL);
> +

maybe we could have a wrapper for this?

static void idpf_q_set_napi(struct net_device *netdev,
			    struct idpf_q_vector *q_vector,
			    enum netdev_queue_type q_type,
			    struct napi_struct *napi)
{
	u32 q_cnt = q_type == NETDEV_QUEUE_TYPE_RX ? q_vector->num_rxq :
						     q_vector->num_txq;
	struct idpf_rx_queue **qs = q_type == NETDEV_QUEUE_TYPE_RX ?
				    q_vector->rx : q_vector->tx;

	for (u32 i = 0; i < q_cnt; i++)
		netif_queue_set_napi(netdev, qs[i]->idx, q_type, napi);
}

idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_RX, NULL);
idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_TX, NULL);
...
idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_TX, &q_vector->napi);

Up to you if you take it; fewer lines in the end, but I don't have a
strong opinion on whether this is an improvement or makes the code
harder to follow.

> 		kfree(free_irq(irq_num, q_vector));
>  	}
> +
> +	if (unlock)
> +		rtnl_unlock();
>  }
>
>  /**
> @@ -3760,6 +3778,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
> 				   "Request_irq failed, error: %d\n", err);
> 			goto free_q_irqs;
> 		}
> +
> +		for (u32 i = 0; i < q_vector->num_rxq; i++)
> +			netif_queue_set_napi(vport->netdev,
> +					     q_vector->rx[i]->idx,
> +					     NETDEV_QUEUE_TYPE_RX,
> +					     &q_vector->napi);
> +
> +		for (u32 i = 0; i < q_vector->num_txq; i++)
> +			netif_queue_set_napi(vport->netdev,
> +					     q_vector->tx[i]->idx,
> +					     NETDEV_QUEUE_TYPE_TX,
> +					     &q_vector->napi);
> 	}
>
> 	return 0;
> --
> 2.48.1
>
From: Eric Dumazet <edumazet@google.com>
Date: Fri, 7 Mar 2025 11:28:36 +0100

> On Wed, Mar 5, 2025 at 5:22 PM Alexander Lobakin
> <aleksander.lobakin@intel.com> wrote:
>>
>> Add the missing linking of NAPIs to netdev queues when enabling
>> interrupt vectors in order to support NAPI configuration and
>> interfaces requiring get_rx_queue()->napi to be set (like XSk
>> busy polling).
>>
>> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
>> ---
>>  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++++++++++++++
>>  1 file changed, 30 insertions(+)
>>
>> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>> index 2f221c0abad8..a3f6e8cff7a0 100644
>> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>> @@ -3560,8 +3560,11 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
>>  static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>>  {
>>  	struct idpf_adapter *adapter = vport->adapter;
>> +	bool unlock;
>>  	int vector;
>>
>> +	unlock = rtnl_trylock();
>
> This is probably not what you want here?
>
> If another thread is holding RTNL, then rtnl_trylock() will not add
> any protection.

Yep, I know. trylock() is because this function can be called in two
scenarios:

1) .ndo_close(), when RTNL is already locked;
2) "soft reset" aka "stop the traffic, reallocate the queues, start the
   traffic", when RTNL is not taken.

The second one spits a WARN without the RTNL being locked. So this
trylock() will do nothing for the first scenario and will take the lock
for the second one.

If that is not correct, let me know and I'll do it a different way
(maybe it's better to unconditionally take the lock at the call site
for the second case?).

Thanks,
Olek
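For reference, the call-site alternative Olek floats for scenario 2 would
look roughly like the sketch below. The caller's name and the exact reset
flow are illustrative, not the driver's actual code, but it shows how
idpf_vport_intr_rel_irq() could then assume RTNL is always held and drop
the trylock entirely:

/* Hypothetical caller for the "soft reset" path (scenario 2). */
static void example_soft_reset(struct idpf_vport *vport)
{
	rtnl_lock();			/* this path runs outside RTNL, so take it here */
	idpf_vport_intr_rel_irq(vport);	/* can now rely on RTNL being held */
	/* ... reallocate queues, re-request IRQs ... */
	rtnl_unlock();
}

The .ndo_close() path (scenario 1) already runs under RTNL, so with this
shape both callers provide the same locking guarantee and no conditional
unlock bookkeeping is needed inside the helper.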
From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Date: Fri, 7 Mar 2025 11:51:18 +0100

> On Wed, Mar 05, 2025 at 05:21:23PM +0100, Alexander Lobakin wrote:
>> Add the missing linking of NAPIs to netdev queues when enabling
>> interrupt vectors in order to support NAPI configuration and
>> interfaces requiring get_rx_queue()->napi to be set (like XSk
>> busy polling).
>>
>> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
>> ---
>>  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++++++++++++++
>>  1 file changed, 30 insertions(+)
>>
>> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>> index 2f221c0abad8..a3f6e8cff7a0 100644
>> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>> @@ -3560,8 +3560,11 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
>>  static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>>  {
>>  	struct idpf_adapter *adapter = vport->adapter;
>> +	bool unlock;
>>  	int vector;
>>
>> +	unlock = rtnl_trylock();
>> +
>>  	for (vector = 0; vector < vport->num_q_vectors; vector++) {
>>  		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
>>  		int irq_num, vidx;
>> @@ -3573,8 +3576,23 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>>  		vidx = vport->q_vector_idxs[vector];
>>  		irq_num = adapter->msix_entries[vidx].vector;
>>
>> +		for (u32 i = 0; i < q_vector->num_rxq; i++)
>> +			netif_queue_set_napi(vport->netdev,
>> +					     q_vector->rx[i]->idx,
>> +					     NETDEV_QUEUE_TYPE_RX,
>> +					     NULL);
>> +
>> +		for (u32 i = 0; i < q_vector->num_txq; i++)
>> +			netif_queue_set_napi(vport->netdev,
>> +					     q_vector->tx[i]->idx,
>> +					     NETDEV_QUEUE_TYPE_TX,
>> +					     NULL);
>> +
>
> maybe we could have a wrapper for this?
>
> static void idpf_q_set_napi(struct net_device *netdev,
> 			    struct idpf_q_vector *q_vector,
> 			    enum netdev_queue_type q_type,
> 			    struct napi_struct *napi)
> {
> 	u32 q_cnt = q_type == NETDEV_QUEUE_TYPE_RX ? q_vector->num_rxq :
> 						     q_vector->num_txq;
> 	struct idpf_rx_queue **qs = q_type == NETDEV_QUEUE_TYPE_RX ?
> 				    q_vector->rx : q_vector->tx;
>
> 	for (u32 i = 0; i < q_cnt; i++)
> 		netif_queue_set_napi(netdev, qs[i]->idx, q_type, napi);
> }
>
> idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_RX, NULL);
> idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_TX, NULL);
> ...
> idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
> idpf_q_set_napi(vport->netdev, q_vector, NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
>
> Up to you if you take it; fewer lines in the end, but I don't have a
> strong opinion on whether this is an improvement or makes the code
> harder to follow.

No, no, it's actually a good idea. Previously it looked different, but
then the CPU-affinity handling embedded into the NAPI config got merged
and I had to rewrite this at the last minute.

>
>> 		kfree(free_irq(irq_num, q_vector));
>>  	}
>> +
>> +	if (unlock)
>> +		rtnl_unlock();
>>  }

Thanks,
Olek
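One detail worth flagging in the suggested wrapper: q_vector->rx and
q_vector->tx presumably hold different queue types (the sketch declares
only struct idpf_rx_queue **), so selecting the array with a ternary would
need a cast to compile cleanly. A type-safe sketch that keeps the call
sites just as short (the helper name is hypothetical, not from the patch):

static void idpf_q_vector_set_napi(struct idpf_vport *vport,
				   struct idpf_q_vector *q_vector,
				   struct napi_struct *napi)
{
	/* Loop both rings explicitly instead of picking an array by type. */
	for (u32 i = 0; i < q_vector->num_rxq; i++)
		netif_queue_set_napi(vport->netdev, q_vector->rx[i]->idx,
				     NETDEV_QUEUE_TYPE_RX, napi);

	for (u32 i = 0; i < q_vector->num_txq; i++)
		netif_queue_set_napi(vport->netdev, q_vector->tx[i]->idx,
				     NETDEV_QUEUE_TYPE_TX, napi);
}

With that shape the two pairs of call sites collapse further:

	idpf_q_vector_set_napi(vport, q_vector, NULL);			/* unlink */
	idpf_q_vector_set_napi(vport, q_vector, &q_vector->napi);	/* link */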
From: Alexander Lobakin <aleksander.lobakin@intel.com>
Date: Wed, 12 Mar 2025 18:16:11 +0100

> From: Eric Dumazet <edumazet@google.com>
> Date: Fri, 7 Mar 2025 11:28:36 +0100
>
>> On Wed, Mar 5, 2025 at 5:22 PM Alexander Lobakin
>> <aleksander.lobakin@intel.com> wrote:
>>>
>>> Add the missing linking of NAPIs to netdev queues when enabling
>>> interrupt vectors in order to support NAPI configuration and
>>> interfaces requiring get_rx_queue()->napi to be set (like XSk
>>> busy polling).
>>>
>>> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
>>> ---
>>>  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++++++++++++++
>>>  1 file changed, 30 insertions(+)
>>>
>>> diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>>> index 2f221c0abad8..a3f6e8cff7a0 100644
>>> --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>>> +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
>>> @@ -3560,8 +3560,11 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
>>>  static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
>>>  {
>>>  	struct idpf_adapter *adapter = vport->adapter;
>>> +	bool unlock;
>>>  	int vector;
>>>
>>> +	unlock = rtnl_trylock();
>>
>> This is probably not what you want here?
>>
>> If another thread is holding RTNL, then rtnl_trylock() will not add
>> any protection.
>
> Yep, I know. trylock() is because this function can be called in two
> scenarios:
>
> 1) .ndo_close(), when RTNL is already locked;
> 2) "soft reset" aka "stop the traffic, reallocate the queues, start the
>    traffic", when RTNL is not taken.
>
> The second one spits a WARN without the RTNL being locked. So this
> trylock() will do nothing for the first scenario and will take the lock
> for the second one.
>
> If that is not correct, let me know and I'll do it a different way
> (maybe it's better to unconditionally take the lock at the call site
> for the second case?).

Ping. What should I do: lock RTNL at the call site or proceed with
trylock?

Thanks,
Olek
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2f221c0abad8..a3f6e8cff7a0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3560,8 +3560,11 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
+	bool unlock;
 	int vector;
 
+	unlock = rtnl_trylock();
+
 	for (vector = 0; vector < vport->num_q_vectors; vector++) {
 		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
 		int irq_num, vidx;
@@ -3573,8 +3576,23 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
 		vidx = vport->q_vector_idxs[vector];
 		irq_num = adapter->msix_entries[vidx].vector;
 
+		for (u32 i = 0; i < q_vector->num_rxq; i++)
+			netif_queue_set_napi(vport->netdev,
+					     q_vector->rx[i]->idx,
+					     NETDEV_QUEUE_TYPE_RX,
+					     NULL);
+
+		for (u32 i = 0; i < q_vector->num_txq; i++)
+			netif_queue_set_napi(vport->netdev,
+					     q_vector->tx[i]->idx,
+					     NETDEV_QUEUE_TYPE_TX,
+					     NULL);
+
 		kfree(free_irq(irq_num, q_vector));
 	}
+
+	if (unlock)
+		rtnl_unlock();
 }
 
 /**
@@ -3760,6 +3778,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
 				   "Request_irq failed, error: %d\n", err);
 			goto free_q_irqs;
 		}
+
+		for (u32 i = 0; i < q_vector->num_rxq; i++)
+			netif_queue_set_napi(vport->netdev,
+					     q_vector->rx[i]->idx,
+					     NETDEV_QUEUE_TYPE_RX,
+					     &q_vector->napi);
+
+		for (u32 i = 0; i < q_vector->num_txq; i++)
+			netif_queue_set_napi(vport->netdev,
+					     q_vector->tx[i]->idx,
+					     NETDEV_QUEUE_TYPE_TX,
+					     &q_vector->napi);
 	}
 
 	return 0;
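A side note on the kfree(free_irq(...)) idiom visible in the first hunk:
free_irq() returns the devname string that was passed to request_irq(), so
a driver that builds the interrupt name dynamically (as idpf appears to do
at request time) can release the name and the IRQ in one statement. An
illustrative pattern, not the driver's exact code (the handler and name
format are placeholders):

	name = kasprintf(GFP_KERNEL, "%s-%u", basename, vidx);
	err = request_irq(irq_num, handler, 0, name, q_vector);
	...
	kfree(free_irq(irq_num, q_vector));	/* frees 'name' as well */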
Add the missing linking of NAPIs to netdev queues when enabling
interrupt vectors in order to support NAPI configuration and
interfaces requiring get_rx_queue()->napi to be set (like XSk
busy polling).

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 30 +++++++++++++++++++++
 1 file changed, 30 insertions(+)