| Message ID | 20230109191523.12070-9-gerhard@engleder-embedded.com |
| --- | --- |
| State | Superseded |
| Delegated to | Netdev Maintainers |
| Series | tsnep: XDP support |
On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
> Register xdp_rxq_info with page_pool memory model. This is needed for
> XDP buffer handling.
>
> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
> Reviewed-by: Saeed Mahameed <saeed@kernel.org>
> ---
>  drivers/net/ethernet/engleder/tsnep.h      |  2 ++
>  drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------
>  2 files changed, 31 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
> index 855738d31d73..2268ff793edf 100644
> --- a/drivers/net/ethernet/engleder/tsnep.h
> +++ b/drivers/net/ethernet/engleder/tsnep.h
> @@ -134,6 +134,8 @@ struct tsnep_rx {
>          u32 dropped;
>          u32 multicast;
>          u32 alloc_failed;
> +
> +        struct xdp_rxq_info xdp_rxq;
>  };
>

Rather than placing it in your Rx queue structure, it might be better to
look at placing it in the tsnep_queue struct. The fact is this is more
closely associated with the napi struct than your Rx ring, so changing
it to that might make more sense as you can avoid a bunch of extra
overhead with having to pull napi to it.

Basically what it comes down to is that it is much easier to access
queue[i].rx than it is for the rx to get access to queue[i].napi.

>  struct tsnep_queue {
> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
> index 0c9669edb2dd..451ad1849b9d 100644
> --- a/drivers/net/ethernet/engleder/tsnep_main.c
> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
> @@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>                  entry->page = NULL;
>          }
>
> +        if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
> +                xdp_rxq_info_unreg(&rx->xdp_rxq);
> +
>          if (rx->page_pool)
>                  page_pool_destroy(rx->page_pool);
>
> @@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>          }
>  }
>
> -static int tsnep_rx_ring_init(struct tsnep_rx *rx)
> +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
>  {
>          struct device *dmadev = rx->adapter->dmadev;
>          struct tsnep_rx_entry *entry;
> @@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>                  goto failed;
>          }
>
> +        retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
> +                                  rx->queue_index, napi_id);
> +        if (retval)
> +                goto failed;
> +        retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
> +                                            rx->page_pool);
> +        if (retval)
> +                goto failed;
> +
>          for (i = 0; i < TSNEP_RING_SIZE; i++) {
>                  entry = &rx->entry[i];
>                  next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
> @@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
>  }
>
>  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
> -                         int queue_index, struct tsnep_rx *rx)
> +                         unsigned int napi_id, int queue_index,
> +                         struct tsnep_rx *rx)
>  {
>          dma_addr_t dma;
>          int retval;
> @@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>          else
>                  rx->offset = TSNEP_SKB_PAD;
>
> -        retval = tsnep_rx_ring_init(rx);
> +        retval = tsnep_rx_ring_init(rx, napi_id);
>          if (retval)
>                  return retval;
>
> @@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
>  static int tsnep_netdev_open(struct net_device *netdev)
>  {
>          struct tsnep_adapter *adapter = netdev_priv(netdev);
> -        int i;
> -        void __iomem *addr;
>          int tx_queue_index = 0;
>          int rx_queue_index = 0;
> -        int retval;
> +        unsigned int napi_id;
> +        void __iomem *addr;
> +        int i, retval;
>
>          for (i = 0; i < adapter->num_queues; i++) {
>                  adapter->queue[i].adapter = adapter;
> +
> +                netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> +                               tsnep_poll);
> +                napi_id = adapter->queue[i].napi.napi_id;
> +

This is a good example of what I am referring to.

>                  if (adapter->queue[i].tx) {
>                          addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
>                          retval = tsnep_tx_open(adapter, addr, tx_queue_index,
> @@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev)
>                  }
>                  if (adapter->queue[i].rx) {
>                          addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
> -                        retval = tsnep_rx_open(adapter, addr,
> +                        retval = tsnep_rx_open(adapter, addr, napi_id,
>                                                 rx_queue_index,
>                                                 adapter->queue[i].rx);
>                          if (retval)
> @@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
>                  goto phy_failed;
>
>          for (i = 0; i < adapter->num_queues; i++) {
> -                netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> -                               tsnep_poll);
>                  napi_enable(&adapter->queue[i].napi);
>
>                  tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);

What you could do here is look at making all the napi_add/napi_enable
and xdp_rxq_info items into one function to handle all the enabling.

> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
>                          tsnep_rx_close(adapter->queue[i].rx);
>                  if (adapter->queue[i].tx)
>                          tsnep_tx_close(adapter->queue[i].tx);
> +
> +                netif_napi_del(&adapter->queue[i].napi);
>          }
>          return retval;
>  }
> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
>                  tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
>
>                  napi_disable(&adapter->queue[i].napi);
> -                netif_napi_del(&adapter->queue[i].napi);
>
>                  tsnep_free_irq(&adapter->queue[i], i == 0);
>

Likewise here you could take care of all the same items with the page
pool being freed after you have already unregistered and freed the napi
instance.

> @@ -1343,6 +1360,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
>                          tsnep_rx_close(adapter->queue[i].rx);
>                  if (adapter->queue[i].tx)
>                          tsnep_tx_close(adapter->queue[i].tx);
> +
> +                netif_napi_del(&adapter->queue[i].napi);
>          }
>
>          return 0;
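The review suggestion above can be sketched in code. The following is only an illustration, not code from this series: the helper name `tsnep_queue_enable`, the error labels, and the exact member layout are assumptions, while the struct members and the `xdp_rxq_info_*` calls are taken from the posted patch.

```c
/* Sketch only: xdp_rxq moves from struct tsnep_rx into struct tsnep_queue,
 * next to the NAPI instance it is associated with. Other members omitted.
 */
struct tsnep_queue {
	struct tsnep_adapter *adapter;
	struct tsnep_tx *tx;
	struct tsnep_rx *rx;

	struct napi_struct napi;
	struct xdp_rxq_info xdp_rxq;	/* was in struct tsnep_rx */

	u32 irq_mask;
	/* ... */
};

/* One per-queue enable helper (hypothetical name) that adds the NAPI
 * instance and registers the XDP Rx queue info with the page_pool
 * memory model in a single place.
 */
static int tsnep_queue_enable(struct tsnep_queue *queue)
{
	int retval;

	netif_napi_add(queue->adapter->netdev, &queue->napi, tsnep_poll);

	if (queue->rx) {
		retval = xdp_rxq_info_reg(&queue->xdp_rxq,
					  queue->adapter->netdev,
					  queue->rx->queue_index,
					  queue->napi.napi_id);
		if (retval)
			goto failed_reg;

		retval = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
						    MEM_TYPE_PAGE_POOL,
						    queue->rx->page_pool);
		if (retval)
			goto failed_mem_model;
	}

	napi_enable(&queue->napi);

	return 0;

failed_mem_model:
	xdp_rxq_info_unreg(&queue->xdp_rxq);
failed_reg:
	netif_napi_del(&queue->napi);
	return retval;
}
```

In such an arrangement the helper would run after tsnep_rx_open() has created the page pool, so tsnep_rx_ring_init() would no longer need the napi_id argument at all.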
On 10.01.23 18:29, Alexander H Duyck wrote:
> On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
>> Register xdp_rxq_info with page_pool memory model. This is needed for
>> XDP buffer handling.
>>
>> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
>> Reviewed-by: Saeed Mahameed <saeed@kernel.org>
>> ---
>>  drivers/net/ethernet/engleder/tsnep.h      |  2 ++
>>  drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------
>>  2 files changed, 31 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
>> index 855738d31d73..2268ff793edf 100644
>> --- a/drivers/net/ethernet/engleder/tsnep.h
>> +++ b/drivers/net/ethernet/engleder/tsnep.h
>> @@ -134,6 +134,8 @@ struct tsnep_rx {
>>          u32 dropped;
>>          u32 multicast;
>>          u32 alloc_failed;
>> +
>> +        struct xdp_rxq_info xdp_rxq;
>>  };
>>
>
> Rather than placing it in your Rx queue structure, it might be better to
> look at placing it in the tsnep_queue struct. The fact is this is more
> closely associated with the napi struct than your Rx ring, so changing
> it to that might make more sense as you can avoid a bunch of extra
> overhead with having to pull napi to it.
>
> Basically what it comes down to is that it is much easier to access
> queue[i].rx than it is for the rx to get access to queue[i].napi.

I will try it as suggested.

>>  struct tsnep_queue {
>> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
>> index 0c9669edb2dd..451ad1849b9d 100644
>> --- a/drivers/net/ethernet/engleder/tsnep_main.c
>> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
>> @@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>>                  entry->page = NULL;
>>          }
>>
>> +        if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
>> +                xdp_rxq_info_unreg(&rx->xdp_rxq);
>> +
>>          if (rx->page_pool)
>>                  page_pool_destroy(rx->page_pool);
>>
>> @@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>>          }
>>  }
>>
>> -static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>> +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
>>  {
>>          struct device *dmadev = rx->adapter->dmadev;
>>          struct tsnep_rx_entry *entry;
>> @@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>>                  goto failed;
>>          }
>>
>> +        retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
>> +                                  rx->queue_index, napi_id);
>> +        if (retval)
>> +                goto failed;
>> +        retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
>> +                                            rx->page_pool);
>> +        if (retval)
>> +                goto failed;
>> +
>>          for (i = 0; i < TSNEP_RING_SIZE; i++) {
>>                  entry = &rx->entry[i];
>>                  next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
>> @@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
>>  }
>>
>>  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>> -                         int queue_index, struct tsnep_rx *rx)
>> +                         unsigned int napi_id, int queue_index,
>> +                         struct tsnep_rx *rx)
>>  {
>>          dma_addr_t dma;
>>          int retval;
>> @@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>>          else
>>                  rx->offset = TSNEP_SKB_PAD;
>>
>> -        retval = tsnep_rx_ring_init(rx);
>> +        retval = tsnep_rx_ring_init(rx, napi_id);
>>          if (retval)
>>                  return retval;
>>
>> @@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
>>  static int tsnep_netdev_open(struct net_device *netdev)
>>  {
>>          struct tsnep_adapter *adapter = netdev_priv(netdev);
>> -        int i;
>> -        void __iomem *addr;
>>          int tx_queue_index = 0;
>>          int rx_queue_index = 0;
>> -        int retval;
>> +        unsigned int napi_id;
>> +        void __iomem *addr;
>> +        int i, retval;
>>
>>          for (i = 0; i < adapter->num_queues; i++) {
>>                  adapter->queue[i].adapter = adapter;
>> +
>> +                netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
>> +                               tsnep_poll);
>> +                napi_id = adapter->queue[i].napi.napi_id;
>> +
>
> This is a good example of what I am referring to.
>
>>                  if (adapter->queue[i].tx) {
>>                          addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
>>                          retval = tsnep_tx_open(adapter, addr, tx_queue_index,
>> @@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev)
>>                  }
>>                  if (adapter->queue[i].rx) {
>>                          addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
>> -                        retval = tsnep_rx_open(adapter, addr,
>> +                        retval = tsnep_rx_open(adapter, addr, napi_id,
>>                                                 rx_queue_index,
>>                                                 adapter->queue[i].rx);
>>                          if (retval)
>> @@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
>>                  goto phy_failed;
>>
>>          for (i = 0; i < adapter->num_queues; i++) {
>> -                netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
>> -                               tsnep_poll);
>>                  napi_enable(&adapter->queue[i].napi);
>>
>>                  tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
>
> What you could do here is look at making all the napi_add/napi_enable
> and xdp_rxq_info items into one function to handle all the enabling.

I will rework that.

>> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
>>                          tsnep_rx_close(adapter->queue[i].rx);
>>                  if (adapter->queue[i].tx)
>>                          tsnep_tx_close(adapter->queue[i].tx);
>> +
>> +                netif_napi_del(&adapter->queue[i].napi);
>>          }
>>          return retval;
>>  }
>> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
>>                  tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
>>
>>                  napi_disable(&adapter->queue[i].napi);
>> -                netif_napi_del(&adapter->queue[i].napi);
>>
>>                  tsnep_free_irq(&adapter->queue[i], i == 0);
>>
>
> Likewise here you could take care of all the same items with the page
> pool being freed after you have already unregistered and freed the napi
> instance.

I'm not sure if I understand it right. According to your suggestion
above, napi and xdp_rxq_info should be freed here?

Gerhard
On Tue, Jan 10, 2023 at 1:21 PM Gerhard Engleder
<gerhard@engleder-embedded.com> wrote:
>
> On 10.01.23 18:29, Alexander H Duyck wrote:
> > On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
> >> Register xdp_rxq_info with page_pool memory model. This is needed for
> >> XDP buffer handling.
> >>
> >> <...>
> >>
> >> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
> >>                          tsnep_rx_close(adapter->queue[i].rx);
> >>                  if (adapter->queue[i].tx)
> >>                          tsnep_tx_close(adapter->queue[i].tx);
> >> +
> >> +                netif_napi_del(&adapter->queue[i].napi);
> >>          }
> >>          return retval;
> >>  }
> >> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
> >>                  tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
> >>
> >>                  napi_disable(&adapter->queue[i].napi);
> >> -                netif_napi_del(&adapter->queue[i].napi);
> >>
> >>                  tsnep_free_irq(&adapter->queue[i], i == 0);
> >>
> >
> > Likewise here you could take care of all the same items with the page
> > pool being freed after you have already unregistered and freed the napi
> > instance.
>
> I'm not sure if I understand it right. According to your suggestion
> above, napi and xdp_rxq_info should be freed here?

Right. Between the napi_disable and the netif_napi_del you could
unregister the mem model and the xdp_info. Basically the two are tied
closer to the NAPI instance than the Rx queue itself, so it would make
sense to just take care of it here.

You might look at just putting together a function to handle all of it,
since then you just pass &adapter->queue once and then use a local
variable in the function.
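The teardown ordering described here can likewise be sketched. Again this is only an illustration, not code from this series; the helper name `tsnep_queue_disable` is hypothetical, and the individual calls are the ones already used by the posted patch.

```c
/* Sketch of the suggested per-queue close path: the XDP Rx queue info is
 * unregistered between napi_disable() and netif_napi_del(), i.e. before
 * the Rx ring cleanup destroys the page pool. xdp_rxq_info_unreg() also
 * drops the memory-model registration, so no separate call is needed.
 */
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
	napi_disable(&queue->napi);

	if (queue->rx && xdp_rxq_info_is_reg(&queue->xdp_rxq))
		xdp_rxq_info_unreg(&queue->xdp_rxq);

	netif_napi_del(&queue->napi);
}
```

tsnep_netdev_close() (and the error path of tsnep_netdev_open()) would then call such a helper once per queue before tsnep_rx_close()/tsnep_tx_close(), so the page pool outlives the xdp_rxq_info registration.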
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 855738d31d73..2268ff793edf 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -134,6 +134,8 @@ struct tsnep_rx {
         u32 dropped;
         u32 multicast;
         u32 alloc_failed;
+
+        struct xdp_rxq_info xdp_rxq;
 };
 
 struct tsnep_queue {
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 0c9669edb2dd..451ad1849b9d 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
                 entry->page = NULL;
         }
 
+        if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+                xdp_rxq_info_unreg(&rx->xdp_rxq);
+
         if (rx->page_pool)
                 page_pool_destroy(rx->page_pool);
 
@@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
         }
 }
 
-static int tsnep_rx_ring_init(struct tsnep_rx *rx)
+static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
 {
         struct device *dmadev = rx->adapter->dmadev;
         struct tsnep_rx_entry *entry;
@@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
                 goto failed;
         }
 
+        retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
+                                  rx->queue_index, napi_id);
+        if (retval)
+                goto failed;
+        retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
+                                            rx->page_pool);
+        if (retval)
+                goto failed;
+
         for (i = 0; i < TSNEP_RING_SIZE; i++) {
                 entry = &rx->entry[i];
                 next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
@@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
 }
 
 static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
-                         int queue_index, struct tsnep_rx *rx)
+                         unsigned int napi_id, int queue_index,
+                         struct tsnep_rx *rx)
 {
         dma_addr_t dma;
         int retval;
@@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
         else
                 rx->offset = TSNEP_SKB_PAD;
 
-        retval = tsnep_rx_ring_init(rx);
+        retval = tsnep_rx_ring_init(rx, napi_id);
         if (retval)
                 return retval;
 
@@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
 static int tsnep_netdev_open(struct net_device *netdev)
 {
         struct tsnep_adapter *adapter = netdev_priv(netdev);
-        int i;
-        void __iomem *addr;
         int tx_queue_index = 0;
         int rx_queue_index = 0;
-        int retval;
+        unsigned int napi_id;
+        void __iomem *addr;
+        int i, retval;
 
         for (i = 0; i < adapter->num_queues; i++) {
                 adapter->queue[i].adapter = adapter;
+
+                netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
+                               tsnep_poll);
+                napi_id = adapter->queue[i].napi.napi_id;
+
                 if (adapter->queue[i].tx) {
                         addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
                         retval = tsnep_tx_open(adapter, addr, tx_queue_index,
@@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev)
                 }
                 if (adapter->queue[i].rx) {
                         addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
-                        retval = tsnep_rx_open(adapter, addr,
+                        retval = tsnep_rx_open(adapter, addr, napi_id,
                                                rx_queue_index,
                                                adapter->queue[i].rx);
                         if (retval)
@@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
                 goto phy_failed;
 
         for (i = 0; i < adapter->num_queues; i++) {
-                netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
-                               tsnep_poll);
                 napi_enable(&adapter->queue[i].napi);
 
                 tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
                         tsnep_rx_close(adapter->queue[i].rx);
                 if (adapter->queue[i].tx)
                         tsnep_tx_close(adapter->queue[i].tx);
+
+                netif_napi_del(&adapter->queue[i].napi);
         }
         return retval;
 }
@@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
                 tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
 
                 napi_disable(&adapter->queue[i].napi);
-                netif_napi_del(&adapter->queue[i].napi);
 
                 tsnep_free_irq(&adapter->queue[i], i == 0);
 
@@ -1343,6 +1360,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
                         tsnep_rx_close(adapter->queue[i].rx);
                 if (adapter->queue[i].tx)
                         tsnep_tx_close(adapter->queue[i].tx);
+
+                netif_napi_del(&adapter->queue[i].napi);
         }
 
         return 0;