[net-next,v4,08/10] tsnep: Add RX queue info for XDP support

Message ID 20230109191523.12070-9-gerhard@engleder-embedded.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series tsnep: XDP support

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 5 maintainers not CCed: bpf@vger.kernel.org ast@kernel.org daniel@iogearbox.net john.fastabend@gmail.com hawk@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 118 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Gerhard Engleder Jan. 9, 2023, 7:15 p.m. UTC
Register xdp_rxq_info with page_pool memory model. This is needed for
XDP buffer handling.

Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Reviewed-by: Saeed Mahameed <saeed@kernel.org>
---
 drivers/net/ethernet/engleder/tsnep.h      |  2 ++
 drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------
 2 files changed, 31 insertions(+), 10 deletions(-)

Comments

Alexander Duyck Jan. 10, 2023, 5:29 p.m. UTC | #1
On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
> Register xdp_rxq_info with page_pool memory model. This is needed for
> XDP buffer handling.
> 
> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
> Reviewed-by: Saeed Mahameed <saeed@kernel.org>
> ---
>  drivers/net/ethernet/engleder/tsnep.h      |  2 ++
>  drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------
>  2 files changed, 31 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
> index 855738d31d73..2268ff793edf 100644
> --- a/drivers/net/ethernet/engleder/tsnep.h
> +++ b/drivers/net/ethernet/engleder/tsnep.h
> @@ -134,6 +134,8 @@ struct tsnep_rx {
>  	u32 dropped;
>  	u32 multicast;
>  	u32 alloc_failed;
> +
> +	struct xdp_rxq_info xdp_rxq;
>  };
>  

Rather than placing it in your Rx queue structure it might be better to
look at placing it in the tsnep_queue struct. The fact is this is more
closely associated with the napi struct than your Rx ring, so changing
it to that might make more sense, as you can avoid a bunch of extra
overhead from having to pull napi to it.

Basically, what it comes down to is that it is much easier to access
queue[i].rx than it is for the rx to get access to queue[i].napi.
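
To make the trade-off concrete, a rough sketch of what that could look
like is below. The member list is reconstructed only from how the fields
are used elsewhere in this patch, so the names, order and exact set of
members are assumptions, not the driver's actual definition:

struct tsnep_queue {
	struct tsnep_adapter *adapter;

	struct tsnep_tx *tx;
	struct tsnep_rx *rx;

	/* NAPI instance and its XDP RX queue info kept side by side, so
	 * the open/close paths that already iterate adapter->queue[i] can
	 * register/unregister xdp_rxq without reaching into queue[i].rx
	 */
	struct napi_struct napi;
	struct xdp_rxq_info xdp_rxq;

	u32 irq_mask;
};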

>  struct tsnep_queue {
> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
> index 0c9669edb2dd..451ad1849b9d 100644
> --- a/drivers/net/ethernet/engleder/tsnep_main.c
> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
> @@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  		entry->page = NULL;
>  	}
>  
> +	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
> +		xdp_rxq_info_unreg(&rx->xdp_rxq);
> +
>  	if (rx->page_pool)
>  		page_pool_destroy(rx->page_pool);
>  
> @@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  	}
>  }
>  
> -static int tsnep_rx_ring_init(struct tsnep_rx *rx)
> +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
>  {
>  	struct device *dmadev = rx->adapter->dmadev;
>  	struct tsnep_rx_entry *entry;
> @@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>  		goto failed;
>  	}
>  
> +	retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
> +				  rx->queue_index, napi_id);
> +	if (retval)
> +		goto failed;
> +	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
> +					    rx->page_pool);
> +	if (retval)
> +		goto failed;
> +
>  	for (i = 0; i < TSNEP_RING_SIZE; i++) {
>  		entry = &rx->entry[i];
>  		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
> @@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
>  }
>  
>  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
> -			 int queue_index, struct tsnep_rx *rx)
> +			 unsigned int napi_id, int queue_index,
> +			 struct tsnep_rx *rx)
>  {
>  	dma_addr_t dma;
>  	int retval;
> @@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>  	else
>  		rx->offset = TSNEP_SKB_PAD;
>  
> -	retval = tsnep_rx_ring_init(rx);
> +	retval = tsnep_rx_ring_init(rx, napi_id);
>  	if (retval)
>  		return retval;
>  
> @@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
>  static int tsnep_netdev_open(struct net_device *netdev)
>  {
>  	struct tsnep_adapter *adapter = netdev_priv(netdev);
> -	int i;
> -	void __iomem *addr;
>  	int tx_queue_index = 0;
>  	int rx_queue_index = 0;
> -	int retval;
> +	unsigned int napi_id;
> +	void __iomem *addr;
> +	int i, retval;
>  
>  	for (i = 0; i < adapter->num_queues; i++) {
>  		adapter->queue[i].adapter = adapter;
> +
> +		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> +			       tsnep_poll);
> +		napi_id = adapter->queue[i].napi.napi_id;
> +

This is a good example of what I am referring to.

>  		if (adapter->queue[i].tx) {
>  			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
>  			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
> @@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev)
>  		}
>  		if (adapter->queue[i].rx) {
>  			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
> -			retval = tsnep_rx_open(adapter, addr,
> +			retval = tsnep_rx_open(adapter, addr, napi_id,
>  					       rx_queue_index,
>  					       adapter->queue[i].rx);
>  			if (retval)
> @@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
>  		goto phy_failed;
>  
>  	for (i = 0; i < adapter->num_queues; i++) {
> -		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
> -			       tsnep_poll);
>  		napi_enable(&adapter->queue[i].napi);
>  
>  		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);

What you could do here is look at making all the napi_add/napi_enable
and xdp_rxq_info items into one function to handle all the enabling.
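
For illustration, a hypothetical helper along these lines could be
called once per queue from tsnep_netdev_open(). The name
tsnep_queue_open is invented here, it assumes xdp_rxq has been moved
into struct tsnep_queue as suggested above, and it would have to run
after tsnep_rx_open() so that rx->page_pool already exists:

static int tsnep_queue_open(struct tsnep_adapter *adapter,
			    struct tsnep_queue *queue)
{
	struct tsnep_rx *rx = queue->rx;
	int retval;

	netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);

	if (rx) {
		/* tie the XDP RX queue info to this queue's NAPI instance
		 * and attach the page_pool memory model
		 */
		retval = xdp_rxq_info_reg(&queue->xdp_rxq, adapter->netdev,
					  rx->queue_index,
					  queue->napi.napi_id);
		if (retval)
			goto failed;

		retval = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
						    MEM_TYPE_PAGE_POOL,
						    rx->page_pool);
		if (retval)
			goto failed;
	}

	napi_enable(&queue->napi);

	return 0;

failed:
	if (xdp_rxq_info_is_reg(&queue->xdp_rxq))
		xdp_rxq_info_unreg(&queue->xdp_rxq);
	netif_napi_del(&queue->napi);
	return retval;
}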

> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
>  			tsnep_rx_close(adapter->queue[i].rx);
>  		if (adapter->queue[i].tx)
>  			tsnep_tx_close(adapter->queue[i].tx);
> +
> +		netif_napi_del(&adapter->queue[i].napi);
>  	}
>  	return retval;
>  }
> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
>  		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
>  
>  		napi_disable(&adapter->queue[i].napi);
> -		netif_napi_del(&adapter->queue[i].napi);
>  
>  		tsnep_free_irq(&adapter->queue[i], i == 0);
>  

Likewise here you could take care of all the same items with the page
pool being freed after you have already unregistered and freed the napi
instance.

> @@ -1343,6 +1360,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
>  			tsnep_rx_close(adapter->queue[i].rx);
>  		if (adapter->queue[i].tx)
>  			tsnep_tx_close(adapter->queue[i].tx);
> +
> +		netif_napi_del(&adapter->queue[i].napi);
>  	}
>  
>  	return 0;
Gerhard Engleder Jan. 10, 2023, 9:21 p.m. UTC | #2
On 10.01.23 18:29, Alexander H Duyck wrote:
> On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
>> Register xdp_rxq_info with page_pool memory model. This is needed for
>> XDP buffer handling.
>>
>> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
>> Reviewed-by: Saeed Mahameed <saeed@kernel.org>
>> ---
>>   drivers/net/ethernet/engleder/tsnep.h      |  2 ++
>>   drivers/net/ethernet/engleder/tsnep_main.c | 39 ++++++++++++++++------
>>   2 files changed, 31 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
>> index 855738d31d73..2268ff793edf 100644
>> --- a/drivers/net/ethernet/engleder/tsnep.h
>> +++ b/drivers/net/ethernet/engleder/tsnep.h
>> @@ -134,6 +134,8 @@ struct tsnep_rx {
>>   	u32 dropped;
>>   	u32 multicast;
>>   	u32 alloc_failed;
>> +
>> +	struct xdp_rxq_info xdp_rxq;
>>   };
>>   
> 
> Rather than placing it in your Rx queue structure it might be better to
> look at placing it in the tsnep_queue struct. The fact is this is more
> closely associated with the napi struct than your Rx ring, so changing
> it to that might make more sense, as you can avoid a bunch of extra
> overhead from having to pull napi to it.
> 
> Basically, what it comes down to is that it is much easier to access
> queue[i].rx than it is for the rx to get access to queue[i].napi.

I will try it as suggested.

>>   struct tsnep_queue {
>> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
>> index 0c9669edb2dd..451ad1849b9d 100644
>> --- a/drivers/net/ethernet/engleder/tsnep_main.c
>> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
>> @@ -792,6 +792,9 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>>   		entry->page = NULL;
>>   	}
>>   
>> +	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
>> +		xdp_rxq_info_unreg(&rx->xdp_rxq);
>> +
>>   	if (rx->page_pool)
>>   		page_pool_destroy(rx->page_pool);
>>   
>> @@ -807,7 +810,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>>   	}
>>   }
>>   
>> -static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>> +static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
>>   {
>>   	struct device *dmadev = rx->adapter->dmadev;
>>   	struct tsnep_rx_entry *entry;
>> @@ -850,6 +853,15 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
>>   		goto failed;
>>   	}
>>   
>> +	retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
>> +				  rx->queue_index, napi_id);
>> +	if (retval)
>> +		goto failed;
>> +	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
>> +					    rx->page_pool);
>> +	if (retval)
>> +		goto failed;
>> +
>>   	for (i = 0; i < TSNEP_RING_SIZE; i++) {
>>   		entry = &rx->entry[i];
>>   		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
>> @@ -1104,7 +1116,8 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx)
>>   }
>>   
>>   static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>> -			 int queue_index, struct tsnep_rx *rx)
>> +			 unsigned int napi_id, int queue_index,
>> +			 struct tsnep_rx *rx)
>>   {
>>   	dma_addr_t dma;
>>   	int retval;
>> @@ -1118,7 +1131,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
>>   	else
>>   		rx->offset = TSNEP_SKB_PAD;
>>   
>> -	retval = tsnep_rx_ring_init(rx);
>> +	retval = tsnep_rx_ring_init(rx, napi_id);
>>   	if (retval)
>>   		return retval;
>>   
>> @@ -1245,14 +1258,19 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
>>   static int tsnep_netdev_open(struct net_device *netdev)
>>   {
>>   	struct tsnep_adapter *adapter = netdev_priv(netdev);
>> -	int i;
>> -	void __iomem *addr;
>>   	int tx_queue_index = 0;
>>   	int rx_queue_index = 0;
>> -	int retval;
>> +	unsigned int napi_id;
>> +	void __iomem *addr;
>> +	int i, retval;
>>   
>>   	for (i = 0; i < adapter->num_queues; i++) {
>>   		adapter->queue[i].adapter = adapter;
>> +
>> +		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
>> +			       tsnep_poll);
>> +		napi_id = adapter->queue[i].napi.napi_id;
>> +
> 
> This is a good example of what I am referring to.
> 
>>   		if (adapter->queue[i].tx) {
>>   			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
>>   			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
>> @@ -1263,7 +1281,7 @@ static int tsnep_netdev_open(struct net_device *netdev)
>>   		}
>>   		if (adapter->queue[i].rx) {
>>   			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
>> -			retval = tsnep_rx_open(adapter, addr,
>> +			retval = tsnep_rx_open(adapter, addr, napi_id,
>>   					       rx_queue_index,
>>   					       adapter->queue[i].rx);
>>   			if (retval)
>> @@ -1295,8 +1313,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
>>   		goto phy_failed;
>>   
>>   	for (i = 0; i < adapter->num_queues; i++) {
>> -		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
>> -			       tsnep_poll);
>>   		napi_enable(&adapter->queue[i].napi);
>>   
>>   		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
> 
> What you could do here is look at making all the napi_add/napi_enable
> and xdp_rxq_info items into one function to handle all the enabling.

I will rework that.

>> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
>>   			tsnep_rx_close(adapter->queue[i].rx);
>>   		if (adapter->queue[i].tx)
>>   			tsnep_tx_close(adapter->queue[i].tx);
>> +
>> +		netif_napi_del(&adapter->queue[i].napi);
>>   	}
>>   	return retval;
>>   }
>> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
>>   		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
>>   
>>   		napi_disable(&adapter->queue[i].napi);
>> -		netif_napi_del(&adapter->queue[i].napi);
>>   
>>   		tsnep_free_irq(&adapter->queue[i], i == 0);
>>   
> 
> Likewise here you could take care of all the same items with the page
> pool being freed after you have already unregistered and freed the napi
> instance.

I'm not sure if I understand it right. According to your suggestion
above, napi and xdp_rxq_info should be freed here?

Gerhard
Alexander Duyck Jan. 10, 2023, 10:27 p.m. UTC | #3
On Tue, Jan 10, 2023 at 1:21 PM Gerhard Engleder
<gerhard@engleder-embedded.com> wrote:
>
> On 10.01.23 18:29, Alexander H Duyck wrote:
> > On Mon, 2023-01-09 at 20:15 +0100, Gerhard Engleder wrote:
> >> Register xdp_rxq_info with page_pool memory model. This is needed for
> >> XDP buffer handling.
> >>

<...>

> >> @@ -1317,6 +1333,8 @@ static int tsnep_netdev_open(struct net_device *netdev)
> >>                      tsnep_rx_close(adapter->queue[i].rx);
> >>              if (adapter->queue[i].tx)
> >>                      tsnep_tx_close(adapter->queue[i].tx);
> >> +
> >> +            netif_napi_del(&adapter->queue[i].napi);
> >>      }
> >>      return retval;
> >>   }
> >> @@ -1335,7 +1353,6 @@ static int tsnep_netdev_close(struct net_device *netdev)
> >>              tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
> >>
> >>              napi_disable(&adapter->queue[i].napi);
> >> -            netif_napi_del(&adapter->queue[i].napi);
> >>
> >>              tsnep_free_irq(&adapter->queue[i], i == 0);
> >>
> >
> > Likewise here you could take care of all the same items with the page
> > pool being freed after you have already unregistered and freed the napi
> > instance.
>
> I'm not sure if I understand it right. According to your suggestion
> above napi and xdp_rxq_info should be freed here?

Right. Between the napi_disable and the netif_napi_del you could
unregister the mem model and the xdp_info. Basically the two are tied
closer to the NAPI instance than the Rx queue itself, so it would make
sense to just take care of them here. You might look at just putting
together a function to handle all of it, since then you just pass
&adapter->queue once and then use a local variable in the function.
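
A minimal sketch of such a function, under the same assumptions as
above (tsnep_queue_close is an invented name, and xdp_rxq is assumed to
live in struct tsnep_queue):

static void tsnep_queue_close(struct tsnep_queue *queue)
{
	napi_disable(&queue->napi);

	/* unregister the memory model and the XDP RX queue info between
	 * napi_disable() and netif_napi_del(); xdp_rxq_info_unreg() also
	 * drops the mem model registration
	 */
	if (xdp_rxq_info_is_reg(&queue->xdp_rxq))
		xdp_rxq_info_unreg(&queue->xdp_rxq);

	netif_napi_del(&queue->napi);
}

tsnep_rx_close()/tsnep_tx_close() and tsnep_free_irq() would stay in
the caller as they are today; the page_pool itself would still be
destroyed later in tsnep_rx_ring_cleanup().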

Patch

diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 855738d31d73..2268ff793edf 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -134,6 +134,8 @@  struct tsnep_rx {
 	u32 dropped;
 	u32 multicast;
 	u32 alloc_failed;
+
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct tsnep_queue {
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 0c9669edb2dd..451ad1849b9d 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -792,6 +792,9 @@  static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 		entry->page = NULL;
 	}
 
+	if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+		xdp_rxq_info_unreg(&rx->xdp_rxq);
+
 	if (rx->page_pool)
 		page_pool_destroy(rx->page_pool);
 
@@ -807,7 +810,7 @@  static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 	}
 }
 
-static int tsnep_rx_ring_init(struct tsnep_rx *rx)
+static int tsnep_rx_ring_init(struct tsnep_rx *rx, unsigned int napi_id)
 {
 	struct device *dmadev = rx->adapter->dmadev;
 	struct tsnep_rx_entry *entry;
@@ -850,6 +853,15 @@  static int tsnep_rx_ring_init(struct tsnep_rx *rx)
 		goto failed;
 	}
 
+	retval = xdp_rxq_info_reg(&rx->xdp_rxq, rx->adapter->netdev,
+				  rx->queue_index, napi_id);
+	if (retval)
+		goto failed;
+	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					    rx->page_pool);
+	if (retval)
+		goto failed;
+
 	for (i = 0; i < TSNEP_RING_SIZE; i++) {
 		entry = &rx->entry[i];
 		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
@@ -1104,7 +1116,8 @@  static bool tsnep_rx_pending(struct tsnep_rx *rx)
 }
 
 static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
-			 int queue_index, struct tsnep_rx *rx)
+			 unsigned int napi_id, int queue_index,
+			 struct tsnep_rx *rx)
 {
 	dma_addr_t dma;
 	int retval;
@@ -1118,7 +1131,7 @@  static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
 	else
 		rx->offset = TSNEP_SKB_PAD;
 
-	retval = tsnep_rx_ring_init(rx);
+	retval = tsnep_rx_ring_init(rx, napi_id);
 	if (retval)
 		return retval;
 
@@ -1245,14 +1258,19 @@  static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
 static int tsnep_netdev_open(struct net_device *netdev)
 {
 	struct tsnep_adapter *adapter = netdev_priv(netdev);
-	int i;
-	void __iomem *addr;
 	int tx_queue_index = 0;
 	int rx_queue_index = 0;
-	int retval;
+	unsigned int napi_id;
+	void __iomem *addr;
+	int i, retval;
 
 	for (i = 0; i < adapter->num_queues; i++) {
 		adapter->queue[i].adapter = adapter;
+
+		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
+			       tsnep_poll);
+		napi_id = adapter->queue[i].napi.napi_id;
+
 		if (adapter->queue[i].tx) {
 			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
 			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
@@ -1263,7 +1281,7 @@  static int tsnep_netdev_open(struct net_device *netdev)
 		}
 		if (adapter->queue[i].rx) {
 			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
-			retval = tsnep_rx_open(adapter, addr,
+			retval = tsnep_rx_open(adapter, addr, napi_id,
 					       rx_queue_index,
 					       adapter->queue[i].rx);
 			if (retval)
@@ -1295,8 +1313,6 @@  static int tsnep_netdev_open(struct net_device *netdev)
 		goto phy_failed;
 
 	for (i = 0; i < adapter->num_queues; i++) {
-		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
-			       tsnep_poll);
 		napi_enable(&adapter->queue[i].napi);
 
 		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -1317,6 +1333,8 @@  static int tsnep_netdev_open(struct net_device *netdev)
 			tsnep_rx_close(adapter->queue[i].rx);
 		if (adapter->queue[i].tx)
 			tsnep_tx_close(adapter->queue[i].tx);
+
+		netif_napi_del(&adapter->queue[i].napi);
 	}
 	return retval;
 }
@@ -1335,7 +1353,6 @@  static int tsnep_netdev_close(struct net_device *netdev)
 		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
 
 		napi_disable(&adapter->queue[i].napi);
-		netif_napi_del(&adapter->queue[i].napi);
 
 		tsnep_free_irq(&adapter->queue[i], i == 0);
 
@@ -1343,6 +1360,8 @@  static int tsnep_netdev_close(struct net_device *netdev)
 			tsnep_rx_close(adapter->queue[i].rx);
 		if (adapter->queue[i].tx)
 			tsnep_tx_close(adapter->queue[i].tx);
+
+		netif_napi_del(&adapter->queue[i].napi);
 	}
 
 	return 0;