[net-next,6/6] tsnep: Add XDP RX support

Message ID 20221203215416.13465-7-gerhard@engleder-embedded.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series tsnep: XDP support

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 0
netdev/cc_maintainers success CCed 11 of 11 maintainers
netdev/build_clang success Errors and warnings before: 2 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1 this patch: 0
netdev/checkpatch warning WARNING: line length of 95 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline fail Was 0 now: 1

Commit Message

Gerhard Engleder Dec. 3, 2022, 9:54 p.m. UTC
If a BPF program is set up, then run it for every received frame
and execute the selected action.

Test results with A53 1.2GHz:

XDP_DROP (samples/bpf/xdp1)
proto 17:     865683 pkt/s

XDP_TX (samples/bpf/xdp2)
proto 17:     253594 pkt/s

XDP_REDIRECT (samples/bpf/xdpsock)
 sock0@eth2:0 rxdrop xdp-drv
                   pps            pkts           1.00
rx                 862,258        4,514,166
tx                 0              0

XDP_REDIRECT (samples/bpf/xdp_redirect)
eth2->eth1         608,895 rx/s   0 err,drop/s   608,895 xmit/s

Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
---
 drivers/net/ethernet/engleder/tsnep_main.c | 100 +++++++++++++++++++++
 1 file changed, 100 insertions(+)
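
For context, an xdp1-style XDP_DROP test program boils down to something
like this (a minimal sketch, not the actual sample source):

// SPDX-License-Identifier: GPL-2.0
/* Drop every frame: the driver sees act == XDP_DROP and recycles the
 * page back to the page pool, as implemented in this patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";

Attaching it in native (driver) mode, so that the new tsnep XDP RX path
actually runs, could look like this with libbpf (bpf_xdp_attach() needs
libbpf >= 0.8; the object file and program names here are hypothetical):

#include <bpf/libbpf.h>
#include <linux/if_link.h>	/* XDP_FLAGS_DRV_MODE */
#include <net/if.h>

static int attach_xdp_drop(const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	obj = bpf_object__open_file("xdp_drop.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return -1;
	prog = bpf_object__find_program_by_name(obj, "xdp_drop_all");
	if (!prog)
		return -1;
	/* XDP_FLAGS_DRV_MODE requests the native driver path, not generic XDP */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}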

Comments

Paolo Abeni Dec. 7, 2022, 10:18 a.m. UTC | #1
On Sat, 2022-12-03 at 22:54 +0100, Gerhard Engleder wrote:
> If a BPF program is set up, then run it for every received frame
> and execute the selected action.
> 
> Test results with A53 1.2GHz:
> 
> XDP_DROP (samples/bpf/xdp1)
> proto 17:     865683 pkt/s
> 
> XDP_TX (samples/bpf/xdp2)
> proto 17:     253594 pkt/s
> 
> XDP_REDIRECT (samples/bpf/xdpsock)
>  sock0@eth2:0 rxdrop xdp-drv
>                    pps            pkts           1.00
> rx                 862,258        4,514,166
> tx                 0              0
> 
> XDP_REDIRECT (samples/bpf/xdp_redirect)
> eth2->eth1         608,895 rx/s   0 err,drop/s   608,895 xmit/s
> 
> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
> ---
>  drivers/net/ethernet/engleder/tsnep_main.c | 100 +++++++++++++++++++++
>  1 file changed, 100 insertions(+)
> 
> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
> index 725b2a1e7be4..4e3c6bd3dc9f 100644
> --- a/drivers/net/ethernet/engleder/tsnep_main.c
> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
> @@ -27,6 +27,7 @@
>  #include <linux/phy.h>
>  #include <linux/iopoll.h>
>  #include <linux/bpf.h>
> +#include <linux/bpf_trace.h>
>  
>  #define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
>  #define TSNEP_HEADROOM ALIGN(max(TSNEP_SKB_PAD, XDP_PACKET_HEADROOM), 4)
> @@ -44,6 +45,11 @@
>  #define TSNEP_COALESCE_USECS_MAX     ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
>  				      ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
>  
> +#define TSNEP_XDP_PASS		0
> +#define TSNEP_XDP_CONSUMED	BIT(0)
> +#define TSNEP_XDP_TX		BIT(1)
> +#define TSNEP_XDP_REDIRECT	BIT(2)
> +
>  enum {
>  	__TSNEP_DOWN,
>  };
> @@ -819,6 +825,11 @@ static inline unsigned int tsnep_rx_offset(struct tsnep_rx *rx)
>  	return TSNEP_SKB_PAD;
>  }
>  
> +static inline unsigned int tsnep_rx_offset_xdp(void)

Please, no 'inline' in C files; the compiler will do a better job
without it.

> +{
> +	return XDP_PACKET_HEADROOM;
> +}
> +
>  static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>  {
>  	struct device *dmadev = rx->adapter->dmadev;
> @@ -1024,6 +1035,65 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
>  	return i;
>  }
>  
> +static int tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
> +			      struct xdp_buff *xdp)
> +{
> +	unsigned int length;
> +	unsigned int sync;
> +	u32 act;
> +
> +	length = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
> +
> +	act = bpf_prog_run_xdp(prog, xdp);
> +
> +	/* xdp_adjust_tail: DMA sync for_device covers max len CPU touched */
> +	sync = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
> +	sync = max(sync, length);
> +
> +	switch (act) {
> +	case XDP_PASS:
> +		return TSNEP_XDP_PASS;
> +	case XDP_TX:
> +		if (tsnep_xdp_xmit_back(rx->adapter, xdp) < 0)
> +			goto out_failure;
> +		return TSNEP_XDP_TX;
> +	case XDP_REDIRECT:
> +		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
> +			goto out_failure;
> +		return TSNEP_XDP_REDIRECT;
> +	default:
> +		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
> +		fallthrough;
> +	case XDP_ABORTED:
> +out_failure:
> +		trace_xdp_exception(rx->adapter->netdev, prog, act);
> +		fallthrough;
> +	case XDP_DROP:
> +		page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
> +				   sync, true);
> +		return TSNEP_XDP_CONSUMED;
> +	}
> +}
> +
> +static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status)
> +{
> +	int cpu = smp_processor_id();
> +	int queue;
> +	struct netdev_queue *nq;
> +
> +	if (status & TSNEP_XDP_TX) {
> +		queue = cpu % adapter->num_tx_queues;
> +		nq = netdev_get_tx_queue(adapter->netdev, queue);
> +
> +		__netif_tx_lock(nq, cpu);
> +		tsnep_xdp_xmit_flush(&adapter->tx[queue]);
> +		__netif_tx_unlock(nq);
> +	}
> +
> +	if (status & TSNEP_XDP_REDIRECT)
> +		xdp_do_flush();
> +}
> +
>  static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
>  				       int length)
>  {
> @@ -1062,12 +1132,17 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
>  	int desc_available;
>  	int done = 0;
>  	enum dma_data_direction dma_dir;
> +	struct bpf_prog *prog;
>  	struct tsnep_rx_entry *entry;
> +	struct xdp_buff xdp;
> +	int xdp_status = 0;
>  	struct sk_buff *skb;
>  	int length;
> +	int retval;
>  
>  	desc_available = tsnep_rx_desc_available(rx);
>  	dma_dir = page_pool_get_dma_dir(rx->page_pool);
> +	prog = READ_ONCE(rx->adapter->xdp_prog);
>  
>  	while (likely(done < budget) && (rx->read != rx->write)) {
>  		entry = &rx->entry[rx->read];
> @@ -1111,6 +1186,28 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
>  		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
>  		desc_available++;
>  
> +		if (prog) {
> +			xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
> +			xdp_prepare_buff(&xdp, page_address(entry->page),
> +					 tsnep_rx_offset_xdp() + TSNEP_RX_INLINE_METADATA_SIZE,
> +					 length - TSNEP_RX_INLINE_METADATA_SIZE,
> +					 false);
> +			retval = tsnep_xdp_run_prog(rx, prog, &xdp);
> +		} else {
> +			retval = TSNEP_XDP_PASS;
> +		}
> +		if (retval) {
> +			if (retval & (TSNEP_XDP_TX | TSNEP_XDP_REDIRECT))
> +				xdp_status |= retval;

Here you could avoid a couple of conditionals by passing xdp_status as an
additional tsnep_xdp_run_prog() argument, letting the latter update it
under the existing switch, returning a single consumed/pass-up bool, and
testing that value just after tsnep_xdp_run_prog().

Mostly a matter of personal taste, I guess.
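
E.g. something like this (completely untested, just to sketch the idea,
reusing the names from this patch; TSNEP_XDP_PASS/TSNEP_XDP_CONSUMED
would then become unnecessary):

static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
			       struct xdp_buff *xdp, int *status)
{
	unsigned int length;
	unsigned int sync;
	u32 act;

	length = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();

	act = bpf_prog_run_xdp(prog, xdp);

	/* xdp_adjust_tail: DMA sync for_device covers max len CPU touched */
	sync = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
	sync = max(sync, length);

	switch (act) {
	case XDP_PASS:
		return false;	/* not consumed, fall back to the skb path */
	case XDP_TX:
		if (tsnep_xdp_xmit_back(rx->adapter, xdp) < 0)
			goto out_failure;
		*status |= TSNEP_XDP_TX;
		return true;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		*status |= TSNEP_XDP_REDIRECT;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
				   sync, true);
		return true;	/* consumed */
	}
}

The caller then needs a single test:

	if (prog) {
		xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
		xdp_prepare_buff(&xdp, page_address(entry->page),
				 tsnep_rx_offset_xdp() + TSNEP_RX_INLINE_METADATA_SIZE,
				 length - TSNEP_RX_INLINE_METADATA_SIZE,
				 false);
		if (tsnep_xdp_run_prog(rx, prog, &xdp, &xdp_status)) {
			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			entry->page = NULL;
			continue;
		}
	}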


Cheers,

Paolo
Gerhard Engleder Dec. 7, 2022, 8:12 p.m. UTC | #2
On 07.12.22 11:18, Paolo Abeni wrote:
> On Sat, 2022-12-03 at 22:54 +0100, Gerhard Engleder wrote:
>> If a BPF program is set up, then run it for every received frame
>> and execute the selected action.
>>
>> Test results with A53 1.2GHz:
>>
>> XDP_DROP (samples/bpf/xdp1)
>> proto 17:     865683 pkt/s
>>
>> XDP_TX (samples/bpf/xdp2)
>> proto 17:     253594 pkt/s
>>
>> XDP_REDIRECT (samples/bpf/xdpsock)
>>   sock0@eth2:0 rxdrop xdp-drv
>>                     pps            pkts           1.00
>> rx                 862,258        4,514,166
>> tx                 0              0
>>
>> XDP_REDIRECT (samples/bpf/xdp_redirect)
>> eth2->eth1         608,895 rx/s   0 err,drop/s   608,895 xmit/s
>>
>> Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
>> ---
>>   drivers/net/ethernet/engleder/tsnep_main.c | 100 +++++++++++++++++++++
>>   1 file changed, 100 insertions(+)
>>
>> diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
>> index 725b2a1e7be4..4e3c6bd3dc9f 100644
>> --- a/drivers/net/ethernet/engleder/tsnep_main.c
>> +++ b/drivers/net/ethernet/engleder/tsnep_main.c
>> @@ -27,6 +27,7 @@
>>   #include <linux/phy.h>
>>   #include <linux/iopoll.h>
>>   #include <linux/bpf.h>
>> +#include <linux/bpf_trace.h>
>>   
>>   #define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
>>   #define TSNEP_HEADROOM ALIGN(max(TSNEP_SKB_PAD, XDP_PACKET_HEADROOM), 4)
>> @@ -44,6 +45,11 @@
>>   #define TSNEP_COALESCE_USECS_MAX     ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
>>   				      ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
>>   
>> +#define TSNEP_XDP_PASS		0
>> +#define TSNEP_XDP_CONSUMED	BIT(0)
>> +#define TSNEP_XDP_TX		BIT(1)
>> +#define TSNEP_XDP_REDIRECT	BIT(2)
>> +
>>   enum {
>>   	__TSNEP_DOWN,
>>   };
>> @@ -819,6 +825,11 @@ static inline unsigned int tsnep_rx_offset(struct tsnep_rx *rx)
>>   	return TSNEP_SKB_PAD;
>>   }
>>   
>> +static inline unsigned int tsnep_rx_offset_xdp(void)
> 
> Please, no 'inline' in C files; the compiler will do a better job
> without it.

Will be fixed.
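
That is, simply without the keyword:

static unsigned int tsnep_rx_offset_xdp(void)
{
	return XDP_PACKET_HEADROOM;
}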

>> +{
>> +	return XDP_PACKET_HEADROOM;
>> +}
>> +
>>   static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
>>   {
>>   	struct device *dmadev = rx->adapter->dmadev;
>> @@ -1024,6 +1035,65 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
>>   	return i;
>>   }
>>   
>> +static int tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
>> +			      struct xdp_buff *xdp)
>> +{
>> +	unsigned int length;
>> +	unsigned int sync;
>> +	u32 act;
>> +
>> +	length = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
>> +
>> +	act = bpf_prog_run_xdp(prog, xdp);
>> +
>> +	/* xdp_adjust_tail: DMA sync for_device covers max len CPU touched */
>> +	sync = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
>> +	sync = max(sync, length);
>> +
>> +	switch (act) {
>> +	case XDP_PASS:
>> +		return TSNEP_XDP_PASS;
>> +	case XDP_TX:
>> +		if (tsnep_xdp_xmit_back(rx->adapter, xdp) < 0)
>> +			goto out_failure;
>> +		return TSNEP_XDP_TX;
>> +	case XDP_REDIRECT:
>> +		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
>> +			goto out_failure;
>> +		return TSNEP_XDP_REDIRECT;
>> +	default:
>> +		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
>> +		fallthrough;
>> +	case XDP_ABORTED:
>> +out_failure:
>> +		trace_xdp_exception(rx->adapter->netdev, prog, act);
>> +		fallthrough;
>> +	case XDP_DROP:
>> +		page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
>> +				   sync, true);
>> +		return TSNEP_XDP_CONSUMED;
>> +	}
>> +}
>> +
>> +static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status)
>> +{
>> +	int cpu = smp_processor_id();
>> +	int queue;
>> +	struct netdev_queue *nq;
>> +
>> +	if (status & TSNEP_XDP_TX) {
>> +		queue = cpu % adapter->num_tx_queues;
>> +		nq = netdev_get_tx_queue(adapter->netdev, queue);
>> +
>> +		__netif_tx_lock(nq, cpu);
>> +		tsnep_xdp_xmit_flush(&adapter->tx[queue]);
>> +		__netif_tx_unlock(nq);
>> +	}
>> +
>> +	if (status & TSNEP_XDP_REDIRECT)
>> +		xdp_do_flush();
>> +}
>> +
>>   static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
>>   				       int length)
>>   {
>> @@ -1062,12 +1132,17 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
>>   	int desc_available;
>>   	int done = 0;
>>   	enum dma_data_direction dma_dir;
>> +	struct bpf_prog *prog;
>>   	struct tsnep_rx_entry *entry;
>> +	struct xdp_buff xdp;
>> +	int xdp_status = 0;
>>   	struct sk_buff *skb;
>>   	int length;
>> +	int retval;
>>   
>>   	desc_available = tsnep_rx_desc_available(rx);
>>   	dma_dir = page_pool_get_dma_dir(rx->page_pool);
>> +	prog = READ_ONCE(rx->adapter->xdp_prog);
>>   
>>   	while (likely(done < budget) && (rx->read != rx->write)) {
>>   		entry = &rx->entry[rx->read];
>> @@ -1111,6 +1186,28 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
>>   		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
>>   		desc_available++;
>>   
>> +		if (prog) {
>> +			xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
>> +			xdp_prepare_buff(&xdp, page_address(entry->page),
>> +					 tsnep_rx_offset_xdp() + TSNEP_RX_INLINE_METADATA_SIZE,
>> +					 length - TSNEP_RX_INLINE_METADATA_SIZE,
>> +					 false);
>> +			retval = tsnep_xdp_run_prog(rx, prog, &xdp);
>> +		} else {
>> +			retval = TSNEP_XDP_PASS;
>> +		}
>> +		if (retval) {
>> +			if (retval & (TSNEP_XDP_TX | TSNEP_XDP_REDIRECT))
>> +				xdp_status |= retval;
> 
> Here you could avoid a couple of conditionals by passing xdp_status as an
> additional tsnep_xdp_run_prog() argument, letting the latter update it
> under the existing switch, returning a single consumed/pass-up bool, and
> testing that value just after tsnep_xdp_run_prog().
> 
> Mostly a matter of personal taste, I guess.

I will try it.

Thanks for the review!

Gerhard

Patch

diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 725b2a1e7be4..4e3c6bd3dc9f 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -27,6 +27,7 @@ 
 #include <linux/phy.h>
 #include <linux/iopoll.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 #define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
 #define TSNEP_HEADROOM ALIGN(max(TSNEP_SKB_PAD, XDP_PACKET_HEADROOM), 4)
@@ -44,6 +45,11 @@ 
 #define TSNEP_COALESCE_USECS_MAX     ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
 				      ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
 
+#define TSNEP_XDP_PASS		0
+#define TSNEP_XDP_CONSUMED	BIT(0)
+#define TSNEP_XDP_TX		BIT(1)
+#define TSNEP_XDP_REDIRECT	BIT(2)
+
 enum {
 	__TSNEP_DOWN,
 };
@@ -819,6 +825,11 @@  static inline unsigned int tsnep_rx_offset(struct tsnep_rx *rx)
 	return TSNEP_SKB_PAD;
 }
 
+static inline unsigned int tsnep_rx_offset_xdp(void)
+{
+	return XDP_PACKET_HEADROOM;
+}
+
 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
 {
 	struct device *dmadev = rx->adapter->dmadev;
@@ -1024,6 +1035,65 @@  static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
 	return i;
 }
 
+static int tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+			      struct xdp_buff *xdp)
+{
+	unsigned int length;
+	unsigned int sync;
+	u32 act;
+
+	length = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
+
+	act = bpf_prog_run_xdp(prog, xdp);
+
+	/* xdp_adjust_tail: DMA sync for_device covers max len CPU touched */
+	sync = xdp->data_end - xdp->data_hard_start - tsnep_rx_offset_xdp();
+	sync = max(sync, length);
+
+	switch (act) {
+	case XDP_PASS:
+		return TSNEP_XDP_PASS;
+	case XDP_TX:
+		if (tsnep_xdp_xmit_back(rx->adapter, xdp) < 0)
+			goto out_failure;
+		return TSNEP_XDP_TX;
+	case XDP_REDIRECT:
+		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+			goto out_failure;
+		return TSNEP_XDP_REDIRECT;
+	default:
+		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+		fallthrough;
+	case XDP_ABORTED:
+out_failure:
+		trace_xdp_exception(rx->adapter->netdev, prog, act);
+		fallthrough;
+	case XDP_DROP:
+		page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
+				   sync, true);
+		return TSNEP_XDP_CONSUMED;
+	}
+}
+
+static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status)
+{
+	int cpu = smp_processor_id();
+	int queue;
+	struct netdev_queue *nq;
+
+	if (status & TSNEP_XDP_TX) {
+		queue = cpu % adapter->num_tx_queues;
+		nq = netdev_get_tx_queue(adapter->netdev, queue);
+
+		__netif_tx_lock(nq, cpu);
+		tsnep_xdp_xmit_flush(&adapter->tx[queue]);
+		__netif_tx_unlock(nq);
+	}
+
+	if (status & TSNEP_XDP_REDIRECT)
+		xdp_do_flush();
+}
+
 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
 				       int length)
 {
@@ -1062,12 +1132,17 @@  static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 	int desc_available;
 	int done = 0;
 	enum dma_data_direction dma_dir;
+	struct bpf_prog *prog;
 	struct tsnep_rx_entry *entry;
+	struct xdp_buff xdp;
+	int xdp_status = 0;
 	struct sk_buff *skb;
 	int length;
+	int retval;
 
 	desc_available = tsnep_rx_desc_available(rx);
 	dma_dir = page_pool_get_dma_dir(rx->page_pool);
+	prog = READ_ONCE(rx->adapter->xdp_prog);
 
 	while (likely(done < budget) && (rx->read != rx->write)) {
 		entry = &rx->entry[rx->read];
@@ -1111,6 +1186,28 @@  static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
 		desc_available++;
 
+		if (prog) {
+			xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
+			xdp_prepare_buff(&xdp, page_address(entry->page),
+					 tsnep_rx_offset_xdp() + TSNEP_RX_INLINE_METADATA_SIZE,
+					 length - TSNEP_RX_INLINE_METADATA_SIZE,
+					 false);
+			retval = tsnep_xdp_run_prog(rx, prog, &xdp);
+		} else {
+			retval = TSNEP_XDP_PASS;
+		}
+		if (retval) {
+			if (retval & (TSNEP_XDP_TX | TSNEP_XDP_REDIRECT))
+				xdp_status |= retval;
+
+			rx->packets++;
+			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+
+			entry->page = NULL;
+
+			continue;
+		}
+
 		skb = tsnep_build_skb(rx, entry->page, length);
 		if (skb) {
 			page_pool_release_page(rx->page_pool, entry->page);
@@ -1129,6 +1226,9 @@  static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 		entry->page = NULL;
 	}
 
+	if (xdp_status)
+		tsnep_finalize_xdp(rx->adapter, xdp_status);
+
 	if (desc_available)
 		tsnep_rx_refill(rx, desc_available, false);