@@ -109,7 +109,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
-mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o
+mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o en_accel/nvmeotcp_rxtx.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
@@ -526,4 +526,10 @@ static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int
return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
}
+
+static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
+{
+ return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
+}
+
#endif
new file mode 100644
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+
+#include <linux/skbuff_ref.h>
+#include "en_accel/nvmeotcp_rxtx.h"
+#include <linux/mlx5/mlx5_ifc.h>
+#include "en/txrx.h"
+
+#define MLX5E_TC_FLOW_ID_MASK 0x00ffffff
+
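+/* Resolve the RX fragment page backing the packet described by @cqe, for both
+ * striding-RQ and legacy cyclic-RQ layouts.
+ */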
+static struct mlx5e_frag_page *mlx5e_get_frag(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_frag_page *fp;
+
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
+ union mlx5e_alloc_units *au = &wi->alloc_units;
+
+ fp = &au->frag_pages[page_idx];
+ } else {
+ /* Legacy (cyclic) RQ */
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ struct mlx5e_wqe_frag_info *wi = get_frag(rq, ci);
+
+ fp = wi->frag_page;
+ }
+
+ return fp;
+}
+
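+/* Forward the resync TCP sequence number reported by the HW to the ULP so it
+ * can respond to the NIC's resync request.
+ */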
+static void nvmeotcp_update_resync(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_cqe128 *cqe128)
+{
+ const struct ulp_ddp_ulp_ops *ulp_ops;
+ u32 seq;
+
+ seq = be32_to_cpu(cqe128->resync_tcp_sn);
+ ulp_ops = inet_csk(queue->sk)->icsk_ulp_ddp_ops;
+ if (ulp_ops && ulp_ops->resync_request)
+ ulp_ops->resync_request(queue->sk, seq, ULP_DDP_RESYNC_PENDING);
+}
+
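+/* Advance to the next SGL entry of the current CC (command capsule) and reset
+ * the intra-entry offset.
+ */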
+static void mlx5e_nvmeotcp_advance_sgl_iter(struct mlx5e_nvmeotcp_queue *queue)
+{
+ struct mlx5e_nvmeotcp_queue_entry *nqe = &queue->ccid_table[queue->ccid];
+
+ queue->ccoff += nqe->sgl[queue->ccsglidx].length;
+ queue->ccoff_inner = 0;
+ queue->ccsglidx++;
+}
+
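+/* Attach one fragment of the CC destination buffer to @skb, syncing it for
+ * CPU access and taking a page reference, so the stack reads the DDP'ed
+ * payload in place.
+ */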
+static inline void
+mlx5e_nvmeotcp_add_skb_frag(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_nvmeotcp_queue_entry *nqe, u32 fragsz)
+{
+ dma_sync_single_for_cpu(&netdev->dev,
+ nqe->sgl[queue->ccsglidx].offset + queue->ccoff_inner,
+ fragsz, DMA_FROM_DEVICE);
+
+ page_ref_inc(compound_head(sg_page(&nqe->sgl[queue->ccsglidx])));
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&nqe->sgl[queue->ccsglidx]),
+ nqe->sgl[queue->ccsglidx].offset + queue->ccoff_inner,
+ fragsz,
+ fragsz);
+}
+
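+/* Re-attach the saved original frags that follow the DDP hole as the tail of
+ * @skb.
+ */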
+static inline void
+mlx5_nvmeotcp_add_tail_nonlinear(struct sk_buff *skb, skb_frag_t *org_frags,
+ int org_nr_frags, int frag_index)
+{
+ while (org_nr_frags != frag_index) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ skb_frag_page(&org_frags[frag_index]),
+ skb_frag_off(&org_frags[frag_index]),
+ skb_frag_size(&org_frags[frag_index]),
+ skb_frag_size(&org_frags[frag_index]));
+ frag_index++;
+ }
+}
+
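+/* Attach the bytes following the DDP hole of a linear skb as a frag pointing
+ * back into the original RX page; bump the page frag count so the page is not
+ * released while the skb still references it.
+ */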
+static void
+mlx5_nvmeotcp_add_tail(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct mlx5e_nvmeotcp_queue *queue, struct sk_buff *skb,
+ int offset, int len)
+{
+ struct mlx5e_frag_page *frag_page = mlx5e_get_frag(rq, cqe);
+
+ frag_page->frags++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ virt_to_page(skb->data), offset, len, len);
+}
+
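+/* Strip @remaining payload bytes from the skb frags: unref fully-consumed
+ * frags, trim the partially-consumed one, and save the rest in @org_frags for
+ * re-attachment as the tail.
+ */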
+static void mlx5_nvmeotcp_trim_nonlinear(struct sk_buff *skb, skb_frag_t *org_frags,
+ int *frag_index, int remaining)
+{
+ unsigned int frag_size;
+ int nr_frags;
+
+ /* skip @remaining bytes in frags */
+ *frag_index = 0;
+ while (remaining) {
+ frag_size = skb_frag_size(&skb_shinfo(skb)->frags[*frag_index]);
+ if (frag_size > remaining) {
+ skb_frag_off_add(&skb_shinfo(skb)->frags[*frag_index],
+ remaining);
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[*frag_index],
+ remaining);
+ remaining = 0;
+ } else {
+ remaining -= frag_size;
+ skb_frag_unref(skb, *frag_index);
+ *frag_index += 1;
+ }
+ }
+
+ /* save the remaining original frags so they can be re-added as the tail */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ memcpy(&org_frags[*frag_index], &skb_shinfo(skb)->frags[*frag_index],
+ (nr_frags - *frag_index) * sizeof(skb_frag_t));
+
+ /* remove frags from skb */
+ skb_shinfo(skb)->nr_frags = 0;
+ skb->len -= skb->data_len;
+ skb->truesize -= skb->data_len;
+ skb->data_len = 0;
+}
+
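+/* Rebuild a non-linear skb around an NVMEoTCP CQE: handle resync and CRC
+ * indications, carve the DDP'ed payload out of the original frags and replace
+ * it with frags pointing at the CC destination buffers, then re-attach the
+ * remaining tail frags.
+ */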
+static bool
+mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_nvmeotcp_queue_entry *nqe;
+ skb_frag_t org_frags[MAX_SKB_FRAGS];
+ struct mlx5e_nvmeotcp_queue *queue;
+ int org_nr_frags, frag_index;
+ struct mlx5e_cqe128 *cqe128;
+ u32 queue_id;
+
+ queue_id = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
+ if (unlikely(!queue)) {
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
+ if (cqe_is_nvmeotcp_resync(cqe)) {
+ nvmeotcp_update_resync(queue, cqe128);
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+ /* If a resync occurred in the previous CQE, the crcvalid bit of the
+ * current CQE may not be valid, so treat it as 0.
+ */
+ if (unlikely(queue->after_resync_cqe) && cqe_is_nvmeotcp_crcvalid(cqe)) {
+ skb->ulp_crc = 0;
+ queue->after_resync_cqe = 0;
+ } else {
+ if (queue->crc_rx)
+ skb->ulp_crc = cqe_is_nvmeotcp_crcvalid(cqe);
+ }
+
+ skb->no_condense = cqe_is_nvmeotcp_zc(cqe);
+ if (!cqe_is_nvmeotcp_zc(cqe)) {
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+ /* extract the CC (command capsule) DDP parameters from the CQE */
+ ccid = be16_to_cpu(cqe128->ccid);
+ ccoff = be32_to_cpu(cqe128->ccoff);
+ cclen = be16_to_cpu(cqe128->cclen);
+ hlen = be16_to_cpu(cqe128->hlen);
+
+ /* carve a hole in the skb for DDP data */
+ org_nr_frags = skb_shinfo(skb)->nr_frags;
+ mlx5_nvmeotcp_trim_nonlinear(skb, org_frags, &frag_index, cclen);
+ nqe = &queue->ccid_table[ccid];
+
+ /* packet starts new ccid? */
+ if (queue->ccid != ccid || queue->ccid_gen != nqe->ccid_gen) {
+ queue->ccid = ccid;
+ queue->ccoff = 0;
+ queue->ccoff_inner = 0;
+ queue->ccsglidx = 0;
+ queue->ccid_gen = nqe->ccid_gen;
+ }
+
+ /* skip within the CC until reaching the ccoff reported in the CQE */
+ while (queue->ccoff + queue->ccoff_inner < ccoff) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(off_t, remaining,
+ ccoff - (queue->ccoff + queue->ccoff_inner));
+
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+ /* attach the CC destination buffers to the skb as described by the CQE */
+ while (to_copy < cclen) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(int, remaining, cclen - to_copy);
+
+ mlx5e_nvmeotcp_add_skb_frag(netdev, skb, queue, nqe, fragsz);
+ to_copy += fragsz;
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+ if (cqe_bcnt > hlen + cclen) {
+ remaining = cqe_bcnt - hlen - cclen;
+ mlx5_nvmeotcp_add_tail_nonlinear(skb, org_frags,
+ org_nr_frags,
+ frag_index);
+ }
+
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+}
+
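+/* Linear-skb variant of the rebuild above: the payload hole is carved with
+ * skb_trim() and any tail is re-attached from the original RX page.
+ */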
+static bool
+mlx5e_nvmeotcp_rebuild_rx_skb_linear(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_nvmeotcp_queue_entry *nqe;
+ struct mlx5e_nvmeotcp_queue *queue;
+ struct mlx5e_cqe128 *cqe128;
+ u32 queue_id;
+
+ queue_id = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
+ if (unlikely(!queue)) {
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
+ if (cqe_is_nvmeotcp_resync(cqe)) {
+ nvmeotcp_update_resync(queue, cqe128);
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+ /* If a resync occurred in the previous CQE, the crcvalid bit of the
+ * current CQE may not be valid, so treat it as 0.
+ */
+ if (unlikely(queue->after_resync_cqe) && cqe_is_nvmeotcp_crcvalid(cqe)) {
+ skb->ulp_crc = 0;
+ queue->after_resync_cqe = 0;
+ } else {
+ if (queue->crc_rx)
+ skb->ulp_crc = cqe_is_nvmeotcp_crcvalid(cqe);
+ }
+
+ skb->no_condense = cqe_is_nvmeotcp_zc(cqe);
+ if (!cqe_is_nvmeotcp_zc(cqe)) {
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+ /* extract the CC (command capsule) DDP parameters from the CQE */
+ ccid = be16_to_cpu(cqe128->ccid);
+ ccoff = be32_to_cpu(cqe128->ccoff);
+ cclen = be16_to_cpu(cqe128->cclen);
+ hlen = be16_to_cpu(cqe128->hlen);
+
+ /* carve a hole in the skb for DDP data */
+ skb_trim(skb, hlen);
+ nqe = &queue->ccid_table[ccid];
+
+ /* packet starts new ccid? */
+ if (queue->ccid != ccid || queue->ccid_gen != nqe->ccid_gen) {
+ queue->ccid = ccid;
+ queue->ccoff = 0;
+ queue->ccoff_inner = 0;
+ queue->ccsglidx = 0;
+ queue->ccid_gen = nqe->ccid_gen;
+ }
+
+ /* skip within the CC until reaching the ccoff reported in the CQE */
+ while (queue->ccoff + queue->ccoff_inner < ccoff) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(off_t, remaining,
+ ccoff - (queue->ccoff + queue->ccoff_inner));
+
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+ /* attach the CC destination buffers to the skb as described by the CQE */
+ while (to_copy < cclen) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(int, remaining, cclen - to_copy);
+
+ mlx5e_nvmeotcp_add_skb_frag(netdev, skb, queue, nqe, fragsz);
+ to_copy += fragsz;
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+ if (cqe_bcnt > hlen + cclen) {
+ remaining = cqe_bcnt - hlen - cclen;
+ mlx5_nvmeotcp_add_tail(rq, cqe, queue, skb,
+ offset_in_page(skb->data) +
+ hlen + cclen, remaining);
+ }
+
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+}
+
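+/* DDP'ed payload sits in the frags of a non-linear skb but in the head of a
+ * linear one, hence the two rebuild variants.
+ */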
+bool
+mlx5e_nvmeotcp_rebuild_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ if (skb->data_len)
+ return mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(rq, skb, cqe, cqe_bcnt);
+ else
+ return mlx5e_nvmeotcp_rebuild_rx_skb_linear(rq, skb, cqe, cqe_bcnt);
+}
new file mode 100644
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#ifndef __MLX5E_NVMEOTCP_RXTX_H__
+#define __MLX5E_NVMEOTCP_RXTX_H__
+
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+
+#include <linux/skbuff.h>
+#include "en_accel/nvmeotcp.h"
+
+bool
+mlx5e_nvmeotcp_rebuild_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
+
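+/* For zero-copy NVMEoTCP completions only the PDU header needs to be copied
+ * into the skb head; the payload already sits in the destination buffers.
+ */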
+static inline int mlx5_nvmeotcp_get_headlen(struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ struct mlx5e_cqe128 *cqe128;
+
+ if (!cqe_is_nvmeotcp_zc(cqe))
+ return cqe_bcnt;
+
+ cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
+ return be16_to_cpu(cqe128->hlen);
+}
+
+#else
+
+static inline bool
+mlx5e_nvmeotcp_rebuild_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{ return true; }
+
+static inline int mlx5_nvmeotcp_get_headlen(struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{ return cqe_bcnt; }
+
+#endif /* CONFIG_MLX5_EN_NVMEOTCP */
+#endif /* __MLX5E_NVMEOTCP_RXTX_H__ */
@@ -53,7 +53,7 @@
#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
-#include "en_accel/nvmeotcp.h"
+#include "en_accel/nvmeotcp_rxtx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -336,10 +336,6 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
mlx5e_page_release_fragmented(rq, frag->frag_page);
}
-static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
-{
- return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
-}
static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
u16 ix)
@@ -1566,7 +1562,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1577,6 +1573,13 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
+ if (IS_ENABLED(CONFIG_MLX5_EN_NVMEOTCP) && cqe_is_nvmeotcp(cqe)) {
+ bool ret = mlx5e_nvmeotcp_rebuild_rx_skb(rq, skb, cqe, cqe_bcnt);
+
+ if (unlikely(!ret))
+ return ret;
+ }
+
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
@@ -1623,6 +1626,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return true;
}
static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
@@ -1646,7 +1651,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
}
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1655,7 +1660,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1869,7 +1874,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1916,7 +1922,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1965,7 +1972,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -2011,13 +2019,18 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
}
}
+static inline u16 mlx5e_get_headlen_hint(struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ return min_t(u32, MLX5E_RX_MAX_HEAD, mlx5_nvmeotcp_get_headlen(cqe, cqe_bcnt));
+}
+
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
u32 page_idx)
{
struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
- u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
+ u16 headlen = mlx5e_get_headlen_hint(cqe, cqe_bcnt);
struct mlx5e_frag_page *head_page = frag_page;
u32 frag_offset = head_offset;
u32 byte_cnt = cqe_bcnt;
@@ -2440,7 +2453,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2773,7 +2787,9 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto wq_cyc_pop;
+
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,