@@ -103,7 +103,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
-mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o
+mlx5_core-$(CONFIG_MLX5_EN_NVMEOTCP) += en_accel/fs_tcp.o en_accel/nvmeotcp.o en_accel/nvmeotcp_rxtx.o
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
@@ -628,6 +628,7 @@ struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
@@ -229,6 +229,7 @@ static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_b
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx)
@@ -13,6 +13,7 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx);
new file mode 100644
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+
+#include "en_accel/nvmeotcp_rxtx.h"
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5E_TC_FLOW_ID_MASK 0x00ffffff
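+/* A resync CQE carries the TCP sequence number from which the device asks
+ * to be resynchronized; forward it to the ULP as a pending resync request.
+ */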
+static void nvmeotcp_update_resync(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_cqe128 *cqe128)
+{
+ const struct ulp_ddp_ulp_ops *ulp_ops;
+ u32 seq;
+
+ seq = be32_to_cpu(cqe128->resync_tcp_sn);
+ ulp_ops = inet_csk(queue->sk)->icsk_ulp_ddp_ops;
+ if (ulp_ops && ulp_ops->resync_request)
+ ulp_ops->resync_request(queue->sk, seq, ULP_DDP_RESYNC_PENDING);
+}
+
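+/* DDP destination iterator state: queue->ccoff counts the bytes covered by
+ * fully consumed SGL entries, queue->ccoff_inner is the offset within the
+ * current entry and queue->ccsglidx indexes that entry.
+ */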
+static void mlx5e_nvmeotcp_advance_sgl_iter(struct mlx5e_nvmeotcp_queue *queue)
+{
+ struct mlx5e_nvmeotcp_queue_entry *nqe = &queue->ccid_table[queue->ccid];
+
+ queue->ccoff += nqe->sgl[queue->ccsglidx].length;
+ queue->ccoff_inner = 0;
+ queue->ccsglidx++;
+}
+
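+/* Attach one chunk of the current SGL entry to the skb as an rx frag:
+ * sync it for CPU access and take a page reference for the new user.
+ */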
+static inline void
+mlx5e_nvmeotcp_add_skb_frag(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_nvmeotcp_queue_entry *nqe, u32 fragsz)
+{
+ dma_sync_single_for_cpu(&netdev->dev,
+ nqe->sgl[queue->ccsglidx].offset + queue->ccoff_inner,
+ fragsz, DMA_FROM_DEVICE);
+ page_ref_inc(compound_head(sg_page(&nqe->sgl[queue->ccsglidx])));
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ sg_page(&nqe->sgl[queue->ccsglidx]),
+ nqe->sgl[queue->ccsglidx].offset + queue->ccoff_inner,
+ fragsz,
+ fragsz);
+}
+
+static inline void
+mlx5_nvmeotcp_add_tail_nonlinear(struct mlx5e_nvmeotcp_queue *queue,
+ struct sk_buff *skb, skb_frag_t *org_frags,
+ int org_nr_frags, int frag_index)
+{
+ while (org_nr_frags != frag_index) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ skb_frag_page(&org_frags[frag_index]),
+ skb_frag_off(&org_frags[frag_index]),
+ skb_frag_size(&org_frags[frag_index]),
+ skb_frag_size(&org_frags[frag_index]));
+ page_ref_inc(skb_frag_page(&org_frags[frag_index]));
+ frag_index++;
+ }
+}
+
+static void
+mlx5_nvmeotcp_add_tail(struct mlx5e_nvmeotcp_queue *queue, struct sk_buff *skb,
+ int offset, int len)
+{
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, virt_to_page(skb->data), offset, len,
+ len);
+ page_ref_inc(virt_to_page(skb->data));
+}
+
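+/* Carve @remaining payload bytes out of the skb frags. The frags holding the
+ * bytes that follow are saved in @org_frags and dropped from the skb so the
+ * caller can re-attach them as the tail after the DDP frags.
+ */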
+static void mlx5_nvmeotcp_trim_nonlinear(struct sk_buff *skb, skb_frag_t *org_frags,
+ int *frag_index, int remaining)
+{
+ unsigned int frag_size;
+ int nr_frags;
+
+ /* skip @remaining bytes in frags */
+ *frag_index = 0;
+ while (remaining) {
+ frag_size = skb_frag_size(&skb_shinfo(skb)->frags[*frag_index]);
+ if (frag_size > remaining) {
+ skb_frag_off_add(&skb_shinfo(skb)->frags[*frag_index],
+ remaining);
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[*frag_index],
+ remaining);
+ remaining = 0;
+ } else {
+ remaining -= frag_size;
+ skb_frag_unref(skb, *frag_index);
+ *frag_index += 1;
+ }
+ }
+
+ /* save original frags for the tail and unref */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ memcpy(&org_frags[*frag_index], &skb_shinfo(skb)->frags[*frag_index],
+ (nr_frags - *frag_index) * sizeof(skb_frag_t));
+ while (--nr_frags >= *frag_index)
+ skb_frag_unref(skb, nr_frags);
+
+ /* remove frags from skb */
+ skb_shinfo(skb)->nr_frags = 0;
+ skb->len -= skb->data_len;
+ skb->truesize -= skb->data_len;
+ skb->data_len = 0;
+}
+
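+/* The payload of a zero-copy NVMEoTCP packet was placed directly into the
+ * destination buffers described by the queue SGL. Rebuild the skb so it
+ * still represents the full TCP stream: keep the PDU headers, drop the
+ * payload bytes it carries and re-attach them as frags pointing at the
+ * DDP destination pages.
+ */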
+static bool
+mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_nvmeotcp_queue_entry *nqe;
+ skb_frag_t org_frags[MAX_SKB_FRAGS];
+ struct mlx5e_nvmeotcp_queue *queue;
+ int org_nr_frags, frag_index;
+ struct mlx5e_cqe128 *cqe128;
+ u32 queue_id;
+
+ queue_id = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
+ if (unlikely(!queue)) {
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
+ if (cqe_is_nvmeotcp_resync(cqe)) {
+ nvmeotcp_update_resync(queue, cqe128);
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+	/* If a resync occurred in the previous CQE, the crcvalid bit of the
+	 * current CQE may not be valid, so treat it as 0.
+	 */
+ if (unlikely(queue->after_resync_cqe) && cqe_is_nvmeotcp_crcvalid(cqe)) {
+ skb->ulp_crc = 0;
+ queue->after_resync_cqe = 0;
+ } else {
+ if (queue->crc_rx)
+ skb->ulp_crc = cqe_is_nvmeotcp_crcvalid(cqe);
+ }
+
+ skb->ulp_ddp = cqe_is_nvmeotcp_zc(cqe);
+ if (!cqe_is_nvmeotcp_zc(cqe)) {
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+	/* DDP placement info from the big CQE: ccid selects the destination
+	 * SGL, ccoff/cclen give the placed byte range, hlen the header length.
+	 */
+ ccid = be16_to_cpu(cqe128->ccid);
+ ccoff = be32_to_cpu(cqe128->ccoff);
+ cclen = be16_to_cpu(cqe128->cclen);
+ hlen = be16_to_cpu(cqe128->hlen);
+
+ /* carve a hole in the skb for DDP data */
+ org_nr_frags = skb_shinfo(skb)->nr_frags;
+ mlx5_nvmeotcp_trim_nonlinear(skb, org_frags, &frag_index, cclen);
+ nqe = &queue->ccid_table[ccid];
+
+ /* packet starts new ccid? */
+ if (queue->ccid != ccid || queue->ccid_gen != nqe->ccid_gen) {
+ queue->ccid = ccid;
+ queue->ccoff = 0;
+ queue->ccoff_inner = 0;
+ queue->ccsglidx = 0;
+ queue->ccid_gen = nqe->ccid_gen;
+ }
+
+ /* skip inside cc until the ccoff in the cqe */
+ while (queue->ccoff + queue->ccoff_inner < ccoff) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(off_t, remaining,
+ ccoff - (queue->ccoff + queue->ccoff_inner));
+
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+	/* attach cclen bytes of the DDP destination pages as skb frags */
+ while (to_copy < cclen) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(int, remaining, cclen - to_copy);
+
+ mlx5e_nvmeotcp_add_skb_frag(netdev, skb, queue, nqe, fragsz);
+ to_copy += fragsz;
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+ if (cqe_bcnt > hlen + cclen) {
+ remaining = cqe_bcnt - hlen - cclen;
+ mlx5_nvmeotcp_add_tail_nonlinear(queue, skb, org_frags,
+ org_nr_frags,
+ frag_index);
+ }
+
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+}
+
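+/* Same flow as the nonlinear variant, for skbs whose data sits entirely in
+ * the linear area: trim the skb to the PDU headers, attach the DDP
+ * destination pages and re-add any trailing bytes from the linear buffer.
+ */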
+static bool
+mlx5e_nvmeotcp_rebuild_rx_skb_linear(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ int ccoff, cclen, hlen, ccid, remaining, fragsz, to_copy = 0;
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_nvmeotcp_queue_entry *nqe;
+ struct mlx5e_nvmeotcp_queue *queue;
+ struct mlx5e_cqe128 *cqe128;
+ u32 queue_id;
+
+ queue_id = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
+ queue = mlx5e_nvmeotcp_get_queue(priv->nvmeotcp, queue_id);
+ if (unlikely(!queue)) {
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
+ if (cqe_is_nvmeotcp_resync(cqe)) {
+ nvmeotcp_update_resync(queue, cqe128);
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+	/* If a resync occurred in the previous CQE, the crcvalid bit of the
+	 * current CQE may not be valid, so treat it as 0.
+	 */
+ if (unlikely(queue->after_resync_cqe) && cqe_is_nvmeotcp_crcvalid(cqe)) {
+ skb->ulp_crc = 0;
+ queue->after_resync_cqe = 0;
+ } else {
+ if (queue->crc_rx)
+ skb->ulp_crc = cqe_is_nvmeotcp_crcvalid(cqe);
+ }
+
+ skb->ulp_ddp = cqe_is_nvmeotcp_zc(cqe);
+ if (!cqe_is_nvmeotcp_zc(cqe)) {
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+ }
+
+	/* DDP placement info from the big CQE: ccid selects the destination
+	 * SGL, ccoff/cclen give the placed byte range, hlen the header length.
+	 */
+ ccid = be16_to_cpu(cqe128->ccid);
+ ccoff = be32_to_cpu(cqe128->ccoff);
+ cclen = be16_to_cpu(cqe128->cclen);
+ hlen = be16_to_cpu(cqe128->hlen);
+
+ /* carve a hole in the skb for DDP data */
+ skb_trim(skb, hlen);
+ nqe = &queue->ccid_table[ccid];
+
+ /* packet starts new ccid? */
+ if (queue->ccid != ccid || queue->ccid_gen != nqe->ccid_gen) {
+ queue->ccid = ccid;
+ queue->ccoff = 0;
+ queue->ccoff_inner = 0;
+ queue->ccsglidx = 0;
+ queue->ccid_gen = nqe->ccid_gen;
+ }
+
+ /* skip inside cc until the ccoff in the cqe */
+ while (queue->ccoff + queue->ccoff_inner < ccoff) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(off_t, remaining,
+ ccoff - (queue->ccoff + queue->ccoff_inner));
+
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+	/* attach cclen bytes of the DDP destination pages as skb frags */
+ while (to_copy < cclen) {
+ remaining = nqe->sgl[queue->ccsglidx].length - queue->ccoff_inner;
+ fragsz = min_t(int, remaining, cclen - to_copy);
+
+ mlx5e_nvmeotcp_add_skb_frag(netdev, skb, queue, nqe, fragsz);
+ to_copy += fragsz;
+ if (fragsz == remaining)
+ mlx5e_nvmeotcp_advance_sgl_iter(queue);
+ else
+ queue->ccoff_inner += fragsz;
+ }
+
+ if (cqe_bcnt > hlen + cclen) {
+ remaining = cqe_bcnt - hlen - cclen;
+ mlx5_nvmeotcp_add_tail(queue, skb,
+ offset_in_page(skb->data) +
+ hlen + cclen, remaining);
+ }
+
+ mlx5e_nvmeotcp_put_queue(queue);
+ return true;
+}
+
+bool
+mlx5e_nvmeotcp_rebuild_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ if (skb->data_len)
+ return mlx5e_nvmeotcp_rebuild_rx_skb_nonlinear(rq, skb, cqe, cqe_bcnt);
+ else
+ return mlx5e_nvmeotcp_rebuild_rx_skb_linear(rq, skb, cqe, cqe_bcnt);
+}
new file mode 100644
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#ifndef __MLX5E_NVMEOTCP_RXTX_H__
+#define __MLX5E_NVMEOTCP_RXTX_H__
+
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+
+#include <linux/skbuff.h>
+#include "en_accel/nvmeotcp.h"
+
+bool
+mlx5e_nvmeotcp_rebuild_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
+
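+/* For zero-copy NVMEoTCP CQEs only the PDU headers (hlen) are in the packet
+ * buffer; the payload was DDP'd, so it must not be counted as header bytes.
+ */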
+static inline int mlx5_nvmeotcp_get_headlen(struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ struct mlx5e_cqe128 *cqe128;
+
+ if (!cqe_is_nvmeotcp_zc(cqe))
+ return cqe_bcnt;
+
+ cqe128 = container_of(cqe, struct mlx5e_cqe128, cqe64);
+ return be16_to_cpu(cqe128->hlen);
+}
+
+#else
+
+static inline bool
+mlx5e_nvmeotcp_rebuild_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{ return true; }
+
+static inline int mlx5_nvmeotcp_get_headlen(struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{ return cqe_bcnt; }
+
+#endif /* CONFIG_MLX5_EN_NVMEOTCP */
+#endif /* __MLX5E_NVMEOTCP_RXTX_H__ */
@@ -53,7 +53,7 @@
#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
-#include "en_accel/nvmeotcp.h"
+#include "en_accel/nvmeotcp_rxtx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -63,9 +63,11 @@
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -1484,7 +1486,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1495,6 +1497,13 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
skb->mac_len = ETH_HLEN;
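+	/* On failure the NVMEoTCP handler has already freed the skb, so
+	 * return false and let the caller release only its RX descriptor.
+	 */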
+ if (IS_ENABLED(CONFIG_MLX5_EN_NVMEOTCP) && cqe_is_nvmeotcp(cqe)) {
+ bool ret = mlx5e_nvmeotcp_rebuild_rx_skb(rq, skb, cqe, cqe_bcnt);
+
+ if (unlikely(!ret))
+ return ret;
+ }
+
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
@@ -1540,6 +1549,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return true;
}
static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
@@ -1563,7 +1574,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
}
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1572,7 +1583,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1810,7 +1821,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto free_wqe;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb(cqe, skb)) {
@@ -1863,7 +1875,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto free_wqe;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1910,11 +1923,12 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -1959,12 +1973,18 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
}
}
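+/* Bound the header bytes copied into the skb linear part; for zero-copy
+ * NVMEoTCP packets only the PDU headers should be copied, not the DDP'd
+ * payload.
+ */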
+static inline u16 mlx5e_get_headlen_hint(struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
+{
+ return min_t(u32, MLX5E_RX_MAX_HEAD, mlx5_nvmeotcp_get_headlen(cqe, cqe_bcnt));
+}
+
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
- u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
+ u16 headlen = mlx5e_get_headlen_hint(cqe, cqe_bcnt);
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
union mlx5e_alloc_unit *head_au = au;
@@ -2000,6 +2020,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
@@ -2195,7 +2216,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
if (likely(head_size))
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
+ cqe_bcnt, data_offset,
page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
@@ -2270,11 +2292,12 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
mlx5e_xsk_skb_from_cqe_mpwrq_linear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb(cqe, skb)) {
@@ -2611,7 +2634,9 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto free_wqe;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb)))
+ goto free_wqe;
+
skb_push(skb, ETH_HLEN);
dl_port = mlx5e_devlink_get_dl_port(priv);