@@ -802,7 +802,8 @@ static int __xsk_generic_xmit(struct sock *sk)
 		 * if there is space in it. This avoids having to implement
 		 * any buffering in the Tx path.
 		 */
-		if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
+		err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+		if (err)
 			goto out;
 
 		skb = xsk_build_skb(xs, &desc);
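Why this matters: err is initialized to zero at the top of __xsk_generic_xmit(), so jumping to the out label without assigning it makes a failed reservation look like success to the caller. A minimal standalone sketch of that control flow (the pool type and reserve helper below are placeholders for illustration, not the kernel code):

#include <errno.h>

/* Placeholder type and helper, standing in for the real pool and
 * xsk_cq_reserve_addr_locked(). */
struct pool { int slots_free; };

static int reserve_cq_slot(struct pool *pool, unsigned long long addr)
{
	(void)addr;
	return pool->slots_free-- > 0 ? 0 : -ENOBUFS;	/* queue full */
}

static int xmit_sketch(struct pool *pool, unsigned long long addr)
{
	int err = 0;	/* before the fix, this stayed 0 on failure */

	err = reserve_cq_slot(pool, addr);
	if (err)
		goto out;	/* err now carries -ENOBUFS to the caller */

	/* ... build the skb and transmit the frame ... */
out:
	return err;	/* what sendto() ultimately reports */
}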
@@ -371,7 +371,7 @@ static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
 static inline int xskq_prod_reserve(struct xsk_queue *q)
 {
 	if (xskq_prod_is_full(q))
-		return -ENOSPC;
+		return -ENOBUFS;
 
 	/* A, matches D */
 	q->cached_prod++;
@@ -383,7 +383,7 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
 	if (xskq_prod_is_full(q))
-		return -ENOSPC;
+		return -ENOBUFS;
 
 	/* A, matches D */
 	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
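ENOBUFS ("no buffer space available") is the conventional networking errno for a full ring, whereas ENOSPC ("no space left on device") usually points at storage, so the new value is easier to interpret in user space. A sketch of how a transmit loop might handle it (xsk_fd is assumed to be a bound AF_XDP socket set up elsewhere; the retry policy is illustrative, not prescribed by the patch):

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

/* Kick the kernel Tx path and retry on transient ring-full errors. */
static int kick_tx(int xsk_fd)
{
	for (;;) {
		if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0)
			return 0;
		if (errno == EAGAIN || errno == EBUSY || errno == ENOBUFS) {
			/* reap completion queue entries here, then retry */
			continue;
		}
		perror("sendto");	/* anything else is a hard error */
		return -1;
	}
}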
When the cq reservation fails, the error code is not set; it stays at
zero, as initialized in __xsk_generic_xmit(). That means the packet is
not sent successfully, yet sendto() returns success. Set the error code
on that path, and make xskq_prod_reserve_addr()/xskq_prod_reserve()
return a more meaningful value (-ENOBUFS instead of -ENOSPC) when the
queue is full.

Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 net/xdp/xsk.c       | 3 ++-
 net/xdp/xsk_queue.h | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)
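The observable change, sketched as a hypothetical check (assumes an AF_XDP socket xsk_fd whose completion queue has been driven full beforehand; the setup is omitted):

#include <assert.h>
#include <errno.h>
#include <sys/socket.h>

static void check_full_cq_reports_error(int xsk_fd)
{
	ssize_t ret = sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

	/* Before the patch: ret == 0 even though the descriptor was
	 * dropped. After the patch: -1 with errno set to ENOBUFS. */
	assert(ret == -1 && errno == ENOBUFS);
}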