@@ -189,7 +189,9 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,

static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ unsigned int headroom,
+ unsigned int len,
+ unsigned int num_sg_elems)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
@@ -198,12 +200,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
- u16 len;

stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);

prefetchw(buf_info->page);

skb = napi_get_frags(&q_to_qcq(q)->napi);
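The other half of this hunk is worth a note: ionic_rx_frags() no longer decodes the completion itself; the caller now does the le16_to_cpu() conversion once and passes plain host-order values down. A minimal userspace sketch of that pattern, with hypothetical names (sketch_rxq_comp, sketch_le16) standing in for struct ionic_rxq_comp and le16_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for struct ionic_rxq_comp */
struct sketch_rxq_comp {
        uint8_t le_len[2];      /* 16-bit frame length, little-endian on the wire */
        uint8_t num_sg_elems;
};

/* byte-order-independent decode, like le16_to_cpu() */
static uint16_t sketch_le16(const uint8_t *b)
{
        return (uint16_t)(b[0] | (b[1] << 8));
}

static void rx_frags_sketch(unsigned int len, unsigned int num_sg_elems)
{
        printf("building skb: len=%u sg_elems=%u\n", len, num_sg_elems);
}

int main(void)
{
        struct sketch_rxq_comp comp = {
                .le_len = { 0xdc, 0x05 },       /* 1500 */
                .num_sg_elems = 1,
        };

        /* decode once at the call site, hand plain integers to the helper */
        rx_frags_sketch(sketch_le16(comp.le_len), comp.num_sg_elems);
        return 0;
}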
@@ -215,22 +215,25 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
return NULL;
}

- i = comp->num_sg_elems + 1;
+ i = num_sg_elems + 1;
do {
if (unlikely(!buf_info->page)) {
dev_kfree_skb(skb);
return NULL;
}

- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ if (headroom)
+ frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ else
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
len -= frag_len;

dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
- 0, frag_len, DMA_FROM_DEVICE);
+ headroom, frag_len, DMA_FROM_DEVICE);

skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset, frag_len,
- IONIC_PAGE_SIZE);
+ buf_info->page, buf_info->page_offset + headroom,
+ frag_len, IONIC_PAGE_SIZE);

if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
@@ -238,6 +241,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
buf_info->page = NULL;
}

+ /* only needed on the first buffer */
+ if (headroom)
+ headroom = 0;
+
buf_info++;
i--;
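To make the first-buffer-only headroom concrete, here is a compilable userspace sketch of the loop's arithmetic. SKETCH_HEADROOM and SKETCH_BUF_SIZE are illustrative stand-ins for XDP_PACKET_HEADROOM and ionic_rx_buf_size(); the patch itself caps the first frag at IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN, but the shape of the loop is the same:

#include <stdio.h>

#define SKETCH_HEADROOM  256    /* stand-in for XDP_PACKET_HEADROOM */
#define SKETCH_BUF_SIZE 2048    /* stand-in for ionic_rx_buf_size() */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int len = 5000;        /* packet bytes left to attach */
        unsigned int headroom = SKETCH_HEADROOM;
        unsigned int frag = 0;

        while (len) {
                /* the first buffer's payload starts after the headroom,
                 * so it carries that much less of the packet
                 */
                unsigned int frag_len = min_u(len, SKETCH_BUF_SIZE - headroom);

                printf("frag %u: page offset +%u, len %u\n",
                       frag++, headroom, frag_len);
                len -= frag_len;

                /* only needed on the first buffer, as in the patch */
                headroom = 0;
        }
        return 0;
}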
@@ -248,19 +255,18 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+ unsigned int headroom,
+ unsigned int len)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb;
- u16 len;

stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);

skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
if (unlikely(!skb)) {
@@ -276,10 +282,10 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
}

dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
- 0, len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info), len);
+ headroom, len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
- 0, len, DMA_FROM_DEVICE);
+ headroom, len, DMA_FROM_DEVICE);

skb_put(skb, len);
skb->protocol = eth_type_trans(skb, q->lif->netdev);
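The copybreak path keeps its sync-copy-sync shape; the only change is that all three steps now start at the headroom offset, so the copy begins at the packet rather than at the reserved space, and the untouched buffer goes straight back to the ring. A standalone sketch of the offset copy (plain arrays and memcpy standing in for the DMA-mapped page and skb):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char dma_buf[2048];    /* stands in for the mapped rx page */
        unsigned char skb_lin[256];     /* stands in for napi_alloc_skb() data */
        unsigned int headroom = 256, len = 64;

        memset(dma_buf + headroom, 0xab, len);  /* pretend the NIC wrote here */

        /* dma_sync_single_range_for_cpu(dev, pa, headroom, len, ...) */
        memcpy(skb_lin, dma_buf + headroom, len);
        /* dma_sync_single_range_for_device(dev, pa, headroom, len, ...)
         * hands the unmodified buffer back for reuse
         */

        printf("copied %u bytes from offset %u\n", len, headroom);
        return 0;
}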
@@ -303,10 +309,10 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
- 0, len, false);
+ XDP_PACKET_HEADROOM, len, false);
dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- 0, len,
+ XDP_PACKET_HEADROOM, len,
DMA_FROM_DEVICE);
prefetchw(&xdp_buf.data_hard_start);
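Passing XDP_PACKET_HEADROOM to xdp_prepare_buff() leaves data_hard_start at the buffer start while data begins after the reserved bytes, which is what gives an XDP program room to grow headers with bpf_xdp_adjust_head(). A sketch of the resulting layout, using a hypothetical mirror of the xdp_buff pointers:

#include <stdio.h>

/* hypothetical mirror of the xdp_buff fields involved here */
struct sketch_xdp_buff {
        unsigned char *data_hard_start;
        unsigned char *data;
        unsigned char *data_end;
};

int main(void)
{
        static unsigned char page[4096];
        unsigned int headroom = 256;    /* XDP_PACKET_HEADROOM stand-in */
        unsigned int len = 1500;
        struct sketch_xdp_buff xdp = {
                .data_hard_start = page,
                .data = page + headroom,
                .data_end = page + headroom + len,
        };

        printf("headroom for the program: %ld bytes, payload: %ld bytes\n",
               (long)(xdp.data - xdp.data_hard_start),
               (long)(xdp.data_end - xdp.data));
        return 0;
}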
@@ -344,6 +350,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
struct ionic_rxq_comp *comp;
+ unsigned int headroom;
struct sk_buff *skb;
u16 len;
@@ -363,10 +370,11 @@ static void ionic_rx_clean(struct ionic_queue *q,
if (ionic_run_xdp(stats, netdev, q, desc_info->bufs, len))
return;

+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
if (len <= q->lif->rx_copybreak)
- skb = ionic_rx_copybreak(q, desc_info, comp);
+ skb = ionic_rx_copybreak(q, desc_info, headroom, len);
else
- skb = ionic_rx_frags(q, desc_info, comp);
+ skb = ionic_rx_frags(q, desc_info, headroom, len, comp->num_sg_elems);

if (unlikely(!skb)) {
stats->dropped++;
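Note that headroom is keyed off q->xdp_rxq_info, so the reserved space exists only while an XDP program is attached; with XDP off, every offset above collapses to zero and the path behaves as it did before the patch. A small sketch of that decision, with illustrative values for the headroom and copybreak threshold:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_XDP_HEADROOM 256 /* XDP_PACKET_HEADROOM stand-in */
#define SKETCH_COPYBREAK    256 /* lif->rx_copybreak stand-in */

static void rx_clean_sketch(bool xdp_attached, unsigned int len)
{
        unsigned int headroom = xdp_attached ? SKETCH_XDP_HEADROOM : 0;

        if (len <= SKETCH_COPYBREAK)
                printf("copybreak path: len=%u headroom=%u\n", len, headroom);
        else
                printf("frag path:      len=%u headroom=%u\n", len, headroom);
}

int main(void)
{
        rx_clean_sketch(true, 64);      /* XDP on, small packet */
        rx_clean_sketch(false, 1500);   /* XDP off, full-size packet */
        return 0;
}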
@@ -490,8 +498,9 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int frag_len;
unsigned int nfrags;
unsigned int n_fill;
- unsigned int i, j;
unsigned int len;
+ unsigned int i;
+ unsigned int j;

n_fill = ionic_q_space_avail(q);
@@ -500,9 +509,12 @@ void ionic_rx_fill(struct ionic_queue *q)
if (n_fill < fill_threshold)
return;

- len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ len = netdev->mtu + VLAN_ETH_HLEN;

for (i = n_fill; i; i--) {
+ unsigned int headroom;
+ unsigned int buf_len;
+
nfrags = 0;
remain_len = len;
desc_info = &q->info[q->head_idx];
@@ -517,9 +529,18 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}

- /* fill main descriptor - buf[0] */
- desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
- frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ /* fill main descriptor - buf[0]
+ * XDP uses space in the first buffer, so account for
+ * head room, tail room, and ip header in the first frag size.
+ */
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ if (q->xdp_rxq_info)
+ buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+ else
+ buf_len = ionic_rx_buf_size(buf_info);
+ frag_len = min_t(u16, len, buf_len);
+
+ desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
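On the fill side the same reservation shows up twice: the descriptor address is advanced past the headroom, and the first buffer's usable length is capped so a linear XDP frame fits in one buffer. A sketch of that sizing, with illustrative stand-ins for the driver constants (in the driver, IONIC_XDP_MAX_LINEAR_MTU already accounts for the head and tail room within one page):

#include <stdio.h>

#define SKETCH_HEADROOM          256    /* XDP_PACKET_HEADROOM stand-in */
#define SKETCH_XDP_LINEAR_MTU   3498    /* IONIC_XDP_MAX_LINEAR_MTU stand-in */
#define SKETCH_VLAN_ETH_HLEN      18
#define SKETCH_BUF_SIZE         4096    /* ionic_rx_buf_size() stand-in */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int mtu = 1500;
        unsigned int len = mtu + SKETCH_VLAN_ETH_HLEN;
        int xdp_attached = 1;
        unsigned int headroom = xdp_attached ? SKETCH_HEADROOM : 0;
        unsigned int buf_len = xdp_attached ?
                SKETCH_XDP_LINEAR_MTU + SKETCH_VLAN_ETH_HLEN : SKETCH_BUF_SIZE;
        unsigned int frag_len = min_u(len, buf_len);

        /* desc->addr gets the DMA address advanced past the headroom */
        printf("desc[0]: dma offset +%u, len %u\n", headroom, frag_len);
        return 0;
}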