@@ -145,6 +145,13 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
+static inline unsigned int xdp_get_frag_tailroom(const skb_frag_t *frag)
+{
+	struct page *page = skb_frag_page(frag);
+
+	return page_size(page) - skb_frag_size(frag) - skb_frag_off(frag);
+}
+
struct xdp_frame {
void *data;
u16 len;
@@ -290,6 +297,8 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
return xdp_frame;
}
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+		  struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
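
For reference, not part of the patch itself: xdp_get_frag_tailroom() reports how many bytes are still unused in the page backing a fragment, i.e. the room a positive bpf_xdp_adjust_tail() offset can grow the last fragment into. As a worked example, assuming a fragment backed by a 4 KiB page, starting at page offset 0 and currently holding 1500 bytes:

	tailroom = page_size(page) - skb_frag_size(frag) - skb_frag_off(frag)
	         = 4096 - 1500 - 0
	         = 2596 bytes
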
@@ -3818,11 +3818,71 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
.arg2_type = ARG_ANYTHING,
};
+static int bpf_xdp_mb_adjust_tail(struct xdp_buff *xdp, int offset)
+{
+	struct skb_shared_info *sinfo;
+
+	sinfo = xdp_get_shared_info_from_buff(xdp);
+	if (offset >= 0) {
+		skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+		int size;
+
+		if (unlikely(offset > xdp_get_frag_tailroom(frag)))
+			return -EINVAL;
+
+		size = skb_frag_size(frag);
+		memset(skb_frag_address(frag) + size, 0, offset);
+		skb_frag_size_set(frag, size + offset);
+		sinfo->xdp_frags_size += offset;
+	} else {
+		int i, n_frags_free = 0, len_free = 0, tlen_free = 0;
+
+		offset = abs(offset);
+		if (unlikely(offset > ((int)(xdp->data_end - xdp->data) +
+				       sinfo->xdp_frags_size - ETH_HLEN)))
+			return -EINVAL;
+
+		for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) {
+			skb_frag_t *frag = &sinfo->frags[i];
+			int size = skb_frag_size(frag);
+			int shrink = min_t(int, offset, size);
+
+			len_free += shrink;
+			offset -= shrink;
+
+			if (unlikely(size == shrink)) {
+				struct page *page = skb_frag_page(frag);
+
+				__xdp_return(page_address(page), &xdp->rxq->mem,
+					     false, NULL);
+				tlen_free += page_size(page);
+				n_frags_free++;
+			} else {
+				skb_frag_size_set(frag, size - shrink);
+				break;
+			}
+		}
+		sinfo->nr_frags -= n_frags_free;
+		sinfo->xdp_frags_size -= len_free;
+		sinfo->xdp_frags_tsize -= tlen_free;
+
+		if (unlikely(offset > 0)) {
+			xdp_buff_clear_mb(xdp);
+			xdp->data_end -= offset;
+		}
+	}
+
+	return 0;
+}
+
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
void *data_end = xdp->data_end + offset;
+	if (unlikely(xdp_buff_is_mb(xdp)))
+		return bpf_xdp_mb_adjust_tail(xdp, offset);
+
/* Notice that xdp_data_hard_end have reserved some tailroom */
if (unlikely(data_end > data_hard_end))
return -EINVAL;
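
Not part of the patch: a minimal sketch of how an XDP program would exercise the updated helper from BPF. With this change the same bpf_xdp_adjust_tail() call that works on the linear area can also grow into the last fragment's tailroom or release whole tail fragments of a multi-buffer frame. The program name and the 4-byte trim are illustrative only.

	/* SPDX-License-Identifier: GPL-2.0 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_trim_tail(struct xdp_md *ctx)
	{
		/* A negative offset shrinks the frame from the tail; on a
		 * multi-buffer frame this may free whole tail fragments.
		 * A positive offset grows into the available tailroom.
		 */
		if (bpf_xdp_adjust_tail(ctx, -4))
			return XDP_DROP;

		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";
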
@@ -339,8 +339,8 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
* is used for those calls sites. Thus, allowing for faster recycling
* of xdp_frames/pages in those cases.
*/
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
-			 struct xdp_buff *xdp)
+void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
+		  struct xdp_buff *xdp)
{
struct xdp_mem_allocator *xa;
struct page *page;
@@ -373,6 +373,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
break;
}
}
+EXPORT_SYMBOL_GPL(__xdp_return);
void xdp_return_frame(struct xdp_frame *xdpf)
{