[v5,bpf-next,03/14] xdp: add xdp_shared_info data structure

Message ID 21d27f233e37b66c9ad4073dd09df5c2904112a4.1607349924.git.lorenzo@kernel.org (mailing list archive)
State Changes Requested
Delegated to: BPF
Series mvneta: introduce XDP multi-buffer support

Commit Message

lorenzo@kernel.org Dec. 7, 2020, 4:32 p.m. UTC
Introduce the xdp_shared_info data structure to contain info about
"non-linear" xdp frames. xdp_shared_info will alias skb_shared_info,
allowing most of the frags to be kept in the same cache line.
Introduce some xdp_shared_info helpers aligned to the skb_frag* ones.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 62 +++++++++++++++------------
 include/net/xdp.h                     | 52 ++++++++++++++++++++--
 2 files changed, 82 insertions(+), 32 deletions(-)

Comments

Saeed Mahameed Dec. 8, 2020, 12:22 a.m. UTC | #1
On Mon, 2020-12-07 at 17:32 +0100, Lorenzo Bianconi wrote:
> Introduce the xdp_shared_info data structure to contain info about
> "non-linear" xdp frames. xdp_shared_info will alias skb_shared_info,
> allowing most of the frags to be kept in the same cache line.
> Introduce some xdp_shared_info helpers aligned to the skb_frag* ones.
> 

Is there, or will there be, a more general-purpose use for this
xdp_shared_info, other than hosting frags?

> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> ---
>  drivers/net/ethernet/marvell/mvneta.c | 62 +++++++++++++++------------
>  include/net/xdp.h                     | 52 ++++++++++++++++++++--
>  2 files changed, 82 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
> index 1e5b5c69685a..d635463609ad 100644
> --- a/drivers/net/ethernet/marvell/mvneta.c
> +++ b/drivers/net/ethernet/marvell/mvneta.c
> @@ -2033,14 +2033,17 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
>  

[...]

>  static void
> @@ -2278,7 +2281,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
>  			    struct mvneta_rx_desc *rx_desc,
>  			    struct mvneta_rx_queue *rxq,
>  			    struct xdp_buff *xdp, int *size,
> -			    struct skb_shared_info *xdp_sinfo,
> +			    struct xdp_shared_info *xdp_sinfo,
>  			    struct page *page)
>  {
>  	struct net_device *dev = pp->dev;
> @@ -2301,13 +2304,13 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
>  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
>  		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
>  
> -		skb_frag_off_set(frag, pp->rx_offset_correction);
> -		skb_frag_size_set(frag, data_len);
> -		__skb_frag_set_page(frag, page);
> +		xdp_set_frag_offset(frag, pp->rx_offset_correction);
> +		xdp_set_frag_size(frag, data_len);
> +		xdp_set_frag_page(frag, page);
>  

Why three separate setters? Why not just one
xdp_set_frag(page, offset, size)?

>  		/* last fragment */
>  		if (len == *size) {
> -			struct skb_shared_info *sinfo;
> +			struct xdp_shared_info *sinfo;
>  
>  			sinfo = xdp_get_shared_info_from_buff(xdp);
>  			sinfo->nr_frags = xdp_sinfo->nr_frags;
> @@ -2324,10 +2327,13 @@ static struct sk_buff *
>  mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  		      struct xdp_buff *xdp, u32 desc_status)
>  {
> -	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
> -	int i, num_frags = sinfo->nr_frags;
> +	struct xdp_shared_info *xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
> +	int i, num_frags = xdp_sinfo->nr_frags;
> +	skb_frag_t frag_list[MAX_SKB_FRAGS];
>  	struct sk_buff *skb;
>  
> +	memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) * num_frags);
> +
>  	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
>  	if (!skb)
>  		return ERR_PTR(-ENOMEM);
> @@ -2339,12 +2345,12 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
>  	mvneta_rx_csum(pp, desc_status, skb);
>  
>  	for (i = 0; i < num_frags; i++) {
> -		skb_frag_t *frag = &sinfo->frags[i];
> +		struct page *page = xdp_get_frag_page(&frag_list[i]);
>  
>  		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
> -				skb_frag_page(frag), skb_frag_off(frag),
> -				skb_frag_size(frag), PAGE_SIZE);
> -		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
> +				page, xdp_get_frag_offset(&frag_list[i]),
> +				xdp_get_frag_size(&frag_list[i]), PAGE_SIZE);
> +		page_pool_release_page(rxq->page_pool, page);
>  	}
>  
>  	return skb;
> @@ -2357,7 +2363,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  {
>  	int rx_proc = 0, rx_todo, refill, size = 0;
>  	struct net_device *dev = pp->dev;
> -	struct skb_shared_info sinfo;
> +	struct xdp_shared_info xdp_sinfo;
>  	struct mvneta_stats ps = {};
>  	struct bpf_prog *xdp_prog;
>  	u32 desc_status, frame_sz;
> @@ -2368,7 +2374,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  	xdp_buf.rxq = &rxq->xdp_rxq;
>  	xdp_buf.mb = 0;
>  
> -	sinfo.nr_frags = 0;
> +	xdp_sinfo.nr_frags = 0;
>  
>  	/* Get number of received packets */
>  	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
> @@ -2412,7 +2418,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  			}
>  
>  			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
> -						    &size, &sinfo, page);
> +						    &size, &xdp_sinfo, page);
>  		} /* Middle or Last descriptor */
>  
>  		if (!(rx_status & MVNETA_RXD_LAST_DESC))
> @@ -2420,7 +2426,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  			continue;
>  
>  		if (size) {
> -			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
> +			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &xdp_sinfo, -1);
>  			goto next;
>  		}
>  
> @@ -2432,7 +2438,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  		if (IS_ERR(skb)) {
>  			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
>  
> -			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
> +			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &xdp_sinfo, -1);
>  
>  			u64_stats_update_begin(&stats->syncp);
>  			stats->es.skb_alloc_error++;
> @@ -2449,12 +2455,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
>  		napi_gro_receive(napi, skb);
>  next:
>  		xdp_buf.data_hard_start = NULL;
> -		sinfo.nr_frags = 0;
> +		xdp_sinfo.nr_frags = 0;
>  	}
>  	rcu_read_unlock();
>  
>  	if (xdp_buf.data_hard_start)
> -		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
> +		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &xdp_sinfo, -1);
>  
>  	if (ps.xdp_redirect)
>  		xdp_do_flush_map();
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index 70559720ff44..614f66d35ee8 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -87,10 +87,54 @@ struct xdp_buff {
>  	((xdp)->data_hard_start + (xdp)->frame_sz -	\
>  	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
>  
> -static inline struct skb_shared_info *
> +struct xdp_shared_info {

xdp_shared_info is a bad name; we need this to have a specific purpose.
xdp_frags would be the proper name, so people will think twice before
adding weird bits to this so-called shared_info.

> +	u16 nr_frags;
> +	u16 data_length; /* paged area length */
> +	skb_frag_t frags[MAX_SKB_FRAGS];

Why MAX_SKB_FRAGS? Just use a flexible array member:

skb_frag_t frags[];

and enforce the size via nr_frags and on the construction of the
tailroom-preserved buffer, which is already being done.

This is a waste of space, at least by definition of the struct; in your
use case you do:

memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) * num_frags);

and the tailroom space was already preserved for a full skb_shinfo, so
I don't see why you need this array to be of a fixed MAX_SKB_FRAGS size.

> +};
> +
> +static inline struct xdp_shared_info *
>  xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
>  {
> -	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
> +	BUILD_BUG_ON(sizeof(struct xdp_shared_info) >
> +		     sizeof(struct skb_shared_info));
> +	return (struct xdp_shared_info *)xdp_data_hard_end(xdp);
> +}
> +

Back to my first comment: do we have plans to use this tailroom buffer
for anything other than the frag-list use case? What will the buffer
format be then? Should we push all new fields to the end of the
xdp_shared_info struct, or deal with this tailroom buffer as a stack?
My main concern is that drivers that don't support frag lists but still
want to utilize the tailroom buffer for other use cases will have to
skip the first sizeof(xdp_shared_info) bytes so they won't break the
stack.
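
For reference, the layout in question is roughly the following (per the
xdp_data_hard_end() macro above; illustrative sketch only):

	data_hard_start                          data_hard_start + frame_sz
	|<- headroom ->|<---- packet data ---->|<-- reserved tailroom -->|
	                                       ^
	                                       xdp_data_hard_end(xdp) =
	                                       data_hard_start + frame_sz -
	                                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info))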

> +static inline struct page *xdp_get_frag_page(const skb_frag_t *frag)
> +{
> +	return frag->bv_page;
> +}
> +
> +static inline unsigned int xdp_get_frag_offset(const skb_frag_t *frag)
> +{
> +	return frag->bv_offset;
> +}
> +
> +static inline unsigned int xdp_get_frag_size(const skb_frag_t *frag)
> +{
> +	return frag->bv_len;
> +}
> +
> +static inline void *xdp_get_frag_address(const skb_frag_t *frag)
> +{
> +	return page_address(xdp_get_frag_page(frag)) +
> +	       xdp_get_frag_offset(frag);
> +}
> +
> +static inline void xdp_set_frag_page(skb_frag_t *frag, struct page *page)
> +{
> +	frag->bv_page = page;
> +}
> +
> +static inline void xdp_set_frag_offset(skb_frag_t *frag, u32 offset)
> +{
> +	frag->bv_offset = offset;
> +}
> +
> +static inline void xdp_set_frag_size(skb_frag_t *frag, u32 size)
> +{
> +	frag->bv_len = size;
>  }
>  
>  struct xdp_frame {
> @@ -120,12 +164,12 @@ static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
>  	bq->xa = NULL;
>  }
>  
> -static inline struct skb_shared_info *
> +static inline struct xdp_shared_info *
>  xdp_get_shared_info_from_frame(struct xdp_frame *frame)
>  {
>  	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
>  
> -	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
> +	return (struct xdp_shared_info *)(data_hard_start + frame->frame_sz -
>  				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
>  }
>  

A comment is needed here on why we preserve the size of
skb_shared_info even though the usable buffer is of type
xdp_shared_info.
Lorenzo Bianconi Dec. 8, 2020, 11:01 a.m. UTC | #2
> On Mon, 2020-12-07 at 17:32 +0100, Lorenzo Bianconi wrote:
> > Introduce the xdp_shared_info data structure to contain info about
> > "non-linear" xdp frames. xdp_shared_info will alias skb_shared_info,
> > allowing most of the frags to be kept in the same cache line.
> > Introduce some xdp_shared_info helpers aligned to the skb_frag* ones.
> > 
> 
> Is there, or will there be, a more general-purpose use for this
> xdp_shared_info, other than hosting frags?

I do not have use cases other than multi-buff at the moment, but in
theory others are possible, I guess.
The reason we introduced it is to have most of the frags in the first
shared_info cache line, to avoid cache misses.

> 
> > Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> > ---
> >  drivers/net/ethernet/marvell/mvneta.c | 62 +++++++++++++++--------
> > ----
> >  include/net/xdp.h                     | 52 ++++++++++++++++++++--
> >  2 files changed, 82 insertions(+), 32 deletions(-)
> > 
> > diff --git a/drivers/net/ethernet/marvell/mvneta.c
> > b/drivers/net/ethernet/marvell/mvneta.c
> > index 1e5b5c69685a..d635463609ad 100644
> > --- a/drivers/net/ethernet/marvell/mvneta.c
> > +++ b/drivers/net/ethernet/marvell/mvneta.c
> > @@ -2033,14 +2033,17 @@ int mvneta_rx_refill_queue(struct mvneta_port
> > *pp, struct mvneta_rx_queue *rxq)
> >  
> 
> [...]
> 
> >  static void
> > @@ -2278,7 +2281,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
> >  			    struct mvneta_rx_desc *rx_desc,
> >  			    struct mvneta_rx_queue *rxq,
> >  			    struct xdp_buff *xdp, int *size,
> > -			    struct skb_shared_info *xdp_sinfo,
> > +			    struct xdp_shared_info *xdp_sinfo,
> >  			    struct page *page)
> >  {
> >  	struct net_device *dev = pp->dev;
> > @@ -2301,13 +2304,13 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
> >  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
> >  		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
> >  
> > -		skb_frag_off_set(frag, pp->rx_offset_correction);
> > -		skb_frag_size_set(frag, data_len);
> > -		__skb_frag_set_page(frag, page);
> > +		xdp_set_frag_offset(frag, pp->rx_offset_correction);
> > +		xdp_set_frag_size(frag, data_len);
> > +		xdp_set_frag_page(frag, page);
> >  
> 
> Why three separate setters? Why not just one
> xdp_set_frag(page, offset, size)?

To stay aligned with the skb_frag* helpers; but I guess we can have a
single helper, I do not have a strong opinion on it.
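
Something like this, I suppose (untested sketch; the name and argument
order are illustrative, not part of this series):

static inline void xdp_set_frag(skb_frag_t *frag, struct page *page,
				u32 offset, u32 size)
{
	frag->bv_page = page;
	frag->bv_offset = offset;
	frag->bv_len = size;
}

With that, the mvneta call site above would collapse to:

	xdp_set_frag(frag, page, pp->rx_offset_correction, data_len);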

> 
> >  		/* last fragment */
> >  		if (len == *size) {
> > -			struct skb_shared_info *sinfo;
> > +			struct xdp_shared_info *sinfo;
> >  
> >  			sinfo = xdp_get_shared_info_from_buff(xdp);
> >  			sinfo->nr_frags = xdp_sinfo->nr_frags;
> > @@ -2324,10 +2327,13 @@ static struct sk_buff *
> >  mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
> >  		      struct xdp_buff *xdp, u32 desc_status)
> >  {

[...]

> >  
> > -static inline struct skb_shared_info *
> > +struct xdp_shared_info {
> 
> xdp_shared_info is a bad name; we need this to have a specific purpose.
> xdp_frags would be the proper name, so people will think twice before
> adding weird bits to this so-called shared_info.

I named the struct xdp_shared_info to recall skb_shared_info, but I
guess xdp_frags is fine too. Agree?

> 
> > +	u16 nr_frags;
> > +	u16 data_length; /* paged area length */
> > +	skb_frag_t frags[MAX_SKB_FRAGS];
> 
> Why MAX_SKB_FRAGS? Just use a flexible array member:
> 
> skb_frag_t frags[];
> 
> and enforce the size via nr_frags and on the construction of the
> tailroom-preserved buffer, which is already being done.
> 
> This is a waste of space, at least by definition of the struct; in
> your use case you do:
> 
> memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) * num_frags);
> 
> and the tailroom space was already preserved for a full skb_shinfo,
> so I don't see why you need this array to be of a fixed
> MAX_SKB_FRAGS size.

In order to avoid cache misses, the xdp_shared_info struct is built as
a variable on the mvneta_rx_swbm() stack and is written to the
"shared_info" area only for the last fragment, in
mvneta_swbm_add_rx_fragment(). I used MAX_SKB_FRAGS to be aligned with
the skb_shared_info struct, but we can probably use an even smaller
value. Another approach would be to define two different structs, e.g.

struct xdp_frag_metadata {
	u16 nr_frags;
	u16 data_length; /* paged area length */
};

struct xdp_frags {
	skb_frag_t frags[MAX_SKB_FRAGS];
};

and then define xdp_shared_info as

struct xdp_shared_info {
	struct xdp_frag_metadata meta;
	skb_frag_t frags[];
};

In this way we can probably optimize the space. What do you think?
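
On the driver side, a full-size scratch copy could still live on the
mvneta_rx_swbm() stack even with a flexible array member, e.g. (rough
sketch, not part of this patch):

	u8 buf[sizeof(struct xdp_shared_info) +
	       sizeof(skb_frag_t) * MAX_SKB_FRAGS] __aligned(8);
	struct xdp_shared_info *xdp_sinfo = (struct xdp_shared_info *)buf;

	xdp_sinfo->meta.nr_frags = 0;	/* then used exactly as before */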

> 
> > +};
> > +
> > +static inline struct xdp_shared_info *
> >  xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
> >  {
> > -	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
> > +	BUILD_BUG_ON(sizeof(struct xdp_shared_info) >
> > +		     sizeof(struct skb_shared_info));
> > +	return (struct xdp_shared_info *)xdp_data_hard_end(xdp);
> > +}
> > +
> 
> Back to my first comment: do we have plans to use this tailroom
> buffer for anything other than the frag-list use case? What will the
> buffer format be then? Should we push all new fields to the end of
> the xdp_shared_info struct, or deal with this tailroom buffer as a
> stack? My main concern is that drivers that don't support frag lists
> but still want to utilize the tailroom buffer for other use cases
> will have to skip the first sizeof(xdp_shared_info) bytes so they
> won't break the stack.

For the moment I do not know if this area will be used for other
purposes. Do you think there are other use cases for it?

> 
> > +static inline struct page *xdp_get_frag_page(const skb_frag_t *frag)
> > +{
> > +	return frag->bv_page;
> > +}
> > +
> > +static inline unsigned int xdp_get_frag_offset(const skb_frag_t *frag)
> > +{
> > +	return frag->bv_offset;
> > +}
> > +
> > +static inline unsigned int xdp_get_frag_size(const skb_frag_t *frag)
> > +{
> > +	return frag->bv_len;
> > +}
> > +
> > +static inline void *xdp_get_frag_address(const skb_frag_t *frag)
> > +{
> > +	return page_address(xdp_get_frag_page(frag)) +
> > +	       xdp_get_frag_offset(frag);
> > +}
> > +
> > +static inline void xdp_set_frag_page(skb_frag_t *frag, struct page *page)
> > +{
> > +	frag->bv_page = page;
> > +}
> > +
> > +static inline void xdp_set_frag_offset(skb_frag_t *frag, u32 offset)
> > +{
> > +	frag->bv_offset = offset;
> > +}
> > +
> > +static inline void xdp_set_frag_size(skb_frag_t *frag, u32 size)
> > +{
> > +	frag->bv_len = size;
> >  }
> >  
> >  struct xdp_frame {
> > @@ -120,12 +164,12 @@ static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
> >  	bq->xa = NULL;
> >  }
> >  
> > -static inline struct skb_shared_info *
> > +static inline struct xdp_shared_info *
> >  xdp_get_shared_info_from_frame(struct xdp_frame *frame)
> >  {
> >  	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
> >  
> > -	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
> > +	return (struct xdp_shared_info *)(data_hard_start + frame->frame_sz -
> >  				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
> >  }
> >  
> 
> A comment is needed here on why we preserve the size of
> skb_shared_info even though the usable buffer is of type
> xdp_shared_info.

Ack, I will add it in v6.
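
Something along these lines, I guess (wording is illustrative only):

/* Every xdp_buff/xdp_frame reserves SKB_DATA_ALIGN(sizeof(struct
 * skb_shared_info)) bytes of tailroom so the frame can later be
 * converted to an skb without copying. xdp_shared_info merely aliases
 * that reserved area, and the BUILD_BUG_ON() in
 * xdp_get_shared_info_from_buff() guarantees it never outgrows it.
 */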

Regards,
Lorenzo

>
Shay Agroskin Dec. 19, 2020, 2:53 p.m. UTC | #3
Lorenzo Bianconi <lorenzo.bianconi@redhat.com> writes:

>> On Mon, 2020-12-07 at 17:32 +0100, Lorenzo Bianconi wrote:
>> > Introduce xdp_shared_info data structure to contain info 
>> > about
>> > "non-linear" xdp frame. xdp_shared_info will alias 
>> > skb_shared_info
>> > allowing to keep most of the frags in the same cache-line.
[...]
>> 
>> > +	u16 nr_frags;
>> > +	u16 data_length; /* paged area length */
>> > +	skb_frag_t frags[MAX_SKB_FRAGS];
>> 
>> why MAX_SKB_FRAGS ? just use a flexible array member 
>> skb_frag_t frags[]; 
>> 
>> and enforce size via the n_frags and on the construction of the
>> tailroom preserved buffer, which is already being done.
>> 
>> this is a waste of unnecessary space, at least by definition of 
>> the
>> struct, in your use case you do:
>> memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) * 
>> num_frags);
>> And the tailroom space was already preserved for a full 
>> skb_shinfo.
>> so i don't see why you need this array to be of a fixed 
>> MAX_SKB_FRAGS
>> size.
>
> In order to avoid cache-misses, xdp_shared info is built as a 
> variable
> on mvneta_rx_swbm() stack and it is written to "shared_info" 
> area only on the
> last fragment in mvneta_swbm_add_rx_fragment(). I used 
> MAX_SKB_FRAGS to be
> aligned with skb_shared_info struct but probably we can use even 
> a smaller value.
> Another approach would be to define two different struct, e.g.
>
> stuct xdp_frag_metadata {
> 	u16 nr_frags;
> 	u16 data_length; /* paged area length */
> };
>
> struct xdp_frags {
> 	skb_frag_t frags[MAX_SKB_FRAGS];
> };
>
> and then define xdp_shared_info as
>
> struct xdp_shared_info {
> 	stuct xdp_frag_metadata meta;
> 	skb_frag_t frags[];
> };
>
> In this way we can probably optimize the space. What do you 
> think?

We're still reserving ~sizeof(skb_shared_info) bytes at the end of
the first buffer, and it seems like in the mvneta code you keep
updating all three fields (frags, nr_frags and data_length).
Can you explain how the space is optimized by splitting the structs,
please?

>> 
>> > +};
>> > +
>> > +static inline struct xdp_shared_info *
>> >  xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
>> >  {
>> > -	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
>> > +	BUILD_BUG_ON(sizeof(struct xdp_shared_info) >
>> > +		     sizeof(struct skb_shared_info));
>> > +	return (struct xdp_shared_info *)xdp_data_hard_end(xdp);
>> > +}
>> > +
>> 
>> Back to my first comment, do we have plans to use this tail 
>> room buffer
>> for other than frag_list use cases ? what will be the buffer 
>> format
>> then ? should we push all new fields to the end of the 
>> xdp_shared_info
>> struct ? or deal with this tailroom buffer as a stack ? 
>> my main concern is that for drivers that don't support frag 
>> list and
>> still want to utilize the tailroom buffer for other usecases 
>> they will
>> have to skip the first sizeof(xdp_shared_info) so they won't 
>> break the
>> stack.
>
> for the moment I do not know if this area is used for other 
> purposes.
> Do you think there are other use-cases for it?
>

Saeed, the stack receives skb_shared_info when the frames are passed
to the stack (skb_add_rx_frag() is used to add the whole information
to the skb's shared info), and for the XDP_REDIRECT use case it
doesn't seem like all drivers check the page's tailroom for more
information anyway (ena doesn't, at least).
Can you please explain what you mean by "break the stack"?

Thanks, Shay

>> 
[...]
>
>>
Jamal Hadi Salim Dec. 19, 2020, 3:30 p.m. UTC | #4
On 2020-12-19 9:53 a.m., Shay Agroskin wrote:
> 
> Lorenzo Bianconi <lorenzo.bianconi@redhat.com> writes:
> 

>> for the moment I do not know if this area is used for other purposes.
>> Do you think there are other use-cases for it?

Sorry to interject:
Does it make sense to use this space to store arbitrary metadata or a
scratchpad? Something equivalent to skb->cb, which is lacking in XDP.

cheers,
jamal
Lorenzo Bianconi Dec. 20, 2020, 5:52 p.m. UTC | #5
>
>
> Lorenzo Bianconi <lorenzo.bianconi@redhat.com> writes:
>
> >> On Mon, 2020-12-07 at 17:32 +0100, Lorenzo Bianconi wrote:
> >> > Introduce xdp_shared_info data structure to contain info
> >> > about
> >> > "non-linear" xdp frame. xdp_shared_info will alias
> >> > skb_shared_info
> >> > allowing to keep most of the frags in the same cache-line.
> [...]
> >>
> >> > +  u16 nr_frags;
> >> > +  u16 data_length; /* paged area length */
> >> > +  skb_frag_t frags[MAX_SKB_FRAGS];
> >>
> >> why MAX_SKB_FRAGS ? just use a flexible array member
> >> skb_frag_t frags[];
> >>
> >> and enforce size via the n_frags and on the construction of the
> >> tailroom preserved buffer, which is already being done.
> >>
> >> this is a waste of unnecessary space, at least by definition of
> >> the
> >> struct, in your use case you do:
> >> memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) *
> >> num_frags);
> >> And the tailroom space was already preserved for a full
> >> skb_shinfo.
> >> so i don't see why you need this array to be of a fixed
> >> MAX_SKB_FRAGS
> >> size.
> >
> > In order to avoid cache-misses, xdp_shared info is built as a
> > variable
> > on mvneta_rx_swbm() stack and it is written to "shared_info"
> > area only on the
> > last fragment in mvneta_swbm_add_rx_fragment(). I used
> > MAX_SKB_FRAGS to be
> > aligned with skb_shared_info struct but probably we can use even
> > a smaller value.
> > Another approach would be to define two different struct, e.g.
> >
> > struct xdp_frag_metadata {
> >       u16 nr_frags;
> >       u16 data_length; /* paged area length */
> > };
> >
> > struct xdp_frags {
> >       skb_frag_t frags[MAX_SKB_FRAGS];
> > };
> >
> > and then define xdp_shared_info as
> >
> > struct xdp_shared_info {
> >       struct xdp_frag_metadata meta;
> >       skb_frag_t frags[];
> > };
> >
> > In this way we can probably optimize the space. What do you
> > think?
>
> We're still reserving ~sizeof(skb_shared_info) bytes at the end of
> the first buffer and it seems like in mvneta code you keep
> updating all three fields (frags, nr_frags and data_length).
> Can you explain how the space is optimized by splitting the
> structs please?

Using the xdp_shared_info struct, we will have the first three
fragments in the same cache line as nr_frags, while using the
skb_shared_info struct only the first fragment will be in the same
cache line as nr_frags. Moreover, skb_shared_info has multiple fields
that are unused by XDP.
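
Concretely, assuming 64-bit pointers, 64-byte cache lines and the
16-byte bio_vec layout of skb_frag_t (the offsets below are
illustrative, not verified on every arch):

struct xdp_shared_info {
	u16 nr_frags;				/* offset 0 */
	u16 data_length;			/* offset 2 */
	skb_frag_t frags[MAX_SKB_FRAGS];	/* offset 8 after padding;
						 * frags[0..2] end at byte
						 * 56, inside the first
						 * cache line */
};

whereas in skb_shared_info the frags[] array only starts around offset
48 (after frag_list, hwtstamps, dataref, destructor_arg, ...), so just
frags[0] shares a cache line with nr_frags.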

Regards,
Lorenzo

>
> >>
> >> > +};
> >> > +
> >> > +static inline struct xdp_shared_info *
> >> >  xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
> >> >  {
> >> > -  return (struct skb_shared_info *)xdp_data_hard_end(xdp);
> >> > +  BUILD_BUG_ON(sizeof(struct xdp_shared_info) >
> >> > +               sizeof(struct skb_shared_info));
> >> > +  return (struct xdp_shared_info *)xdp_data_hard_end(xdp);
> >> > +}
> >> > +
> >>
> >> Back to my first comment, do we have plans to use this tail
> >> room buffer
> >> for other than frag_list use cases ? what will be the buffer
> >> format
> >> then ? should we push all new fields to the end of the
> >> xdp_shared_info
> >> struct ? or deal with this tailroom buffer as a stack ?
> >> my main concern is that for drivers that don't support frag
> >> list and
> >> still want to utilize the tailroom buffer for other usecases
> >> they will
> >> have to skip the first sizeof(xdp_shared_info) so they won't
> >> break the
> >> stack.
> >
> > for the moment I do not know if this area is used for other
> > purposes.
> > Do you think there are other use-cases for it?
> >
>
> Saeed, the stack receives skb_shared_info when the frames are
> passed to the stack (skb_add_rx_frag is used to add the whole
> information to skb's shared info), and for XDP_REDIRECT use case,
> it doesn't seem like all drivers check page's tailroom for more
> information anyway (ena doesn't at least).
> Can you please explain what do you mean by "break the stack"?
>
> Thanks, Shay
>
> >>
> [...]
> >
> >>
>
Jesper Dangaard Brouer Dec. 21, 2020, 9:01 a.m. UTC | #6
On Sat, 19 Dec 2020 10:30:57 -0500
Jamal Hadi Salim <jhs@mojatatu.com> wrote:

> On 2020-12-19 9:53 a.m., Shay Agroskin wrote:
> > 
> > Lorenzo Bianconi <lorenzo.bianconi@redhat.com> writes:
> >   
> 
> >> for the moment I do not know if this area is used for other purposes.
> >> Do you think there are other use-cases for it?  

Yes, all the same use cases as SKBs have.  I wanted to keep this the
same as skb_shared_info, but Lorenzo chose to take John's advice and
it is going in this direction (which is fine; we can always change and
adjust this later).


> Sorry to interject:
> Does it make sense to use it to store arbitrary metadata or a scratchpad
> in this space? Something equivalent to skb->cb which is lacking in
> XDP.

Well, XDP has the data_meta area.  But it is difficult to rely on
because a lot of drivers don't implement it.  And Saeed and I plan to
use this area and populate it with driver info from the RX descriptor.
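
For reference, a minimal (untested) sketch of what such a scratchpad
looks like with data_meta today, on drivers that do implement it; the
struct and values are made up:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct scratch {
	__u32 mark;	/* hypothetical per-packet note */
};

SEC("xdp")
int xdp_meta_writer(struct xdp_md *ctx)
{
	struct scratch *s;

	/* grow the metadata area that sits in front of the packet data */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*s)))
		return XDP_ABORTED;	/* driver has no data_meta support */

	s = (void *)(long)ctx->data_meta;
	if ((void *)(s + 1) > (void *)(long)ctx->data)
		return XDP_ABORTED;	/* bounds check for the verifier */

	s->mark = 42;	/* a later tail-called program can read this */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";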
Jamal Hadi Salim Dec. 21, 2020, 1 p.m. UTC | #7
On 2020-12-21 4:01 a.m., Jesper Dangaard Brouer wrote:
> On Sat, 19 Dec 2020 10:30:57 -0500

>> Sorry to interject:
>> Does it make sense to use it to store arbitrary metadata or a scratchpad
>> in this space? Something equivalent to skb->cb which is lacking in
>> XDP.
> 
> Well, XDP have the data_meta area.  But difficult to rely on because a
> lot of driver don't implement it.  And Saeed and I plan to use this
> area and populate it with driver info from RX-descriptor.
> 

What I was thinking of is some scratchpad that I can write to within
an XDP prog (not the driver); for example, in a prog array map the
scratchpad is written by one program in the array and read by another
later on. skb->cb allows for that. Unless you mean I can already write
to some XDP data_meta area?

cheers,
jamal
Shay Agroskin Dec. 21, 2020, 8:55 p.m. UTC | #8
Lorenzo Bianconi <lorenzo.bianconi@redhat.com> writes:

>>
>>
>> Lorenzo Bianconi <lorenzo.bianconi@redhat.com> writes:
>>
>> >> On Mon, 2020-12-07 at 17:32 +0100, Lorenzo Bianconi wrote:
>> >> > Introduce xdp_shared_info data structure to contain info
>> >> > about
>> >> > "non-linear" xdp frame. xdp_shared_info will alias
>> >> > skb_shared_info
>> >> > allowing to keep most of the frags in the same cache-line.
>> [...]
>> >>
>> >> > +  u16 nr_frags;
>> >> > +  u16 data_length; /* paged area length */
>> >> > +  skb_frag_t frags[MAX_SKB_FRAGS];
>> >>
>> >> why MAX_SKB_FRAGS ? just use a flexible array member
>> >> skb_frag_t frags[];
>> >>
>> >> and enforce size via the n_frags and on the construction of 
>> >> the
>> >> tailroom preserved buffer, which is already being done.
>> >>
>> >> this is a waste of unnecessary space, at least by definition 
>> >> of
>> >> the
>> >> struct, in your use case you do:
>> >> memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) *
>> >> num_frags);
>> >> And the tailroom space was already preserved for a full
>> >> skb_shinfo.
>> >> so i don't see why you need this array to be of a fixed
>> >> MAX_SKB_FRAGS
>> >> size.
>> >
>> > In order to avoid cache-misses, xdp_shared info is built as a
>> > variable
>> > on mvneta_rx_swbm() stack and it is written to "shared_info"
>> > area only on the
>> > last fragment in mvneta_swbm_add_rx_fragment(). I used
>> > MAX_SKB_FRAGS to be
>> > aligned with skb_shared_info struct but probably we can use 
>> > even
>> > a smaller value.
>> > Another approach would be to define two different struct, 
>> > e.g.
>> >
>> > struct xdp_frag_metadata {
>> >       u16 nr_frags;
>> >       u16 data_length; /* paged area length */
>> > };
>> >
>> > struct xdp_frags {
>> >       skb_frag_t frags[MAX_SKB_FRAGS];
>> > };
>> >
>> > and then define xdp_shared_info as
>> >
>> > struct xdp_shared_info {
>> >       struct xdp_frag_metadata meta;
>> >       skb_frag_t frags[];
>> > };
>> >
>> > In this way we can probably optimize the space. What do you
>> > think?
>>
>> We're still reserving ~sizeof(skb_shared_info) bytes at the end 
>> of
>> the first buffer and it seems like in mvneta code you keep
>> updating all three fields (frags, nr_frags and data_length).
>> Can you explain how the space is optimized by splitting the
>> structs please?
>
> using xdp_shared_info struct we will have the first 3 fragments 
> in the
> same cacheline of nr_frags while using skb_shared_info struct 
> only the
> first fragment will be in the same cacheline of 
> nr_frags. Moreover
> skb_shared_info has multiple fields unused by xdp.
>
> Regards,
> Lorenzo
>

Thanks for your reply. I was actually referring to your suggestion 
to Saeed. Namely, defining

struct xdp_shared_info {
       struct xdp_frag_metadata meta;
       skb_frag_t frags[];
}

I don't see what benefits there are to this scheme compared to the
original patch.

Thanks,
Shay

>>
>> >>
>> >> > +};
>> >> > +
[...]
>>
>> Saeed, the stack receives skb_shared_info when the frames are
>> passed to the stack (skb_add_rx_frag is used to add the whole
>> information to skb's shared info), and for XDP_REDIRECT use 
>> case,
>> it doesn't seem like all drivers check page's tailroom for more
>> information anyway (ena doesn't at least).
>> Can you please explain what do you mean by "break the stack"?
>>
>> Thanks, Shay
>>
>> >>
>> [...]
>> >
>> >>
>>
Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 1e5b5c69685a..d635463609ad 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2033,14 +2033,17 @@  int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 
 static void
 mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
-		    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+		    struct xdp_buff *xdp, struct xdp_shared_info *xdp_sinfo,
 		    int sync_len)
 {
 	int i;
 
-	for (i = 0; i < sinfo->nr_frags; i++)
+	for (i = 0; i < xdp_sinfo->nr_frags; i++) {
+		skb_frag_t *frag = &xdp_sinfo->frags[i];
+
 		page_pool_put_full_page(rxq->page_pool,
-					skb_frag_page(&sinfo->frags[i]), true);
+					xdp_get_frag_page(frag), true);
+	}
 	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
 			   sync_len, true);
 }
@@ -2179,7 +2182,7 @@  mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp,
 	       u32 frame_sz, struct mvneta_stats *stats)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+	struct xdp_shared_info *xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
 	unsigned int len, data_len, sync;
 	u32 ret, act;
 
@@ -2200,7 +2203,7 @@  mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
 		err = xdp_do_redirect(pp->dev, xdp, prog);
 		if (unlikely(err)) {
-			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+			mvneta_xdp_put_buff(pp, rxq, xdp, xdp_sinfo, sync);
 			ret = MVNETA_XDP_DROPPED;
 		} else {
 			ret = MVNETA_XDP_REDIR;
@@ -2211,7 +2214,7 @@  mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	case XDP_TX:
 		ret = mvneta_xdp_xmit_back(pp, xdp);
 		if (ret != MVNETA_XDP_TX)
-			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+			mvneta_xdp_put_buff(pp, rxq, xdp, xdp_sinfo, sync);
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -2220,7 +2223,7 @@  mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		trace_xdp_exception(pp->dev, prog, act);
 		fallthrough;
 	case XDP_DROP:
-		mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+		mvneta_xdp_put_buff(pp, rxq, xdp, xdp_sinfo, sync);
 		ret = MVNETA_XDP_DROPPED;
 		stats->xdp_drop++;
 		break;
@@ -2241,9 +2244,9 @@  mvneta_swbm_rx_frame(struct mvneta_port *pp,
 {
 	unsigned char *data = page_address(page);
 	int data_len = -MVNETA_MH_SIZE, len;
+	struct xdp_shared_info *xdp_sinfo;
 	struct net_device *dev = pp->dev;
 	enum dma_data_direction dma_dir;
-	struct skb_shared_info *sinfo;
 
 	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
 		len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2269,8 +2272,8 @@  mvneta_swbm_rx_frame(struct mvneta_port *pp,
 	xdp->data_end = xdp->data + data_len;
 	xdp_set_data_meta_invalid(xdp);
 
-	sinfo = xdp_get_shared_info_from_buff(xdp);
-	sinfo->nr_frags = 0;
+	xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
+	xdp_sinfo->nr_frags = 0;
 }
 
 static void
@@ -2278,7 +2281,7 @@  mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 			    struct mvneta_rx_desc *rx_desc,
 			    struct mvneta_rx_queue *rxq,
 			    struct xdp_buff *xdp, int *size,
-			    struct skb_shared_info *xdp_sinfo,
+			    struct xdp_shared_info *xdp_sinfo,
 			    struct page *page)
 {
 	struct net_device *dev = pp->dev;
@@ -2301,13 +2304,13 @@  mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
 		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
 
-		skb_frag_off_set(frag, pp->rx_offset_correction);
-		skb_frag_size_set(frag, data_len);
-		__skb_frag_set_page(frag, page);
+		xdp_set_frag_offset(frag, pp->rx_offset_correction);
+		xdp_set_frag_size(frag, data_len);
+		xdp_set_frag_page(frag, page);
 
 		/* last fragment */
 		if (len == *size) {
-			struct skb_shared_info *sinfo;
+			struct xdp_shared_info *sinfo;
 
 			sinfo = xdp_get_shared_info_from_buff(xdp);
 			sinfo->nr_frags = xdp_sinfo->nr_frags;
@@ -2324,10 +2327,13 @@  static struct sk_buff *
 mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		      struct xdp_buff *xdp, u32 desc_status)
 {
-	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-	int i, num_frags = sinfo->nr_frags;
+	struct xdp_shared_info *xdp_sinfo = xdp_get_shared_info_from_buff(xdp);
+	int i, num_frags = xdp_sinfo->nr_frags;
+	skb_frag_t frag_list[MAX_SKB_FRAGS];
 	struct sk_buff *skb;
 
+	memcpy(frag_list, xdp_sinfo->frags, sizeof(skb_frag_t) * num_frags);
+
 	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
@@ -2339,12 +2345,12 @@  mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	mvneta_rx_csum(pp, desc_status, skb);
 
 	for (i = 0; i < num_frags; i++) {
-		skb_frag_t *frag = &sinfo->frags[i];
+		struct page *page = xdp_get_frag_page(&frag_list[i]);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				skb_frag_page(frag), skb_frag_off(frag),
-				skb_frag_size(frag), PAGE_SIZE);
-		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
+				page, xdp_get_frag_offset(&frag_list[i]),
+				xdp_get_frag_size(&frag_list[i]), PAGE_SIZE);
+		page_pool_release_page(rxq->page_pool, page);
 	}
 
 	return skb;
@@ -2357,7 +2363,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 {
 	int rx_proc = 0, rx_todo, refill, size = 0;
 	struct net_device *dev = pp->dev;
-	struct skb_shared_info sinfo;
+	struct xdp_shared_info xdp_sinfo;
 	struct mvneta_stats ps = {};
 	struct bpf_prog *xdp_prog;
 	u32 desc_status, frame_sz;
@@ -2368,7 +2374,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 	xdp_buf.rxq = &rxq->xdp_rxq;
 	xdp_buf.mb = 0;
 
-	sinfo.nr_frags = 0;
+	xdp_sinfo.nr_frags = 0;
 
 	/* Get number of received packets */
 	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2412,7 +2418,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 			}
 
 			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
-						    &size, &sinfo, page);
+						    &size, &xdp_sinfo, page);
 		} /* Middle or Last descriptor */
 
 		if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2420,7 +2426,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 			continue;
 
 		if (size) {
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &xdp_sinfo, -1);
 			goto next;
 		}
 
@@ -2432,7 +2438,7 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 		if (IS_ERR(skb)) {
 			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 
-			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &xdp_sinfo, -1);
 
 			u64_stats_update_begin(&stats->syncp);
 			stats->es.skb_alloc_error++;
@@ -2449,12 +2455,12 @@  static int mvneta_rx_swbm(struct napi_struct *napi,
 		napi_gro_receive(napi, skb);
 next:
 		xdp_buf.data_hard_start = NULL;
-		sinfo.nr_frags = 0;
+		xdp_sinfo.nr_frags = 0;
 	}
 	rcu_read_unlock();
 
 	if (xdp_buf.data_hard_start)
-		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &xdp_sinfo, -1);
 
 	if (ps.xdp_redirect)
 		xdp_do_flush_map();
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 70559720ff44..614f66d35ee8 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -87,10 +87,54 @@  struct xdp_buff {
 	((xdp)->data_hard_start + (xdp)->frame_sz -	\
 	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
-static inline struct skb_shared_info *
+struct xdp_shared_info {
+	u16 nr_frags;
+	u16 data_length; /* paged area length */
+	skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+static inline struct xdp_shared_info *
 xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
 {
-	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
+	BUILD_BUG_ON(sizeof(struct xdp_shared_info) >
+		     sizeof(struct skb_shared_info));
+	return (struct xdp_shared_info *)xdp_data_hard_end(xdp);
+}
+
+static inline struct page *xdp_get_frag_page(const skb_frag_t *frag)
+{
+	return frag->bv_page;
+}
+
+static inline unsigned int xdp_get_frag_offset(const skb_frag_t *frag)
+{
+	return frag->bv_offset;
+}
+
+static inline unsigned int xdp_get_frag_size(const skb_frag_t *frag)
+{
+	return frag->bv_len;
+}
+
+static inline void *xdp_get_frag_address(const skb_frag_t *frag)
+{
+	return page_address(xdp_get_frag_page(frag)) +
+	       xdp_get_frag_offset(frag);
+}
+
+static inline void xdp_set_frag_page(skb_frag_t *frag, struct page *page)
+{
+	frag->bv_page = page;
+}
+
+static inline void xdp_set_frag_offset(skb_frag_t *frag, u32 offset)
+{
+	frag->bv_offset = offset;
+}
+
+static inline void xdp_set_frag_size(skb_frag_t *frag, u32 size)
+{
+	frag->bv_len = size;
 }
 
 struct xdp_frame {
@@ -120,12 +164,12 @@  static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
 	bq->xa = NULL;
 }
 
-static inline struct skb_shared_info *
+static inline struct xdp_shared_info *
 xdp_get_shared_info_from_frame(struct xdp_frame *frame)
 {
 	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
 
-	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
+	return (struct xdp_shared_info *)(data_hard_start + frame->frame_sz -
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
 }