[v9,bpf-next,02/14] xdp: introduce flags field in xdp_buff/xdp_frame

Message ID 1316f3ef2763ff4c02244fb726c61568c972514c.1623674025.git.lorenzo@kernel.org (mailing list archive)
State Changes Requested
Delegated to: BPF
Series: mvneta: introduce XDP multi-buffer support

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 6 maintainers not CCed: yhs@fb.com kpsingh@kernel.org hawk@kernel.org andrii@kernel.org kafai@fb.com songliubraving@fb.com
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 5151 this patch: 5151
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 74 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 5216 this patch: 5216
netdev/header_inline success

Commit Message

lorenzo@kernel.org June 14, 2021, 12:49 p.m. UTC
Introduce a flags field in the xdp_frame/xdp_buff data structures to
define additional buffer features. At the moment the only supported
buffer feature is the multi-buffer bit (mb), which specifies whether
this is a linear buffer (mb = 0) or a multi-buffer frame (mb = 1). In
the latter case the shared_info area at the end of the first buffer is
properly initialized to link together subsequent buffers.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 include/net/xdp.h | 31 ++++++++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)
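
As an illustration, a minimal sketch of how a driver RX path might use
the helpers added by the patch below (the mydrv_* descriptor and its
fields are hypothetical, not part of this series; only the xdp_* calls
are real):

#include <net/xdp.h>

/* Hypothetical driver glue around the new mb helpers. */
static void mydrv_build_xdp(struct mydrv_rx_desc *desc,
			    struct xdp_buff *xdp,
			    struct xdp_rxq_info *rxq)
{
	/* with this patch, xdp_init_buff() also clears xdp->flags */
	xdp_init_buff(xdp, PAGE_SIZE, rxq);
	xdp_prepare_buff(xdp, desc->va, XDP_PACKET_HEADROOM,
			 desc->len, false);

	/* mark the buffer as non-linear; the fragments are linked via
	 * the shared_info area at the end of the first buffer
	 */
	if (desc->nr_frags)
		xdp_buff_set_mb(xdp);
}
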

Comments

Alexander Duyck June 28, 2021, 8:14 p.m. UTC | #1
On Mon, Jun 14, 2021 at 5:50 AM Lorenzo Bianconi <lorenzo@kernel.org> wrote:
> [...]

Instead of passing this between buffers and frames, I wonder if this
wouldn't be better placed in something like the xdp_mem_info structure,
since this is something that would be specific to how the device is
handling memory anyway. You could probably split the type field into a
16b type and a 16b flags field, then add your bit where 0 is
linear/legacy and 1 is scatter-gather/multi-buffer.
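
Concretely, the split being suggested would look something like this (a
rough sketch; today xdp_mem_info is just a u32 type plus a u32 id, and
the flag semantics here are only illustrative):

struct xdp_mem_info {
	u16 type;	/* enum xdp_mem_type values still fit easily */
	u16 flags;	/* bit 0: 0 = linear/legacy, 1 = scatter-gather */
	u32 id;
};
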
Lorenzo Bianconi June 29, 2021, 12:43 p.m. UTC | #2
> On Mon, Jun 14, 2021 at 5:50 AM Lorenzo Bianconi <lorenzo@kernel.org> wrote:
> > [...]
> 
> Instead of passing this between buffers and frames, I wonder if this
> wouldn't be better placed in something like the xdp_mem_info structure,
> since this is something that would be specific to how the device is
> handling memory anyway. You could probably split the type field into a
> 16b type and a 16b flags field, then add your bit where 0 is
> linear/legacy and 1 is scatter-gather/multi-buffer.
> 

ack, this should be fine, but I put the flags field in xdp_buff/xdp_frame
in order to reuse it for some xdp hw-hints (e.g. rx checksum type).
We can put it in xdp_mem_info too, but I guess it would be less intuitive.
What do you think?

Regards,
Lorenzo
Alexander Duyck June 29, 2021, 1:07 p.m. UTC | #3
On Tue, Jun 29, 2021 at 5:43 AM Lorenzo Bianconi
<lorenzo.bianconi@redhat.com> wrote:
>
> > [...]
>
> ack, this should be fine, but I put the flags field in xdp_buff/xdp_frame
> in order to reuse it for some xdp hw-hints (e.g. rx checksum type).
> We can put it in xdp_mem_info too, but I guess it would be less intuitive.
> What do you think?

I think it makes the most sense in xdp_mem_info. It already tells us
what to expect with regards to memory layout, as it tells us whether we
are dealing with shared pages or whole pages and how to recycle them. I
would think that applies almost identically to scatter-gather XDP.

As far as the addition of flags goes, there is still time for that
later, as we still have 32b of unused space after frame_sz.
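
For reference, the unused space mentioned here is the tail padding of
xdp_buff on 64-bit builds: after the four data pointers and the two
queue-info pointers, the u32 frame_sz leaves the structure 4 bytes
short of pointer alignment (layout from include/net/xdp.h; the trailing
comment is an annotation, not from the tree):

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz;
	/* 32b of padding here on 64-bit, so a u32 flags field is "free" */
};
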
Lorenzo Bianconi June 29, 2021, 1:25 p.m. UTC | #4
> On Tue, Jun 29, 2021 at 5:43 AM Lorenzo Bianconi
> <lorenzo.bianconi@redhat.com> wrote:
> > [...]
> 
> I think it makes the most sense in xdp_mem_info. It already tells us
> what to expect with regards to memory layout, as it tells us whether
> we are dealing with shared pages or whole pages and how to recycle
> them. I would think that applies almost identically to scatter-gather
> XDP.
> 
> As far as the addition of flags goes, there is still time for that
> later, as we still have 32b of unused space after frame_sz.

ack, I am fine with it. If everybody agrees, I will fix it in v10.

Regards,
Lorenzo

>
Lorenzo Bianconi July 5, 2021, 3:52 p.m. UTC | #5
> On Tue, Jun 29, 2021 at 5:43 AM Lorenzo Bianconi
> <lorenzo.bianconi@redhat.com> wrote:
> > [...]
> 
> I think it makes the most sense in xdp_mem_info. It already tells us
> what to expect with regards to memory layout, as it tells us whether
> we are dealing with shared pages or whole pages and how to recycle
> them. I would think that applies almost identically to scatter-gather
> XDP.

Hi Alex,

Reviewing the code to address this comment, I think I spotted a corner case
where we can't use this approach. Whenever we run dev_map_bpf_prog_run()
we lose the mb info when converting an xdp_frame to an xdp_buff, since
xdp_convert_frame_to_buff() does not copy it and we have no xdp_rxq_info there.
Do you think we should add an rxq_info there, similar to what we did for cpumap?
I think it is better to keep the previous approach since it seems cleaner and
reusable in the future. What do you think?
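
Roughly, the path in question looks like this (a simplified sketch of
dev_map_bpf_prog_run(), not the verbatim kernel code):

	struct xdp_buff xdp;

	/* copies the data pointers and frame_sz, but not rxq */
	xdp_convert_frame_to_buff(xdpf, &xdp);
	xdp.txq = &txq;
	/* xdp.rxq is never set on this path, so if the mb bit lived in
	 * xdp.rxq->mem the program run below could not reach it
	 */
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
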

Regards,
Lorenzo

> 
> As far as the addition of flags goes, there is still time for that
> later, as we still have 32b of unused space after frame_sz.
>
Alexander Duyck July 5, 2021, 9:35 p.m. UTC | #6
On Mon, Jul 5, 2021 at 8:52 AM Lorenzo Bianconi
<lorenzo.bianconi@redhat.com> wrote:
>
> > [...]
>
> Hi Alex,
>
> Reviewing the code to address this comment, I think I spotted a corner case
> where we can't use this approach. Whenever we run dev_map_bpf_prog_run()
> we lose the mb info when converting an xdp_frame to an xdp_buff, since
> xdp_convert_frame_to_buff() does not copy it and we have no xdp_rxq_info there.
> Do you think we should add an rxq_info there, similar to what we did for cpumap?
> I think it is better to keep the previous approach since it seems cleaner and
> reusable in the future. What do you think?
>

Hi Lorenzo,

What about doing something like breaking up the type value in
xdp_mem_info? The fact is having it as an enum doesn't get us much
since we have a 32b type field but are only storing 4 possible values
there currently.

The way I see it, scatter-gather is just another memory model
attribute rather than being something entirely new. It makes as much
sense to have a bit there for MEM_TYPE_PAGE_SG as it does for
MEM_TYPE_PAGE_SHARED. I would consider splitting the type field
into two 16b fields. For example, you might have one field that
describes the source pool, which is currently either an allocated page
(ORDER0, SHARED), page_pool (PAGE_POOL), or XSK pool (XSK_BUFF_POOL),
and then two type flags, shared and/or scatter-gather.
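
As a rough sketch of that encoding (all names here are invented for
illustration, nothing below is from the tree):

enum xdp_mem_pool {
	MEM_POOL_PAGE,		/* page allocator: today's ORDER0/SHARED */
	MEM_POOL_PAGE_POOL,	/* today's MEM_TYPE_PAGE_POOL */
	MEM_POOL_XSK,		/* today's MEM_TYPE_XSK_BUFF_POOL */
};

#define MEM_FLAG_SHARED	BIT(0)	/* page may carry several frames */
#define MEM_FLAG_SG	BIT(1)	/* scatter-gather / multi-buffer */

struct xdp_mem_info {
	u16 type;	/* enum xdp_mem_pool */
	u16 flags;	/* MEM_FLAG_* */
	u32 id;
};
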

Also, looking over the code I don't see any reason why the current
ORDER0/SHARED types couldn't be merged, as the free paths are
essentially identical: the MEM_TYPE_PAGE_SHARED path would work
perfectly fine to free MEM_TYPE_PAGE_ORDER0 pages.

Thanks,

- Alex
Lorenzo Bianconi July 6, 2021, 11:53 a.m. UTC | #7
> On Mon, Jul 5, 2021 at 8:52 AM Lorenzo Bianconi
> <lorenzo.bianconi@redhat.com> wrote:
> >
> > > On Tue, Jun 29, 2021 at 5:43 AM Lorenzo Bianconi
> > > <lorenzo.bianconi@redhat.com> wrote:

[...]
> 
> Hi Lorenzo,
> 
> What about doing something like breaking up the type value in
> xdp_mem_info? The fact is having it as an enum doesn't get us much
> since we have a 32b type field but are only storing 4 possible values
> there currently.
> 
> The way I see it, scatter-gather is just another memory model
> attribute rather than being something entirely new. It makes as much
> sense to have a bit there for MEM_TYPE_PAGE_SG as it does for
> MEM_TYPE_PAGE_SHARED. I would consider splitting the type field
> into two 16b fields. For example, you might have one field that
> describes the source pool, which is currently either an allocated page
> (ORDER0, SHARED), page_pool (PAGE_POOL), or XSK pool (XSK_BUFF_POOL),
> and then two type flags, shared and/or scatter-gather.

Hi Alex,

I am fine with reducing the xdp_mem_info size, defining the type field as u16
instead of u32, but I think mb is a per-xdp_buff/xdp_frame property, since at
runtime we can receive a tiny single-page xdp_buff/xdp_frame and a "jumbo"
xdp_buff/xdp_frame composed of multiple pages. According to the documentation
available in include/net/xdp.h, xdp_rxq_info (where xdp_mem_info is contained
for xdp_buff) is "associated with the driver level RX-ring queues and it is
information that is specific to how the driver have configured a given RX-ring
queue", so I guess it is a little bit counterintuitive to add this info there.
Moreover, we have the "issue" for devmap in dev_map_bpf_prog_run() when we
perform XDP_REDIRECT with the approach you proposed, and lastly we can reuse
this new flags field for XDP hw-hints support.
What about reducing xdp_mem_info and adding the flags field to xdp_buff/xdp_frame
in order to avoid increasing the xdp_buff/xdp_frame size? Am I missing
something?

Regards,
Lorenzo

> 
> Also, looking over the code I don't see any reason why the current
> ORDER0/SHARED types couldn't be merged, as the free paths are
> essentially identical: the MEM_TYPE_PAGE_SHARED path would work
> perfectly fine to free MEM_TYPE_PAGE_ORDER0 pages.
> 
> Thanks,
> 
> - Alex
>
Alexander Duyck July 6, 2021, 2:04 p.m. UTC | #8
On Tue, Jul 6, 2021 at 4:53 AM Lorenzo Bianconi
<lorenzo.bianconi@redhat.com> wrote:
>
> [...]
>
> Hi Alex,
>
> I am fine with reducing the xdp_mem_info size, defining the type field as u16
> instead of u32, but I think mb is a per-xdp_buff/xdp_frame property, since at
> runtime we can receive a tiny single-page xdp_buff/xdp_frame and a "jumbo"
> xdp_buff/xdp_frame composed of multiple pages. According to the documentation
> available in include/net/xdp.h, xdp_rxq_info (where xdp_mem_info is contained
> for xdp_buff) is "associated with the driver level RX-ring queues and it is
> information that is specific to how the driver have configured a given RX-ring
> queue", so I guess it is a little bit counterintuitive to add this info there.

It isn't really all that counterintuitive. However, it does put the
onus on the driver to be consistent about things. So even a
single-buffer xdp_buff would technically have to be a scatter-gather
buff, but it would have no fragments in it. The requirement, then,
would be to initialize the frags and data_len fields to 0 for all
xdp_buff structures.
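
In practice that per-buffer requirement would look something like the
following sketch (the shared_info sits in the buffer's tailroom; the
exact name of the field tracking the total fragment length varied
across versions of this series):

	struct skb_shared_info *sinfo;

	sinfo = (struct skb_shared_info *)(xdp->data_hard_start +
					   xdp->frame_sz -
					   SKB_DATA_ALIGN(sizeof(*sinfo)));
	/* touched on every frame, even plain single-buffer ones */
	sinfo->nr_frags = 0;
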

> Moreover, we have the "issue" for devmap in dev_map_bpf_prog_run() when we
> perform XDP_REDIRECT with the approach you proposed, and lastly we can reuse
> this new flags field for XDP hw-hints support.
> What about reducing xdp_mem_info and adding the flags field to xdp_buff/xdp_frame
> in order to avoid increasing the xdp_buff/xdp_frame size? Am I missing
> something?

The problem is there isn't a mem_info field in the xdp_buff. It is in
the Rx queue info structure.
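
For context, the structure in question (from include/net/xdp.h around
this series; the annotations are mine):

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;	/* mem info lives here... */
	unsigned int napi_id;
} ____cacheline_aligned;	/* ...once per RX queue, not per buffer */
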

Thanks,

- Alex
Lorenzo Bianconi July 6, 2021, 5:47 p.m. UTC | #9
> On Tue, Jul 6, 2021 at 4:53 AM Lorenzo Bianconi
> <lorenzo.bianconi@redhat.com> wrote:
> >
> > [...]
> 
> It isn't really all that counterintuitive. However, it does put the
> onus on the driver to be consistent about things. So even a
> single-buffer xdp_buff would technically have to be a scatter-gather
> buff, but it would have no fragments in it. The requirement, then,
> would be to initialize the frags and data_len fields to 0 for all
> xdp_buff structures.

nr_frags and data_len are currently defined in the skb_shared_info of the
xdp_buff, so I guess initializing them to 0 will trigger a cache miss (in
fact we introduced the mb bit precisely to avoid this initialization and
the penalty it introduces for the legacy single-buffer use case). Do you
mean to have these fields in the xdp_buff/xdp_frame structure itself?

> 
> > Moreover, we have the "issue" for devmap in dev_map_bpf_prog_run() when we
> > perform XDP_REDIRECT with the approach you proposed, and lastly we can reuse
> > this new flags field for XDP hw-hints support.
> > What about reducing xdp_mem_info and adding the flags field to xdp_buff/xdp_frame
> > in order to avoid increasing the xdp_buff/xdp_frame size? Am I missing
> > something?
> 
> The problem is there isn't a mem_info field in the xdp_buff. It is in
> the Rx queue info structure.

yes, this is what I meant :)

Regards,
Lorenzo

> 
> Thanks,
> 
> - Alex
>

Patch

diff --git a/include/net/xdp.h b/include/net/xdp.h
index 5533f0ab2afc..de11d981fa44 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -66,6 +66,10 @@  struct xdp_txq_info {
 	struct net_device *dev;
 };
 
+enum xdp_buff_flags {
+	XDP_FLAGS_MULTI_BUFF	= BIT(0), /* non-linear xdp buff */
+};
+
 struct xdp_buff {
 	void *data;
 	void *data_end;
@@ -74,13 +78,30 @@  struct xdp_buff {
 	struct xdp_rxq_info *rxq;
 	struct xdp_txq_info *txq;
 	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
+	u16 flags; /* supported values defined in xdp_buff_flags */
 };
 
+static __always_inline bool xdp_buff_is_mb(struct xdp_buff *xdp)
+{
+	return !!(xdp->flags & XDP_FLAGS_MULTI_BUFF);
+}
+
+static __always_inline void xdp_buff_set_mb(struct xdp_buff *xdp)
+{
+	xdp->flags |= XDP_FLAGS_MULTI_BUFF;
+}
+
+static __always_inline void xdp_buff_clear_mb(struct xdp_buff *xdp)
+{
+	xdp->flags &= ~XDP_FLAGS_MULTI_BUFF;
+}
+
 static __always_inline void
 xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
 {
 	xdp->frame_sz = frame_sz;
 	xdp->rxq = rxq;
+	xdp->flags = 0;
 }
 
 static __always_inline void
@@ -117,13 +138,19 @@  struct xdp_frame {
 	u16 headroom;
 	u32 metasize:8;
 	u32 frame_sz:24;
+	struct net_device *dev_rx; /* used by cpumap */
 	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
 	 * while mem info is valid on remote CPU.
 	 */
 	struct xdp_mem_info mem;
-	struct net_device *dev_rx; /* used by cpumap */
+	u16 flags; /* supported values defined in xdp_buff_flags */
 };
 
+static __always_inline bool xdp_frame_is_mb(struct xdp_frame *frame)
+{
+	return !!(frame->flags & XDP_FLAGS_MULTI_BUFF);
+}
+
 #define XDP_BULK_QUEUE_SIZE	16
 struct xdp_frame_bulk {
 	int count;
@@ -180,6 +207,7 @@  void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
 	xdp->data_end = frame->data + frame->len;
 	xdp->data_meta = frame->data - frame->metasize;
 	xdp->frame_sz = frame->frame_sz;
+	xdp->flags = frame->flags;
 }
 
 static inline
@@ -206,6 +234,7 @@  int xdp_update_frame_from_buff(struct xdp_buff *xdp,
 	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
 	xdp_frame->metasize = metasize;
 	xdp_frame->frame_sz = xdp->frame_sz;
+	xdp_frame->flags = xdp->flags;
 
 	return 0;
 }