
bpf: provide map key to BPF program after redirect

Message ID: 20240705103853.21235-1-florian.kauer@linutronix.de (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: bpf: provide map key to BPF program after redirect

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 fail Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 fail Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 fail Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 fail Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 fail Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 fail Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-PR fail PR summary
netdev/series_format warning Single patches do not need cover letters; Target tree name not specified in the subject
netdev/tree_selection success Guessed tree name to be net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1054 this patch: 1054
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 18 of 18 maintainers
netdev/build_clang success Errors and warnings before: 1128 this patch: 1128
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 7782 this patch: 7782
netdev/checkpatch warning CHECK: braces {} should be used on all arms of this statement WARNING: line length of 81 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 6 this patch: 6
netdev/source_inline success Was 0 now: 0

Commit Message

Florian Kauer July 5, 2024, 10:38 a.m. UTC
Both DEVMAP and CPUMAP make it possible to attach BPF
programs to their entries; these programs are executed
after a redirect has been performed.

With BPF_F_BROADCAST it is also possible to execute BPF
programs for multiple clones of the same XDP frame. This
is useful, for example, for establishing redundant traffic
paths by setting different VLAN tags on the replicated
frames.

Currently, such a program has no information about the
map entry that led to its execution. While egress_ifindex
provides this information indirectly and allows
path-dependent processing of the replicated frames, it
does not work if multiple entries share the same
egress_ifindex.

Therefore, extend the xdp_md struct with a map_key field
that contains the key of the associated map entry after
a redirect has been performed.

See
https://lore.kernel.org/xdp-newbies/5eb6070c-a12e-4d4c-a9f0-a6a6fafa41d1@linutronix.de/T/#u
for the discussion that led to this patch.

Signed-off-by: Florian Kauer <florian.kauer@linutronix.de>
---
 include/net/xdp.h        |  3 +++
 include/uapi/linux/bpf.h |  2 ++
 kernel/bpf/devmap.c      |  6 +++++-
 net/core/filter.c        | 18 ++++++++++++++++++
 4 files changed, 28 insertions(+), 1 deletion(-)
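
For illustration (not part of the patch), a minimal sketch of a devmap program that consumes the proposed field, e.g. to count frames per replicated path. It assumes the UAPI addition above; the per_path_count map and the program itself are invented for the example:

/* SPDX-License-Identifier: GPL-2.0 */
/* Sketch only: assumes the xdp_md.map_key addition proposed in this
 * patch; the per_path_count map is made up for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} per_path_count SEC(".maps");

SEC("xdp/devmap")
int count_per_entry(struct xdp_md *ctx)
{
	/* The patch only permits full 64-bit loads of map_key. */
	__u64 map_key = ctx->map_key;
	__u32 key = map_key;	/* devmap entry index used at redirect time */
	__u64 *cnt = bpf_map_lookup_elem(&per_path_count, &key);

	if (cnt)
		__sync_fetch_and_add(cnt, 1);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";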

Comments

Toke Høiland-Jørgensen July 5, 2024, 11:01 a.m. UTC | #1
Florian Kauer <florian.kauer@linutronix.de> writes:

> [...]
>
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index e6770dd40c91..e70f4dfea1a2 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -86,6 +86,7 @@ struct xdp_buff {
>  	struct xdp_txq_info *txq;
>  	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
>  	u32 flags; /* supported values defined in xdp_buff_flags */
> +	u64 map_key; /* set during redirect via a map */
>  };
>  
>  static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
> @@ -175,6 +176,7 @@ struct xdp_frame {
>  	struct net_device *dev_rx; /* used by cpumap */
>  	u32 frame_sz;
>  	u32 flags; /* supported values defined in xdp_buff_flags */
> +	u64 map_key; /* set during redirect via a map */
>  };

struct xdp_frame is size constrained, so we shouldn't be using precious
space on this. Besides, it's not information that should be carried
along with the packet after transmission. So let's put it into struct
xdp_txq_info and read it from there the same way we do for egress_ifindex :)

>  static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
> @@ -257,6 +259,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
>  	xdp->data_meta = frame->data - frame->metasize;
>  	xdp->frame_sz = frame->frame_sz;
>  	xdp->flags = frame->flags;
> +	xdp->map_key = frame->map_key;
>  }
>  
>  static inline
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index 35bcf52dbc65..7dbb0f2a236c 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -6455,6 +6455,8 @@ struct xdp_md {
>  	__u32 rx_queue_index;  /* rxq->queue_index  */
>  
>  	__u32 egress_ifindex;  /* txq->dev->ifindex */
> +
> +	__u64 map_key; /* set during redirect via a map in xdp_buff */
>  };

Maybe make the comment a bit easier to understand? Something like "key
of devmap/cpumap entry that is executing"?

-Toke
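
For illustration, a rough, untested sketch of the direction suggested above; nothing below is part of the submitted patch. The key would live in the per-run txq info instead of being carried in every frame, and the ctx rewrite would mirror the egress_ifindex case (the map_key member name and the exact insns are assumptions):

/* Sketch: struct xdp_txq_info gains the key. */
struct xdp_txq_info {
	struct net_device *dev;
	u64 map_key;	/* key of the devmap/cpumap entry being executed */
};

/* Sketch: xdp_convert_ctx_access() would read it via txq, the same
 * two-step load used for egress_ifindex.
 */
	case offsetof(struct xdp_md, map_key):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, txq));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_txq_info, map_key));
		break;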
Florian Kauer July 5, 2024, 11:12 a.m. UTC | #2
On 7/5/24 13:01, Toke Høiland-Jørgensen wrote:
> Florian Kauer <florian.kauer@linutronix.de> writes:
> 
>> [...]
>>
>> diff --git a/include/net/xdp.h b/include/net/xdp.h
>> index e6770dd40c91..e70f4dfea1a2 100644
>> --- a/include/net/xdp.h
>> +++ b/include/net/xdp.h
>> @@ -86,6 +86,7 @@ struct xdp_buff {
>>  	struct xdp_txq_info *txq;
>>  	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
>>  	u32 flags; /* supported values defined in xdp_buff_flags */
>> +	u64 map_key; /* set during redirect via a map */
>>  };
>>  
>>  static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
>> @@ -175,6 +176,7 @@ struct xdp_frame {
>>  	struct net_device *dev_rx; /* used by cpumap */
>>  	u32 frame_sz;
>>  	u32 flags; /* supported values defined in xdp_buff_flags */
>> +	u64 map_key; /* set during redirect via a map */
>>  };
> 
> struct xdp_frame is size constrained, so we shouldn't be using precious
> space on this. Besides, it's not information that should be carried
> along with the packet after transmission. So let's put it into struct
> xdp_txq_info and read it from there the same way we do for egress_ifindex :)

Very reasonable, but do you really mean struct xdp_frame and not
xdp_buff? Only the latter has the xdp_txq_info.

> 
>>  static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
>> @@ -257,6 +259,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
>>  	xdp->data_meta = frame->data - frame->metasize;
>>  	xdp->frame_sz = frame->frame_sz;
>>  	xdp->flags = frame->flags;
>> +	xdp->map_key = frame->map_key;
>>  }
>>  
>>  static inline
>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
>> index 35bcf52dbc65..7dbb0f2a236c 100644
>> --- a/include/uapi/linux/bpf.h
>> +++ b/include/uapi/linux/bpf.h
>> @@ -6455,6 +6455,8 @@ struct xdp_md {
>>  	__u32 rx_queue_index;  /* rxq->queue_index  */
>>  
>>  	__u32 egress_ifindex;  /* txq->dev->ifindex */
>> +
>> +	__u64 map_key; /* set during redirect via a map in xdp_buff */
>>  };
> 
> Maybe make the comment a bit easier to understand? Something like "key
> of devmap/cpumap entry that is executing"?

Agreed, thanks!

> 
> -Toke
>
Toke Høiland-Jørgensen July 5, 2024, 1:18 p.m. UTC | #3
Florian Kauer <florian.kauer@linutronix.de> writes:

> On 7/5/24 13:01, Toke Høiland-Jørgensen wrote:
>> Florian Kauer <florian.kauer@linutronix.de> writes:
>> 
>>> [...]
>>>
>>> diff --git a/include/net/xdp.h b/include/net/xdp.h
>>> index e6770dd40c91..e70f4dfea1a2 100644
>>> --- a/include/net/xdp.h
>>> +++ b/include/net/xdp.h
>>> @@ -86,6 +86,7 @@ struct xdp_buff {
>>>  	struct xdp_txq_info *txq;
>>>  	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
>>>  	u32 flags; /* supported values defined in xdp_buff_flags */
>>> +	u64 map_key; /* set during redirect via a map */
>>>  };
>>>  
>>>  static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
>>> @@ -175,6 +176,7 @@ struct xdp_frame {
>>>  	struct net_device *dev_rx; /* used by cpumap */
>>>  	u32 frame_sz;
>>>  	u32 flags; /* supported values defined in xdp_buff_flags */
>>> +	u64 map_key; /* set during redirect via a map */
>>>  };
>> 
>> struct xdp_frame is size constrained, so we shouldn't be using precious
>> space on this. Besides, it's not information that should be carried
>> along with the packet after transmission. So let's put it into struct
>> xdp_txq_info and read it from there the same way we do for egress_ifindex :)
>
> Very reasonable, but do you really mean struct xdp_frame or xdp_buff?
> Only the latter has the xdp_txq_info?

Well, we should have the field in neither, but xdp_frame is the one that
is size constrained. Whenever a cpumap/devmap program is run (in
xdp_bq_bpf_prog_run() and dev_map_bpf_prog_run_skb()), a struct
xdp_txq_info is prepared on the stack, so you'll just need to add
setting of the new value to that...

-Toke
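
To make that concrete, a rough sketch of the devmap flush helper with the key plumbed through. The function shape is abbreviated from kernel/bpf/devmap.c; the map_key argument, the txq member, and the elided frame updating and error handling are assumptions of this sketch:

/* Sketch only, not the submitted patch: the txq_info is built on the
 * stack right before the devmap program runs, so the key can be set
 * there without growing struct xdp_frame.
 */
static u32 dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev, u64 map_key)
{
	struct xdp_txq_info txq = { .dev = dev, .map_key = map_key };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		xdp_convert_frame_to_buff(frames[i], &xdp);
		xdp.txq = &txq;	/* the program reads ctx->map_key via txq */

		if (bpf_prog_run_xdp(xdp_prog, &xdp) == XDP_PASS)
			frames[nframes++] = frames[i];
	}
	return nframes;
}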

Patch

diff --git a/include/net/xdp.h b/include/net/xdp.h
index e6770dd40c91..e70f4dfea1a2 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -86,6 +86,7 @@ struct xdp_buff {
 	struct xdp_txq_info *txq;
 	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
 	u32 flags; /* supported values defined in xdp_buff_flags */
+	u64 map_key; /* set during redirect via a map */
 };
 
 static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
@@ -175,6 +176,7 @@ struct xdp_frame {
 	struct net_device *dev_rx; /* used by cpumap */
 	u32 frame_sz;
 	u32 flags; /* supported values defined in xdp_buff_flags */
+	u64 map_key; /* set during redirect via a map */
 };
 
 static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
@@ -257,6 +259,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
 	xdp->data_meta = frame->data - frame->metasize;
 	xdp->frame_sz = frame->frame_sz;
 	xdp->flags = frame->flags;
+	xdp->map_key = frame->map_key;
 }
 
 static inline
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 35bcf52dbc65..7dbb0f2a236c 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6455,6 +6455,8 @@ struct xdp_md {
 	__u32 rx_queue_index;  /* rxq->queue_index  */
 
 	__u32 egress_ifindex;  /* txq->dev->ifindex */
+
+	__u64 map_key; /* set during redirect via a map in xdp_buff */
 };
 
 /* DEVMAP map-value layout
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index da1fec906b96..fac3e8a6c51e 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -574,6 +574,8 @@ static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
 	if (!nxdpf)
 		return -ENOMEM;
 
+	nxdpf->map_key = obj->idx;
+
 	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
 
 	return 0;
@@ -670,8 +672,10 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
 	}
 
 	/* consume the last copy of the frame */
-	if (last_dst)
+	if (last_dst) {
+		xdpf->map_key = last_dst->idx;
 		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
+	}
 	else
 		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
 
diff --git a/net/core/filter.c b/net/core/filter.c
index f1c37c85b858..7762a6d6900f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4394,10 +4394,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
 			err = dev_map_enqueue_multi(xdpf, dev, map,
 						    flags & BPF_F_EXCLUDE_INGRESS);
 		} else {
+			xdpf->map_key = ri->tgt_index;
 			err = dev_map_enqueue(fwd, xdpf, dev);
 		}
 		break;
 	case BPF_MAP_TYPE_CPUMAP:
+		xdpf->map_key = ri->tgt_index;
 		err = cpu_map_enqueue(fwd, xdpf, dev);
 		break;
 	case BPF_MAP_TYPE_UNSPEC:
@@ -4407,6 +4409,7 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
 				err = -EINVAL;
 				break;
 			}
+			xdpf->map_key = ri->tgt_index;
 			err = dev_xdp_enqueue(fwd, xdpf, dev);
 			break;
 		}
@@ -9022,6 +9025,16 @@ static bool xdp_is_valid_access(int off, int size,
 	case offsetof(struct xdp_md, data_end):
 		info->reg_type = PTR_TO_PACKET_END;
 		break;
+	case offsetof(struct xdp_md, map_key):
+		if (prog->expected_attach_type != BPF_XDP_DEVMAP &&
+		    prog->expected_attach_type != BPF_XDP_CPUMAP) {
+			return false;
+		}
+
+		if (size != sizeof(__u64))
+			return false;
+
+		return true;
 	}
 
 	return __is_valid_xdp_access(off, size);
@@ -10116,6 +10129,11 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
 		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
 				      offsetof(struct net_device, ifindex));
 		break;
+	case offsetof(struct xdp_md, map_key):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, map_key),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct xdp_buff, map_key));
+		break;
 	}
 
 	return insn - insn_buf;
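
Finally, to show how the key would surface to user space, a hypothetical libbpf loader sketch (the function, fds, and two-path setup are invented for the example): the devmap index used at update time is exactly what the attached program would later observe as ctx->map_key.

/* Hypothetical setup: two devmap slots running the same devmap
 * program; the slot index (0 or 1) is what ctx->map_key would report.
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

int setup_paths(int devmap_fd, int devmap_prog_fd, int ifindex0, int ifindex1)
{
	struct bpf_devmap_val val = {};
	__u32 key;

	key = 0;			/* primary path: map_key == 0 */
	val.ifindex = ifindex0;
	val.bpf_prog.fd = devmap_prog_fd;
	if (bpf_map_update_elem(devmap_fd, &key, &val, BPF_ANY))
		return -1;

	key = 1;			/* redundant path: map_key == 1 */
	val.ifindex = ifindex1;
	if (bpf_map_update_elem(devmap_fd, &key, &val, BPF_ANY))
		return -1;

	return 0;
}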