diff mbox series

[bpf-next,16/16] selftests/bpf: Add test cases for hash map with dynptr key

Message ID 20241008091501.8302-17-houtao@huaweicloud.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Headers show
Series Support dynptr key for hash map

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format fail Series longer than 15 patches
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 6 this patch: 6
netdev/build_tools success Errors and warnings before: 12 (+1) this patch: 12 (+1)
netdev/cc_maintainers warning 3 maintainers not CCed: linux-kselftest@vger.kernel.org mykolal@fb.com shuah@kernel.org
netdev/build_clang success Errors and warnings before: 7 this patch: 7
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 9 this patch: 9
netdev/checkpatch warning WARNING: Prefer strscpy, strscpy_pad, or __nonstring over strncpy - see: https://github.com/KSPP/linux/issues/90 WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 100 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18

Commit Message

Hou Tao Oct. 8, 2024, 9:15 a.m. UTC
From: Hou Tao <houtao1@huawei.com>

Add three positive test cases to exercise the basic operations on
dynptr-keyed hash maps: lookup, update, delete, lookup_and_delete, and
get_next_key. These operations are exercised through both the bpf
syscall and bpf programs. The three test cases use different map keys:
the first one uses both a plain bpf_dynptr and a struct containing only
a bpf_dynptr as the map key, the second one uses a struct with an
integer and a bpf_dynptr, and the last one uses a struct with two
bpf_dynptrs.

Also add multiple negative test cases for dynptr-keyed hash maps.
These test cases check that the register type used for the map key is
the expected one, that the offset of the key access is properly
aligned, and that the layout of the dynptr and non-dynptr parts on the
stack matches the definition of map->key_record.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 .../bpf/prog_tests/htab_dynkey_test.c         | 451 ++++++++++++++++++
 .../bpf/progs/htab_dynkey_test_failure.c      | 270 +++++++++++
 .../bpf/progs/htab_dynkey_test_success.c      | 399 ++++++++++++++++
 3 files changed, 1120 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/htab_dynkey_test.c
 create mode 100644 tools/testing/selftests/bpf/progs/htab_dynkey_test_failure.c
 create mode 100644 tools/testing/selftests/bpf/progs/htab_dynkey_test_success.c
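
For context, this is roughly how a dynptr-keyed hash map is declared and
looked up from user space in this series. The BPF_F_DYNPTR_IN_KEY flag and
the bpf_dynptr_user helpers come from earlier patches in the series; the
struct names and map_fd below are illustrative only, not taken from this
patch:

	/* BPF side: hash map keyed by an id plus a variable-length name */
	struct id_dname_key {
		int id;
		struct bpf_dynptr name;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 10);
		__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
		__type(key, struct id_dname_key);
		__type(value, unsigned long);
		__uint(map_extra, 1024);
	} htab SEC(".maps");

	/* User-space side: the dynptr part of the key is described by
	 * struct bpf_dynptr_user, which carries the data pointer and size.
	 */
	struct id_dname_key_user {
		int id;
		struct bpf_dynptr_user name;
	};

	struct id_dname_key_user key = {};
	unsigned long value;
	char *name = strdup("systemd");
	int err;

	key.id = 1000;
	bpf_dynptr_user_init(name, strlen(name) + 1, &key.name);
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	free(name);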

Comments

Alexei Starovoitov Oct. 11, 2024, 6:23 p.m. UTC | #1
On Tue, Oct 8, 2024 at 2:02 AM Hou Tao <houtao@huaweicloud.com> wrote:
>
> +
> +SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
> +int BPF_PROG(pure_dynptr_key)

...

> +SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
> +int BPF_PROG(mixed_dynptr_key)

...
> +SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
> +int BPF_PROG(multiple_dynptr_key)

attaching to syscalls with pid filtering is ok-ish,
but it's a few unnecessary steps.
Use tracing prog for non-sleepable and syscall prog for sleepable
and bpf_prog_run() it.
More predictable and no need for a pid filter.
Hou Tao Oct. 21, 2024, 2:05 p.m. UTC | #2
Hi,

On 10/12/2024 2:23 AM, Alexei Starovoitov wrote:
> On Tue, Oct 8, 2024 at 2:02 AM Hou Tao <houtao@huaweicloud.com> wrote:
>> +
>> +SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
>> +int BPF_PROG(pure_dynptr_key)
> ...
>
>> +SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
>> +int BPF_PROG(mixed_dynptr_key)
> ...
>> +SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
>> +int BPF_PROG(multiple_dynptr_key)
> attaching to syscalls with pid filtering is ok-ish,
> but it's a few unnecessary steps.
> Use tracing prog for non-sleepable and syscall prog for sleepable
> and bpf_prog_run() it.
> More predictable and no need for a pid filter.

Thanks for the suggestion. Will do in v2.
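
For reference, a minimal sketch of the suggested rework, using a
SEC("syscall") program run via bpf_prog_test_run_opts() instead of a
pid-filtered fentry attach; the program name run_pure_dynptr_key is a
placeholder and not part of the posted patch:

	/* BPF side: run the existing test body on demand */
	SEC("syscall")
	int run_pure_dynptr_key(void *ctx)
	{
		return test_pure_dynptr_key_htab((struct bpf_map *)&htab_1);
	}

	/* User-space side: no pid filter and no usleep() needed */
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	int prog_fd = bpf_program__fd(skel->progs.run_pure_dynptr_key);
	int err = bpf_prog_test_run_opts(prog_fd, &opts);

	ASSERT_OK(err, "prog_run");
	ASSERT_EQ(opts.retval, 0, "retval");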

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/htab_dynkey_test.c b/tools/testing/selftests/bpf/prog_tests/htab_dynkey_test.c
new file mode 100644
index 000000000000..30fc085cfc4c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/htab_dynkey_test.c
@@ -0,0 +1,451 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <test_progs.h>
+
+#include "htab_dynkey_test_success.skel.h"
+#include "htab_dynkey_test_failure.skel.h"
+
+struct id_dname_key {
+	int id;
+	struct bpf_dynptr_user name;
+};
+
+struct dname_key {
+	struct bpf_dynptr_user name;
+};
+
+struct multiple_dynptr_key {
+	struct dname_key f_1;
+	unsigned long f_2;
+	struct id_dname_key f_3;
+	unsigned long f_4;
+};
+
+static char *name_list[] = {
+	"systemd",
+	"[rcu_sched]",
+	"[kworker/42:0H-events_highpri]",
+	"[ksoftirqd/58]",
+	"[rcu_tasks_trace]",
+};
+
+#define INIT_VALUE 100
+#define INIT_ID 1000
+
+static void setup_pure_dynptr_key_map(int fd)
+{
+	struct bpf_dynptr_user key, _cur_key, _next_key;
+	struct bpf_dynptr_user *cur_key, *next_key;
+	bool marked[ARRAY_SIZE(name_list)];
+	unsigned int i, next_idx, size;
+	unsigned long value, got;
+	char name[2][64];
+	char msg[64];
+	void *data;
+	int err;
+
+	/* lookup non-existent keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u bad lookup", i);
+		/* Use strdup() to ensure that the content pointed to by the
+		 * dynptr is used for the lookup instead of the pointer stored
+		 * in the dynptr. sys_bpf() will handle the NULL case properly.
+		 */
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		err = bpf_map_lookup_elem(fd, &key, &value);
+		ASSERT_EQ(err, -ENOENT, msg);
+		free(data);
+	}
+
+	/* update keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u insert", i);
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		value = INIT_VALUE + i;
+		err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* lookup existent keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u lookup", i);
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		got = 0;
+		err = bpf_map_lookup_elem(fd, &key, &got);
+		ASSERT_OK(err, msg);
+		free(data);
+
+		value = INIT_VALUE + i;
+		ASSERT_EQ(got, value, msg);
+	}
+
+	/* delete keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u delete", i);
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		err = bpf_map_delete_elem(fd, &key);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* re-insert keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u re-insert", i);
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		value = 0;
+		err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* overwrite keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u overwrite", i);
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		value = INIT_VALUE + i;
+		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* get_next keys */
+	next_idx = 0;
+	cur_key = NULL;
+	next_key = &_next_key;
+	memset(&marked, 0, sizeof(marked));
+	while (true) {
+		bpf_dynptr_user_init(name[next_idx], sizeof(name[next_idx]), next_key);
+		err = bpf_map_get_next_key(fd, cur_key, next_key);
+		if (err) {
+			ASSERT_EQ(err, -ENOENT, "get_next_key");
+			break;
+		}
+
+		size = bpf_dynptr_user_size(next_key);
+		data = bpf_dynptr_user_data(next_key);
+		for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+			if (size == strlen(name_list[i]) + 1 &&
+			    !memcmp(name_list[i], data, size)) {
+				ASSERT_FALSE(marked[i], name_list[i]);
+				marked[i] = true;
+				break;
+			}
+		}
+		ASSERT_EQ(next_key->rsvd, 0, "rsvd");
+
+		if (!cur_key)
+			cur_key = &_cur_key;
+		*cur_key = *next_key;
+		next_idx ^= 1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(marked); i++)
+		ASSERT_TRUE(marked[i], name_list[i]);
+
+	/* lookup_and_delete all elements except the first one */
+	for (i = 1; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u lookup_delete", i);
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key);
+		got = 0;
+		err = bpf_map_lookup_and_delete_elem(fd, &key, &got);
+		ASSERT_OK(err, msg);
+		free(data);
+
+		value = INIT_VALUE + i;
+		ASSERT_EQ(got, value, msg);
+	}
+
+	/* get the key after the first element */
+	cur_key = &_cur_key;
+	strncpy(name[0], name_list[0], sizeof(name[0]) - 1);
+	name[0][sizeof(name[0]) - 1] = 0;
+	bpf_dynptr_user_init(name[0], strlen(name[0]) + 1, cur_key);
+
+	next_key = &_next_key;
+	bpf_dynptr_user_init(name[1], sizeof(name[1]), next_key);
+	err = bpf_map_get_next_key(fd, cur_key, next_key);
+	ASSERT_EQ(err, -ENOENT, "get_last");
+}
+
+static void setup_mixed_dynptr_key_map(int fd)
+{
+	struct id_dname_key key, _cur_key, _next_key;
+	struct id_dname_key *cur_key, *next_key;
+	bool marked[ARRAY_SIZE(name_list)];
+	unsigned int i, next_idx, size;
+	unsigned long value;
+	char name[2][64];
+	char msg[64];
+	void *data;
+	int err;
+
+	/* Zero the hole */
+	memset(&key, 0, sizeof(key));
+
+	/* lookup non-existent keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u bad lookup", i);
+		key.id = INIT_ID + i;
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key.name);
+		err = bpf_map_lookup_elem(fd, &key, &value);
+		ASSERT_EQ(err, -ENOENT, msg);
+		free(data);
+	}
+
+	/* update keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u insert", i);
+		key.id = INIT_ID + i;
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key.name);
+		value = INIT_VALUE + i;
+		err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* lookup existent keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		unsigned long got = 0;
+
+		snprintf(msg, sizeof(msg), "#%u lookup", i);
+		key.id = INIT_ID + i;
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key.name);
+		err = bpf_map_lookup_elem(fd, &key, &got);
+		ASSERT_OK(err, msg);
+		free(data);
+
+		value = INIT_VALUE + i;
+		ASSERT_EQ(got, value, msg);
+	}
+
+	/* delete keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u delete", i);
+		key.id = INIT_ID + i;
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key.name);
+		err = bpf_map_delete_elem(fd, &key);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* re-insert keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u re-insert", i);
+		key.id = INIT_ID + i;
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key.name);
+		value = 0;
+		err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* overwrite keys */
+	for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+		snprintf(msg, sizeof(msg), "#%u overwrite", i);
+		key.id = INIT_ID + i;
+		data = strdup(name_list[i]);
+		bpf_dynptr_user_init(data, strlen(name_list[i]) + 1, &key.name);
+		value = INIT_VALUE + i;
+		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+		ASSERT_OK(err, msg);
+		free(data);
+	}
+
+	/* get_next keys */
+	next_idx = 0;
+	cur_key = NULL;
+	next_key = &_next_key;
+	memset(&marked, 0, sizeof(marked));
+	while (true) {
+		bpf_dynptr_user_init(name[next_idx], sizeof(name[next_idx]), &next_key->name);
+		err = bpf_map_get_next_key(fd, cur_key, next_key);
+		if (err) {
+			ASSERT_EQ(err, -ENOENT, "last get_next");
+			break;
+		}
+
+		size = bpf_dynptr_user_size(&next_key->name);
+		data = bpf_dynptr_user_data(&next_key->name);
+		for (i = 0; i < ARRAY_SIZE(name_list); i++) {
+			if (size == strlen(name_list[i]) + 1 &&
+			    !memcmp(name_list[i], data, size)) {
+				ASSERT_FALSE(marked[i], name_list[i]);
+				ASSERT_EQ(next_key->id, INIT_ID + i, name_list[i]);
+				marked[i] = true;
+				break;
+			}
+		}
+		ASSERT_EQ(next_key->name.rsvd, 0, "rsvd");
+
+		if (!cur_key)
+			cur_key = &_cur_key;
+		*cur_key = *next_key;
+		next_idx ^= 1;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(marked); i++)
+		ASSERT_TRUE(marked[i], name_list[i]);
+}
+
+static void setup_multiple_dynptr_key_map(int fd)
+{
+	struct multiple_dynptr_key key, cur_key, next_key;
+	unsigned long value;
+	unsigned int size;
+	char name[4][64];
+	void *data[2];
+	int err;
+
+	/* Zero the hole */
+	memset(&key, 0, sizeof(key));
+
+	key.f_2 = 2;
+	key.f_3.id = 3;
+	key.f_4 = 4;
+
+	/* lookup a non-existent key */
+	data[0] = strdup(name_list[0]);
+	data[1] = strdup(name_list[1]);
+	bpf_dynptr_user_init(data[0], strlen(name_list[0]) + 1, &key.f_1.name);
+	bpf_dynptr_user_init(data[1], strlen(name_list[1]) + 1, &key.f_3.name);
+	err = bpf_map_lookup_elem(fd, &key, &value);
+	ASSERT_EQ(err, -ENOENT, "lookup");
+
+	/* update key */
+	value = INIT_VALUE;
+	err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+	ASSERT_OK(err, "update");
+	free(data[0]);
+	free(data[1]);
+
+	/* lookup key */
+	data[0] = strdup(name_list[0]);
+	data[1] = strdup(name_list[1]);
+	bpf_dynptr_user_init(data[0], strlen(name_list[0]) + 1, &key.f_1.name);
+	bpf_dynptr_user_init(data[1], strlen(name_list[1]) + 1, &key.f_3.name);
+	err = bpf_map_lookup_elem(fd, &key, &value);
+	ASSERT_OK(err, "lookup");
+	ASSERT_EQ(value, INIT_VALUE, "lookup");
+
+	/* delete key */
+	err = bpf_map_delete_elem(fd, &key);
+	ASSERT_OK(err, "delete");
+	free(data[0]);
+	free(data[1]);
+
+	/* re-insert keys */
+	bpf_dynptr_user_init(name_list[0], strlen(name_list[0]) + 1, &key.f_1.name);
+	bpf_dynptr_user_init(name_list[1], strlen(name_list[1]) + 1, &key.f_3.name);
+	value = 0;
+	err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+	ASSERT_OK(err, "re-insert");
+
+	/* overwrite keys */
+	data[0] = strdup(name_list[0]);
+	data[1] = strdup(name_list[1]);
+	bpf_dynptr_user_init(data[0], strlen(name_list[0]) + 1, &key.f_1.name);
+	bpf_dynptr_user_init(data[1], strlen(name_list[1]) + 1, &key.f_3.name);
+	value = INIT_VALUE;
+	err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+	ASSERT_OK(err, "overwrite");
+	free(data[0]);
+	free(data[1]);
+
+	/* get_next_key */
+	bpf_dynptr_user_init(name[0], sizeof(name[0]), &next_key.f_1.name);
+	bpf_dynptr_user_init(name[1], sizeof(name[1]), &next_key.f_3.name);
+	err = bpf_map_get_next_key(fd, NULL, &next_key);
+	ASSERT_OK(err, "first get_next");
+
+	size = bpf_dynptr_user_size(&next_key.f_1.name);
+	data[0] = bpf_dynptr_user_data(&next_key.f_1.name);
+	if (ASSERT_EQ(size, strlen(name_list[0]) + 1, "f_1 size"))
+		ASSERT_TRUE(!memcmp(name_list[0], data[0], size), "f_1 data");
+	ASSERT_EQ(next_key.f_1.name.rsvd, 0, "f_1 rsvd");
+
+	ASSERT_EQ(next_key.f_2, 2, "f_2");
+
+	ASSERT_EQ(next_key.f_3.id, 3, "f_3 id");
+	size = bpf_dynptr_user_size(&next_key.f_3.name);
+	data[0] = bpf_dynptr_user_data(&next_key.f_3.name);
+	if (ASSERT_EQ(size, strlen(name_list[1]) + 1, "f_3 size"))
+		ASSERT_TRUE(!memcmp(name_list[1], data[0], size), "f_3 data");
+	ASSERT_EQ(next_key.f_3.name.rsvd, 0, "f_3 rsvd");
+
+	ASSERT_EQ(next_key.f_4, 4, "f_4");
+
+	cur_key = next_key;
+	bpf_dynptr_user_init(name[2], sizeof(name[2]), &next_key.f_1.name);
+	bpf_dynptr_user_init(name[3], sizeof(name[3]), &next_key.f_3.name);
+	err = bpf_map_get_next_key(fd, &cur_key, &next_key);
+	ASSERT_EQ(err, -ENOENT, "last get_next_key");
+}
+
+static void test_htab_dynptr_key(bool pure, bool multiple)
+{
+	struct htab_dynkey_test_success *skel;
+	struct bpf_program *prog;
+	int err;
+
+	skel = htab_dynkey_test_success__open();
+	if (!ASSERT_OK_PTR(skel, "open()"))
+		return;
+
+	prog = pure ? skel->progs.pure_dynptr_key :
+	       (multiple ? skel->progs.multiple_dynptr_key : skel->progs.mixed_dynptr_key);
+	bpf_program__set_autoload(prog, true);
+
+	err = htab_dynkey_test_success__load(skel);
+	if (!ASSERT_OK(err, "load()"))
+		goto out;
+
+	if (pure) {
+		setup_pure_dynptr_key_map(bpf_map__fd(skel->maps.htab_1));
+		setup_pure_dynptr_key_map(bpf_map__fd(skel->maps.htab_2));
+	} else if (multiple) {
+		setup_multiple_dynptr_key_map(bpf_map__fd(skel->maps.htab_4));
+	} else {
+		setup_mixed_dynptr_key_map(bpf_map__fd(skel->maps.htab_3));
+	}
+
+	skel->bss->pid = getpid();
+
+	err = htab_dynkey_test_success__attach(skel);
+	if (!ASSERT_OK(err, "attach()"))
+		goto out;
+
+	usleep(1);
+
+	ASSERT_EQ(skel->bss->test_err, 0, "test");
+out:
+	htab_dynkey_test_success__destroy(skel);
+}
+
+void test_htab_dynkey_test(void)
+{
+	if (test__start_subtest("pure_dynptr_key"))
+		test_htab_dynptr_key(true, false);
+	if (test__start_subtest("mixed_dynptr_key"))
+		test_htab_dynptr_key(false, false);
+	if (test__start_subtest("multiple_dynptr_key"))
+		test_htab_dynptr_key(false, true);
+
+	RUN_TESTS(htab_dynkey_test_failure);
+}
diff --git a/tools/testing/selftests/bpf/progs/htab_dynkey_test_failure.c b/tools/testing/selftests/bpf/progs/htab_dynkey_test_failure.c
new file mode 100644
index 000000000000..c391e4fc5320
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_dynkey_test_failure.c
@@ -0,0 +1,270 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct bpf_map;
+
+struct id_dname_key {
+	int id;
+	struct bpf_dynptr name;
+};
+
+struct dname_id_key {
+	struct bpf_dynptr name;
+	int id;
+};
+
+struct id_name_key {
+	int id;
+	char name[20];
+};
+
+struct dname_key {
+	struct bpf_dynptr name;
+};
+
+struct dname_dname_key {
+	struct bpf_dynptr name_1;
+	struct bpf_dynptr name_2;
+};
+
+struct dname_dname_id_key {
+	struct dname_dname_key names;
+	__u64 id;
+};
+
+struct dname_id_id_id_key {
+	struct bpf_dynptr name;
+	__u64 id[3];
+};
+
+struct dname_dname_dname_key {
+	struct bpf_dynptr name_1;
+	struct bpf_dynptr name_2;
+	struct bpf_dynptr name_3;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct id_dname_key);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_1 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct dname_key);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_2 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct dname_dname_id_key);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_3 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct bpf_dynptr);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_4 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_RINGBUF);
+	__uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+char dynptr_buf[32] = {};
+
+/* uninitialized dynptr */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("dynptr-key expects dynptr at offset 8")
+int BPF_PROG(uninit_dynptr)
+{
+	struct id_dname_key key;
+
+	key.id = 100;
+	bpf_map_lookup_elem(&htab_1, &key);
+
+	return 0;
+}
+
+/* invalid dynptr */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("dynptr-key expects dynptr at offset 8")
+int BPF_PROG(invalid_dynptr)
+{
+	struct id_dname_key key;
+
+	key.id = 100;
+	bpf_ringbuf_reserve_dynptr(&ringbuf, 10, 0, &key.name);
+	bpf_ringbuf_discard_dynptr(&key.name, 0);
+	bpf_map_lookup_elem(&htab_1, &key);
+
+	return 0;
+}
+
+/* expect non-dynptr, got dynptr */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("dynptr-key expects non-dynptr at offset 0")
+int BPF_PROG(invalid_non_dynptr)
+{
+	struct dname_id_key key;
+
+	__builtin_memcpy(dynptr_buf, "test", 4);
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name);
+	key.id = 100;
+	bpf_map_lookup_elem(&htab_1, &key);
+
+	return 0;
+}
+
+/* expect dynptr, got non-dynptr */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("dynptr-key expects dynptr at offset 8")
+int BPF_PROG(no_dynptr)
+{
+	struct id_name_key key;
+
+	key.id = 100;
+	__builtin_memset(key.name, 0, sizeof(key.name));
+	__builtin_memcpy(key.name, "test", 4);
+	bpf_map_lookup_elem(&htab_1, &key);
+
+	return 0;
+}
+
+/* malformed */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("malformed dynptr-key at offset 8")
+int BPF_PROG(malformed_dynptr)
+{
+	struct dname_dname_key key;
+
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name_1);
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name_2);
+
+	bpf_map_lookup_elem(&htab_2, (void *)&key + 8);
+
+	return 0;
+}
+
+/* expect non-dynptr, got dynptr */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("dynptr-key expects non-dynptr at offset 32")
+int BPF_PROG(invalid_non_dynptr_2)
+{
+	struct dname_dname_dname_key key;
+
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name_1);
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name_2);
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name_3);
+
+	bpf_map_lookup_elem(&htab_3, &key);
+
+	return 0;
+}
+
+/* expect dynptr, got non-dynptr */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("dynptr-key expects dynptr at offset 16")
+int BPF_PROG(no_dynptr_2)
+{
+	struct dname_id_id_id_key key;
+
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &key.name);
+	bpf_map_lookup_elem(&htab_3, &key);
+
+	return 0;
+}
+
+/* misaligned */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("R2 misaligned offset -28 for dynptr-key")
+int BPF_PROG(misaligned_dynptr)
+{
+	struct dname_dname_key key;
+
+	bpf_map_lookup_elem(&htab_1, (char *)&key + 4);
+
+	return 0;
+}
+
+/* variable offset */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("R2 variable offset prohibited for dynptr-key")
+int BPF_PROG(variable_offset_dynptr)
+{
+	struct bpf_dynptr dynptr_1;
+	struct bpf_dynptr dynptr_2;
+	char *key;
+
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &dynptr_1);
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &dynptr_2);
+
+	key = (char *)&dynptr_2;
+	key = key + (bpf_get_prandom_u32() & 1) * 16;
+
+	bpf_map_lookup_elem(&htab_2, key);
+
+	return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("map dynptr-key requires stack ptr but got map_value")
+int BPF_PROG(map_value_as_key)
+{
+	bpf_map_lookup_elem(&htab_1, dynptr_buf);
+
+	return 0;
+}
+
+static int lookup_htab(struct bpf_map *map, struct id_dname_key *key, void *value, void *data)
+{
+	bpf_map_lookup_elem(&htab_1, key);
+	return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("map dynptr-key requires stack ptr but got map_key")
+int BPF_PROG(map_key_as_key)
+{
+	bpf_for_each_map_elem(&htab_1, lookup_htab, NULL, 0);
+	return 0;
+}
+
+__noinline __weak int subprog_lookup_htab(struct bpf_dynptr *dynptr)
+{
+	bpf_map_lookup_elem(&htab_4, dynptr);
+	return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+__failure __msg("R2 type=dynptr_ptr expected=")
+int BPF_PROG(subprog_dynptr)
+{
+	struct bpf_dynptr dynptr;
+
+	bpf_dynptr_from_mem(dynptr_buf, 4, 0, &dynptr);
+	subprog_lookup_htab(&dynptr);
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/htab_dynkey_test_success.c b/tools/testing/selftests/bpf/progs/htab_dynkey_test_success.c
new file mode 100644
index 000000000000..52736b3519fb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/htab_dynkey_test_success.c
@@ -0,0 +1,399 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024. Huawei Technologies Co., Ltd */
+#include <linux/types.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct pure_dynptr_key {
+	struct bpf_dynptr name;
+};
+
+struct mixed_dynptr_key {
+	int id;
+	struct bpf_dynptr name;
+};
+
+struct multiple_dynptr_key {
+	struct pure_dynptr_key f_1;
+	unsigned long f_2;
+	struct mixed_dynptr_key f_3;
+	unsigned long f_4;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct bpf_dynptr);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_1 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct pure_dynptr_key);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_2 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct mixed_dynptr_key);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_3 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 10);
+	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_DYNPTR_IN_KEY);
+	__type(key, struct multiple_dynptr_key);
+	__type(value, unsigned long);
+	__uint(map_extra, 1024);
+} htab_4 SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_RINGBUF);
+	__uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+int pid = 0;
+int test_err = 0;
+char dynptr_buf[2][32] = {{}, {}};
+
+static const char systemd_name[] = "systemd";
+static const char udevd_name[] = "udevd";
+static const char rcu_sched_name[] = "[rcu_sched]";
+
+struct bpf_map;
+
+static int test_pure_dynptr_key_htab(struct bpf_map *htab)
+{
+	unsigned long new_value, *value;
+	struct bpf_dynptr key;
+	int err = 0;
+
+	/* Lookup an existent key */
+	__builtin_memcpy(dynptr_buf[0], systemd_name, sizeof(systemd_name));
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(systemd_name), 0, &key);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (!value) {
+		err = 1;
+		goto out;
+	}
+	if (*value != 100) {
+		err = 2;
+		goto out;
+	}
+
+	/* Look up a non-existent key */
+	__builtin_memcpy(dynptr_buf[0], udevd_name, sizeof(udevd_name));
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(udevd_name), 0, &key);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (value) {
+		err = 3;
+		goto out;
+	}
+
+	/* Insert a new key */
+	new_value = 42;
+	err = bpf_map_update_elem(htab, &key, &new_value, BPF_NOEXIST);
+	if (err) {
+		err = 4;
+		goto out;
+	}
+
+	/* Insert an existent key */
+	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(udevd_name), 0, &key);
+	err = bpf_dynptr_write(&key, 0, (void *)udevd_name, sizeof(udevd_name), 0);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key, 0);
+		err = 5;
+		goto out;
+	}
+
+	err = bpf_map_update_elem(htab, &key, &new_value, BPF_NOEXIST);
+	bpf_ringbuf_discard_dynptr(&key, 0);
+	if (err != -EEXIST) {
+		err = 6;
+		goto out;
+	}
+
+	/* Lookup it again */
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(udevd_name), 0, &key);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (!value) {
+		err = 7;
+		goto out;
+	}
+	if (*value != 42) {
+		err = 8;
+		goto out;
+	}
+
+	/* Delete then lookup it */
+	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(udevd_name), 0, &key);
+	err = bpf_dynptr_write(&key, 0, (void *)udevd_name, sizeof(udevd_name), 0);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key, 0);
+		err = 9;
+		goto out;
+	}
+	err = bpf_map_delete_elem(htab, &key);
+	bpf_ringbuf_discard_dynptr(&key, 0);
+	if (err) {
+		err = 10;
+		goto out;
+	}
+
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(udevd_name), 0, &key);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (value) {
+		err = 10;
+		goto out;
+	}
+out:
+	return err;
+}
+
+static int test_mixed_dynptr_key_htab(struct bpf_map *htab)
+{
+	unsigned long new_value, *value;
+	char udevd_name[] = "udevd";
+	struct mixed_dynptr_key key;
+	int err = 0;
+
+	__builtin_memset(&key, 0, sizeof(key));
+	key.id = 1000;
+
+	/* Lookup an existent key */
+	__builtin_memcpy(dynptr_buf[0], systemd_name, sizeof(systemd_name));
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(systemd_name), 0, &key.name);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (!value) {
+		err = 1;
+		goto out;
+	}
+	if (*value != 100) {
+		err = 2;
+		goto out;
+	}
+
+	/* Look up a non-existent key */
+	__builtin_memcpy(dynptr_buf[0], udevd_name, sizeof(udevd_name));
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(udevd_name), 0, &key.name);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (value) {
+		err = 3;
+		goto out;
+	}
+
+	/* Insert a new key */
+	new_value = 42;
+	err = bpf_map_update_elem(htab, &key, &new_value, BPF_NOEXIST);
+	if (err) {
+		err = 4;
+		goto out;
+	}
+
+	/* Insert an existent key */
+	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(udevd_name), 0, &key.name);
+	err = bpf_dynptr_write(&key.name, 0, (void *)udevd_name, sizeof(udevd_name), 0);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key.name, 0);
+		err = 5;
+		goto out;
+	}
+
+	err = bpf_map_update_elem(htab, &key, &new_value, BPF_NOEXIST);
+	bpf_ringbuf_discard_dynptr(&key.name, 0);
+	if (err != -EEXIST) {
+		err = 6;
+		goto out;
+	}
+
+	/* Lookup it again */
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(udevd_name), 0, &key.name);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (!value) {
+		err = 7;
+		goto out;
+	}
+	if (*value != 42) {
+		err = 8;
+		goto out;
+	}
+
+	/* Delete then lookup it */
+	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(udevd_name), 0, &key.name);
+	err = bpf_dynptr_write(&key.name, 0, (void *)udevd_name, sizeof(udevd_name), 0);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key.name, 0);
+		err = 9;
+		goto out;
+	}
+	err = bpf_map_delete_elem(htab, &key);
+	bpf_ringbuf_discard_dynptr(&key.name, 0);
+	if (err) {
+		err = 10;
+		goto out;
+	}
+
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(udevd_name), 0, &key.name);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (value) {
+		err = 10;
+		goto out;
+	}
+out:
+	return err;
+}
+
+static int test_multiple_dynptr_key_htab(struct bpf_map *htab)
+{
+	unsigned long new_value, *value;
+	struct multiple_dynptr_key key;
+	int err = 0;
+
+	__builtin_memset(&key, 0, sizeof(key));
+	key.f_2 = 2;
+	key.f_3.id = 3;
+	key.f_4 = 4;
+
+	/* Lookup an existent key */
+	__builtin_memcpy(dynptr_buf[0], systemd_name, sizeof(systemd_name));
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(systemd_name), 0, &key.f_1.name);
+	__builtin_memcpy(dynptr_buf[1], rcu_sched_name, sizeof(rcu_sched_name));
+	bpf_dynptr_from_mem(dynptr_buf[1], sizeof(rcu_sched_name), 0, &key.f_3.name);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (!value) {
+		err = 1;
+		goto out;
+	}
+	if (*value != 100) {
+		err = 2;
+		goto out;
+	}
+
+	/* Look up a non-existent key */
+	bpf_dynptr_from_mem(dynptr_buf[1], sizeof(rcu_sched_name), 0, &key.f_1.name);
+	bpf_dynptr_from_mem(dynptr_buf[0], sizeof(systemd_name), 0, &key.f_3.name);
+	value = bpf_map_lookup_elem(htab, &key);
+	if (value) {
+		err = 3;
+		goto out;
+	}
+
+	/* Insert a new key */
+	new_value = 42;
+	err = bpf_map_update_elem(htab, &key, &new_value, BPF_NOEXIST);
+	if (err) {
+		err = 4;
+		goto out;
+	}
+
+	/* Insert an existent key */
+	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(rcu_sched_name), 0, &key.f_1.name);
+	err = bpf_dynptr_write(&key.f_1.name, 0, (void *)rcu_sched_name, sizeof(rcu_sched_name), 0);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key.f_1.name, 0);
+		err = 5;
+		goto out;
+	}
+	err = bpf_map_update_elem(htab, &key, &new_value, BPF_NOEXIST);
+	bpf_ringbuf_discard_dynptr(&key.f_1.name, 0);
+	if (err != -EEXIST) {
+		err = 6;
+		goto out;
+	}
+
+	/* Lookup a non-existent key */
+	bpf_dynptr_from_mem(dynptr_buf[1], sizeof(rcu_sched_name), 0, &key.f_1.name);
+	key.f_4 = 0;
+	value = bpf_map_lookup_elem(htab, &key);
+	if (value) {
+		err = 7;
+		goto out;
+	}
+
+	/* Lookup an existent key */
+	key.f_4 = 4;
+	value = bpf_map_lookup_elem(htab, &key);
+	if (!value) {
+		err = 8;
+		goto out;
+	}
+	if (*value != 42) {
+		err = 9;
+		goto out;
+	}
+
+	/* Delete the newly-inserted key */
+	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(systemd_name), 0, &key.f_3.name);
+	err = bpf_dynptr_write(&key.f_3.name, 0, (void *)systemd_name, sizeof(systemd_name), 0);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key.f_3.name, 0);
+		err = 10;
+		goto out;
+	}
+	err = bpf_map_delete_elem(htab, &key);
+	if (err) {
+		bpf_ringbuf_discard_dynptr(&key.f_3.name, 0);
+		err = 11;
+		goto out;
+	}
+
+	/* Lookup it again */
+	value = bpf_map_lookup_elem(htab, &key);
+	bpf_ringbuf_discard_dynptr(&key.f_3.name, 0);
+	if (value) {
+		err = 12;
+		goto out;
+	}
+out:
+	return err;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int BPF_PROG(pure_dynptr_key)
+{
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 0;
+
+	test_err = test_pure_dynptr_key_htab((struct bpf_map *)&htab_1);
+	test_err |= test_pure_dynptr_key_htab((struct bpf_map *)&htab_2) << 8;
+
+	return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int BPF_PROG(mixed_dynptr_key)
+{
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 0;
+
+	test_err = test_mixed_dynptr_key_htab((struct bpf_map *)&htab_3);
+
+	return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int BPF_PROG(multiple_dynptr_key)
+{
+	if (bpf_get_current_pid_tgid() >> 32 != pid)
+		return 0;
+
+	test_err = test_multiple_dynptr_key_htab((struct bpf_map *)&htab_4);
+
+	return 0;
+}