
[bpf-next,v1,3/4] samples/bpf: Add concurrency testing for BPF htab map's used size

Message ID 20221105025146.238209-4-horenchuang@bytedance.com (mailing list archive)
State New
Series Add BPF htab map's used size for monitoring

Commit Message

Ho-Ren (Jack) Chuang Nov. 5, 2022, 2:51 a.m. UTC
Add test cases that exercise the htab map's used_size under concurrent
updates.

Support the hash table type (BPF_MAP_TYPE_HASH), covering both
preallocated and BPF_F_NO_PREALLOC maps.
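
The sample forks one worker per online CPU by default; each worker pins
itself to its CPU and repeatedly issues mount()/umount2() syscalls so the
attached kprobe programs hammer the hash maps with updates. Based on the
argument parsing in test_map_used_user.c, an invocation sketch (all
arguments optional) would be:

  ./test_map_used [test_flags] [nr_cpus] [num_map_entries] [max_cnt]
  ./test_map_used 1 4             # only the preallocated-map test, on 4 CPUs
  ./test_map_used 3 8 2000 5000   # both tests, 8 CPUs, 2000-entry maps, 5000 syscalls each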

Signed-off-by: Ho-Ren (Jack) Chuang <horenchuang@bytedance.com>
---
 samples/bpf/Makefile             |   4 +
 samples/bpf/test_map_used_kern.c |  65 ++++++++++
 samples/bpf/test_map_used_user.c | 204 +++++++++++++++++++++++++++++++
 3 files changed, 273 insertions(+)
 create mode 100644 samples/bpf/test_map_used_kern.c
 create mode 100644 samples/bpf/test_map_used_user.c

Patch

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 727da3c5879b..8725d0d64a21 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -40,6 +40,7 @@  tprogs-y += tc_l2_redirect
 tprogs-y += lwt_len_hist
 tprogs-y += xdp_tx_iptunnel
 tprogs-y += test_map_in_map
+tprogs-y += test_map_used
 tprogs-y += per_socket_stats_example
 tprogs-y += xdp_rxq_info
 tprogs-y += syscall_tp
@@ -101,6 +102,7 @@  tc_l2_redirect-objs := tc_l2_redirect_user.o
 lwt_len_hist-objs := lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
 test_map_in_map-objs := test_map_in_map_user.o
+test_map_used-objs := test_map_used_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_rxq_info-objs := xdp_rxq_info_user.o
 syscall_tp-objs := syscall_tp_user.o
@@ -153,6 +155,7 @@  always-y += sampleip_kern.o
 always-y += lwt_len_hist_kern.o
 always-y += xdp_tx_iptunnel_kern.o
 always-y += test_map_in_map_kern.o
+always-y += test_map_used_kern.o
 always-y += tcp_synrto_kern.o
 always-y += tcp_rwnd_kern.o
 always-y += tcp_bufs_kern.o
@@ -216,6 +219,7 @@  TPROGLDLIBS_xdp_router_ipv4	+= -lm -pthread
 TPROGLDLIBS_tracex4		+= -lrt
 TPROGLDLIBS_trace_output	+= -lrt
 TPROGLDLIBS_map_perf_test	+= -lrt
+TPROGLDLIBS_test_map_used	+= -lrt
 TPROGLDLIBS_test_overhead	+= -lrt
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
diff --git a/samples/bpf/test_map_used_kern.c b/samples/bpf/test_map_used_kern.c
new file mode 100644
index 000000000000..e908593c1f09
--- /dev/null
+++ b/samples/bpf/test_map_used_kern.c
@@ -0,0 +1,65 @@ 
+/* Copyright (c) 2022 ByteDance
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
+
+#define MAX_ENTRIES 1000
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+} touch_hash_no_prealloc SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u32);
+	__type(value, long);
+	__uint(max_entries, MAX_ENTRIES);
+} touch_hash_prealloc SEC(".maps");
+
+SEC("kprobe/" SYSCALL(sys_mount))
+int stress_hmap_alloc(struct pt_regs *ctx)
+{
+	u32 key, i;
+	long init_val = bpf_get_current_pid_tgid();
+
+#pragma clang loop unroll(full)
+	for (i = 0; i < MAX_ENTRIES; ++i) {
+		key = i;
+		bpf_map_update_elem(&touch_hash_no_prealloc,
+							&key, &init_val, BPF_ANY);
+	}
+
+	return 0;
+}
+
+SEC("kprobe/" SYSCALL(sys_umount))
+int stress_hmap_prealloc(struct pt_regs *ctx)
+{
+	u32 key, i;
+	long init_val = bpf_get_current_pid_tgid();
+
+#pragma clang loop unroll(full)
+	for (i = 0; i < MAX_ENTRIES; ++i) {
+		key = i;
+		bpf_map_update_elem(&touch_hash_prealloc,
+							&key, &init_val, BPF_ANY);
+	}
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/test_map_used_user.c b/samples/bpf/test_map_used_user.c
new file mode 100644
index 000000000000..797f6ca7434d
--- /dev/null
+++ b/samples/bpf/test_map_used_user.c
@@ -0,0 +1,204 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2022 ByteDance
+ */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <asm/unistd.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <time.h>
+#include <sys/resource.h>
+#include <arpa/inet.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#define TEST_BIT(t) (1U << (t))
+#define MAX_NR_CPUS 1024
+
+static __u64 time_get_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+enum test_type {
+	HASH_TOUCH_PREALLOC,
+	HASH_TOUCH,
+	NR_TESTS,
+};
+
+const char *test_map_names[NR_TESTS] = {
+	[HASH_TOUCH_PREALLOC] = "touch_hash_prealloc",
+	[HASH_TOUCH] = "touch_hash_no_prealloc",
+};
+
+static int test_flags = ~0;
+static __u32 num_map_entries;
+static __u32 inner_lru_hash_size;
+static __u32 max_cnt = 1000;
+
+static int check_test_flags(enum test_type t)
+{
+	return test_flags & TEST_BIT(t);
+}
+
+static void test_hash_touch_prealloc(int cpu)
+{
+	__u64 start_time;
+	int i;
+
+	start_time = time_get_ns();
+	for (i = 0; i < max_cnt; i++)
+		syscall(__NR_umount2, NULL, 0);
+	printf("%d:hash_touch pre-alloc %lld touches per sec\n",
+		   cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_hash_touch(int cpu)
+{
+	__u64 start_time;
+	int i;
+
+	start_time = time_get_ns();
+	for (i = 0; i < max_cnt; i++)
+		syscall(__NR_mount, NULL, NULL, NULL, 0, NULL);
+	printf("%d:hash_touch %lld touches per sec\n",
+		   cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
+}
+
+typedef void (*test_func)(int cpu);
+const test_func test_funcs[] = {
+	[HASH_TOUCH_PREALLOC] = test_hash_touch_prealloc,
+	[HASH_TOUCH] = test_hash_touch,
+};
+
+static void loop(int cpu)
+{
+	cpu_set_t cpuset;
+	int i;
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(cpu, &cpuset);
+	sched_setaffinity(0, sizeof(cpuset), &cpuset);
+
+	for (i = 0; i < NR_TESTS; i++) {
+		if (check_test_flags(i))
+			test_funcs[i](cpu);
+	}
+}
+
+static void run_perf_test(int tasks)
+{
+	pid_t pid[tasks];
+	int i;
+
+	for (i = 0; i < tasks; i++) {
+		pid[i] = fork();
+		if (pid[i])
+			printf("Spawn process #%d [%u]\n", i, pid[i]);
+
+		if (pid[i] == 0) {
+			loop(i);
+			exit(0);
+		} else if (pid[i] == -1) {
+			printf("couldn't spawn #%d process\n", i);
+			exit(1);
+		}
+	}
+	for (i = 0; i < tasks; i++) {
+		int status;
+
+		assert(waitpid(pid[i], &status, 0) == pid[i]);
+		assert(status == 0);
+	}
+}
+
+static void fixup_map(struct bpf_object *obj)
+{
+	struct bpf_map *map;
+	int i;
+
+	bpf_object__for_each_map(map, obj) {
+		const char *name = bpf_map__name(map);
+
+		/* Only change the max_entries for the enabled test(s) */
+		for (i = 0; i < NR_TESTS; i++) {
+			if (!strcmp(test_map_names[i], name) &&
+				(check_test_flags(i))) {
+				bpf_map__set_max_entries(map, num_map_entries);
+				continue;
+			}
+		}
+	}
+
+	inner_lru_hash_size = num_map_entries;
+}
+
+int main(int argc, char **argv)
+{
+	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	struct bpf_link *links[8];
+	struct bpf_program *prog;
+	struct bpf_object *obj;
+	char filename[256];
+	int i = 0;
+
+	if (argc > 1)
+		test_flags = atoi(argv[1]) ? : test_flags;
+
+	if (argc > 2)
+		nr_cpus = atoi(argv[2]) ? : nr_cpus;
+
+	if (argc > 3)
+		num_map_entries = atoi(argv[3]);
+
+	if (argc > 4)
+		max_cnt = atoi(argv[4]);
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	obj = bpf_object__open_file(filename, NULL);
+	if (libbpf_get_error(obj)) {
+		fprintf(stderr, "ERROR: opening BPF object file failed\n");
+		return 0;
+	}
+
+	/* resize BPF map prior to loading */
+	if (num_map_entries > 0)
+		fixup_map(obj);
+
+	/* load BPF program */
+	if (bpf_object__load(obj)) {
+		fprintf(stderr, "ERROR: loading BPF object file failed\n");
+		goto cleanup;
+	}
+
+	bpf_object__for_each_program(prog, obj) {
+		links[i] = bpf_program__attach(prog);
+		if (libbpf_get_error(links[i])) {
+			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+			links[i] = NULL;
+			goto cleanup;
+		}
+		i++;
+	}
+
+	run_perf_test(nr_cpus);
+
+cleanup:
+	for (i--; i >= 0; i--)
+		bpf_link__destroy(links[i]);
+
+	bpf_object__close(obj);
+	return 0;
+}