
[bpf-next,v7] selftests/bpf: trace_helpers.c: optimize kallsyms cache

Message ID tencent_BD6E19C00BF565CD5C36A9A0BD828CFA210A@qq.com (mailing list archive)
State New
Series [bpf-next,v7] selftests/bpf: trace_helpers.c: optimize kallsyms cache

Commit Message

Rong Tao Aug. 25, 2023, 10:36 a.m. UTC
From: Rong Tao <rongtao@cestc.cn>

The static ksyms array often runs into trouble because the number of symbols
in /proc/kallsyms exceeds the MAX_SYMS limit. Bumping MAX_SYMS from 300000 to
400000 in commit e76a014334a6 ("selftests/bpf: Bump and validate MAX_SYMS")
alleviates the problem somewhat, but it is not a proper fix.

This commit switches to dynamic memory allocation, which removes the
limit on the number of kallsyms entries entirely.

Acked-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Rong Tao <rongtao@cestc.cn>
---
v7: Fix __must_check macro.
v6: https://lore.kernel.org/lkml/tencent_4A09A36F883A06EA428A593497642AF8AF08@qq.com/
    Apply libbpf_ensure_mem()
v5: https://lore.kernel.org/lkml/tencent_0E9E1A1C0981678D5E7EA9E4BDBA8EE2200A@qq.com/
    Release the allocated memory when load_kallsyms_refresh() fails,
    given it's dynamically allocated.
v4: https://lore.kernel.org/lkml/tencent_59C74613113F0C728524B2A82FE5540A5E09@qq.com/
    Make sure that in most cases we don't need the realloc() path to begin
    with, and check the strdup() return value.
v3: https://lore.kernel.org/lkml/tencent_50B4B2622FE7546A5FF9464310650C008509@qq.com/
    Do not use structs, and check the ksyms__add_symbol() return value.
v2: https://lore.kernel.org/lkml/tencent_B655EE5E5D463110D70CD2846AB3262EED09@qq.com/
    Do the usual len/capacity scheme here to amortize the cost of realloc, and
    don't free symbols.
v1: https://lore.kernel.org/lkml/tencent_AB461510B10CD484E0B2F62E3754165F2909@qq.com/
---
 samples/bpf/Makefile                        |  2 +
 tools/testing/selftests/bpf/trace_helpers.c | 58 ++++++++++++++++-----
 2 files changed, 48 insertions(+), 12 deletions(-)
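
The fix relies on the len/capacity scheme noted in the v2 changelog: grow the
array geometrically so the cost of realloc() is amortized. The patch delegates
this to libbpf's libbpf_ensure_mem(); the following generic sketch (illustrative
names, not the patch code, and the exact growth factor used by libbpf may
differ) shows the idea:

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	/* Grow *data so it can hold at least 'need' elements of 'elem_sz' bytes,
	 * zeroing the newly added tail. libbpf_ensure_mem() provides this kind
	 * of service for the patch. */
	static int ensure_cap(void **data, size_t *cap, size_t elem_sz, size_t need)
	{
		size_t new_cap;
		void *tmp;

		if (need <= *cap)
			return 0;

		new_cap = *cap ? *cap + *cap / 2 : 256;	/* grow by ~1.5x */
		if (new_cap < need)
			new_cap = need;

		tmp = realloc(*data, new_cap * elem_sz);
		if (!tmp)
			return -ENOMEM;

		memset((char *)tmp + *cap * elem_sz, 0, (new_cap - *cap) * elem_sz);
		*data = tmp;
		*cap = new_cap;
		return 0;
	}

With geometric growth, a full pass over /proc/kallsyms triggers only O(log n)
reallocations rather than one per symbol.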

Comments

Jiri Olsa Aug. 25, 2023, 4:19 p.m. UTC | #1
On Fri, Aug 25, 2023 at 06:36:24PM +0800, Rong Tao wrote:
> From: Rong Tao <rongtao@cestc.cn>
> 
> The static ksyms array often runs into trouble because the number of symbols
> in /proc/kallsyms exceeds the MAX_SYMS limit. Bumping MAX_SYMS from 300000 to
> 400000 in commit e76a014334a6 ("selftests/bpf: Bump and validate MAX_SYMS")
> alleviates the problem somewhat, but it is not a proper fix.
> 
> This commit switches to dynamic memory allocation, which removes the
> limit on the number of kallsyms entries entirely.
> 
> Acked-by: Stanislav Fomichev <sdf@google.com>
> Signed-off-by: Rong Tao <rongtao@cestc.cn>
> ---
> v7: Fix __must_check macro.
> v6: https://lore.kernel.org/lkml/tencent_4A09A36F883A06EA428A593497642AF8AF08@qq.com/
>     Apply libbpf_ensure_mem()
> v5: https://lore.kernel.org/lkml/tencent_0E9E1A1C0981678D5E7EA9E4BDBA8EE2200A@qq.com/
>     Release the allocated memory when load_kallsyms_refresh() fails,
>     given it's dynamically allocated.
> v4: https://lore.kernel.org/lkml/tencent_59C74613113F0C728524B2A82FE5540A5E09@qq.com/
>     Make sure that in most cases we don't need the realloc() path to begin
>     with, and check the strdup() return value.
> v3: https://lore.kernel.org/lkml/tencent_50B4B2622FE7546A5FF9464310650C008509@qq.com/
>     Do not use structs, and check the ksyms__add_symbol() return value.
> v2: https://lore.kernel.org/lkml/tencent_B655EE5E5D463110D70CD2846AB3262EED09@qq.com/
>     Do the usual len/capacity scheme here to amortize the cost of realloc, and
>     don't free symbols.
> v1: https://lore.kernel.org/lkml/tencent_AB461510B10CD484E0B2F62E3754165F2909@qq.com/
> ---
>  samples/bpf/Makefile                        |  2 +
>  tools/testing/selftests/bpf/trace_helpers.c | 58 ++++++++++++++++-----
>  2 files changed, 48 insertions(+), 12 deletions(-)
> 
> diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> index 4ccf4236031c..0cd45c42af2f 100644
> --- a/samples/bpf/Makefile
> +++ b/samples/bpf/Makefile
> @@ -175,6 +175,7 @@ TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
>  TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
>  TPROGS_CFLAGS += -I$(srctree)/tools/include
>  TPROGS_CFLAGS += -I$(srctree)/tools/perf
> +TPROGS_CFLAGS += -I$(srctree)/tools/lib
>  TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
>  
>  ifdef SYSROOT
> @@ -314,6 +315,7 @@ XDP_SAMPLE_CFLAGS += -Wall -O2 \
>  
>  $(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS)
>  $(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h
> +$(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=

please add a comment on why we do that, perhaps like:

+# Override includes for trace_helpers.o because __must_check won't be
+# defined in our include path.
+$(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=
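
For background: in kernel headers, __must_check normally expands to the
warn_unused_result attribute, so defining it to empty here only drops that
warning for the samples build. An alternative (just a sketch, not what the
patch does) would be a guarded fallback definition:

	/* Typical definition, matching the kernel's compiler attribute headers;
	 * a build that lacks it could provide this guarded fallback instead of
	 * blanking the macro out on the command line. */
	#ifndef __must_check
	#define __must_check __attribute__((__warn_unused_result__))
	#endif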

>  
>  -include $(BPF_SAMPLES_PATH)/Makefile.target
>  
> diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
> index f83d9f65c65b..d62ab3b77153 100644
> --- a/tools/testing/selftests/bpf/trace_helpers.c
> +++ b/tools/testing/selftests/bpf/trace_helpers.c
> @@ -14,13 +14,44 @@
>  #include <linux/limits.h>
>  #include <libelf.h>
>  #include <gelf.h>
> +#include "bpf/libbpf_internal.h"
>  
>  #define TRACEFS_PIPE	"/sys/kernel/tracing/trace_pipe"
>  #define DEBUGFS_PIPE	"/sys/kernel/debug/tracing/trace_pipe"
>  
> -#define MAX_SYMS 400000
> -static struct ksym syms[MAX_SYMS];
> -static int sym_cnt;
> +static struct ksym *syms;
> +static size_t sym_cap;
> +static size_t sym_cnt;
> +
> +static int ksyms__add_symbol(const char *name, unsigned long addr)
> +{
> +	void *tmp;
> +
> +	tmp = strdup(name);
> +	if (!tmp)
> +		return -ENOMEM;
> +	syms[sym_cnt].addr = addr;
> +	syms[sym_cnt].name = tmp;
> +
> +	sym_cnt++;
> +
> +	return 0;
> +}
> +
> +static void ksyms__free(void)
> +{
> +	unsigned int i;
> +
> +	if (!syms)
> +		return;
> +
> +	for (i = 0; i < sym_cnt; i++)
> +		free(syms[i].name);
> +	free(syms);
> +	syms = NULL;
> +	sym_cnt = 0;
> +	sym_cap = 0;
> +}
>  
>  static int ksym_cmp(const void *p1, const void *p2)
>  {
> @@ -33,9 +64,7 @@ int load_kallsyms_refresh(void)
>  	char func[256], buf[256];
>  	char symbol;
>  	void *addr;
> -	int i = 0;
> -
> -	sym_cnt = 0;

you need to release all symbols in here; for the static version it was
enough to zero the sym_cnt value, but we need to release the current
symbols now

also, after discussing with Daniel, we found there's a problem with
parallel runs, where load_kallsyms, load_kallsyms_refresh and other
helpers like ksym_search are racy, because another test can just drop
the cache

that was a problem even before, but I wonder if it will show up more
now, because we will touch a freed pointer instead of accessing the
same static array

perhaps we could fix that with 2 sets of functions:

  - load_kallsyms
    ksym_search
    ksym_get_addr

    that work like now on global syms pointer, perhaps we should
    initialize it before we run tests or use mutex for init

  - struct ksym *load_kallsyms_local()
    ksym_search_local(struct ksym *syms, ...)
    ksym_get_addr_local(struct ksym *syms, ...)

    that work on local ksyms cache and the test frees it at the end,
    it would be used in all tests that need updated kallsyms data and
    use load_kallsyms_refresh for that


jirka
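
One rough sketch of the "mutex for init" idea for the global variant
(hypothetical code, not part of this patch; pthread_once() is one way to
serialize the first population of the cache):

	#include <pthread.h>
	#include "trace_helpers.h"

	static pthread_once_t ksyms_once = PTHREAD_ONCE_INIT;
	static int ksyms_init_err;

	static void ksyms_do_init(void)
	{
		ksyms_init_err = load_kallsyms_refresh();
	}

	/* Hypothetical: load_kallsyms() serialized so that tests running in
	 * parallel cannot race on the first population of the global cache. */
	int load_kallsyms(void)
	{
		pthread_once(&ksyms_once, ksyms_do_init);
		return ksyms_init_err;
	}

This only covers initialization, though; tests that call load_kallsyms_refresh()
concurrently would still need the proposed _local variants (or a handle-based
API) to avoid touching a freed array.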


> +	int ret;
>  
>  	f = fopen("/proc/kallsyms", "r");
>  	if (!f)
> @@ -46,17 +75,22 @@ int load_kallsyms_refresh(void)
>  			break;
>  		if (!addr)
>  			continue;
> -		if (i >= MAX_SYMS)
> -			return -EFBIG;
>  
> -		syms[i].addr = (long) addr;
> -		syms[i].name = strdup(func);
> -		i++;
> +		ret = libbpf_ensure_mem((void **) &syms, &sym_cap,
> +					sizeof(struct ksym), sym_cnt + 1);
> +		if (ret)
> +			goto error;
> +		ret = ksyms__add_symbol(func, (unsigned long)addr);
> +		if (ret)
> +			goto error;
>  	}
>  	fclose(f);
> -	sym_cnt = i;
>  	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
>  	return 0;
> +
> +error:
> +	ksyms__free();
> +	return ret;
>  }
>  
>  int load_kallsyms(void)
> -- 
> 2.39.3
>
Rong Tao Aug. 26, 2023, 2:52 p.m. UTC | #2
> perhaps we could fix that with 2 sets of functions:
> 
>   - load_kallsyms
>     ksym_search
>     ksym_get_addr
> 
>     that work like now on global syms pointer, perhaps we should
>     initialize it before we run tests or use mutex for init
> 
>   - struct ksym *load_kallsyms_local()
>     ksym_search_local(struct ksym *syms, ...)
>     ksym_get_addr_local(struct ksym *syms, ...)
> 
>     that work on local ksyms cache and the test frees it at the end,
>     it would be used in all tests that need updated kallsyms data and
>     use load_kallsyms_refresh for that

Hi, jirka

How about keeping only one type of interface, to avoid confusion about
usage and mutex locking? Like:

	struct ksyms *load_kallsyms(void);
	struct ksyms *load_kallsyms_refresh(struct ksyms *ksyms);
	void free_kallsyms(struct ksyms *ksyms);

	struct ksym *ksym_search(struct ksyms *ksyms, long key);
	long ksym_get_addr(struct ksyms *ksyms, const char *name);

I just submitted v8 [0], please review.

[0] https://lore.kernel.org/lkml/tencent_6D23FE187408D965E95DFAA858BC7E8C760A@qq.com/
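
For illustration, a minimal sketch of the handle-based layout implied by the
prototypes above (hypothetical; the actual v8 series may differ in naming and
details):

	#include <stdlib.h>

	struct ksym {
		long addr;
		char *name;
	};

	struct ksyms {
		struct ksym *syms;	/* dynamically grown array */
		size_t sym_cap;		/* allocated capacity */
		size_t sym_cnt;		/* number of valid entries */
	};

	/* Each caller owns its own cache, so there is no global state to race on. */
	void free_kallsyms(struct ksyms *ksyms)
	{
		size_t i;

		if (!ksyms)
			return;
		for (i = 0; i < ksyms->sym_cnt; i++)
			free(ksyms->syms[i].name);
		free(ksyms->syms);
		free(ksyms);
	}

Because each caller owns its own struct ksyms, a test dropping or refreshing
its cache cannot invalidate another test's pointers.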

Patch

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4ccf4236031c..0cd45c42af2f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -175,6 +175,7 @@  TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
 TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE)
 TPROGS_CFLAGS += -I$(srctree)/tools/include
 TPROGS_CFLAGS += -I$(srctree)/tools/perf
+TPROGS_CFLAGS += -I$(srctree)/tools/lib
 TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
 
 ifdef SYSROOT
@@ -314,6 +315,7 @@  XDP_SAMPLE_CFLAGS += -Wall -O2 \
 
 $(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS)
 $(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h
+$(obj)/$(TRACE_HELPERS): TPROGS_CFLAGS := $(TPROGS_CFLAGS) -D__must_check=
 
 -include $(BPF_SAMPLES_PATH)/Makefile.target
 
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index f83d9f65c65b..d62ab3b77153 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -14,13 +14,44 @@ 
 #include <linux/limits.h>
 #include <libelf.h>
 #include <gelf.h>
+#include "bpf/libbpf_internal.h"
 
 #define TRACEFS_PIPE	"/sys/kernel/tracing/trace_pipe"
 #define DEBUGFS_PIPE	"/sys/kernel/debug/tracing/trace_pipe"
 
-#define MAX_SYMS 400000
-static struct ksym syms[MAX_SYMS];
-static int sym_cnt;
+static struct ksym *syms;
+static size_t sym_cap;
+static size_t sym_cnt;
+
+static int ksyms__add_symbol(const char *name, unsigned long addr)
+{
+	void *tmp;
+
+	tmp = strdup(name);
+	if (!tmp)
+		return -ENOMEM;
+	syms[sym_cnt].addr = addr;
+	syms[sym_cnt].name = tmp;
+
+	sym_cnt++;
+
+	return 0;
+}
+
+static void ksyms__free(void)
+{
+	unsigned int i;
+
+	if (!syms)
+		return;
+
+	for (i = 0; i < sym_cnt; i++)
+		free(syms[i].name);
+	free(syms);
+	syms = NULL;
+	sym_cnt = 0;
+	sym_cap = 0;
+}
 
 static int ksym_cmp(const void *p1, const void *p2)
 {
@@ -33,9 +64,7 @@  int load_kallsyms_refresh(void)
 	char func[256], buf[256];
 	char symbol;
 	void *addr;
-	int i = 0;
-
-	sym_cnt = 0;
+	int ret;
 
 	f = fopen("/proc/kallsyms", "r");
 	if (!f)
@@ -46,17 +75,22 @@  int load_kallsyms_refresh(void)
 			break;
 		if (!addr)
 			continue;
-		if (i >= MAX_SYMS)
-			return -EFBIG;
 
-		syms[i].addr = (long) addr;
-		syms[i].name = strdup(func);
-		i++;
+		ret = libbpf_ensure_mem((void **) &syms, &sym_cap,
+					sizeof(struct ksym), sym_cnt + 1);
+		if (ret)
+			goto error;
+		ret = ksyms__add_symbol(func, (unsigned long)addr);
+		if (ret)
+			goto error;
 	}
 	fclose(f);
-	sym_cnt = i;
 	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
 	return 0;
+
+error:
+	ksyms__free();
+	return ret;
 }
 
 int load_kallsyms(void)