diff mbox series

[v3,bpf-next] bpf/memalloc: Non-atomically allocate freelist during prefill

Message ID 20230728043359.3324347-1-zhuyifei@google.com (mailing list archive)
State Accepted
Commit d1a02358d48d659c2400fa3bbaf9cde2cf9f5040
Delegated to: BPF
Headers show
Series [v3,bpf-next] bpf/memalloc: Non-atomically allocate freelist during prefill | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-12 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs_no_alu32 on s390x with gcc
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1347 this patch: 1347
netdev/cc_maintainers warning 5 maintainers not CCed: kpsingh@kernel.org john.fastabend@gmail.com song@kernel.org jolsa@kernel.org haoluo@google.com
netdev/build_clang success Errors and warnings before: 1365 this patch: 1365
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1370 this patch: 1370
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 41 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-8 success Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-6 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for veristat
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-21 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_verifier on s390x with gcc

Commit Message

YiFei Zhu July 28, 2023, 4:33 a.m. UTC
In internal testing of test_maps, we sometimes observed failures like:
  test_maps: test_maps.c:173: void test_hashmap_percpu(unsigned int, void *):
    Assertion `bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0' failed.
where the errno is ENOMEM. After some troubleshooting and enabling
the warnings, we saw:
  [   91.304708] percpu: allocation failed, size=8 align=8 atomic=1, atomic alloc failed, no space left
  [   91.304716] CPU: 51 PID: 24145 Comm: test_maps Kdump: loaded Tainted: G                 N 6.1.38-smp-DEV #7
  [   91.304719] Hardware name: Google Astoria/astoria, BIOS 0.20230627.0-0 06/27/2023
  [   91.304721] Call Trace:
  [   91.304724]  <TASK>
  [   91.304730]  [<ffffffffa7ef83b9>] dump_stack_lvl+0x59/0x88
  [   91.304737]  [<ffffffffa7ef83f8>] dump_stack+0x10/0x18
  [   91.304738]  [<ffffffffa75caa0c>] pcpu_alloc+0x6fc/0x870
  [   91.304741]  [<ffffffffa75ca302>] __alloc_percpu_gfp+0x12/0x20
  [   91.304743]  [<ffffffffa756785e>] alloc_bulk+0xde/0x1e0
  [   91.304746]  [<ffffffffa7566c02>] bpf_mem_alloc_init+0xd2/0x2f0
  [   91.304747]  [<ffffffffa7547c69>] htab_map_alloc+0x479/0x650
  [   91.304750]  [<ffffffffa751d6e0>] map_create+0x140/0x2e0
  [   91.304752]  [<ffffffffa751d413>] __sys_bpf+0x5a3/0x6c0
  [   91.304753]  [<ffffffffa751c3ec>] __x64_sys_bpf+0x1c/0x30
  [   91.304754]  [<ffffffffa7ef847a>] do_syscall_64+0x5a/0x80
  [   91.304756]  [<ffffffffa800009b>] entry_SYSCALL_64_after_hwframe+0x63/0xcd

This makes sense, because in atomic context, percpu allocation would
not create new chunks; it would only create in non-atomic contexts.
And if during prefill all percpu chunks are full, -ENOMEM would
happen immediately upon next unit_alloc.

Prefill phase does not actually run in atomic context, so we can
use this fact to allocate non-atomically with GFP_KERNEL instead
of GFP_NOWAIT. This avoids the immediate -ENOMEM.

GFP_NOWAIT has to be used in unit_alloc when bpf program runs
in atomic context. Even if bpf program runs in non-atomic context,
in most cases, rcu read lock is enabled for the program so
GFP_NOWAIT is still needed. This is often also the case for
BPF_MAP_UPDATE_ELEM syscalls.

Signed-off-by: YiFei Zhu <zhuyifei@google.com>
---
v1->v2:
- Rebase from bpf to bpf-next
- Dropped second patch and edited commit message to include parts
  of original cover letter, and dropped Fixes tag

v2->v3:
- Clarified commit message
---
 kernel/bpf/memalloc.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

Comments

Yonghong Song July 28, 2023, 4:53 a.m. UTC | #1
On 7/27/23 9:33 PM, YiFei Zhu wrote:
> In internal testing of test_maps, we sometimes observed failures like:
>    test_maps: test_maps.c:173: void test_hashmap_percpu(unsigned int, void *):
>      Assertion `bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0' failed.
> where the errno is ENOMEM. After some troubleshooting and enabling
> the warnings, we saw:
>    [   91.304708] percpu: allocation failed, size=8 align=8 atomic=1, atomic alloc failed, no space left
>    [   91.304716] CPU: 51 PID: 24145 Comm: test_maps Kdump: loaded Tainted: G                 N 6.1.38-smp-DEV #7
>    [   91.304719] Hardware name: Google Astoria/astoria, BIOS 0.20230627.0-0 06/27/2023
>    [   91.304721] Call Trace:
>    [   91.304724]  <TASK>
>    [   91.304730]  [<ffffffffa7ef83b9>] dump_stack_lvl+0x59/0x88
>    [   91.304737]  [<ffffffffa7ef83f8>] dump_stack+0x10/0x18
>    [   91.304738]  [<ffffffffa75caa0c>] pcpu_alloc+0x6fc/0x870
>    [   91.304741]  [<ffffffffa75ca302>] __alloc_percpu_gfp+0x12/0x20
>    [   91.304743]  [<ffffffffa756785e>] alloc_bulk+0xde/0x1e0
>    [   91.304746]  [<ffffffffa7566c02>] bpf_mem_alloc_init+0xd2/0x2f0
>    [   91.304747]  [<ffffffffa7547c69>] htab_map_alloc+0x479/0x650
>    [   91.304750]  [<ffffffffa751d6e0>] map_create+0x140/0x2e0
>    [   91.304752]  [<ffffffffa751d413>] __sys_bpf+0x5a3/0x6c0
>    [   91.304753]  [<ffffffffa751c3ec>] __x64_sys_bpf+0x1c/0x30
>    [   91.304754]  [<ffffffffa7ef847a>] do_syscall_64+0x5a/0x80
>    [   91.304756]  [<ffffffffa800009b>] entry_SYSCALL_64_after_hwframe+0x63/0xcd
> 
> This makes sense, because in atomic context, percpu allocation would
> not create new chunks; it would only create in non-atomic contexts.
> And if during prefill all precpu chunks are full, -ENOMEM would
> happen immediately upon next unit_alloc.
> 
> Prefill phase does not actually run in atomic context, so we can
> use this fact to allocate non-atomically with GFP_KERNEL instead
> of GFP_NOWAIT. This avoids the immediate -ENOMEM.
> 
> GFP_NOWAIT has to be used in unit_alloc when bpf program runs
> in atomic context. Even if bpf program runs in non-atomic context,
> in most cases, rcu read lock is enabled for the program so
> GFP_NOWAIT is still needed. This is often also the case for
> BPF_MAP_UPDATE_ELEM syscalls.
> 
> Signed-off-by: YiFei Zhu <zhuyifei@google.com>

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Hou Tao July 28, 2023, 6:16 a.m. UTC | #2
On 7/28/2023 12:33 PM, YiFei Zhu wrote:
> In internal testing of test_maps, we sometimes observed failures like:
>   test_maps: test_maps.c:173: void test_hashmap_percpu(unsigned int, void *):
>     Assertion `bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0' failed.
> where the errno is ENOMEM. After some troubleshooting and enabling
> the warnings, we saw:
>   [   91.304708] percpu: allocation failed, size=8 align=8 atomic=1, atomic alloc failed, no space left
>   [   91.304716] CPU: 51 PID: 24145 Comm: test_maps Kdump: loaded Tainted: G                 N 6.1.38-smp-DEV #7
>   [   91.304719] Hardware name: Google Astoria/astoria, BIOS 0.20230627.0-0 06/27/2023
>   [   91.304721] Call Trace:
>   [   91.304724]  <TASK>
>   [   91.304730]  [<ffffffffa7ef83b9>] dump_stack_lvl+0x59/0x88
>   [   91.304737]  [<ffffffffa7ef83f8>] dump_stack+0x10/0x18
>   [   91.304738]  [<ffffffffa75caa0c>] pcpu_alloc+0x6fc/0x870
>   [   91.304741]  [<ffffffffa75ca302>] __alloc_percpu_gfp+0x12/0x20
>   [   91.304743]  [<ffffffffa756785e>] alloc_bulk+0xde/0x1e0
>   [   91.304746]  [<ffffffffa7566c02>] bpf_mem_alloc_init+0xd2/0x2f0
>   [   91.304747]  [<ffffffffa7547c69>] htab_map_alloc+0x479/0x650
>   [   91.304750]  [<ffffffffa751d6e0>] map_create+0x140/0x2e0
>   [   91.304752]  [<ffffffffa751d413>] __sys_bpf+0x5a3/0x6c0
>   [   91.304753]  [<ffffffffa751c3ec>] __x64_sys_bpf+0x1c/0x30
>   [   91.304754]  [<ffffffffa7ef847a>] do_syscall_64+0x5a/0x80
>   [   91.304756]  [<ffffffffa800009b>] entry_SYSCALL_64_after_hwframe+0x63/0xcd
>
> This makes sense, because in atomic context, percpu allocation would
> not create new chunks; it would only create in non-atomic contexts.
> And if during prefill all precpu chunks are full, -ENOMEM would
> happen immediately upon next unit_alloc.
>
> Prefill phase does not actually run in atomic context, so we can
> use this fact to allocate non-atomically with GFP_KERNEL instead
> of GFP_NOWAIT. This avoids the immediate -ENOMEM.
>
> GFP_NOWAIT has to be used in unit_alloc when bpf program runs
> in atomic context. Even if bpf program runs in non-atomic context,
> in most cases, rcu read lock is enabled for the program so
> GFP_NOWAIT is still needed. This is often also the case for
> BPF_MAP_UPDATE_ELEM syscalls.
>
> Signed-off-by: YiFei Zhu <zhuyifei@google.com>

Acked-by: Hou Tao <houtao1@huawei.com>
patchwork-bot+netdevbpf@kernel.org July 28, 2023, 4:50 p.m. UTC | #3
Hello:

This patch was applied to bpf/bpf-next.git (master)
by Alexei Starovoitov <ast@kernel.org>:

On Fri, 28 Jul 2023 04:33:59 +0000 you wrote:
> In internal testing of test_maps, we sometimes observed failures like:
>   test_maps: test_maps.c:173: void test_hashmap_percpu(unsigned int, void *):
>     Assertion `bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0' failed.
> where the errno is ENOMEM. After some troubleshooting and enabling
> the warnings, we saw:
>   [   91.304708] percpu: allocation failed, size=8 align=8 atomic=1, atomic alloc failed, no space left
>   [   91.304716] CPU: 51 PID: 24145 Comm: test_maps Kdump: loaded Tainted: G                 N 6.1.38-smp-DEV #7
>   [   91.304719] Hardware name: Google Astoria/astoria, BIOS 0.20230627.0-0 06/27/2023
>   [   91.304721] Call Trace:
>   [   91.304724]  <TASK>
>   [   91.304730]  [<ffffffffa7ef83b9>] dump_stack_lvl+0x59/0x88
>   [   91.304737]  [<ffffffffa7ef83f8>] dump_stack+0x10/0x18
>   [   91.304738]  [<ffffffffa75caa0c>] pcpu_alloc+0x6fc/0x870
>   [   91.304741]  [<ffffffffa75ca302>] __alloc_percpu_gfp+0x12/0x20
>   [   91.304743]  [<ffffffffa756785e>] alloc_bulk+0xde/0x1e0
>   [   91.304746]  [<ffffffffa7566c02>] bpf_mem_alloc_init+0xd2/0x2f0
>   [   91.304747]  [<ffffffffa7547c69>] htab_map_alloc+0x479/0x650
>   [   91.304750]  [<ffffffffa751d6e0>] map_create+0x140/0x2e0
>   [   91.304752]  [<ffffffffa751d413>] __sys_bpf+0x5a3/0x6c0
>   [   91.304753]  [<ffffffffa751c3ec>] __x64_sys_bpf+0x1c/0x30
>   [   91.304754]  [<ffffffffa7ef847a>] do_syscall_64+0x5a/0x80
>   [   91.304756]  [<ffffffffa800009b>] entry_SYSCALL_64_after_hwframe+0x63/0xcd
> 
> [...]

Here is the summary with links:
  - [v3,bpf-next] bpf/memalloc: Non-atomically allocate freelist during prefill
    https://git.kernel.org/bpf/bpf-next/c/d1a02358d48d

You are awesome, thank you!
diff mbox series

Patch

diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 14d9b1a9a4ca..9c49ae53deaf 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -201,12 +201,16 @@  static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
 }
 
 /* Mostly runs from irq_work except __init phase. */
-static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
+static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
+	gfp_t gfp;
 	void *obj;
 	int i;
 
+	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
+	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;
+
 	for (i = 0; i < cnt; i++) {
 		/*
 		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
@@ -238,7 +242,7 @@  static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 		 * will allocate from the current numa node which is what we
 		 * want here.
 		 */
-		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
+		obj = __alloc(c, node, gfp);
 		if (!obj)
 			break;
 		add_obj_to_free_list(c, obj);
@@ -429,7 +433,7 @@  static void bpf_mem_refill(struct irq_work *work)
 		/* irq_work runs on this cpu and kmalloc will allocate
 		 * from the current numa node which is what we want here.
 		 */
-		alloc_bulk(c, c->batch, NUMA_NO_NODE);
+		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
 	else if (cnt > c->high_watermark)
 		free_bulk(c);
 
@@ -477,7 +481,7 @@  static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	 * prog won't be doing more than 4 map_update_elem from
 	 * irq disabled region
 	 */
-	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
+	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
 /* When size != 0 bpf_mem_cache for each cpu.