| Message ID | 20231007135106.3031284-3-houtao@huaweicloud.com (mailing list archive) |
| --- | --- |
| State | Superseded |
| Delegated to | BPF |
| Series | bpf: Fixes for per-cpu kptr |
On Sat, Oct 07, 2023 at 09:51:02PM +0800, Hou Tao wrote:
> From: Hou Tao <houtao1@huawei.com>
>
> With alloc_size_percpu() in place, check whether or not the size of
> the dynamic per-cpu area is matched with unit_size.
>
> Signed-off-by: Hou Tao <houtao1@huawei.com>
> ---
>  kernel/bpf/memalloc.c | 25 ++++++++++++++-----------
>  1 file changed, 14 insertions(+), 11 deletions(-)
>
> diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
> index 6cf61ea55c27..af9ff0755959 100644
> --- a/kernel/bpf/memalloc.c
> +++ b/kernel/bpf/memalloc.c
> @@ -497,21 +497,17 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
>  	struct llist_node *first;
>  	unsigned int obj_size;
>
> -	/* For per-cpu allocator, the size of free objects in free list doesn't
> -	 * match with unit_size and now there is no way to get the size of
> -	 * per-cpu pointer saved in free object, so just skip the checking.
> -	 */
> -	if (c->percpu_size)
> -		return 0;
> -
>  	first = c->free_llist.first;
>  	if (!first)
>  		return 0;
>
> -	obj_size = ksize(first);
> +	if (c->percpu_size)
> +		obj_size = alloc_size_percpu(((void **)first)[1]);
> +	else
> +		obj_size = ksize(first);
>  	if (obj_size != c->unit_size) {
> -		WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
> -			  idx, obj_size, c->unit_size);
> +		WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
> +			  idx, c->percpu_size, obj_size, c->unit_size);
>  		return -EINVAL;
>  	}
>  	return 0;
> @@ -979,7 +975,14 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
>  	return !ret ? NULL : ret + LLIST_NODE_SZ;
>  }
>
> -/* Most of the logic is taken from setup_kmalloc_cache_index_table() */
> +/* The alignment of dynamic per-cpu area is 8 and c->unit_size and the
> + * actual size of dynamic per-cpu area will always be matched, so there is
> + * no need to adjust size_index for per-cpu allocation. However for the
> + * simplicity of the implementation, use an unified size_index for both
> + * kmalloc and per-cpu allocation.
> + *
> + * Most of the logic is taken from setup_kmalloc_cache_index_table().

Since this logic is removed in the bpf tree, you probably need to wait for
the bpf tree to get merged into bpf-next before respinning, to avoid
conflicts.
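For context, the new check relies on the free-object layout bpf_mem_alloc uses for
per-cpu objects: the per-cpu pointer is saved in the word right after the embedded
llist_node, which is why ((void **)first)[1] recovers it and alloc_size_percpu() can
then size the per-cpu area. Below is a minimal userspace sketch of that layout and
the size comparison; struct free_obj and the use of malloc()/malloc_usable_size() as
stand-ins for the per-cpu allocator and alloc_size_percpu() are illustrative
assumptions, not kernel code.

```c
/* Userspace sketch (assumptions, not kernel code): model a bpf_mem_cache
 * free object whose first word is the list-node slot and whose second word
 * holds the saved "per-cpu" pointer, then recover and size that pointer the
 * way check_obj_size() does. malloc_usable_size() stands in for
 * alloc_size_percpu()/ksize().
 */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

struct free_obj {
	void *llist_next; /* stand-in for the embedded struct llist_node */
	void *pcpu_ptr;   /* saved per-cpu pointer, i.e. ((void **)obj)[1] */
};

int main(void)
{
	size_t unit_size = 64;
	struct free_obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;

	/* alloc side: remember the "per-cpu" area next to the list node */
	obj->pcpu_ptr = malloc(unit_size);
	if (!obj->pcpu_ptr) {
		free(obj);
		return 1;
	}

	/* check side: recover the pointer from the free object and size it */
	void *saved = ((void **)obj)[1];
	size_t obj_size = malloc_usable_size(saved);

	if (obj_size < unit_size)
		fprintf(stderr, "unexpected object size %zu, expect %zu\n",
			obj_size, unit_size);
	else
		printf("saved area covers unit_size (%zu >= %zu)\n",
		       obj_size, unit_size);

	free(obj->pcpu_ptr);
	free(obj);
	return 0;
}
```

In the kernel, alloc_size_percpu() plays the role malloc_usable_size() plays here, and
a mismatch triggers the WARN_ONCE() plus -EINVAL return shown in the patch.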