diff mbox series

[bpf-next,v4,2/7] bpf: Refactor inode/task/sk storage map_{alloc,free}() for reuse

Message ID 20221023180524.2859994-1-yhs@fb.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf: Implement cgroup local storage available to non-cgroup-attached bpf progs | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-4 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-1 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 fail Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-10 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 fail Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-6 fail Logs for test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 fail Logs for test_progs on s390x with gcc
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 89 this patch: 89
netdev/cc_maintainers warning 12 maintainers not CCed: kuba@kernel.org sdf@google.com john.fastabend@gmail.com davem@davemloft.net joannelkoong@gmail.com haoluo@google.com netdev@vger.kernel.org jolsa@kernel.org song@kernel.org edumazet@google.com martin.lau@linux.dev pabeni@redhat.com
netdev/build_clang success Errors and warnings before: 17 this patch: 17
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 89 this patch: 89
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yonghong Song Oct. 23, 2022, 6:05 p.m. UTC
Refactor codes so that inode/task/sk storage map_{alloc,free}
can maximally share the same code. There is no functionality change.

Acked-by: David Vernet <void@manifault.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
---
 include/linux/bpf_local_storage.h | 11 ++++-----
 kernel/bpf/bpf_inode_storage.c    | 15 ++----------
 kernel/bpf/bpf_local_storage.c    | 39 +++++++++++++++++++++++++------
 kernel/bpf/bpf_task_storage.c     | 15 ++----------
 net/core/bpf_sk_storage.c         | 15 ++----------
 5 files changed, 43 insertions(+), 52 deletions(-)

Comments

Stanislav Fomichev Oct. 24, 2022, 6:02 p.m. UTC | #1
On 10/23, Yonghong Song wrote:
> Refactor codes so that inode/task/sk storage map_{alloc,free}
> can maximally share the same code. There is no functionality change.

Does it make sense to also do following? (see below, untested)
We aren't saving much code-wise here, but at least we won't have three  
copies
of the same long comment.


diff --git a/include/linux/bpf_local_storage.h  
b/include/linux/bpf_local_storage.h
index 7ea18d4da84b..e4b0b04d081b 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -138,6 +138,8 @@ int bpf_local_storage_map_check_btf(const struct  
bpf_map *map,
  				    const struct btf_type *key_type,
  				    const struct btf_type *value_type);

+bool bpf_local_storage_unlink_nolock(struct bpf_local_storage  
*local_storage);
+
  void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
  				   struct bpf_local_storage_elem *selem);

diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 5f7683b19199..5313cb0b7181 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -56,11 +56,9 @@ static struct bpf_local_storage_data  
*inode_storage_lookup(struct inode *inode,

  void bpf_inode_storage_free(struct inode *inode)
  {
-	struct bpf_local_storage_elem *selem;
  	struct bpf_local_storage *local_storage;
  	bool free_inode_storage = false;
  	struct bpf_storage_blob *bsb;
-	struct hlist_node *n;

  	bsb = bpf_inode(inode);
  	if (!bsb)
@@ -74,30 +72,11 @@ void bpf_inode_storage_free(struct inode *inode)
  		return;
  	}

-	/* Neither the bpf_prog nor the bpf-map's syscall
-	 * could be modifying the local_storage->list now.
-	 * Thus, no elem can be added-to or deleted-from the
-	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
-	 *
-	 * It is racing with bpf_local_storage_map_free() alone
-	 * when unlinking elem from the local_storage->list and
-	 * the map's bucket->list.
-	 */
  	raw_spin_lock_bh(&local_storage->lock);
-	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
-		/* Always unlink from map before unlinking from
-		 * local_storage.
-		 */
-		bpf_selem_unlink_map(selem);
-		free_inode_storage = bpf_selem_unlink_storage_nolock(
-			local_storage, selem, false, false);
-	}
+	free_inode_storage = bpf_local_storage_unlink_nolock(local_storage);
  	raw_spin_unlock_bh(&local_storage->lock);
  	rcu_read_unlock();

-	/* free_inoode_storage should always be true as long as
-	 * local_storage->list was non-empty.
-	 */
  	if (free_inode_storage)
  		kfree_rcu(local_storage, rcu);
  }
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 9dc6de1cf185..0ea754953242 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -98,6 +98,36 @@ void bpf_local_storage_free_rcu(struct rcu_head *rcu)
  		kfree_rcu(local_storage, rcu);
  }

+bool bpf_local_storage_unlink_nolock(struct bpf_local_storage  
*local_storage)
+{
+	struct bpf_local_storage_elem *selem;
+	bool free_storage = false;
+	struct hlist_node *n;
+
+	/* Neither the bpf_prog nor the bpf-map's syscall
+	 * could be modifying the local_storage->list now.
+	 * Thus, no elem can be added-to or deleted-from the
+	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
+	 *
+	 * It is racing with bpf_local_storage_map_free() alone
+	 * when unlinking elem from the local_storage->list and
+	 * the map's bucket->list.
+	 */
+	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
+		/* Always unlink from map before unlinking from
+		 * local_storage.
+		 */
+		bpf_selem_unlink_map(selem);
+		free_storage = bpf_selem_unlink_storage_nolock(
+			local_storage, selem, false, false);
+	}
+
+	/* free_storage should always be true as long as
+	 * local_storage->list was non-empty.
+	 */
+	return free_storage;
+}
+
  static void bpf_selem_free_rcu(struct rcu_head *rcu)
  {
  	struct bpf_local_storage_elem *selem;
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 6f290623347e..60887c25504b 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -71,10 +71,8 @@ task_storage_lookup(struct task_struct *task, struct  
bpf_map *map,

  void bpf_task_storage_free(struct task_struct *task)
  {
-	struct bpf_local_storage_elem *selem;
  	struct bpf_local_storage *local_storage;
  	bool free_task_storage = false;
-	struct hlist_node *n;
  	unsigned long flags;

  	rcu_read_lock();
@@ -85,32 +83,13 @@ void bpf_task_storage_free(struct task_struct *task)
  		return;
  	}

-	/* Neither the bpf_prog nor the bpf-map's syscall
-	 * could be modifying the local_storage->list now.
-	 * Thus, no elem can be added-to or deleted-from the
-	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
-	 *
-	 * It is racing with bpf_local_storage_map_free() alone
-	 * when unlinking elem from the local_storage->list and
-	 * the map's bucket->list.
-	 */
  	bpf_task_storage_lock();
  	raw_spin_lock_irqsave(&local_storage->lock, flags);
-	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
-		/* Always unlink from map before unlinking from
-		 * local_storage.
-		 */
-		bpf_selem_unlink_map(selem);
-		free_task_storage = bpf_selem_unlink_storage_nolock(
-			local_storage, selem, false, false);
-	}
+	free_task_storage = bpf_local_storage_unlink_nolock(local_storage);
  	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
  	bpf_task_storage_unlock();
  	rcu_read_unlock();

-	/* free_task_storage should always be true as long as
-	 * local_storage->list was non-empty.
-	 */
  	if (free_task_storage)
  		kfree_rcu(local_storage, rcu);
  }
Yonghong Song Oct. 24, 2022, 7:08 p.m. UTC | #2
On 10/24/22 11:02 AM, sdf@google.com wrote:
> On 10/23, Yonghong Song wrote:
>> Refactor codes so that inode/task/sk storage map_{alloc,free}
>> can maximally share the same code. There is no functionality change.
> 
> Does it make sense to also do following? (see below, untested)
> We aren't saving much code-wise here, but at least we won't have three 
> copies
> of the same long comment.

Sounds good. Let me do this refactoring as well.

> 
> 
> diff --git a/include/linux/bpf_local_storage.h 
> b/include/linux/bpf_local_storage.h
> index 7ea18d4da84b..e4b0b04d081b 100644
> --- a/include/linux/bpf_local_storage.h
> +++ b/include/linux/bpf_local_storage.h
> @@ -138,6 +138,8 @@ int bpf_local_storage_map_check_btf(const struct 
> bpf_map *map,
>                       const struct btf_type *key_type,
>                       const struct btf_type *value_type);
> 
> +bool bpf_local_storage_unlink_nolock(struct bpf_local_storage 
> *local_storage);
> +
>   void bpf_selem_link_storage_nolock(struct bpf_local_storage 
> *local_storage,
>                      struct bpf_local_storage_elem *selem);
> 
> diff --git a/kernel/bpf/bpf_inode_storage.c 
> b/kernel/bpf/bpf_inode_storage.c
> index 5f7683b19199..5313cb0b7181 100644
> --- a/kernel/bpf/bpf_inode_storage.c
> +++ b/kernel/bpf/bpf_inode_storage.c
> @@ -56,11 +56,9 @@ static struct bpf_local_storage_data 
> *inode_storage_lookup(struct inode *inode,
> 
>   void bpf_inode_storage_free(struct inode *inode)
>   {
> -    struct bpf_local_storage_elem *selem;
>       struct bpf_local_storage *local_storage;
>       bool free_inode_storage = false;
>       struct bpf_storage_blob *bsb;
> -    struct hlist_node *n;
> 
>       bsb = bpf_inode(inode);
>       if (!bsb)
> @@ -74,30 +72,11 @@ void bpf_inode_storage_free(struct inode *inode)
>           return;
>       }
> 
> -    /* Neither the bpf_prog nor the bpf-map's syscall
> -     * could be modifying the local_storage->list now.
> -     * Thus, no elem can be added-to or deleted-from the
> -     * local_storage->list by the bpf_prog or by the bpf-map's syscall.
> -     *
> -     * It is racing with bpf_local_storage_map_free() alone
> -     * when unlinking elem from the local_storage->list and
> -     * the map's bucket->list.
> -     */
>       raw_spin_lock_bh(&local_storage->lock);
> -    hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
> -        /* Always unlink from map before unlinking from
> -         * local_storage.
> -         */
> -        bpf_selem_unlink_map(selem);
> -        free_inode_storage = bpf_selem_unlink_storage_nolock(
> -            local_storage, selem, false, false);
> -    }
> +    free_inode_storage = bpf_local_storage_unlink_nolock(local_storage);
>       raw_spin_unlock_bh(&local_storage->lock);
>       rcu_read_unlock();
> 
> -    /* free_inoode_storage should always be true as long as
> -     * local_storage->list was non-empty.
> -     */
>       if (free_inode_storage)
>           kfree_rcu(local_storage, rcu);
>   }
> diff --git a/kernel/bpf/bpf_local_storage.c 
> b/kernel/bpf/bpf_local_storage.c
> index 9dc6de1cf185..0ea754953242 100644
> --- a/kernel/bpf/bpf_local_storage.c
> +++ b/kernel/bpf/bpf_local_storage.c
> @@ -98,6 +98,36 @@ void bpf_local_storage_free_rcu(struct rcu_head *rcu)
>           kfree_rcu(local_storage, rcu);
>   }
> 
> +bool bpf_local_storage_unlink_nolock(struct bpf_local_storage 
> *local_storage)
> +{
> +    struct bpf_local_storage_elem *selem;
> +    bool free_storage = false;
> +    struct hlist_node *n;
> +
> +    /* Neither the bpf_prog nor the bpf-map's syscall
> +     * could be modifying the local_storage->list now.
> +     * Thus, no elem can be added-to or deleted-from the
> +     * local_storage->list by the bpf_prog or by the bpf-map's syscall.
> +     *
> +     * It is racing with bpf_local_storage_map_free() alone
> +     * when unlinking elem from the local_storage->list and
> +     * the map's bucket->list.
> +     */
> +    hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
> +        /* Always unlink from map before unlinking from
> +         * local_storage.
> +         */
> +        bpf_selem_unlink_map(selem);
> +        free_storage = bpf_selem_unlink_storage_nolock(
> +            local_storage, selem, false, false);
> +    }
> +
> +    /* free_storage should always be true as long as
> +     * local_storage->list was non-empty.
> +     */
> +    return free_storage;
> +}
> +
>   static void bpf_selem_free_rcu(struct rcu_head *rcu)
>   {
>       struct bpf_local_storage_elem *selem;
> diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
> index 6f290623347e..60887c25504b 100644
> --- a/kernel/bpf/bpf_task_storage.c
> +++ b/kernel/bpf/bpf_task_storage.c
> @@ -71,10 +71,8 @@ task_storage_lookup(struct task_struct *task, struct 
> bpf_map *map,
> 
>   void bpf_task_storage_free(struct task_struct *task)
>   {
> -    struct bpf_local_storage_elem *selem;
>       struct bpf_local_storage *local_storage;
>       bool free_task_storage = false;
> -    struct hlist_node *n;
>       unsigned long flags;
> 
>       rcu_read_lock();
> @@ -85,32 +83,13 @@ void bpf_task_storage_free(struct task_struct *task)
>           return;
>       }
> 
> -    /* Neither the bpf_prog nor the bpf-map's syscall
> -     * could be modifying the local_storage->list now.
> -     * Thus, no elem can be added-to or deleted-from the
> -     * local_storage->list by the bpf_prog or by the bpf-map's syscall.
> -     *
> -     * It is racing with bpf_local_storage_map_free() alone
> -     * when unlinking elem from the local_storage->list and
> -     * the map's bucket->list.
> -     */
>       bpf_task_storage_lock();
>       raw_spin_lock_irqsave(&local_storage->lock, flags);
> -    hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
> -        /* Always unlink from map before unlinking from
> -         * local_storage.
> -         */
> -        bpf_selem_unlink_map(selem);
> -        free_task_storage = bpf_selem_unlink_storage_nolock(
> -            local_storage, selem, false, false);
> -    }
> +    free_task_storage = bpf_local_storage_unlink_nolock(local_storage);
>       raw_spin_unlock_irqrestore(&local_storage->lock, flags);
>       bpf_task_storage_unlock();
>       rcu_read_unlock();
> 
> -    /* free_task_storage should always be true as long as
> -     * local_storage->list was non-empty.
> -     */
>       if (free_task_storage)
>           kfree_rcu(local_storage, rcu);
>   }
Martin KaFai Lau Oct. 24, 2022, 8:34 p.m. UTC | #3
On 10/23/22 11:05 AM, Yonghong Song wrote:
> -void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
> -				int __percpu *busy_counter)
> +static void __bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
> +					 int __percpu *busy_counter)

nit.

This map_free does not look like it requires a separate "__" version since it is 
not reused.  probably just put everything into the bpf_local_storage_map_free() 
instead?

>   {
>   	struct bpf_local_storage_elem *selem;
>   	struct bpf_local_storage_map_bucket *b;
> @@ -613,7 +613,7 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
>   	return 0;
>   }
>   
> -struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
> +static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_attr *attr)
>   {
>   	struct bpf_local_storage_map *smap;
>   	unsigned int i;
> @@ -663,3 +663,28 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
>   
>   	return 0;
>   }

[ ... ]

> +void bpf_local_storage_map_free(struct bpf_map *map,
> +				struct bpf_local_storage_cache *cache,
> +				int __percpu *busy_counter)
> +{
> +	struct bpf_local_storage_map *smap;
> +
> +	smap = (struct bpf_local_storage_map *)map;
> +	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
> +	__bpf_local_storage_map_free(smap, busy_counter);
> +}
Yonghong Song Oct. 25, 2022, 2:28 a.m. UTC | #4
On 10/24/22 1:34 PM, Martin KaFai Lau wrote:
> On 10/23/22 11:05 AM, Yonghong Song wrote
>> -void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
>> -                int __percpu *busy_counter)
>> +static void __bpf_local_storage_map_free(struct bpf_local_storage_map 
>> *smap,
>> +                     int __percpu *busy_counter)
> 
> nit.
> 
> This map_free does not look like it requires a separate "__" version 
> since it is not reused.  probably just put everything into the 
> bpf_local_storage_map_free() instead?

Okay, will inline __bpf_local_storage_map_free() into
bpf_local_storage_map_free().

> 
>>   {
>>       struct bpf_local_storage_elem *selem;
>>       struct bpf_local_storage_map_bucket *b;
>> @@ -613,7 +613,7 @@ int bpf_local_storage_map_alloc_check(union 
>> bpf_attr *attr)
>>       return 0;
>>   }
>> -struct bpf_local_storage_map *bpf_local_storage_map_alloc(union 
>> bpf_attr *attr)
>> +static struct bpf_local_storage_map 
>> *__bpf_local_storage_map_alloc(union bpf_attr *attr)
>>   {
>>       struct bpf_local_storage_map *smap;
>>       unsigned int i;
>> @@ -663,3 +663,28 @@ int bpf_local_storage_map_check_btf(const struct 
>> bpf_map *map,
>>       return 0;
>>   }
> 
> [ ... ]
> 
>> +void bpf_local_storage_map_free(struct bpf_map *map,
>> +                struct bpf_local_storage_cache *cache,
>> +                int __percpu *busy_counter)
>> +{
>> +    struct bpf_local_storage_map *smap;
>> +
>> +    smap = (struct bpf_local_storage_map *)map;
>> +    bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
>> +    __bpf_local_storage_map_free(smap, busy_counter);
>> +}
>
diff mbox series

Patch

diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 7ea18d4da84b..fdf753125778 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -116,21 +116,20 @@  static struct bpf_local_storage_cache name = {			\
 	.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock),	\
 }
 
-u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache);
-void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
-				      u16 idx);
-
 /* Helper functions for bpf_local_storage */
 int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
 
-struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr);
+struct bpf_map *
+bpf_local_storage_map_alloc(union bpf_attr *attr,
+			    struct bpf_local_storage_cache *cache);
 
 struct bpf_local_storage_data *
 bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
 			 struct bpf_local_storage_map *smap,
 			 bool cacheit_lockit);
 
-void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+void bpf_local_storage_map_free(struct bpf_map *map,
+				struct bpf_local_storage_cache *cache,
 				int __percpu *busy_counter);
 
 int bpf_local_storage_map_check_btf(const struct bpf_map *map,
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 5f7683b19199..34c315746d61 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -226,23 +226,12 @@  static int notsupp_get_next_key(struct bpf_map *map, void *key,
 
 static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr)
 {
-	struct bpf_local_storage_map *smap;
-
-	smap = bpf_local_storage_map_alloc(attr);
-	if (IS_ERR(smap))
-		return ERR_CAST(smap);
-
-	smap->cache_idx = bpf_local_storage_cache_idx_get(&inode_cache);
-	return &smap->map;
+	return bpf_local_storage_map_alloc(attr, &inode_cache);
 }
 
 static void inode_storage_map_free(struct bpf_map *map)
 {
-	struct bpf_local_storage_map *smap;
-
-	smap = (struct bpf_local_storage_map *)map;
-	bpf_local_storage_cache_idx_free(&inode_cache, smap->cache_idx);
-	bpf_local_storage_map_free(smap, NULL);
+	bpf_local_storage_map_free(map, &inode_cache, NULL);
 }
 
 BTF_ID_LIST_SINGLE(inode_storage_map_btf_ids, struct,
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 9dc6de1cf185..f89b6d080e1f 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -346,7 +346,7 @@  int bpf_local_storage_alloc(void *owner,
 		/* Note that even first_selem was linked to smap's
 		 * bucket->list, first_selem can be freed immediately
 		 * (instead of kfree_rcu) because
-		 * bpf_local_storage_map_free() does a
+		 * __bpf_local_storage_map_free() does a
 		 * synchronize_rcu_mult (waiting for both sleepable and
 		 * normal programs) before walking the bucket->list.
 		 * Hence, no one is accessing selem from the
@@ -500,7 +500,7 @@  bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 	return ERR_PTR(err);
 }
 
-u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
+static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
 {
 	u64 min_usage = U64_MAX;
 	u16 i, res = 0;
@@ -524,16 +524,16 @@  u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
 	return res;
 }
 
-void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
-				      u16 idx)
+static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+					     u16 idx)
 {
 	spin_lock(&cache->idx_lock);
 	cache->idx_usage_counts[idx]--;
 	spin_unlock(&cache->idx_lock);
 }
 
-void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
-				int __percpu *busy_counter)
+static void __bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+					 int __percpu *busy_counter)
 {
 	struct bpf_local_storage_elem *selem;
 	struct bpf_local_storage_map_bucket *b;
@@ -613,7 +613,7 @@  int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
 	return 0;
 }
 
-struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
+static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_local_storage_map *smap;
 	unsigned int i;
@@ -663,3 +663,28 @@  int bpf_local_storage_map_check_btf(const struct bpf_map *map,
 
 	return 0;
 }
+
+struct bpf_map *
+bpf_local_storage_map_alloc(union bpf_attr *attr,
+			    struct bpf_local_storage_cache *cache)
+{
+	struct bpf_local_storage_map *smap;
+
+	smap = __bpf_local_storage_map_alloc(attr);
+	if (IS_ERR(smap))
+		return ERR_CAST(smap);
+
+	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
+	return &smap->map;
+}
+
+void bpf_local_storage_map_free(struct bpf_map *map,
+				struct bpf_local_storage_cache *cache,
+				int __percpu *busy_counter)
+{
+	struct bpf_local_storage_map *smap;
+
+	smap = (struct bpf_local_storage_map *)map;
+	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);
+	__bpf_local_storage_map_free(smap, busy_counter);
+}
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index 6f290623347e..020153902ef8 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -288,23 +288,12 @@  static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
 
 static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
 {
-	struct bpf_local_storage_map *smap;
-
-	smap = bpf_local_storage_map_alloc(attr);
-	if (IS_ERR(smap))
-		return ERR_CAST(smap);
-
-	smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
-	return &smap->map;
+	return bpf_local_storage_map_alloc(attr, &task_cache);
 }
 
 static void task_storage_map_free(struct bpf_map *map)
 {
-	struct bpf_local_storage_map *smap;
-
-	smap = (struct bpf_local_storage_map *)map;
-	bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
-	bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
+	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
 }
 
 BTF_ID_LIST_SINGLE(task_storage_map_btf_ids, struct, bpf_local_storage_map)
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 94374d529ea4..3bfdc8834a5b 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -87,23 +87,12 @@  void bpf_sk_storage_free(struct sock *sk)
 
 static void bpf_sk_storage_map_free(struct bpf_map *map)
 {
-	struct bpf_local_storage_map *smap;
-
-	smap = (struct bpf_local_storage_map *)map;
-	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
-	bpf_local_storage_map_free(smap, NULL);
+	bpf_local_storage_map_free(map, &sk_cache, NULL);
 }
 
 static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 {
-	struct bpf_local_storage_map *smap;
-
-	smap = bpf_local_storage_map_alloc(attr);
-	if (IS_ERR(smap))
-		return ERR_CAST(smap);
-
-	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
-	return &smap->map;
+	return bpf_local_storage_map_alloc(attr, &sk_cache);
 }
 
 static int notsupp_get_next_key(struct bpf_map *map, void *key,