
[bpf-next,v3,4/4] xdp: remove unused {__,}xdp_release_frame()

Message ID 20230313215553.1045175-5-aleksander.lobakin@intel.com (mailing list archive)
State Accepted
Commit d4e492338d11937c55841b1279287280d6e35894
Delegated to: BPF
Series xdp: recycle Page Pool backed skbs built from XDP frames

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 4352 this patch: 4352
netdev/cc_maintainers success CCed 10 of 10 maintainers
netdev/build_clang success Errors and warnings before: 1007 this patch: 1007
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4561 this patch: 4561
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 56 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_verifier on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_progs on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-19 fail Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 fail Logs for test_progs_no_alu32 on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc

Commit Message

Alexander Lobakin March 13, 2023, 9:55 p.m. UTC
__xdp_build_skb_from_frame() was the last user of
{__,}xdp_release_frame(), which detaches pages from the page_pool.
All the consumers now recycle Page Pool skbs and pages, except for the
mlx5, stmmac and tsnep drivers, which use page_pool_release_page()
directly (this might change one day). It's safe to assume this
functionality is no longer needed and can be removed (in favor of
recycling).

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 include/net/xdp.h | 29 -----------------------------
 net/core/xdp.c    | 15 ---------------
 2 files changed, 44 deletions(-)
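
For context, here is a minimal sketch of the flow that replaces the removed
path (based on patch 3 of this series; the helper name below is hypothetical
and only illustrates the idea): instead of detaching pages with
xdp_release_frame(), the skb built from the frame is marked for Page Pool
recycling, and only MEM_TYPE_PAGE_POOL frames need any action at all.

/* Hypothetical helper, not from the patch: mark a Page-Pool-backed skb
 * for recycling instead of detaching its pages from the pool.
 */
static void xdp_skb_mark_pp_recycle(const struct xdp_frame *xdpf,
				    struct sk_buff *skb)
{
	/* Other memory types are freed normally by the skb path,
	 * so only Page Pool memory needs the recycling mark.
	 */
	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);
}

__xdp_build_skb_from_frame() applies skb_mark_for_recycle() this way before
returning the skb, so the pages go back to their pool when the stack frees
the skb.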

Comments

Yunsheng Lin March 14, 2023, 11:37 a.m. UTC | #1
On 2023/3/14 5:55, Alexander Lobakin wrote:
> __xdp_build_skb_from_frame() was the last user of
> {__,}xdp_release_frame(), which detaches pages from the page_pool.
> All the consumers now recycle Page Pool skbs and pages, except for the
> mlx5, stmmac and tsnep drivers, which use page_pool_release_page()
> directly (this might change one day). It's safe to assume this
> functionality is no longer needed and can be removed (in favor of
> recycling).
> 
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>  include/net/xdp.h | 29 -----------------------------
>  net/core/xdp.c    | 15 ---------------
>  2 files changed, 44 deletions(-)
> 
> diff --git a/include/net/xdp.h b/include/net/xdp.h
> index d517bfac937b..5393b3ebe56e 100644
> --- a/include/net/xdp.h
> +++ b/include/net/xdp.h
> @@ -317,35 +317,6 @@ void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
>  void xdp_return_frame_bulk(struct xdp_frame *xdpf,
>  			   struct xdp_frame_bulk *bq);
>  
> -/* When sending xdp_frame into the network stack, then there is no
> - * return point callback, which is needed to release e.g. DMA-mapping
> - * resources with page_pool.  Thus, have explicit function to release
> - * frame resources.
> - */
> -void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
> -static inline void xdp_release_frame(struct xdp_frame *xdpf)
> -{
> -	struct xdp_mem_info *mem = &xdpf->mem;
> -	struct skb_shared_info *sinfo;
> -	int i;
> -
> -	/* Curr only page_pool needs this */
> -	if (mem->type != MEM_TYPE_PAGE_POOL)
> -		return;
> -
> -	if (likely(!xdp_frame_has_frags(xdpf)))
> -		goto out;
> -
> -	sinfo = xdp_get_shared_info_from_frame(xdpf);
> -	for (i = 0; i < sinfo->nr_frags; i++) {
> -		struct page *page = skb_frag_page(&sinfo->frags[i]);
> -
> -		__xdp_release_frame(page_address(page), mem);
> -	}
> -out:
> -	__xdp_release_frame(xdpf->data, mem);
> -}
> -
>  static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
>  {
>  	struct skb_shared_info *sinfo;
> diff --git a/net/core/xdp.c b/net/core/xdp.c
> index a2237cfca8e9..8d3ad315f18d 100644
> --- a/net/core/xdp.c
> +++ b/net/core/xdp.c
> @@ -531,21 +531,6 @@ void xdp_return_buff(struct xdp_buff *xdp)
>  }
>  EXPORT_SYMBOL_GPL(xdp_return_buff);
>  
> -/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
> -void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
> -{
> -	struct xdp_mem_allocator *xa;
> -	struct page *page;
> -
> -	rcu_read_lock();
> -	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
> -	page = virt_to_head_page(data);
> -	if (xa)
> -		page_pool_release_page(xa->page_pool, page);

page_pool_release_page() is only called here when xa is not NULL
and mem->type == MEM_TYPE_PAGE_POOL.

But skb_mark_for_recycle() is called when mem->type == MEM_TYPE_PAGE_POOL
without checking xa, which does not seem symmetric with patch 3. Is this
intended?

> -	rcu_read_unlock();
> -}
> -EXPORT_SYMBOL_GPL(__xdp_release_frame);
> -
>  void xdp_attachment_setup(struct xdp_attachment_info *info,
>  			  struct netdev_bpf *bpf)
>  {
>
Alexander Lobakin March 14, 2023, 12:27 p.m. UTC | #2
From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Tue, 14 Mar 2023 19:37:23 +0800

> On 2023/3/14 5:55, Alexander Lobakin wrote:
>> __xdp_build_skb_from_frame() was the last user of
>> {__,}xdp_release_frame(), which detaches pages from the page_pool.

[...]

>> -/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
>> -void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
>> -{
>> -	struct xdp_mem_allocator *xa;
>> -	struct page *page;
>> -
>> -	rcu_read_lock();
>> -	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
>> -	page = virt_to_head_page(data);
>> -	if (xa)
>> -		page_pool_release_page(xa->page_pool, page);
> 
> page_pool_release_page() is only called here when xa is not NULL
> and mem->type == MEM_TYPE_PAGE_POOL.
> 
> But skb_mark_for_recycle() is called when mem->type == MEM_TYPE_PAGE_POOL
> without checking xa, which does not seem symmetric with patch 3. Is this
> intended?

Intended. page_pool_return_skb_page() checks for %PP_SIGNATURE, and if
a page doesn't belong to any PP, it is returned to the MM layer.
Moreover, `mem->type == MEM_TYPE_PAGE_POOL && xa == NULL` is an
exception rather than the rule -- it means the page was released from
its PP before reaching the function, and IIRC it's not even possible
with our current drivers. Adding a hashtable lookup to
{__,}xdp_build_skb_from_frame() would only add hotpath overhead with no
positive impact.
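
For reference, a simplified sketch of the check referred to above, modeled
on page_pool_return_skb_page() as of this series (simplified, not verbatim):
a page that doesn't carry the Page Pool signature is rejected, and the
caller then frees it through the regular MM path.

/* Simplified sketch, not the verbatim kernel function. */
bool page_pool_return_skb_page(struct page *page)
{
	struct page_pool *pp;

	page = compound_head(page);

	/* pp_magic carries PP_SIGNATURE only while the page is owned by
	 * a page_pool; the low bits are masked as they may be reused.
	 * Anything else means "not ours" -- let the MM layer free it.
	 */
	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
		return false;

	pp = page->pp;
	page_pool_put_full_page(pp, page, false);

	return true;
}

So even without the rhashtable lookup on xa, a page that has already left
its pool is never recycled by mistake; it simply takes the normal free path.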

> 
>> -	rcu_read_unlock();
>> -}
>> -EXPORT_SYMBOL_GPL(__xdp_release_frame);
>> -
>>  void xdp_attachment_setup(struct xdp_attachment_info *info,
>>  			  struct netdev_bpf *bpf)
>>  {
>>

Thanks,
Olek

Patch

diff --git a/include/net/xdp.h b/include/net/xdp.h
index d517bfac937b..5393b3ebe56e 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -317,35 +317,6 @@  void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
 void xdp_return_frame_bulk(struct xdp_frame *xdpf,
 			   struct xdp_frame_bulk *bq);
 
-/* When sending xdp_frame into the network stack, then there is no
- * return point callback, which is needed to release e.g. DMA-mapping
- * resources with page_pool.  Thus, have explicit function to release
- * frame resources.
- */
-void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
-static inline void xdp_release_frame(struct xdp_frame *xdpf)
-{
-	struct xdp_mem_info *mem = &xdpf->mem;
-	struct skb_shared_info *sinfo;
-	int i;
-
-	/* Curr only page_pool needs this */
-	if (mem->type != MEM_TYPE_PAGE_POOL)
-		return;
-
-	if (likely(!xdp_frame_has_frags(xdpf)))
-		goto out;
-
-	sinfo = xdp_get_shared_info_from_frame(xdpf);
-	for (i = 0; i < sinfo->nr_frags; i++) {
-		struct page *page = skb_frag_page(&sinfo->frags[i]);
-
-		__xdp_release_frame(page_address(page), mem);
-	}
-out:
-	__xdp_release_frame(xdpf->data, mem);
-}
-
 static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
 {
 	struct skb_shared_info *sinfo;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index a2237cfca8e9..8d3ad315f18d 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -531,21 +531,6 @@  void xdp_return_buff(struct xdp_buff *xdp)
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
 
-/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
-void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
-{
-	struct xdp_mem_allocator *xa;
-	struct page *page;
-
-	rcu_read_lock();
-	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
-	page = virt_to_head_page(data);
-	if (xa)
-		page_pool_release_page(xa->page_pool, page);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(__xdp_release_frame);
-
 void xdp_attachment_setup(struct xdp_attachment_info *info,
 			  struct netdev_bpf *bpf)
 {