diff mbox series

[net-next,v1,1/2] page_pool: reintroduce page_pool_unlink_napi()

Message ID 20240625195522.2974466-2-dw@davidwei.uk (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series page_pool: bnxt_en: unlink old page pool in queue api using helper | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 842 this patch: 842
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang success Errors and warnings before: 849 this patch: 849
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 989 this patch: 989
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 35 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 3 this patch: 3
netdev/source_inline success Was 0 now: 0

Commit Message

David Wei June 25, 2024, 7:55 p.m. UTC
Commit 56ef27e3 unexported page_pool_unlink_napi() and renamed it to
page_pool_disable_direct_recycling(). This is because there was no
in-tree user of page_pool_unlink_napi().

Since then, the Rx queue API and an implementation in bnxt got merged. The
bnxt implementation broadly follows these steps: allocate new queue
memory + page pool, stop the old rx queue, swap, then destroy the old
queue memory + page pool. The existing NAPI instance is re-used.

The page pool to be destroyed is still linked to the re-used NAPI
instance. Freeing it as-is will trigger warnings in
page_pool_disable_direct_recycling(). In my initial patches I unlinked
very directly by setting pp.napi to NULL.

Instead, bring back page_pool_unlink_napi() and use that instead of
having a driver touch a core struct directly.

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David Wei <dw@davidwei.uk>
---
 include/net/page_pool/types.h | 5 +++++
 net/core/page_pool.c          | 6 ++++++
 2 files changed, 11 insertions(+)

Comments

Jakub Kicinski June 25, 2024, 11:39 p.m. UTC | #1
On Tue, 25 Jun 2024 12:55:21 -0700 David Wei wrote:
>  #ifdef CONFIG_PAGE_POOL
> +void page_pool_unlink_napi(struct page_pool *pool);
>  void page_pool_destroy(struct page_pool *pool);
>  void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
>  			   const struct xdp_mem_info *mem);
>  void page_pool_put_page_bulk(struct page_pool *pool, void **data,
>  			     int count);
>  #else
> +static inline void page_pool_unlink_napi(struct page_pool *pool)
> +{
> +}

All callers must select PAGE_POOL, I don't think we need the empty
static inline in this particular case.

>  static inline void page_pool_destroy(struct page_pool *pool)
>  {
>  }
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 3927a0a7fa9a..ec274dde0e32 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -1021,6 +1021,11 @@ static void page_pool_disable_direct_recycling(struct page_pool *pool)
>  	 */
>  	WRITE_ONCE(pool->cpuid, -1);
>  
> +	page_pool_unlink_napi(pool);

No need to split page_pool_disable_direct_recycling()
into two, we can write cpuid, it won't hurt.

The purpose of the function didn't really change when Olek
renamed it. Unlinking NAPI is also precisely to prevent recycling.
So you can either export page_pool_disable_direct_recycling(),
add a wrapper called page_pool_unlink_napi(), or come up with
another name... But there's no need to split it.

> +}
> +
> +void page_pool_unlink_napi(struct page_pool *pool)
> +{
>  	if (!pool->p.napi)
>  		return;
>  
> @@ -1032,6 +1037,7 @@ static void p
David Wei June 26, 2024, 12:10 a.m. UTC | #2
On 2024-06-25 16:39, Jakub Kicinski wrote:
> On Tue, 25 Jun 2024 12:55:21 -0700 David Wei wrote:
>>  #ifdef CONFIG_PAGE_POOL
>> +void page_pool_unlink_napi(struct page_pool *pool);
>>  void page_pool_destroy(struct page_pool *pool);
>>  void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
>>  			   const struct xdp_mem_info *mem);
>>  void page_pool_put_page_bulk(struct page_pool *pool, void **data,
>>  			     int count);
>>  #else
>> +static inline void page_pool_unlink_napi(struct page_pool *pool)
>> +{
>> +}
> 
> All callers must select PAGE_POOL, I don't think we need the empty
> static inline in this particular case.

Got it, I'll remove this.

> 
>>  static inline void page_pool_destroy(struct page_pool *pool)
>>  {
>>  }
>> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
>> index 3927a0a7fa9a..ec274dde0e32 100644
>> --- a/net/core/page_pool.c
>> +++ b/net/core/page_pool.c
>> @@ -1021,6 +1021,11 @@ static void page_pool_disable_direct_recycling(struct page_pool *pool)
>>  	 */
>>  	WRITE_ONCE(pool->cpuid, -1);
>>  
>> +	page_pool_unlink_napi(pool);
> 
> No need to split page_pool_disable_direct_recycling()
> into two, we can write cpuid, it won't hurt.

Ah, I see.

> 
> The purpose of the function didn't really change when Olek
> renamed it. Unlinking NAPI is also precisely to prevent recycling.
> So you can either export page_pool_disable_direct_recycling(),
> add a wrapper called page_pool_unlink_napi(), or come up with
> another name... But there's no need to split it.

Thanks for the suggestions. I'll export
page_pool_disable_direct_recycling().

> 
>> +}
>> +
>> +void page_pool_unlink_napi(struct page_pool *pool)
>> +{
>>  	if (!pool->p.napi)
>>  		return;
>>  
>> @@ -1032,6 +1037,7 @@ static void p
diff mbox series

Patch

diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 7e8477057f3d..aa3e615f1ae6 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -229,12 +229,17 @@  struct page_pool *page_pool_create_percpu(const struct page_pool_params *params,
 struct xdp_mem_info;
 
 #ifdef CONFIG_PAGE_POOL
+void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 			   const struct xdp_mem_info *mem);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
 #else
+static inline void page_pool_unlink_napi(struct page_pool *pool)
+{
+}
+
 static inline void page_pool_destroy(struct page_pool *pool)
 {
 }
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 3927a0a7fa9a..ec274dde0e32 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -1021,6 +1021,11 @@  static void page_pool_disable_direct_recycling(struct page_pool *pool)
 	 */
 	WRITE_ONCE(pool->cpuid, -1);
 
+	page_pool_unlink_napi(pool);
+}
+
+void page_pool_unlink_napi(struct page_pool *pool)
+{
 	if (!pool->p.napi)
 		return;
 
@@ -1032,6 +1037,7 @@  static void page_pool_disable_direct_recycling(struct page_pool *pool)
 
 	WRITE_ONCE(pool->p.napi, NULL);
 }
+EXPORT_SYMBOL(page_pool_unlink_napi);
 
 void page_pool_destroy(struct page_pool *pool)
 {