@@ -818,8 +818,17 @@ static bool page_pool_napi_local(const struct page_pool *pool)
 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
				  unsigned int dma_sync_size, bool allow_direct)
 {
-	if (!allow_direct)
+	bool allow_direct_orig = allow_direct;
+
+	/* page_pool_put_unrefed_netmem() is not supposed to be called with
+	 * allow_direct == true after page_pool_destroy() has been called, so
+	 * the allow_direct == true case needs no RCU protection here.
+	 */
+	DEBUG_NET_WARN_ON_ONCE(allow_direct && pool->destroy_cnt);
+	if (!allow_direct_orig) {
+		rcu_read_lock();
 		allow_direct = page_pool_napi_local(pool);
+	}
 
 	netmem =
 		__page_pool_put_page(pool, netmem, dma_sync_size, allow_direct);
@@ -828,6 +837,9 @@ void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
 		/* Cache full, fallback to free pages */
 		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, netmem);
 	}
+
+	if (!allow_direct_orig)
+		rcu_read_unlock();
 }
 EXPORT_SYMBOL(page_pool_put_unrefed_netmem);
@@ -861,6 +873,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 	bool allow_direct;
 	bool in_softirq;
 
+	rcu_read_lock();
 	allow_direct = page_pool_napi_local(pool);
 
 	for (i = 0; i < count; i++) {
@@ -876,8 +889,10 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			data[bulk_len++] = (__force void *)netmem;
 	}
 
-	if (!bulk_len)
+	if (!bulk_len) {
+		rcu_read_unlock();
 		return;
+	}
 
 	/* Bulk producer into ptr_ring page_pool cache */
 	in_softirq = page_pool_producer_lock(pool);
@@ -892,14 +907,18 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 	recycle_stat_inc(pool, ring);
 	page_pool_producer_unlock(pool, in_softirq);
 
 	/* Hopefully all pages were returned into the ptr_ring */
-	if (likely(i == bulk_len))
+	if (likely(i == bulk_len)) {
+		rcu_read_unlock();
 		return;
+	}
 
 	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
 	for (; i < bulk_len; i++)
 		page_pool_return_page(pool, (__force netmem_ref)data[i]);
+
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL(page_pool_put_page_bulk);
@@ -1121,6 +1140,12 @@ void page_pool_destroy(struct page_pool *pool)
 		return;
 
 	page_pool_disable_direct_recycling(pool);
+
+	/* Wait for the freeing side to see that direct recycling is disabled,
+	 * to avoid concurrent access to the pool->alloc cache.
+	 */
+	synchronize_rcu();
+
 	page_pool_free_frag(pool);
 
 	if (!page_pool_release(pool))
A page_pool page may be freed from skb_defer_free_flush() in softirq
context, which may cause concurrent access to the pool->alloc cache
due to the time window below: CPU0 and CPU1 may access the pool->alloc
cache concurrently in page_pool_empty_alloc_cache_once() and
page_pool_recycle_in_cache():

              CPU 0                               CPU1
        page_pool_destroy()             skb_defer_free_flush()
                 .                                 .
                 .                   page_pool_put_unrefed_page()
                 .                                 .
                 .             allow_direct = page_pool_napi_local()
                 .                                 .
page_pool_disable_direct_recycling()               .
                 .                                 .
 page_pool_empty_alloc_cache_once()   page_pool_recycle_in_cache()

Use the RCU mechanism to avoid the above concurrent access problem.

Note, the above was found during code review of how to fix the problem
in [1].

1. https://lore.kernel.org/lkml/8067f204-1380-4d37-8ffd-007fc6f26738@kernel.org/T/

Fixes: 8c48eea3adf3 ("page_pool: allow caching from safely localized NAPI")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 net/core/page_pool.c | 31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)
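
For reviewers who want to poke at the reader/writer pairing outside the
kernel, below is a minimal userspace sketch of the same pattern, built on
liburcu and C11 atomics rather than the kernel RCU API. All names here
(the pool struct, napi_local, recycler(), the drain in main()) are
hypothetical stand-ins for illustration, not the page_pool API.

/* Readers test "napi local" and touch the lockless cache only inside an
 * RCU read-side critical section; the destroyer clears the flag and
 * calls synchronize_rcu() before draining the cache.
 * Build (assuming liburcu is installed): gcc sketch.c -lurcu -lpthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <urcu.h>		/* liburcu: rcu_read_lock() and friends */

static struct {
	atomic_bool napi_local;	/* stands in for the napi/cpuid check */
	int alloc_cache;	/* stands in for pool->alloc */
} pool = { .napi_local = true };

/* Freeing side, shaped like page_pool_put_unrefed_netmem() with the fix:
 * the flag test and the cache access sit in one read-side section.
 */
static void *recycler(void *unused)
{
	rcu_register_thread();	/* liburcu readers must register */

	for (int i = 0; i < 100000; i++) {
		rcu_read_lock();
		if (atomic_load(&pool.napi_local))
			pool.alloc_cache++;	/* "recycle in cache" */
		rcu_read_unlock();
	}

	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, recycler, NULL);

	/* Destroying side, shaped like page_pool_destroy() with the fix:
	 * once synchronize_rcu() returns, every reader that could still
	 * have seen napi_local == true has left its critical section, so
	 * the drain below cannot race with a recycle.
	 */
	atomic_store(&pool.napi_local, false);
	synchronize_rcu();
	pool.alloc_cache = 0;	/* "empty alloc cache once" */

	pthread_join(t, NULL);
	printf("drained without racing the recycler\n");
	return 0;
}

The patch above has the same shape: page_pool_napi_local() plus the
pool->alloc accesses form the read-side section,
page_pool_disable_direct_recycling() is the flag clear, and the new
synchronize_rcu() in page_pool_destroy() is the grace period before
page_pool_empty_alloc_cache_once() runs.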