Message ID | 20240831004313.3713467-7-almasrymina@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Device Memory TCP | expand |
On Sat, 31 Aug 2024 00:43:06 +0000 Mina Almasry wrote: > diff --git a/include/net/mp_dmabuf_devmem.h b/include/net/mp_dmabuf_devmem.h > new file mode 100644 > index 000000000000..6d1cf2a77f6b > --- /dev/null > +++ b/include/net/mp_dmabuf_devmem.h this header can live under net/core/ like netmem_priv.h right? devmem internals should be of no interest outside of core networking. In fact the same is true for include/net/devmem.h ? Sorry for pushing back on all the header exports, we have had bad experiences with people treating anything under include/ as public API for any subsystem to use.. > @@ -0,0 +1,44 @@ > +/* SPDX-License-Identifier: GPL-2.0-or-later */ > +/* > + * Dmabuf device memory provider. > + * > + * Authors: Mina Almasry <almasrymina@google.com> > + * > + */ > +#ifndef _NET_MP_DMABUF_DEVMEM_H > +#define _NET_MP_DMABUF_DEVMEM_H > + > +#include <net/netmem.h> > + > +#if defined(CONFIG_NET_DEVMEM) > +int mp_dmabuf_devmem_init(struct page_pool *pool); > + > +netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp); > + > +void mp_dmabuf_devmem_destroy(struct page_pool *pool); > + > +bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem); > +#else > +static inline int mp_dmabuf_devmem_init(struct page_pool *pool) > +{ > + return -EOPNOTSUPP; > +} > + > +static inline netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, > + gfp_t gfp) Please break the lines after the return type if the line gets long: static inline netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp) Please fix where you can (at least where it causes going over 80 chars) > +{ > + return 0; > +} > + > +static inline void mp_dmabuf_devmem_destroy(struct page_pool *pool) > +{ > +} > + > +static inline bool mp_dmabuf_devmem_release_page(struct page_pool *pool, > + netmem_ref netmem) > +{ > + return false; > +} > +#endif > + > +#endif /* _NET_MP_DMABUF_DEVMEM_H */ > diff --git a/include/net/netmem.h 
b/include/net/netmem.h > index ac6c7945117b..61400d4b0d66 100644 > --- a/include/net/netmem.h > +++ b/include/net/netmem.h > @@ -8,6 +8,7 @@ > #ifndef _NET_NETMEM_H > #define _NET_NETMEM_H > > +#include <linux/mm.h> > #include <net/devmem.h> > #include <net/net_debug.h> > > diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h > index 4afd6dd56351..1b4698710f25 100644 > --- a/include/net/page_pool/types.h > +++ b/include/net/page_pool/types.h > @@ -20,8 +20,17 @@ > * device driver responsibility > */ > #define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */ > +#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3) /* Allow unreadable (net_iov > + * backed) netmem in this > + * page_pool. Drivers setting > + * this must be able to support > + * unreadable netmem, where > + * netmem_address() would return > + * NULL. This flag should not be > + * set for header page_pools. > + */ Maybe move the comment before the define: /* Allow unreadable (net_iov backed) netmem in this page_pool. Drivers setting * this must be able to support unreadable netmem, where netmem_address() would * return NULL. This flag should not be set for header page_pools. */ #define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3) ? up to you. > #define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \ > - PP_FLAG_SYSTEM_POOL) > + PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM) > > /* > * Fast allocation side cache array/stack > @@ -57,7 +66,9 @@ struct pp_alloc_cache { > * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV > * @slow: params with slowpath access only (initialization and Netlink) > * @netdev: netdev this pool will serve (leave as NULL if none or multiple) > - * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL > + * @queue: struct netdev_rx_queue this page_pool is being created for. > + * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL, > + * PP_FLAG_ALLOW_UNREADABLE_NETMEM. 
> */ > struct page_pool_params { > struct_group_tagged(page_pool_params_fast, fast, > @@ -72,6 +83,7 @@ struct page_pool_params { > ); > struct_group_tagged(page_pool_params_slow, slow, > struct net_device *netdev; > + struct netdev_rx_queue *queue; Why set a pointer? It should work but drivers don't usually deal with netdev_rx_queue struct directly. struct xdp_rxq_info takes an integer queue id, and it serves a somewhat similar function. Keep in mind that there will be more drivers than core code, so convenience for them matters more. > unsigned int flags; > /* private: used by test code only */ > void (*init_callback)(netmem_ref netmem, void *arg); > diff --git a/net/core/devmem.c b/net/core/devmem.c > index 727e5ee39f30..c8c112360caa 100644 > --- a/net/core/devmem.c > +++ b/net/core/devmem.c > @@ -13,6 +13,7 @@ > #include <linux/netdevice.h> > #include <linux/types.h> > #include <net/devmem.h> > +#include <net/mp_dmabuf_devmem.h> > #include <net/netdev_queues.h> > #include <net/netdev_rx_queue.h> > #include <net/page_pool/helpers.h> > @@ -320,3 +321,68 @@ void dev_dmabuf_uninstall(struct net_device *dev) > } > } > } > + > +/*** "Dmabuf devmem memory provider" ***/ > + > +int mp_dmabuf_devmem_init(struct page_pool *pool) > +{ > + struct net_devmem_dmabuf_binding *binding = pool->mp_priv; > + > + if (!binding) > + return -EINVAL; > + > + if (!pool->dma_map) > + return -EOPNOTSUPP; > + > + if (pool->dma_sync) > + return -EOPNOTSUPP; > + > + if (pool->p.order != 0) > + return -E2BIG; > + > + net_devmem_dmabuf_binding_get(binding); > + return 0; > +} > + > +netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp) > +{ > + struct net_devmem_dmabuf_binding *binding = pool->mp_priv; > + netmem_ref netmem; > + struct net_iov *niov; nit: reverse xmas tree > + niov = net_devmem_alloc_dmabuf(binding); > + if (!niov) > + return 0; > + > + netmem = net_iov_to_netmem(niov); > + > + page_pool_set_pp_info(pool, netmem); > + > + pool->pages_state_hold_cnt++; > 
+ trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); > + return netmem; > +} > + > +void mp_dmabuf_devmem_destroy(struct page_pool *pool) > +{ > + struct net_devmem_dmabuf_binding *binding = pool->mp_priv; > + > + net_devmem_dmabuf_binding_put(binding); > +} > + > +bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem) > +{ > + if (WARN_ON_ONCE(!netmem_is_net_iov(netmem))) > + return false; > + > + if (WARN_ON_ONCE(atomic_long_read(netmem_get_pp_ref_count_ref(netmem)) != > + 1)) something needs factoring out here, to make this line shorter, please.. either netmem -> net_iov conversion or at least reading of the ref count? > + return false; > + > + page_pool_clear_pp_info(netmem); > + > + net_devmem_free_dmabuf(netmem_to_net_iov(netmem)); > + > + /* We don't want the page pool put_page()ing our net_iovs. */ > + return false; > +} > diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c > index da11720a5983..e217a5838c87 100644 > --- a/net/core/netdev_rx_queue.c > +++ b/net/core/netdev_rx_queue.c > @@ -4,8 +4,11 @@ > #include <net/netdev_queues.h> > #include <net/netdev_rx_queue.h> > > +#include "page_pool_priv.h" > + > int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) > { > + struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx); > void *new_mem, *old_mem; > int err; > > @@ -31,6 +34,10 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) > if (err) > goto err_free_old_mem; > > + err = page_pool_check_memory_provider(dev, rxq); > + if (err) > + goto err_free_new_queue_mem; > + > err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx); > if (err) > goto err_free_new_queue_mem; > diff --git a/net/core/page_pool.c b/net/core/page_pool.c > index 52659db2d765..6e24950f2be4 100644 > --- a/net/core/page_pool.c > +++ b/net/core/page_pool.c > @@ -11,6 +11,8 @@ > #include <linux/slab.h> > #include <linux/device.h> > > +#include <net/mp_dmabuf_devmem.h> > 
+#include <net/netdev_rx_queue.h> > #include <net/page_pool/helpers.h> > #include <net/xdp.h> > > @@ -190,6 +192,7 @@ static int page_pool_init(struct page_pool *pool, > int cpuid) > { > unsigned int ring_qsize = 1024; /* Default */ > + int err; > > page_pool_struct_check(); > > @@ -271,7 +274,36 @@ static int page_pool_init(struct page_pool *pool, > if (pool->dma_map) > get_device(pool->p.dev); > > + if (pool->slow.queue && > + pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { > + /* We rely on rtnl_lock()ing to make sure netdev_rx_queue > + * configuration doesn't change while we're initializing the nit: 'the' to next line > + * page_pool. > + */ > + ASSERT_RTNL(); > + pool->mp_priv = pool->slow.queue->mp_params.mp_priv; > + }
On Tue, Sep 3, 2024 at 2:19 PM Jakub Kicinski <kuba@kernel.org> wrote: > > On Sat, 31 Aug 2024 00:43:06 +0000 Mina Almasry wrote: > > diff --git a/include/net/mp_dmabuf_devmem.h b/include/net/mp_dmabuf_devmem.h > > new file mode 100644 > > index 000000000000..6d1cf2a77f6b > > --- /dev/null > > +++ b/include/net/mp_dmabuf_devmem.h > > this header can live under net/core/ like netmem_priv.h right? > devmem internals should be of no interest outside of core networking. > Yes, those can be moved under net/core trivially. done. > In fact the same is true for include/net/devmem.h ? > This turned out to be possible, but with a minor moving around of some helpers. Basically netmem.h included devmem.h to get access to some devmem internals for some of the net_iov helpers specific to devmem. Moving these helpers to devmem.h enabled me to keep include/net/netmem.h but put devmem.h under net/core. Now netmem.h doesn't need to include devmem.h. I think this is an improvement. > > +static inline netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, > > + gfp_t gfp) > > Please break the lines after the return type if the line gets long: > > static inline netmem_ref > mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp) > > Please fix where you can (at least where it causes going over 80 chars) > FWIW I use a formatting tool (clang-format) which seems to prefer breaking in between the args, but I'll fix this manually and wherever else I notice. > > struct_group_tagged(page_pool_params_slow, slow, > > struct net_device *netdev; > > + struct netdev_rx_queue *queue; > > Why set a pointer? It should work but drivers don't usually deal with > netdev_rx_queue struct directly. struct xdp_rxq_info takes an integer > queue id, and it serves a somewhat similar function. > > Keep in mind that there will be more drivers than core code, so > convenience for them matters more. > Makes sense. 
> > +bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem) > > +{ > > + if (WARN_ON_ONCE(!netmem_is_net_iov(netmem))) > > + return false; > > + > > + if (WARN_ON_ONCE(atomic_long_read(netmem_get_pp_ref_count_ref(netmem)) != > > + 1)) > > something needs factoring out here, to make this line shorter, please.. > either netmem -> net_iov conversion or at least reading of the ref > count? > Ah, sorry I think you pointed this out earlier and I missed applying it. Should be done in the next iteration. -- Thanks, Mina
diff --git a/include/net/mp_dmabuf_devmem.h b/include/net/mp_dmabuf_devmem.h new file mode 100644 index 000000000000..6d1cf2a77f6b --- /dev/null +++ b/include/net/mp_dmabuf_devmem.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Dmabuf device memory provider. + * + * Authors: Mina Almasry <almasrymina@google.com> + * + */ +#ifndef _NET_MP_DMABUF_DEVMEM_H +#define _NET_MP_DMABUF_DEVMEM_H + +#include <net/netmem.h> + +#if defined(CONFIG_NET_DEVMEM) +int mp_dmabuf_devmem_init(struct page_pool *pool); + +netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp); + +void mp_dmabuf_devmem_destroy(struct page_pool *pool); + +bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem); +#else +static inline int mp_dmabuf_devmem_init(struct page_pool *pool) +{ + return -EOPNOTSUPP; +} + +static inline netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, + gfp_t gfp) +{ + return 0; +} + +static inline void mp_dmabuf_devmem_destroy(struct page_pool *pool) +{ +} + +static inline bool mp_dmabuf_devmem_release_page(struct page_pool *pool, + netmem_ref netmem) +{ + return false; +} +#endif + +#endif /* _NET_MP_DMABUF_DEVMEM_H */ diff --git a/include/net/netmem.h b/include/net/netmem.h index ac6c7945117b..61400d4b0d66 100644 --- a/include/net/netmem.h +++ b/include/net/netmem.h @@ -8,6 +8,7 @@ #ifndef _NET_NETMEM_H #define _NET_NETMEM_H +#include <linux/mm.h> #include <net/devmem.h> #include <net/net_debug.h> diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 4afd6dd56351..1b4698710f25 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -20,8 +20,17 @@ * device driver responsibility */ #define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */ +#define PP_FLAG_ALLOW_UNREADABLE_NETMEM BIT(3) /* Allow unreadable (net_iov + * backed) netmem in this + * page_pool. 
Drivers setting + * this must be able to support + * unreadable netmem, where + * netmem_address() would return + * NULL. This flag should not be + * set for header page_pools. + */ #define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \ - PP_FLAG_SYSTEM_POOL) + PP_FLAG_SYSTEM_POOL | PP_FLAG_ALLOW_UNREADABLE_NETMEM) /* * Fast allocation side cache array/stack @@ -57,7 +66,9 @@ struct pp_alloc_cache { * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV * @slow: params with slowpath access only (initialization and Netlink) * @netdev: netdev this pool will serve (leave as NULL if none or multiple) - * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL + * @queue: struct netdev_rx_queue this page_pool is being created for. + * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_SYSTEM_POOL, + * PP_FLAG_ALLOW_UNREADABLE_NETMEM. */ struct page_pool_params { struct_group_tagged(page_pool_params_fast, fast, @@ -72,6 +83,7 @@ struct page_pool_params { ); struct_group_tagged(page_pool_params_slow, slow, struct net_device *netdev; + struct netdev_rx_queue *queue; unsigned int flags; /* private: used by test code only */ void (*init_callback)(netmem_ref netmem, void *arg); diff --git a/net/core/devmem.c b/net/core/devmem.c index 727e5ee39f30..c8c112360caa 100644 --- a/net/core/devmem.c +++ b/net/core/devmem.c @@ -13,6 +13,7 @@ #include <linux/netdevice.h> #include <linux/types.h> #include <net/devmem.h> +#include <net/mp_dmabuf_devmem.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> @@ -320,3 +321,68 @@ void dev_dmabuf_uninstall(struct net_device *dev) } } } + +/*** "Dmabuf devmem memory provider" ***/ + +int mp_dmabuf_devmem_init(struct page_pool *pool) +{ + struct net_devmem_dmabuf_binding *binding = pool->mp_priv; + + if (!binding) + return -EINVAL; + + if (!pool->dma_map) + return -EOPNOTSUPP; + + if (pool->dma_sync) + return -EOPNOTSUPP; + + if (pool->p.order != 0) + return -E2BIG; + + 
net_devmem_dmabuf_binding_get(binding); + return 0; +} + +netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp) +{ + struct net_devmem_dmabuf_binding *binding = pool->mp_priv; + netmem_ref netmem; + struct net_iov *niov; + + niov = net_devmem_alloc_dmabuf(binding); + if (!niov) + return 0; + + netmem = net_iov_to_netmem(niov); + + page_pool_set_pp_info(pool, netmem); + + pool->pages_state_hold_cnt++; + trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); + return netmem; +} + +void mp_dmabuf_devmem_destroy(struct page_pool *pool) +{ + struct net_devmem_dmabuf_binding *binding = pool->mp_priv; + + net_devmem_dmabuf_binding_put(binding); +} + +bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem) +{ + if (WARN_ON_ONCE(!netmem_is_net_iov(netmem))) + return false; + + if (WARN_ON_ONCE(atomic_long_read(netmem_get_pp_ref_count_ref(netmem)) != + 1)) + return false; + + page_pool_clear_pp_info(netmem); + + net_devmem_free_dmabuf(netmem_to_net_iov(netmem)); + + /* We don't want the page pool put_page()ing our net_iovs. 
*/ + return false; +} diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c index da11720a5983..e217a5838c87 100644 --- a/net/core/netdev_rx_queue.c +++ b/net/core/netdev_rx_queue.c @@ -4,8 +4,11 @@ #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> +#include "page_pool_priv.h" + int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) { + struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx); void *new_mem, *old_mem; int err; @@ -31,6 +34,10 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) if (err) goto err_free_old_mem; + err = page_pool_check_memory_provider(dev, rxq); + if (err) + goto err_free_new_queue_mem; + err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx); if (err) goto err_free_new_queue_mem; diff --git a/net/core/page_pool.c b/net/core/page_pool.c index 52659db2d765..6e24950f2be4 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -11,6 +11,8 @@ #include <linux/slab.h> #include <linux/device.h> +#include <net/mp_dmabuf_devmem.h> +#include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> #include <net/xdp.h> @@ -190,6 +192,7 @@ static int page_pool_init(struct page_pool *pool, int cpuid) { unsigned int ring_qsize = 1024; /* Default */ + int err; page_pool_struct_check(); @@ -271,7 +274,36 @@ static int page_pool_init(struct page_pool *pool, if (pool->dma_map) get_device(pool->p.dev); + if (pool->slow.queue && + pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { + /* We rely on rtnl_lock()ing to make sure netdev_rx_queue + * configuration doesn't change while we're initializing the + * page_pool. 
+ */ + ASSERT_RTNL(); + pool->mp_priv = pool->slow.queue->mp_params.mp_priv; + } + + if (pool->mp_priv) { + err = mp_dmabuf_devmem_init(pool); + if (err) { + pr_warn("%s() mem-provider init failed %d\n", __func__, + err); + goto free_ptr_ring; + } + + static_branch_inc(&page_pool_mem_providers); + } + return 0; + +free_ptr_ring: + ptr_ring_cleanup(&pool->ring, NULL); +#ifdef CONFIG_PAGE_POOL_STATS + if (!pool->system) + free_percpu(pool->recycle_stats); +#endif + return err; } static void page_pool_uninit(struct page_pool *pool) @@ -455,28 +487,6 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem) return false; } -static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) -{ - netmem_set_pp(netmem, pool); - netmem_or_pp_magic(netmem, PP_SIGNATURE); - - /* Ensuring all pages have been split into one fragment initially: - * page_pool_set_pp_info() is only called once for every page when it - * is allocated from the page allocator and page_pool_fragment_page() - * is dirtying the same cache line as the page->pp_magic above, so - * the overhead is negligible. 
- */ - page_pool_fragment_netmem(netmem, 1); - if (pool->has_init_callback) - pool->slow.init_callback(netmem, pool->slow.init_arg); -} - -static void page_pool_clear_pp_info(netmem_ref netmem) -{ - netmem_clear_pp_magic(netmem); - netmem_set_pp(netmem, NULL); -} - static struct page *__page_pool_alloc_page_order(struct page_pool *pool, gfp_t gfp) { @@ -572,7 +582,10 @@ netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp) return netmem; /* Slow-path: cache empty, do real allocation */ - netmem = __page_pool_alloc_pages_slow(pool, gfp); + if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv) + netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp); + else + netmem = __page_pool_alloc_pages_slow(pool, gfp); return netmem; } EXPORT_SYMBOL(page_pool_alloc_netmem); @@ -608,6 +621,28 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict) return inflight; } +void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) +{ + netmem_set_pp(netmem, pool); + netmem_or_pp_magic(netmem, PP_SIGNATURE); + + /* Ensuring all pages have been split into one fragment initially: + * page_pool_set_pp_info() is only called once for every page when it + * is allocated from the page allocator and page_pool_fragment_page() + * is dirtying the same cache line as the page->pp_magic above, so + * the overhead is negligible. 
+ */ + page_pool_fragment_netmem(netmem, 1); + if (pool->has_init_callback) + pool->slow.init_callback(netmem, pool->slow.init_arg); +} + +void page_pool_clear_pp_info(netmem_ref netmem) +{ + netmem_clear_pp_magic(netmem); + netmem_set_pp(netmem, NULL); +} + static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, netmem_ref netmem) { @@ -636,8 +671,13 @@ static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) { int count; + bool put; - __page_pool_release_page_dma(pool, netmem); + put = true; + if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv) + put = mp_dmabuf_devmem_release_page(pool, netmem); + else + __page_pool_release_page_dma(pool, netmem); /* This may be the last page returned, releasing the pool, so * it is not safe to reference pool afterwards. @@ -645,8 +685,10 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); trace_page_pool_state_release(pool, netmem, count); - page_pool_clear_pp_info(netmem); - put_page(netmem_to_page(netmem)); + if (put) { + page_pool_clear_pp_info(netmem); + put_page(netmem_to_page(netmem)); + } /* An optimization would be to call __free_pages(page, pool->p.order) * knowing page is not part of page-cache (thus avoiding a * __page_cache_release() call). 
@@ -965,6 +1007,12 @@ static void __page_pool_destroy(struct page_pool *pool) page_pool_unlist(pool); page_pool_uninit(pool); + + if (pool->mp_priv) { + mp_dmabuf_devmem_destroy(pool); + static_branch_dec(&page_pool_mem_providers); + } + kfree(pool); } diff --git a/net/core/page_pool_priv.h b/net/core/page_pool_priv.h index 2142caeddb7c..f90171dc477c 100644 --- a/net/core/page_pool_priv.h +++ b/net/core/page_pool_priv.h @@ -35,4 +35,24 @@ static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr) return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr); } +#if defined(CONFIG_PAGE_POOL) +void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem); +void page_pool_clear_pp_info(netmem_ref netmem); +int page_pool_check_memory_provider(struct net_device *dev, + struct netdev_rx_queue *rxq); +#else +static inline void page_pool_set_pp_info(struct page_pool *pool, + netmem_ref netmem) +{ +} +static inline void page_pool_clear_pp_info(netmem_ref netmem) +{ +} +static inline int page_pool_check_memory_provider(struct net_device *dev, + struct netdev_rx_queue *rxq) +{ + return 0; +} +#endif + #endif diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c index 3a3277ba167b..ce5167eb5548 100644 --- a/net/core/page_pool_user.c +++ b/net/core/page_pool_user.c @@ -4,8 +4,9 @@ #include <linux/netdevice.h> #include <linux/xarray.h> #include <net/net_debug.h> -#include <net/page_pool/types.h> +#include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> +#include <net/page_pool/types.h> #include <net/sock.h> #include "page_pool_priv.h" @@ -344,6 +345,30 @@ void page_pool_unlist(struct page_pool *pool) mutex_unlock(&page_pools_lock); } +int page_pool_check_memory_provider(struct net_device *dev, + struct netdev_rx_queue *rxq) +{ + struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv; + struct page_pool *pool; + struct hlist_node *n; + + if (!binding) + return 0; + + mutex_lock(&page_pools_lock); + 
hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) { + if (pool->mp_priv != binding) + continue; + + if (pool->slow.queue == rxq) { + mutex_unlock(&page_pools_lock); + return 0; + } + } + mutex_unlock(&page_pools_lock); + return -ENODATA; +} + static void page_pool_unreg_netdev_wipe(struct net_device *netdev) { struct page_pool *pool;