
[01/24] netmem: Create new type

Message ID: 20221130220803.3657490-2-willy@infradead.org
State: New
Series: Split page pools from struct page

Commit Message

Matthew Wilcox Nov. 30, 2022, 10:07 p.m. UTC
As part of simplifying struct page, create a new netmem type which
mirrors the page_pool members in struct page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/net/page_pool.h | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
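
The point of the matching layout is that a page allocated by a page_pool can be viewed through either type. Below is a minimal sketch of how such a conversion could look; the page_netmem()/netmem_page() helper names are illustrative assumptions and are not introduced by this patch:

/*
 * Illustrative only: valid solely for pages owned by a page_pool,
 * where the page_pool fields of struct page are live and the member
 * offsets are guaranteed to line up by the static_asserts in this
 * patch.
 */
static inline struct netmem *page_netmem(struct page *page)
{
	return (struct netmem *)page;
}

static inline struct page *netmem_page(struct netmem *nmem)
{
	return (struct page *)nmem;
}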

Comments

Jesper Dangaard Brouer Dec. 5, 2022, 2:42 p.m. UTC | #1
On 30/11/2022 23.07, Matthew Wilcox (Oracle) wrote:
> As part of simplifying struct page, create a new netmem type which
> mirrors the page_pool members in struct page.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   include/net/page_pool.h | 41 +++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 41 insertions(+)
> 
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 813c93499f20..af6ff8c302a0 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -50,6 +50,47 @@
>   				 PP_FLAG_DMA_SYNC_DEV |\
>   				 PP_FLAG_PAGE_FRAG)
>   
> +/* page_pool used by netstack */

Can we improve the comment, making it more clear that this netmem struct
is mirroring/sharing/using part of struct page?

My proposal:

/* page_pool used by netstack mirrors/uses members in struct page */

> +struct netmem {
> +	unsigned long flags;		/* Page flags */
> +	/**
> +	 * @pp_magic: magic value to avoid recycling non
> +	 * page_pool allocated pages.
> +	 */
> +	unsigned long pp_magic;
> +	struct page_pool *pp;
> +	unsigned long _pp_mapping_pad;
> +	unsigned long dma_addr;
> +	union {
> +		/**
> +		 * dma_addr_upper: might require a 64-bit
> +		 * value on 32-bit architectures.
> +		 */
> +		unsigned long dma_addr_upper;
> +		/**
> +		 * For frag page support, not supported in
> +		 * 32-bit architectures with 64-bit DMA.
> +		 */
> +		atomic_long_t pp_frag_count;
> +	};
> +	atomic_t _mapcount;
> +	atomic_t _refcount;
> +};
> +
> +#define NETMEM_MATCH(pg, nm)						\
> +	static_assert(offsetof(struct page, pg) == offsetof(struct netmem, nm))
> +NETMEM_MATCH(flags, flags);
> +NETMEM_MATCH(lru, pp_magic);
> +NETMEM_MATCH(pp, pp);
> +NETMEM_MATCH(mapping, _pp_mapping_pad);
> +NETMEM_MATCH(dma_addr, dma_addr);
> +NETMEM_MATCH(dma_addr_upper, dma_addr_upper);
> +NETMEM_MATCH(pp_frag_count, pp_frag_count);
> +NETMEM_MATCH(_mapcount, _mapcount);
> +NETMEM_MATCH(_refcount, _refcount);
> +#undef NETMEM_MATCH
> +static_assert(sizeof(struct netmem) <= sizeof(struct page));
> +
>   /*
>    * Fast allocation side cache array/stack
>    *

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 813c93499f20..af6ff8c302a0 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -50,6 +50,47 @@ 
 				 PP_FLAG_DMA_SYNC_DEV |\
 				 PP_FLAG_PAGE_FRAG)
 
+/* page_pool used by netstack */
+struct netmem {
+	unsigned long flags;		/* Page flags */
+	/**
+	 * @pp_magic: magic value to avoid recycling non
+	 * page_pool allocated pages.
+	 */
+	unsigned long pp_magic;
+	struct page_pool *pp;
+	unsigned long _pp_mapping_pad;
+	unsigned long dma_addr;
+	union {
+		/**
+		 * dma_addr_upper: might require a 64-bit
+		 * value on 32-bit architectures.
+		 */
+		unsigned long dma_addr_upper;
+		/**
+		 * For frag page support, not supported in
+		 * 32-bit architectures with 64-bit DMA.
+		 */
+		atomic_long_t pp_frag_count;
+	};
+	atomic_t _mapcount;
+	atomic_t _refcount;
+};
+
+#define NETMEM_MATCH(pg, nm)						\
+	static_assert(offsetof(struct page, pg) == offsetof(struct netmem, nm))
+NETMEM_MATCH(flags, flags);
+NETMEM_MATCH(lru, pp_magic);
+NETMEM_MATCH(pp, pp);
+NETMEM_MATCH(mapping, _pp_mapping_pad);
+NETMEM_MATCH(dma_addr, dma_addr);
+NETMEM_MATCH(dma_addr_upper, dma_addr_upper);
+NETMEM_MATCH(pp_frag_count, pp_frag_count);
+NETMEM_MATCH(_mapcount, _mapcount);
+NETMEM_MATCH(_refcount, _refcount);
+#undef NETMEM_MATCH
+static_assert(sizeof(struct netmem) <= sizeof(struct page));
+
 /*
  * Fast allocation side cache array/stack
  *
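
The NETMEM_MATCH block is a compile-time layout check: every mirrored member must sit at the same offset in struct netmem as its counterpart in struct page, and the new struct must not be larger than struct page, so a netmem pointer can safely alias a page_pool-owned page. A standalone sketch of the same technique with toy structs (not kernel code; plain C11 static_assert, which requires an explicit message):

#include <stddef.h>	/* offsetof() */
#include <assert.h>	/* static_assert (C11) */

struct wide {			/* stand-in for struct page */
	unsigned long flags;
	void *owner;
	unsigned long payload;
	unsigned long extra[4];
};

struct narrow {			/* stand-in for struct netmem */
	unsigned long flags;
	void *owner;
	unsigned long payload;
};

/* Each mirrored member must sit at the same offset in both views. */
#define MATCH(w, n)							\
	static_assert(offsetof(struct wide, w) ==			\
		      offsetof(struct narrow, n),			\
		      #w " and " #n " must overlay")
MATCH(flags, flags);
MATCH(owner, owner);
MATCH(payload, payload);
#undef MATCH

/* The overlay must also fit within the larger structure. */
static_assert(sizeof(struct narrow) <= sizeof(struct wide),
	      "narrow view must not outgrow wide view");

int main(void)
{
	return 0;
}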