
[RFC,net-next,v1,1/2] net: mirror skb frag ref/unref helpers

Message ID 20240306235922.282781-2-almasrymina@google.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series Minor cleanups to skb frag ref/unref

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 5835 this patch: 4071
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 4 of 4 maintainers
netdev/build_clang fail Errors and warnings before: 2069 this patch: 1102
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 6150 this patch: 4534
netdev/checkpatch warning CHECK: Blank lines aren't necessary after an open brace '{'
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Mina Almasry March 6, 2024, 11:59 p.m. UTC
Refactor some of the skb frag ref/unref helpers for improved clarity.

Implement napi_pp_get_page() to be the mirror counterpart of
napi_pp_put_page().

Implement napi_frag_ref() to be the mirror counterpart of
napi_frag_unref().

Improve __skb_frag_ref() to become a mirror counterpart of
__skb_frag_unref(). Previously unref could handle pp & non-pp pages,
while the ref could only handle non-pp pages. Now both the ref & unref
helpers can correctly handle both pp & non-pp pages.

Now that __skb_frag_ref() can handle both pp & non-pp pages, remove
skb_pp_frag_ref(), and use __skb_frag_ref() instead.  This lets us
remove pp specific handling from skb_try_coalesce.

Signed-off-by: Mina Almasry <almasrymina@google.com>

---
 include/linux/skbuff.h | 24 +++++++++++++++---
 net/core/skbuff.c      | 56 ++++++++++++++----------------------------
 2 files changed, 39 insertions(+), 41 deletions(-)
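
In short, the caller-side change (signatures taken from the hunks below; the
before/after snippet and the frag/skb names are illustrative only):

	/* Before: __skb_frag_ref() only took a plain page reference. */
	__skb_frag_ref(frag);

	/* After: callers pass the parent skb's pp_recycle flag; the helper
	 * takes a page_pool fragment reference for pp pages and falls back
	 * to get_page() otherwise.
	 */
	__skb_frag_ref(frag, skb->pp_recycle);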

Comments

Dragos Tatulea March 7, 2024, 10:44 a.m. UTC | #1
On Wed, 2024-03-06 at 15:59 -0800, Mina Almasry wrote:
> Refactor some of the skb frag ref/unref helpers for improved clarity.
> 
> Implement napi_pp_get_page() to be the mirror counterpart of
> napi_pp_put_page().
> 
> Implement napi_frag_ref() to be the mirror counterpart of
> napi_frag_unref().
> 
> Improve __skb_frag_ref() to become a mirror counterpart of
> __skb_frag_unref(). Previously unref could handle pp & non-pp pages,
> while the ref could only handle non-pp pages. Now both the ref & unref
> helpers can correctly handle both pp & non-pp pages.
> 
> Now that __skb_frag_ref() can handle both pp & non-pp pages, remove
> skb_pp_frag_ref(), and use __skb_frag_ref() instead.  This lets us
> remove pp specific handling from skb_try_coalesce.
> 
> Signed-off-by: Mina Almasry <almasrymina@google.com>
> 
> ---
>  include/linux/skbuff.h | 24 +++++++++++++++---
>  net/core/skbuff.c      | 56 ++++++++++++++----------------------------
>  2 files changed, 39 insertions(+), 41 deletions(-)
> 
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index d577e0bee18d..51316b0e20bc 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -3477,15 +3477,31 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag)
>  	return netmem_to_page(frag->netmem);
>  }
>  
> +bool napi_pp_get_page(struct page *page);
> +
> +static inline void napi_frag_ref(skb_frag_t *frag, bool recycle)
> +{
> +#ifdef CONFIG_PAGE_POOL
> +	struct page *page = skb_frag_page(frag);
> +
Move the assignment out of the ifdef, otherwise page is not declared when
CONFIG_PAGE_POOL is disabled.
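Something like the following sketch (illustrative only; just the declaration
moves out, the pp-specific check stays under the ifdef):

static inline void napi_frag_ref(skb_frag_t *frag, bool recycle)
{
	struct page *page = skb_frag_page(frag);

#ifdef CONFIG_PAGE_POOL
	/* pp-aware path: take a page_pool fragment reference if possible */
	if (recycle && napi_pp_get_page(page))
		return;
#endif
	/* fallback for non-pp pages (and for !CONFIG_PAGE_POOL builds) */
	get_page(page);
}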

> +	if (recycle && napi_pp_get_page(page))
> +		return;
> +#endif
> +	get_page(page);
> +}
> +
>  /**
>   * __skb_frag_ref - take an addition reference on a paged fragment.
>   * @frag: the paged fragment
> + * @recycle: skb->pp_recycle param of the parent skb.
>   *
> - * Takes an additional reference on the paged fragment @frag.
> + * Takes an additional reference on the paged fragment @frag. Obtains the
> + * correct reference count depending on whether skb->pp_recycle is set and
> + * whether the frag is a page pool frag.
>   */
> -static inline void __skb_frag_ref(skb_frag_t *frag)
> +static inline void __skb_frag_ref(skb_frag_t *frag, bool recycle)
>  {
> -	get_page(skb_frag_page(frag));
> +	napi_frag_ref(frag, recycle);
>  }
>  
>  /**
> @@ -3497,7 +3513,7 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
>   */
>  static inline void skb_frag_ref(struct sk_buff *skb, int f)
>  {
> -	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
> +	__skb_frag_ref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
>  }
>  
>  int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 1f918e602bc4..6d234faa9d9e 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -1006,6 +1006,21 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
>  EXPORT_SYMBOL(skb_cow_data_for_xdp);
>  
>  #if IS_ENABLED(CONFIG_PAGE_POOL)
> +bool napi_pp_get_page(struct page *page)
> +{
> +
> +	struct page *head_page;
> +
> +	head_page = compound_head(page);
> +
> +	if (!is_pp_page(page))
> +		return false;
> +
> +	page_pool_ref_page(head_page);
> +	return true;
> +}
> +EXPORT_SYMBOL(napi_pp_get_page);
> +
>  bool napi_pp_put_page(struct page *page, bool napi_safe)
>  {
>  	bool allow_direct = false;
> @@ -1058,37 +1073,6 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
>  	return napi_pp_put_page(virt_to_page(data), napi_safe);
>  }
>  
> -/**
> - * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
> - * @skb:	page pool aware skb
> - *
> - * Increase the fragment reference count (pp_ref_count) of a skb. This is
> - * intended to gain fragment references only for page pool aware skbs,
> - * i.e. when skb->pp_recycle is true, and not for fragments in a
> - * non-pp-recycling skb. It has a fallback to increase references on normal
> - * pages, as page pool aware skbs may also have normal page fragments.
> - */
> -static int skb_pp_frag_ref(struct sk_buff *skb)
> -{
> -	struct skb_shared_info *shinfo;
> -	struct page *head_page;
> -	int i;
> -
> -	if (!skb->pp_recycle)
> -		return -EINVAL;
> -
> -	shinfo = skb_shinfo(skb);
> -
> -	for (i = 0; i < shinfo->nr_frags; i++) {
> -		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
> -		if (likely(is_pp_page(head_page)))
> -			page_pool_ref_page(head_page);
> -		else
> -			page_ref_inc(head_page);
> -	}
> -	return 0;
> -}
> -
>  static void skb_kfree_head(void *head, unsigned int end_offset)
>  {
>  	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
> @@ -4199,7 +4183,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
>  			to++;
>  
>  		} else {
> -			__skb_frag_ref(fragfrom);
> +			__skb_frag_ref(fragfrom, skb->pp_recycle);
>  			skb_frag_page_copy(fragto, fragfrom);
>  			skb_frag_off_copy(fragto, fragfrom);
>  			skb_frag_size_set(fragto, todo);
> @@ -4849,7 +4833,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
>  			}
>  
>  			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
> -			__skb_frag_ref(nskb_frag);
> +			__skb_frag_ref(nskb_frag, nskb->pp_recycle);
>  			size = skb_frag_size(nskb_frag);
>  
>  			if (pos < offset) {
> @@ -5980,10 +5964,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
>  	/* if the skb is not cloned this does nothing
>  	 * since we set nr_frags to 0.
>  	 */
> -	if (skb_pp_frag_ref(from)) {
> -		for (i = 0; i < from_shinfo->nr_frags; i++)
> -			__skb_frag_ref(&from_shinfo->frags[i]);
> -	}
> +	for (i = 0; i < from_shinfo->nr_frags; i++)
> +		__skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);
>  
>  	to->truesize += delta;
>  	to->len += len;
Yunsheng Lin March 7, 2024, 12:28 p.m. UTC | #2
On 2024/3/7 7:59, Mina Almasry wrote:

...

>  
>  int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index 1f918e602bc4..6d234faa9d9e 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -1006,6 +1006,21 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
>  EXPORT_SYMBOL(skb_cow_data_for_xdp);
>  
>  #if IS_ENABLED(CONFIG_PAGE_POOL)
> +bool napi_pp_get_page(struct page *page)
> +{
> +
> +	struct page *head_page;
> +
> +	head_page = compound_head(page);
> +
> +	if (!is_pp_page(page))

I would use head_page for is_pp_page(). I am not sure it matters
that much, but I believe that is the precedent.

Maybe do the below and remove the head_page variable:
page = compound_head(page);
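
i.e. a sketch of the helper with both suggestions applied (is_pp_page()
checked on the compound head, head_page variable dropped; illustrative only):

bool napi_pp_get_page(struct page *page)
{
	/* operate on the compound head throughout */
	page = compound_head(page);

	if (!is_pp_page(page))
		return false;

	page_pool_ref_page(page);
	return true;
}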

> +		return false;
> +
> +	page_pool_ref_page(head_page);
> +	return true;
> +}
> +EXPORT_SYMBOL(napi_pp_get_page);
> +

...

> -
>  static void skb_kfree_head(void *head, unsigned int end_offset)
>  {
>  	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
> @@ -4199,7 +4183,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
>  			to++;
>  
>  		} else {
> -			__skb_frag_ref(fragfrom);
> +			__skb_frag_ref(fragfrom, skb->pp_recycle);
>  			skb_frag_page_copy(fragto, fragfrom);
>  			skb_frag_off_copy(fragto, fragfrom);
>  			skb_frag_size_set(fragto, todo);
> @@ -4849,7 +4833,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
>  			}
>  
>  			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
> -			__skb_frag_ref(nskb_frag);
> +			__skb_frag_ref(nskb_frag, nskb->pp_recycle);
>  			size = skb_frag_size(nskb_frag);
>  
>  			if (pos < offset) {
> @@ -5980,10 +5964,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
>  	/* if the skb is not cloned this does nothing
>  	 * since we set nr_frags to 0.
>  	 */
> -	if (skb_pp_frag_ref(from)) {
I guess it is worth mentioning that skb->pp_recycle was only checked once
before, while it is checked for every frag after this patch.

> -		for (i = 0; i < from_shinfo->nr_frags; i++)
> -			__skb_frag_ref(&from_shinfo->frags[i]);
> -	}
> +	for (i = 0; i < from_shinfo->nr_frags; i++)
> +		__skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);
>  
>  	to->truesize += delta;
>  	to->len += len;
>

Patch

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d577e0bee18d..51316b0e20bc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3477,15 +3477,31 @@  static inline struct page *skb_frag_page(const skb_frag_t *frag)
 	return netmem_to_page(frag->netmem);
 }
 
+bool napi_pp_get_page(struct page *page);
+
+static inline void napi_frag_ref(skb_frag_t *frag, bool recycle)
+{
+#ifdef CONFIG_PAGE_POOL
+	struct page *page = skb_frag_page(frag);
+
+	if (recycle && napi_pp_get_page(page))
+		return;
+#endif
+	get_page(page);
+}
+
 /**
  * __skb_frag_ref - take an addition reference on a paged fragment.
  * @frag: the paged fragment
+ * @recycle: skb->pp_recycle param of the parent skb.
  *
- * Takes an additional reference on the paged fragment @frag.
+ * Takes an additional reference on the paged fragment @frag. Obtains the
+ * correct reference count depending on whether skb->pp_recycle is set and
+ * whether the frag is a page pool frag.
  */
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static inline void __skb_frag_ref(skb_frag_t *frag, bool recycle)
 {
-	get_page(skb_frag_page(frag));
+	napi_frag_ref(frag, recycle);
 }
 
 /**
@@ -3497,7 +3513,7 @@  static inline void __skb_frag_ref(skb_frag_t *frag)
  */
 static inline void skb_frag_ref(struct sk_buff *skb, int f)
 {
-	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+	__skb_frag_ref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
 }
 
 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1f918e602bc4..6d234faa9d9e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1006,6 +1006,21 @@  int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
 EXPORT_SYMBOL(skb_cow_data_for_xdp);
 
 #if IS_ENABLED(CONFIG_PAGE_POOL)
+bool napi_pp_get_page(struct page *page)
+{
+
+	struct page *head_page;
+
+	head_page = compound_head(page);
+
+	if (!is_pp_page(page))
+		return false;
+
+	page_pool_ref_page(head_page);
+	return true;
+}
+EXPORT_SYMBOL(napi_pp_get_page);
+
 bool napi_pp_put_page(struct page *page, bool napi_safe)
 {
 	bool allow_direct = false;
@@ -1058,37 +1073,6 @@  static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
 	return napi_pp_put_page(virt_to_page(data), napi_safe);
 }
 
-/**
- * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
- * @skb:	page pool aware skb
- *
- * Increase the fragment reference count (pp_ref_count) of a skb. This is
- * intended to gain fragment references only for page pool aware skbs,
- * i.e. when skb->pp_recycle is true, and not for fragments in a
- * non-pp-recycling skb. It has a fallback to increase references on normal
- * pages, as page pool aware skbs may also have normal page fragments.
- */
-static int skb_pp_frag_ref(struct sk_buff *skb)
-{
-	struct skb_shared_info *shinfo;
-	struct page *head_page;
-	int i;
-
-	if (!skb->pp_recycle)
-		return -EINVAL;
-
-	shinfo = skb_shinfo(skb);
-
-	for (i = 0; i < shinfo->nr_frags; i++) {
-		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
-		if (likely(is_pp_page(head_page)))
-			page_pool_ref_page(head_page);
-		else
-			page_ref_inc(head_page);
-	}
-	return 0;
-}
-
 static void skb_kfree_head(void *head, unsigned int end_offset)
 {
 	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
@@ -4199,7 +4183,7 @@  int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 			to++;
 
 		} else {
-			__skb_frag_ref(fragfrom);
+			__skb_frag_ref(fragfrom, skb->pp_recycle);
 			skb_frag_page_copy(fragto, fragfrom);
 			skb_frag_off_copy(fragto, fragfrom);
 			skb_frag_size_set(fragto, todo);
@@ -4849,7 +4833,7 @@  struct sk_buff *skb_segment(struct sk_buff *head_skb,
 			}
 
 			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
-			__skb_frag_ref(nskb_frag);
+			__skb_frag_ref(nskb_frag, nskb->pp_recycle);
 			size = skb_frag_size(nskb_frag);
 
 			if (pos < offset) {
@@ -5980,10 +5964,8 @@  bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	/* if the skb is not cloned this does nothing
 	 * since we set nr_frags to 0.
 	 */
-	if (skb_pp_frag_ref(from)) {
-		for (i = 0; i < from_shinfo->nr_frags; i++)
-			__skb_frag_ref(&from_shinfo->frags[i]);
-	}
+	for (i = 0; i < from_shinfo->nr_frags; i++)
+		__skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);
 
 	to->truesize += delta;
 	to->len += len;