
net/core: use xx_zalloc instead of xx_alloc and memset

Message ID 1605687308-57318-1-git-send-email-tiantao6@hisilicon.com (mailing list archive)
State Rejected
Delegated to: Netdev Maintainers
Series net/core: use xx_zalloc instead of xx_alloc and memset

Checks

Context                         Check    Description
netdev/cover_letter             success  Link
netdev/fixes_present            success  Link
netdev/patch_count              success  Link
netdev/tree_selection           success  Guessed tree name to be net-next
netdev/subject_prefix           warning  Target tree name not specified in the subject
netdev/source_inline            success  Was 0 now: 0
netdev/verify_signedoff         success  Link
netdev/module_param             success  Was 0 now: 0
netdev/build_32bit              success  Errors and warnings before: 1 this patch: 1
netdev/kdoc                     success  Errors and warnings before: 0 this patch: 0
netdev/verify_fixes             success  Link
netdev/checkpatch               success  total: 0 errors, 0 warnings, 0 checks, 27 lines checked
netdev/build_allmodconfig_warn  success  Errors and warnings before: 1 this patch: 1
netdev/header_inline            success  Link
netdev/stable                   success  Stable not CCed

Commit Message

tiantao (H) Nov. 18, 2020, 8:15 a.m. UTC
use kmem_cache_zalloc instead of kmem_cache_alloc and memset.

Signed-off-by: Tian Tao <tiantao6@hisilicon.com>
---
 net/core/skbuff.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

Comments

Joe Perches Nov. 18, 2020, 8:31 a.m. UTC | #1
On Wed, 2020-11-18 at 16:15 +0800, Tian Tao wrote:
> use kmem_cache_zalloc instead of kmem_cache_alloc and memset.
[]
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
[]
> @@ -313,12 +313,10 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
>  {
>  	struct sk_buff *skb;
>  
> -	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
> +	skb = kmem_cache_zalloc(skbuff_head_cache, GFP_ATOMIC);
>  	if (unlikely(!skb))
>  		return NULL;
>  
> -	memset(skb, 0, offsetof(struct sk_buff, tail));
> -
>  	return __build_skb_around(skb, data, frag_size);
>  }
>  
> 
> @@ -6170,12 +6168,10 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
>   */
>  struct skb_ext *__skb_ext_alloc(gfp_t flags)
>  {
> -	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
> +	struct skb_ext *new = kmem_cache_zalloc(skbuff_ext_cache, flags);
>  
> -	if (new) {
> -		memset(new->offset, 0, sizeof(new->offset));
> +	if (new)
>  		refcount_set(&new->refcnt, 1);
> -	}
>  
>  	return new;
>  }

I think it'd be nicer to use the same form for both of these functions.
This could be:

struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new;

	new = kmem_cache_zalloc(skbuff_ext_cache, flags);
	if (unlikely(!new))
		return NULL;

	refcount_set(&new->refcnt, 1);

	return new;
}
Paolo Abeni Nov. 18, 2020, 10:47 a.m. UTC | #2
On Wed, 2020-11-18 at 16:15 +0800, Tian Tao wrote:
> use kmem_cache_zalloc instead of kmem_cache_alloc and memset.
> 
> Signed-off-by: Tian Tao <tiantao6@hisilicon.com>
> ---
>  net/core/skbuff.c | 10 +++-------
>  1 file changed, 3 insertions(+), 7 deletions(-)
> 
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index c9a5a3c..3449c1c 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -313,12 +313,10 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
>  {
>  	struct sk_buff *skb;
>  
> -	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
> +	skb = kmem_cache_zalloc(skbuff_head_cache, GFP_ATOMIC);

This will zero a slightly larger amount of data than the existing
code does: offsetof(struct sk_buff, tail) == 182, while sizeof(struct
sk_buff) == 224.
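
As a rough illustration of the point above, a minimal userspace sketch
(the toy layout is illustrative only, not the real struct sk_buff, so
the printed sizes will not match the 182/224 figures exactly):

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for struct sk_buff: only the shape matters here.
 * Fields before 'tail' must be zeroed at allocation time; 'tail'
 * and everything after it are set up by __build_skb_around(). */
struct toy_skb {
	long must_be_zeroed[22];
	unsigned int tail;	/* first field the memset() skips */
	long set_up_later[4];
};

int main(void)
{
	/* What the open-coded memset() clears today: */
	printf("memset():            %zu bytes\n",
	       offsetof(struct toy_skb, tail));
	/* What kmem_cache_zalloc() clears (the whole slab object): */
	printf("kmem_cache_zalloc(): %zu bytes\n",
	       sizeof(struct toy_skb));
	return 0;
}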

>  	if (unlikely(!skb))
>  		return NULL;
>  
> -	memset(skb, 0, offsetof(struct sk_buff, tail));

Additionally, the memset() removed here benefits from constant-argument
optimizations: the compiler can expand it inline because the length is
a compile-time constant.

Possibly not noticeable overall, but this code path is quite
performance-critical.
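
To illustrate the constant-length point, a sketch of the two situations
(userspace C, not kernel code; compiler behavior is typical, not
guaranteed):

#include <stddef.h>
#include <string.h>

struct hdr { long a, b, c, d; };

/* Constant length: compilers typically expand this inline into a
 * handful of store instructions instead of an out-of-line call. */
void zero_const(struct hdr *h)
{
	memset(h, 0, sizeof(*h));
}

/* Runtime length: this usually stays a real memset() call. This is
 * the position kmem_cache_zalloc() is in, since the object size
 * lives in the slab cache rather than being a compile-time constant
 * at the call site. */
void zero_runtime(void *p, size_t n)
{
	memset(p, 0, n);
}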

I would avoid the above.
> -
>  	return __build_skb_around(skb, data, frag_size);
>  }
>  
> @@ -6170,12 +6168,10 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
>   */
>  struct skb_ext *__skb_ext_alloc(gfp_t flags)
>  {
> -	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
> +	struct skb_ext *new = kmem_cache_zalloc(skbuff_ext_cache, flags);
>  
> -	if (new) {
> -		memset(new->offset, 0, sizeof(new->offset));

Similar to the above, but here the number of zeroed bytes additionally
grows a lot, and a few extra cache lines will be touched. The
performance impact is likely relevant.
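
Back-of-the-envelope arithmetic for the cache-line point (both sizes
below are hypothetical stand-ins, not the real skbuff_ext_cache
numbers):

#include <stdio.h>

#define CACHE_LINE		64
#define OFFSET_ARRAY_BYTES	4	/* ballpark for sizeof(new->offset) */
#define EXT_OBJECT_BYTES	256	/* made-up whole-object size */

int main(void)
{
	/* Cache lines touched by the old memset(new->offset, ...): */
	printf("memset(offset): %d cache line(s)\n",
	       (OFFSET_ARRAY_BYTES + CACHE_LINE - 1) / CACHE_LINE);
	/* Cache lines touched when zeroing the whole slab object: */
	printf("zalloc object:  %d cache line(s)\n",
	       (EXT_OBJECT_BYTES + CACHE_LINE - 1) / CACHE_LINE);
	return 0;
}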

Overall I think we should not do this.

Thanks,

Paolo

Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c9a5a3c..3449c1c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -313,12 +313,10 @@  struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
 	struct sk_buff *skb;
 
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_zalloc(skbuff_head_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
 
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-
 	return __build_skb_around(skb, data, frag_size);
 }
 
@@ -6170,12 +6168,10 @@  static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
  */
 struct skb_ext *__skb_ext_alloc(gfp_t flags)
 {
-	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
+	struct skb_ext *new = kmem_cache_zalloc(skbuff_ext_cache, flags);
 
-	if (new) {
-		memset(new->offset, 0, sizeof(new->offset));
+	if (new)
 		refcount_set(&new->refcnt, 1);
-	}
 
 	return new;
 }