[net-next,1/5] skbuff: rename fields of struct napi_alloc_cache to be more intuitive

Message ID: 20210111182801.12609-1-alobakin@pm.me
State: Changes Requested
Delegated to: Netdev Maintainers
Series: skbuff: introduce skbuff_heads bulking and reusing

Checks

Context                         Check    Description
netdev/cover_letter             success
netdev/fixes_present            success
netdev/patch_count              success
netdev/tree_selection           success  Clearly marked for net-next
netdev/subject_prefix           success
netdev/cc_maintainers           success  CCed 11 of 11 maintainers
netdev/source_inline            success  Was 0 now: 0
netdev/verify_signedoff         success
netdev/module_param             success  Was 0 now: 0
netdev/build_32bit              success  Errors and warnings before: 1 this patch: 1
netdev/kdoc                     success  Errors and warnings before: 0 this patch: 0
netdev/verify_fixes             success
netdev/checkpatch               success  total: 0 errors, 0 warnings, 0 checks, 51 lines checked
netdev/build_allmodconfig_warn  success  Errors and warnings before: 1 this patch: 1
netdev/header_inline            success
netdev/stable                   success  Stable not CCed

Commit Message

Alexander Lobakin Jan. 11, 2021, 6:28 p.m. UTC
The skb_cache and skb_count fields are used to store skbuff_heads
queued for freeing so that they can be flushed in bulk; they aren't
related to the allocation path. Give them more obvious names to improve
code readability and to allow expanding this struct with more
allocation-related elements.

Misc: indent the struct napi_alloc_cache declaration for better
readability.

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 net/core/skbuff.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
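
For readers outside the kernel tree, a minimal standalone C analogue of
the bulking scheme the commit message describes could look like the
sketch below. Here malloc()/free() stand in for the slab bulk API
(kmem_cache_free_bulk()), and every name in the sketch is illustrative
rather than actual kernel code.

#include <stdlib.h>

#define CACHE_SIZE 64	/* mirrors NAPI_SKB_CACHE_SIZE */

struct defer_cache {
	unsigned int count;		/* cf. flush_skb_count */
	void *slots[CACHE_SIZE];	/* cf. flush_skb_cache[] */
};

/* Queue one object for deferred freeing; once the array fills up,
 * free the whole batch at once (the kernel uses
 * kmem_cache_free_bulk() for this step). */
static void defer_free(struct defer_cache *dc, void *obj)
{
	dc->slots[dc->count++] = obj;

	if (dc->count == CACHE_SIZE) {
		for (unsigned int i = 0; i < CACHE_SIZE; i++)
			free(dc->slots[i]);
		dc->count = 0;
	}
}

/* Flush whatever is left over, cf. __kfree_skb_flush() at the end of
 * a softirq polling cycle. */
static void defer_flush(struct defer_cache *dc)
{
	while (dc->count)
		free(dc->slots[--dc->count]);
}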

Comments

Jonathan Lemon Jan. 11, 2021, 6:49 p.m. UTC | #1
On Mon, Jan 11, 2021 at 06:28:21PM +0000, Alexander Lobakin wrote:
> The skb_cache and skb_count fields are used to store skbuff_heads
> queued for freeing so that they can be flushed in bulk; they aren't
> related to the allocation path. Give them more obvious names to improve
> code readability and to allow expanding this struct with more
> allocation-related elements.

I don't think prefixing these with flush_ is the correct approach;
flush is just an operation on the structure, not a property of the
structure itself.  It especially becomes confusing in the later 
patches when the cache is used on the allocation path.
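
For concreteness, one spelling that would follow this objection, naming
the members after what they hold rather than after the operation
performed on them, might be the sketch below. It is only an
illustration, with stub definitions added so the fragment stands alone;
it is not what the posted patch does.

#include <stdint.h>

typedef uint32_t u32;			/* kernel type, stubbed */
struct page_frag_cache { void *va; };	/* stub; real layout not relevant */
#define NAPI_SKB_CACHE_SIZE 64

struct napi_alloc_cache {
	struct page_frag_cache	page;
	u32			defer_count;
	void			*defer_cache[NAPI_SKB_CACHE_SIZE];
};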
Alexander Lobakin Jan. 11, 2021, 9:03 p.m. UTC | #2
From: Jonathan Lemon <jonathan.lemon@gmail.com>
Date: Mon, 11 Jan 2021 10:49:45 -0800

> On Mon, Jan 11, 2021 at 06:28:21PM +0000, Alexander Lobakin wrote:
>> The skb_cache and skb_count fields are used to store skbuff_heads
>> queued for freeing so that they can be flushed in bulk; they aren't
>> related to the allocation path. Give them more obvious names to improve
>> code readability and to allow expanding this struct with more
>> allocation-related elements.
>
> I don't think prefixing these with flush_ is the correct approach;
> flush is just an operation on the structure, not a property of the
> structure itself.  It especially becomes confusing in the later
> patches when the cache is used on the allocation path.

Agreed, but I didn't come up with anything more fitting. Any
suggestions?

> --
> Jonathan

Thanks,
Al
Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7626a33cce59..17ae5e90f103 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -366,9 +366,9 @@  EXPORT_SYMBOL(build_skb_around);
 #define NAPI_SKB_CACHE_SIZE	64
 
 struct napi_alloc_cache {
-	struct page_frag_cache page;
-	unsigned int skb_count;
-	void *skb_cache[NAPI_SKB_CACHE_SIZE];
+	struct page_frag_cache	page;
+	u32			flush_skb_count;
+	void			*flush_skb_cache[NAPI_SKB_CACHE_SIZE];
 };
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
@@ -860,11 +860,11 @@  void __kfree_skb_flush(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	/* flush skb_cache if containing objects */
-	if (nc->skb_count) {
-		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+	/* flush flush_skb_cache if containing objects */
+	if (nc->flush_skb_count) {
+		kmem_cache_free_bulk(skbuff_head_cache, nc->flush_skb_count,
+				     nc->flush_skb_cache);
+		nc->flush_skb_count = 0;
 	}
 }
 
@@ -876,18 +876,18 @@  static inline void _kfree_skb_defer(struct sk_buff *skb)
 	skb_release_all(skb);
 
 	/* record skb to CPU local list */
-	nc->skb_cache[nc->skb_count++] = skb;
+	nc->flush_skb_cache[nc->flush_skb_count++] = skb;
 
 #ifdef CONFIG_SLUB
 	/* SLUB writes into objects when freeing */
 	prefetchw(skb);
 #endif
 
-	/* flush skb_cache if it is filled */
-	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+	/* flush flush_skb_cache if it is filled */
+	if (unlikely(nc->flush_skb_count == NAPI_SKB_CACHE_SIZE)) {
 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
-				     nc->skb_cache);
-		nc->skb_count = 0;
+				     nc->flush_skb_cache);
+		nc->flush_skb_count = 0;
 	}
 }
 void __kfree_skb_defer(struct sk_buff *skb)
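
To observe the batching behaviour end to end, the following small
self-contained userspace model mirrors the __kfree_skb_defer() /
__kfree_skb_flush() split: full batches are freed eagerly, and a final
flush handles the remainder. All names and sizes below are illustrative
only, with malloc()/free() again standing in for the slab cache.

#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE 64

static void *slots[CACHE_SIZE];
static unsigned int count;
static unsigned int batches;

/* cf. __kfree_skb_defer(): queue, and free eagerly on a full batch */
static void defer_free(void *obj)
{
	slots[count++] = obj;
	if (count == CACHE_SIZE) {
		while (count)
			free(slots[--count]);
		batches++;
	}
}

/* cf. __kfree_skb_flush(): free the partial batch at cycle end */
static void defer_flush(void)
{
	while (count)
		free(slots[--count]);
}

int main(void)
{
	/* model one polling cycle retiring 200 objects */
	for (int i = 0; i < 200; i++)
		defer_free(malloc(32));

	defer_flush();
	printf("full batches freed: %u, leftover after flush: %u\n",
	       batches, count);
	return 0;
}

Run as-is, this should report three full batches (192 objects) freed
eagerly and an empty cache after the final flush of the remaining
eight.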