
[net-next,4/5] skbuff: allocate skbuff_heads by bulks instead of one by one

Message ID 20210111182801.12609-4-alobakin@pm.me (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series skbuff: introduce skbuff_heads bulking and reusing

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for net-next
netdev/subject_prefix success Link
netdev/cc_maintainers success CCed 11 of 11 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 44 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 1 this patch: 1
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Alexander Lobakin Jan. 11, 2021, 6:29 p.m. UTC
Use the same napi_alloc_cache struct and the same approach as used
for bulk-freeing skbuff_heads to allocate them for new skbs.
The new skb_cache will store up to NAPI_SKB_CACHE_SIZE skbuff_heads
(currently 64, which equals NAPI_POLL_WEIGHT, so it can serve a whole
polling cycle) and will be refilled in bulks when it gets fully
depleted or after network softirqs complete.
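
For illustration only, here is a minimal userspace sketch of this cache
pattern (not kernel code): malloc()/free() stand in for
kmem_cache_alloc_bulk()/kmem_cache_free_bulk(), and the struct and
function names below are made up for the example, not taken from the
patch.

 #include <stdio.h>
 #include <stdlib.h>

 #define CACHE_SIZE 64	/* plays the role of NAPI_SKB_CACHE_SIZE */

 struct obj_cache {
 	unsigned int count;
 	void *slots[CACHE_SIZE];
 };

 /* Stand-in for kmem_cache_alloc_bulk(): allocate up to 'nr' objects
  * into 'out' and return how many were actually obtained.
  */
 static unsigned int bulk_alloc(unsigned int nr, void **out)
 {
 	unsigned int i;

 	for (i = 0; i < nr; i++) {
 		out[i] = malloc(256);	/* arbitrary object size */
 		if (!out[i])
 			break;
 	}

 	return i;
 }

 /* Allocation path: pop one object, refilling the whole cache by a
  * single bulk call when it is empty (same shape as the skb_cache path
  * added by this patch).
  */
 static void *cache_get(struct obj_cache *c)
 {
 	if (!c->count)
 		c->count = bulk_alloc(CACHE_SIZE, c->slots);
 	if (!c->count)
 		return NULL;

 	return c->slots[--c->count];
 }

 /* Flush-time top-up: fill only the free tail of the cache, the way
  * __kfree_skb_flush() refills skb_cache after softirq processing.
  */
 static void cache_refill(struct obj_cache *c)
 {
 	unsigned int num = CACHE_SIZE - c->count;

 	if (num)
 		c->count += bulk_alloc(num, c->slots + c->count);
 }

 int main(void)
 {
 	struct obj_cache c = { 0 };
 	void *obj = cache_get(&c);

 	cache_refill(&c);
 	printf("got %p, cache now holds %u objects\n", obj, c.count);

 	free(obj);
 	while (c.count)
 		free(c.slots[--c.count]);

 	return 0;
 }

The real patch operates on the per-CPU napi_alloc_cache with GFP_ATOMIC
bulk allocations; the sketch only shows the pop/refill index logic.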

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 net/core/skbuff.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

Patch

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0e8c597ff6ce..57a7307689f3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -367,6 +367,8 @@  EXPORT_SYMBOL(build_skb_around);
 
 struct napi_alloc_cache {
 	struct page_frag_cache	page;
+	u32			skb_count;
+	void			*skb_cache[NAPI_SKB_CACHE_SIZE];
 	u32			flush_skb_count;
 	void			*flush_skb_cache[NAPI_SKB_CACHE_SIZE];
 };
@@ -490,7 +492,15 @@  static struct sk_buff *__napi_decache_skb(struct napi_alloc_cache *nc)
 	if (nc->flush_skb_count)
 		return nc->flush_skb_cache[--nc->flush_skb_count];
 
-	return kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	if (unlikely(!nc->skb_count))
+		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+						      GFP_ATOMIC,
+						      NAPI_SKB_CACHE_SIZE,
+						      nc->skb_cache);
+	if (unlikely(!nc->skb_count))
+		return NULL;
+
+	return nc->skb_cache[--nc->skb_count];
 }
 
 /**
@@ -870,6 +880,7 @@  void __consume_stateless_skb(struct sk_buff *skb)
 void __kfree_skb_flush(void)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	u32 num;
 
 	/* flush flush_skb_cache if containing objects */
 	if (nc->flush_skb_count) {
@@ -877,6 +888,13 @@  void __kfree_skb_flush(void)
 				     nc->flush_skb_cache);
 		nc->flush_skb_count = 0;
 	}
+
+	num = NAPI_SKB_CACHE_SIZE - nc->skb_count;
+	if (num)
+		nc->skb_count += kmem_cache_alloc_bulk(skbuff_head_cache,
+						       GFP_ATOMIC, num,
+						       nc->skb_cache +
+						       nc->skb_count);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)