[RFC,bpf-next,35/52] net, skbuff: introduce napi_skb_cache_get_bulk()

Message ID 20220628194812.1453059-36-alexandr.lobakin@intel.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf, xdp: introduce and use Generic Hints/metadata

Checks

Context                          Check    Description
bpf/vmtest-bpf-next-PR           fail     PR summary
bpf/vmtest-bpf-next-VM_Test-1    fail     Logs for Kernel LATEST on ubuntu-latest with gcc
bpf/vmtest-bpf-next-VM_Test-2    fail     Logs for Kernel LATEST on ubuntu-latest with llvm-15
bpf/vmtest-bpf-next-VM_Test-3    fail     Logs for Kernel LATEST on z15 with gcc
netdev/tree_selection            success  Clearly marked for bpf-next, async
netdev/apply                     fail     Patch does not apply to bpf-next

Commit Message

Alexander Lobakin June 28, 2022, 7:47 p.m. UTC
Add a function to get an array of skbs from the NAPI percpu cache.
It's meant as a drop-in replacement for
kmem_cache_alloc_bulk(skbuff_head_cache, GFP_ATOMIC) and
xdp_alloc_skb_bulk(GFP_ATOMIC). The difference (apart from the
requirement to call it only from BH context) is that it reuses as
many NAPI cache entries for the skbs as possible, allocating new
heads only when needed and only as many as needed.
This can save a significant number of CPU cycles when GRO and/or Tx
completion (anything that ends up in napi_skb_cache_put()) is running
on the same CPU. If the function can't provide the requested number
of entries due to an allocation failure, it returns as many as it
managed to obtain.

Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
---
 include/linux/skbuff.h |  1 +
 net/core/skbuff.c      | 43 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)
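
A rough usage sketch (editorial illustration, not part of the patch): a
BH-context path that needs one zeroed skb head per frame in a batch could
call napi_skb_cache_get_bulk() where it previously used
kmem_cache_alloc_bulk() or xdp_alloc_skb_bulk(). The function name
batch_build_skbs(), the frames[]/dev parameters and the fixed batch size
are assumptions made only for this sketch; __xdp_build_skb_from_frame()
and netif_receive_skb() are existing kernel helpers.

/* Hypothetical caller, for illustration only -- not part of this patch */
static void batch_build_skbs(struct xdp_frame **frames, size_t n_frames,
			     struct net_device *dev)
{
	void *skbs[8];
	size_t i, got;

	n_frames = min_t(size_t, n_frames, ARRAY_SIZE(skbs));

	/* previously: kmem_cache_alloc_bulk(skbuff_head_cache, GFP_ATOMIC,
	 * n_frames, skbs); this variant reuses NAPI-cached heads first
	 */
	got = napi_skb_cache_get_bulk(skbs, n_frames);

	for (i = 0; i < got; i++) {
		struct sk_buff *skb;

		/* attach the frame's data buffer to the pre-zeroed head */
		skb = __xdp_build_skb_from_frame(frames[i], skbs[i], dev);
		if (skb)
			netif_receive_skb(skb);
	}
}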

Patch

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0a95f753c1d9..0c1e5446653b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1240,6 +1240,7 @@  struct sk_buff *build_skb_around(struct sk_buff *skb,
 void skb_attempt_defer_free(struct sk_buff *skb);
 
 struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
+size_t napi_skb_cache_get_bulk(void **skbs, size_t n);
 
 /**
  * alloc_skb - allocate a network buffer
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5b23fc7f1157..9b075f52d1fb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -190,6 +190,49 @@  static struct sk_buff *napi_skb_cache_get(void)
 	return skb;
 }
 
+/**
+ * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache
+ * @skbs: a pointer to an at least @n-sized array to fill with skb pointers
+ * @n: the number of entries to provide
+ *
+ * Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes
+ * the pointers into the provided array @skbs. If fewer entries are
+ * available, the remainder is bulk-allocated from the MM layer.
+ * The heads are being zeroed with either memset() or %__GFP_ZERO, so they are
+ * ready for {,__}build_skb_around() and don't have any data buffers attached.
+ * Must be called *only* from the BH context.
+ *
+ * Returns the number of successfully allocated skbs (@n if
+ * kmem_cache_alloc_bulk() didn't fail).
+ */
+size_t napi_skb_cache_get_bulk(void **skbs, size_t n)
+{
+	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	size_t total = n;
+
+	if (nc->skb_count < n)
+		n -= kmem_cache_alloc_bulk(skbuff_head_cache,
+					   GFP_ATOMIC | __GFP_ZERO,
+					   n - nc->skb_count,
+					   skbs + nc->skb_count);
+	if (unlikely(nc->skb_count < n)) {
+		total -= n - nc->skb_count;
+		n = nc->skb_count;
+	}
+
+	for (size_t i = 0; i < n; i++) {
+		skbs[i] = nc->skb_cache[nc->skb_count - n + i];
+
+		kasan_unpoison_object_data(skbuff_head_cache, skbs[i]);
+		memset(skbs[i], 0, offsetof(struct sk_buff, tail));
+	}
+
+	nc->skb_count -= n;
+
+	return total;
+}
+EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk);
+
 /* Caller must provide SKB that is memset cleared */
 static void __build_skb_around(struct sk_buff *skb, void *data,
 			       unsigned int frag_size)
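
For clarity on the partial-failure accounting above (editorial note, not
part of the patch): suppose the per-CPU cache holds 2 heads and the caller
asks for 5. kmem_cache_alloc_bulk() is asked for the missing 3, writing at
&skbs[2]; if it only manages 1, @total is trimmed by the 2 slots that can
no longer be filled, and the loop then hands out the 2 cached heads into
skbs[0..1], so the caller gets back 3 contiguous valid pointers. The small
stand-alone user-space model below mirrors only that arithmetic, with
made-up numbers; it is not kernel code.

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t skb_count = 2;	/* heads currently in the NAPI cache */
	size_t n = 5;		/* heads requested by the caller */
	size_t total = n;
	size_t bulk_got = 1;	/* pretend kmem_cache_alloc_bulk() got 1 of 3 */

	if (skb_count < n)
		n -= bulk_got;			/* n = 4: one slot already filled */

	if (skb_count < n) {
		total -= n - skb_count;		/* total = 3: two slots stay empty */
		n = skb_count;			/* n = 2: take what the cache has */
	}

	skb_count -= n;				/* cache drained to 0 */

	printf("returned %zu skbs, %zu left in cache\n", total, skb_count);
	return 0;
}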