
[v4,bpf-next,06/15] bpf: Optimize element count in non-preallocated hash map.

Message ID 20220826024430.84565-7-alexei.starovoitov@gmail.com
State New
Series bpf: BPF specific memory allocator.

Commit Message

Alexei Starovoitov Aug. 26, 2022, 2:44 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

The atomic_inc/dec might cause extreme cache line bouncing when multiple cpus
access the same bpf map. Based on the specified max_entries for the hash map,
calculate when percpu_counter becomes faster than atomic_t and use it for such
maps. For example, samples/bpf/map_perf_test uses a hash map with max_entries
1000. On a system with 16 cpus 'map_perf_test 4' shows 14k events per second
using atomic_t. On a system with 15 cpus it shows 100k events per second using
percpu. map_perf_test is an extreme case where all cpus collide on the same
atomic_t, which causes extreme cache bouncing. Note that the slow path of
percpu_counter is 5k events per second vs 14k for atomic, so the heuristic is
necessary. See the comment in the code for why the heuristic is based on
num_online_cpus().

Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 kernel/bpf/hashtab.c | 70 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 62 insertions(+), 8 deletions(-)
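
To make the threshold concrete, here is a minimal user-space sketch of the selection heuristic, assuming the PERCPU_COUNTER_BATCH value of 32 that the patch defines; the use_percpu_counter() helper and the standalone program are illustrative only, since in the kernel the check lives inline in htab_map_alloc(). For the 1000-entry map from the commit message it shows why a system with 15 online cpus ends up on the percpu_counter path (500 > 480) while one with 16 does not (500 > 512 is false), which is why the two systems above take different paths.

#include <stdbool.h>
#include <stdio.h>

#define PERCPU_COUNTER_BATCH 32	/* same batch value the patch defines */

/* Illustrative stand-in for the inline check in htab_map_alloc(). */
static bool use_percpu_counter(unsigned int max_entries, unsigned int online_cpus)
{
	return max_entries / 2 > online_cpus * PERCPU_COUNTER_BATCH;
}

int main(void)
{
	unsigned int max_entries = 1000;	/* map_perf_test's hash map size */

	/* 15 cpus: 500 > 480 -> percpu_counter; 16 cpus: 500 > 512 is false -> atomic_t */
	printf("15 cpus: %s\n", use_percpu_counter(max_entries, 15) ?
	       "percpu_counter" : "atomic_t");
	printf("16 cpus: %s\n", use_percpu_counter(max_entries, 16) ?
	       "percpu_counter" : "atomic_t");
	return 0;
}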

Comments

Daniel Borkmann Aug. 29, 2022, 9:47 p.m. UTC | #1
On 8/26/22 4:44 AM, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
> 
> The atomic_inc/dec might cause extreme cache line bouncing when multiple cpus
> access the same bpf map. Based on specified max_entries for the hash map
> calculate when percpu_counter becomes faster than atomic_t and use it for such
> maps. For example samples/bpf/map_perf_test is using hash map with max_entries
> 1000. On a system with 16 cpus the 'map_perf_test 4' shows 14k events per
> second using atomic_t. On a system with 15 cpus it shows 100k events per second
> using percpu. map_perf_test is an extreme case where all cpus colliding on
> atomic_t which causes extreme cache bouncing. Note that the slow path of
> percpu_counter is 5k events per secound vs 14k for atomic, so the heuristic is
> necessary. See comment in the code why the heuristic is based on
> num_online_cpus().

nit: Could we include this logic inside the percpu_counter logic, or as an extended
version of it? Except for the heuristic of attr->max_entries / 2 > num_online_cpus() *
PERCPU_COUNTER_BATCH, which toggles between plain atomic and percpu_counter, the
rest feels generic enough that it could also be applicable outside bpf.

Alexei Starovoitov Aug. 29, 2022, 9:57 p.m. UTC | #2
On Mon, Aug 29, 2022 at 2:47 PM Daniel Borkmann <daniel@iogearbox.net> wrote:
>
> On 8/26/22 4:44 AM, Alexei Starovoitov wrote:
> > From: Alexei Starovoitov <ast@kernel.org>
> >
> > The atomic_inc/dec might cause extreme cache line bouncing when multiple cpus
> > access the same bpf map. Based on specified max_entries for the hash map
> > calculate when percpu_counter becomes faster than atomic_t and use it for such
> > maps. For example samples/bpf/map_perf_test is using hash map with max_entries
> > 1000. On a system with 16 cpus the 'map_perf_test 4' shows 14k events per
> > second using atomic_t. On a system with 15 cpus it shows 100k events per second
> > using percpu. map_perf_test is an extreme case where all cpus colliding on
> > atomic_t which causes extreme cache bouncing. Note that the slow path of
> > percpu_counter is 5k events per secound vs 14k for atomic, so the heuristic is
> > necessary. See comment in the code why the heuristic is based on
> > num_online_cpus().
>
> nit: Could we include this logic inside percpu_counter logic, or as an extended
> version of it? Except the heuristic of attr->max_entries / 2 > num_online_cpus() *
> PERCPU_COUNTER_BATCH which toggles between plain atomic vs percpu_counter, the
> rest feel generic enough that it could also be applicable outside bpf.

The heuristic is probably not generic enough and this optimization
is a stopgap. It helps many cases, but doesn't solve all of them.
It's ok for this specific large hash map to count max_entries,
but we shouldn't claim generality or suggest this heuristic
to anyone else. I was thinking of doing a follow-up and creating
a true generic combined percpu and atomic counter, similar
to percpu_ref, that can switch from percpu to atomic.
But it's more of a wish-list task atm.
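
A rough sketch of what such a combined counter could look like, purely for illustration: the hybrid_counter type and its helpers below are hypothetical (no such API exists in the kernel today), the mode is fixed at init time the same way this patch fixes it per map, and a real follow-up in the spirit of percpu_ref would additionally need a safe runtime switch between the two representations.

/* Hypothetical sketch only -- not an existing kernel API. */
#include <linux/atomic.h>
#include <linux/percpu_counter.h>

struct hybrid_counter {
	struct percpu_counter pcount;	/* used when percpu mode is selected */
	atomic_t count;			/* used otherwise */
	bool use_percpu;		/* fixed at init in this sketch */
};

static inline int hybrid_counter_init(struct hybrid_counter *c, bool use_percpu,
				      gfp_t gfp)
{
	c->use_percpu = use_percpu;
	atomic_set(&c->count, 0);
	return use_percpu ? percpu_counter_init(&c->pcount, 0, gfp) : 0;
}

static inline void hybrid_counter_add(struct hybrid_counter *c, int delta, s32 batch)
{
	if (c->use_percpu)
		percpu_counter_add_batch(&c->pcount, delta, batch);
	else
		atomic_add(delta, &c->count);
}

/* Mirrors is_map_full() from the patch: true once the limit is reached. */
static inline bool hybrid_counter_at_limit(struct hybrid_counter *c, s64 limit,
					   s32 batch)
{
	if (c->use_percpu)
		return __percpu_counter_compare(&c->pcount, limit, batch) >= 0;
	return atomic_read(&c->count) >= limit;
}

static inline void hybrid_counter_destroy(struct hybrid_counter *c)
{
	if (c->use_percpu)
		percpu_counter_destroy(&c->pcount);
}

The hard part that this sketch omits is exactly the runtime switch: draining the percpu deltas into the atomic (or vice versa) without losing updates from concurrent inc/dec paths, which is what percpu_ref solves for its own use case.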

Patch

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index bd23c8830d49..8f68c6e13339 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -101,7 +101,12 @@  struct bpf_htab {
 		struct bpf_lru lru;
 	};
 	struct htab_elem *__percpu *extra_elems;
-	atomic_t count;	/* number of elements in this hashtable */
+	/* number of elements in non-preallocated hashtable are kept
+	 * in either pcount or count
+	 */
+	struct percpu_counter pcount;
+	atomic_t count;
+	bool use_percpu_counter;
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
@@ -552,6 +557,29 @@  static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 
 	htab_init_buckets(htab);
 
+/* compute_batch_value() computes batch value as num_online_cpus() * 2
+ * and __percpu_counter_compare() needs
+ * htab->max_entries - cur_number_of_elems to be more than batch * num_online_cpus()
+ * for percpu_counter to be faster than atomic_t. In practice the average bpf
+ * hash map size is 10k, which means that a system with 64 cpus will fill
+ * hashmap to 20% of 10k before percpu_counter becomes ineffective. Therefore
+ * define our own batch count as 32 then 10k hash map can be filled up to 80%:
+ * 10k - 8k > 32 _batch_ * 64 _cpus_
+ * and __percpu_counter_compare() will still be fast. At that point hash map
+ * collisions will dominate its performance anyway. Assume that hash map filled
+ * to 50+% isn't going to be O(1) and use the following formula to choose
+ * between percpu_counter and atomic_t.
+ */
+#define PERCPU_COUNTER_BATCH 32
+	if (attr->max_entries / 2 > num_online_cpus() * PERCPU_COUNTER_BATCH)
+		htab->use_percpu_counter = true;
+
+	if (htab->use_percpu_counter) {
+		err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
+		if (err)
+			goto free_map_locked;
+	}
+
 	if (prealloc) {
 		err = prealloc_init(htab);
 		if (err)
@@ -878,6 +906,31 @@  static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 	}
 }
 
+static bool is_map_full(struct bpf_htab *htab)
+{
+	if (htab->use_percpu_counter)
+		return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
+						PERCPU_COUNTER_BATCH) >= 0;
+	return atomic_read(&htab->count) >= htab->map.max_entries;
+}
+
+static void inc_elem_count(struct bpf_htab *htab)
+{
+	if (htab->use_percpu_counter)
+		percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
+	else
+		atomic_inc(&htab->count);
+}
+
+static void dec_elem_count(struct bpf_htab *htab)
+{
+	if (htab->use_percpu_counter)
+		percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
+	else
+		atomic_dec(&htab->count);
+}
+
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
 	htab_put_fd_value(htab, l);
@@ -886,7 +939,7 @@  static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 		check_and_free_fields(htab, l);
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
-		atomic_dec(&htab->count);
+		dec_elem_count(htab);
 		l->htab = htab;
 		call_rcu(&l->rcu, htab_elem_free_rcu);
 	}
@@ -970,16 +1023,15 @@  static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			l_new = container_of(l, struct htab_elem, fnode);
 		}
 	} else {
-		if (atomic_inc_return(&htab->count) > htab->map.max_entries)
-			if (!old_elem) {
+		if (is_map_full(htab))
+			if (!old_elem)
 				/* when map is full and update() is replacing
 				 * old element, it's ok to allocate, since
 				 * old element will be freed immediately.
 				 * Otherwise return an error
 				 */
-				l_new = ERR_PTR(-E2BIG);
-				goto dec_count;
-			}
+				return ERR_PTR(-E2BIG);
+		inc_elem_count(htab);
 		l_new = bpf_mem_cache_alloc(&htab->ma);
 		if (!l_new) {
 			l_new = ERR_PTR(-ENOMEM);
@@ -1021,7 +1073,7 @@  static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 	l_new->hash = hash;
 	return l_new;
 dec_count:
-	atomic_dec(&htab->count);
+	dec_elem_count(htab);
 	return l_new;
 }
 
@@ -1495,6 +1547,8 @@  static void htab_map_free(struct bpf_map *map)
 	free_percpu(htab->extra_elems);
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->ma);
+	if (htab->use_percpu_counter)
+		percpu_counter_destroy(&htab->pcount);
 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 		free_percpu(htab->map_locked[i]);
 	lockdep_unregister_key(&htab->lockdep_key);
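
On the cache line bouncing point from the commit message: the effect can be reproduced entirely in user space. The standalone sketch below (thread and iteration counts are arbitrary) contrasts a single shared atomic counter with per-thread, cache-line-padded counters that are only summed at the end, which is roughly the trade-off percpu_counter makes in the kernel. Building it with 'cc -O2 -pthread' and timing the two phases should show the shared counter scaling much worse as the thread count grows.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 16
#define ITERS    (1L << 22)

/* One counter shared by all threads: the old htab->count pattern. */
static atomic_long shared;

/* One padded counter per thread: roughly the percpu_counter idea. */
struct padded { atomic_long v; char pad[64 - sizeof(atomic_long)]; };
static struct padded per_thread[NTHREADS];

static void *bounce(void *arg)
{
	(void)arg;
	for (long i = 0; i < ITERS; i++)
		atomic_fetch_add(&shared, 1);	/* every inc bounces the same cache line */
	return NULL;
}

static void *local(void *arg)
{
	struct padded *p = arg;
	for (long i = 0; i < ITERS; i++)
		atomic_fetch_add(&p->v, 1);	/* thread-local line, no bouncing */
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	long total = 0;

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, bounce, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	printf("shared atomic:       %ld\n", atomic_load(&shared));

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, local, &per_thread[i]);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	for (int i = 0; i < NTHREADS; i++)
		total += atomic_load(&per_thread[i].v);
	printf("per-thread counters: %ld\n", total);
	return 0;
}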