
[bpf-next,07/12] bpf: Add a hint to allocated objects.

Message ID 20230621023238.87079-8-alexei.starovoitov@gmail.com (mailing list archive)
State Superseded
Series bpf: Introduce bpf_mem_cache_free_rcu().

Commit Message

Alexei Starovoitov June 21, 2023, 2:32 a.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

To address the OOM issue that arises when one cpu is allocating and another
cpu is freeing, add a target bpf_mem_cache hint to allocated objects; when
the local cpu's free_llist overflows, free to that bpf_mem_cache instead.
The hint addresses the OOM while maintaining the same performance for the
common case when alloc and free are done on the same cpu.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 kernel/bpf/memalloc.c | 46 ++++++++++++++++++++++++++-----------------
 1 file changed, 28 insertions(+), 18 deletions(-)
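
For illustration, a minimal standalone sketch of the hint idea in plain C
(simplified, not the kernel code; the struct and function names below are
made up): the allocating cache stamps its own address into a hidden word of
the object, and the free path reads that word back to learn which cache the
object came from.

#include <stddef.h>
#include <stdlib.h>

struct cache {
	/* per-cpu free list, watermarks, irq_work, etc. omitted */
	struct cache *tgt;	/* last owner hint observed on free */
};

/* Each object is preceded by a hidden header word that stores the owner
 * hint while the object is in use (the kernel reuses the llist_node slot
 * for this, which bpf_mem_alloc() does not expose to its caller).
 */
static void *cache_alloc(struct cache *c, size_t size)
{
	struct cache **hdr = malloc(sizeof(*hdr) + size);

	if (!hdr)
		return NULL;
	*hdr = c;		/* remember the allocating cache */
	return hdr + 1;		/* hand out the payload after the header */
}

static void cache_free(struct cache *c, void *ptr)
{
	struct cache **hdr = (struct cache **)ptr - 1;

	/* Read the owner back. If another cpu allocated the object, this
	 * tells the overflow path where to return surplus objects instead
	 * of letting them pile up on the freeing cpu.
	 */
	c->tgt = *hdr;
	free(hdr);		/* the real code batches and defers the free */
}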

Comments

Hou Tao June 24, 2023, 3:28 a.m. UTC | #1
Hi,

On 6/21/2023 10:32 AM, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@kernel.org>
>
> To address the OOM issue that arises when one cpu is allocating and another
> cpu is freeing, add a target bpf_mem_cache hint to allocated objects; when
> the local cpu's free_llist overflows, free to that bpf_mem_cache instead.
> The hint addresses the OOM while maintaining the same performance for the
> common case when alloc and free are done on the same cpu.
>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  kernel/bpf/memalloc.c | 46 ++++++++++++++++++++++++++-----------------
>  1 file changed, 28 insertions(+), 18 deletions(-)
>
> diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
> index 4fd79bd51f5a..8b7645bffd1a 100644
> --- a/kernel/bpf/memalloc.c
> +++ b/kernel/bpf/memalloc.c
> @@ -98,6 +98,7 @@ struct bpf_mem_cache {
>  	int free_cnt;
>  	int low_watermark, high_watermark, batch;
>  	int percpu_size;
> +	struct bpf_mem_cache *tgt;
>  
>  	/* list of objects to be freed after RCU tasks trace GP */
>  	struct llist_head free_by_rcu_ttrace;
> @@ -189,18 +190,11 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
>  
>  	for (i = 0; i < cnt; i++) {
>  		/*
> -		 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
> -		 * IRQ works on the same CPU are called sequentially, so it is
> -		 * safe to use __llist_del_first() here. If alloc_bulk() is
> -		 * invoked by the initial prefill, there will be no running
> -		 * refill_work(), so __llist_del_first() is fine as well.
> -		 *
> -		 * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
> -		 * If some objects come from other CPUs, it doesn't incur any
> -		 * harm because NUMA_NO_NODE means the preference for current
> -		 * numa node and it is not a guarantee.
> +		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
> +		 * done only by one CPU == current CPU. Other CPUs might
> +		 * llist_add() and llist_del_all() in parallel.
>  		 */
> -		obj = __llist_del_first(&c->free_by_rcu_ttrace);
> +		obj = llist_del_first(&c->free_by_rcu_ttrace);
According to the comments in llist.h, when there are concurrent
llist_del_first() and llist_del_all() operations, locking is needed.
>  		if (!obj)
>  			break;
>  		add_obj_to_free_list(c, obj);
> @@ -274,7 +268,7 @@ static void enque_to_free(struct bpf_mem_cache *c, void *obj)
>  	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
>  	 * Nothing races to add to free_by_rcu_ttrace list.
>  	 */
> -	if (__llist_add(llnode, &c->free_by_rcu_ttrace))
> +	if (llist_add(llnode, &c->free_by_rcu_ttrace))
>  		c->free_by_rcu_ttrace_tail = llnode;
>  }
>  
> @@ -286,7 +280,7 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
>  		return;
>  
>  	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
> -	llnode = __llist_del_all(&c->free_by_rcu_ttrace);
> +	llnode = llist_del_all(&c->free_by_rcu_ttrace);
>  	if (llnode)
>  		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
>  		 * It doesn't race with llist_del_all either.
> @@ -299,16 +293,22 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
>  	 * If RCU Tasks Trace grace period implies RCU grace period, free
>  	 * these elements directly, else use call_rcu() to wait for normal
>  	 * progs to finish and finally do free_one() on each element.
> +	 *
> +	 * call_rcu_tasks_trace() enqueues to a global queue, so it's ok
> +	 * that current cpu bpf_mem_cache != target bpf_mem_cache.
>  	 */
>  	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
>  }
>  
>  static void free_bulk(struct bpf_mem_cache *c)
>  {
> +	struct bpf_mem_cache *tgt = c->tgt;
>  	struct llist_node *llnode, *t;
>  	unsigned long flags;
>  	int cnt;
>  
> +	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
> +
>  	do {
>  		if (IS_ENABLED(CONFIG_PREEMPT_RT))
>  			local_irq_save(flags);
> @@ -322,13 +322,13 @@ static void free_bulk(struct bpf_mem_cache *c)
>  		if (IS_ENABLED(CONFIG_PREEMPT_RT))
>  			local_irq_restore(flags);
>  		if (llnode)
> -			enque_to_free(c, llnode);
> +			enque_to_free(tgt, llnode);
>  	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
>  
>  	/* and drain free_llist_extra */
>  	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
> -		enque_to_free(c, llnode);
> -	do_call_rcu_ttrace(c);
> +		enque_to_free(tgt, llnode);
> +	do_call_rcu_ttrace(tgt);
>  }
>  
>  static void bpf_mem_refill(struct irq_work *work)
> @@ -427,6 +427,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
>  			c->unit_size = unit_size;
>  			c->objcg = objcg;
>  			c->percpu_size = percpu_size;
> +			c->tgt = c;
>  			prefill_mem_cache(c, cpu);
>  		}
>  		ma->cache = pc;
> @@ -449,6 +450,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
>  			c = &cc->cache[i];
>  			c->unit_size = sizes[i];
>  			c->objcg = objcg;
> +			c->tgt = c;
>  			prefill_mem_cache(c, cpu);
>  		}
>  	}
> @@ -467,7 +469,7 @@ static void drain_mem_cache(struct bpf_mem_cache *c)
>  	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
>  	 * on these lists, so it is safe to use __llist_del_all().
>  	 */
> -	free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
> +	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
>  	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
>  	free_all(__llist_del_all(&c->free_llist), percpu);
>  	free_all(__llist_del_all(&c->free_llist_extra), percpu);
> @@ -599,8 +601,10 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
>  	local_irq_save(flags);
>  	if (local_inc_return(&c->active) == 1) {
>  		llnode = __llist_del_first(&c->free_llist);
> -		if (llnode)
> +		if (llnode) {
>  			cnt = --c->free_cnt;
> +			*(struct bpf_mem_cache **)llnode = c;
> +		}
>  	}
>  	local_dec(&c->active);
>  	local_irq_restore(flags);
> @@ -624,6 +628,12 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
>  
>  	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
>  
> +	/*
> +	 * Remember bpf_mem_cache that allocated this object.
> +	 * The hint is not accurate.
> +	 */
> +	c->tgt = *(struct bpf_mem_cache **)llnode;
> +
>  	local_irq_save(flags);
>  	if (local_inc_return(&c->active) == 1) {
>  		__llist_add(llnode, &c->free_llist);
Alexei Starovoitov June 24, 2023, 3:42 a.m. UTC | #2
On Fri, Jun 23, 2023 at 8:28 PM Hou Tao <houtao@huaweicloud.com> wrote:
>
> >                */
> > -             obj = __llist_del_first(&c->free_by_rcu_ttrace);
> > +             obj = llist_del_first(&c->free_by_rcu_ttrace);
> According to the comments in llist.h, when there are concurrent
> llist_del_first() and llist_del_all() operations, locking is needed.

Good question.
1. When only one cpu is doing llist_del_first(), locking is not needed.
 This is the case here: only this cpu is doing llist_del_first() from this 'c'.
2. The comments don't mention it, but llist_del_first() is ok on
multiple cpus if the ABA problem is addressed by other means.

PS
please trim your replies.
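
For reference, a rough sketch of the usage pattern described above (the llist
calls are the real kernel API; the surrounding structure is made up for
illustration): llist_add() and llist_del_all() may run from any CPU
concurrently, while llist_del_first() is only ever issued from the list
owner's own context.

#include <linux/llist.h>

struct my_cache {
	struct llist_head free_by_rcu;	/* stand-in for free_by_rcu_ttrace */
};

/* Any CPU: push one node. Safe concurrently with everything else. */
static void any_cpu_push(struct my_cache *c, struct llist_node *n)
{
	llist_add(n, &c->free_by_rcu);
}

/* Any CPU: take the whole list at once. */
static struct llist_node *any_cpu_drain(struct my_cache *c)
{
	return llist_del_all(&c->free_by_rcu);
}

/* Only the CPU that owns 'c' (e.g. its irq_work) pops single nodes, which
 * is point 1 above: a single llist_del_first() caller per list. The
 * del_first vs del_all interaction then hinges on removed nodes not being
 * reused immediately, which is where the RCU discussion below comes in.
 */
static struct llist_node *owner_pop_one(struct my_cache *c)
{
	return llist_del_first(&c->free_by_rcu);
}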
Hou Tao June 24, 2023, 3:54 a.m. UTC | #3
Hi,

On 6/24/2023 11:42 AM, Alexei Starovoitov wrote:
> On Fri, Jun 23, 2023 at 8:28 PM Hou Tao <houtao@huaweicloud.com> wrote:
>>>                */
>>> -             obj = __llist_del_first(&c->free_by_rcu_ttrace);
>>> +             obj = llist_del_first(&c->free_by_rcu_ttrace);
>> According to the comments in llist.h, when there are concurrent
>> llist_del_first() and llist_del_all() operations, locking is needed.
> Good question.
> 1. When only one cpu is doing llist_del_first(), locking is not needed.
>  This is the case here: only this cpu is doing llist_del_first() from this 'c'.
> 2. The comments don't mention it, but llist_del_first() is ok on
> multiple cpus if the ABA problem is addressed by other means.
I haven't checked the implementation details of the lockless list. Will do
that later. By "other means", do you mean RCU? Because the reuse will be
possible only after one RCU GP.
> PS
> please trim your replies.
Sorry for the inconvenience. Will do next time.
Alexei Starovoitov June 24, 2023, 3:57 a.m. UTC | #4
On Fri, Jun 23, 2023 at 8:54 PM Hou Tao <houtao@huaweicloud.com> wrote:
>
> Hi,
>
> On 6/24/2023 11:42 AM, Alexei Starovoitov wrote:
> > On Fri, Jun 23, 2023 at 8:28 PM Hou Tao <houtao@huaweicloud.com> wrote:
> >>>                */
> >>> -             obj = __llist_del_first(&c->free_by_rcu_ttrace);
> >>> +             obj = llist_del_first(&c->free_by_rcu_ttrace);
> >> According to the comments in llist.h, when there are concurrent
> >> llist_del_first() and llist_del_all() operations, locking is needed.
> > Good question.
> > 1. When only one cpu is doing llist_del_first(), locking is not needed.
> >  This is the case here: only this cpu is doing llist_del_first() from this 'c'.
> > 2. The comments don't mention it, but llist_del_first() is ok on
> > multiple cpus if the ABA problem is addressed by other means.
> I haven't checked the implementation details of the lockless list. Will do
> that later. By "other means", do you mean RCU? Because the reuse will be
> possible only after one RCU GP.

Right. Like RCU, but that's only if we go that route in future patches.
For now we're at case 1 == only one cpu is doing llist_del_first().

> > PS
> > please trim your replies.
> Sorry for the inconvenience. Will do next time.
>
>
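
As context for the "other means" / RCU point above, a rough sketch of what
deferring reuse behind a grace period could look like (hypothetical, not part
of this patch; names are made up): a removed object is only pushed back into
circulation from an RCU callback, so it cannot reappear on the list it was
taken from until a grace period has elapsed.

#include <linux/container_of.h>
#include <linux/llist.h>
#include <linux/rcupdate.h>

struct deferred_obj {
	struct llist_node llnode;
	struct rcu_head rcu;
};

static LLIST_HEAD(reuse_list);	/* nodes that may be handed out again */

static void reuse_after_gp(struct rcu_head *head)
{
	struct deferred_obj *obj = container_of(head, struct deferred_obj, rcu);

	/* Only now, after a grace period, does the node become visible on a
	 * list again, which closes the reuse window an ABA race would need.
	 */
	llist_add(&obj->llnode, &reuse_list);
}

static void retire_obj(struct deferred_obj *obj)
{
	/* Defer putting the node back into circulation until after a GP. */
	call_rcu(&obj->rcu, reuse_after_gp);
}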

Patch

diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 4fd79bd51f5a..8b7645bffd1a 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -98,6 +98,7 @@  struct bpf_mem_cache {
 	int free_cnt;
 	int low_watermark, high_watermark, batch;
 	int percpu_size;
+	struct bpf_mem_cache *tgt;
 
 	/* list of objects to be freed after RCU tasks trace GP */
 	struct llist_head free_by_rcu_ttrace;
@@ -189,18 +190,11 @@  static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 
 	for (i = 0; i < cnt; i++) {
 		/*
-		 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
-		 * IRQ works on the same CPU are called sequentially, so it is
-		 * safe to use __llist_del_first() here. If alloc_bulk() is
-		 * invoked by the initial prefill, there will be no running
-		 * refill_work(), so __llist_del_first() is fine as well.
-		 *
-		 * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
-		 * If some objects come from other CPUs, it doesn't incur any
-		 * harm because NUMA_NO_NODE means the preference for current
-		 * numa node and it is not a guarantee.
+		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
+		 * done only by one CPU == current CPU. Other CPUs might
+		 * llist_add() and llist_del_all() in parallel.
 		 */
-		obj = __llist_del_first(&c->free_by_rcu_ttrace);
+		obj = llist_del_first(&c->free_by_rcu_ttrace);
 		if (!obj)
 			break;
 		add_obj_to_free_list(c, obj);
@@ -274,7 +268,7 @@  static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
 	 * Nothing races to add to free_by_rcu_ttrace list.
 	 */
-	if (__llist_add(llnode, &c->free_by_rcu_ttrace))
+	if (llist_add(llnode, &c->free_by_rcu_ttrace))
 		c->free_by_rcu_ttrace_tail = llnode;
 }
 
@@ -286,7 +280,7 @@  static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 		return;
 
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
-	llnode = __llist_del_all(&c->free_by_rcu_ttrace);
+	llnode = llist_del_all(&c->free_by_rcu_ttrace);
 	if (llnode)
 		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
 		 * It doesn't race with llist_del_all either.
@@ -299,16 +293,22 @@  static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 	 * If RCU Tasks Trace grace period implies RCU grace period, free
 	 * these elements directly, else use call_rcu() to wait for normal
 	 * progs to finish and finally do free_one() on each element.
+	 *
+	 * call_rcu_tasks_trace() enqueues to a global queue, so it's ok
+	 * that current cpu bpf_mem_cache != target bpf_mem_cache.
 	 */
 	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
 }
 
 static void free_bulk(struct bpf_mem_cache *c)
 {
+	struct bpf_mem_cache *tgt = c->tgt;
 	struct llist_node *llnode, *t;
 	unsigned long flags;
 	int cnt;
 
+	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
+
 	do {
 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
 			local_irq_save(flags);
@@ -322,13 +322,13 @@  static void free_bulk(struct bpf_mem_cache *c)
 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
 			local_irq_restore(flags);
 		if (llnode)
-			enque_to_free(c, llnode);
+			enque_to_free(tgt, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 
 	/* and drain free_llist_extra */
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
-		enque_to_free(c, llnode);
-	do_call_rcu_ttrace(c);
+		enque_to_free(tgt, llnode);
+	do_call_rcu_ttrace(tgt);
 }
 
 static void bpf_mem_refill(struct irq_work *work)
@@ -427,6 +427,7 @@  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c->unit_size = unit_size;
 			c->objcg = objcg;
 			c->percpu_size = percpu_size;
+			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
 		ma->cache = pc;
@@ -449,6 +450,7 @@  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c = &cc->cache[i];
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
+			c->tgt = c;
 			prefill_mem_cache(c, cpu);
 		}
 	}
@@ -467,7 +469,7 @@  static void drain_mem_cache(struct bpf_mem_cache *c)
 	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
 	 * on these lists, so it is safe to use __llist_del_all().
 	 */
-	free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
+	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
 	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
 	free_all(__llist_del_all(&c->free_llist), percpu);
 	free_all(__llist_del_all(&c->free_llist_extra), percpu);
@@ -599,8 +601,10 @@  static void notrace *unit_alloc(struct bpf_mem_cache *c)
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		llnode = __llist_del_first(&c->free_llist);
-		if (llnode)
+		if (llnode) {
 			cnt = --c->free_cnt;
+			*(struct bpf_mem_cache **)llnode = c;
+		}
 	}
 	local_dec(&c->active);
 	local_irq_restore(flags);
@@ -624,6 +628,12 @@  static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 
 	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
 
+	/*
+	 * Remember bpf_mem_cache that allocated this object.
+	 * The hint is not accurate.
+	 */
+	c->tgt = *(struct bpf_mem_cache **)llnode;
+
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		__llist_add(llnode, &c->free_llist);