
[RFC,bpf-next,v3,3/6] bpf: Introduce BPF_MA_REUSE_AFTER_RCU_GP

Message ID 20230429101215.111262-4-houtao@huaweicloud.com (mailing list archive)
State RFC
Delegated to: BPF
Series Handle immediate reuse in bpf memory allocator

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 182 this patch: 182
netdev/cc_maintainers success CCed 12 of 12 maintainers
netdev/build_clang success Errors and warnings before: 20 this patch: 20
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 182 this patch: 182
netdev/checkpatch warning WARNING: Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants WARNING: line length of 81 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline fail Was 0 now: 1

Commit Message

Hou Tao April 29, 2023, 10:12 a.m. UTC
From: Hou Tao <houtao1@huawei.com>

Currently the freed objects in the bpf memory allocator may be reused
immediately by a new allocation. This introduces a use-after-bpf-ma-free
problem for non-preallocated hash maps and makes the lookup procedure
return incorrect results. The immediate reuse also makes introducing
new use cases (e.g. qp-trie) more difficult.
So introduce BPF_MA_REUSE_AFTER_RCU_GP to solve these problems. For
BPF_MA_REUSE_AFTER_GP, the freed objects are reused only after one RCU
grace period and may be returned back to slab system after another
RCU-tasks-trace grace period. So bpf programs which care about the reuse
problem can use bpf_rcu_read_{lock,unlock}() to access these freed
objects safely, and for programs which don't care, the potential
use-after-bpf-ma-free is still memory-safe because these objects have
not yet been freed by the bpf memory allocator.

To make these freed elements reusable quickly, BPF_MA_REUSE_AFTER_RCU_GP
dynamically allocates memory to create multiple inflight RCU callbacks
which mark the freed elements as reusable. The memory used for
bpf_reuse_batch is freed when these RCU callbacks complete. When no
memory is available, synchronize_rcu_expedited() is used instead to make
the freed elements reusable. To reduce the risk of OOM, part of the
reusable memory is returned to the slab through an extra RCU-tasks-trace
grace period. Until it is actually freed, this memory remains available
for reuse.
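
A rough sketch of the intended object lifecycle (the list and callback names
below are the ones added by this patch):

  /* unit_free() with BPF_MA_REUSE_AFTER_RCU_GP:
   *
   *   freed obj -> c->prepare_reuse_head               (not yet reusable)
   *             -> reuse kworker + call_rcu()          ... one RCU GP
   *             -> c->reuse_ready_head                 (reusable by unit_alloc())
   *             -> c->wait_for_free + call_rcu_tasks_trace()
   *             -> free_all() back to slab             ... one RCU-tasks-trace GP
   *
   * Objects on reuse_ready_head and wait_for_free may be handed out again by
   * unit_alloc() at any time, so programs which care about reuse must wrap
   * accesses in bpf_rcu_read_{lock,unlock}().
   */

A caller would opt in at init time, for example (illustrative only):

  bpf_mem_alloc_init(&htab->ma, htab->elem_size, BPF_MA_REUSE_AFTER_RCU_GP);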

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 include/linux/bpf_mem_alloc.h |   1 +
 kernel/bpf/memalloc.c         | 353 +++++++++++++++++++++++++++++++---
 2 files changed, 326 insertions(+), 28 deletions(-)

Comments

Martin KaFai Lau May 1, 2023, 11:59 p.m. UTC | #1
On 4/29/23 3:12 AM, Hou Tao wrote:
> +static void bpf_ma_prepare_reuse_work(struct work_struct *work)
> +{
> +	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, reuse_work);
> +	struct llist_node *head, *tail, *llnode, *tmp;
> +	struct bpf_reuse_batch *batch;
> +	unsigned long flags;
> +	bool do_free;
> +
> +	local_irq_save(flags);
> +	/* When CPU is offline, the running CPU may be different with
> +	 * the CPU which submitted the work. When these two CPUs are the same,
> +	 * kworker may be interrupted by NMI, so increase active to protect
> +	 * again such concurrency.
> +	 */
> +	if (c->cpu == smp_processor_id())
> +		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
> +	raw_spin_lock(&c->reuse_lock);
> +	head = __llist_del_all(&c->prepare_reuse_head);
> +	tail = c->prepare_reuse_tail;
> +	c->prepare_reuse_tail = NULL;
> +	c->prepare_reuse_cnt = 0;
> +	if (c->cpu == smp_processor_id())
> +		local_dec(&c->active);
> +
> +	/* Try to free elements in reusable list. Before these elements are
> +	 * freed in RCU cb, these element will still be available for reuse.
> +	 */
> +	do_free = bpf_ma_try_free_reuse_objs(c);
> +	raw_spin_unlock(&c->reuse_lock);
> +	local_irq_restore(flags);
> +
> +	if (do_free)
> +		call_rcu_tasks_trace(&c->rcu, bpf_ma_free_reusable_cb);
> +
> +	llist_for_each_safe(llnode, tmp, llist_del_all(&c->free_llist_extra)) {
> +		if (!head)
> +			tail = llnode;
> +		llnode->next = head;
> +		head = llnode;
> +	}
> +	/* Draining is in progress ? */
> +	if (!head) {
> +		/* kworker completes and no RCU callback */
> +		atomic_dec(&c->reuse_cb_in_progress);
> +		return;
> +	}
> +
> +	batch = kmalloc(sizeof(*batch), GFP_KERNEL);
> +	if (!batch) {
> +		synchronize_rcu_expedited();
> +		bpf_ma_add_to_reuse_ready_or_free(c, head, tail);
> +		/* kworker completes and no RCU callback */
> +		atomic_dec(&c->reuse_cb_in_progress);
> +		return;
> +	}
> +
> +	batch->c = c;
> +	batch->head = head;
> +	batch->tail = tail;
> +	call_rcu(&batch->rcu, bpf_ma_reuse_cb);
> +}
> +
> +static void notrace wait_gp_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
> +{
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +	/* In case a NMI-context bpf program is also freeing object. */
> +	if (local_inc_return(&c->active) == 1) {
> +		bool try_queue_work = false;
> +
> +		/* kworker may remove elements from prepare_reuse_head */
> +		raw_spin_lock(&c->reuse_lock);
> +		if (llist_empty(&c->prepare_reuse_head))
> +			c->prepare_reuse_tail = llnode;
> +		__llist_add(llnode, &c->prepare_reuse_head);
> +		if (++c->prepare_reuse_cnt > c->high_watermark) {
> +			/* Zero out prepare_reuse_cnt early to prevent
> +			 * unnecessary queue_work().
> +			 */
> +			c->prepare_reuse_cnt = 0;
> +			try_queue_work = true;
> +		}
> +		raw_spin_unlock(&c->reuse_lock);
> +
> +		if (try_queue_work && !work_pending(&c->reuse_work)) {
> +			/* Use reuse_cb_in_progress to indicate there is
> +			 * inflight reuse kworker or reuse RCU callback.
> +			 */
> +			atomic_inc(&c->reuse_cb_in_progress);
> +			/* Already queued */
> +			if (!queue_work(bpf_ma_wq, &c->reuse_work))

queue_work will be called from a bpf program (e.g. bpf_mem_cache_free -> 
unit_free -> queue_work). Is it safe from recursion and deadlock?
e.g. what if a tracing bpf prog is attached to some functions in workqueue.c
which run after a workqueue-related spin lock has been acquired, and that
tracing bpf prog does unit_free?
Not a workqueue expert. Asking because it is not obvious to me considering there 
is a lot of ground to cover in workqueue.c.
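
To make the concern concrete, one hypothetical call chain (assuming a tracing
prog can be attached to a function which __queue_work() calls while holding
the worker pool lock, e.g. insert_work(); purely illustrative):

  /*
   * bpf_mem_cache_free() -> unit_free() -> queue_work()
   *   -> __queue_work()                       takes pool->lock
   *      -> insert_work() -> tracing bpf prog -> bpf_mem_cache_free()
   *         -> unit_free() -> queue_work() -> __queue_work()
   *            -> tries to take pool->lock again -> deadlock
   */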

I wonder what happened to the current bpf memalloc approach of postponing work
to irq work. v2 mentioned it does not work well. Did you figure out why?

> +				atomic_dec(&c->reuse_cb_in_progress);
> +		}
> +	} else {
> +		llist_add(llnode, &c->free_llist_extra);
> +	}
> +	local_dec(&c->active);
> +	local_irq_restore(flags);
> +}
> +
>   /* Though 'ptr' object could have been allocated on a different cpu
>    * add it to the free_llist of the current cpu.
>    * Let kfree() logic deal with it when it's later called from irq_work.
>    */
> -static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
> +static void notrace immediate_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
>   {
> -	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
>   	unsigned long flags;
>   	int cnt = 0;
>   
> -	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
> -
>   	local_irq_save(flags);
>   	if (local_inc_return(&c->active) == 1) {
>   		__llist_add(llnode, &c->free_llist);
> @@ -633,6 +910,18 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
>   		irq_work_raise(c);
>   }
>   
> +static inline void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
> +{
> +	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
> +
> +	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
> +
> +	if (c->flags & BPF_MA_REUSE_AFTER_RCU_GP)
> +		wait_gp_reuse_free(c, llnode);
> +	else
> +		immediate_reuse_free(c, llnode);
> +}
> +
Alexei Starovoitov May 3, 2023, 6:48 p.m. UTC | #2
On Sat, Apr 29, 2023 at 06:12:12PM +0800, Hou Tao wrote:
> +
> +static void notrace wait_gp_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
> +{
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +	/* In case a NMI-context bpf program is also freeing object. */
> +	if (local_inc_return(&c->active) == 1) {
> +		bool try_queue_work = false;
> +
> +		/* kworker may remove elements from prepare_reuse_head */
> +		raw_spin_lock(&c->reuse_lock);
> +		if (llist_empty(&c->prepare_reuse_head))
> +			c->prepare_reuse_tail = llnode;
> +		__llist_add(llnode, &c->prepare_reuse_head);
> +		if (++c->prepare_reuse_cnt > c->high_watermark) {
> +			/* Zero out prepare_reuse_cnt early to prevent
> +			 * unnecessary queue_work().
> +			 */
> +			c->prepare_reuse_cnt = 0;
> +			try_queue_work = true;
> +		}
> +		raw_spin_unlock(&c->reuse_lock);
> +
> +		if (try_queue_work && !work_pending(&c->reuse_work)) {
> +			/* Use reuse_cb_in_progress to indicate there is
> +			 * inflight reuse kworker or reuse RCU callback.
> +			 */
> +			atomic_inc(&c->reuse_cb_in_progress);
> +			/* Already queued */
> +			if (!queue_work(bpf_ma_wq, &c->reuse_work))

As Martin pointed out queue_work() is not safe here.
The raw_spin_lock(&c->reuse_lock); earlier is not safe either.
For the next version please drop workers and spin_lock from unit_free/alloc paths.
If lock has to be taken it should be done from irq_work.
Under no circumstances we can use alloc_workqueue(). No new kthreads.

We can avoid adding new flag to bpf_mem_alloc to reduce the complexity
and do roughly equivalent of REUSE_AFTER_RCU_GP unconditionally in the following way:

- alloc_bulk() won't be trying to steal from c->free_by_rcu.

- do_call_rcu() does call_rcu(&c->rcu, __free_rcu) instead of task-trace version.

- rcu_trace_implies_rcu_gp() is never used.

- after RCU_GP __free_rcu() moves all waiting_for_gp elements into 
  a size specific link list per bpf_mem_alloc (not per bpf_mem_cache which is per-cpu)
  and does call_rcu_tasks_trace

- Let's call this list ma->free_by_rcu_tasks_trace
  (only one list for bpf_mem_alloc with known size or NUM_CACHES such lists when size == 0 at init)

- any cpu alloc_bulk() can steal from size specific ma->free_by_rcu_tasks_trace list that
  is protected by ma->spin_lock (1 or NUM_CACHES such locks)

- ma->waiting_for_gp_tasks_trace will be freeing elements into slab
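
A minimal sketch of the proposed flow (the list and lock names are the ones
from the bullets above; everything else is assumed for illustration):

  /*
   * unit_free() -> c->free_llist (per-cpu, unchanged)
   *   -> do_call_rcu(): call_rcu(&c->rcu, __free_rcu)            ... RCU GP
   *   -> __free_rcu(): move c->waiting_for_gp elements onto
   *      ma->free_by_rcu_tasks_trace (per bpf_mem_alloc, under
   *      ma->spin_lock), then call_rcu_tasks_trace()
   *   -> alloc_bulk() on any cpu may steal from
   *      ma->free_by_rcu_tasks_trace                              (reuse)
   *   -> elements reaching ma->waiting_for_gp_tasks_trace are freed into
   *      slab after the tasks-trace GP
   */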

What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
(while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)

After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
Probably usage of bpf_mem_alloc in local storage can be simplified as well.
Martin wdyt?

I think this approach adds minimal complexity to bpf_mem_alloc while solving all existing pain points
including needs of qp-trie.
Martin KaFai Lau May 3, 2023, 9:57 p.m. UTC | #3
On 5/3/23 11:48 AM, Alexei Starovoitov wrote:
> What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
> Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
> (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
> 
> After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
> Probably usage of bpf_mem_alloc in local storage can be simplified as well.
> Martin wdyt?

If the bpf prog always does a bpf_rcu_read_lock() before accessing the (e.g.) 
task local storage, it can remove the reuse_now conditions in the 
bpf_local_storage and directly call the bpf_mem_cache_free().
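
For illustration only, roughly what the free path could collapse to once the
allocator gives the above guarantee (field names follow current
bpf_local_storage.c; the existing helper's reuse_now parameter and
special-casing are what would go away):

  static void bpf_selem_free(struct bpf_local_storage_elem *selem,
  			     struct bpf_local_storage_map *smap)
  {
  	/* no reuse_now checks, no extra call_rcu_tasks_trace() needed here */
  	bpf_mem_cache_free(&smap->selem_ma, selem);
  }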

The only corner use case is when the bpf_prog or syscall does 
bpf_task_storage_delete() instead of having the task storage stays with the 
whole lifetime of the task_struct. Using REUSE_AFTER_RCU_GP will be a change of 
this uaf guarantee to the sleepable program but it is still safe because it is 
freed after tasks_trace gp. We could take this chance to align this behavior of 
the local storage map to the other bpf maps.

For BPF_MA_FREE_AFTER_RCU_GP, there are cases that the bpf local storage knows 
it can be freed without waiting tasks_trace gp. However, only task/cgroup 
storages are in bpf ma and I don't believe this optimization matter much for 
them. I would rather focus on the REUSE_AFTER_RCU_GP first.
Alexei Starovoitov May 3, 2023, 11:06 p.m. UTC | #4
On Wed, May 03, 2023 at 02:57:03PM -0700, Martin KaFai Lau wrote:
> On 5/3/23 11:48 AM, Alexei Starovoitov wrote:
> > What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
> > Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
> > (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
> > 
> > After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
> > Probably usage of bpf_mem_alloc in local storage can be simplified as well.
> > Martin wdyt?
> 
> If the bpf prog always does a bpf_rcu_read_lock() before accessing the
> (e.g.) task local storage, it can remove the reuse_now conditions in the
> bpf_local_storage and directly call the bpf_mem_cache_free().
> 
> The only corner use case is when the bpf_prog or syscall does
> bpf_task_storage_delete() instead of having the task storage stays with the
> whole lifetime of the task_struct. Using REUSE_AFTER_RCU_GP will be a change
> of this uaf guarantee to the sleepable program but it is still safe because
> it is freed after tasks_trace gp. We could take this chance to align this
> behavior of the local storage map to the other bpf maps.
> 
> For BPF_MA_FREE_AFTER_RCU_GP, there are cases that the bpf local storage
> knows it can be freed without waiting tasks_trace gp. However, only
> task/cgroup storages are in bpf ma and I don't believe this optimization
> matter much for them. I would rather focus on the REUSE_AFTER_RCU_GP first.

I'm confused which REUSE_AFTER_RCU_GP you meant.
What I proposed above is REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace

Hou's proposals: 1. BPF_MA_REUSE_AFTER_two_RCUs_GP 2. BPF_MA_FREE_AFTER_single_RCU_GP

If I'm reading bpf_local_storage correctly it can remove reuse_now logic
in all conditions with REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.
What am I missing?
Martin KaFai Lau May 3, 2023, 11:39 p.m. UTC | #5
On 5/3/23 4:06 PM, Alexei Starovoitov wrote:
> On Wed, May 03, 2023 at 02:57:03PM -0700, Martin KaFai Lau wrote:
>> On 5/3/23 11:48 AM, Alexei Starovoitov wrote:
>>> What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
>>> Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
>>> (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
>>>
>>> After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
>>> Probably usage of bpf_mem_alloc in local storage can be simplified as well.
>>> Martin wdyt?
>>
>> If the bpf prog always does a bpf_rcu_read_lock() before accessing the
>> (e.g.) task local storage, it can remove the reuse_now conditions in the
>> bpf_local_storage and directly call the bpf_mem_cache_free().
>>
>> The only corner use case is when the bpf_prog or syscall does
>> bpf_task_storage_delete() instead of having the task storage stays with the
>> whole lifetime of the task_struct. Using REUSE_AFTER_RCU_GP will be a change
>> of this uaf guarantee to the sleepable program but it is still safe because
>> it is freed after tasks_trace gp. We could take this chance to align this
>> behavior of the local storage map to the other bpf maps.
>>
>> For BPF_MA_FREE_AFTER_RCU_GP, there are cases that the bpf local storage
>> knows it can be freed without waiting tasks_trace gp. However, only
>> task/cgroup storages are in bpf ma and I don't believe this optimization
>> matter much for them. I would rather focus on the REUSE_AFTER_RCU_GP first.
> 
> I'm confused which REUSE_AFTER_RCU_GP you meant.
> What I proposed above is REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace

Regarding REUSE_AFTER_RCU_GP, I meant 
REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.

> 
> Hou's proposals: 1. BPF_MA_REUSE_AFTER_two_RCUs_GP 2. BPF_MA_FREE_AFTER_single_RCU_GP

It probably is where the confusion is. I thought Hou's BPF_MA_REUSE_AFTER_RCU_GP 
is already REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace. From the commit 
message:

" ... So introduce BPF_MA_REUSE_AFTER_RCU_GP to solve these problems. For
BPF_MA_REUSE_AFTER_GP, the freed objects are reused only after one RCU
grace period and may be returned back to slab system after another
RCU-tasks-trace grace period. ..."

[I assumed BPF_MA_REUSE_AFTER_GP is just a typo of BPF_MA_REUSE_AFTER_"RCU"_GP]

> 
> If I'm reading bpf_local_storage correctly it can remove reuse_now logic
> in all conditions with REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.

Right, for smap->bpf_ma == true (cgroup and task storage), all reuse_now logic 
can be gone and directly use the bpf_mem_cache_free(). Potentially the sk/inode 
can also move to bpf_ma after running some benchmark. This will simplify things 
a lot. For sk storage, the reuse_now was there to avoid the unnecessary 
tasks_trace gp because performance impact was reported on sk storage where 
connections can be open-and-close very frequently.
Hou Tao May 4, 2023, 1:35 a.m. UTC | #6
Hi,

On 5/4/2023 2:48 AM, Alexei Starovoitov wrote:
> On Sat, Apr 29, 2023 at 06:12:12PM +0800, Hou Tao wrote:
>> +
>> +static void notrace wait_gp_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
>> +{
>> +	unsigned long flags;
>> +
>> +	local_irq_save(flags);
>> +	/* In case a NMI-context bpf program is also freeing object. */
>> +	if (local_inc_return(&c->active) == 1) {
>> +		bool try_queue_work = false;
>> +
>> +		/* kworker may remove elements from prepare_reuse_head */
>> +		raw_spin_lock(&c->reuse_lock);
>> +		if (llist_empty(&c->prepare_reuse_head))
>> +			c->prepare_reuse_tail = llnode;
>> +		__llist_add(llnode, &c->prepare_reuse_head);
>> +		if (++c->prepare_reuse_cnt > c->high_watermark) {
>> +			/* Zero out prepare_reuse_cnt early to prevent
>> +			 * unnecessary queue_work().
>> +			 */
>> +			c->prepare_reuse_cnt = 0;
>> +			try_queue_work = true;
>> +		}
>> +		raw_spin_unlock(&c->reuse_lock);
>> +
>> +		if (try_queue_work && !work_pending(&c->reuse_work)) {
>> +			/* Use reuse_cb_in_progress to indicate there is
>> +			 * inflight reuse kworker or reuse RCU callback.
>> +			 */
>> +			atomic_inc(&c->reuse_cb_in_progress);
>> +			/* Already queued */
>> +			if (!queue_work(bpf_ma_wq, &c->reuse_work))
> As Martin pointed out queue_work() is not safe here.
> The raw_spin_lock(&c->reuse_lock); earlier is not safe either.
I see. Didn't recognize these problems.
> For the next version please drop workers and spin_lock from unit_free/alloc paths.
> If lock has to be taken it should be done from irq_work.
> Under no circumstances we can use alloc_workqueue(). No new kthreads.
Is there any reason to prohibit the use of a new kthread in irq_work?
>
> We can avoid adding new flag to bpf_mem_alloc to reduce the complexity
> and do roughly equivalent of REUSE_AFTER_RCU_GP unconditionally in the following way:
>
> - alloc_bulk() won't be trying to steal from c->free_by_rcu.
>
> - do_call_rcu() does call_rcu(&c->rcu, __free_rcu) instead of task-trace version.
Not sure whether or not one inflight RCU callback is enough. Will check.
If one is not enough, I may use kmalloc(__GFP_NOWAIT) in irq work to
allocate multiple RCU callbacks.
> - rcu_trace_implies_rcu_gp() is never used.
>
> - after RCU_GP __free_rcu() moves all waiting_for_gp elements into 
>   a size specific link list per bpf_mem_alloc (not per bpf_mem_cache which is per-cpu)
>   and does call_rcu_tasks_trace
>
> - Let's call this list ma->free_by_rcu_tasks_trace
>   (only one list for bpf_mem_alloc with known size or NUM_CACHES such lists when size == 0 at init)
>
> - any cpu alloc_bulk() can steal from size specific ma->free_by_rcu_tasks_trace list that
>   is protected by ma->spin_lock (1 or NUM_CACHES such locks)
To reduce the lock contention, alloc_bulk() can steal from the global
list in batch. Had tried the global list before but I didn't do the
concurrent freeing, I think it could reduce the risk of OOM for
add_del_on_diff_cpu.
>
> - ma->waiting_for_gp_tasks_trace will be freeing elements into slab
>
> What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
> Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
> (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
>
> After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
> Probably usage of bpf_mem_alloc in local storage can be simplified as well.
> Martin wdyt?
>
> I think this approach adds minimal complexity to bpf_mem_alloc while solving all existing pain points
> including needs of qp-trie.
Thanks for these great suggestions. Will try to do it in v4.
Alexei Starovoitov May 4, 2023, 1:42 a.m. UTC | #7
On Wed, May 03, 2023 at 04:39:01PM -0700, Martin KaFai Lau wrote:
> On 5/3/23 4:06 PM, Alexei Starovoitov wrote:
> > On Wed, May 03, 2023 at 02:57:03PM -0700, Martin KaFai Lau wrote:
> > > On 5/3/23 11:48 AM, Alexei Starovoitov wrote:
> > > > What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
> > > > Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
> > > > (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
> > > > 
> > > > After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
> > > > Probably usage of bpf_mem_alloc in local storage can be simplified as well.
> > > > Martin wdyt?
> > > 
> > > If the bpf prog always does a bpf_rcu_read_lock() before accessing the
> > > (e.g.) task local storage, it can remove the reuse_now conditions in the
> > > bpf_local_storage and directly call the bpf_mem_cache_free().
> > > 
> > > The only corner use case is when the bpf_prog or syscall does
> > > bpf_task_storage_delete() instead of having the task storage stays with the
> > > whole lifetime of the task_struct. Using REUSE_AFTER_RCU_GP will be a change
> > > of this uaf guarantee to the sleepable program but it is still safe because
> > > it is freed after tasks_trace gp. We could take this chance to align this
> > > behavior of the local storage map to the other bpf maps.
> > > 
> > > For BPF_MA_FREE_AFTER_RCU_GP, there are cases that the bpf local storage
> > > knows it can be freed without waiting tasks_trace gp. However, only
> > > task/cgroup storages are in bpf ma and I don't believe this optimization
> > > matter much for them. I would rather focus on the REUSE_AFTER_RCU_GP first.
> > 
> > I'm confused which REUSE_AFTER_RCU_GP you meant.
> > What I proposed above is REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
> 
> Regarding REUSE_AFTER_RCU_GP, I meant
> REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.
> 
> > 
> > Hou's proposals: 1. BPF_MA_REUSE_AFTER_two_RCUs_GP 2. BPF_MA_FREE_AFTER_single_RCU_GP
> 
> It probably is where the confusion is. I thought Hou's
> BPF_MA_REUSE_AFTER_RCU_GP is already
> REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace. From the commit message:

Sorry. My bad. You're correct.
The difference between my and Hou's #1 is whether rcu_tasks_trace is global or per-cpu.

> 
> " ... So introduce BPF_MA_REUSE_AFTER_RCU_GP to solve these problems. For
> BPF_MA_REUSE_AFTER_GP, the freed objects are reused only after one RCU
> grace period and may be returned back to slab system after another
> RCU-tasks-trace grace period. ..."
> 
> [I assumed BPF_MA_REUSE_AFTER_GP is just a typo of BPF_MA_REUSE_AFTER_"RCU"_GP]
> 
> > 
> > If I'm reading bpf_local_storage correctly it can remove reuse_now logic
> > in all conditions with REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.
> 
> Right, for smap->bpf_ma == true (cgroup and task storage), all reuse_now
> logic can be gone and directly use the bpf_mem_cache_free(). Potentially the
> sk/inode can also move to bpf_ma after running some benchmark. This will
> simplify things a lot. For sk storage, the reuse_now was there to avoid the
> unnecessary tasks_trace gp because performance impact was reported on sk
> storage where connections can be open-and-close very frequently.
Alexei Starovoitov May 4, 2023, 2 a.m. UTC | #8
On Thu, May 04, 2023 at 09:35:17AM +0800, Hou Tao wrote:
> Hi,
> 
> On 5/4/2023 2:48 AM, Alexei Starovoitov wrote:
> > On Sat, Apr 29, 2023 at 06:12:12PM +0800, Hou Tao wrote:
> >> +
> >> +static void notrace wait_gp_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
> >> +{
> >> +	unsigned long flags;
> >> +
> >> +	local_irq_save(flags);
> >> +	/* In case a NMI-context bpf program is also freeing object. */
> >> +	if (local_inc_return(&c->active) == 1) {
> >> +		bool try_queue_work = false;
> >> +
> >> +		/* kworker may remove elements from prepare_reuse_head */
> >> +		raw_spin_lock(&c->reuse_lock);
> >> +		if (llist_empty(&c->prepare_reuse_head))
> >> +			c->prepare_reuse_tail = llnode;
> >> +		__llist_add(llnode, &c->prepare_reuse_head);
> >> +		if (++c->prepare_reuse_cnt > c->high_watermark) {
> >> +			/* Zero out prepare_reuse_cnt early to prevent
> >> +			 * unnecessary queue_work().
> >> +			 */
> >> +			c->prepare_reuse_cnt = 0;
> >> +			try_queue_work = true;
> >> +		}
> >> +		raw_spin_unlock(&c->reuse_lock);
> >> +
> >> +		if (try_queue_work && !work_pending(&c->reuse_work)) {
> >> +			/* Use reuse_cb_in_progress to indicate there is
> >> +			 * inflight reuse kworker or reuse RCU callback.
> >> +			 */
> >> +			atomic_inc(&c->reuse_cb_in_progress);
> >> +			/* Already queued */
> >> +			if (!queue_work(bpf_ma_wq, &c->reuse_work))
> > As Martin pointed out queue_work() is not safe here.
> > The raw_spin_lock(&c->reuse_lock); earlier is not safe either.
> I see. Didn't recognize these problems.
> > For the next version please drop workers and spin_lock from unit_free/alloc paths.
> > If lock has to be taken it should be done from irq_work.
> > Under no circumstances we can use alloc_workqueue(). No new kthreads.
> Is there any reason to prohibit the use of a new kthread in irq_work?

Because:
1. there is a workable solution without kthreads.
2. if there was no solution we would have to come up with one.
kthread is not an answer. It's hard to reason about a setup when kthreads
are in critical path due to scheduler. Assume the system is 100% cpu loaded.
kthreads delays and behavior is unpredictable. We cannot subject memory alloc/free to it.

> >
> > We can avoid adding new flag to bpf_mem_alloc to reduce the complexity
> > and do roughly equivalent of REUSE_AFTER_RCU_GP unconditionally in the following way:
> >
> > - alloc_bulk() won't be trying to steal from c->free_by_rcu.
> >
> > - do_call_rcu() does call_rcu(&c->rcu, __free_rcu) instead of task-trace version.
> Not sure whether or not one inflight RCU callback is enough. Will check.
> If one is not enough, I may use kmalloc(__GFP_NOWAIT) in irq work to
> allocate multiple RCU callbacks.

Pls dont. Just assume it will work, implement the proposal (if you agree),
come back with the numbers and then we will discuss again.
We cannot keep arguing about merits of complicated patch set that was done on partial data.
Just like the whole thing with kthreads.
I requested early on: "pls no kthreads" and weeks later we're still arguing.

> > - rcu_trace_implies_rcu_gp() is never used.
> >
> > - after RCU_GP __free_rcu() moves all waiting_for_gp elements into 
> >   a size specific link list per bpf_mem_alloc (not per bpf_mem_cache which is per-cpu)
> >   and does call_rcu_tasks_trace
> >
> > - Let's call this list ma->free_by_rcu_tasks_trace
> >   (only one list for bpf_mem_alloc with known size or NUM_CACHES such lists when size == 0 at init)
> >
> > - any cpu alloc_bulk() can steal from size specific ma->free_by_rcu_tasks_trace list that
> >   is protected by ma->spin_lock (1 or NUM_CACHES such locks)
> To reduce the lock contention, alloc_bulk() can steal from the global
> list in batch. 

Pls no special batches. The simplest implementation possible.
alloc_bulk() has 'int cnt' argument. It will try to steal 'cnt' from ma->free_by_rcu_tasks_trace.
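
A sketch of that stealing step, assuming the per-ma list and lock from the
proposal above (the helper name and locking details are illustrative):

  static void *steal_from_free_by_rcu_tasks_trace(struct bpf_mem_alloc *ma)
  {
  	struct llist_node *obj;
  	unsigned long flags;

  	raw_spin_lock_irqsave(&ma->spin_lock, flags);
  	obj = __llist_del_first(&ma->free_by_rcu_tasks_trace);
  	raw_spin_unlock_irqrestore(&ma->spin_lock, flags);
  	return obj;
  }

alloc_bulk() would call such a helper up to 'cnt' times before falling back to
kmalloc, in place of the current per-cpu __llist_del_first(&c->free_by_rcu)
steal.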

> Had tried the global list before but I didn't do the
> concurrent freeing, I think it could reduce the risk of OOM for
> add_del_on_diff_cpu.

Maybe you've tried, but we didn't see the patches and we cannot take for granted
anyone saying: "I've tried *foo*. It didn't work. That's why I'm doing *bar* here".
Everything mm is tricky. Little details matter a lot.
It's also questionable whether we should make any design decisions based on this benchmark
and in particular based on add_del_on_diff_cpu part of it.
I'm not saying we shouldn't consider it, but all numbers have a "decision weight"
associated with them.
For example: there is existing samples/bpf/map_perf_test benchmark.
So far we haven't seen the numbers from it.
Is it more important than your new bench? Yes and no. All numbers matter.

> >
> > - ma->waiting_for_gp_tasks_trace will be freeing elements into slab
> >
> > What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
> > Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
> > (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
> >
> > After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
> > Probably usage of bpf_mem_alloc in local storage can be simplified as well.
> > Martin wdyt?
> >
> > I think this approach adds minimal complexity to bpf_mem_alloc while solving all existing pain points
> > including needs of qp-trie.
> Thanks for these great suggestions. Will try to do it in v4.

Thanks.
Also for benchmark, pls don't hack htab and benchmark as 'non-landable patches' (as in this series).
Construct the patch series as:
- prep patches
- benchmark
- unconditional convert of bpf_ma to REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
  with numbers from bench(s) before and after this patch.
Hou Tao May 4, 2023, 2:08 a.m. UTC | #9
Hi,

On 5/4/2023 7:39 AM, Martin KaFai Lau wrote:
> On 5/3/23 4:06 PM, Alexei Starovoitov wrote:
>> On Wed, May 03, 2023 at 02:57:03PM -0700, Martin KaFai Lau wrote:
>>> On 5/3/23 11:48 AM, Alexei Starovoitov wrote:
SNIP
>>>
>>> If the bpf prog always does a bpf_rcu_read_lock() before accessing the
>>> (e.g.) task local storage, it can remove the reuse_now conditions in
>>> the
>>> bpf_local_storage and directly call the bpf_mem_cache_free().
>>>
>>> The only corner use case is when the bpf_prog or syscall does
>>> bpf_task_storage_delete() instead of having the task storage stays
>>> with the
>>> whole lifetime of the task_struct. Using REUSE_AFTER_RCU_GP will be
>>> a change
>>> of this uaf guarantee to the sleepable program but it is still safe
>>> because
>>> it is freed after tasks_trace gp. We could take this chance to align
>>> this
>>> behavior of the local storage map to the other bpf maps.
>>>
>>> For BPF_MA_FREE_AFTER_RCU_GP, there are cases that the bpf local
>>> storage
>>> knows it can be freed without waiting tasks_trace gp. However, only
>>> task/cgroup storages are in bpf ma and I don't believe this
>>> optimization
>>> matter much for them. I would rather focus on the REUSE_AFTER_RCU_GP
>>> first.
OK.
>>
>> I'm confused which REUSE_AFTER_RCU_GP you meant.
>> What I proposed above is
>> REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
>
> Regarding REUSE_AFTER_RCU_GP, I meant
> REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.
>
>>
>> Hou's proposals: 1. BPF_MA_REUSE_AFTER_two_RCUs_GP 2.
>> BPF_MA_FREE_AFTER_single_RCU_GP
>
> It probably is where the confusion is. I thought Hou's
> BPF_MA_REUSE_AFTER_RCU_GP is already
> REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace. From the commit
> message:
>
> " ... So introduce BPF_MA_REUSE_AFTER_RCU_GP to solve these problems. For
> BPF_MA_REUSE_AFTER_GP, the freed objects are reused only after one RCU
> grace period and may be returned back to slab system after another
> RCU-tasks-trace grace period. ..."
>
> [I assumed BPF_MA_REUSE_AFTER_GP is just a typo of
> BPF_MA_REUSE_AFTER_"RCU"_GP]
Yes. The current implementation of BPF_MA_REUSE_AFTER_RCU_GP is already
REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace. It moves the freed
objects to the reuse_ready_head list after one RCU GP, splices the
elements in reuse_ready_head to wait_for_free when reuse_ready_head is
not empty, and frees the elements in wait_for_free by
call_rcu_tasks_trace().
>
>>
>> If I'm reading bpf_local_storage correctly it can remove reuse_now logic
>> in all conditions with
>> REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace.
>
> Right, for smap->bpf_ma == true (cgroup and task storage), all
> reuse_now logic can be gone and directly use the bpf_mem_cache_free().
> Potentially the sk/inode can also move to bpf_ma after running some
> benchmark. This will simplify things a lot. For sk storage, the
> reuse_now was there to avoid the unnecessary tasks_trace gp because
> performance impact was reported on sk storage where connections can be
> open-and-close very frequently.
Hou Tao May 4, 2023, 2:30 a.m. UTC | #10
Hi,

On 5/4/2023 10:00 AM, Alexei Starovoitov wrote:
> On Thu, May 04, 2023 at 09:35:17AM +0800, Hou Tao wrote:
>> Hi,
>>
>> On 5/4/2023 2:48 AM, Alexei Starovoitov wrote:
>>> On Sat, Apr 29, 2023 at 06:12:12PM +0800, Hou Tao wrote:
SNIP
>>> +			/* Already queued */
>>> +			if (!queue_work(bpf_ma_wq, &c->reuse_work))
>>> As Martin pointed out queue_work() is not safe here.
>>> The raw_spin_lock(&c->reuse_lock); earlier is not safe either.
>> I see. Didn't recognize these problems.
>>> For the next version please drop workers and spin_lock from unit_free/alloc paths.
>>> If lock has to be taken it should be done from irq_work.
>>> Under no circumstances we can use alloc_workqueue(). No new kthreads.
>> Is there any reason to prohibit the use of a new kthread in irq_work?
> Because:
> 1. there is a workable solution without kthreads.
> 2. if there was no solution we would have to come up with one.
> kthread is not an answer. It's hard to reason about a setup when kthreads
> are in critical path due to scheduler. Assume the system is 100% cpu loaded.
> kthreads delays and behavior is unpredictable. We cannot subject memory alloc/free to it.
I see. Thanks for the explanation.
>
>>> We can avoid adding new flag to bpf_mem_alloc to reduce the complexity
>>> and do roughly equivalent of REUSE_AFTER_RCU_GP unconditionally in the following way:
>>>
>>> - alloc_bulk() won't be trying to steal from c->free_by_rcu.
>>>
>>> - do_call_rcu() does call_rcu(&c->rcu, __free_rcu) instead of task-trace version.
>> Not sure whether or not one inflight RCU callback is enough. Will check.
>> If one is not enough, I may use kmalloc(__GFP_NOWAIT) in irq work to
>> allocate multiple RCU callbacks.
> Pls dont. Just assume it will work, implement the proposal (if you agree),
> come back with the numbers and then we will discuss again.
> We cannot keep arguing about merits of complicated patch set that was done on partial data.
OK. Will do.
> Just like the whole thing with kthreads.
> I requested early on: "pls no kthreads" and weeks later we're still arguing.
Sorry about missing that part.
>
>>> - rcu_trace_implies_rcu_gp() is never used.
>>>
>>> - after RCU_GP __free_rcu() moves all waiting_for_gp elements into 
>>>   a size specific link list per bpf_mem_alloc (not per bpf_mem_cache which is per-cpu)
>>>   and does call_rcu_tasks_trace
>>>
>>> - Let's call this list ma->free_by_rcu_tasks_trace
>>>   (only one list for bpf_mem_alloc with known size or NUM_CACHES such lists when size == 0 at init)
>>>
>>> - any cpu alloc_bulk() can steal from size specific ma->free_by_rcu_tasks_trace list that
>>>   is protected by ma->spin_lock (1 or NUM_CACHES such locks)
>> To reduce the lock contention, alloc_bulk() can steal from the global
>> list in batch. 
> Pls no special batches. The simplest implementation possible.
> alloc_bulk() has 'int cnt' argument. It will try to steal 'cnt' from ma->free_by_rcu_tasks_trace.
I see. Will do.
>
>> Had tried the global list before but I didn't do the
>> concurrent freeing, I think it could reduce the risk of OOM for
>> add_del_on_diff_cpu.
> Maybe you've tried, but we didn't see the patches and we cannot take for granted
> anyone saying: "I've tried *foo*. It didn't work. That's why I'm doing *bar* here".
> Everything mm is tricky. Little details matter a lot.
OK. I think it will work. The reason I didn't post it is that I was
obsessed with lock-less bpf ma at that moment.
> It's also questionable whether we should make any design decisions based on this benchmark
> and in particular based on add_del_on_diff_cpu part of it.
> I'm not saying we shouldn't consider it, but all numbers have a "decision weight"
> associated with them.
I see. The reason for add_del_on_diff_cpu is just to complement the
possible use cases of bpf memory allocator.
> For example: there is existing samples/bpf/map_perf_test benchmark.
> So far we haven't seen the numbers from it.
> Is it more important than your new bench? Yes and no. All numbers matter.
Will post the benchmark result for map_perf_test in v4. Had planned to
migrate map_perf_test to selftests/bpf/benchs, but couldn't find enough
time to do that.
>
>>> - ma->waiting_for_gp_tasks_trace will be freeing elements into slab
>>>
>>> What it means that sleepable progs using hashmap will be able to avoid uaf with bpf_rcu_read_lock().
>>> Without explicit bpf_rcu_read_lock() it's still safe and equivalent to existing behavior of bpf_mem_alloc.
>>> (while your proposed BPF_MA_FREE_AFTER_RCU_GP flavor is not safe to use in hashtab with sleepable progs)
>>>
>>> After that we can unconditionally remove rcu_head/call_rcu from bpf_cpumask and improve usability of bpf_obj_drop.
>>> Probably usage of bpf_mem_alloc in local storage can be simplified as well.
>>> Martin wdyt?
>>>
>>> I think this approach adds minimal complexity to bpf_mem_alloc while solving all existing pain points
>>> including needs of qp-trie.
>> Thanks for these great suggestions. Will try to do it in v4.
> Thanks.
> Also for benchmark, pls don't hack htab and benchmark as 'non-landable patches' (as in this series).
> Construct the patch series as:
> - prep patches
> - benchmark
> - unconditional convert of bpf_ma to REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
>   with numbers from bench(s) before and after this patch.
Thanks again for the suggestion. Will do in v4.
Alexei Starovoitov June 1, 2023, 5:36 p.m. UTC | #11
On Wed, May 3, 2023 at 7:30 PM Hou Tao <houtao@huaweicloud.com> wrote:
>
> > Construct the patch series as:
> > - prep patches
> > - benchmark
> > - unconditional convert of bpf_ma to REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
> >   with numbers from bench(s) before and after this patch.
> Thanks again for the suggestion. Will do in v4.


It's been a month. Any update?

Should we take over this work if you're busy?
Hou Tao June 2, 2023, 2:39 a.m. UTC | #12
Hi,

On 6/2/2023 1:36 AM, Alexei Starovoitov wrote:
> On Wed, May 3, 2023 at 7:30 PM Hou Tao <houtao@huaweicloud.com> wrote:
>>> Construct the patch series as:
>>> - prep patches
>>> - benchmark
>>> - unconditional convert of bpf_ma to REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
>>>   with numbers from bench(s) before and after this patch.
>> Thanks again for the suggestion. Will do in v4.
>
> It's been a month. Any update?
>
> Should we take over this work if you're busy?
Sorry for the delay. I should have posted some progress information about
the patch set earlier. The patch set is simpler compared with v3; I had
implemented v4 about two weeks ago. The problem is that v4 doesn't work as
expected: its memory usage is huge compared with v3. The following is
the output from the htab-mem benchmark:

overwrite:
Summary: loop   11.07 ±    1.25k/s, memory usage  995.08 ±  680.87MiB,
peak memory usage 2183.38MiB
batch_add_batch_del:
Summary: loop   11.48 ±    1.24k/s, memory usage 1393.36 ±  780.41MiB,
peak memory usage 2836.68MiB
add_del_on_diff_cpu:
Summary: loop    6.07 ±    0.69k/s, memory usage   14.44 ±    2.34MiB,
peak memory usage   20.30MiB

The direct reason for the huge memory usage is a slower RCU grace period.
The RCU grace period used for reuse is much longer compared with v3; it
is about 100ms or more (e.g. 2.6s). I am still trying to find out the
root cause of the slow RCU grace period. The first guess is that the
running time of the bpf program attached to getpgid() is longer, so the
context switch in the bench is slowed down. The hist-diagram of getpgid()
latency in v4 indeed shows a lot of abnormal tail latencies compared
with v3, as shown below.

v3 getpid() latency during overwrite benchmark:
@hist_ms:
[0]               193451 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
[1]                  767 |                                                    |
[2, 4)                75 |                                                    |
[4, 8)                 1 |                                                    |

v4 getpid() latency during overwrite benchmark:
@hist_ms:
[0]                86270 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
[1]                31252 |@@@@@@@@@@@@@@@@@@                                  |
[2, 4)                 1 |                                                    |
[4, 8)                 0 |                                                    |
[8, 16)                0 |                                                    |
[16, 32)               0 |                                                    |
[32, 64)               0 |                                                    |
[64, 128)              0 |                                                    |
[128, 256)             3 |                                                    |
[256, 512)             2 |                                                    |
[512, 1K)              1 |                                                    |
[1K, 2K)               2 |                                                    |
[2K, 4K)               1 |                                                    |

I think the newly-added global spin-lock in the memory allocator and the
irq-work running in the context of the free procedure may lead to the
abnormal tail latency, and I am trying to demonstrate that by using
fine-grained locks and a kworker (just temporarily). But on the other hand,
considering that the number of abnormal tail latencies is much smaller
compared with the total number of getpgid() syscalls, I think maybe there
are still other causes for the slow RCU GP.

Because the progress of v4 is delayed, how about I post v4 as soon as
possible for discussion (maybe I did it wrong) and at the same time
continue to investigate the slow RCU grace period problem (I will try to
get some help from the RCU community)?

Regards,
Tao
Alexei Starovoitov June 2, 2023, 4:25 p.m. UTC | #13
On Thu, Jun 1, 2023 at 7:40 PM Hou Tao <houtao@huaweicloud.com> wrote:
>
> Hi,
>
> On 6/2/2023 1:36 AM, Alexei Starovoitov wrote:
> > On Wed, May 3, 2023 at 7:30 PM Hou Tao <houtao@huaweicloud.com> wrote:
> >>> Construct the patch series as:
> >>> - prep patches
> >>> - benchmark
> >>> - unconditional convert of bpf_ma to REUSE_AFTER_rcu_GP_and_free_after_rcu_tasks_trace
> >>>   with numbers from bench(s) before and after this patch.
> >> Thanks again for the suggestion. Will do in v4.
> >
> > It's been a month. Any update?
> >
> > Should we take over this work if you're busy?
> Sorry for the delay. I should have posted some progress information about
> the patch set earlier. The patch set is simpler compared with v3; I had
> implemented v4 about two weeks ago. The problem is that v4 doesn't work as
> expected: its memory usage is huge compared with v3. The following is
> the output from the htab-mem benchmark:
>
> overwrite:
> Summary: loop   11.07 ±    1.25k/s, memory usage  995.08 ±  680.87MiB,
> peak memory usage 2183.38MiB
> batch_add_batch_del:
> Summary: loop   11.48 ±    1.24k/s, memory usage 1393.36 ±  780.41MiB,
> peak memory usage 2836.68MiB
> add_del_on_diff_cpu:
> Summary: loop    6.07 ±    0.69k/s, memory usage   14.44 ±    2.34MiB,
> peak memory usage   20.30MiB
>
> The direct reason for the huge memory usage is a slower RCU grace period.
> The RCU grace period used for reuse is much longer compared with v3; it
> is about 100ms or more (e.g. 2.6s). I am still trying to find out the
> root cause of the slow RCU grace period. The first guess is that the
> running time of the bpf program attached to getpgid() is longer, so the
> context switch in the bench is slowed down. The hist-diagram of getpgid()
> latency in v4 indeed shows a lot of abnormal tail latencies compared
> with v3, as shown below.
>
> v3 getpid() latency during overwrite benchmark:
> @hist_ms:
> [0]               193451 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
> [1]                  767 |                                                    |
> [2, 4)                75 |                                                    |
> [4, 8)                 1 |                                                    |
>
> v4 getpid() latency during overwrite benchmark:
> @hist_ms:
> [0]                86270 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@|
> [1]                31252 |@@@@@@@@@@@@@@@@@@                                  |
> [2, 4)                 1 |                                                    |
> [4, 8)                 0 |                                                    |
> [8, 16)                0 |                                                    |
> [16, 32)               0 |                                                    |
> [32, 64)               0 |                                                    |
> [64, 128)              0 |                                                    |
> [128, 256)             3 |                                                    |
> [256, 512)             2 |                                                    |
> [512, 1K)              1 |                                                    |
> [1K, 2K)               2 |                                                    |
> [2K, 4K)               1 |                                                    |
>
> I think the newly-added global spin-lock in the memory allocator and the
> irq-work running in the context of the free procedure may lead to the
> abnormal tail latency, and I am trying to demonstrate that by using
> fine-grained locks and a kworker (just temporarily). But on the other hand,
> considering that the number of abnormal tail latencies is much smaller
> compared with the total number of getpgid() syscalls, I think maybe there
> are still other causes for the slow RCU GP.
>
> Because the progress of v4 is delayed, how about I post v4 as soon as
> possible for discussion (maybe I did it wrong) and at the same time
> continue to investigate the slow RCU grace period problem (I will try to
> get some help from the RCU community)?

Yes. Please send v4. Let's investigate huge memory consumption together.

Patch

diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index 148347950e16..e7f68432713b 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -18,6 +18,7 @@  struct bpf_mem_alloc {
 /* flags for bpf_mem_alloc_init() */
 enum {
 	BPF_MA_PERCPU = 1U << 0,
+	BPF_MA_REUSE_AFTER_RCU_GP = 1U << 1,
 };
 
 /* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 072102476019..262100f89610 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -63,6 +63,10 @@  static u8 size_index[24] __ro_after_init = {
 	2	/* 192 */
 };
 
+static struct workqueue_struct *bpf_ma_wq;
+
+static void bpf_ma_prepare_reuse_work(struct work_struct *work);
+
 static int bpf_mem_cache_idx(size_t size)
 {
 	if (!size || size > 4096)
@@ -98,18 +102,36 @@  struct bpf_mem_cache {
 	int free_cnt;
 	int low_watermark, high_watermark, batch;
 	int percpu_size;
+	int cpu;
 	unsigned int flags;
 
+	raw_spinlock_t reuse_lock;
+	bool abort_reuse;
+	struct llist_head reuse_ready_head;
+	struct llist_node *reuse_ready_tail;
+	struct llist_head wait_for_free;
+	struct llist_head prepare_reuse_head;
+	struct llist_node *prepare_reuse_tail;
+	unsigned int prepare_reuse_cnt;
+	atomic_t reuse_cb_in_progress;
+	struct work_struct reuse_work;
+
 	struct rcu_head rcu;
 	struct llist_head free_by_rcu;
 	struct llist_head waiting_for_gp;
-	atomic_t call_rcu_in_progress;
+	atomic_t free_cb_in_progress;
 };
 
 struct bpf_mem_caches {
 	struct bpf_mem_cache cache[NUM_CACHES];
 };
 
+struct bpf_reuse_batch {
+	struct bpf_mem_cache *c;
+	struct llist_node *head, *tail;
+	struct rcu_head rcu;
+};
+
 static struct llist_node notrace *__llist_del_first(struct llist_head *head)
 {
 	struct llist_node *entry, *next;
@@ -154,6 +176,45 @@  static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void *bpf_ma_get_reusable_obj(struct bpf_mem_cache *c)
+{
+	if (c->flags & BPF_MA_REUSE_AFTER_RCU_GP) {
+		unsigned long flags;
+		void *obj;
+
+		if (llist_empty(&c->reuse_ready_head) && llist_empty(&c->wait_for_free))
+			return NULL;
+
+		/* reuse_ready_head and wait_for_free may be manipulated by
+		 * kworker and RCU callbacks.
+		 */
+		raw_spin_lock_irqsave(&c->reuse_lock, flags);
+		obj = __llist_del_first(&c->reuse_ready_head);
+		if (obj) {
+			if (llist_empty(&c->reuse_ready_head))
+				c->reuse_ready_tail = NULL;
+		} else {
+			obj = __llist_del_first(&c->wait_for_free);
+		}
+		raw_spin_unlock_irqrestore(&c->reuse_lock, flags);
+		return obj;
+	}
+
+	/*
+	 * free_by_rcu is only manipulated by irq work refill_work().
+	 * IRQ works on the same CPU are called sequentially, so it is
+	 * safe to use __llist_del_first() here. If alloc_bulk() is
+	 * invoked by the initial prefill, there will be no running
+	 * refill_work(), so __llist_del_first() is fine as well.
+	 *
+	 * In most cases, objects on free_by_rcu are from the same CPU.
+	 * If some objects come from other CPUs, it doesn't incur any
+	 * harm because NUMA_NO_NODE means the preference for current
+	 * numa node and it is not a guarantee.
+	 */
+	return __llist_del_first(&c->free_by_rcu);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
@@ -165,19 +226,7 @@  static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	memcg = get_memcg(c);
 	old_memcg = set_active_memcg(memcg);
 	for (i = 0; i < cnt; i++) {
-		/*
-		 * free_by_rcu is only manipulated by irq work refill_work().
-		 * IRQ works on the same CPU are called sequentially, so it is
-		 * safe to use __llist_del_first() here. If alloc_bulk() is
-		 * invoked by the initial prefill, there will be no running
-		 * refill_work(), so __llist_del_first() is fine as well.
-		 *
-		 * In most cases, objects on free_by_rcu are from the same CPU.
-		 * If some objects come from other CPUs, it doesn't incur any
-		 * harm because NUMA_NO_NODE means the preference for current
-		 * numa node and it is not a guarantee.
-		 */
-		obj = __llist_del_first(&c->free_by_rcu);
+		obj = bpf_ma_get_reusable_obj(c);
 		if (!obj) {
 			/* Allocate, but don't deplete atomic reserves that typical
 			 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
@@ -236,7 +285,7 @@  static void __free_rcu(struct rcu_head *head)
 	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
 
 	free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
-	atomic_set(&c->call_rcu_in_progress, 0);
+	atomic_set(&c->free_cb_in_progress, 0);
 }
 
 static void __free_rcu_tasks_trace(struct rcu_head *head)
@@ -264,7 +313,7 @@  static void do_call_rcu(struct bpf_mem_cache *c)
 {
 	struct llist_node *llnode, *t;
 
-	if (atomic_xchg(&c->call_rcu_in_progress, 1))
+	if (atomic_xchg(&c->free_cb_in_progress, 1))
 		return;
 
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
@@ -409,6 +458,8 @@  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags)
 			c->objcg = objcg;
 			c->percpu_size = percpu_size;
 			c->flags = flags;
+			c->cpu = cpu;
+			INIT_WORK(&c->reuse_work, bpf_ma_prepare_reuse_work);
 			prefill_mem_cache(c, cpu);
 		}
 		ma->cache = pc;
@@ -433,6 +484,8 @@  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags)
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
 			c->flags = flags;
+			c->cpu = cpu;
+			INIT_WORK(&c->reuse_work, bpf_ma_prepare_reuse_work);
 			prefill_mem_cache(c, cpu);
 		}
 	}
@@ -444,18 +497,40 @@  int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags)
 static void drain_mem_cache(struct bpf_mem_cache *c)
 {
 	bool percpu = !!c->percpu_size;
+	struct llist_node *head[3];
+	unsigned long flags;
 
 	/* No progs are using this bpf_mem_cache, but htab_map_free() called
 	 * bpf_mem_cache_free() for all remaining elements and they can be in
 	 * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
 	 *
-	 * Except for waiting_for_gp list, there are no concurrent operations
-	 * on these lists, so it is safe to use __llist_del_all().
+	 * Except for waiting_for_gp and free_llist_extra list, there are no
+	 * concurrent operations on these lists, so it is safe to use
+	 * __llist_del_all().
 	 */
 	free_all(__llist_del_all(&c->free_by_rcu), percpu);
 	free_all(llist_del_all(&c->waiting_for_gp), percpu);
 	free_all(__llist_del_all(&c->free_llist), percpu);
-	free_all(__llist_del_all(&c->free_llist_extra), percpu);
+	free_all(llist_del_all(&c->free_llist_extra), percpu);
+
+	if (!(c->flags & BPF_MA_REUSE_AFTER_RCU_GP))
+		return;
+
+	raw_spin_lock_irqsave(&c->reuse_lock, flags);
+	/* Indicate kworker and RCU callback to free elements directly
+	 * instead of adding new elements into these lists.
+	 */
+	c->abort_reuse = true;
+	head[0] = __llist_del_all(&c->prepare_reuse_head);
+	c->prepare_reuse_tail = NULL;
+	head[1] = __llist_del_all(&c->reuse_ready_head);
+	c->reuse_ready_tail = NULL;
+	head[2] = __llist_del_all(&c->wait_for_free);
+	raw_spin_unlock_irqrestore(&c->reuse_lock, flags);
+
+	free_all(head[0], percpu);
+	free_all(head[1], percpu);
+	free_all(head[2], percpu);
 }
 
 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
@@ -466,10 +541,39 @@  static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
 	ma->caches = NULL;
 }
 
+static void bpf_ma_cancel_reuse_work(struct bpf_mem_alloc *ma)
+{
+	struct bpf_mem_caches *cc;
+	struct bpf_mem_cache *c;
+	int cpu, i;
+
+	if (ma->cache) {
+		for_each_possible_cpu(cpu) {
+			c = per_cpu_ptr(ma->cache, cpu);
+			cancel_work_sync(&c->reuse_work);
+		}
+	}
+	if (ma->caches) {
+		for_each_possible_cpu(cpu) {
+			cc = per_cpu_ptr(ma->caches, cpu);
+			for (i = 0; i < NUM_CACHES; i++) {
+				c = &cc->cache[i];
+				cancel_work_sync(&c->reuse_work);
+			}
+		}
+	}
+}
+
 static void free_mem_alloc(struct bpf_mem_alloc *ma)
 {
-	/* waiting_for_gp lists was drained, but __free_rcu might
-	 * still execute. Wait for it now before we freeing percpu caches.
+	bool reuse_after_rcu_gp = ma->flags & BPF_MA_REUSE_AFTER_RCU_GP;
+
+	/* Cancel the inflight kworkers */
+	if (reuse_after_rcu_gp)
+		bpf_ma_cancel_reuse_work(ma);
+
+	/* For normal bpf ma, the waiting_for_gp lists were drained, but __free_rcu
+	 * might still execute. Wait for it now before freeing the percpu caches.
 	 *
 	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
 	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
@@ -477,9 +581,13 @@  static void free_mem_alloc(struct bpf_mem_alloc *ma)
 	 * so if call_rcu(head, __free_rcu) is skipped due to
 	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
 	 * using rcu_trace_implies_rcu_gp() as well.
+	 *
+	 * For reuse-after-rcu-gp bpf ma, use rcu_barrier_tasks_trace() to
+	 * wait for the pending bpf_ma_free_reusable_cb() and use rcu_barrier()
+	 * to wait for the pending bpf_ma_reuse_cb().
 	 */
 	rcu_barrier_tasks_trace();
-	if (!rcu_trace_implies_rcu_gp())
+	if (reuse_after_rcu_gp || !rcu_trace_implies_rcu_gp())
 		rcu_barrier();
 	free_mem_alloc_no_barrier(ma);
 }
@@ -512,6 +620,7 @@  static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
 	}
 
 	/* Defer barriers into worker to let the rest of map memory to be freed */
+	copy->flags = ma->flags;
 	copy->cache = ma->cache;
 	ma->cache = NULL;
 	copy->caches = ma->caches;
@@ -541,7 +650,9 @@  void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 			 */
 			irq_work_sync(&c->refill_work);
 			drain_mem_cache(c);
-			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
+			rcu_in_progress += atomic_read(&c->free_cb_in_progress);
+			/* Pending kworkers or RCU callbacks */
+			rcu_in_progress += atomic_read(&c->reuse_cb_in_progress);
 		}
 		/* objcg is the same across cpus */
 		if (c->objcg)
@@ -556,7 +667,8 @@  void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 				c = &cc->cache[i];
 				irq_work_sync(&c->refill_work);
 				drain_mem_cache(c);
-				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
+				rcu_in_progress += atomic_read(&c->free_cb_in_progress);
+				rcu_in_progress += atomic_read(&c->reuse_cb_in_progress);
 			}
 		}
 		if (c->objcg)
@@ -600,18 +712,183 @@  static void notrace *unit_alloc(struct bpf_mem_cache *c)
 	return llnode;
 }
 
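+/* One RCU grace period has elapsed for the [head, tail] batch, so move it to
+ * reuse_ready_head to make the elements reusable. If drain_mem_cache() has
+ * already started (abort_reuse is set), free the batch directly instead.
+ */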
+static void bpf_ma_add_to_reuse_ready_or_free(struct bpf_mem_cache *c, struct llist_node *head,
+					      struct llist_node *tail)
+{
+	unsigned long flags;
+	bool abort;
+
+	raw_spin_lock_irqsave(&c->reuse_lock, flags);
+	abort = c->abort_reuse;
+	if (!abort) {
+		if (llist_empty(&c->reuse_ready_head))
+			c->reuse_ready_tail = tail;
+		__llist_add_batch(head, tail, &c->reuse_ready_head);
+	}
+	raw_spin_unlock_irqrestore(&c->reuse_lock, flags);
+
+	/* Draining has started, so don't move these objects to the reuse_ready
+	 * list; free them directly instead.
+	 */
+	if (abort)
+		free_all(head, !!c->percpu_size);
+}
+
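+/* RCU callback: the elements in the batch were freed one RCU grace period
+ * ago, so they are now safe to reuse.
+ */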
+static void bpf_ma_reuse_cb(struct rcu_head *rcu)
+{
+	struct bpf_reuse_batch *batch = container_of(rcu, struct bpf_reuse_batch, rcu);
+	struct bpf_mem_cache *c = batch->c;
+
+	bpf_ma_add_to_reuse_ready_or_free(c, batch->head, batch->tail);
+	atomic_dec(&c->reuse_cb_in_progress);
+	kfree(batch);
+}
+
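+/* Move the reuse-ready elements to wait_for_free so they can be returned to
+ * slab after a RCU-tasks-trace grace period. Only one in-flight
+ * bpf_ma_free_reusable_cb() is allowed at a time. Called with reuse_lock held.
+ */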
+static bool bpf_ma_try_free_reuse_objs(struct bpf_mem_cache *c)
+{
+	struct llist_node *head, *tail;
+	bool do_free;
+
+	if (llist_empty(&c->reuse_ready_head))
+		return false;
+
+	do_free = !atomic_xchg(&c->free_cb_in_progress, 1);
+	if (!do_free)
+		return false;
+
+	head = __llist_del_all(&c->reuse_ready_head);
+	tail = c->reuse_ready_tail;
+	c->reuse_ready_tail = NULL;
+
+	__llist_add_batch(head, tail, &c->wait_for_free);
+
+	return true;
+}
+
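+/* RCU-tasks-trace callback: a RCU-tasks-trace grace period has also elapsed,
+ * so the elements on wait_for_free can be returned to slab now.
+ */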
+static void bpf_ma_free_reusable_cb(struct rcu_head *rcu)
+{
+	struct bpf_mem_cache *c = container_of(rcu, struct bpf_mem_cache, rcu);
+	struct llist_node *head;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&c->reuse_lock, flags);
+	head = __llist_del_all(&c->wait_for_free);
+	raw_spin_unlock_irqrestore(&c->reuse_lock, flags);
+
+	free_all(head, !!c->percpu_size);
+	atomic_set(&c->free_cb_in_progress, 0);
+}
+
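+/* Kworker which makes freed elements reusable: it grabs the elements
+ * accumulated in prepare_reuse_head (and free_llist_extra), waits for one
+ * RCU grace period (through call_rcu() or synchronize_rcu_expedited() as a
+ * fallback) and then moves them to reuse_ready_head. It also kicks off the
+ * freeing of the already-reusable elements back to slab.
+ */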
+static void bpf_ma_prepare_reuse_work(struct work_struct *work)
+{
+	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, reuse_work);
+	struct llist_node *head, *tail, *llnode, *tmp;
+	struct bpf_reuse_batch *batch;
+	unsigned long flags;
+	bool do_free;
+
+	local_irq_save(flags);
+	/* When c->cpu goes offline, the kworker may run on a CPU other than
+	 * the one which queued the work. When the two CPUs are the same, the
+	 * kworker may be interrupted by a NMI, so bump c->active to protect
+	 * against such concurrency.
+	 */
+	if (c->cpu == smp_processor_id())
+		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	raw_spin_lock(&c->reuse_lock);
+	head = __llist_del_all(&c->prepare_reuse_head);
+	tail = c->prepare_reuse_tail;
+	c->prepare_reuse_tail = NULL;
+	c->prepare_reuse_cnt = 0;
+	if (c->cpu == smp_processor_id())
+		local_dec(&c->active);
+
+	/* Try to free the elements in the reusable list. Until they are
+	 * actually freed in the RCU callback, these elements remain available
+	 * for reuse.
+	 */
+	do_free = bpf_ma_try_free_reuse_objs(c);
+	raw_spin_unlock(&c->reuse_lock);
+	local_irq_restore(flags);
+
+	if (do_free)
+		call_rcu_tasks_trace(&c->rcu, bpf_ma_free_reusable_cb);
+
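+	/* Also splice in the elements which were freed while c->active was
+	 * already held (e.g. by a bpf program running in NMI context).
+	 */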
+	llist_for_each_safe(llnode, tmp, llist_del_all(&c->free_llist_extra)) {
+		if (!head)
+			tail = llnode;
+		llnode->next = head;
+		head = llnode;
+	}
+	/* prepare_reuse_head may already have been emptied by drain_mem_cache() */
+	if (!head) {
+		/* The kworker is done and no RCU callback will be queued */
+		atomic_dec(&c->reuse_cb_in_progress);
+		return;
+	}
+
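+	/* Prefer an asynchronous RCU callback; when no memory is available for
+	 * the batch, fall back to an expedited synchronous grace period.
+	 */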
+	batch = kmalloc(sizeof(*batch), GFP_KERNEL);
+	if (!batch) {
+		synchronize_rcu_expedited();
+		bpf_ma_add_to_reuse_ready_or_free(c, head, tail);
+		/* The kworker is done and no RCU callback will be queued */
+		atomic_dec(&c->reuse_cb_in_progress);
+		return;
+	}
+
+	batch->c = c;
+	batch->head = head;
+	batch->tail = tail;
+	call_rcu(&batch->rcu, bpf_ma_reuse_cb);
+}
+
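+/* Free path for BPF_MA_REUSE_AFTER_RCU_GP: queue the freed element on
+ * prepare_reuse_head and kick the reuse kworker once the number of pending
+ * elements exceeds the high watermark.
+ */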
+static void notrace wait_gp_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	/* In case an NMI-context bpf program is also freeing objects. */
+	if (local_inc_return(&c->active) == 1) {
+		bool try_queue_work = false;
+
+		/* kworker may remove elements from prepare_reuse_head */
+		raw_spin_lock(&c->reuse_lock);
+		if (llist_empty(&c->prepare_reuse_head))
+			c->prepare_reuse_tail = llnode;
+		__llist_add(llnode, &c->prepare_reuse_head);
+		if (++c->prepare_reuse_cnt > c->high_watermark) {
+			/* Zero out prepare_reuse_cnt early to prevent
+			 * unnecessary queue_work().
+			 */
+			c->prepare_reuse_cnt = 0;
+			try_queue_work = true;
+		}
+		raw_spin_unlock(&c->reuse_lock);
+
+		if (try_queue_work && !work_pending(&c->reuse_work)) {
+			/* Use reuse_cb_in_progress to indicate there is an
+			 * inflight reuse kworker or reuse RCU callback.
+			 */
+			atomic_inc(&c->reuse_cb_in_progress);
+			/* Undo the increment if the work was already queued */
+			if (!queue_work(bpf_ma_wq, &c->reuse_work))
+				atomic_dec(&c->reuse_cb_in_progress);
+		}
+	} else {
+		llist_add(llnode, &c->free_llist_extra);
+	}
+	local_dec(&c->active);
+	local_irq_restore(flags);
+}
+
 /* Though 'ptr' object could have been allocated on a different cpu
  * add it to the free_llist of the current cpu.
  * Let kfree() logic deal with it when it's later called from irq_work.
  */
-static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
+static void notrace immediate_reuse_free(struct bpf_mem_cache *c, struct llist_node *llnode)
 {
-	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
 	unsigned long flags;
 	int cnt = 0;
 
-	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
-
 	local_irq_save(flags);
 	if (local_inc_return(&c->active) == 1) {
 		__llist_add(llnode, &c->free_llist);
@@ -633,6 +910,18 @@  static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 		irq_work_raise(c);
 }
 
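+/* Choose the free path according to the allocator flags: either wait for a
+ * RCU grace period before reuse or make the element immediately reusable.
+ */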
+static inline void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
+{
+	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
+
+	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
+
+	if (c->flags & BPF_MA_REUSE_AFTER_RCU_GP)
+		wait_gp_reuse_free(c, llnode);
+	else
+		immediate_reuse_free(c, llnode);
+}
+
 /* Called from BPF program or from sys_bpf syscall.
  * In both cases migration is disabled.
  */
@@ -724,3 +1013,11 @@  void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
 
 	return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
+
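+/* bpf_ma_wq runs the reuse kworkers. WQ_MEM_RECLAIM provides a rescuer
+ * thread, so the kworkers can still make progress under memory pressure.
+ */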
+static int __init bpf_ma_init(void)
+{
+	bpf_ma_wq = alloc_workqueue("bpf_ma", WQ_MEM_RECLAIM, 0);
+	BUG_ON(!bpf_ma_wq);
+	return 0;
+}
+late_initcall(bpf_ma_init);