[v2] memcg: fix soft lockup in the OOM process

Message ID 20241220103123.3677988-1-chenridong@huaweicloud.com (mailing list archive)
State New
Series [v2] memcg: fix soft lockup in the OOM process

Commit Message

Chen Ridong Dec. 20, 2024, 10:31 a.m. UTC
From: Chen Ridong <chenridong@huawei.com>

A soft lockup issue was found in the product: about 56,000 tasks were
in the OOM cgroup, and the soft lockup was triggered while they were
being traversed.

watchdog: BUG: soft lockup - CPU#2 stuck for 23s! [VM Thread:1503066]
CPU: 2 PID: 1503066 Comm: VM Thread Kdump: loaded Tainted: G
Hardware name: Huawei Cloud OpenStack Nova, BIOS
RIP: 0010:console_unlock+0x343/0x540
RSP: 0000:ffffb751447db9a0 EFLAGS: 00000247 ORIG_RAX: ffffffffffffff13
RAX: 0000000000000001 RBX: 0000000000000000 RCX: 00000000ffffffff
RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000247
RBP: ffffffffafc71f90 R08: 0000000000000000 R09: 0000000000000040
R10: 0000000000000080 R11: 0000000000000000 R12: ffffffffafc74bd0
R13: ffffffffaf60a220 R14: 0000000000000247 R15: 0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f2fe6ad91f0 CR3: 00000004b2076003 CR4: 0000000000360ee0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 vprintk_emit+0x193/0x280
 printk+0x52/0x6e
 dump_task+0x114/0x130
 mem_cgroup_scan_tasks+0x76/0x100
 dump_header+0x1fe/0x210
 oom_kill_process+0xd1/0x100
 out_of_memory+0x125/0x570
 mem_cgroup_out_of_memory+0xb5/0xd0
 try_charge+0x720/0x770
 mem_cgroup_try_charge+0x86/0x180
 mem_cgroup_try_charge_delay+0x1c/0x40
 do_anonymous_page+0xb5/0x390
 handle_mm_fault+0xc4/0x1f0

This is because thousands of processes are in the OOM cgroup and it
takes a long time to traverse all of them, which leads to the soft
lockup during the OOM process.

To fix this issue, call 'cond_resched' in the 'mem_cgroup_scan_tasks'
function every 1000 iterations. For the global OOM path, call
'touch_softlockup_watchdog' every 1000 iterations instead.

Fixes: 9cbb78bb3143 ("mm, memcg: introduce own oom handler to iterate only over its own threads")
Signed-off-by: Chen Ridong <chenridong@huawei.com>
---
 include/linux/oom.h | 7 +++++++
 mm/memcontrol.c     | 6 +++++-
 mm/oom_kill.c       | 7 ++++++-
 3 files changed, 18 insertions(+), 2 deletions(-)

Comments

Andrew Morton Dec. 20, 2024, 10:47 p.m. UTC | #1
On Fri, 20 Dec 2024 10:31:23 +0000 Chen Ridong <chenridong@huaweicloud.com> wrote:

> From: Chen Ridong <chenridong@huawei.com>
> 
> A soft lockup issue was found in the product: about 56,000 tasks were
> in the OOM cgroup, and the soft lockup was triggered while they were
> being traversed.
> 
> ...
>
> This is because thousands of processes are in the OOM cgroup and it
> takes a long time to traverse all of them, which leads to the soft
> lockup during the OOM process.
> 
> To fix this issue, call 'cond_resched' in the 'mem_cgroup_scan_tasks'
> function every 1000 iterations. For the global OOM path, call
> 'touch_softlockup_watchdog' every 1000 iterations instead.
> 
> ...
>
> --- a/include/linux/oom.h
> +++ b/include/linux/oom.h
> @@ -14,6 +14,13 @@ struct notifier_block;
>  struct mem_cgroup;
>  struct task_struct;
>  
> +/* When the traversal takes a long time, call cond_resched() or
> + * touch_softlockup_watchdog() every 1000 iterations to prevent a
> + * soft lockup. The value of 1000 is not exact; it is chosen to
> + * mitigate the overhead of cond_resched()/touch_softlockup_watchdog().
> + */
> +#define SOFTLOCKUP_PREVENTION_LIMIT 1000

If this is to have potentially kernel-wide scope, its name should
identify which subsystem it belongs to.  Maybe OOM_KILL_RESCHED or
something.

But I'm not sure that this really needs to exist.  Are the two usage
sites particularly related?

>  enum oom_constraint {
>  	CONSTRAINT_NONE,
>  	CONSTRAINT_CPUSET,
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 5c373d275e7a..f4c12d6e7b37 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1161,6 +1161,7 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
>  {
>  	struct mem_cgroup *iter;
>  	int ret = 0;
> +	int i = 0;
>  
>  	BUG_ON(mem_cgroup_is_root(memcg));
>  
> @@ -1169,8 +1170,11 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
>  		struct task_struct *task;
>  
>  		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
> -		while (!ret && (task = css_task_iter_next(&it)))
> +		while (!ret && (task = css_task_iter_next(&it))) {
>  			ret = fn(task, arg);
> +			if (++i % SOFTLOCKUP_PREVENTION_LIMIT == 0)

And a modulus operation is somewhat expensive.

Perhaps a simple

		/* Avoid potential softlockup warning */
		if ((++i & 1023) == 0)

at both sites will suffice.  Opinions might vary...
Michal Hocko Dec. 21, 2024, 7:28 a.m. UTC | #2
On Fri 20-12-24 14:47:34, Andrew Morton wrote:
> On Fri, 20 Dec 2024 10:31:23 +0000 Chen Ridong <chenridong@huaweicloud.com> wrote:
> 
> > From: Chen Ridong <chenridong@huawei.com>
> > 
> > A soft lockup issue was found in the product: about 56,000 tasks were
> > in the OOM cgroup, and the soft lockup was triggered while they were
> > being traversed.
> > 
> > ...
> >
> > This is because thousands of processes are in the OOM cgroup and it
> > takes a long time to traverse all of them, which leads to the soft
> > lockup during the OOM process.
> > 
> > To fix this issue, call 'cond_resched' in the 'mem_cgroup_scan_tasks'
> > function every 1000 iterations. For the global OOM path, call
> > 'touch_softlockup_watchdog' every 1000 iterations instead.
> > 
> > ...
> >
> > --- a/include/linux/oom.h
> > +++ b/include/linux/oom.h
> > @@ -14,6 +14,13 @@ struct notifier_block;
> >  struct mem_cgroup;
> >  struct task_struct;
> >  
> > +/* When the traversal takes a long time, call cond_resched() or
> > + * touch_softlockup_watchdog() every 1000 iterations to prevent a
> > + * soft lockup. The value of 1000 is not exact; it is chosen to
> > + * mitigate the overhead of cond_resched()/touch_softlockup_watchdog().
> > + */
> > +#define SOFTLOCKUP_PREVENTION_LIMIT 1000
> 
> If this is to have potentially kernel-wide scope, its name should
> identify which subsystem it belongs to.  Maybe OOM_KILL_RESCHED or
> something.
> 
> But I'm not sure that this really needs to exist.  Are the two usage
> sites particularly related?

Yes, I do not think this needs to pretend to be a more generic mechanism
to prevent soft lockups. The number of iterations highly depends on the
operation itself.

> 
> >  enum oom_constraint {
> >  	CONSTRAINT_NONE,
> >  	CONSTRAINT_CPUSET,
> > diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> > index 5c373d275e7a..f4c12d6e7b37 100644
> > --- a/mm/memcontrol.c
> > +++ b/mm/memcontrol.c
> > @@ -1161,6 +1161,7 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
> >  {
> >  	struct mem_cgroup *iter;
> >  	int ret = 0;
> > +	int i = 0;
> >  
> >  	BUG_ON(mem_cgroup_is_root(memcg));
> >  
> > @@ -1169,8 +1170,11 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
> >  		struct task_struct *task;
> >  
> >  		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
> > -		while (!ret && (task = css_task_iter_next(&it)))
> > +		while (!ret && (task = css_task_iter_next(&it))) {
> >  			ret = fn(task, arg);
> > +			if (++i % SOFTLOCKUP_PREVENTION_LIMIT == 0)
> 
> And a modulus operation is somewhat expensive.

This is a cold path used during OOM. While we could make it more
optimal, I doubt it matters in practice, so we should aim for
readability. I do not mind either way; I just wanted to note that this
is not performance sensitive.

> 
> Perhaps a simple
> 
> 		/* Avoid potential softlockup warning */
> 		if ((++i & 1023) == 0)
> 
> at both sites will suffice.  Opinions might vary...
>

Patch

diff --git a/include/linux/oom.h b/include/linux/oom.h
index 1e0fc6931ce9..df2d336f653d 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -14,6 +14,13 @@  struct notifier_block;
 struct mem_cgroup;
 struct task_struct;
 
+/* When the traversal takes a long time, call cond_resched() or
+ * touch_softlockup_watchdog() every 1000 iterations to prevent a
+ * soft lockup. The value of 1000 is not exact; it is chosen to
+ * mitigate the overhead of cond_resched()/touch_softlockup_watchdog().
+ */
+#define SOFTLOCKUP_PREVENTION_LIMIT 1000
+
 enum oom_constraint {
 	CONSTRAINT_NONE,
 	CONSTRAINT_CPUSET,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5c373d275e7a..f4c12d6e7b37 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1161,6 +1161,7 @@  void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 {
 	struct mem_cgroup *iter;
 	int ret = 0;
+	int i = 0;
 
 	BUG_ON(mem_cgroup_is_root(memcg));
 
@@ -1169,8 +1170,11 @@  void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 		struct task_struct *task;
 
 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
-		while (!ret && (task = css_task_iter_next(&it)))
+		while (!ret && (task = css_task_iter_next(&it))) {
 			ret = fn(task, arg);
+			if (++i % SOFTLOCKUP_PREVENTION_LIMIT == 0)
+				cond_resched();
+		}
 		css_task_iter_end(&it);
 		if (ret) {
 			mem_cgroup_iter_break(memcg, iter);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1c485beb0b93..05e613750e9b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -44,6 +44,7 @@ 
 #include <linux/init.h>
 #include <linux/mmu_notifier.h>
 #include <linux/cred.h>
+#include <linux/nmi.h>
 
 #include <asm/tlb.h>
 #include "internal.h"
@@ -430,10 +431,14 @@  static void dump_tasks(struct oom_control *oc)
 		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
 	else {
 		struct task_struct *p;
+		int i = 0;
 
 		rcu_read_lock();
-		for_each_process(p)
+		for_each_process(p) {
+			if (++i % SOFTLOCKUP_PREVENTION_LIMIT == 0)
+				touch_softlockup_watchdog();
 			dump_task(p, oc);
+		}
 		rcu_read_unlock();
 	}
 }
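
As a quick illustration of the throttling arithmetic discussed in the review,
here is a minimal, stand-alone user-space sketch (not part of the patch; the
task count of 56,000 is taken from the report above, and the program itself is
purely illustrative). It counts how often the modulus condition used in the
patch and the power-of-two mask condition suggested by Andrew would fire over
one traversal:

#include <stdio.h>

int main(void)
{
	const unsigned long ntasks = 56000;	/* task count from the report */
	unsigned long i, mod_hits = 0, mask_hits = 0;

	for (i = 1; i <= ntasks; i++) {
		if (i % 1000 == 0)	/* modulus form: fires every 1000 iterations */
			mod_hits++;
		if ((i & 1023) == 0)	/* mask form: fires every 1024 iterations */
			mask_hits++;
	}

	printf("modulus form fired %lu times, mask form fired %lu times\n",
	       mod_hits, mask_hits);
	return 0;
}

Over 56,000 iterations the modulus form fires 56 times and the mask form 54
times, so either variant throttles the cond_resched()/touch_softlockup_watchdog()
calls to roughly once per thousand tasks; the mask form merely trades the
division for a cheaper bitwise AND.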