diff mbox series

[3/4] mm: Centralize & improve oom reporting in show_mem.c

Message ID 20220419203202.2670193-4-kent.overstreet@gmail.com (mailing list archive)
State New, archived
Headers show
Series Printbufs & shrinker OOM reporting | expand

Commit Message

Kent Overstreet April 19, 2022, 8:32 p.m. UTC
This patch:
 - Moves lib/show_mem.c to mm/show_mem.c
 - Changes show_mem() to always report on slab usage
 - Instead of reporting on all slabs, we only report on top 10 slabs,
   and in sorted order
 - Also reports on shrinkers, with the new shrinkers_to_text().

More OOM reporting can be moved to show_mem.c and improved, this patch
is only a small start.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
---
 lib/Makefile           |  2 +-
 mm/Makefile            |  2 +-
 mm/oom_kill.c          | 23 ------------------
 {lib => mm}/show_mem.c | 14 +++++++++++
 mm/slab.h              |  6 +++--
 mm/slab_common.c       | 53 +++++++++++++++++++++++++++++++++++-------
 6 files changed, 65 insertions(+), 35 deletions(-)
 rename {lib => mm}/show_mem.c (78%)

Comments

Michal Hocko April 20, 2022, 6:58 a.m. UTC | #1
On Tue 19-04-22 16:32:01, Kent Overstreet wrote:
> This patch:
>  - Moves lib/show_mem.c to mm/show_mem.c

Sure, why not. Should be a separate patch.

>  - Changes show_mem() to always report on slab usage
>  - Instead of reporting on all slabs, we only report on top 10 slabs,
>    and in sorted order
>  - Also reports on shrinkers, with the new shrinkers_to_text().

Why do we need/want this? It would be also great to provide an example
of why the new output is better (in which cases) than the existing one.
Kent Overstreet April 20, 2022, 4:58 p.m. UTC | #2
On Wed, Apr 20, 2022 at 08:58:36AM +0200, Michal Hocko wrote:
> On Tue 19-04-22 16:32:01, Kent Overstreet wrote:
> > This patch:
> >  - Moves lib/show_mem.c to mm/show_mem.c
> 
> Sure, why not. Should be a separate patch.
> 
> >  - Changes show_mem() to always report on slab usage
> >  - Instead of reporting on all slabs, we only report on top 10 slabs,
> >    and in sorted order
> >  - Also reports on shrinkers, with the new shrinkers_to_text().
> 
> Why do we need/want this? It would be also great to provide an example
> of why the new output is better (in which cases) than the existing one.

Did you read the cover letter to the patch series?

But sure, I can give you an example of the new output:

00177 018: page allocation failure: order:5, mode:0x40dc0(GFP_KERNEL|__GFP_COMP|__GFP_ZERO), nodemask=(null)
00177 CPU: 0 PID: 32171 Comm: 018 Not tainted 5.17.0-01346-g09b56740d418-dirty #154
00177 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
00177 Call Trace:
00177  <TASK>
00177  dump_stack_lvl+0x38/0x49
00177  dump_stack+0x10/0x12
00177  warn_alloc+0x128/0x150
00177  ? __alloc_pages_direct_compact+0x171/0x1f0
00177  __alloc_pages_slowpath.constprop.0+0xac6/0xc30
00177  ? make_kgid+0x17/0x20
00177  ? p9pdu_readf+0x28c/0xb00
00177  __alloc_pages+0x215/0x230
00177  kmalloc_order+0x30/0x80
00177  kmalloc_order_trace+0x1d/0x80
00177  __kmalloc+0x1a2/0x1d0
00177  v9fs_alloc_rdir_buf.isra.0+0x28/0x40
00177  v9fs_dir_readdir_dotl+0x55/0x160
00177  ? __alloc_pages+0x151/0x230
00177  ? lru_cache_add+0x1c/0x20
00177  ? lru_cache_add_inactive_or_unevictable+0x27/0x80
00177  ? __handle_mm_fault+0x666/0xae0
00177  iterate_dir+0x151/0x1b0
00177  __x64_sys_getdents64+0x80/0x120
00177  ? compat_fillonedir+0x160/0x160
00177  do_syscall_64+0x35/0x80
00177  entry_SYSCALL_64_after_hwframe+0x44/0xae
00177 RIP: 0033:0x7f0b15e2f9c7
00177 Code: 00 00 0f 05 eb b3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 48 81 fa ff ff ff 7f b8 ff ff ff 7f 48 0f 47 d0 b8 d9 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 01 c3 48 8b 15 79 b4 10 00 f7 d8 64 89 02 48
00177 RSP: 002b:00007ffcf9445b88 EFLAGS: 00000293 ORIG_RAX: 00000000000000d9
00177 RAX: ffffffffffffffda RBX: 000056137dd23ba0 RCX: 00007f0b15e2f9c7
00177 RDX: 000000000001f000 RSI: 000056137dd23bd0 RDI: 0000000000000003
00177 RBP: 000056137dd23bd0 R08: 0000000000000030 R09: 00007f0b15f3bc00
00177 R10: fffffffffffff776 R11: 0000000000000293 R12: ffffffffffffff88
00177 R13: 000056137dd23ba4 R14: 0000000000000000 R15: 000056137dd23ba0
00177  </TASK>
00177 Mem-Info:
00177 active_anon:13706 inactive_anon:32266 isolated_anon:16
00177  active_file:1653 inactive_file:1822 isolated_file:0
00177  unevictable:0 dirty:0 writeback:0
00177  slab_reclaimable:6242 slab_unreclaimable:11168
00177  mapped:3824 shmem:3 pagetables:1266 bounce:0
00177  kernel_misc_reclaimable:0
00177  free:4362 free_pcp:35 free_cma:0
00177 Node 0 active_anon:54824kB inactive_anon:129064kB active_file:6612kB inactive_file:7288kB unevictable:0kB isolated(anon):64kB isolated(file):0kB mapped:15296kB dirty:0kB writeback:0kB shmem:12kB writeback_tmp:0kB kernel_stack:3392kB pagetables:5064kB all_unreclaimable? no
00177 DMA free:2232kB boost:0kB min:88kB low:108kB high:128kB reserved_highatomic:0KB active_anon:2924kB inactive_anon:6596kB active_file:428kB inactive_file:384kB unevictable:0kB writepending:0kB present:15992kB managed:15360kB mlocked:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB
00177 lowmem_reserve[]: 0 426 426 426
00177 DMA32 free:15092kB boost:5836kB min:8432kB low:9080kB high:9728kB reserved_highatomic:0KB active_anon:52196kB inactive_anon:122392kB active_file:6176kB inactive_file:7068kB unevictable:0kB writepending:0kB present:507760kB managed:441816kB mlocked:0kB bounce:0kB free_pcp:72kB local_pcp:0kB free_cma:0kB
00177 lowmem_reserve[]: 0 0 0 0
00177 DMA: 284*4kB (UM) 53*8kB (UM) 21*16kB (U) 11*32kB (U) 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 2248kB
00177 DMA32: 2765*4kB (UME) 375*8kB (UME) 57*16kB (UM) 5*32kB (U) 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 15132kB
00177 4656 total pagecache pages
00177 1031 pages in swap cache
00177 Swap cache stats: add 6572399, delete 6572173, find 488603/3286476
00177 Free swap  = 509112kB
00177 Total swap = 2097148kB
00177 130938 pages RAM
00177 0 pages HighMem/MovableOnly
00177 16644 pages reserved
00177 Unreclaimable slab info:
00177 9p-fcall-cache    total: 8.25 MiB active: 8.25 MiB
00177 kernfs_node_cache total: 2.15 MiB active: 2.15 MiB
00177 kmalloc-64        total: 2.08 MiB active: 2.07 MiB
00177 task_struct       total: 1.95 MiB active: 1.95 MiB
00177 kmalloc-4k        total: 1.50 MiB active: 1.50 MiB
00177 signal_cache      total: 1.34 MiB active: 1.34 MiB
00177 kmalloc-2k        total: 1.16 MiB active: 1.16 MiB
00177 bch_inode_info    total: 1.02 MiB active: 922 KiB
00177 perf_event        total: 1.02 MiB active: 1.02 MiB
00177 biovec-max        total: 992 KiB active: 960 KiB
00177 Shrinkers:
00177 super_cache_scan: objects: 127
00177 super_cache_scan: objects: 106
00177 jbd2_journal_shrink_scan: objects: 32
00177 ext4_es_scan: objects: 32
00177 bch2_btree_cache_scan: objects: 8
00177   nr nodes:          24
00177   nr dirty:          0
00177   cannibalize lock:  0000000000000000
00177 
00177 super_cache_scan: objects: 8
00177 super_cache_scan: objects: 1
Michal Hocko April 21, 2022, 9:18 a.m. UTC | #3
On Wed 20-04-22 12:58:05, Kent Overstreet wrote:
> On Wed, Apr 20, 2022 at 08:58:36AM +0200, Michal Hocko wrote:
> > On Tue 19-04-22 16:32:01, Kent Overstreet wrote:
> > > This patch:
> > >  - Moves lib/show_mem.c to mm/show_mem.c
> > 
> > Sure, why not. Should be a separate patch.
> > 
> > >  - Changes show_mem() to always report on slab usage
> > >  - Instead of reporting on all slabs, we only report on top 10 slabs,
> > >    and in sorted order
> > >  - Also reports on shrinkers, with the new shrinkers_to_text().
> > 
> > Why do we need/want this? It would be also great to provide an example
> > of why the new output is better (in which cases) than the existing one.
> 
> Did you read the cover letter to the patch series?

Nope, only this one made it into my inbox based on my filters. I usually
try to fish out other parts of the thread but I didn't this time.
Besides it is always better to have a full patch description explain not
only what has been changed but why as well.

> But sure, I can give you an example of the new output:

Calling out the changes would be really helpful, but I guess the crux 
is here.

> 00177 16644 pages reserved
> 00177 Unreclaimable slab info:
> 00177 9p-fcall-cache    total: 8.25 MiB active: 8.25 MiB
> 00177 kernfs_node_cache total: 2.15 MiB active: 2.15 MiB
> 00177 kmalloc-64        total: 2.08 MiB active: 2.07 MiB
> 00177 task_struct       total: 1.95 MiB active: 1.95 MiB
> 00177 kmalloc-4k        total: 1.50 MiB active: 1.50 MiB
> 00177 signal_cache      total: 1.34 MiB active: 1.34 MiB
> 00177 kmalloc-2k        total: 1.16 MiB active: 1.16 MiB
> 00177 bch_inode_info    total: 1.02 MiB active: 922 KiB
> 00177 perf_event        total: 1.02 MiB active: 1.02 MiB
> 00177 biovec-max        total: 992 KiB active: 960 KiB
> 00177 Shrinkers:
> 00177 super_cache_scan: objects: 127
> 00177 super_cache_scan: objects: 106
> 00177 jbd2_journal_shrink_scan: objects: 32
> 00177 ext4_es_scan: objects: 32
> 00177 bch2_btree_cache_scan: objects: 8
> 00177   nr nodes:          24
> 00177   nr dirty:          0
> 00177   cannibalize lock:  0000000000000000
> 00177 
> 00177 super_cache_scan: objects: 8
> 00177 super_cache_scan: objects: 1

How does this help to analyze this allocation failure?
Kent Overstreet April 21, 2022, 6:42 p.m. UTC | #4
On Thu, Apr 21, 2022 at 11:18:20AM +0200, Michal Hocko wrote:
> On Wed 20-04-22 12:58:05, Kent Overstreet wrote:
> > On Wed, Apr 20, 2022 at 08:58:36AM +0200, Michal Hocko wrote:
> > > On Tue 19-04-22 16:32:01, Kent Overstreet wrote:
> > > > This patch:
> > > >  - Moves lib/show_mem.c to mm/show_mem.c
> > > 
> > > Sure, why not. Should be a separate patch.
> > > 
> > > >  - Changes show_mem() to always report on slab usage
> > > >  - Instead of reporting on all slabs, we only report on top 10 slabs,
> > > >    and in sorted order
> > > >  - Also reports on shrinkers, with the new shrinkers_to_text().
> > > 
> > > Why do we need/want this? It would be also great to provide an example
> > > of why the new output is better (in which cases) than the existing one.
> > 
> > Did you read the cover letter to the patch series?
> 
> Nope, only this one made it into my inbox based on my filters. I usually
> try to fish out other parts of the thread but I didn't this time.
> Besides it is always better to have a full patch description explain not
> only what has been changed but why as well.
> 
> > But sure, I can give you an example of the new output:
> 
> Calling out the changes would be really helpful, but I guess the crux 
> is here.
> 
> > 00177 16644 pages reserved
> > 00177 Unreclaimable slab info:
> > 00177 9p-fcall-cache    total: 8.25 MiB active: 8.25 MiB
> > 00177 kernfs_node_cache total: 2.15 MiB active: 2.15 MiB
> > 00177 kmalloc-64        total: 2.08 MiB active: 2.07 MiB
> > 00177 task_struct       total: 1.95 MiB active: 1.95 MiB
> > 00177 kmalloc-4k        total: 1.50 MiB active: 1.50 MiB
> > 00177 signal_cache      total: 1.34 MiB active: 1.34 MiB
> > 00177 kmalloc-2k        total: 1.16 MiB active: 1.16 MiB
> > 00177 bch_inode_info    total: 1.02 MiB active: 922 KiB
> > 00177 perf_event        total: 1.02 MiB active: 1.02 MiB
> > 00177 biovec-max        total: 992 KiB active: 960 KiB
> > 00177 Shrinkers:
> > 00177 super_cache_scan: objects: 127
> > 00177 super_cache_scan: objects: 106
> > 00177 jbd2_journal_shrink_scan: objects: 32
> > 00177 ext4_es_scan: objects: 32
> > 00177 bch2_btree_cache_scan: objects: 8
> > 00177   nr nodes:          24
> > 00177   nr dirty:          0
> > 00177   cannibalize lock:  0000000000000000
> > 00177 
> > 00177 super_cache_scan: objects: 8
> > 00177 super_cache_scan: objects: 1
> 
> How does this help to analyze this allocation failure?

You asked for an example of the output, which was an entirely reasonable
request. Shrinkers weren't responsible for this OOM, so it doesn't help here -
are you asking me to explain why shrinkers are relevant to OOMs and memory
reclaim...?

Since shrinkers own and, critically, _are responsible for freeing memory_, a
shrinker not giving up memory when asked (or perhaps not being asked to give up
memory) can cause an OOM. A starting point - not an end - if we want to improve
OOM debugging is at least being able to see how much memory each shrinker owns.
Since we don't even have that, number of objects will have to do.

The reason for adding the .to_text() callback is that shrinkers have internal
state that affects whether they are able to give up objects when asked - the
primary example being object dirtyness.

If your system is using a ton of memory caching inodes, and something's wedged
writeback, and they're nearly all dirty - you're going to have a bad day.

The bcachefs btree node shrinker included as an example of what we can do
with this: internally we may have to allocate new btree nodes by reclaiming from
our own cache, and we have a lock to prevent multiple threads from doing this at
the same time, and this lock also blocks the shrinker from freeing more memory
until we're done.

In filesystem land, debugging memory reclaim issues is a rather painful topic
that often comes up, this is a starting point...
Michal Hocko April 22, 2022, 8:03 a.m. UTC | #5
On Thu 21-04-22 14:42:13, Kent Overstreet wrote:
> On Thu, Apr 21, 2022 at 11:18:20AM +0200, Michal Hocko wrote:
[...]
> > > 00177 16644 pages reserved
> > > 00177 Unreclaimable slab info:
> > > 00177 9p-fcall-cache    total: 8.25 MiB active: 8.25 MiB
> > > 00177 kernfs_node_cache total: 2.15 MiB active: 2.15 MiB
> > > 00177 kmalloc-64        total: 2.08 MiB active: 2.07 MiB
> > > 00177 task_struct       total: 1.95 MiB active: 1.95 MiB
> > > 00177 kmalloc-4k        total: 1.50 MiB active: 1.50 MiB
> > > 00177 signal_cache      total: 1.34 MiB active: 1.34 MiB
> > > 00177 kmalloc-2k        total: 1.16 MiB active: 1.16 MiB
> > > 00177 bch_inode_info    total: 1.02 MiB active: 922 KiB
> > > 00177 perf_event        total: 1.02 MiB active: 1.02 MiB
> > > 00177 biovec-max        total: 992 KiB active: 960 KiB
> > > 00177 Shrinkers:
> > > 00177 super_cache_scan: objects: 127
> > > 00177 super_cache_scan: objects: 106
> > > 00177 jbd2_journal_shrink_scan: objects: 32
> > > 00177 ext4_es_scan: objects: 32
> > > 00177 bch2_btree_cache_scan: objects: 8
> > > 00177   nr nodes:          24
> > > 00177   nr dirty:          0
> > > 00177   cannibalize lock:  0000000000000000
> > > 00177 
> > > 00177 super_cache_scan: objects: 8
> > > 00177 super_cache_scan: objects: 1
> > 
> > How does this help to analyze this allocation failure?
> 
> You asked for an example of the output, which was an entirely reasonable
> request. Shrinkers weren't responsible for this OOM, so it doesn't help here -

OK, do you have an example where it clearly helps?

> are you asking me to explain why shrinkers are relevant to OOMs and memory
> reclaim...?

No, not really, I guess that is quite clear. The thing is that the oom
report is quite bloated already and we should be rather picky on what to
dump there. Your above example is a good one here. You have an order-5
allocation failure and that can be caused by almost anything. Compaction
not making progress for many reasons - e.g. internal fragmentation caused
by pinned pages but also kmalloc allocations. The above output doesn't
help with any of that. Could shrinkers operation be related? Of course
it could but how can I tell?

We already dump slab data when the slab usage is excessive for the oom
killer report and that was a very useful addition in many cases and it
is bound to cases where slab consumption could be the primary source of
the OOM condition.

That being said the additional output should be at least conditional and
reported when there is a chance that it could help with analysis.

> Since shrinkers own and, critically, _are responsible for freeing memory_, a
> shrinker not giving up memory when asked (or perhaps not being asked to give up
> memory) can cause an OOM. A starting point - not an end - if we want to improve
> OOM debugging is at least being able to see how much memory each shrinker owns.
> Since we don't even have that, number of objects will have to do.
> 
> The reason for adding the .to_text() callback is that shrinkers have internal
> state that affects whether they are able to give up objects when asked - the
> primary example being object dirtyness.
> 
> If your system is using a ton of memory caching inodes, and something's wedged
> writeback, and they're nearly all dirty - you're going to have a bad day.
> 
> The bcachefs btree node shrinker included as an example of what we can do
> with this: internally we may have to allocate new btree nodes by reclaiming from
> our own cache, and we have a lock to prevent multiple threads from doing this at
> the same time, and this lock also blocks the shrinker from freeing more memory
> until we're done.
> 
> In filesystem land, debugging memory reclaim issues is a rather painful topic
> that often comes up, this is a starting point...

I completely understand the frustration. I've been analyzing oom reports
for years and I can tell that the existing report is quite good but
in many cases the information we provide is still insufficient. My
experience also tells me that those cases are usually very special and
a specific data dumped for them wouldn't be all that useful in the
majority of cases.

If we are lucky enough the oom is reproducible and additional
tracepoints (or whatever your prefer to use) tell us more. Far from
optimal, no question about that but I do not have a good answer on
where the threshold should really be. Maybe we can come up with some
trigger based mechanism (e.g. some shrinkers are failing so they
register their debugging data which will get dumped on the OOM) which
would enable certain debugging information or something like that.
Kent Overstreet April 22, 2022, 8:30 a.m. UTC | #6
On Fri, Apr 22, 2022 at 10:03:36AM +0200, Michal Hocko wrote:
> On Thu 21-04-22 14:42:13, Kent Overstreet wrote:
> > On Thu, Apr 21, 2022 at 11:18:20AM +0200, Michal Hocko wrote:
> [...]
> > > > 00177 16644 pages reserved
> > > > 00177 Unreclaimable slab info:
> > > > 00177 9p-fcall-cache    total: 8.25 MiB active: 8.25 MiB
> > > > 00177 kernfs_node_cache total: 2.15 MiB active: 2.15 MiB
> > > > 00177 kmalloc-64        total: 2.08 MiB active: 2.07 MiB
> > > > 00177 task_struct       total: 1.95 MiB active: 1.95 MiB
> > > > 00177 kmalloc-4k        total: 1.50 MiB active: 1.50 MiB
> > > > 00177 signal_cache      total: 1.34 MiB active: 1.34 MiB
> > > > 00177 kmalloc-2k        total: 1.16 MiB active: 1.16 MiB
> > > > 00177 bch_inode_info    total: 1.02 MiB active: 922 KiB
> > > > 00177 perf_event        total: 1.02 MiB active: 1.02 MiB
> > > > 00177 biovec-max        total: 992 KiB active: 960 KiB
> > > > 00177 Shrinkers:
> > > > 00177 super_cache_scan: objects: 127
> > > > 00177 super_cache_scan: objects: 106
> > > > 00177 jbd2_journal_shrink_scan: objects: 32
> > > > 00177 ext4_es_scan: objects: 32
> > > > 00177 bch2_btree_cache_scan: objects: 8
> > > > 00177   nr nodes:          24
> > > > 00177   nr dirty:          0
> > > > 00177   cannibalize lock:  0000000000000000
> > > > 00177 
> > > > 00177 super_cache_scan: objects: 8
> > > > 00177 super_cache_scan: objects: 1
> > > 
> > > How does this help to analyze this allocation failure?
> > 
> > You asked for an example of the output, which was an entirely reasonable
> > request. Shrinkers weren't responsible for this OOM, so it doesn't help here -
> 
> OK, do you have an example where it clearly helps?

I've debugged quite a few issues with shrinkers over the years where this would
have helped a lot (especially if it was also in sysfs), although nothing
currently. I was just talking with Dave earlier tonight about more things that
could be added for shrinkers, but I'm going to have to go over that conversation
again and take notes.

Also, I feel I have to point out that OOM & memory reclaim debugging is an area
where many filesystem developers feel that the MM people have been dropping the
ball, and your initial response to this patch series...  well, it feels like
more of the same.

Still does to be honest, you're coming across like I haven't been working in
this area for a decade+ and don't know what I'm touching. Really, I'm not new to
this stuff.

> > are you asking me to explain why shrinkers are relevant to OOMs and memory
> > reclaim...?
> 
> No, not really, I guess that is quite clear. The thing is that the oom
> report is quite bloated already and we should be rather picky on what to
> dump there. Your above example is a good one here. You have an order-5
> allocation failure and that can be caused by almost anything. Compaction
> not making progress for many reasons - e.g. internal fragmentation caused
> by pinned pages but also kmalloc allocations. The above output doesn't
> help with any of that. Could shrinkers operation be related? Of course
> it could but how can I tell?

Yeah sure and internal fragmentation would actually be an _excellent_ thing to
add to the show_mem report.

> We already dump slab data when the slab usage is excessive for the oom
> killer report and that was a very useful addition in many cases and it
> is bound to cases where slab consumption could be the primary source of
> the OOM condition.
> 
> That being said the additional output should be at least conditional and
> reported when there is a chance that it could help with analysis.

These things don't need to be conditional if we're more carefully selective
about how we report, instead of just dumping everything like we currently do
with slab info.

We don't need to report on all the slabs that are barely used - if you'll read
my patch and example output, which changes it to the top 10 slabs by memory
usage.

I feel like I keep repeating myself here. It would help if you would make more
of an effort to follow the full patch series and descriptions before commenting
generically.

> > Since shrinkers own and, critically, _are responsible for freeing memory_, a
> > shrinker not giving up memory when asked (or perhaps not being asked to give up
> > memory) can cause an OOM. A starting point - not an end - if we want to improve
> > OOM debugging is at least being able to see how much memory each shrinker owns.
> > Since we don't even have that, number of objects will have to do.
> > 
> > The reason for adding the .to_text() callback is that shrinkers have internal
> > state that affects whether they are able to give up objects when asked - the
> > primary example being object dirtyness.
> > 
> > If your system is using a ton of memory caching inodes, and something's wedged
> > writeback, and they're nearly all dirty - you're going to have a bad day.
> > 
> > The bcachefs btree node shrinker included as an example of what we can do
> > with this: internally we may have to allocate new btree nodes by reclaiming from
> > our own cache, and we have a lock to prevent multiple threads from doing this at
> > the same time, and this lock also blocks the shrinker from freeing more memory
> > until we're done.
> > 
> > In filesystem land, debugging memory reclaim issues is a rather painful topic
> > that often comes up, this is a starting point...
> 
> I completely understand the frustration. I've been analyzing oom reports
> for years and I can tell that the existing report is quite good but
> in many cases the information we provide is still insufficient. My
> experience also tells me that those cases are usually very special and
> a specific data dumped for them wouldn't be all that useful in the
> majority of cases.
> 
> If we are lucky enough the oom is reproducible and additional
> tracepoints (or whatever your prefer to use) tell us more. Far from
> optimal, no question about that but I do not have a good answer on
> where the threshold should really be. Maybe we can come up with some
> trigger based mechanism (e.g. some shrinkers are failing so they
> register their debugging data which will get dumped on the OOM) which
> would enable certain debugging information or something like that.

Why would we need a trigger mechanism?

Could you explain your objection to simply unconditionally dumping the top 10
slabs and the top 10 shrinkers?
Michal Hocko April 22, 2022, 9:27 a.m. UTC | #7
On Fri 22-04-22 04:30:37, Kent Overstreet wrote:
> On Fri, Apr 22, 2022 at 10:03:36AM +0200, Michal Hocko wrote:
> > On Thu 21-04-22 14:42:13, Kent Overstreet wrote:
> > > On Thu, Apr 21, 2022 at 11:18:20AM +0200, Michal Hocko wrote:
> > [...]
> > > > > 00177 16644 pages reserved
> > > > > 00177 Unreclaimable slab info:
> > > > > 00177 9p-fcall-cache    total: 8.25 MiB active: 8.25 MiB
> > > > > 00177 kernfs_node_cache total: 2.15 MiB active: 2.15 MiB
> > > > > 00177 kmalloc-64        total: 2.08 MiB active: 2.07 MiB
> > > > > 00177 task_struct       total: 1.95 MiB active: 1.95 MiB
> > > > > 00177 kmalloc-4k        total: 1.50 MiB active: 1.50 MiB
> > > > > 00177 signal_cache      total: 1.34 MiB active: 1.34 MiB
> > > > > 00177 kmalloc-2k        total: 1.16 MiB active: 1.16 MiB
> > > > > 00177 bch_inode_info    total: 1.02 MiB active: 922 KiB
> > > > > 00177 perf_event        total: 1.02 MiB active: 1.02 MiB
> > > > > 00177 biovec-max        total: 992 KiB active: 960 KiB
> > > > > 00177 Shrinkers:
> > > > > 00177 super_cache_scan: objects: 127
> > > > > 00177 super_cache_scan: objects: 106
> > > > > 00177 jbd2_journal_shrink_scan: objects: 32
> > > > > 00177 ext4_es_scan: objects: 32
> > > > > 00177 bch2_btree_cache_scan: objects: 8
> > > > > 00177   nr nodes:          24
> > > > > 00177   nr dirty:          0
> > > > > 00177   cannibalize lock:  0000000000000000
> > > > > 00177 
> > > > > 00177 super_cache_scan: objects: 8
> > > > > 00177 super_cache_scan: objects: 1
> > > > 
> > > > How does this help to analyze this allocation failure?
> > > 
> > > You asked for an example of the output, which was an entirely reasonable
> > > request. Shrinkers weren't responsible for this OOM, so it doesn't help here -
> > 
> > OK, do you have an example where it clearly helps?
> 
> I've debugged quite a few issues with shrinkers over the years where this would
> have helped a lot (especially if it was also in sysfs), although nothing
> currently. I was just talking with Dave earlier tonight about more things that
> could be added for shrinkers, but I'm going to have to go over that conversation
> again and take notes.
> 
> Also, I feel I have to point out that OOM & memory reclaim debugging is an area
> where many filesystem developers feel that the MM people have been dropping the
> ball, and your initial response to this patch series...  well, it feels like
> more of the same.

Not sure where you get that feeling. Debugging memory reclaim is a PITA
because many problems can be indirect and tools we have available are
not really great. I do not remember MM people would be blocking useful
debugging tools addition.
 
> Still does to be honest, you're coming across like I haven't been working in
> this area for a decade+ and don't know what I'm touching. Really, I'm not new to
> this stuff.

I am sorry to hear that but there certainly is no intention like that
and TBH I do not even see where you get that feeling. You have posted a
changelog which doesn't explain really much. I am aware that you are far
from a kernel newbie and therefore I would really expect much more in
that regards.

> > > are you asking me to explain why shrinkers are relevant to OOMs and memory
> > > reclaim...?
> > 
> > No, not really, I guess that is quite clear. The thing is that the oom
> > report is quite bloated already and we should be rather picky on what to
> > dump there. Your above example is a good one here. You have an order-5
> > allocation failure and that can be caused by almost anything. Compaction
> > not making progress for many reasons - e.g. internal fragmentation caused
> > by pinned pages but also kmalloc allocations. The above output doesn't
> > help with any of that. Could shrinkers operation be related? Of course
> > it could but how can I tell?
> 
> Yeah sure and internal fragmentation would actually be an _excellent_ thing to
> add to the show_mem report.

Completely agreed. The only information we currently have is the
buddyinfo part which reports movability status but I do not think this
is remotely sufficient.

[...]

> > If we are lucky enough the oom is reproducible and additional
> > tracepoints (or whatever your prefer to use) tell us more. Far from
> > optimal, no question about that but I do not have a good answer on
> > where the threshold should really be. Maybe we can come up with some
> > trigger based mechanism (e.g. some shrinkers are failing so they
> > register their debugging data which will get dumped on the OOM) which
> > would enable certain debugging information or something like that.
> 
> Why would we need a trigger mechanism?

Mostly because reasons for reclaim failures can vary a lot and the oom
report part doesn't have an idea what has happened during the
reclaim/compaction.

> Could you explain your objection to simply unconditionally dumping the top 10
> slabs and the top 10 shrinkers?

We already do that in some form. We dump unreclaimable slabs if they
consume more memory than user pages on LRUs. We also dump all slab
caches with some objects. Why is this approach not good? Should we tweak
the condition to dump or should we limit the dump? These are reasonable 
questions to ask. Your patch has dropped those without explaining any
of the motivation.

I am perfectly OK to modify should_dump_unreclaim_slab to dump even if
the slab memory consumption is lower. Also dumping small caches with
handful of objects can be excessive.

Wrt to shrinkers I really do not know what kind of shrinkers data would
be useful to dump and when. Therefore I am asking about examples.
Kent Overstreet April 22, 2022, 9:44 a.m. UTC | #8
On Fri, Apr 22, 2022 at 11:27:05AM +0200, Michal Hocko wrote:
> We already do that in some form. We dump unreclaimable slabs if they
> consume more memory than user pages on LRUs. We also dump all slab
> caches with some objects. Why is this approach not good? Should we tweak
> the condition to dump or should we limit the dump? These are reasonable 
> questions to ask. Your patch has dropped those without explaining any
> of the motivation.
> 
> I am perfectly OK to modify should_dump_unreclaim_slab to dump even if
> the slab memory consumption is lower. Also dumping small caches with
> handful of objects can be excessive.
> 
> Wrt to shrinkers I really do not know what kind of shrinkers data would
> be useful to dump and when. Therefore I am asking about examples.

Look, I've given you the sample output you asked for and explained repeatedly my
rationale and you haven't directly responded; if you have a reason you're
against the patches please say so, but please give your reasoning.
Michal Hocko April 22, 2022, 10:51 a.m. UTC | #9
On Fri 22-04-22 05:44:13, Kent Overstreet wrote:
> On Fri, Apr 22, 2022 at 11:27:05AM +0200, Michal Hocko wrote:
> > We already do that in some form. We dump unreclaimable slabs if they
> > consume more memory than user pages on LRUs. We also dump all slab
> > caches with some objects. Why is this approach not good? Should we tweak
> > the condition to dump or should we limit the dump? These are reasonable 
> > questions to ask. Your patch has dropped those without explaining any
> > of the motivation.
> > 
> > I am perfectly OK to modify should_dump_unreclaim_slab to dump even if
> > the slab memory consumption is lower. Also dumping small caches with
> > handful of objects can be excessive.
> > 
> > Wrt to shrinkers I really do not know what kind of shrinkers data would
> > be useful to dump and when. Therefore I am asking about examples.
> 
> Look, I've given you the sample

That sample is of no use as it doesn't really show how the additional
information is useful to analyze the allocation failure. I thought we
have agreed on that. You still haven't given any example where the
information is useful. So I do not really see any reason to change the
existing output.

> output you asked for and explained repeatedly my
> rationale and you haven't directly responded;

Your rationale is that we need more data and I do agree but it is not
clear which data and under which conditions.

> if you have a reason you're
> against the patches please say so, but please give your reasoning.

I have expressed that already, I believe, but let me repeat. I do not
like altering the oom report without a justification on how this new
output is useful. You have failed to explained that so far.
Kent Overstreet April 22, 2022, 10:58 a.m. UTC | #10
On Fri, Apr 22, 2022 at 12:51:09PM +0200, Michal Hocko wrote:
> On Fri 22-04-22 05:44:13, Kent Overstreet wrote:
> > On Fri, Apr 22, 2022 at 11:27:05AM +0200, Michal Hocko wrote:
> > > We already do that in some form. We dump unreclaimable slabs if they
> > > consume more memory than user pages on LRUs. We also dump all slab
> > > caches with some objects. Why is this approach not good? Should we tweak
> > > the condition to dump or should we limit the dump? These are reasonable 
> > > questions to ask. Your patch has dropped those without explaining any
> > > of the motivation.
> > > 
> > > I am perfectly OK to modify should_dump_unreclaim_slab to dump even if
> > > the slab memory consumption is lower. Also dumping small caches with
> > > handful of objects can be excessive.
> > > 
> > > Wrt to shrinkers I really do not know what kind of shrinkers data would
> > > be useful to dump and when. Therefore I am asking about examples.
> > 
> > Look, I've given you the sample
> 
> That sample is of no use as it doesn't really show how the additional
> information is useful to analyze the allocation failure. I thought we
> have agreed on that. You still haven't given any example where the
> information is useful. So I do not really see any reason to change the
> existing output.
> 
> > output you asked for and explained repeatedly my
> > rationale and you haven't directly responded;
> 
> Your rationale is that we need more data and I do agree but it is not
> clear which data and under which conditions.

You're completely mischaracterizing and making this _way_ more complicated than
it has to be, but I'll repeat:

- For the slab changes, top 10 slabs in sorted order, with human readable units
  are _vastly_ easier on human eyes than pages of slab output, in the previous
  format

- Shrinkers weren't reported on before at all, and as shrinkers are part of
  memory reclaim they're pretty integral to OOM debugging.

> > if you have a reason you're
> > against the patches please say so, but please give your reasoning.
> 
> I have expressed that already, I believe, but let me repeat. I do not
> like altering the oom report without a justification on how this new
> output is useful. You have failed to explained that so far.

Uh huh.

Sounds like someone has some scripts he doesn't want to have to update.
diff mbox series

Patch

diff --git a/lib/Makefile b/lib/Makefile
index 31a3904eda..c5041d33d0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,7 +30,7 @@  endif
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o timerqueue.o xarray.o \
 	 idr.o extable.o sha1.o irq_regs.o argv_split.o \
-	 flex_proportions.o ratelimit.o show_mem.o \
+	 flex_proportions.o ratelimit.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
diff --git a/mm/Makefile b/mm/Makefile
index 70d4309c9c..97c0be12f3 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -54,7 +54,7 @@  obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o gup.o mmap_lock.o $(mmu-y)
+			   debug.o gup.o mmap_lock.o show_mem.o $(mmu-y)
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 832fb33037..659c7d6376 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -171,27 +171,6 @@  static bool oom_unkillable_task(struct task_struct *p)
 	return false;
 }
 
-/*
- * Check whether unreclaimable slab amount is greater than
- * all user memory(LRU pages).
- * dump_unreclaimable_slab() could help in the case that
- * oom due to too much unreclaimable slab used by kernel.
-*/
-static bool should_dump_unreclaim_slab(void)
-{
-	unsigned long nr_lru;
-
-	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
-		 global_node_page_state(NR_INACTIVE_ANON) +
-		 global_node_page_state(NR_ACTIVE_FILE) +
-		 global_node_page_state(NR_INACTIVE_FILE) +
-		 global_node_page_state(NR_ISOLATED_ANON) +
-		 global_node_page_state(NR_ISOLATED_FILE) +
-		 global_node_page_state(NR_UNEVICTABLE);
-
-	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
-}
-
 /**
  * oom_badness - heuristic function to determine which candidate task to kill
  * @p: task struct of which task we should calculate
@@ -465,8 +444,6 @@  static void dump_header(struct oom_control *oc, struct task_struct *p)
 		mem_cgroup_print_oom_meminfo(oc->memcg);
 	else {
 		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
-		if (should_dump_unreclaim_slab())
-			dump_unreclaimable_slab();
 	}
 	if (sysctl_oom_dump_tasks)
 		dump_tasks(oc);
diff --git a/lib/show_mem.c b/mm/show_mem.c
similarity index 78%
rename from lib/show_mem.c
rename to mm/show_mem.c
index 1c26c14ffb..c9f37f13d6 100644
--- a/lib/show_mem.c
+++ b/mm/show_mem.c
@@ -7,11 +7,15 @@ 
 
 #include <linux/mm.h>
 #include <linux/cma.h>
+#include <linux/printbuf.h>
+
+#include "slab.h"
 
 void show_mem(unsigned int filter, nodemask_t *nodemask)
 {
 	pg_data_t *pgdat;
 	unsigned long total = 0, reserved = 0, highmem = 0;
+	struct printbuf buf = PRINTBUF;
 
 	printk("Mem-Info:\n");
 	show_free_areas(filter, nodemask);
@@ -41,4 +45,14 @@  void show_mem(unsigned int filter, nodemask_t *nodemask)
 #ifdef CONFIG_MEMORY_FAILURE
 	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
 #endif
+
+	pr_info("Unreclaimable slab info:\n");
+	dump_unreclaimable_slab(&buf);
+	printk("%s", buf.buf);
+	printbuf_reset(&buf);
+
+	printk("Shrinkers:\n");
+	shrinkers_to_text(&buf);
+	printk("%s", buf.buf);
+	printbuf_exit(&buf);
 }
diff --git a/mm/slab.h b/mm/slab.h
index c7f2abc2b1..abefbf7674 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -788,10 +788,12 @@  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 
 #endif
 
+struct printbuf;
+
 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
-void dump_unreclaimable_slab(void);
+void dump_unreclaimable_slab(struct printbuf *);
 #else
-static inline void dump_unreclaimable_slab(void)
+static inline void dump_unreclaimable_slab(struct printbuf *out)
 {
 }
 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23f2ab0713..cb1c548c73 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -24,6 +24,7 @@ 
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/memcontrol.h>
+#include <linux/printbuf.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
@@ -1084,10 +1085,15 @@  static int slab_show(struct seq_file *m, void *p)
 	return 0;
 }
 
-void dump_unreclaimable_slab(void)
+void dump_unreclaimable_slab(struct printbuf *out)
 {
 	struct kmem_cache *s;
 	struct slabinfo sinfo;
+	struct slab_by_mem {
+		struct kmem_cache *s;
+		size_t total, active;
+	} slabs_by_mem[10], n;
+	int i, nr = 0;
 
 	/*
 	 * Here acquiring slab_mutex is risky since we don't prefer to get
@@ -1097,12 +1103,11 @@  void dump_unreclaimable_slab(void)
 	 * without acquiring the mutex.
 	 */
 	if (!mutex_trylock(&slab_mutex)) {
-		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
+		pr_buf(out, "excessive unreclaimable slab but cannot dump stats\n");
 		return;
 	}
 
-	pr_info("Unreclaimable slab info:\n");
-	pr_info("Name                      Used          Total\n");
+	out->atomic++;
 
 	list_for_each_entry(s, &slab_caches, list) {
 		if (s->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1110,11 +1115,43 @@  void dump_unreclaimable_slab(void)
 
 		get_slabinfo(s, &sinfo);
 
-		if (sinfo.num_objs > 0)
-			pr_info("%-17s %10luKB %10luKB\n", s->name,
-				(sinfo.active_objs * s->size) / 1024,
-				(sinfo.num_objs * s->size) / 1024);
+		if (!sinfo.num_objs)
+			continue;
+
+		n.s = s;
+		n.total = sinfo.num_objs * s->size;
+		n.active = sinfo.active_objs * s->size;
+
+		for (i = 0; i < nr; i++)
+			if (n.total < slabs_by_mem[i].total)
+				break;
+
+		if (nr < ARRAY_SIZE(slabs_by_mem)) {
+			memmove(&slabs_by_mem[i + 1],
+				&slabs_by_mem[i],
+				sizeof(slabs_by_mem[0]) * (nr - i));
+			nr++;
+		} else if (i) {
+			i--;
+			memmove(&slabs_by_mem[0],
+				&slabs_by_mem[1],
+				sizeof(slabs_by_mem[0]) * i);
+		} else {
+			continue;
+		}
+
+		slabs_by_mem[i] = n;
+	}
+
+	for (i = nr - 1; i >= 0; --i) {
+		pr_buf(out, "%-17s total: ", slabs_by_mem[i].s->name);
+		pr_human_readable_u64(out, slabs_by_mem[i].total);
+		pr_buf(out, " active: ");
+		pr_human_readable_u64(out, slabs_by_mem[i].active);
+		pr_newline(out);
 	}
+
+	--out->atomic;
 	mutex_unlock(&slab_mutex);
 }