Message ID | 20220901173516.702122-21-surenb@google.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | per-VMA locks proposal | expand |
Le 01/09/2022 à 19:35, Suren Baghdasaryan a écrit : > Add a new CONFIG_PER_VMA_LOCK_STATS config option to dump extra > statistics about handling page fault under VMA lock. > Why not making this a default when per VMA lock are enabled? > Signed-off-by: Suren Baghdasaryan <surenb@google.com> > --- > include/linux/vm_event_item.h | 6 ++++++ > include/linux/vmstat.h | 6 ++++++ > mm/Kconfig.debug | 8 ++++++++ > mm/vmstat.c | 6 ++++++ > 4 files changed, 26 insertions(+) > > diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h > index f3fc36cd2276..a325783ed05d 100644 > --- a/include/linux/vm_event_item.h > +++ b/include/linux/vm_event_item.h > @@ -150,6 +150,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, > #ifdef CONFIG_X86 > DIRECT_MAP_LEVEL2_SPLIT, > DIRECT_MAP_LEVEL3_SPLIT, > +#endif > +#ifdef CONFIG_PER_VMA_LOCK_STATS > + VMA_LOCK_SUCCESS, > + VMA_LOCK_ABORT, > + VMA_LOCK_RETRY, > + VMA_LOCK_MISS, > #endif > NR_VM_EVENT_ITEMS > }; > diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h > index bfe38869498d..0c2611899cfc 100644 > --- a/include/linux/vmstat.h > +++ b/include/linux/vmstat.h > @@ -131,6 +131,12 @@ static inline void vm_events_fold_cpu(int cpu) > #define count_vm_vmacache_event(x) do {} while (0) > #endif > > +#ifdef CONFIG_PER_VMA_LOCK_STATS > +#define count_vm_vma_lock_event(x) count_vm_event(x) > +#else > +#define count_vm_vma_lock_event(x) do {} while (0) > +#endif > + > #define __count_zid_vm_events(item, zid, delta) \ > __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) > > diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug > index ce8dded36de9..075642763a03 100644 > --- a/mm/Kconfig.debug > +++ b/mm/Kconfig.debug > @@ -207,3 +207,11 @@ config PTDUMP_DEBUGFS > kernel. > > If in doubt, say N. > + > + > +config PER_VMA_LOCK_STATS > + bool "Statistics for per-vma locks" > + depends on PER_VMA_LOCK > + help > + Statistics for per-vma locks. > + If in doubt, say N. 
> diff --git a/mm/vmstat.c b/mm/vmstat.c > index 90af9a8572f5..3f3804c846a6 100644 > --- a/mm/vmstat.c > +++ b/mm/vmstat.c > @@ -1411,6 +1411,12 @@ const char * const vmstat_text[] = { > "direct_map_level2_splits", > "direct_map_level3_splits", > #endif > +#ifdef CONFIG_PER_VMA_LOCK_STATS > + "vma_lock_success", > + "vma_lock_abort", > + "vma_lock_retry", > + "vma_lock_miss", > +#endif > #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ > }; > #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
On Fri, Sep 9, 2022 at 7:29 AM Laurent Dufour <ldufour@linux.ibm.com> wrote: > > Le 01/09/2022 à 19:35, Suren Baghdasaryan a écrit : > > Add a new CONFIG_PER_VMA_LOCK_STATS config option to dump extra > > statistics about handling page fault under VMA lock. > > > > Why not making this a default when per VMA lock are enabled? Good idea. If no objections I'll make that change. > > > Signed-off-by: Suren Baghdasaryan <surenb@google.com> > > --- > > include/linux/vm_event_item.h | 6 ++++++ > > include/linux/vmstat.h | 6 ++++++ > > mm/Kconfig.debug | 8 ++++++++ > > mm/vmstat.c | 6 ++++++ > > 4 files changed, 26 insertions(+) > > > > diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h > > index f3fc36cd2276..a325783ed05d 100644 > > --- a/include/linux/vm_event_item.h > > +++ b/include/linux/vm_event_item.h > > @@ -150,6 +150,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, > > #ifdef CONFIG_X86 > > DIRECT_MAP_LEVEL2_SPLIT, > > DIRECT_MAP_LEVEL3_SPLIT, > > +#endif > > +#ifdef CONFIG_PER_VMA_LOCK_STATS > > + VMA_LOCK_SUCCESS, > > + VMA_LOCK_ABORT, > > + VMA_LOCK_RETRY, > > + VMA_LOCK_MISS, > > #endif > > NR_VM_EVENT_ITEMS > > }; > > diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h > > index bfe38869498d..0c2611899cfc 100644 > > --- a/include/linux/vmstat.h > > +++ b/include/linux/vmstat.h > > @@ -131,6 +131,12 @@ static inline void vm_events_fold_cpu(int cpu) > > #define count_vm_vmacache_event(x) do {} while (0) > > #endif > > > > +#ifdef CONFIG_PER_VMA_LOCK_STATS > > +#define count_vm_vma_lock_event(x) count_vm_event(x) > > +#else > > +#define count_vm_vma_lock_event(x) do {} while (0) > > +#endif > > + > > #define __count_zid_vm_events(item, zid, delta) \ > > __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) > > > > diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug > > index ce8dded36de9..075642763a03 100644 > > --- a/mm/Kconfig.debug > > +++ b/mm/Kconfig.debug > > @@ -207,3 +207,11 @@ config PTDUMP_DEBUGFS > > 
kernel. > > > > If in doubt, say N. > > + > > + > > +config PER_VMA_LOCK_STATS > > + bool "Statistics for per-vma locks" > > + depends on PER_VMA_LOCK > > + help > > + Statistics for per-vma locks. > > + If in doubt, say N. > > diff --git a/mm/vmstat.c b/mm/vmstat.c > > index 90af9a8572f5..3f3804c846a6 100644 > > --- a/mm/vmstat.c > > +++ b/mm/vmstat.c > > @@ -1411,6 +1411,12 @@ const char * const vmstat_text[] = { > > "direct_map_level2_splits", > > "direct_map_level3_splits", > > #endif > > +#ifdef CONFIG_PER_VMA_LOCK_STATS > > + "vma_lock_success", > > + "vma_lock_abort", > > + "vma_lock_retry", > > + "vma_lock_miss", > > +#endif > > #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ > > }; > > #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */ >
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index f3fc36cd2276..a325783ed05d 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -150,6 +150,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_X86 DIRECT_MAP_LEVEL2_SPLIT, DIRECT_MAP_LEVEL3_SPLIT, +#endif +#ifdef CONFIG_PER_VMA_LOCK_STATS + VMA_LOCK_SUCCESS, + VMA_LOCK_ABORT, + VMA_LOCK_RETRY, + VMA_LOCK_MISS, #endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index bfe38869498d..0c2611899cfc 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -131,6 +131,12 @@ static inline void vm_events_fold_cpu(int cpu) #define count_vm_vmacache_event(x) do {} while (0) #endif +#ifdef CONFIG_PER_VMA_LOCK_STATS +#define count_vm_vma_lock_event(x) count_vm_event(x) +#else +#define count_vm_vma_lock_event(x) do {} while (0) +#endif + #define __count_zid_vm_events(item, zid, delta) \ __count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta) diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index ce8dded36de9..075642763a03 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -207,3 +207,11 @@ config PTDUMP_DEBUGFS kernel. If in doubt, say N. + + +config PER_VMA_LOCK_STATS + bool "Statistics for per-vma locks" + depends on PER_VMA_LOCK + help + Statistics for per-vma locks. + If in doubt, say N. diff --git a/mm/vmstat.c b/mm/vmstat.c index 90af9a8572f5..3f3804c846a6 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1411,6 +1411,12 @@ const char * const vmstat_text[] = { "direct_map_level2_splits", "direct_map_level3_splits", #endif +#ifdef CONFIG_PER_VMA_LOCK_STATS + "vma_lock_success", + "vma_lock_abort", + "vma_lock_retry", + "vma_lock_miss", +#endif #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
Add a new CONFIG_PER_VMA_LOCK_STATS config option to dump extra statistics about handling page faults under VMA lock. Signed-off-by: Suren Baghdasaryan <surenb@google.com> --- include/linux/vm_event_item.h | 6 ++++++ include/linux/vmstat.h | 6 ++++++ mm/Kconfig.debug | 8 ++++++++ mm/vmstat.c | 6 ++++++ 4 files changed, 26 insertions(+)