[v2,4/7] mmap_lock: allocate histogram (if enabled) in mm_init
diff mbox series

Message ID 20200528235300.77135-1-axelrasmussen@google.com
State New
Headers show
Series
  • Add histogram measuring mmap_lock contention latency
Related show

Commit Message

Axel Rasmussen May 28, 2020, 11:53 p.m. UTC
In effect, allocate a histogram for each process. On fork, initialize a
new empty histogram (don't keep recorded samples from the parent).

Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
---
 kernel/fork.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

Patch
diff mbox series

diff --git a/kernel/fork.c b/kernel/fork.c
index e702e84897fa..afb036d81a73 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -94,6 +94,7 @@ 
 #include <linux/thread_info.h>
 #include <linux/stackleak.h>
 #include <linux/kasan.h>
+#include <linux/histogram.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -975,6 +976,58 @@  __setup("coredump_filter=", coredump_filter_setup);
 
 #include <linux/init_task.h>
 
+#ifdef CONFIG_MMAP_LOCK_HISTOGRAMS
+/* Upper bounds (ns) of the latency buckets; explicit ULL keeps 64-bit
+ * constants unambiguous on 32-bit targets where unsigned long is 32 bits. */
+static const u64 mmap_lock_contention_buckets[] = {
+	1,		/* 0ns - uncontended acquisitions */
+	250,		/* 250 ns */
+	375,		/* 375 ns */
+	500,		/* 500 ns */
+	1000,		/* 1 us */
+	10000,		/* 10 us */
+	100000,		/* 100 us */
+	500000,		/* 500 us */
+	1000000,	/* 1 ms */
+	5000000,	/* 5 ms */
+	10000000,	/* 10 ms */
+	50000000,	/* 50 ms */
+	100000000,	/* 100 ms */
+	500000000,	/* 500 ms */
+	1000000000,	/* 1 s */
+	5000000000ULL,	/* 5 s */
+	10000000000ULL,	/* 10 s */
+	~0ULL		/* > 10s */
+};
+#endif
+
+static void mm_init_histograms(struct mm_struct *mm) /* allocate a fresh, empty contention histogram for @mm */
+{
+#ifdef CONFIG_MMAP_LOCK_HISTOGRAMS
+	mm->mmap_lock_contention =
+		kmalloc(sizeof(struct histogram_rcu), GFP_KERNEL);
+	if (unlikely(!mm->mmap_lock_contention)) /* OOM: mm simply runs without a histogram */
+		return;
+
+	if (unlikely(histogram_init_rcu(
+		    mm->mmap_lock_contention, mmap_lock_contention_buckets,
+		    ARRAY_SIZE(mmap_lock_contention_buckets)))) {
+		kfree(mm->mmap_lock_contention); /* roll back the allocation on init failure */
+		mm->mmap_lock_contention = NULL; /* NULL marks "no histogram" for readers */
+	}
+#endif
+}
+
+static void mm_free_histograms(struct mm_struct *mm) /* tear down @mm's contention histogram; no-op if absent */
+{
+#ifdef CONFIG_MMAP_LOCK_HISTOGRAMS
+	if (unlikely(!mm->mmap_lock_contention)) /* never allocated, or init failed in mm_init_histograms() */
+		return;
+
+	histogram_destroy_rcu(mm->mmap_lock_contention);
+	kfree(mm->mmap_lock_contention);
+	mm->mmap_lock_contention = NULL; /* defend against double free via a stale pointer */
+#endif
+}
+
 static void mm_init_aio(struct mm_struct *mm)
 {
 #ifdef CONFIG_AIO
@@ -1015,6 +1068,7 @@  static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	mmap_init_lock(mm);
+	mm_init_histograms(mm);
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->core_state = NULL;
 	mm_pgtables_bytes_init(mm);
@@ -1078,6 +1132,7 @@  static inline void __mmput(struct mm_struct *mm)
 {
 	VM_BUG_ON(atomic_read(&mm->mm_users));
 
+	mm_free_histograms(mm);
 	uprobe_clear_state(mm);
 	exit_aio(mm);
 	ksm_exit(mm);