
[RFC,3/4] mm/ksm: allow anonymous memory automerging

Message ID 20190510072125.18059-4-oleksandr@redhat.com (mailing list archive)
State New, archived
Series mm/ksm: add option to automerge VMAs

Commit Message

Oleksandr Natalenko May 10, 2019, 7:21 a.m. UTC
Introduce two KSM modes:

  * madvise, which is the default and maintains the old behaviour; and
  * always, in which new anonymous allocations are marked as eligible
    for merging.

The mode is controlled either via sysfs or via the kernel cmdline; the
latter allows VMAs to be marked as early as possible during the boot
process.
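
As an illustration (a userspace sketch, not part of this patch; it
assumes the new "mode" attribute shows up in the usual
/sys/kernel/mm/ksm/ directory), switching modes at runtime boils down
to writing the mode name into the sysfs file, while at boot time the
same is achieved with ksm_mode=always on the kernel command line:

/* Illustrative only: flip KSM into "always" mode via the new sysfs knob. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/mm/ksm/mode", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "always", 6) != 6) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}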

The previously introduced ksm_enter() helper is used to hook into
do_anonymous_page() and mark each eligible VMA as ready for merging.
This avoids introducing a separate kthread that would walk the
task/VMA lists.
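
For reference, a rough sketch of what ksm_enter() is expected to do
(the helper itself comes from an earlier patch in this series and is
not shown here; the checks below are assumptions modelled on the
existing MADV_MERGEABLE path of ksm_madvise(), and vma is unused in
this simplified form):

/*
 * Assumed shape of ksm_enter(): mark a compatible anonymous VMA as
 * VM_MERGEABLE and register its mm with ksmd, mirroring what
 * ksm_madvise() does for MADV_MERGEABLE.  Sketch only.
 */
int ksm_enter(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long *vm_flags)
{
	int err;

	/* Skip VMAs KSM cannot handle or that explicitly opted out. */
	if (*vm_flags & (VM_MERGEABLE | VM_UNMERGEABLE | VM_SHARED |
			 VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND |
			 VM_HUGETLB | VM_MIXEDMAP))
		return 0;

	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
		err = __ksm_enter(mm);
		if (err)
			return err;
	}

	*vm_flags |= VM_MERGEABLE;
	return 0;
}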

Signed-off-by: Oleksandr Natalenko <oleksandr@redhat.com>
---
 include/linux/ksm.h |  3 +++
 mm/ksm.c            | 65 +++++++++++++++++++++++++++++++++++++++++++++
 mm/memory.c         |  6 +++++
 3 files changed, 74 insertions(+)

Patch

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index bc13f228e2ed..3c076b35259c 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -21,6 +21,9 @@ struct mem_cgroup;
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
+#ifdef VM_UNMERGEABLE
+bool ksm_mode_always(void);
+#endif
 int ksm_enter(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long *vm_flags);
 int __ksm_enter(struct mm_struct *mm);
diff --git a/mm/ksm.c b/mm/ksm.c
index 0fb5f850087a..6a2280b875cc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -295,6 +295,12 @@ static int ksm_nr_node_ids = 1;
 static unsigned long ksm_run = KSM_RUN_STOP;
 static void wait_while_offlining(void);
 
+#ifdef VM_UNMERGEABLE
+#define KSM_MODE_MADVISE	0
+#define KSM_MODE_ALWAYS		1
+static unsigned long ksm_mode = KSM_MODE_MADVISE;
+#endif
+
 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
 static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
 static DEFINE_MUTEX(ksm_thread_mutex);
@@ -2478,6 +2484,36 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 	return 0;
 }
 
+#ifdef VM_UNMERGEABLE
+bool ksm_mode_always(void)
+{
+	return ksm_mode == KSM_MODE_ALWAYS;
+}
+
+static int __init setup_ksm_mode(char *str)
+{
+	int ret = 0;
+
+	if (!str)
+		goto out;
+
+	if (!strcmp(str, "madvise")) {
+		ksm_mode = KSM_MODE_MADVISE;
+		ret = 1;
+	} else if (!strcmp(str, "always")) {
+		ksm_mode = KSM_MODE_ALWAYS;
+		ret = 1;
+	}
+
+out:
+	if (!ret)
+		pr_warn("ksm_mode= cannot parse, ignored\n");
+
+	return ret;
+}
+__setup("ksm_mode=", setup_ksm_mode);
+#endif
+
 int ksm_enter(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long *vm_flags)
 {
@@ -2881,6 +2917,35 @@ static void wait_while_offlining(void)
 	static struct kobj_attribute _name##_attr = \
 		__ATTR(_name, 0644, _name##_show, _name##_store)
 
+#ifdef VM_UNMERGEABLE
+static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr,
+	char *buf)
+{
+	switch (ksm_mode) {
+	case KSM_MODE_MADVISE:
+		return sprintf(buf, "always [madvise]\n");
+	case KSM_MODE_ALWAYS:
+		return sprintf(buf, "[always] madvise\n");
+	}
+
+	return sprintf(buf, "always [madvise]\n");
+}
+
+static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
+	const char *buf, size_t count)
+{
+	if (!memcmp("madvise", buf, min(sizeof("madvise")-1, count)))
+		ksm_mode = KSM_MODE_MADVISE;
+	else if (!memcmp("always", buf, min(sizeof("always")-1, count)))
+		ksm_mode = KSM_MODE_ALWAYS;
+	else
+		return -EINVAL;
+
+	return count;
+}
+KSM_ATTR(mode);
+#endif
+
 static ssize_t sleep_millisecs_show(struct kobject *kobj,
 				    struct kobj_attribute *attr, char *buf)
 {
diff --git a/mm/memory.c b/mm/memory.c
index ab650c21bccd..08f3f92de310 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2994,6 +2994,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	update_mmu_cache(vma, vmf->address, vmf->pte);
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+
+#if defined(CONFIG_KSM) && defined(VM_UNMERGEABLE)
+	if (ksm_mode_always())
+		ksm_enter(vma->vm_mm, vma, &vma->vm_flags);
+#endif
+
 	return ret;
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
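
Not part of the patch, but as a quick read-back illustration: once the
attribute is in place, the current selection is reported in the
bracketed-choice format produced by mode_show() above, e.g.
"[always] madvise":

/* Illustrative only: print the currently selected KSM mode. */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/mm/ksm/mode", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "[always] madvise" */
	fclose(f);
	return 0;
}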