
[RFC,19/25] mempolicy: add MPOL_F_MEMCG flag, enforcing memcg memory limit.

Message ID 20190404020046.32741-20-zi.yan@sent.com
State New, archived
Series Accelerate page migration and use memcg for PMEM management

Commit Message

Zi Yan April 4, 2019, 2 a.m. UTC
From: Zi Yan <ziy@nvidia.com>

When MPOL_F_MEMCG is set and MPOL_PREFERRED is used, we enforce the
memory limit set in the corresponding memcg: once an allocation would
push the preferred node past that limit, it falls back to the next
node with memory.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/uapi/linux/mempolicy.h |  3 ++-
 mm/mempolicy.c                 | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 38 insertions(+), 1 deletion(-)
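
From userspace, the new flag is simply OR-ed into the mode argument of
set_mempolicy(2). A minimal sketch of a caller, assuming a kernel built
with this series (the flag value is copied from the uapi hunk below,
since no released header defines it; link with -lnuma):

/* Hypothetical caller of the patched set_mempolicy(); illustration only. */
#include <numaif.h>		/* set_mempolicy(), MPOL_PREFERRED */
#include <stdio.h>
#include <stdlib.h>

#ifndef MPOL_F_MEMCG
#define MPOL_F_MEMCG	(1 << 13)	/* from the patched mempolicy.h */
#endif

int main(void)
{
	/* Prefer node 0, but honor this task's memcg limit there: once
	 * the limit is reached, allocations spill to the next node. */
	unsigned long nodemask = 1UL << 0;

	if (set_mempolicy(MPOL_PREFERRED | MPOL_F_MEMCG,
			  &nodemask, sizeof(nodemask) * 8)) {
		perror("set_mempolicy");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}

On kernels without this series the unknown bit makes the mode value
fail validation, so the call above returns EINVAL rather than silently
dropping the limit enforcement.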

Patch

diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index eb6560e..a9d03e5 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -28,12 +28,13 @@ enum {
 /* Flags for set_mempolicy */
 #define MPOL_F_STATIC_NODES	(1 << 15)
 #define MPOL_F_RELATIVE_NODES	(1 << 14)
+#define MPOL_F_MEMCG		(1 << 13)
 
 /*
  * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
  * either set_mempolicy() or mbind().
  */
-#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES | MPOL_F_MEMCG)
 
 /* Flags for get_mempolicy */
 #define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index af171cc..0e30049 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2040,6 +2040,42 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 		goto out;
 	}
 
+	if (pol->mode == MPOL_PREFERRED && (pol->flags & MPOL_F_MEMCG)) {
+		struct task_struct *p = current;
+		struct mem_cgroup *memcg = mem_cgroup_from_task(p);
+		int nid = pol->v.preferred_node;
+		unsigned long nr_memcg_node_size;
+		struct mm_struct *mm = get_task_mm(p);
+		unsigned long nr_pages = hugepage ? HPAGE_PMD_NR : 1;
+
+		if (!(memcg && mm)) {
+			if (mm)
+				mmput(mm);
+			goto use_other_policy;
+		}
+
+		/* skip preferred node if mm_manage is going on */
+		if (test_bit(MMF_MM_MANAGE, &mm->flags)) {
+			nid = next_memory_node(nid);
+			if (nid == MAX_NUMNODES)
+				nid = first_memory_node;
+		}
+		mmput(mm);
+
+		nr_memcg_node_size = memcg_max_size_node(memcg, nid);
+
+		while (nr_memcg_node_size != ULONG_MAX &&
+			   nr_memcg_node_size <= (memcg_size_node(memcg, nid) + nr_pages)) {
+			if ((nid = next_memory_node(nid)) == MAX_NUMNODES)
+				nid = first_memory_node;
+			nr_memcg_node_size = memcg_max_size_node(memcg, nid);
+		}
+
+		mpol_cond_put(pol);
+		page = __alloc_pages_node(nid, gfp | __GFP_THISNODE, order);
+		goto out;
+	}
+use_other_policy:
 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
 		int hpage_node = node;
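
For review, the node-selection loop above reduces to the standalone
sketch below. The toy limit/usage arrays stand in for the series'
memcg_max_size_node()/memcg_size_node() helpers (ULONG_MAX meaning
"no limit"), and the values are illustrative only:

#include <limits.h>
#include <stdio.h>

#define NR_NODES	2

/* Stand-ins for memcg_max_size_node()/memcg_size_node(), in pages. */
static const unsigned long limit_pages[NR_NODES] = { 1024, ULONG_MAX };
static const unsigned long usage_pages[NR_NODES] = { 1000, 0 };

/* next_memory_node() analogue: advance, wrapping to the first node. */
static int next_node_wrap(int nid)
{
	return (nid + 1) % NR_NODES;
}

/*
 * Mirror of the fallback walk in the alloc_pages_vma() hunk: starting
 * at the preferred node, skip any node whose memcg limit would be
 * exceeded by this allocation; an unlimited node is taken at once.
 */
static int pick_node(int preferred, unsigned long nr_pages)
{
	int nid = preferred;

	while (limit_pages[nid] != ULONG_MAX &&
	       limit_pages[nid] <= usage_pages[nid] + nr_pages)
		nid = next_node_wrap(nid);
	return nid;
}

int main(void)
{
	/* 512 pages no longer fit under node 0's 1024-page cap, so the
	 * walk falls back to the unlimited node 1. */
	printf("allocating on node %d\n", pick_node(0, 512));
	return 0;
}

One property the sketch makes visible: like the loop it mirrors, it
has no exit once every node is at its limit, so a fully-capped memcg
would keep alloc_pages_vma() walking nodes until usage drops somewhere.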