@@ -189,6 +189,16 @@ static bool is_dump_unreclaim_slabs(void)
return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
}
+/*
+ * Rough memory consumption of the given mm which should theoretically be
+ * freed when the mm is removed.
+ */
+static unsigned long oom_badness_pages(struct mm_struct *mm)
+{
+ return get_mm_rss(mm) + get_mm_counter(mm, MM_SWAPENTS) +
+ mm_pgtables_bytes(mm) / PAGE_SIZE;
+}
+
/**
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
@@ -230,8 +240,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
- mm_pgtables_bytes(p->mm) / PAGE_SIZE;
+ points = oom_badness_pages(p->mm);
task_unlock(p);
/* Normalize to oom_score_adj units */
@@ -532,6 +541,16 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
}
}
+ /*
+ * If we still sit on a noticeable amount of memory even after successfully
+ * reaping the address space then keep retrying until exit_mmap makes some
+ * further progress.
+ * TODO: add a flag for the stage when the exit path no longer blocks,
+ * and hand over MMF_OOM_SKIP handling there in that case.
+ */
+ if (ret && oom_badness_pages(mm) > 1024)
+ ret = false;
+
return ret;
}