@@ -86,6 +86,14 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
#define MAX_PTE_MAPPED_THP 8
+struct collapse_control {
+ /* Num pages scanned per node */
+ int node_load[MAX_NUMNODES];
+
+ /* Last target selected in khugepaged_find_target_node() for this scan */
+ int last_target_node;
+};
+
/**
* struct mm_slot - hash lookup from mm to mm_slot
* @hash: hash collision list
@@ -796,9 +804,7 @@ static void khugepaged_alloc_sleep(void)
remove_wait_queue(&khugepaged_wait, &wait);
}
-static int khugepaged_node_load[MAX_NUMNODES];
-
-static bool khugepaged_scan_abort(int nid)
+static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
{
int i;
@@ -810,11 +816,11 @@ static bool khugepaged_scan_abort(int nid)
return false;
/* If there is a count for this node already, it must be acceptable */
- if (khugepaged_node_load[nid])
+ if (cc->node_load[nid])
return false;
for (i = 0; i < MAX_NUMNODES; i++) {
- if (!khugepaged_node_load[i])
+ if (!cc->node_load[i])
continue;
if (node_distance(nid, i) > node_reclaim_distance)
return true;
@@ -829,28 +835,28 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
}
#ifdef CONFIG_NUMA
-static int khugepaged_find_target_node(void)
+static int khugepaged_find_target_node(struct collapse_control *cc)
{
- static int last_khugepaged_target_node = NUMA_NO_NODE;
int nid, target_node = 0, max_value = 0;
/* find first node with max normal pages hit */
for (nid = 0; nid < MAX_NUMNODES; nid++)
- if (khugepaged_node_load[nid] > max_value) {
- max_value = khugepaged_node_load[nid];
+ if (cc->node_load[nid] > max_value) {
+ max_value = cc->node_load[nid];
target_node = nid;
}
/* do some balance if several nodes have the same hit record */
- if (target_node <= last_khugepaged_target_node)
- for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
- nid++)
- if (max_value == khugepaged_node_load[nid]) {
+ if (target_node <= cc->last_target_node)
+ for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
+ nid++) {
+ if (max_value == cc->node_load[nid]) {
target_node = nid;
break;
}
+ }
- last_khugepaged_target_node = target_node;
+ cc->last_target_node = target_node;
return target_node;
}
@@ -888,7 +894,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
return *hpage;
}
#else
-static int khugepaged_find_target_node(void)
+static int khugepaged_find_target_node(struct collapse_control *cc)
{
return 0;
}
@@ -1248,7 +1254,8 @@ static void collapse_huge_page(struct mm_struct *mm,
static int khugepaged_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
- struct page **hpage)
+ struct page **hpage,
+ struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
@@ -1266,7 +1273,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
if (result != SCAN_SUCCEED)
goto out;
- memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ memset(cc->node_load, 0, sizeof(cc->node_load));
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
@@ -1332,16 +1339,16 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
/*
* Record which node the original page is from and save this
- * information to khugepaged_node_load[].
+ * information to cc->node_load[].
* Khugepaged will allocate hugepage from the node has the max
* hit record.
*/
node = page_to_nid(page);
- if (khugepaged_scan_abort(node)) {
+ if (khugepaged_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
goto out_unmap;
}
- khugepaged_node_load[node]++;
+ cc->node_load[node]++;
if (!PageLRU(page)) {
result = SCAN_PAGE_LRU;
goto out_unmap;
@@ -1392,7 +1399,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret) {
- node = khugepaged_find_target_node();
+ node = khugepaged_find_target_node(cc);
/* collapse_huge_page will return with the mmap_lock released */
collapse_huge_page(mm, address, hpage, node,
referenced, unmapped);
@@ -2044,7 +2051,8 @@ static void collapse_file(struct mm_struct *mm,
}
static void khugepaged_scan_file(struct mm_struct *mm,
- struct file *file, pgoff_t start, struct page **hpage)
+ struct file *file, pgoff_t start, struct page **hpage,
+ struct collapse_control *cc)
{
struct page *page = NULL;
struct address_space *mapping = file->f_mapping;
@@ -2055,7 +2063,7 @@ static void khugepaged_scan_file(struct mm_struct *mm,
present = 0;
swap = 0;
- memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ memset(cc->node_load, 0, sizeof(cc->node_load));
rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
if (xas_retry(&xas, page))
@@ -2080,11 +2088,11 @@ static void khugepaged_scan_file(struct mm_struct *mm,
}
node = page_to_nid(page);
- if (khugepaged_scan_abort(node)) {
+ if (khugepaged_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
break;
}
- khugepaged_node_load[node]++;
+ cc->node_load[node]++;
if (!PageLRU(page)) {
result = SCAN_PAGE_LRU;
@@ -2117,7 +2125,7 @@ static void khugepaged_scan_file(struct mm_struct *mm,
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
} else {
- node = khugepaged_find_target_node();
+ node = khugepaged_find_target_node(cc);
collapse_file(mm, file, start, hpage, node);
}
}
@@ -2126,7 +2134,8 @@ static void khugepaged_scan_file(struct mm_struct *mm,
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
- struct file *file, pgoff_t start, struct page **hpage)
+ struct file *file, pgoff_t start, struct page **hpage,
+ struct collapse_control *cc)
{
BUILD_BUG();
}
@@ -2137,7 +2146,8 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
- struct page **hpage)
+ struct page **hpage,
+ struct collapse_control *cc)
__releases(&khugepaged_mm_lock)
__acquires(&khugepaged_mm_lock)
{
@@ -2213,12 +2223,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
mmap_read_unlock(mm);
ret = 1;
- khugepaged_scan_file(mm, file, pgoff, hpage);
+ khugepaged_scan_file(mm, file, pgoff, hpage, cc);
fput(file);
} else {
ret = khugepaged_scan_pmd(mm, vma,
khugepaged_scan.address,
- hpage);
+ hpage, cc);
}
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
@@ -2274,7 +2284,7 @@ static int khugepaged_wait_event(void)
kthread_should_stop();
}
-static void khugepaged_do_scan(void)
+static void khugepaged_do_scan(struct collapse_control *cc)
{
struct page *hpage = NULL;
unsigned int progress = 0, pass_through_head = 0;
@@ -2298,7 +2308,7 @@ static void khugepaged_do_scan(void)
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
- &hpage);
+ &hpage, cc);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
@@ -2337,12 +2347,15 @@ static void khugepaged_wait_work(void)
static int khugepaged(void *none)
{
struct mm_slot *mm_slot;
+ struct collapse_control cc = {
+ .last_target_node = NUMA_NO_NODE,
+ };
set_freezable();
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
- khugepaged_do_scan();
+ khugepaged_do_scan(&cc);
khugepaged_wait_work();
}
Modularize hugepage collapse by introducing struct collapse_control.  This
structure serves to describe the properties of the requested collapse, as
well as serving as a local scratch pad to use during the collapse itself.

Signed-off-by: Zach O'Keefe <zokeefe@google.com>
---
 mm/khugepaged.c | 79 ++++++++++++++++++++++++++++---------------------
 1 file changed, 46 insertions(+), 33 deletions(-)
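
Note for readers skimming the diff: the change replaces the file-scope scratch
state (khugepaged_node_load[] and last_khugepaged_target_node) with a
caller-owned struct collapse_control that is threaded through the scan path.
Below is a minimal, self-contained userspace C sketch of that pattern and of
the tie-breaking done in khugepaged_find_target_node(). It is illustrative
only; the DEMO_* constants, struct demo_collapse_control, and
demo_find_target_node() are made-up stand-ins, not kernel symbols.

    #include <stdio.h>

    #define DEMO_MAX_NODES 4    /* stand-in for MAX_NUMNODES */
    #define DEMO_NO_NODE  (-1)  /* stand-in for NUMA_NO_NODE */

    /* Caller-owned scratch state, mirroring struct collapse_control above. */
    struct demo_collapse_control {
            int node_load[DEMO_MAX_NODES];  /* pages hit per node in one scan */
            int last_target_node;           /* last node chosen, rotates ties */
    };

    /* Pick the first node with the max hit count, rotating among ties. */
    static int demo_find_target_node(struct demo_collapse_control *cc)
    {
            int nid, target_node = 0, max_value = 0;

            for (nid = 0; nid < DEMO_MAX_NODES; nid++)
                    if (cc->node_load[nid] > max_value) {
                            max_value = cc->node_load[nid];
                            target_node = nid;
                    }

            /* Balance between nodes that share the same hit count. */
            if (target_node <= cc->last_target_node)
                    for (nid = cc->last_target_node + 1; nid < DEMO_MAX_NODES; nid++)
                            if (cc->node_load[nid] == max_value) {
                                    target_node = nid;
                                    break;
                            }

            cc->last_target_node = target_node;
            return target_node;
    }

    int main(void)
    {
            struct demo_collapse_control cc = { .last_target_node = DEMO_NO_NODE };
            int i;

            /* Same per-node hits on both scans: the tie rotates from node 1 to node 3. */
            for (i = 0; i < 2; i++) {
                    cc.node_load[1] = 5;
                    cc.node_load[3] = 5;
                    printf("scan %d -> target node %d\n", i, demo_find_target_node(&cc));
            }
            return 0;
    }

Because the scratch state lives in the caller's struct rather than in
file-scope globals, additional callers of the collapse path could each hold
their own collapse_control instead of sharing global state with the
khugepaged thread.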