Munlock isolates the page from the LRU and then looks for another mlocked vma
that still maps it. Thus we can recharge the page to the second mlock user
without isolating it again. This patch adds an 'isolated' argument to
mlock_vma_page() and passes the flag through try_to_unmap() as
TTU_LRU_ISOLATED.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
---
 include/linux/rmap.h |  3 ++-
 mm/gup.c             |  2 +-
 mm/huge_memory.c     |  4 ++--
 mm/internal.h        |  6 ++++--
 mm/ksm.c             |  2 +-
 mm/migrate.c         |  2 +-
 mm/mlock.c           | 21 ++++++++++++---------
 mm/rmap.c            |  5 +++--
 8 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -98,7 +98,8 @@ enum ttu_flags {
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
+ TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
+ TTU_LRU_ISOLATED = 0x200, /* caller isolated page from LRU */
};

#ifdef CONFIG_MMU
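For illustration, the new bit is meant to be OR-ed into the usual ttu_flags
mask when the caller has already taken the page off the LRU. A minimal sketch
(the call site is hypothetical; only the flag names come from this patch):

	/* The caller already did isolate_lru_page(page), so the rmap walk
	 * must not isolate/putback around the recharge. */
	try_to_unmap(page, TTU_MUNLOCK | TTU_LRU_ISOLATED);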
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -306,7 +306,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
* know the page is still mapped, we don't even
* need to check for file-cache page truncation.
*/
- mlock_vma_page(vma, page);
+ mlock_vma_page(vma, page, false);
unlock_page(page);
}
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1513,7 +1513,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
goto skip_mlock;
lru_add_drain();
if (page->mapping && !PageDoubleMap(page))
- mlock_vma_page(vma, page);
+ mlock_vma_page(vma, page, false);
unlock_page(page);
}
skip_mlock:
@@ -3009,7 +3009,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
page_add_file_rmap(new, true);
set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
- mlock_vma_page(vma, new);
+ mlock_vma_page(vma, new, false);
update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -305,7 +305,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
/*
* must be called with vma's mmap_sem held for read or write, and page locked.
*/
-extern void mlock_vma_page(struct vm_area_struct *vma, struct page *page);
+extern void mlock_vma_page(struct vm_area_struct *vma, struct page *page,
+ bool isolated);
extern unsigned int munlock_vma_page(struct page *page);

/*
@@ -364,7 +365,8 @@ vma_address(struct page *page, struct vm_area_struct *vma)
#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
-static inline void mlock_vma_page(struct vm_area_struct *, struct page *) { }
+static inline void mlock_vma_page(struct vm_area_struct *vma,
+ struct page *page, bool isolated) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
#endif /* !CONFIG_MMU */
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1274,7 +1274,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
if (!PageMlocked(kpage)) {
unlock_page(page);
lock_page(kpage);
- mlock_vma_page(vma, kpage);
+ mlock_vma_page(vma, kpage, false);
page = kpage; /* for final unlock */
}
}
diff --git a/mm/migrate.c b/mm/migrate.c
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -269,7 +269,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
page_add_file_rmap(new, false);
}
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
- mlock_vma_page(vma, new);
+ mlock_vma_page(vma, new, false);

if (PageTransHuge(page) && PageMlocked(page))
clear_page_mlock(page);
diff --git a/mm/mlock.c b/mm/mlock.c
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -85,7 +85,8 @@ void clear_page_mlock(struct page *page)
* Mark page as mlocked if not already.
* If page on LRU, isolate and putback to move to unevictable list.
*/
-void mlock_vma_page(struct vm_area_struct *vma, struct page *page)
+void mlock_vma_page(struct vm_area_struct *vma, struct page *page,
+ bool isolated)
{
/* Serialize with page migration */
BUG_ON(!PageLocked(page));
@@ -97,15 +98,17 @@ void mlock_vma_page(struct vm_area_struct *vma, struct page *page)
mod_zone_page_state(page_zone(page), NR_MLOCK,
hpage_nr_pages(page));
count_vm_event(UNEVICTABLE_PGMLOCKED);
- if (!isolate_lru_page(page)) {
- /*
- * Force memory recharge to mlock user. Cannot
- * reclaim memory because called under pte lock.
- */
- mem_cgroup_try_recharge(page, vma->vm_mm,
- GFP_NOWAIT | __GFP_NOFAIL);
+
+ if (!isolated && isolate_lru_page(page))
+ return;
+ /*
+ * Force memory recharge to mlock user.
+ * Cannot reclaim memory because called under pte lock.
+ */
+ mem_cgroup_try_recharge(page, vma->vm_mm,
+ GFP_NOWAIT | __GFP_NOFAIL);
+ if (!isolated)
putback_lru_page(page);
- }
}
}
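With the new parameter the two calling conventions look roughly as follows;
this is a sketch, not part of the patch, and the required page lock and the
surrounding locking are omitted:

	/* Common case: mlock_vma_page() isolates and puts back by itself. */
	mlock_vma_page(vma, page, false);

	/* Munlock-style case: the page is already off the LRU, so only the
	 * recharge runs here and the caller keeps putback responsibility. */
	if (!isolate_lru_page(page)) {
		mlock_vma_page(vma, page, true);
		putback_lru_page(page);
	}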
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1410,7 +1410,8 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* Holding pte lock, we do *not* need
* mmap_sem here
*/
- mlock_vma_page(vma, page);
+ mlock_vma_page(vma, page,
+ !!(flags & TTU_LRU_ISOLATED));
}
ret = false;
page_vma_mapped_walk_done(&pvmw);
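The double negation above just collapses the masked bit into a plain bool for
the new parameter; an equivalent spelling would be:

	bool isolated = (flags & TTU_LRU_ISOLATED) != 0;
	mlock_vma_page(vma, page, isolated);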
@@ -1752,7 +1753,7 @@ void try_to_munlock(struct page *page)
{
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
- .arg = (void *)TTU_MUNLOCK,
+ .arg = (void *)(TTU_MUNLOCK | TTU_LRU_ISOLATED),
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
Munlock isolates page from LRU and then looks for another mlock vma. Thus we could could rechange page to second mlock without isolating. This patch adds argument 'isolated' to mlock_vma_page() and passes this flag through try_to_ummap as TTU_LRU_ISOLATED. Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> --- include/linux/rmap.h | 3 ++- mm/gup.c | 2 +- mm/huge_memory.c | 4 ++-- mm/internal.h | 6 ++++-- mm/ksm.c | 2 +- mm/migrate.c | 2 +- mm/mlock.c | 21 ++++++++++++--------- mm/rmap.c | 5 +++-- 8 files changed, 26 insertions(+), 19 deletions(-)