@@ -110,7 +110,7 @@ u64 stable_page_flags(struct page *page)
u |= 1 << KPF_MMAP;
if (PageAnon(page))
u |= 1 << KPF_ANON;
- if (PageKsm(page))
+ if (PageReadOnly(page))
u |= 1 << KPF_KSM;
/*
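
The hunk above keeps exporting bit KPF_KSM in /proc/kpageflags, so the rename stays invisible to userspace tools such as page-types. As a quick illustration (a stand-alone userspace sketch, not part of the patch; it assumes root, one pfn argument, and bit 21 for KPF_KSM per linux/kernel-page-flags.h), the bit can be read back like this:

/* Sketch: read /proc/kpageflags and test KPF_KSM (bit 21) for one pfn.
 * Not part of the patch; error handling is deliberately minimal. */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	uint64_t flags, pfn;
	int fd;

	if (argc != 2)
		return 1;
	pfn = strtoull(argv[1], NULL, 0);

	fd = open("/proc/kpageflags", O_RDONLY);
	if (fd < 0)
		return 1;
	/* The file is an array of one u64 of flags per pfn. */
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags))
		return 1;
	printf("pfn %" PRIu64 ": KPF_KSM=%d\n", pfn, (int)((flags >> 21) & 1));
	close(fd);
	return 0;
}
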
@@ -374,12 +374,12 @@ PAGEFLAG(Idle, idle, PF_ANY)
* page->mapping points to its anon_vma, not to a struct address_space;
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_PAGE_RONLY is enabled,
* the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
* bit; and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
+ * structure which KSM associates with that read-only merged page. See ksm.h.
*
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
+ * PAGE_MAPPING_RONLY without PAGE_MAPPING_ANON is used for non-lru movable
 * page and then page->mapping points to a struct address_space.
*
* Please note that, confusingly, "page_mapping" refers to the inode
@@ -388,7 +388,7 @@ PAGEFLAG(Idle, idle, PF_ANY)
*/
#define PAGE_MAPPING_ANON 0x1
#define PAGE_MAPPING_MOVABLE 0x2
-#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define PAGE_MAPPING_RONLY (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
static __always_inline int PageMappingFlags(struct page *page)
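
To make the encoding above concrete, here is a stand-alone userspace sketch (not kernel code; mapping_kind() and the fake pointer value are made up for the demonstration, only the macro values mirror the definitions above) that decodes the two low bits of page->mapping the same way the kernel does:

/* Stand-alone model of the low-bit encoding in page->mapping. */
#include <stdio.h>

#define PAGE_MAPPING_ANON	0x1UL
#define PAGE_MAPPING_MOVABLE	0x2UL
#define PAGE_MAPPING_RONLY	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

static const char *mapping_kind(unsigned long mapping)
{
	switch (mapping & PAGE_MAPPING_FLAGS) {
	case PAGE_MAPPING_RONLY:	return "read-only (KSM stable node)";
	case PAGE_MAPPING_ANON:		return "anonymous (anon_vma)";
	case PAGE_MAPPING_MOVABLE:	return "non-lru movable";
	default:			return "file-backed (address_space)";
	}
}

int main(void)
{
	unsigned long node = 0x100000UL;	/* fake, suitably aligned pointer */

	printf("%s\n", mapping_kind(node | PAGE_MAPPING_RONLY));
	printf("%s\n", mapping_kind(node | PAGE_MAPPING_ANON));
	return 0;
}
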
@@ -408,21 +408,25 @@ static __always_inline int __PageMovable(struct page *page)
PAGE_MAPPING_MOVABLE;
}
-#ifdef CONFIG_KSM
-/*
- * A KSM page is one of those write-protected "shared pages" or "merged pages"
- * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
- * anon_vma, but to that page's node of the stable tree.
+#ifdef CONFIG_PAGE_RONLY
+/**
+ * PageReadOnly() - Return true if the page is read-only, false otherwise.
+ * @page: Page under test.
+ *
+ * A read-only page is a write-protected page. Currently only KSM
+ * write-protects pages, as "shared pages" or "merged pages" which it maps
+ * into multiple mms wherever identical anonymous page content is found in
+ * VM_MERGEABLE vmas. Such a page is PageAnon, pointing not to any anon_vma,
+ * but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline int PageReadOnly(struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_KSM;
+ PAGE_MAPPING_RONLY;
}
#else
-TESTPAGEFLAG_FALSE(Ksm)
+TESTPAGEFLAG_FALSE(ReadOnly)
#endif
u64 stable_page_flags(struct page *page);
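
When CONFIG_PAGE_RONLY is off, TESTPAGEFLAG_FALSE(ReadOnly) provides the stub so all the call sites below compile unchanged. Roughly (a sketch of the expansion; the real macro lives in include/linux/page-flags.h and may differ in detail), it generates:

/* Approximate expansion of TESTPAGEFLAG_FALSE(ReadOnly): a constant-false
 * test for configurations without the feature. */
static inline int PageReadOnly(const struct page *page)
{
	return 0;
}
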
@@ -318,13 +318,13 @@ static void __init ksm_slab_free(void)
static inline struct stable_node *page_stable_node(struct page *page)
{
- return PageKsm(page) ? page_rmapping(page) : NULL;
+ return PageReadOnly(page) ? page_rmapping(page) : NULL;
}
static inline void set_page_stable_node(struct page *page,
struct stable_node *stable_node)
{
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_RONLY);
}
static __always_inline bool is_stable_node_chain(struct stable_node *chain)
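
set_page_stable_node() and page_stable_node() above are a pack/unpack pair: the stable_node pointer and the PAGE_MAPPING_RONLY tag share the single mapping word, which works because the node is at least 4-byte aligned, leaving the two low bits free. A stand-alone round-trip sketch (fake_page, fake_node, and the helper names are stand-ins; the masking matches what page_rmapping() does in the kernel):

/* Round-trip model of set_page_stable_node()/page_stable_node(). */
#include <assert.h>

#define PAGE_MAPPING_ANON	0x1UL
#define PAGE_MAPPING_MOVABLE	0x2UL
#define PAGE_MAPPING_RONLY	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

struct fake_node { int dummy; };
struct fake_page { void *mapping; };

static void set_stable_node(struct fake_page *page, struct fake_node *node)
{
	/* Tag the pointer: both low bits set marks a read-only page. */
	page->mapping = (void *)((unsigned long)node | PAGE_MAPPING_RONLY);
}

static struct fake_node *get_stable_node(struct fake_page *page)
{
	/* Strip the tag bits, as page_rmapping() does. */
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

int main(void)
{
	static struct fake_node node;	/* static storage: suitably aligned */
	struct fake_page page;

	set_stable_node(&page, &node);
	assert(get_stable_node(&page) == &node);
	return 0;
}
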
@@ -470,7 +470,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
if (IS_ERR_OR_NULL(page))
break;
- if (PageKsm(page))
+ if (PageReadOnly(page))
ret = handle_mm_fault(vma, addr,
FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
else
@@ -684,7 +684,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
unsigned long kpfn;
expected_mapping = (void *)((unsigned long)stable_node |
- PAGE_MAPPING_KSM);
+ PAGE_MAPPING_RONLY);
again:
kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
page = pfn_to_page(kpfn);
@@ -2490,7 +2490,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
struct anon_vma *anon_vma = page_anon_vma(page);
struct page *new_page;
- if (PageKsm(page)) {
+ if (PageReadOnly(page)) {
if (page_stable_node(page) &&
!(ksm_run & KSM_RUN_UNMERGE))
return page; /* no need to copy it */
@@ -2521,7 +2521,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
struct rmap_item *rmap_item;
int search_new_forks = 0;
- VM_BUG_ON_PAGE(!PageKsm(page), page);
+ VM_BUG_ON_PAGE(!PageReadOnly(page), page);
/*
* Rely on the page lock to protect against concurrent modifications
@@ -947,7 +947,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (!page_mapped(hpage))
return true;
- if (PageKsm(p)) {
+ if (PageReadOnly(p)) {
pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
return false;
}
@@ -2733,7 +2733,7 @@ static int do_wp_page(struct vm_fault *vmf)
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
- if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
+ if (PageAnon(vmf->page) && !PageReadOnly(vmf->page)) {
int total_map_swapcount;
if (!trylock_page(vmf->page)) {
get_page(vmf->page);
@@ -214,7 +214,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
- if (PageKsm(page))
+ if (PageReadOnly(page))
new = page;
else
new = page - pvmw.page->index +
@@ -1038,7 +1038,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* because that implies that the anon page is no longer mapped
* (and cannot be remapped so long as we hold the page lock).
*/
- if (PageAnon(page) && !PageKsm(page))
+ if (PageAnon(page) && !PageReadOnly(page))
anon_vma = page_get_anon_vma(page);
/*
@@ -1077,7 +1077,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
} else if (page_mapped(page)) {
/* Establish migration ptes */
- VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
+ VM_BUG_ON_PAGE(PageAnon(page) && !PageReadOnly(page) && !anon_vma,
page);
try_to_unmap(page,
TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
@@ -81,7 +81,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
struct page *page;
page = vm_normal_page(vma, addr, oldpte);
- if (!page || PageKsm(page))
+ if (!page || PageReadOnly(page))
continue;
/* Also skip shared copy-on-write pages */
@@ -104,7 +104,7 @@ static void page_idle_clear_pte_refs(struct page *page)
!page_rmapping(page))
return;
- need_lock = !PageAnon(page) || PageKsm(page);
+ need_lock = !PageAnon(page) || PageReadOnly(page);
if (need_lock && !trylock_page(page))
return;
@@ -855,7 +855,7 @@ int page_referenced(struct page *page,
if (!page_rmapping(page))
return 0;
- if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+ if (!is_locked && (!PageAnon(page) || PageReadOnly(page))) {
we_locked = trylock_page(page);
if (!we_locked)
return 1;
@@ -1122,7 +1122,7 @@ void do_page_add_anon_rmap(struct page *page,
__inc_node_page_state(page, NR_ANON_THPS);
__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
}
- if (unlikely(PageKsm(page)))
+ if (unlikely(PageReadOnly(page)))
return;
VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1660,7 +1660,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
* temporary VMAs until after exec() completes.
*/
if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
- && !PageKsm(page) && PageAnon(page))
+ && !PageReadOnly(page) && PageAnon(page))
rwc.invalid_vma = invalid_migration_vma;
if (flags & TTU_RMAP_LOCKED)
@@ -1842,7 +1842,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
- if (unlikely(PageKsm(page)))
+ if (unlikely(PageReadOnly(page)))
rmap_walk_ksm(page, rwc);
else if (PageAnon(page))
rmap_walk_anon(page, rwc, false);
@@ -1854,7 +1854,7 @@ void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
/* no ksm support for now */
- VM_BUG_ON_PAGE(PageKsm(page), page);
+ VM_BUG_ON_PAGE(PageReadOnly(page), page);
if (PageAnon(page))
rmap_walk_anon(page, rwc, true);
else
@@ -1552,7 +1552,7 @@ bool reuse_swap_page(struct page *page, int *total_map_swapcount)
int count, total_mapcount, total_swapcount;
VM_BUG_ON_PAGE(!PageLocked(page), page);
- if (unlikely(PageKsm(page)))
+ if (unlikely(PageReadOnly(page)))
return false;
count = page_trans_huge_map_swapcount(page, &total_mapcount,
&total_swapcount);