@@ -213,6 +213,12 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+static inline void pvmw_set_page(struct page_vma_mapped_walk *pvmw,
+ struct page *page)
+{
+ pvmw->page = page;
+}
+
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
@@ -156,13 +156,13 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = compound_head(old_page),
.vma = vma,
.address = addr,
};
int err;
struct mmu_notifier_range range;
+ pvmw_set_page(&pvmw, compound_head(old_page));
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
addr + PAGE_SIZE);
@@ -20,11 +20,11 @@ static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = addr,
};
+ pvmw_set_page(&pvmw, page);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte)
@@ -94,11 +94,11 @@ static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
{
struct damon_pa_access_chk_result *result = arg;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = addr,
};
+ pvmw_set_page(&pvmw, page);
result->accessed = false;
result->page_sz = PAGE_SIZE;
while (page_vma_mapped_walk(&pvmw)) {
@@ -1035,13 +1035,13 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
};
int swapped;
int err = -EFAULT;
struct mmu_notifier_range range;
+ pvmw_set_page(&pvmw, page);
pvmw.address = page_address_in_vma(page, vma);
if (pvmw.address == -EFAULT)
goto out;
@@ -177,7 +177,6 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
{
struct page_vma_mapped_walk pvmw = {
- .page = old,
.vma = vma,
.address = addr,
.flags = PVMW_SYNC | PVMW_MIGRATION,
@@ -187,6 +186,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
swp_entry_t entry;
VM_BUG_ON_PAGE(PageTail(page), page);
+ pvmw_set_page(&pvmw, old);
while (page_vma_mapped_walk(&pvmw)) {
if (PageKsm(page))
new = page;
@@ -49,12 +49,12 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
unsigned long addr, void *arg)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = addr,
};
bool referenced = false;
+ pvmw_set_page(&pvmw, page);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
@@ -803,12 +803,12 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
{
struct page_referenced_arg *pra = arg;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
int referenced = 0;
+ pvmw_set_page(&pvmw, page);
while (page_vma_mapped_walk(&pvmw)) {
address = pvmw.address;
@@ -932,7 +932,6 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
.flags = PVMW_SYNC,
@@ -940,6 +939,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
int *cleaned = arg;
+ pvmw_set_page(&pvmw, page);
/*
* We have to assume the worse case ie pmd for invalidation. Note that
* the page can not be free from this function.
@@ -1423,7 +1423,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
@@ -1433,6 +1432,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ pvmw_set_page(&pvmw, page);
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
@@ -1723,7 +1723,6 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
@@ -1733,6 +1732,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
+ pvmw_set_page(&pvmw, page);
/*
* When racing against e.g. zap_pte_range() on another cpu,
* in between its ptep_get_and_clear_full() and page_remove_rmap(),
@@ -2003,11 +2003,11 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *unused)
{
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
+ pvmw_set_page(&pvmw, page);
/* An un-locked vma doesn't have any pages to lock, continue the scan */
if (!(vma->vm_flags & VM_LOCKED))
return true;
@@ -2078,7 +2078,6 @@ static bool page_make_device_exclusive_one(struct page *page,
{
struct mm_struct *mm = vma->vm_mm;
struct page_vma_mapped_walk pvmw = {
- .page = page,
.vma = vma,
.address = address,
};
@@ -2090,6 +2089,7 @@ static bool page_make_device_exclusive_one(struct page *page,
swp_entry_t entry;
pte_t swp_pte;
+ pvmw_set_page(&pvmw, page);
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
vma->vm_mm, address, min(vma->vm_end,
address + page_size(page)), args->owner);
Instead of setting the page directly in struct page_vma_mapped_walk, use
this helper to allow us to transition to a PFN approach in the next
patch.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/rmap.h    |  6 ++++++
 kernel/events/uprobes.c |  2 +-
 mm/damon/paddr.c        |  4 ++--
 mm/ksm.c                |  2 +-
 mm/migrate.c            |  2 +-
 mm/page_idle.c          |  2 +-
 mm/rmap.c               | 12 ++++++------
 7 files changed, 18 insertions(+), 12 deletions(-)
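
As an aside (not part of the patch), here is a minimal sketch of the
conversion pattern the hunks above apply. rmap_walk_example() is a
hypothetical caller invented for illustration; pvmw_set_page(),
page_vma_mapped_walk(), page_vma_mapped_walk_done() and
struct page_vma_mapped_walk are the interfaces actually touched by this
series:

	/*
	 * Hypothetical caller, mirroring the pattern applied to
	 * page_referenced_one(), try_to_unmap_one(), etc.
	 */
	static bool rmap_walk_example(struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address)
	{
		struct page_vma_mapped_walk pvmw = {
			/* Before this patch: .page = page, set directly here. */
			.vma = vma,
			.address = address,
		};

		/* After this patch: callers go through the helper instead. */
		pvmw_set_page(&pvmw, page);

		while (page_vma_mapped_walk(&pvmw)) {
			/* ... inspect pvmw.pte / pvmw.pmd for each mapping ... */
			if (!pvmw.pte) {
				/* Early exit must release the walk state. */
				page_vma_mapped_walk_done(&pvmw);
				return false;
			}
		}
		return true;
	}

Funnelling every assignment through one helper means the next patch only
has to change pvmw_set_page() (and the struct member it writes) rather
than each of the callers converted above.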