@@ -13,12 +13,12 @@
/*
* If our huge pte is non-zero then mark the valid bit.
- * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * This allows pte_present(huge_ptep_get(mm, addr, ptep)) to return true for non-zero
* ptes.
* (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
*/
#define __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t retval = *ptep;
if (pte_val(retval))
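
Illustrative sketch, not part of the patch: the helper above re-adds the valid bit when reading, so any non-zero huge pte (including a PROT_NONE mapping, whose valid bit set_pte_at cleared in memory) still satisfies pte_present(). The helper name below is hypothetical; it only mirrors the huge_pte_none()/pte_present() checks at call sites converted later in this patch.

static inline bool hugetlb_entry_populated(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(mm, addr, ptep);

	/* the valid bit is set on read for any non-zero entry */
	return !huge_pte_none(pte) && pte_present(pte);
}
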
@@ -46,7 +46,7 @@ extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET
-extern pte_t huge_ptep_get(pte_t *ptep);
+extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void __init arm64_hugetlb_cma_reserve(void);
@@ -127,7 +127,7 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
return contig_ptes;
}
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
int ncontig, i;
size_t pgsize;
@@ -44,7 +44,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET
-pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
@@ -3,7 +3,7 @@
#include <linux/err.h>
#ifdef CONFIG_RISCV_ISA_SVNAPOT
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long pte_num;
int i;
@@ -19,7 +19,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz);
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -64,7 +64,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- int changed = !pte_same(huge_ptep_get(ptep), pte);
+ int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
@@ -169,7 +169,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
__set_huge_pte_at(mm, addr, ptep, pte);
}
-pte_t huge_ptep_get(pte_t *ptep)
+pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
return __rste_to_pte(pte_val(*ptep));
}
@@ -177,7 +177,7 @@ pte_t huge_ptep_get(pte_t *ptep)
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- pte_t pte = huge_ptep_get(ptep);
+ pte_t pte = huge_ptep_get(mm, addr, ptep);
pmd_t *pmdp = (pmd_t *) ptep;
pud_t *pudp = (pud_t *) ptep;
@@ -422,7 +422,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
if (!ptep)
return false;
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(vma->vm_mm, addr, ptep);
if (huge_pte_none(pte) || !pte_present(pte))
return false;
@@ -730,7 +730,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
- pte_t ptent = huge_ptep_get(pte);
+ pte_t ptent = huge_ptep_get(walk->mm, addr, pte);
struct folio *folio = NULL;
if (pte_present(ptent)) {
@@ -1582,7 +1582,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(walk->mm, addr, ptep);
if (pte_present(pte)) {
struct folio *folio = page_folio(pte_page(pte));
@@ -2271,7 +2271,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
if (~p->arg.flags & PM_SCAN_WP_MATCHING) {
/* Go the short route when not write-protecting pages. */
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(walk->mm, start, ptep);
categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
if (!pagemap_scan_is_interesting_page(categories, p))
@@ -2283,7 +2283,7 @@ static int pagemap_scan_hugetlb_entry(pte_t *ptep, unsigned long hmask,
i_mmap_lock_write(vma->vm_file->f_mapping);
ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, ptep);
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(walk->mm, start, ptep);
categories = p->cur_vma_category | pagemap_hugetlb_category(pte);
if (!pagemap_scan_is_interesting_page(categories, p))
@@ -2679,7 +2679,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
- pte_t huge_pte = huge_ptep_get(pte);
+ pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte);
struct numa_maps *md;
struct page *page;
@@ -257,7 +257,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
goto out;
ret = false;
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(vma->vm_mm, vmf->address, ptep);
/*
* Lockless access: we're in a wait_event so it's ok if it
@@ -144,7 +144,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
return ptep_get(ptep);
}
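
For orientation, a minimal sketch (not from the patch) of a pagewalk hugetlb callback under the new prototype; the callback name and body are hypothetical, but the argument sources (walk->mm plus the per-entry address) match the hugetlb_entry callbacks converted elsewhere in this patch.

static int example_hugetlb_entry(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	pte_t pte = huge_ptep_get(walk->mm, addr, ptep);

	if (!pte_present(pte))
		return 0;

	/* inspect page_folio(pte_page(pte)) here */
	return 0;
}
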
@@ -334,7 +334,7 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
@@ -359,7 +359,7 @@ static inline int is_migration_entry(swp_entry_t swp)
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
- pte_t *pte) { }
+ unsigned long addr, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
@@ -339,7 +339,7 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long addr)
{
bool referenced = false;
- pte_t entry = huge_ptep_get(pte);
+ pte_t entry = huge_ptep_get(mm, addr, pte);
struct folio *folio = pfn_folio(pte_pfn(entry));
unsigned long psize = huge_page_size(hstate_vma(vma));
@@ -373,7 +373,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(h, walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry))
goto out;
@@ -509,7 +509,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(h, walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry))
goto out;
@@ -547,7 +547,7 @@ static int gup_hugepte(struct vm_area_struct *vma, pte_t *ptep, unsigned long sz
if (pte_end < end)
end = pte_end;
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(vma->vm_mm, addr, ptep);
if (!pte_access_permitted(pte, flags & FOLL_WRITE))
return 0;
@@ -480,7 +480,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
i = (start - range->start) >> PAGE_SHIFT;
pfn_req_flags = range->hmm_pfns[i];
@@ -5315,7 +5315,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
{
pte_t entry;
- entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
+ entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep);
}
@@ -5423,7 +5423,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- entry = huge_ptep_get(src_pte);
+ entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
again:
if (huge_pte_none(entry)) {
/*
@@ -5461,7 +5461,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
set_huge_pte_at(dst, addr, dst_pte,
make_pte_marker(marker), sz);
} else {
- entry = huge_ptep_get(src_pte);
+ entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
pte_folio = page_folio(pte_page(entry));
folio_get(pte_folio);
@@ -5503,7 +5503,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
- entry = huge_ptep_get(src_pte);
+ entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
if (!pte_same(src_pte_old, entry)) {
restore_reserve_on_error(h, dst_vma, addr,
new_folio);
@@ -5613,7 +5613,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
new_addr |= last_addr_mask;
continue;
}
- if (huge_pte_none(huge_ptep_get(src_pte)))
+ if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
continue;
if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
@@ -5686,7 +5686,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
continue;
}
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(mm, address, ptep);
if (huge_pte_none(pte)) {
spin_unlock(ptl);
continue;
@@ -5935,7 +5935,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
- pte_t pte = huge_ptep_get(vmf->pte);
+ pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
struct hstate *h = hstate_vma(vma);
struct folio *old_folio;
struct folio *new_folio;
@@ -6056,7 +6056,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
vmf->pte = hugetlb_walk(vma, vmf->address,
huge_page_size(h));
if (likely(vmf->pte &&
- pte_same(huge_ptep_get(vmf->pte), pte)))
+ pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
goto retry_avoidcopy;
/*
* race occurs while re-acquiring page table
@@ -6094,7 +6094,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
*/
spin_lock(vmf->ptl);
vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
- if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
+ if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
/* Break COW or unshare */
@@ -6195,14 +6195,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
* Recheck pte with pgtable lock. Returns true if pte didn't change, or
* false if pte changed or is changing.
*/
-static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
+static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t old_pte)
{
spinlock_t *ptl;
bool same;
ptl = huge_pte_lock(h, mm, ptep);
- same = pte_same(huge_ptep_get(ptep), old_pte);
+ same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
spin_unlock(ptl);
return same;
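
Usage sketch for the helper above, assuming the hugetlb_no_page() error-path context shown in the following hunks (folio, ret and the fault retry come from there, with explanatory comments added): the fault address must now be threaded through so the recheck can forward it to huge_ptep_get().

	/* e.g. after a failed hugetlb folio allocation */
	if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
		ret = vmf_error(PTR_ERR(folio));	/* pte unchanged, the failure is real */
	else
		ret = 0;				/* pte changed, just let the fault retry */
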
@@ -6263,7 +6263,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* never happen on the page after UFFDIO_COPY has
* correctly installed the page and returned.
*/
- if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+ if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
ret = 0;
goto out;
}
@@ -6292,7 +6292,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* here. Before returning error, get ptl and make
* sure there really is no pte entry.
*/
- if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
+ if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
ret = vmf_error(PTR_ERR(folio));
else
ret = 0;
@@ -6342,7 +6342,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
folio_unlock(folio);
folio_put(folio);
/* See comment in userfaultfd_missing() block above */
- if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
+ if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
ret = 0;
goto out;
}
@@ -6369,7 +6369,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
ret = 0;
/* If pte changed from under us, retry */
- if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
+ if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
goto backout;
if (anon_rmap)
@@ -6490,7 +6490,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
}
- vmf.orig_pte = huge_ptep_get(vmf.pte);
+ vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
if (huge_pte_none_mostly(vmf.orig_pte)) {
if (is_pte_marker(vmf.orig_pte)) {
pte_marker marker =
@@ -6531,7 +6531,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* be released there.
*/
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- migration_entry_wait_huge(vma, vmf.pte);
+ migration_entry_wait_huge(vma, vmf.address, vmf.pte);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
ret = VM_FAULT_HWPOISON_LARGE |
@@ -6564,11 +6564,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
/* Check for a racing update before calling hugetlb_wp() */
- if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(vmf.pte))))
+ if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
goto out_ptl;
/* Handle userfault-wp first, before trying to lock more pages */
- if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(vmf.pte)) &&
+ if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
(flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
if (!userfaultfd_wp_async(vma)) {
spin_unlock(vmf.ptl);
@@ -6696,7 +6696,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
ptl = huge_pte_lock(h, dst_mm, dst_pte);
/* Don't overwrite any existing PTEs (even markers) */
- if (!huge_pte_none(huge_ptep_get(dst_pte))) {
+ if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
spin_unlock(ptl);
return -EEXIST;
}
@@ -6833,7 +6833,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* page backing it, then access the page.
*/
ret = -EEXIST;
- if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
+ if (!huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte)))
goto out_release_unlock;
if (folio_in_pagecache)
@@ -6954,7 +6954,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
address |= last_addr_mask;
continue;
}
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(mm, address, ptep);
if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
/* Nothing to do. */
} else if (unlikely(is_hugetlb_entry_migration(pte))) {
@@ -834,7 +834,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
struct mm_walk *walk)
{
struct hwpoison_walk *hwp = walk->private;
- pte_t pte = huge_ptep_get(ptep);
+ pte_t pte = huge_ptep_get(walk->mm, addr, ptep);
struct hstate *h = hstate_vma(walk->vma);
return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
@@ -624,7 +624,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
pte_t entry;
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
- entry = huge_ptep_get(pte);
+ entry = huge_ptep_get(walk->mm, addr, pte);
if (!pte_present(entry)) {
if (unlikely(is_hugetlb_entry_migration(entry)))
qp->nr_failed++;
@@ -338,14 +338,14 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
*
* This function will release the vma lock before returning.
*/
-void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
+void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
pte_t pte;
hugetlb_vma_assert_locked(vma);
spin_lock(ptl);
- pte = huge_ptep_get(ptep);
+ pte = huge_ptep_get(vma->vm_mm, addr, ptep);
if (unlikely(!is_hugetlb_entry_migration(pte))) {
spin_unlock(ptl);
@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
* Hugepages under user process are always in RAM and never
* swapped out, but theoretically it needs to be checked.
*/
- present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
+ present = pte && !huge_pte_none_mostly(huge_ptep_get(walk->mm, addr, pte));
for (; addr != end; vec++, addr += PAGE_SIZE)
*vec = present;
walk->private = vec;
@@ -587,7 +587,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
}
if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
- !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
+ !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
err = -EEXIST;
hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
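
As a quick reference, distilled from the call sites above rather than from any new API, the three recurring argument patterns for the extended huge_ptep_get() are sketched below; the variable names are the ones used in the respective hunks.

	/* pagewalk callbacks */
	pte = huge_ptep_get(walk->mm, addr, ptep);

	/* fault-handling paths carrying a struct vm_fault */
	pte = huge_ptep_get(vma->vm_mm, vmf->address, vmf->pte);

	/* per-mm loops (copy, move, unmap, change_protection) */
	pte = huge_ptep_get(mm, address, ptep);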