@@ -199,7 +199,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -246,7 +246,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
 
static inline int huge_pmd_unshare(struct mm_struct *mm,
struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
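--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c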
@@ -4945,7 +4945,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
unsigned long old_end = old_addr + len;
unsigned long last_addr_mask;
- unsigned long old_addr_copy;
pte_t *src_pte, *dst_pte;
struct mmu_notifier_range range;
bool shared_pmd = false;
@@ -4973,14 +4972,11 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
if (huge_pte_none(huge_ptep_get(src_pte)))
 			continue;
 
- /* old_addr arg to huge_pmd_unshare() is a pointer and so the
- * arg may be modified. Pass a copy instead to preserve the
- * value in old_addr.
- */
- old_addr_copy = old_addr;
-
- if (huge_pmd_unshare(mm, vma, &old_addr_copy, src_pte)) {
+ if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
shared_pmd = true;
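+			/* Skip to the last huge page of the unmapped PUD_SIZE area */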
+ old_addr |= last_addr_mask;
+ new_addr |= last_addr_mask;
continue;
 		}
 
@@ -5045,10 +5040,12 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		}
 
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+ if (huge_pmd_unshare(mm, vma, address, ptep)) {
spin_unlock(ptl);
tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
force_flush = true;
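+			/* Unshare unmapped a PUD_SIZE area; skip to its last huge page */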
+ address |= last_addr_mask;
continue;
 		}
 
@@ -6343,7 +6339,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
continue;
}
ptl = huge_pte_lock(h, mm, ptep);
- if (huge_pmd_unshare(mm, vma, &address, ptep)) {
+ if (huge_pmd_unshare(mm, vma, address, ptep)) {
/*
* When uffd-wp is enabled on the vma, unshare
* shouldn't happen at all. Warn about it if it
@@ -6353,6 +6349,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
pages++;
spin_unlock(ptl);
shared_pmd = true;
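+			/* Skip the remainder of the unshared PUD_SIZE area */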
+ address |= last_addr_mask;
continue;
}
pte = huge_ptep_get(ptep);
@@ -6776,11 +6773,11 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
* 0 the underlying pte page is not shared, or it is the last user
*/
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
- pgd_t *pgd = pgd_offset(mm, *addr);
- p4d_t *p4d = p4d_offset(pgd, *addr);
- pud_t *pud = pud_offset(p4d, *addr);
+ pgd_t *pgd = pgd_offset(mm, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+	pud_t *pud = pud_offset(p4d, addr);
 
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
BUG_ON(page_count(virt_to_page(ptep)) == 0);
@@ -6790,14 +6787,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
pud_clear(pud);
put_page(virt_to_page(ptep));
mm_dec_nr_pmds(mm);
- /*
- * This update of passed address optimizes loops sequentially
- * processing addresses in increments of huge page size (PMD_SIZE
- * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
- * Update address to the 'last page' in the cleared area so that
- * calling loop can move to first page past this area.
- */
- *addr |= PUD_SIZE - PMD_SIZE;
return 1;
 }
 
@@ -6809,7 +6798,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
@@ -6916,6 +6905,12 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
/* See description above. Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+	unsigned long hp_size = huge_page_size(h);
+
+	if (hp_size == PMD_SIZE)
+		return PUD_SIZE - PMD_SIZE;
+#endif
return 0UL;
 }
 
@@ -7142,14 +7137,11 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
mmu_notifier_invalidate_range_start(&range);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (address = start; address < end; address += PUD_SIZE) {
- unsigned long tmp = address;
-
ptep = huge_pte_offset(mm, address, sz);
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
- /* We don't want 'address' to be changed */
- huge_pmd_unshare(mm, vma, &tmp, ptep);
+ huge_pmd_unshare(mm, vma, address, ptep);
spin_unlock(ptl);
}
flush_hugetlb_tlb_range(vma, start, end);
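--- a/mm/rmap.c
+++ b/mm/rmap.c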
@@ -1559,7 +1559,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
- if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+ if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
flush_tlb_range(vma, range.start, range.end);
mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
 
@@ -1923,7 +1923,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
- if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+ if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
flush_tlb_range(vma, range.start, range.end);
mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
 