Message ID | 20240826204353.2228736-17-peterx@redhat.com (mailing list archive)
---|---
State | New, archived
Series | mm: Support huge pfnmaps
On Mon, Aug 26, 2024 at 2:44 PM Peter Xu <peterx@redhat.com> wrote:
>
> follow_pte() users have been converted to follow_pfnmap*(). Remove the
> API.
>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
>  include/linux/mm.h |  2 --
>  mm/memory.c        | 73 ----------------------------------------------
>  2 files changed, 75 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 161d496bfd18..b31d4bdd65ad 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2368,8 +2368,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
>  		unsigned long end, unsigned long floor, unsigned long ceiling);
>  int
>  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
> -int follow_pte(struct vm_area_struct *vma, unsigned long address,
> -	       pte_t **ptepp, spinlock_t **ptlp);
>  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
>  			void *buf, int len, int write);
>
> diff --git a/mm/memory.c b/mm/memory.c
> index b5d07f493d5d..288f81a8698e 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -6100,79 +6100,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
>  }
>  #endif /* __PAGETABLE_PMD_FOLDED */
>
> -/**
> - * follow_pte - look up PTE at a user virtual address
> - * @vma: the memory mapping
> - * @address: user virtual address
> - * @ptepp: location to store found PTE
> - * @ptlp: location to store the lock for the PTE
> - *
> - * On a successful return, the pointer to the PTE is stored in @ptepp;
> - * the corresponding lock is taken and its location is stored in @ptlp.
> - *
> - * The contents of the PTE are only stable until @ptlp is released using
> - * pte_unmap_unlock(). This function will fail if the PTE is non-present.
> - * Present PTEs may include PTEs that map refcounted pages, such as
> - * anonymous folios in COW mappings.
> - *
> - * Callers must be careful when relying on PTE content after
> - * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
> - * callers must protect against invalidation with MMU notifiers; otherwise
> - * access to the PFN at a later point in time can trigger use-after-free.
> - *
> - * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
> - * should be taken for read.
> - *
> - * This function must not be used to modify PTE content.
> - *
> - * Return: zero on success, -ve otherwise.
> - */
> -int follow_pte(struct vm_area_struct *vma, unsigned long address,
> -	       pte_t **ptepp, spinlock_t **ptlp)
> -{
> -	struct mm_struct *mm = vma->vm_mm;
> -	pgd_t *pgd;
> -	p4d_t *p4d;
> -	pud_t *pud;
> -	pmd_t *pmd;
> -	pte_t *ptep;
> -
> -	mmap_assert_locked(mm);
> -	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
> -		goto out;
> -
> -	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
> -		goto out;
> -
> -	pgd = pgd_offset(mm, address);
> -	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
> -		goto out;
> -
> -	p4d = p4d_offset(pgd, address);
> -	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
> -		goto out;
> -
> -	pud = pud_offset(p4d, address);
> -	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
> -		goto out;
> -
> -	pmd = pmd_offset(pud, address);
> -	VM_BUG_ON(pmd_trans_huge(*pmd));
> -
> -	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
> -	if (!ptep)
> -		goto out;
> -	if (!pte_present(ptep_get(ptep)))
> -		goto unlock;
> -	*ptepp = ptep;
> -	return 0;
> -unlock:
> -	pte_unmap_unlock(ptep, *ptlp);
> -out:
> -	return -EINVAL;
> -}
> -EXPORT_SYMBOL_GPL(follow_pte);

I ran into build errors with this -- removing exported symbols breaks
ABI, so I think we should make follow_pte() a wrapper of its new
equivalent, if that's possible?
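For illustration only, the compatibility wrapper suggested above might look roughly like the sketch below. It is not part of the series: it assumes the follow_pfnmap_args layout and the follow_pfnmap_start()/follow_pfnmap_end() pair introduced earlier in this series, and it hands back the args.ptep/args.lock fields that the new API documents as internal-only -- which is arguably the strongest argument against adding such a wrapper at all.

	/*
	 * Hypothetical shim, not from the series: rebuild follow_pte()
	 * on top of follow_pfnmap*().  Note it peeks at fields the new
	 * API treats as private to mm/memory.c.
	 */
	int follow_pte(struct vm_area_struct *vma, unsigned long address,
		       pte_t **ptepp, spinlock_t **ptlp)
	{
		struct follow_pfnmap_args args = {
			.vma = vma,
			.address = address,
		};
		int ret;

		ret = follow_pfnmap_start(&args);
		if (ret)
			return ret;

		/* Huge pfnmaps have no pte-level entry to hand back. */
		if (!args.ptep) {
			follow_pfnmap_end(&args);
			return -EINVAL;
		}

		/* Old-style contract: the caller does pte_unmap_unlock(). */
		*ptepp = args.ptep;
		*ptlp = args.lock;
		return 0;
	}
	EXPORT_SYMBOL_GPL(follow_pte);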
On 01.09.24 06:33, Yu Zhao wrote:
> On Mon, Aug 26, 2024 at 2:44 PM Peter Xu <peterx@redhat.com> wrote:
>>
>> follow_pte() users have been converted to follow_pfnmap*(). Remove the
>> API.
>>
>> Signed-off-by: Peter Xu <peterx@redhat.com>
>> ---
>>  include/linux/mm.h |  2 --
>>  mm/memory.c        | 73 ----------------------------------------------
>>  2 files changed, 75 deletions(-)
>>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 161d496bfd18..b31d4bdd65ad 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -2368,8 +2368,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
>>  		unsigned long end, unsigned long floor, unsigned long ceiling);
>>  int
>>  copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
>> -int follow_pte(struct vm_area_struct *vma, unsigned long address,
>> -	       pte_t **ptepp, spinlock_t **ptlp);
>>  int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
>>  			void *buf, int len, int write);
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index b5d07f493d5d..288f81a8698e 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -6100,79 +6100,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
>>  }
>>  #endif /* __PAGETABLE_PMD_FOLDED */
>>
>> -/**
>> - * follow_pte - look up PTE at a user virtual address
>> - * @vma: the memory mapping
>> - * @address: user virtual address
>> - * @ptepp: location to store found PTE
>> - * @ptlp: location to store the lock for the PTE
>> - *
>> - * On a successful return, the pointer to the PTE is stored in @ptepp;
>> - * the corresponding lock is taken and its location is stored in @ptlp.
>> - *
>> - * The contents of the PTE are only stable until @ptlp is released using
>> - * pte_unmap_unlock(). This function will fail if the PTE is non-present.
>> - * Present PTEs may include PTEs that map refcounted pages, such as
>> - * anonymous folios in COW mappings.
>> - *
>> - * Callers must be careful when relying on PTE content after
>> - * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
>> - * callers must protect against invalidation with MMU notifiers; otherwise
>> - * access to the PFN at a later point in time can trigger use-after-free.
>> - *
>> - * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
>> - * should be taken for read.
>> - *
>> - * This function must not be used to modify PTE content.
>> - *
>> - * Return: zero on success, -ve otherwise.
>> - */
>> -int follow_pte(struct vm_area_struct *vma, unsigned long address,
>> -	       pte_t **ptepp, spinlock_t **ptlp)
>> -{
>> -	struct mm_struct *mm = vma->vm_mm;
>> -	pgd_t *pgd;
>> -	p4d_t *p4d;
>> -	pud_t *pud;
>> -	pmd_t *pmd;
>> -	pte_t *ptep;
>> -
>> -	mmap_assert_locked(mm);
>> -	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
>> -		goto out;
>> -
>> -	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
>> -		goto out;
>> -
>> -	pgd = pgd_offset(mm, address);
>> -	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
>> -		goto out;
>> -
>> -	p4d = p4d_offset(pgd, address);
>> -	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
>> -		goto out;
>> -
>> -	pud = pud_offset(p4d, address);
>> -	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
>> -		goto out;
>> -
>> -	pmd = pmd_offset(pud, address);
>> -	VM_BUG_ON(pmd_trans_huge(*pmd));
>> -
>> -	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
>> -	if (!ptep)
>> -		goto out;
>> -	if (!pte_present(ptep_get(ptep)))
>> -		goto unlock;
>> -	*ptepp = ptep;
>> -	return 0;
>> -unlock:
>> -	pte_unmap_unlock(ptep, *ptlp);
>> -out:
>> -	return -EINVAL;
>> -}
>> -EXPORT_SYMBOL_GPL(follow_pte);
>
> I ran into build errors with this -- removing exported symbols breaks
> ABI, so I think we should make follow_pte() a wrapper of its new
> equivalent, if that's possible?

Build error with OOT modules or in-tree modules?

If you are talking about OOT modules, it is their responsibility to fix
this up in their implementation. There are no real kABI stability
guarantees provided by the kernel.

If you are talking about in-tree modules, did Peter miss some (probably
in -next?)?
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 161d496bfd18..b31d4bdd65ad 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2368,8 +2368,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
-	       pte_t **ptepp, spinlock_t **ptlp);
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
diff --git a/mm/memory.c b/mm/memory.c
index b5d07f493d5d..288f81a8698e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6100,79 +6100,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-/**
- * follow_pte - look up PTE at a user virtual address
- * @vma: the memory mapping
- * @address: user virtual address
- * @ptepp: location to store found PTE
- * @ptlp: location to store the lock for the PTE
- *
- * On a successful return, the pointer to the PTE is stored in @ptepp;
- * the corresponding lock is taken and its location is stored in @ptlp.
- *
- * The contents of the PTE are only stable until @ptlp is released using
- * pte_unmap_unlock(). This function will fail if the PTE is non-present.
- * Present PTEs may include PTEs that map refcounted pages, such as
- * anonymous folios in COW mappings.
- *
- * Callers must be careful when relying on PTE content after
- * pte_unmap_unlock(). Especially if the PTE maps a refcounted page,
- * callers must protect against invalidation with MMU notifiers; otherwise
- * access to the PFN at a later point in time can trigger use-after-free.
- *
- * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
- * should be taken for read.
- *
- * This function must not be used to modify PTE content.
- *
- * Return: zero on success, -ve otherwise.
- */
-int follow_pte(struct vm_area_struct *vma, unsigned long address,
-	       pte_t **ptepp, spinlock_t **ptlp)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
-
-	mmap_assert_locked(mm);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
-		goto out;
-
-	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-		goto out;
-
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto out;
-
-	p4d = p4d_offset(pgd, address);
-	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
-		goto out;
-
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto out;
-
-	pmd = pmd_offset(pud, address);
-	VM_BUG_ON(pmd_trans_huge(*pmd));
-
-	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
-	if (!ptep)
-		goto out;
-	if (!pte_present(ptep_get(ptep)))
-		goto unlock;
-	*ptepp = ptep;
-	return 0;
-unlock:
-	pte_unmap_unlock(ptep, *ptlp);
-out:
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(follow_pte);
-
 static inline void pfnmap_args_setup(struct follow_pfnmap_args *args,
 				     spinlock_t *lock, pte_t *ptep,
 				     pgprot_t pgprot, unsigned long pfn_base,
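For reference, the callers converted earlier in the series end up with roughly the pattern below: take the walk, copy out what you need, drop it. This is a sketch rather than code from the series; lookup_pfn() is a made-up helper, and the follow_pfnmap_start()/follow_pfnmap_end() names and args fields come from the new API patch in this series.

	/* Look up the PFN behind a VM_IO/VM_PFNMAP address without ever
	 * touching the page-table entry directly. */
	static int lookup_pfn(struct vm_area_struct *vma, unsigned long addr,
			      unsigned long *pfn)
	{
		struct follow_pfnmap_args args = {
			.vma = vma,
			.address = addr,
		};

		if (follow_pfnmap_start(&args))
			return -EINVAL;

		/* Outputs are only stable until follow_pfnmap_end(). */
		*pfn = args.pfn;

		follow_pfnmap_end(&args);
		return 0;
	}

Because the PTE pointer and its lock never leave mm/memory.c, the same caller code works whether the address is mapped by a PTE, a huge PMD, or a huge PUD.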
follow_pte() users have been converted to follow_pfnmap*(). Remove the
API.

Signed-off-by: Peter Xu <peterx@redhat.com>
---
 include/linux/mm.h |  2 --
 mm/memory.c        | 73 ----------------------------------------------
 2 files changed, 75 deletions(-)