
[2/3] userfaultfd: UFFDIO_REMAP uABI

Message ID 20230914152620.2743033-3-surenb@google.com (mailing list archive)
State New, archived
Series userfaultfd remap option

Commit Message

Suren Baghdasaryan Sept. 14, 2023, 3:26 p.m. UTC
From: Andrea Arcangeli <aarcange@redhat.com>

This implements the uABI of UFFDIO_REMAP.

Notably one mode bitflag is also forwarded (and in turn known) by the
lowlevel remap_pages method.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 fs/userfaultfd.c                 |  49 +++
 include/linux/rmap.h             |   5 +
 include/linux/userfaultfd_k.h    |  17 +
 include/uapi/linux/userfaultfd.h |  22 ++
 mm/huge_memory.c                 | 118 +++++++
 mm/khugepaged.c                  |   3 +
 mm/userfaultfd.c                 | 586 +++++++++++++++++++++++++++++++
 7 files changed, 800 insertions(+)

Comments

Matthew Wilcox (Oracle) Sept. 14, 2023, 6:11 p.m. UTC | #1
On Thu, Sep 14, 2023 at 08:26:12AM -0700, Suren Baghdasaryan wrote:
> +++ b/include/linux/userfaultfd_k.h
> @@ -93,6 +93,23 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
>  extern long uffd_wp_range(struct vm_area_struct *vma,
>  			  unsigned long start, unsigned long len, bool enable_wp);
>  
> +/* remap_pages */
> +extern void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
> +extern void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
> +extern ssize_t remap_pages(struct mm_struct *dst_mm,
> +			   struct mm_struct *src_mm,
> +			   unsigned long dst_start,
> +			   unsigned long src_start,
> +			   unsigned long len, __u64 flags);
> +extern int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> +				struct mm_struct *src_mm,
> +				pmd_t *dst_pmd, pmd_t *src_pmd,
> +				pmd_t dst_pmdval,
> +				struct vm_area_struct *dst_vma,
> +				struct vm_area_struct *src_vma,
> +				unsigned long dst_addr,
> +				unsigned long src_addr);

Drop the 'extern' markers from function declarations.

> +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> +			 struct mm_struct *src_mm,
> +			 pmd_t *dst_pmd, pmd_t *src_pmd,
> +			 pmd_t dst_pmdval,
> +			 struct vm_area_struct *dst_vma,
> +			 struct vm_area_struct *src_vma,
> +			 unsigned long dst_addr,
> +			 unsigned long src_addr)
> +{
> +	pmd_t _dst_pmd, src_pmdval;
> +	struct page *src_page;
> +	struct anon_vma *src_anon_vma, *dst_anon_vma;
> +	spinlock_t *src_ptl, *dst_ptl;
> +	pgtable_t pgtable;
> +	struct mmu_notifier_range range;
> +
> +	src_pmdval = *src_pmd;
> +	src_ptl = pmd_lockptr(src_mm, src_pmd);
> +
> +	BUG_ON(!pmd_trans_huge(src_pmdval));
> +	BUG_ON(!pmd_none(dst_pmdval));
> +	BUG_ON(!spin_is_locked(src_ptl));
> +	mmap_assert_locked(src_mm);
> +	mmap_assert_locked(dst_mm);
> +	BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> +	BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> +
> +	src_page = pmd_page(src_pmdval);
> +	BUG_ON(!PageHead(src_page));
> +	BUG_ON(!PageAnon(src_page));

Better to add a src_folio = page_folio(src_page);
and then folio_test_anon() here.

> +	if (unlikely(page_mapcount(src_page) != 1)) {

Brr, this is going to miss PTE mappings of this folio.  I think you
actually want folio_mapcount() instead, although it'd be more efficient
to look at folio->_entire_mapcount == 1 and _nr_pages_mapped == 0.
Not sure what a good name for that predicate would be.
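
For illustration, a minimal sketch of that conversion, keeping the simpler
folio_mapcount() form and the existing -EBUSY return (the more efficient
_entire_mapcount/_nr_pages_mapped fast path is left open here):

        src_folio = page_folio(src_page);
        BUG_ON(!folio_test_anon(src_folio));

        /* folio_mapcount() also counts PTE mappings of the subpages */
        if (unlikely(folio_mapcount(src_folio) != 1)) {
                spin_unlock(src_ptl);
                return -EBUSY;
        }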

> +	get_page(src_page);

folio_get()

> +	/* block all concurrent rmap walks */
> +	lock_page(src_page);

folio_lock()

> +	/*
> +	 * split_huge_page walks the anon_vma chain without the page
> +	 * lock. Serialize against it with the anon_vma lock, the page
> +	 * lock is not enough.
> +	 */
> +	src_anon_vma = folio_get_anon_vma(page_folio(src_page));

...

> +	if (!src_anon_vma) {
> +		unlock_page(src_page);

folio_unlock()

> +		put_page(src_page);

folio_put()

> +	if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
> +		     !pmd_same(*dst_pmd, dst_pmdval) ||
> +		     page_mapcount(src_page) != 1)) {

similar folio_total_mapcount()

> +		double_pt_unlock(src_ptl, dst_ptl);
> +		anon_vma_unlock_write(src_anon_vma);
> +		put_anon_vma(src_anon_vma);
> +		unlock_page(src_page);
> +		put_page(src_page);

...

> +	BUG_ON(!PageHead(src_page));
> +	BUG_ON(!PageAnon(src_page));
> +	/* the PT lock is enough to keep the page pinned now */
> +	put_page(src_page);

...

> +	dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> +	WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
> +	WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));

Definitely want to use the folio here.
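
Assuming the src_folio local suggested earlier, that would read roughly:

        WRITE_ONCE(src_folio->mapping, (struct address_space *) dst_anon_vma);
        WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));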

> +	/* unblock rmap walks */
> +	unlock_page(src_page);

...

> +static int remap_pages_pte(struct mm_struct *dst_mm,
> +			   struct mm_struct *src_mm,
> +			   pmd_t *dst_pmd,
> +			   pmd_t *src_pmd,
> +			   struct vm_area_struct *dst_vma,
> +			   struct vm_area_struct *src_vma,
> +			   unsigned long dst_addr,
> +			   unsigned long src_addr,
> +			   __u64 mode)
> +{
> +	swp_entry_t entry;
> +	pte_t orig_src_pte, orig_dst_pte;
> +	spinlock_t *src_ptl, *dst_ptl;
> +	pte_t *src_pte = NULL;
> +	pte_t *dst_pte = NULL;
> +
> +	struct folio *src_folio = NULL;
> +	struct anon_vma *src_anon_vma = NULL;
> +	struct anon_vma *dst_anon_vma;
> +	struct mmu_notifier_range range;
> +	int err = 0;
> +
> +retry:
> +	dst_pte = pte_offset_map_nolock(dst_mm, dst_pmd, dst_addr, &dst_ptl);
> +
> +	/* If a huge pmd materialized from under us, fail */
> +	if (unlikely(!dst_pte)) {
> +		err = -EFAULT;
> +		goto out;
> +	}
> +
> +	src_pte = pte_offset_map_nolock(src_mm, src_pmd, src_addr, &src_ptl);
> +
> +	/*
> +	 * We held the mmap_lock for reading so MADV_DONTNEED
> +	 * can zap transparent huge pages under us, or the
> +	 * transparent huge page fault can establish new
> +	 * transparent huge pages under us.
> +	 */
> +	if (unlikely(!src_pte)) {
> +		err = -EFAULT;
> +		goto out;
> +	}
> +
> +	BUG_ON(pmd_none(*dst_pmd));
> +	BUG_ON(pmd_none(*src_pmd));
> +	BUG_ON(pmd_trans_huge(*dst_pmd));
> +	BUG_ON(pmd_trans_huge(*src_pmd));
> +
> +	spin_lock(dst_ptl);
> +	orig_dst_pte = *dst_pte;
> +	spin_unlock(dst_ptl);
> +	if (!pte_none(orig_dst_pte)) {
> +		err = -EEXIST;
> +		goto out;
> +	}
> +
> +	spin_lock(src_ptl);
> +	orig_src_pte = *src_pte;
> +	spin_unlock(src_ptl);
> +	if (pte_none(orig_src_pte)) {
> +		if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES))
> +			err = -ENOENT;
> +		else /* nothing to do to remap a hole */
> +			err = 0;
> +		goto out;
> +	}
> +
> +	if (pte_present(orig_src_pte)) {
> +		if (!src_folio) {
> +			struct folio *folio;
> +
> +			/*
> +			 * Pin the page while holding the lock to be sure the
> +			 * page isn't freed under us
> +			 */
> +			spin_lock(src_ptl);
> +			if (!pte_same(orig_src_pte, *src_pte)) {
> +				spin_unlock(src_ptl);
> +				err = -EAGAIN;
> +				goto out;
> +			}
> +
> +			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
> +			if (!folio || !folio_test_anon(folio) ||
> +			    folio_estimated_sharers(folio) != 1) {

I wonder if we also want to fail if folio_test_large()?  While we don't
have large anon folios today, it seems to me that trying to migrate one
of them like this is just not going to work.
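
For illustration, a sketch of the check with that refusal added; the -EBUSY
failure path is an assumption about how the existing error handling in
remap_pages_pte() continues:

        folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
        if (!folio || !folio_test_anon(folio) ||
            folio_test_large(folio) ||
            folio_estimated_sharers(folio) != 1) {
                spin_unlock(src_ptl);
                err = -EBUSY;   /* assumed to match the existing failure path */
                goto out;
        }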
David Hildenbrand Sept. 14, 2023, 6:43 p.m. UTC | #2
On 14.09.23 20:11, Matthew Wilcox wrote:
> On Thu, Sep 14, 2023 at 08:26:12AM -0700, Suren Baghdasaryan wrote:
[...]
>> +	if (unlikely(page_mapcount(src_page) != 1)) {
> 
> Brr, this is going to miss PTE mappings of this folio.  I think you
> actually want folio_mapcount() instead, although it'd be more efficient
> to look at folio->_entire_mapcount == 1 and _nr_pages_mapped == 0.
> Not sure what a good name for that predicate would be.

We have

  * It only works on non shared anonymous pages because those can
  * be relocated without generating non linear anon_vmas in the rmap
  * code.
  *
  * It provides a zero copy mechanism to handle userspace page faults.
  * The source vma pages should have mapcount == 1, which can be
  * enforced by using madvise(MADV_DONTFORK) on src vma.

Use PageAnonExclusive(). As long as KSM is not involved and you don't 
use fork(), that flag should be good enough for that use case here.
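
A sketch of what that could look like in remap_pages_huge_pmd(), reusing the
existing -EBUSY path and only swapping the predicate:

        /* Only exclusive (non-shared, non-KSM) anon pages can be moved */
        if (unlikely(!PageAnonExclusive(src_page))) {
                spin_unlock(src_ptl);
                return -EBUSY;
        }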

[...]

>> +			/*
>> +			 * Pin the page while holding the lock to be sure the
>> +			 * page isn't freed under us
>> +			 */
>> +			spin_lock(src_ptl);
>> +			if (!pte_same(orig_src_pte, *src_pte)) {
>> +				spin_unlock(src_ptl);
>> +				err = -EAGAIN;
>> +				goto out;
>> +			}
>> +
>> +			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
>> +			if (!folio || !folio_test_anon(folio) ||
>> +			    folio_estimated_sharers(folio) != 1) {
> 
> I wonder if we also want to fail if folio_test_large()?  While we don't
> have large anon folios today, it seems to me that trying to migrate one
> of them like this is just not going to work.

Yes, refuse any PTE-mapped large folios.
David Hildenbrand Sept. 14, 2023, 6:45 p.m. UTC | #3
On 14.09.23 20:43, David Hildenbrand wrote:
> On 14.09.23 20:11, Matthew Wilcox wrote:
>> On Thu, Sep 14, 2023 at 08:26:12AM -0700, Suren Baghdasaryan wrote:
>>> +++ b/include/linux/userfaultfd_k.h
>>> @@ -93,6 +93,23 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
>>>    extern long uffd_wp_range(struct vm_area_struct *vma,
>>>    			  unsigned long start, unsigned long len, bool enable_wp);
>>>    
>>> +/* remap_pages */
>>> +extern void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
>>> +extern void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
>>> +extern ssize_t remap_pages(struct mm_struct *dst_mm,
>>> +			   struct mm_struct *src_mm,
>>> +			   unsigned long dst_start,
>>> +			   unsigned long src_start,
>>> +			   unsigned long len, __u64 flags);
>>> +extern int remap_pages_huge_pmd(struct mm_struct *dst_mm,
>>> +				struct mm_struct *src_mm,
>>> +				pmd_t *dst_pmd, pmd_t *src_pmd,
>>> +				pmd_t dst_pmdval,
>>> +				struct vm_area_struct *dst_vma,
>>> +				struct vm_area_struct *src_vma,
>>> +				unsigned long dst_addr,
>>> +				unsigned long src_addr);
>>
>> Drop the 'extern' markers from function declarations.
>>
>>> +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
>>> +			 struct mm_struct *src_mm,
>>> +			 pmd_t *dst_pmd, pmd_t *src_pmd,
>>> +			 pmd_t dst_pmdval,
>>> +			 struct vm_area_struct *dst_vma,
>>> +			 struct vm_area_struct *src_vma,
>>> +			 unsigned long dst_addr,
>>> +			 unsigned long src_addr)
>>> +{
>>> +	pmd_t _dst_pmd, src_pmdval;
>>> +	struct page *src_page;
>>> +	struct anon_vma *src_anon_vma, *dst_anon_vma;
>>> +	spinlock_t *src_ptl, *dst_ptl;
>>> +	pgtable_t pgtable;
>>> +	struct mmu_notifier_range range;
>>> +
>>> +	src_pmdval = *src_pmd;
>>> +	src_ptl = pmd_lockptr(src_mm, src_pmd);
>>> +
>>> +	BUG_ON(!pmd_trans_huge(src_pmdval));
>>> +	BUG_ON(!pmd_none(dst_pmdval));
>>> +	BUG_ON(!spin_is_locked(src_ptl));
>>> +	mmap_assert_locked(src_mm);
>>> +	mmap_assert_locked(dst_mm);
>>> +	BUG_ON(src_addr & ~HPAGE_PMD_MASK);
>>> +	BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
>>> +
>>> +	src_page = pmd_page(src_pmdval);
>>> +	BUG_ON(!PageHead(src_page));
>>> +	BUG_ON(!PageAnon(src_page));
>>
>> Better to add a src_folio = page_folio(src_page);
>> and then folio_test_anon() here.
>>
>>> +	if (unlikely(page_mapcount(src_page) != 1)) {
>>
>> Brr, this is going to miss PTE mappings of this folio.  I think you
>> actually want folio_mapcount() instead, although it'd be more efficient
>> to look at folio->_entire_mapcount == 1 and _nr_pages_mapped == 0.
>> Not wure what a good name for that predicate would be.
> 
> We have
> 
>    * It only works on non shared anonymous pages because those can
>    * be relocated without generating non linear anon_vmas in the rmap
>    * code.
>    *
>    * It provides a zero copy mechanism to handle userspace page faults.
>    * The source vma pages should have mapcount == 1, which can be
>    * enforced by using madvise(MADV_DONTFORK) on src vma.
> 
> Use PageAnonExclusive(). As long as KSM is not involved and you don't
> use fork(), that flag should be good enough for that use case here.
>
... and similarly don't do any of that swapcount stuff and only check if 
the swap pte is anon exclusive.
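
For the swap-PTE side of remap_pages_pte(), a sketch of that check; the error
code is an assumption chosen to mirror the present-PTE case:

        if (!pte_swp_exclusive(orig_src_pte)) {
                err = -EBUSY;   /* assumed error code */
                goto out;
        }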
David Hildenbrand Sept. 14, 2023, 6:47 p.m. UTC | #4
On 14.09.23 17:26, Suren Baghdasaryan wrote:
> From: Andrea Arcangeli <aarcange@redhat.com>
> 
> This implements the uABI of UFFDIO_REMAP.
> 
> Notably one mode bitflag is also forwarded (and in turn known) by the
> lowlevel remap_pages method.

Sorry to say, but these functions are unacceptably long. Please find 
ways to structure the code in a better way.
Suren Baghdasaryan Sept. 14, 2023, 6:54 p.m. UTC | #5
On Thu, Sep 14, 2023 at 11:47 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 14.09.23 17:26, Suren Baghdasaryan wrote:
> > From: Andrea Arcangeli <aarcange@redhat.com>
> >
> > This implements the uABI of UFFDIO_REMAP.
> >
> > Notably one mode bitflag is also forwarded (and in turn known) by the
> > lowlevel remap_pages method.
>
> Sorry to say, but these functions are unacceptably long. Please find
> ways to structure the code in a better way.

Thanks for the comments, guys! I'll address them and will try to break
the functions into smaller pieces.

>
> --
> Cheers,
>
> David / dhildenb
>
Jann Horn Sept. 14, 2023, 7:28 p.m. UTC | #6
On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> From: Andrea Arcangeli <aarcange@redhat.com>
>
> This implements the uABI of UFFDIO_REMAP.
>
> Notably one mode bitflag is also forwarded (and in turn known) by the
> lowlevel remap_pages method.
>
> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  fs/userfaultfd.c                 |  49 +++
>  include/linux/rmap.h             |   5 +
>  include/linux/userfaultfd_k.h    |  17 +
>  include/uapi/linux/userfaultfd.h |  22 ++
>  mm/huge_memory.c                 | 118 +++++++
>  mm/khugepaged.c                  |   3 +
>  mm/userfaultfd.c                 | 586 +++++++++++++++++++++++++++++++
>  7 files changed, 800 insertions(+)
>
> diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> index 56eaae9dac1a..7bf64e7541c1 100644
> --- a/fs/userfaultfd.c
> +++ b/fs/userfaultfd.c
> @@ -2027,6 +2027,52 @@ static inline unsigned int uffd_ctx_features(__u64 user_features)
>         return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
>  }
>
> +static int userfaultfd_remap(struct userfaultfd_ctx *ctx,
> +                            unsigned long arg)
> +{
> +       __s64 ret;
> +       struct uffdio_remap uffdio_remap;
> +       struct uffdio_remap __user *user_uffdio_remap;
> +       struct userfaultfd_wake_range range;
> +
> +       user_uffdio_remap = (struct uffdio_remap __user *) arg;
> +
> +       ret = -EFAULT;
> +       if (copy_from_user(&uffdio_remap, user_uffdio_remap,
> +                          /* don't copy "remap" last field */
> +                          sizeof(uffdio_remap)-sizeof(__s64)))
> +               goto out;
> +
> +       ret = validate_range(ctx->mm, uffdio_remap.dst, uffdio_remap.len);
> +       if (ret)
> +               goto out;
> +       ret = validate_range(current->mm, uffdio_remap.src, uffdio_remap.len);
> +       if (ret)
> +               goto out;
> +       ret = -EINVAL;
> +       if (uffdio_remap.mode & ~(UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES|
> +                                 UFFDIO_REMAP_MODE_DONTWAKE))
> +               goto out;

Do you not need mmget_not_zero(ctx->mm) to make sure the MM can't be
concurrently torn down while remap_pages() is running, similar to what
the other userfaultfd ioctl handlers do?
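
For reference, the pattern used by the other handlers (e.g.
userfaultfd_copy()) looks roughly like this when applied here:

        if (mmget_not_zero(ctx->mm)) {
                ret = remap_pages(ctx->mm, current->mm,
                                  uffdio_remap.dst, uffdio_remap.src,
                                  uffdio_remap.len, uffdio_remap.mode);
                mmput(ctx->mm);
        } else {
                return -ESRCH;
        }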

> +       ret = remap_pages(ctx->mm, current->mm,
> +                         uffdio_remap.dst, uffdio_remap.src,
> +                         uffdio_remap.len, uffdio_remap.mode);
> +       if (unlikely(put_user(ret, &user_uffdio_remap->remap)))
> +               return -EFAULT;
> +       if (ret < 0)
> +               goto out;
> +       /* len == 0 would wake all */
> +       BUG_ON(!ret);
> +       range.len = ret;
> +       if (!(uffdio_remap.mode & UFFDIO_REMAP_MODE_DONTWAKE)) {
> +               range.start = uffdio_remap.dst;
> +               wake_userfault(ctx, &range);
> +       }
> +       ret = range.len == uffdio_remap.len ? 0 : -EAGAIN;
> +out:
> +       return ret;
> +}
[...]
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 064fbd90822b..c7a9880a1f6a 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1932,6 +1932,124 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
>         return ret;
>  }
>
> +#ifdef CONFIG_USERFAULTFD
> +/*
> + * The PT lock for src_pmd and the mmap_lock for reading are held by
> + * the caller, but it must return after releasing the
> + * page_table_lock. We're guaranteed the src_pmd is a pmd_trans_huge
> + * until the PT lock of the src_pmd is released. Just move the page
> + * from src_pmd to dst_pmd if possible. Return zero if succeeded in
> + * moving the page, -EAGAIN if it needs to be repeated by the caller,
> + * or other errors in case of failure.
> + */
> +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> +                        struct mm_struct *src_mm,
> +                        pmd_t *dst_pmd, pmd_t *src_pmd,
> +                        pmd_t dst_pmdval,
> +                        struct vm_area_struct *dst_vma,
> +                        struct vm_area_struct *src_vma,
> +                        unsigned long dst_addr,
> +                        unsigned long src_addr)
> +{
> +       pmd_t _dst_pmd, src_pmdval;
> +       struct page *src_page;
> +       struct anon_vma *src_anon_vma, *dst_anon_vma;
> +       spinlock_t *src_ptl, *dst_ptl;
> +       pgtable_t pgtable;
> +       struct mmu_notifier_range range;
> +
> +       src_pmdval = *src_pmd;
> +       src_ptl = pmd_lockptr(src_mm, src_pmd);
> +
> +       BUG_ON(!pmd_trans_huge(src_pmdval));
> +       BUG_ON(!pmd_none(dst_pmdval));

Why can we assert that pmd_none(dst_pmdval) is true here? Can we not
have concurrent faults (or userfaultfd operations) populating that
PMD?

> +       BUG_ON(!spin_is_locked(src_ptl));
> +       mmap_assert_locked(src_mm);
> +       mmap_assert_locked(dst_mm);
> +       BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> +       BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> +
> +       src_page = pmd_page(src_pmdval);
> +       BUG_ON(!PageHead(src_page));
> +       BUG_ON(!PageAnon(src_page));
> +       if (unlikely(page_mapcount(src_page) != 1)) {
> +               spin_unlock(src_ptl);
> +               return -EBUSY;
> +       }
> +
> +       get_page(src_page);
> +       spin_unlock(src_ptl);
> +
> +       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm, src_addr,
> +                               src_addr + HPAGE_PMD_SIZE);
> +       mmu_notifier_invalidate_range_start(&range);
> +
> +       /* block all concurrent rmap walks */
> +       lock_page(src_page);
> +
> +       /*
> +        * split_huge_page walks the anon_vma chain without the page
> +        * lock. Serialize against it with the anon_vma lock, the page
> +        * lock is not enough.
> +        */
> +       src_anon_vma = folio_get_anon_vma(page_folio(src_page));
> +       if (!src_anon_vma) {
> +               unlock_page(src_page);
> +               put_page(src_page);
> +               mmu_notifier_invalidate_range_end(&range);
> +               return -EAGAIN;
> +       }
> +       anon_vma_lock_write(src_anon_vma);
> +
> +       dst_ptl = pmd_lockptr(dst_mm, dst_pmd);
> +       double_pt_lock(src_ptl, dst_ptl);
> +       if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
> +                    !pmd_same(*dst_pmd, dst_pmdval) ||
> +                    page_mapcount(src_page) != 1)) {
> +               double_pt_unlock(src_ptl, dst_ptl);
> +               anon_vma_unlock_write(src_anon_vma);
> +               put_anon_vma(src_anon_vma);
> +               unlock_page(src_page);
> +               put_page(src_page);
> +               mmu_notifier_invalidate_range_end(&range);
> +               return -EAGAIN;
> +       }
> +
> +       BUG_ON(!PageHead(src_page));
> +       BUG_ON(!PageAnon(src_page));
> +       /* the PT lock is enough to keep the page pinned now */
> +       put_page(src_page);
> +
> +       dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> +       WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
> +       WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));
> +
> +       if (!pmd_same(pmdp_huge_clear_flush(src_vma, src_addr, src_pmd),
> +                     src_pmdval))
> +               BUG_ON(1);

I'm not sure we can assert that the PMDs are exactly equal; the CPU
might have changed the A/D bits under us?

> +       _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
> +       _dst_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
> +       set_pmd_at(dst_mm, dst_addr, dst_pmd, _dst_pmd);
> +
> +       pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
> +       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);

Are we allowed to move page tables between mm_structs on all
architectures? The first example I found that looks a bit dodgy,
looking through various architectures' pte_alloc_one(), is s390's
page_table_alloc() which looks like page tables are tied to per-MM
lists sometimes.
If that's not allowed, we might have to allocate a new deposit table
and free the old one or something like that.
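
A sketch of that alternative, assuming pte_alloc_one()/pte_free() are
suitable for a deposit table; the allocation would have to happen in the
window where no PT locks are held:

        pgtable_t new_pgtable, old_pgtable;

        /* before double_pt_lock(), may sleep */
        new_pgtable = pte_alloc_one(dst_mm);
        if (!new_pgtable)
                return -ENOMEM;

        /* under the PT locks, instead of moving the deposited table */
        old_pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
        pgtable_trans_huge_deposit(dst_mm, dst_pmd, new_pgtable);

        /* after the locks are dropped */
        pte_free(src_mm, old_pgtable);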

> +       if (dst_mm != src_mm) {
> +               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
> +               add_mm_counter(src_mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> +       }
> +       double_pt_unlock(src_ptl, dst_ptl);
> +
> +       anon_vma_unlock_write(src_anon_vma);
> +       put_anon_vma(src_anon_vma);
> +
> +       /* unblock rmap walks */
> +       unlock_page(src_page);
> +
> +       mmu_notifier_invalidate_range_end(&range);
> +       return 0;
> +}
> +#endif /* CONFIG_USERFAULTFD */
> +
>  /*
>   * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
>   *
[...]
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index 96d9eae5c7cc..0cca60dfa8f8 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
[...]
> +ssize_t remap_pages(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> +                   unsigned long dst_start, unsigned long src_start,
> +                   unsigned long len, __u64 mode)
> +{
[...]
> +
> +       if (pgprot_val(src_vma->vm_page_prot) !=
> +           pgprot_val(dst_vma->vm_page_prot))
> +               goto out;

Does this check intentionally allow moving pages from a
PROT_READ|PROT_WRITE anonymous private VMA into a PROT_READ anonymous
private VMA (on architectures like x86 and arm64 where CoW memory has
the same protection flags as read-only memory), but forbid moving them
from a PROT_READ|PROT_EXEC VMA into a PROT_READ VMA? I think this
check needs at least a comment to explain what's going on here.

> +       /* only allow remapping if both are mlocked or both aren't */
> +       if ((src_vma->vm_flags & VM_LOCKED) ^ (dst_vma->vm_flags & VM_LOCKED))
> +               goto out;
> +
> +       /*
> +        * Be strict and only allow remap_pages if either the src or
> +        * dst range is registered in the userfaultfd to prevent
> +        * userland errors going unnoticed. As far as the VM
> +        * consistency is concerned, it would be perfectly safe to
> +        * remove this check, but there's no useful usage for
> +        * remap_pages outside of userfaultfd registered ranges. This
> +        * is after all why it is an ioctl belonging to the
> +        * userfaultfd and not a syscall.
> +        *
> +        * Allow both vmas to be registered in the userfaultfd, just
> +        * in case somebody finds a way to make such a case useful.
> +        * Normally only one of the two vmas would be registered in
> +        * the userfaultfd.
> +        */
> +       if (!dst_vma->vm_userfaultfd_ctx.ctx &&
> +           !src_vma->vm_userfaultfd_ctx.ctx)
> +               goto out;
> +
> +       /*
> +        * FIXME: only allow remapping across anonymous vmas,
> +        * tmpfs should be added.
> +        */
> +       if (src_vma->vm_ops || dst_vma->vm_ops)
> +               goto out;

I don't think it's okay to check for anonymous VMAs by checking
->vm_ops. There are some weird drivers whose ->mmap helpers don't set
->vm_ops and instead just shove all the necessary PTEs into the VMA
right on ->mmap, so I think they end up with ->vm_ops==NULL. For
example, kcov_mmap() looks that way. I'm not sure how common this is.

Though, uuuuuh, I guess if that's true, the existing
vma_is_anonymous() is broken, since that also just checks ->vm_ops?
I'm not sure what the consequences of that would be... Either way,
vma_is_anonymous() might be the better way to check for anonymous VMAs
here, and someone should figure out whether vma_is_anonymous() needs
to be fixed.
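
The anonymous-only check would then read something like:

        if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
                goto out;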

> +       /*
> +        * Ensure the dst_vma has an anon_vma or this page
> +        * would get a NULL anon_vma when moved in the
> +        * dst_vma.
> +        */
> +       err = -ENOMEM;
> +       if (unlikely(anon_vma_prepare(dst_vma)))
> +               goto out;
> +
> +       for (src_addr = src_start, dst_addr = dst_start;
> +            src_addr < src_start + len;) {
> +               spinlock_t *ptl;
> +               pmd_t dst_pmdval;
> +
> +               BUG_ON(dst_addr >= dst_start + len);
> +               src_pmd = mm_find_pmd(src_mm, src_addr);

(this would blow up pretty badly if we could have transparent huge PUD
in the region but I think that's limited to file VMAs so it's fine as
it currently is)

> +               if (unlikely(!src_pmd)) {
> +                       if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
> +                               err = -ENOENT;
> +                               break;
> +                       }
> +                       src_pmd = mm_alloc_pmd(src_mm, src_addr);
> +                       if (unlikely(!src_pmd)) {
> +                               err = -ENOMEM;
> +                               break;
> +                       }
> +               }
> +               dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
> +               if (unlikely(!dst_pmd)) {
> +                       err = -ENOMEM;
> +                       break;
> +               }
> +
> +               dst_pmdval = pmdp_get_lockless(dst_pmd);
> +               /*
> +                * If the dst_pmd is mapped as THP don't
> +                * override it and just be strict.
> +                */
> +               if (unlikely(pmd_trans_huge(dst_pmdval))) {
> +                       err = -EEXIST;
> +                       break;
> +               }

This check is racy because the dst_pmd can still change at this point,
from previously pointing to a zeroed PMD to now pointing to a
hugepage, right? And we rely on remap_pages_pte() and
remap_pages_huge_pmd() to recheck for that?
If yes, maybe add a comment noting this and explaining why we want this check.

> +               ptl = pmd_trans_huge_lock(src_pmd, src_vma);
> +               if (ptl) {
> +                       /*
> +                        * Check if we can move the pmd without
> +                        * splitting it. First check the address
> +                        * alignment to be the same in src/dst.  These
> +                        * checks don't actually need the PT lock but
> +                        * it's good to do it here to optimize this
> +                        * block away at build time if
> +                        * CONFIG_TRANSPARENT_HUGEPAGE is not set.
> +                        */
> +                       if (thp_aligned == -1)
> +                               thp_aligned = ((src_addr & ~HPAGE_PMD_MASK) ==
> +                                              (dst_addr & ~HPAGE_PMD_MASK));
> +                       if (!thp_aligned || (src_addr & ~HPAGE_PMD_MASK) ||

This seems overly complicated, the only case when you can move a huge
PMD is if both addresses are hugepage-aligned and you have enough
length for one hugepage:

(src_addr & ~HPAGE_PMD_MASK) == 0 && (dst_addr & ~HPAGE_PMD_MASK) == 0
&& (src_start + len - src_addr >= HPAGE_PMD_SIZE).

> +                           !pmd_none(dst_pmdval) ||
> +                           src_start + len - src_addr < HPAGE_PMD_SIZE) {
> +                               spin_unlock(ptl);
> +                               /* Fall through */
> +                               split_huge_pmd(src_vma, src_pmd, src_addr);
> +                       } else {
> +                               err = remap_pages_huge_pmd(dst_mm,
> +                                                          src_mm,
> +                                                          dst_pmd,
> +                                                          src_pmd,
> +                                                          dst_pmdval,
> +                                                          dst_vma,
> +                                                          src_vma,
> +                                                          dst_addr,
> +                                                          src_addr);
> +                               cond_resched();
> +
> +                               if (!err) {
> +                                       dst_addr += HPAGE_PMD_SIZE;
> +                                       src_addr += HPAGE_PMD_SIZE;
> +                                       moved += HPAGE_PMD_SIZE;
> +                               }
> +
> +                               if ((!err || err == -EAGAIN) &&
> +                                   fatal_signal_pending(current))
> +                                       err = -EINTR;
> +
> +                               if (err && err != -EAGAIN)
> +                                       break;
> +
> +                               continue;
> +                       }
> +               }
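
Taking the alignment simplification suggested above, the start of this block
might collapse to something like (thp_aligned goes away entirely; only the
condition changes):

        if ((src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
            !pmd_none(dst_pmdval) ||
            src_start + len - src_addr < HPAGE_PMD_SIZE) {
                spin_unlock(ptl);
                /* Fall through */
                split_huge_pmd(src_vma, src_pmd, src_addr);
        } else {
                err = remap_pages_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
                                           dst_pmdval, dst_vma, src_vma,
                                           dst_addr, src_addr);
                /* rest unchanged */
        }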
> +
> +               if (pmd_none(*src_pmd)) {
> +                       if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
> +                               err = -ENOENT;
> +                               break;
> +                       }
> +                       if (unlikely(__pte_alloc(src_mm, src_pmd))) {
> +                               err = -ENOMEM;
> +                               break;
> +                       }
> +               }
> +
> +               if (unlikely(pmd_none(dst_pmdval)) &&
> +                   unlikely(__pte_alloc(dst_mm, dst_pmd))) {

Maybe just use pte_alloc() here?

> +                       err = -ENOMEM;
> +                       break;
> +               }
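
With pte_alloc(), which already folds in the pmd_none() check, the hunk above
could become something like:

        if (pte_alloc(dst_mm, dst_pmd)) {
                err = -ENOMEM;
                break;
        }

Note this tests the current *dst_pmd rather than the earlier dst_pmdval
snapshot, which appears fine since remap_pages_pte() revalidates under the
PTLs anyway.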
> +
> +               err = remap_pages_pte(dst_mm, src_mm,
> +                                     dst_pmd, src_pmd,
> +                                     dst_vma, src_vma,
> +                                     dst_addr, src_addr,
> +                                     mode);
> +
> +               cond_resched();
> +
> +               if (!err) {
> +                       dst_addr += PAGE_SIZE;
> +                       src_addr += PAGE_SIZE;
> +                       moved += PAGE_SIZE;
> +               }
> +
> +               if ((!err || err == -EAGAIN) &&
> +                   fatal_signal_pending(current))
> +                       err = -EINTR;
> +
> +               if (err && err != -EAGAIN)
> +                       break;
> +       }
> +
> +out:
> +       mmap_read_unlock(dst_mm);
> +       if (dst_mm != src_mm)
> +               mmap_read_unlock(src_mm);
> +       BUG_ON(moved < 0);
> +       BUG_ON(err > 0);
> +       BUG_ON(!moved && !err);
> +       return moved ? moved : err;
> +}

Maybe you could try whether this function would look simpler with a
shape roughly like:

for (src_addr = ...; src_addr < ...;) {
  unsigned long step_size;

  if (hugepage case) {
    if (have to split) {
      split it;
      continue;
    }
    step_size = HPAGE_PMD_SIZE;
    ...
  } else {
    ... 4k case ...
    step_size = PAGE_SIZE;
  }
  ...
  cond_resched();
  if (!err) {
    dst_addr += step_size;
    src_addr += step_size;
    moved += step_size;
  }
  ...
}
Suren Baghdasaryan Sept. 14, 2023, 8:57 p.m. UTC | #7
On Thu, Sep 14, 2023 at 12:28 PM Jann Horn <jannh@google.com> wrote:
> [...]
I'll need some time to gather the answers to all your questions and
will reply once I have them ready.
Thanks for reviewing, Jann!
Nadav Amit Sept. 14, 2023, 9:57 p.m. UTC | #8
> On Sep 14, 2023, at 8:26 AM, Suren Baghdasaryan <surenb@google.com> wrote:
> 
> +	if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
> +		      orig_src_pte))
> +		BUG_ON(1);

Just a minor detail regarding these few lines:

Besides the less-than-ideal use of BUG_ON() here, I think that this code
assumes that the PTE cannot change at this point. However, as the PTE was
still mapped at this point, I think the access and dirty bits can be set.

tl;dr: this appears to be triggerable by userspace.

[ as for the performance of this code, the lack of batching would mean
  that for multithreaded applications where more than a single page is
  remapped, performance would suffer ]
Suren Baghdasaryan Sept. 15, 2023, 3:28 a.m. UTC | #9
On Thu, Sep 14, 2023 at 2:57 PM Nadav Amit <nadav.amit@gmail.com> wrote:
>
>
> > On Sep 14, 2023, at 8:26 AM, Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > +     if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
> > +             orig_src_pte))
> > +             BUG_ON(1);
>
> Just a minor detail regarding these few lines:
>
> Besides the less-than-ideal use of BUG_ON() here, I think that this code
> assumes that the PTE cannot change at this point. However, as the PTE was
> still mapped at this point, I think the access and dirty bits can be set.

At this point we are holding PTLs for both PTEs (see
double_pt_lock()).  Can a PTE be modified from under us in this
situation?

>
> tl;dr: this appears to be triggerable by userspace.
>
> [ as for the performance of this code, the lack of batching would mean
>   that for multithreaded applications where more than a single page is
>   remapped, performance would suffer ]

Thanks for the note! I'll see if it's possible to implement some
batching mechanism here.
Thanks,
Suren.
Nadav Amit Sept. 15, 2023, 4:03 a.m. UTC | #10
> On Sep 14, 2023, at 8:28 PM, Suren Baghdasaryan <surenb@google.com> wrote:
> 
> On Thu, Sep 14, 2023 at 2:57 PM Nadav Amit <nadav.amit@gmail.com> wrote:
>> 
>> 
>>> On Sep 14, 2023, at 8:26 AM, Suren Baghdasaryan <surenb@google.com> wrote:
>>> 
>>> +     if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
>>> +             orig_src_pte))
>>> +             BUG_ON(1);
>> 
>> Just a minor detail regarding these few lines:
>> 
>> Besides the less-than-ideal use of BUG_ON() here, I think that this code
>> assumes that the PTE cannot change at this point. However, as the PTE was
>> still mapped at this point, I think the access and dirty bits can be set.
> 
> At this point we are holding PTLs for both PTEs (see
> double_pt_lock()).  Can a PTE be modified from under us in this
> situation?

PTEs have several parts: access-control bits (e.g., writable), the physical
frame number, software-only bits and log-bits. The log-bits, which are
“access” and “dirty” on x86, track whether the PTE has ever been used
for a translation or a write, respectively.

Without getting into all the subtleties (e.g., “access” can be set
speculatively even if no actual access takes place), as long as the PTE
is present, it might be used for accesses (and writes, if it is writable)
by other cores. The page-table locks are irrelevant here, because the
PTE is not updated by software; it is updated by the CPU itself
during the page-walk/write.
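
Just to illustrate (a rough sketch only, not a concrete proposal, and
"cleared" is a made-up local): if the check only needs to catch
unexpected software changes, the comparison could mask out the
log-bits with the existing pte_mkold()/pte_mkclean() helpers:

	pte_t cleared = ptep_clear_flush(src_vma, src_addr, src_pte);

	/* tolerate hardware A/D updates; only software changes are unexpected */
	WARN_ON_ONCE(!pte_same(pte_mkclean(pte_mkold(cleared)),
			       pte_mkclean(pte_mkold(orig_src_pte))));

Whether ignoring the dirty bit is acceptable for the semantics you
want here is of course a separate question.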
Suren Baghdasaryan Sept. 15, 2023, 4:15 a.m. UTC | #11
On Fri, Sep 15, 2023 at 4:04 AM Nadav Amit <nadav.amit@gmail.com> wrote:
>
>
>
> > On Sep 14, 2023, at 8:28 PM, Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > On Thu, Sep 14, 2023 at 2:57 PM Nadav Amit <nadav.amit@gmail.com> wrote:
> >>
> >>
> >>> On Sep 14, 2023, at 8:26 AM, Suren Baghdasaryan <surenb@google.com> wrote:
> >>>
> >>> +     if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
> >>> +             orig_src_pte))
> >>> +             BUG_ON(1);
> >>
> >> Just a minor detail regarding these few lines:
> >>
> >> Besides the less-than-ideal use of BUG_ON() here, I think that this code
> >> assumes that the PTE cannot change at this point. However, as the PTE was
> >> still mapped at this point, I think the access and dirty bits can be set.
> >
> > At this point we are holding PTLs for both PTEs (see
> > double_pt_lock()).  Can a PTE be modified from under us in this
> > situation?
>
> PTEs have several parts: access-control bits (e.g., writable), the physical
> frame number, software-only bits and log-bits. The log-bits, which are
> “access” and “dirty” on x86, track whether the PTE has ever been used
> for a translation or a write, respectively.
>
> Without getting into all the subtleties (e.g., “access” can be set
> speculatively even if no actual access takes place), as long as the PTE
> is present, it might be used for accesses (and writes, if it is writable)
> by other cores. The page-table locks are irrelevant here, because the
> PTE is not updated by software; it is updated by the CPU itself
> during the page-walk/write.

Ah, I see. I believe Jann also pointed this out in one of his comments
and I didn't realize that. Thanks for the note! I'll see how I can
rework this.

Jann Horn Sept. 15, 2023, 11:33 p.m. UTC | #12
On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> +/*
> + * The mmap_lock for reading is held by the caller. Just move the page
> + * from src_pmd to dst_pmd if possible, and return true if succeeded
> + * in moving the page.
> + */
> +static int remap_pages_pte(struct mm_struct *dst_mm,
> +                          struct mm_struct *src_mm,
> +                          pmd_t *dst_pmd,
> +                          pmd_t *src_pmd,
> +                          struct vm_area_struct *dst_vma,
> +                          struct vm_area_struct *src_vma,
> +                          unsigned long dst_addr,
> +                          unsigned long src_addr,
> +                          __u64 mode)
> +{
> +       swp_entry_t entry;
> +       pte_t orig_src_pte, orig_dst_pte;
> +       spinlock_t *src_ptl, *dst_ptl;
> +       pte_t *src_pte = NULL;
> +       pte_t *dst_pte = NULL;
> +
> +       struct folio *src_folio = NULL;
> +       struct anon_vma *src_anon_vma = NULL;
> +       struct anon_vma *dst_anon_vma;
> +       struct mmu_notifier_range range;
> +       int err = 0;
> +
> +retry:
> +       dst_pte = pte_offset_map_nolock(dst_mm, dst_pmd, dst_addr, &dst_ptl);
> +
> +       /* If an huge pmd materialized from under us fail */
> +       if (unlikely(!dst_pte)) {
> +               err = -EFAULT;
> +               goto out;
> +       }
> +
> +       src_pte = pte_offset_map_nolock(src_mm, src_pmd, src_addr, &src_ptl);
> +
> +       /*
> +        * We held the mmap_lock for reading so MADV_DONTNEED
> +        * can zap transparent huge pages under us, or the
> +        * transparent huge page fault can establish new
> +        * transparent huge pages under us.
> +        */
> +       if (unlikely(!src_pte)) {
> +               err = -EFAULT;
> +               goto out;
> +       }
> +
> +       BUG_ON(pmd_none(*dst_pmd));
> +       BUG_ON(pmd_none(*src_pmd));
> +       BUG_ON(pmd_trans_huge(*dst_pmd));
> +       BUG_ON(pmd_trans_huge(*src_pmd));
> +
> +       spin_lock(dst_ptl);
> +       orig_dst_pte = *dst_pte;
> +       spin_unlock(dst_ptl);
> +       if (!pte_none(orig_dst_pte)) {
> +               err = -EEXIST;
> +               goto out;
> +       }
> +
> +       spin_lock(src_ptl);
> +       orig_src_pte = *src_pte;
> +       spin_unlock(src_ptl);
> +       if (pte_none(orig_src_pte)) {
> +               if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES))
> +                       err = -ENOENT;
> +               else /* nothing to do to remap a hole */
> +                       err = 0;
> +               goto out;
> +       }
> +
> +       if (pte_present(orig_src_pte)) {
> +               if (!src_folio) {
> +                       struct folio *folio;
> +
> +                       /*
> +                        * Pin the page while holding the lock to be sure the
> +                        * page isn't freed under us
> +                        */
> +                       spin_lock(src_ptl);
> +                       if (!pte_same(orig_src_pte, *src_pte)) {
> +                               spin_unlock(src_ptl);
> +                               err = -EAGAIN;
> +                               goto out;
> +                       }
> +
> +                       folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
> +                       if (!folio || !folio_test_anon(folio) ||
> +                           folio_estimated_sharers(folio) != 1) {
> +                               spin_unlock(src_ptl);
> +                               err = -EBUSY;
> +                               goto out;
> +                       }
> +
> +                       src_folio = folio;
> +                       folio_get(src_folio);
> +                       spin_unlock(src_ptl);
> +
> +                       /* try to block all concurrent rmap walks */
> +                       if (!folio_trylock(src_folio)) {
> +                               pte_unmap(&orig_src_pte);
> +                               pte_unmap(&orig_dst_pte);
> +                               src_pte = dst_pte = NULL;
> +                               folio_lock(src_folio);
> +                               goto retry;
> +                       }
> +               }
> +
> +               if (!src_anon_vma) {
> +                       /*
> +                        * folio_referenced walks the anon_vma chain
> +                        * without the folio lock. Serialize against it with
> +                        * the anon_vma lock, the folio lock is not enough.
> +                        */
> +                       src_anon_vma = folio_get_anon_vma(src_folio);
> +                       if (!src_anon_vma) {
> +                               /* page was unmapped from under us */
> +                               err = -EAGAIN;
> +                               goto out;
> +                       }
> +                       if (!anon_vma_trylock_write(src_anon_vma)) {
> +                               pte_unmap(&orig_src_pte);
> +                               pte_unmap(&orig_dst_pte);
> +                               src_pte = dst_pte = NULL;
> +                               anon_vma_lock_write(src_anon_vma);
> +                               goto retry;
> +                       }
> +               }
> +
> +               mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm,
> +                                       src_addr, src_addr + PAGE_SIZE);
> +               mmu_notifier_invalidate_range_start_nonblock(&range);

I'm pretty sure you're not allowed to use
mmu_notifier_invalidate_range_start_nonblock(). Use
mmu_notifier_invalidate_range_start() at a point where it's fine to
block; perhaps all the way up in remap_pages() around the whole loop
or something like that. mmu_notifier_invalidate_range_start_nonblock()
is special magic sauce for OOM reaping (with deceptively inviting
naming and no documentation that explains that this is evil hazmat
code), and if you chase down what various users of MMU notifiers do
when you just hand them a random notification where
mmu_notifier_range_blockable() is false, you end up in fun paths like
in KVM's kvm_mmu_notifier_invalidate_range_start(), which calls
gfn_to_pfn_cache_invalidate_start(), which does this:

    /*
     * If the OOM reaper is active, then all vCPUs should have
     * been stopped already, so perform the request without
     * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
     */
    if (!may_block)
       req &= ~KVM_REQUEST_WAIT;

    called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

    WARN_ON_ONCE(called && !may_block);

Which means if you hit this codepath from a place like userfaultfd
where the process is probably still running (and not being OOM reaped
in a context where it's guaranteed to have been killed and stopped),
you could probably get KVM into a situation where it needs to wait for
vCPUs to acknowledge that they've processed a message before it's safe
to continue, but it can't wait because we're in nonsleepable context,
and then it just continues without waiting and presumably the KVM TLB
gets out of sync or something?



> +
> +               double_pt_lock(dst_ptl, src_ptl);
> +
> +               if (!pte_same(*src_pte, orig_src_pte) ||
> +                   !pte_same(*dst_pte, orig_dst_pte) ||
> +                   folio_estimated_sharers(src_folio) != 1) {
> +                       double_pt_unlock(dst_ptl, src_ptl);
> +                       err = -EAGAIN;
> +                       goto out;
> +               }
> +
> +               BUG_ON(!folio_test_anon(src_folio));
> +               /* the PT lock is enough to keep the page pinned now */
> +               folio_put(src_folio);
> +
> +               dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> +               WRITE_ONCE(src_folio->mapping,
> +                          (struct address_space *) dst_anon_vma);
> +               WRITE_ONCE(src_folio->index, linear_page_index(dst_vma,
> +                                                             dst_addr));
> +
> +               if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
> +                             orig_src_pte))
> +                       BUG_ON(1);
> +
> +               orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
> +               orig_dst_pte = maybe_mkwrite(pte_mkdirty(orig_dst_pte),
> +                                            dst_vma);
> +
> +               set_pte_at(dst_mm, dst_addr, dst_pte, orig_dst_pte);
> +
> +               if (dst_mm != src_mm) {
> +                       inc_mm_counter(dst_mm, MM_ANONPAGES);
> +                       dec_mm_counter(src_mm, MM_ANONPAGES);
> +               }
> +
> +               double_pt_unlock(dst_ptl, src_ptl);
> +
> +               anon_vma_unlock_write(src_anon_vma);
> +               mmu_notifier_invalidate_range_end(&range);
> +               put_anon_vma(src_anon_vma);
> +               src_anon_vma = NULL;
> +
> +               /* unblock rmap walks */
> +               folio_unlock(src_folio);
> +               src_folio = NULL;
> +
> +       } else {
> +               struct swap_info_struct *si;
> +               int swap_count;
> +
> +               entry = pte_to_swp_entry(orig_src_pte);
> +               if (non_swap_entry(entry)) {
> +                       if (is_migration_entry(entry)) {
> +                               pte_unmap(&orig_src_pte);
> +                               pte_unmap(&orig_dst_pte);
> +                               src_pte = dst_pte = NULL;
> +                               migration_entry_wait(src_mm, src_pmd,
> +                                                    src_addr);
> +                               err = -EAGAIN;
> +                       } else
> +                               err = -EFAULT;
> +                       goto out;
> +               }
> +
> +               /*
> +                * COUNT_CONTINUE to be returned is fine here, no need
> +                * of follow all swap continuation to check against
> +                * number 1.
> +                */
> +               si = get_swap_device(entry);
> +               if (!si) {
> +                       err = -EBUSY;
> +                       goto out;
> +               }
> +
> +               swap_count = swap_swapcount(si, entry);
> +               put_swap_device(si);
> +               if (swap_count != 1) {
> +                       err = -EBUSY;
> +                       goto out;
> +               }
> +
> +               double_pt_lock(dst_ptl, src_ptl);
> +
> +               if (!pte_same(*src_pte, orig_src_pte) ||
> +                   !pte_same(*dst_pte, orig_dst_pte) ||
> +                   swp_swapcount(entry) != 1) {
> +                       double_pt_unlock(dst_ptl, src_ptl);
> +                       err = -EAGAIN;
> +                       goto out;
> +               }
> +
> +               if (pte_val(ptep_get_and_clear(src_mm, src_addr, src_pte)) !=
> +                   pte_val(orig_src_pte))
> +                       BUG_ON(1);
> +               set_pte_at(dst_mm, dst_addr, dst_pte, orig_src_pte);
> +
> +               if (dst_mm != src_mm) {
> +                       inc_mm_counter(dst_mm, MM_ANONPAGES);
> +                       dec_mm_counter(src_mm, MM_ANONPAGES);
> +               }
> +
> +               double_pt_unlock(dst_ptl, src_ptl);
> +       }
> +
> +out:
> +       if (src_anon_vma) {
> +               anon_vma_unlock_write(src_anon_vma);
> +               put_anon_vma(src_anon_vma);
> +       }
> +       if (src_folio) {
> +               folio_unlock(src_folio);
> +               folio_put(src_folio);
> +       }
> +       if (dst_pte)
> +               pte_unmap(dst_pte);
> +       if (src_pte)
> +               pte_unmap(src_pte);
> +
> +       return err;
> +}
Suren Baghdasaryan Sept. 15, 2023, 11:39 p.m. UTC | #13
On Fri, Sep 15, 2023 at 4:34 PM Jann Horn <jannh@google.com> wrote:
>
> On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > +/*
> > + * The mmap_lock for reading is held by the caller. Just move the page
> > + * from src_pmd to dst_pmd if possible, and return true if succeeded
> > + * in moving the page.
> > + */
> > +static int remap_pages_pte(struct mm_struct *dst_mm,
> > +                          struct mm_struct *src_mm,
> > +                          pmd_t *dst_pmd,
> > +                          pmd_t *src_pmd,
> > +                          struct vm_area_struct *dst_vma,
> > +                          struct vm_area_struct *src_vma,
> > +                          unsigned long dst_addr,
> > +                          unsigned long src_addr,
> > +                          __u64 mode)
> > +{
> > +       swp_entry_t entry;
> > +       pte_t orig_src_pte, orig_dst_pte;
> > +       spinlock_t *src_ptl, *dst_ptl;
> > +       pte_t *src_pte = NULL;
> > +       pte_t *dst_pte = NULL;
> > +
> > +       struct folio *src_folio = NULL;
> > +       struct anon_vma *src_anon_vma = NULL;
> > +       struct anon_vma *dst_anon_vma;
> > +       struct mmu_notifier_range range;
> > +       int err = 0;
> > +
> > +retry:
> > +       dst_pte = pte_offset_map_nolock(dst_mm, dst_pmd, dst_addr, &dst_ptl);
> > +
> > +       /* If an huge pmd materialized from under us fail */
> > +       if (unlikely(!dst_pte)) {
> > +               err = -EFAULT;
> > +               goto out;
> > +       }
> > +
> > +       src_pte = pte_offset_map_nolock(src_mm, src_pmd, src_addr, &src_ptl);
> > +
> > +       /*
> > +        * We held the mmap_lock for reading so MADV_DONTNEED
> > +        * can zap transparent huge pages under us, or the
> > +        * transparent huge page fault can establish new
> > +        * transparent huge pages under us.
> > +        */
> > +       if (unlikely(!src_pte)) {
> > +               err = -EFAULT;
> > +               goto out;
> > +       }
> > +
> > +       BUG_ON(pmd_none(*dst_pmd));
> > +       BUG_ON(pmd_none(*src_pmd));
> > +       BUG_ON(pmd_trans_huge(*dst_pmd));
> > +       BUG_ON(pmd_trans_huge(*src_pmd));
> > +
> > +       spin_lock(dst_ptl);
> > +       orig_dst_pte = *dst_pte;
> > +       spin_unlock(dst_ptl);
> > +       if (!pte_none(orig_dst_pte)) {
> > +               err = -EEXIST;
> > +               goto out;
> > +       }
> > +
> > +       spin_lock(src_ptl);
> > +       orig_src_pte = *src_pte;
> > +       spin_unlock(src_ptl);
> > +       if (pte_none(orig_src_pte)) {
> > +               if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES))
> > +                       err = -ENOENT;
> > +               else /* nothing to do to remap a hole */
> > +                       err = 0;
> > +               goto out;
> > +       }
> > +
> > +       if (pte_present(orig_src_pte)) {
> > +               if (!src_folio) {
> > +                       struct folio *folio;
> > +
> > +                       /*
> > +                        * Pin the page while holding the lock to be sure the
> > +                        * page isn't freed under us
> > +                        */
> > +                       spin_lock(src_ptl);
> > +                       if (!pte_same(orig_src_pte, *src_pte)) {
> > +                               spin_unlock(src_ptl);
> > +                               err = -EAGAIN;
> > +                               goto out;
> > +                       }
> > +
> > +                       folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
> > +                       if (!folio || !folio_test_anon(folio) ||
> > +                           folio_estimated_sharers(folio) != 1) {
> > +                               spin_unlock(src_ptl);
> > +                               err = -EBUSY;
> > +                               goto out;
> > +                       }
> > +
> > +                       src_folio = folio;
> > +                       folio_get(src_folio);
> > +                       spin_unlock(src_ptl);
> > +
> > +                       /* try to block all concurrent rmap walks */
> > +                       if (!folio_trylock(src_folio)) {
> > +                               pte_unmap(&orig_src_pte);
> > +                               pte_unmap(&orig_dst_pte);
> > +                               src_pte = dst_pte = NULL;
> > +                               folio_lock(src_folio);
> > +                               goto retry;
> > +                       }
> > +               }
> > +
> > +               if (!src_anon_vma) {
> > +                       /*
> > +                        * folio_referenced walks the anon_vma chain
> > +                        * without the folio lock. Serialize against it with
> > +                        * the anon_vma lock, the folio lock is not enough.
> > +                        */
> > +                       src_anon_vma = folio_get_anon_vma(src_folio);
> > +                       if (!src_anon_vma) {
> > +                               /* page was unmapped from under us */
> > +                               err = -EAGAIN;
> > +                               goto out;
> > +                       }
> > +                       if (!anon_vma_trylock_write(src_anon_vma)) {
> > +                               pte_unmap(&orig_src_pte);
> > +                               pte_unmap(&orig_dst_pte);
> > +                               src_pte = dst_pte = NULL;
> > +                               anon_vma_lock_write(src_anon_vma);
> > +                               goto retry;
> > +                       }
> > +               }
> > +
> > +               mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm,
> > +                                       src_addr, src_addr + PAGE_SIZE);
> > +               mmu_notifier_invalidate_range_start_nonblock(&range);
>
> I'm pretty sure you're not allowed to use
> mmu_notifier_invalidate_range_start_nonblock(). Use
> mmu_notifier_invalidate_range_start() at a point where it's fine to
> block; perhaps all the way up in remap_pages() around the whole loop
> or something like that. mmu_notifier_invalidate_range_start_nonblock()
> is special magic sauce for OOM reaping (with deceptively inviting
> naming and no documentation that explains that this is evil hazmat
> code), and if you chase down what various users of MMU notifiers do
> when you just hand them a random notification where
> mmu_notifier_range_blockable() is false, you end up in fun paths like
> in KVM's kvm_mmu_notifier_invalidate_range_start(), which calls
> gfn_to_pfn_cache_invalidate_start(), which does this:
>
>     /*
>      * If the OOM reaper is active, then all vCPUs should have
>      * been stopped already, so perform the request without
>      * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
>      */
>     if (!may_block)
>        req &= ~KVM_REQUEST_WAIT;
>
>     called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
>
>     WARN_ON_ONCE(called && !may_block);
>
> Which means if you hit this codepath from a place like userfaultfd
> where the process is probably still running (and not being OOM reaped
> in a context where it's guaranteed to have been killed and stopped),
> you could probably get KVM into a situation where it needs to wait for
> vCPUs to acknowledge that they've processed a message before it's safe
> to continue, but it can't wait because we're in nonsleepable context,
> and then it just continues without waiting and presumably the KVM TLB
> gets out of sync or something?

Yeah, I was sceptical about this function too but it was very tempting :)
Thanks for the warning, I'll move
mmu_notifier_invalidate_range_start() to before we map the ptes
because that's where we start the RCU read section.
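
Roughly something like this (just a sketch, reusing the names from the
code above; the final placement may differ):

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm,
				src_addr, src_addr + PAGE_SIZE);
	/* may block, so do it before the PTEs are mapped */
	mmu_notifier_invalidate_range_start(&range);
retry:
	dst_pte = pte_offset_map_nolock(dst_mm, dst_pmd, dst_addr, &dst_ptl);
	...
	/* and mmu_notifier_invalidate_range_end(&range) on all exit paths */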

>
>
>
> > +
> > +               double_pt_lock(dst_ptl, src_ptl);
> > +
> > +               if (!pte_same(*src_pte, orig_src_pte) ||
> > +                   !pte_same(*dst_pte, orig_dst_pte) ||
> > +                   folio_estimated_sharers(src_folio) != 1) {
> > +                       double_pt_unlock(dst_ptl, src_ptl);
> > +                       err = -EAGAIN;
> > +                       goto out;
> > +               }
> > +
> > +               BUG_ON(!folio_test_anon(src_folio));
> > +               /* the PT lock is enough to keep the page pinned now */
> > +               folio_put(src_folio);
> > +
> > +               dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> > +               WRITE_ONCE(src_folio->mapping,
> > +                          (struct address_space *) dst_anon_vma);
> > +               WRITE_ONCE(src_folio->index, linear_page_index(dst_vma,
> > +                                                             dst_addr));
> > +
> > +               if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
> > +                             orig_src_pte))
> > +                       BUG_ON(1);
> > +
> > +               orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
> > +               orig_dst_pte = maybe_mkwrite(pte_mkdirty(orig_dst_pte),
> > +                                            dst_vma);
> > +
> > +               set_pte_at(dst_mm, dst_addr, dst_pte, orig_dst_pte);
> > +
> > +               if (dst_mm != src_mm) {
> > +                       inc_mm_counter(dst_mm, MM_ANONPAGES);
> > +                       dec_mm_counter(src_mm, MM_ANONPAGES);
> > +               }
> > +
> > +               double_pt_unlock(dst_ptl, src_ptl);
> > +
> > +               anon_vma_unlock_write(src_anon_vma);
> > +               mmu_notifier_invalidate_range_end(&range);
> > +               put_anon_vma(src_anon_vma);
> > +               src_anon_vma = NULL;
> > +
> > +               /* unblock rmap walks */
> > +               folio_unlock(src_folio);
> > +               src_folio = NULL;
> > +
> > +       } else {
> > +               struct swap_info_struct *si;
> > +               int swap_count;
> > +
> > +               entry = pte_to_swp_entry(orig_src_pte);
> > +               if (non_swap_entry(entry)) {
> > +                       if (is_migration_entry(entry)) {
> > +                               pte_unmap(&orig_src_pte);
> > +                               pte_unmap(&orig_dst_pte);
> > +                               src_pte = dst_pte = NULL;
> > +                               migration_entry_wait(src_mm, src_pmd,
> > +                                                    src_addr);
> > +                               err = -EAGAIN;
> > +                       } else
> > +                               err = -EFAULT;
> > +                       goto out;
> > +               }
> > +
> > +               /*
> > +                * COUNT_CONTINUE to be returned is fine here, no need
> > +                * of follow all swap continuation to check against
> > +                * number 1.
> > +                */
> > +               si = get_swap_device(entry);
> > +               if (!si) {
> > +                       err = -EBUSY;
> > +                       goto out;
> > +               }
> > +
> > +               swap_count = swap_swapcount(si, entry);
> > +               put_swap_device(si);
> > +               if (swap_count != 1) {
> > +                       err = -EBUSY;
> > +                       goto out;
> > +               }
> > +
> > +               double_pt_lock(dst_ptl, src_ptl);
> > +
> > +               if (!pte_same(*src_pte, orig_src_pte) ||
> > +                   !pte_same(*dst_pte, orig_dst_pte) ||
> > +                   swp_swapcount(entry) != 1) {
> > +                       double_pt_unlock(dst_ptl, src_ptl);
> > +                       err = -EAGAIN;
> > +                       goto out;
> > +               }
> > +
> > +               if (pte_val(ptep_get_and_clear(src_mm, src_addr, src_pte)) !=
> > +                   pte_val(orig_src_pte))
> > +                       BUG_ON(1);
> > +               set_pte_at(dst_mm, dst_addr, dst_pte, orig_src_pte);
> > +
> > +               if (dst_mm != src_mm) {
> > +                       inc_mm_counter(dst_mm, MM_ANONPAGES);
> > +                       dec_mm_counter(src_mm, MM_ANONPAGES);
> > +               }
> > +
> > +               double_pt_unlock(dst_ptl, src_ptl);
> > +       }
> > +
> > +out:
> > +       if (src_anon_vma) {
> > +               anon_vma_unlock_write(src_anon_vma);
> > +               put_anon_vma(src_anon_vma);
> > +       }
> > +       if (src_folio) {
> > +               folio_unlock(src_folio);
> > +               folio_put(src_folio);
> > +       }
> > +       if (dst_pte)
> > +               pte_unmap(dst_pte);
> > +       if (src_pte)
> > +               pte_unmap(src_pte);
> > +
> > +       return err;
> > +}
Suren Baghdasaryan Sept. 19, 2023, 11:08 p.m. UTC | #14
On Thu, Sep 14, 2023 at 7:28 PM Jann Horn <jannh@google.com> wrote:
>
> On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> >
> > From: Andrea Arcangeli <aarcange@redhat.com>
> >
> > This implements the uABI of UFFDIO_REMAP.
> >
> > Notably one mode bitflag is also forwarded (and in turn known) by the
> > lowlevel remap_pages method.
> >
> > Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  fs/userfaultfd.c                 |  49 +++
> >  include/linux/rmap.h             |   5 +
> >  include/linux/userfaultfd_k.h    |  17 +
> >  include/uapi/linux/userfaultfd.h |  22 ++
> >  mm/huge_memory.c                 | 118 +++++++
> >  mm/khugepaged.c                  |   3 +
> >  mm/userfaultfd.c                 | 586 +++++++++++++++++++++++++++++++
> >  7 files changed, 800 insertions(+)
> >
> > diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> > index 56eaae9dac1a..7bf64e7541c1 100644
> > --- a/fs/userfaultfd.c
> > +++ b/fs/userfaultfd.c
> > @@ -2027,6 +2027,52 @@ static inline unsigned int uffd_ctx_features(__u64 user_features)
> >         return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
> >  }
> >
> > +static int userfaultfd_remap(struct userfaultfd_ctx *ctx,
> > +                            unsigned long arg)
> > +{
> > +       __s64 ret;
> > +       struct uffdio_remap uffdio_remap;
> > +       struct uffdio_remap __user *user_uffdio_remap;
> > +       struct userfaultfd_wake_range range;
> > +
> > +       user_uffdio_remap = (struct uffdio_remap __user *) arg;
> > +
> > +       ret = -EFAULT;
> > +       if (copy_from_user(&uffdio_remap, user_uffdio_remap,
> > +                          /* don't copy "remap" last field */
> > +                          sizeof(uffdio_remap)-sizeof(__s64)))
> > +               goto out;
> > +
> > +       ret = validate_range(ctx->mm, uffdio_remap.dst, uffdio_remap.len);
> > +       if (ret)
> > +               goto out;
> > +       ret = validate_range(current->mm, uffdio_remap.src, uffdio_remap.len);
> > +       if (ret)
> > +               goto out;
> > +       ret = -EINVAL;
> > +       if (uffdio_remap.mode & ~(UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES|
> > +                                 UFFDIO_REMAP_MODE_DONTWAKE))
> > +               goto out;
>

Sorry for the delay with the answers...

> Do you not need mmget_not_zero(ctx->mm) to make sure the MM can't be
> concurrently torn down while remap_pages() is running, similar to what
> the other userfaultfd ioctl handlers do?

Yes, I do need that. Will add.
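
Something along the lines of what the other uffd ioctl handlers do
(just a sketch):

	if (mmget_not_zero(ctx->mm)) {
		ret = remap_pages(ctx->mm, current->mm,
				  uffdio_remap.dst, uffdio_remap.src,
				  uffdio_remap.len, uffdio_remap.mode);
		mmput(ctx->mm);
	} else {
		return -ESRCH;
	}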


>
> > +       ret = remap_pages(ctx->mm, current->mm,
> > +                         uffdio_remap.dst, uffdio_remap.src,
> > +                         uffdio_remap.len, uffdio_remap.mode);
> > +       if (unlikely(put_user(ret, &user_uffdio_remap->remap)))
> > +               return -EFAULT;
> > +       if (ret < 0)
> > +               goto out;
> > +       /* len == 0 would wake all */
> > +       BUG_ON(!ret);
> > +       range.len = ret;
> > +       if (!(uffdio_remap.mode & UFFDIO_REMAP_MODE_DONTWAKE)) {
> > +               range.start = uffdio_remap.dst;
> > +               wake_userfault(ctx, &range);
> > +       }
> > +       ret = range.len == uffdio_remap.len ? 0 : -EAGAIN;
> > +out:
> > +       return ret;
> > +}
> [...]
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 064fbd90822b..c7a9880a1f6a 100644
> > --- a/mm/huge_memory.c
> > +++ b/mm/huge_memory.c
> > @@ -1932,6 +1932,124 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
> >         return ret;
> >  }
> >
> > +#ifdef CONFIG_USERFAULTFD
> > +/*
> > + * The PT lock for src_pmd and the mmap_lock for reading are held by
> > + * the caller, but it must return after releasing the
> > + * page_table_lock. We're guaranteed the src_pmd is a pmd_trans_huge
> > + * until the PT lock of the src_pmd is released. Just move the page
> > + * from src_pmd to dst_pmd if possible. Return zero if succeeded in
> > + * moving the page, -EAGAIN if it needs to be repeated by the caller,
> > + * or other errors in case of failure.
> > + */
> > +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> > +                        struct mm_struct *src_mm,
> > +                        pmd_t *dst_pmd, pmd_t *src_pmd,
> > +                        pmd_t dst_pmdval,
> > +                        struct vm_area_struct *dst_vma,
> > +                        struct vm_area_struct *src_vma,
> > +                        unsigned long dst_addr,
> > +                        unsigned long src_addr)
> > +{
> > +       pmd_t _dst_pmd, src_pmdval;
> > +       struct page *src_page;
> > +       struct anon_vma *src_anon_vma, *dst_anon_vma;
> > +       spinlock_t *src_ptl, *dst_ptl;
> > +       pgtable_t pgtable;
> > +       struct mmu_notifier_range range;
> > +
> > +       src_pmdval = *src_pmd;
> > +       src_ptl = pmd_lockptr(src_mm, src_pmd);
> > +
> > +       BUG_ON(!pmd_trans_huge(src_pmdval));
> > +       BUG_ON(!pmd_none(dst_pmdval));
>
> Why can we assert that pmd_none(dst_pmdval) is true here? Can we not
> have concurrent faults (or userfaultfd operations) populating that
> PMD?

IIUC dst_pmdval is a copy of the value from dst_pmd, so that local
copy should not change even if some concurrent operation changes
dst_pmd. We can assert that it's pmd_none because we checked for that
before calling remap_pages_huge_pmd. Later on we check if dst_pmd
changed from under us (see pmd_same(*dst_pmd, dst_pmdval) check) and
retry if that happened.

>
> > +       BUG_ON(!spin_is_locked(src_ptl));
> > +       mmap_assert_locked(src_mm);
> > +       mmap_assert_locked(dst_mm);
> > +       BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> > +       BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> > +
> > +       src_page = pmd_page(src_pmdval);
> > +       BUG_ON(!PageHead(src_page));
> > +       BUG_ON(!PageAnon(src_page));
> > +       if (unlikely(page_mapcount(src_page) != 1)) {
> > +               spin_unlock(src_ptl);
> > +               return -EBUSY;
> > +       }
> > +
> > +       get_page(src_page);
> > +       spin_unlock(src_ptl);
> > +
> > +       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm, src_addr,
> > +                               src_addr + HPAGE_PMD_SIZE);
> > +       mmu_notifier_invalidate_range_start(&range);
> > +
> > +       /* block all concurrent rmap walks */
> > +       lock_page(src_page);
> > +
> > +       /*
> > +        * split_huge_page walks the anon_vma chain without the page
> > +        * lock. Serialize against it with the anon_vma lock, the page
> > +        * lock is not enough.
> > +        */
> > +       src_anon_vma = folio_get_anon_vma(page_folio(src_page));
> > +       if (!src_anon_vma) {
> > +               unlock_page(src_page);
> > +               put_page(src_page);
> > +               mmu_notifier_invalidate_range_end(&range);
> > +               return -EAGAIN;
> > +       }
> > +       anon_vma_lock_write(src_anon_vma);
> > +
> > +       dst_ptl = pmd_lockptr(dst_mm, dst_pmd);
> > +       double_pt_lock(src_ptl, dst_ptl);
> > +       if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
> > +                    !pmd_same(*dst_pmd, dst_pmdval) ||
> > +                    page_mapcount(src_page) != 1)) {
> > +               double_pt_unlock(src_ptl, dst_ptl);
> > +               anon_vma_unlock_write(src_anon_vma);
> > +               put_anon_vma(src_anon_vma);
> > +               unlock_page(src_page);
> > +               put_page(src_page);
> > +               mmu_notifier_invalidate_range_end(&range);
> > +               return -EAGAIN;
> > +       }
> > +
> > +       BUG_ON(!PageHead(src_page));
> > +       BUG_ON(!PageAnon(src_page));
> > +       /* the PT lock is enough to keep the page pinned now */
> > +       put_page(src_page);
> > +
> > +       dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> > +       WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
> > +       WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));
> > +
> > +       if (!pmd_same(pmdp_huge_clear_flush(src_vma, src_addr, src_pmd),
> > +                     src_pmdval))
> > +               BUG_ON(1);
>
> I'm not sure we can assert that the PMDs are exactly equal; the CPU
> might have changed the A/D bits under us?

Yes. I wonder if I can simply remove the BUG_ON here like this:

src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);

Technically we don't use src_pmdval after this, but keeping the value
returned by pmdp_huge_clear_flush() would stay correct for possible
future use. If the A/D bits changed from under us, we will still
install a correct value into dst_pmd.

>
> > +       _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
> > +       _dst_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
> > +       set_pmd_at(dst_mm, dst_addr, dst_pmd, _dst_pmd);
> > +
> > +       pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
> > +       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
>
> Are we allowed to move page tables between mm_structs on all
> architectures? The first example I found that looks a bit dodgy,
> looking through various architectures' pte_alloc_one(), is s390's
> page_table_alloc() which looks like page tables are tied to per-MM
> lists sometimes.
> If that's not allowed, we might have to allocate a new deposit table
> and free the old one or something like that.

Hmm. Yeah, looks like in the case of !CONFIG_PGSTE the table can be
linked to mm->context.pgtable_list, so can't be moved to another mm. I
guess I'll have to keep a pgtable allocated, ready to be deposited and
free the old one. Maybe it's worth having an arch-specific function
indicating whether moving a pgtable between MMs is supported? Or do it
separately as an optimization. WDYT?
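
Very roughly, the fallback could look like this (just a sketch;
"dst_pgtable" is a hypothetical table pre-allocated with
pte_alloc_one(dst_mm) before the PT locks are taken, and the old table
is freed only after they are dropped; per-MM page-table accounting
would need to move as well):

	pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, dst_pgtable);
	...
	double_pt_unlock(src_ptl, dst_ptl);
	...
	pte_free(src_mm, pgtable);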

>
> > +       if (dst_mm != src_mm) {
> > +               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
> > +               add_mm_counter(src_mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> > +       }
> > +       double_pt_unlock(src_ptl, dst_ptl);
> > +
> > +       anon_vma_unlock_write(src_anon_vma);
> > +       put_anon_vma(src_anon_vma);
> > +
> > +       /* unblock rmap walks */
> > +       unlock_page(src_page);
> > +
> > +       mmu_notifier_invalidate_range_end(&range);
> > +       return 0;
> > +}
> > +#endif /* CONFIG_USERFAULTFD */
> > +
> >  /*
> >   * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
> >   *
> [...]
> > diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> > index 96d9eae5c7cc..0cca60dfa8f8 100644
> > --- a/mm/userfaultfd.c
> > +++ b/mm/userfaultfd.c
> [...]
> > +ssize_t remap_pages(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> > +                   unsigned long dst_start, unsigned long src_start,
> > +                   unsigned long len, __u64 mode)
> > +{
> [...]
> > +
> > +       if (pgprot_val(src_vma->vm_page_prot) !=
> > +           pgprot_val(dst_vma->vm_page_prot))
> > +               goto out;
>
> Does this check intentionally allow moving pages from a
> PROT_READ|PROT_WRITE anonymous private VMA into a PROT_READ anonymous
> private VMA (on architectures like x86 and arm64 where CoW memory has
> the same protection flags as read-only memory), but forbid moving them
> from a PROT_READ|PROT_EXEC VMA into a PROT_READ VMA? I think this
> check needs at least a comment to explain what's going on here.

The check is simply to ensure the VMAs have the same access
permissions, to prevent moving pages into a mapping where they would
end up with different permissions. The fact that x86 and arm64 have
the same protection bits for R/O and COW memory is a "side-effect"
IMHO. I'm not sure what comment would be good here but I'm open to
suggestions.
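
Maybe something along these lines directly above the check (wording
suggestion only):

	/*
	 * Only allow moving pages between VMAs with identical page
	 * protection bits, so a moved PTE never gains or loses
	 * access permissions as a side effect of the move.
	 */
	if (pgprot_val(src_vma->vm_page_prot) !=
	    pgprot_val(dst_vma->vm_page_prot))
		goto out;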

>
> > +       /* only allow remapping if both are mlocked or both aren't */
> > +       if ((src_vma->vm_flags & VM_LOCKED) ^ (dst_vma->vm_flags & VM_LOCKED))
> > +               goto out;
> > +
> > +       /*
> > +        * Be strict and only allow remap_pages if either the src or
> > +        * dst range is registered in the userfaultfd to prevent
> > +        * userland errors going unnoticed. As far as the VM
> > +        * consistency is concerned, it would be perfectly safe to
> > +        * remove this check, but there's no useful usage for
> > +        * remap_pages ouside of userfaultfd registered ranges. This
> > +        * is after all why it is an ioctl belonging to the
> > +        * userfaultfd and not a syscall.
> > +        *
> > +        * Allow both vmas to be registered in the userfaultfd, just
> > +        * in case somebody finds a way to make such a case useful.
> > +        * Normally only one of the two vmas would be registered in
> > +        * the userfaultfd.
> > +        */
> > +       if (!dst_vma->vm_userfaultfd_ctx.ctx &&
> > +           !src_vma->vm_userfaultfd_ctx.ctx)
> > +               goto out;
> > +
> > +       /*
> > +        * FIXME: only allow remapping across anonymous vmas,
> > +        * tmpfs should be added.
> > +        */
> > +       if (src_vma->vm_ops || dst_vma->vm_ops)
> > +               goto out;
>
> I don't think it's okay to check for anonymous VMAs by checking
> ->vm_ops. There are some weird drivers whose ->mmap helpers don't set
> ->vm_ops and instead just shove all the necessary PTEs into the VMA
> right on ->mmap, so I think they end up with ->vm_ops==NULL. For
> example, kcov_mmap() looks that way. I'm not sure how common this is.
>
> Though, uuuuuh, I guess if that's true, the existing
> vma_is_anonymous() is broken, since that also just checks ->vm_ops?
> I'm not sure what the consequences of that would be... Either way,
> vma_is_anonymous() might be the better way to check for anonymous VMAs
> here, and someone should figure out whether vma_is_anonymous() needs
> to be fixed.

Absolutely, vma_is_anonymous() should have been used here. Will fix it.
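
I.e. something like (sketch):

	/*
	 * FIXME: only allow remapping across anonymous vmas,
	 * tmpfs should be added.
	 */
	if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
		goto out;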

>
> > +       /*
> > +        * Ensure the dst_vma has a anon_vma or this page
> > +        * would get a NULL anon_vma when moved in the
> > +        * dst_vma.
> > +        */
> > +       err = -ENOMEM;
> > +       if (unlikely(anon_vma_prepare(dst_vma)))
> > +               goto out;
> > +
> > +       for (src_addr = src_start, dst_addr = dst_start;
> > +            src_addr < src_start + len;) {
> > +               spinlock_t *ptl;
> > +               pmd_t dst_pmdval;
> > +
> > +               BUG_ON(dst_addr >= dst_start + len);
> > +               src_pmd = mm_find_pmd(src_mm, src_addr);
>
> (this would blow up pretty badly if we could have transparent huge PUD
> in the region but I think that's limited to file VMAs so it's fine as
> it currently is)

Should I add a comment here as a warning if in the future we decide to
implement support for file-backed pages?
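
For example (wording suggestion only):

		/*
		 * Anonymous memory has no transparent huge PUDs, so
		 * walking down to the PMD level is enough here; this
		 * needs revisiting if file-backed VMAs ever get
		 * supported.
		 */
		src_pmd = mm_find_pmd(src_mm, src_addr);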

>
> > +               if (unlikely(!src_pmd)) {
> > +                       if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
> > +                               err = -ENOENT;
> > +                               break;
> > +                       }
> > +                       src_pmd = mm_alloc_pmd(src_mm, src_addr);
> > +                       if (unlikely(!src_pmd)) {
> > +                               err = -ENOMEM;
> > +                               break;
> > +                       }
> > +               }
> > +               dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
> > +               if (unlikely(!dst_pmd)) {
> > +                       err = -ENOMEM;
> > +                       break;
> > +               }
> > +
> > +               dst_pmdval = pmdp_get_lockless(dst_pmd);
> > +               /*
> > +                * If the dst_pmd is mapped as THP don't
> > +                * override it and just be strict.
> > +                */
> > +               if (unlikely(pmd_trans_huge(dst_pmdval))) {
> > +                       err = -EEXIST;
> > +                       break;
> > +               }
>
> This check is racy because the dst_pmd can still change at this point,
> from previously pointing to a zeroed PMD to now pointing to a
> hugepage, right? And we rely on remap_pages_pte() and
> remap_pages_huge_pmd() to recheck for that?
> If yes, maybe add a comment noting this and explaining why we want this check.

Yes, you are right. remap_pages_huge_pmd() does check
pmd_same(*dst_pmd, dst_pmdval) after doing double_pt_lock(), so it
will bail out with -EAGAIN on this check (and the caller retries) if
dst_pmd got mapped as a THP from under us. remap_pages_pte() OTOH
simply does BUG_ON(pmd_trans_huge(*dst_pmd)), which is wrong. Instead
it should also check whether the PMD value changed from under us and
retry in such a case. I'll fix that and will add a comment here about
this racy check and the retry logic.
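
Something like this in remap_pages_pte(), assuming dst_pmdval gets
passed down to it (just a sketch):

	/* dst PMD changed under us (e.g. a THP got faulted in): retry */
	if (unlikely(!pmd_same(*dst_pmd, dst_pmdval))) {
		err = -EAGAIN;
		goto out;
	}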

>
> > +               ptl = pmd_trans_huge_lock(src_pmd, src_vma);
> > +               if (ptl) {
> > +                       /*
> > +                        * Check if we can move the pmd without
> > +                        * splitting it. First check the address
> > +                        * alignment to be the same in src/dst.  These
> > +                        * checks don't actually need the PT lock but
> > +                        * it's good to do it here to optimize this
> > +                        * block away at build time if
> > +                        * CONFIG_TRANSPARENT_HUGEPAGE is not set.
> > +                        */
> > +                       if (thp_aligned == -1)
> > +                               thp_aligned = ((src_addr & ~HPAGE_PMD_MASK) ==
> > +                                              (dst_addr & ~HPAGE_PMD_MASK));
> > +                       if (!thp_aligned || (src_addr & ~HPAGE_PMD_MASK) ||
>
> This seems overly complicated, the only case when you can move a huge
> PMD is if both addresses are hugepage-aligned and you have enough
> length for one hugepage:
>
> (src_addr & ~HPAGE_PMD_MASK) == 0 && (dst_addr & ~HPAGE_PMD_MASK) == 0
> && (src_start + len - src_addr >= HPAGE_PMD_SIZE).

Ack.
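
So the split check would become something like (sketch, keeping the
existing !pmd_none(dst_pmdval) case):

			if ((src_addr & ~HPAGE_PMD_MASK) ||
			    (dst_addr & ~HPAGE_PMD_MASK) ||
			    !pmd_none(dst_pmdval) ||
			    src_start + len - src_addr < HPAGE_PMD_SIZE) {
				spin_unlock(ptl);
				/* Fall through to the per-PTE path */
				split_huge_pmd(src_vma, src_pmd, src_addr);
			} else {
				err = remap_pages_huge_pmd(...);
				...
			}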

>
> > +                           !pmd_none(dst_pmdval) ||
> > +                           src_start + len - src_addr < HPAGE_PMD_SIZE) {
> > +                               spin_unlock(ptl);
> > +                               /* Fall through */
> > +                               split_huge_pmd(src_vma, src_pmd, src_addr);
> > +                       } else {
> > +                               err = remap_pages_huge_pmd(dst_mm,
> > +                                                          src_mm,
> > +                                                          dst_pmd,
> > +                                                          src_pmd,
> > +                                                          dst_pmdval,
> > +                                                          dst_vma,
> > +                                                          src_vma,
> > +                                                          dst_addr,
> > +                                                          src_addr);
> > +                               cond_resched();
> > +
> > +                               if (!err) {
> > +                                       dst_addr += HPAGE_PMD_SIZE;
> > +                                       src_addr += HPAGE_PMD_SIZE;
> > +                                       moved += HPAGE_PMD_SIZE;
> > +                               }
> > +
> > +                               if ((!err || err == -EAGAIN) &&
> > +                                   fatal_signal_pending(current))
> > +                                       err = -EINTR;
> > +
> > +                               if (err && err != -EAGAIN)
> > +                                       break;
> > +
> > +                               continue;
> > +                       }
> > +               }
> > +
> > +               if (pmd_none(*src_pmd)) {
> > +                       if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
> > +                               err = -ENOENT;
> > +                               break;
> > +                       }
> > +                       if (unlikely(__pte_alloc(src_mm, src_pmd))) {
> > +                               err = -ENOMEM;
> > +                               break;
> > +                       }
> > +               }
> > +
> > +               if (unlikely(pmd_none(dst_pmdval)) &&
> > +                   unlikely(__pte_alloc(dst_mm, dst_pmd))) {
>
> Maybe just use pte_alloc() here?

Ack.

>
> > +                       err = -ENOMEM;
> > +                       break;
> > +               }
> > +
> > +               err = remap_pages_pte(dst_mm, src_mm,
> > +                                     dst_pmd, src_pmd,
> > +                                     dst_vma, src_vma,
> > +                                     dst_addr, src_addr,
> > +                                     mode);
> > +
> > +               cond_resched();
> > +
> > +               if (!err) {
> > +                       dst_addr += PAGE_SIZE;
> > +                       src_addr += PAGE_SIZE;
> > +                       moved += PAGE_SIZE;
> > +               }
> > +
> > +               if ((!err || err == -EAGAIN) &&
> > +                   fatal_signal_pending(current))
> > +                       err = -EINTR;
> > +
> > +               if (err && err != -EAGAIN)
> > +                       break;
> > +       }
> > +
> > +out:
> > +       mmap_read_unlock(dst_mm);
> > +       if (dst_mm != src_mm)
> > +               mmap_read_unlock(src_mm);
> > +       BUG_ON(moved < 0);
> > +       BUG_ON(err > 0);
> > +       BUG_ON(!moved && !err);
> > +       return moved ? moved : err;
> > +}
>
> Maybe you could try whether this function would look simpler with a
> shape roughly like:
>
> for (src_addr = ...; src_addr < ...;) {
>   unsigned long step_size;
>
>   if (hugepage case) {
>     if (have to split) {
>       split it;
>       continue;
>     }
>     step_size = HPAGE_PMD_SIZE;
>     ...
>   } else {
>     ... 4k case ...
>     step_size = PAGE_SIZE;
>   }
>   ...
>   cond_resched();
>   if (!err) {
>     dst_addr += step_size;
>     src_addr += step_size;
>     moved += step_size;
>   }

Yeah, that looks simpler. I'll try that, thanks!

>   ...
> }
Suren Baghdasaryan Sept. 19, 2023, 11:40 p.m. UTC | #15
On Tue, Sep 19, 2023 at 4:08 PM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Thu, Sep 14, 2023 at 7:28 PM Jann Horn <jannh@google.com> wrote:
> >
> > On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > >
> > > From: Andrea Arcangeli <aarcange@redhat.com>
> > >
> > > This implements the uABI of UFFDIO_REMAP.
> > >
> > > Notably one mode bitflag is also forwarded (and in turn known) by the
> > > lowlevel remap_pages method.
> > >
> > > Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
> > > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > > ---
> > >  fs/userfaultfd.c                 |  49 +++
> > >  include/linux/rmap.h             |   5 +
> > >  include/linux/userfaultfd_k.h    |  17 +
> > >  include/uapi/linux/userfaultfd.h |  22 ++
> > >  mm/huge_memory.c                 | 118 +++++++
> > >  mm/khugepaged.c                  |   3 +
> > >  mm/userfaultfd.c                 | 586 +++++++++++++++++++++++++++++++
> > >  7 files changed, 800 insertions(+)
> > >
> > > diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
> > > index 56eaae9dac1a..7bf64e7541c1 100644
> > > --- a/fs/userfaultfd.c
> > > +++ b/fs/userfaultfd.c
> > > @@ -2027,6 +2027,52 @@ static inline unsigned int uffd_ctx_features(__u64 user_features)
> > >         return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
> > >  }
> > >
> > > +static int userfaultfd_remap(struct userfaultfd_ctx *ctx,
> > > +                            unsigned long arg)
> > > +{
> > > +       __s64 ret;
> > > +       struct uffdio_remap uffdio_remap;
> > > +       struct uffdio_remap __user *user_uffdio_remap;
> > > +       struct userfaultfd_wake_range range;
> > > +
> > > +       user_uffdio_remap = (struct uffdio_remap __user *) arg;
> > > +
> > > +       ret = -EFAULT;
> > > +       if (copy_from_user(&uffdio_remap, user_uffdio_remap,
> > > +                          /* don't copy "remap" last field */
> > > +                          sizeof(uffdio_remap)-sizeof(__s64)))
> > > +               goto out;
> > > +
> > > +       ret = validate_range(ctx->mm, uffdio_remap.dst, uffdio_remap.len);
> > > +       if (ret)
> > > +               goto out;
> > > +       ret = validate_range(current->mm, uffdio_remap.src, uffdio_remap.len);
> > > +       if (ret)
> > > +               goto out;
> > > +       ret = -EINVAL;
> > > +       if (uffdio_remap.mode & ~(UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES|
> > > +                                 UFFDIO_REMAP_MODE_DONTWAKE))
> > > +               goto out;
> >
>
> Sorry for the delay with the answers...
>
> > Do you not need mmget_not_zero(ctx->mm) to make sure the MM can't be
> > concurrently torn down while remap_pages() is running, similar to what
> > the other userfaultfd ioctl handlers do?
>
> Yes, I do need that. Will add.
>
>
> >
> > > +       ret = remap_pages(ctx->mm, current->mm,
> > > +                         uffdio_remap.dst, uffdio_remap.src,
> > > +                         uffdio_remap.len, uffdio_remap.mode);
> > > +       if (unlikely(put_user(ret, &user_uffdio_remap->remap)))
> > > +               return -EFAULT;
> > > +       if (ret < 0)
> > > +               goto out;
> > > +       /* len == 0 would wake all */
> > > +       BUG_ON(!ret);
> > > +       range.len = ret;
> > > +       if (!(uffdio_remap.mode & UFFDIO_REMAP_MODE_DONTWAKE)) {
> > > +               range.start = uffdio_remap.dst;
> > > +               wake_userfault(ctx, &range);
> > > +       }
> > > +       ret = range.len == uffdio_remap.len ? 0 : -EAGAIN;
> > > +out:
> > > +       return ret;
> > > +}
> > [...]
> > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > > index 064fbd90822b..c7a9880a1f6a 100644
> > > --- a/mm/huge_memory.c
> > > +++ b/mm/huge_memory.c
> > > @@ -1932,6 +1932,124 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
> > >         return ret;
> > >  }
> > >
> > > +#ifdef CONFIG_USERFAULTFD
> > > +/*
> > > + * The PT lock for src_pmd and the mmap_lock for reading are held by
> > > + * the caller, but it must return after releasing the
> > > + * page_table_lock. We're guaranteed the src_pmd is a pmd_trans_huge
> > > + * until the PT lock of the src_pmd is released. Just move the page
> > > + * from src_pmd to dst_pmd if possible. Return zero if succeeded in
> > > + * moving the page, -EAGAIN if it needs to be repeated by the caller,
> > > + * or other errors in case of failure.
> > > + */
> > > +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> > > +                        struct mm_struct *src_mm,
> > > +                        pmd_t *dst_pmd, pmd_t *src_pmd,
> > > +                        pmd_t dst_pmdval,
> > > +                        struct vm_area_struct *dst_vma,
> > > +                        struct vm_area_struct *src_vma,
> > > +                        unsigned long dst_addr,
> > > +                        unsigned long src_addr)
> > > +{
> > > +       pmd_t _dst_pmd, src_pmdval;
> > > +       struct page *src_page;
> > > +       struct anon_vma *src_anon_vma, *dst_anon_vma;
> > > +       spinlock_t *src_ptl, *dst_ptl;
> > > +       pgtable_t pgtable;
> > > +       struct mmu_notifier_range range;
> > > +
> > > +       src_pmdval = *src_pmd;
> > > +       src_ptl = pmd_lockptr(src_mm, src_pmd);
> > > +
> > > +       BUG_ON(!pmd_trans_huge(src_pmdval));
> > > +       BUG_ON(!pmd_none(dst_pmdval));
> >
> > Why can we assert that pmd_none(dst_pmdval) is true here? Can we not
> > have concurrent faults (or userfaultfd operations) populating that
> > PMD?
>
> IIUC dst_pmdval is a copy of the value from dst_pmd, so that local
> copy should not change even if some concurrent operation changes
> dst_pmd. We can assert that it's pmd_none because we checked for that
> before calling remap_pages_huge_pmd. Later on we check if dst_pmd
> changed from under us (see pmd_same(*dst_pmd, dst_pmdval) check) and
> retry if that happened.
>
> >
> > > +       BUG_ON(!spin_is_locked(src_ptl));
> > > +       mmap_assert_locked(src_mm);
> > > +       mmap_assert_locked(dst_mm);
> > > +       BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> > > +       BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> > > +
> > > +       src_page = pmd_page(src_pmdval);
> > > +       BUG_ON(!PageHead(src_page));
> > > +       BUG_ON(!PageAnon(src_page));
> > > +       if (unlikely(page_mapcount(src_page) != 1)) {
> > > +               spin_unlock(src_ptl);
> > > +               return -EBUSY;
> > > +       }
> > > +
> > > +       get_page(src_page);
> > > +       spin_unlock(src_ptl);
> > > +
> > > +       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm, src_addr,
> > > +                               src_addr + HPAGE_PMD_SIZE);
> > > +       mmu_notifier_invalidate_range_start(&range);
> > > +
> > > +       /* block all concurrent rmap walks */
> > > +       lock_page(src_page);
> > > +
> > > +       /*
> > > +        * split_huge_page walks the anon_vma chain without the page
> > > +        * lock. Serialize against it with the anon_vma lock, the page
> > > +        * lock is not enough.
> > > +        */
> > > +       src_anon_vma = folio_get_anon_vma(page_folio(src_page));
> > > +       if (!src_anon_vma) {
> > > +               unlock_page(src_page);
> > > +               put_page(src_page);
> > > +               mmu_notifier_invalidate_range_end(&range);
> > > +               return -EAGAIN;
> > > +       }
> > > +       anon_vma_lock_write(src_anon_vma);
> > > +
> > > +       dst_ptl = pmd_lockptr(dst_mm, dst_pmd);
> > > +       double_pt_lock(src_ptl, dst_ptl);
> > > +       if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
> > > +                    !pmd_same(*dst_pmd, dst_pmdval) ||
> > > +                    page_mapcount(src_page) != 1)) {
> > > +               double_pt_unlock(src_ptl, dst_ptl);
> > > +               anon_vma_unlock_write(src_anon_vma);
> > > +               put_anon_vma(src_anon_vma);
> > > +               unlock_page(src_page);
> > > +               put_page(src_page);
> > > +               mmu_notifier_invalidate_range_end(&range);
> > > +               return -EAGAIN;
> > > +       }
> > > +
> > > +       BUG_ON(!PageHead(src_page));
> > > +       BUG_ON(!PageAnon(src_page));
> > > +       /* the PT lock is enough to keep the page pinned now */
> > > +       put_page(src_page);
> > > +
> > > +       dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> > > +       WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
> > > +       WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));
> > > +
> > > +       if (!pmd_same(pmdp_huge_clear_flush(src_vma, src_addr, src_pmd),
> > > +                     src_pmdval))
> > > +               BUG_ON(1);
> >
> > I'm not sure we can assert that the PMDs are exactly equal; the CPU
> > might have changed the A/D bits under us?
>
> Yes. I wonder if I can simply remove the BUG_ON here like this:
>
> src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
>
> Technically we don't use src_pmdval after this but for the possible
> future use that would keep things correct. If A/D bits changed from
> under us we will still copy correct values into dst_pmd.
>
> >
> > > +       _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
> > > +       _dst_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
> > > +       set_pmd_at(dst_mm, dst_addr, dst_pmd, _dst_pmd);
> > > +
> > > +       pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
> > > +       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
> >
> > Are we allowed to move page tables between mm_structs on all
> > architectures? The first example I found that looks a bit dodgy,
> > looking through various architectures' pte_alloc_one(), is s390's
> > page_table_alloc() which looks like page tables are tied to per-MM
> > lists sometimes.
> > If that's not allowed, we might have to allocate a new deposit table
> > and free the old one or something like that.
>
> Hmm. Yeah, looks like in the case of !CONFIG_PGSTE the table can be
> linked to mm->context.pgtable_list, so can't be moved to another mm. I
> guess I'll have to keep a pgtable allocated, ready to be deposited and
> free the old one. Maybe it's worth having an arch-specific function
> indicating whether moving a pgtable between MMs is supported? Or do it
> separately as an optimization. WDYT?
>
> >
> > > +       if (dst_mm != src_mm) {
> > > +               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
> > > +               add_mm_counter(src_mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> > > +       }
> > > +       double_pt_unlock(src_ptl, dst_ptl);
> > > +
> > > +       anon_vma_unlock_write(src_anon_vma);
> > > +       put_anon_vma(src_anon_vma);
> > > +
> > > +       /* unblock rmap walks */
> > > +       unlock_page(src_page);
> > > +
> > > +       mmu_notifier_invalidate_range_end(&range);
> > > +       return 0;
> > > +}
> > > +#endif /* CONFIG_USERFAULTFD */
> > > +
> > >  /*
> > >   * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
> > >   *
> > [...]
> > > diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> > > index 96d9eae5c7cc..0cca60dfa8f8 100644
> > > --- a/mm/userfaultfd.c
> > > +++ b/mm/userfaultfd.c
> > [...]
> > > +ssize_t remap_pages(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> > > +                   unsigned long dst_start, unsigned long src_start,
> > > +                   unsigned long len, __u64 mode)
> > > +{
> > [...]
> > > +
> > > +       if (pgprot_val(src_vma->vm_page_prot) !=
> > > +           pgprot_val(dst_vma->vm_page_prot))
> > > +               goto out;
> >
> > Does this check intentionally allow moving pages from a
> > PROT_READ|PROT_WRITE anonymous private VMA into a PROT_READ anonymous
> > private VMA (on architectures like x86 and arm64 where CoW memory has
> > the same protection flags as read-only memory), but forbid moving them
> > from a PROT_READ|PROT_EXEC VMA into a PROT_READ VMA? I think this
> > check needs at least a comment to explain what's going on here.
>
> The check is simply to ensure the VMAs have the same access
> permissions to prevent page copies that should have different
> permissions. The fact that x86 and arm64 have the same protection bits
> for R/O and COW memory is a "side-effect" IMHO. I'm not sure what
> comment would be good here but I'm open to suggestions.
>
> >
> > > +       /* only allow remapping if both are mlocked or both aren't */
> > > +       if ((src_vma->vm_flags & VM_LOCKED) ^ (dst_vma->vm_flags & VM_LOCKED))
> > > +               goto out;
> > > +
> > > +       /*
> > > +        * Be strict and only allow remap_pages if either the src or
> > > +        * dst range is registered in the userfaultfd to prevent
> > > +        * userland errors going unnoticed. As far as the VM
> > > +        * consistency is concerned, it would be perfectly safe to
> > > +        * remove this check, but there's no useful usage for
> > > +        * remap_pages outside of userfaultfd registered ranges. This
> > > +        * is after all why it is an ioctl belonging to the
> > > +        * userfaultfd and not a syscall.
> > > +        *
> > > +        * Allow both vmas to be registered in the userfaultfd, just
> > > +        * in case somebody finds a way to make such a case useful.
> > > +        * Normally only one of the two vmas would be registered in
> > > +        * the userfaultfd.
> > > +        */
> > > +       if (!dst_vma->vm_userfaultfd_ctx.ctx &&
> > > +           !src_vma->vm_userfaultfd_ctx.ctx)
> > > +               goto out;
> > > +
> > > +       /*
> > > +        * FIXME: only allow remapping across anonymous vmas,
> > > +        * tmpfs should be added.
> > > +        */
> > > +       if (src_vma->vm_ops || dst_vma->vm_ops)
> > > +               goto out;
> >
> > I don't think it's okay to check for anonymous VMAs by checking
> > ->vm_ops. There are some weird drivers whose ->mmap helpers don't set
> > ->vm_ops and instead just shove all the necessary PTEs into the VMA
> > right on ->mmap, so I think they end up with ->vm_ops==NULL. For
> > example, kcov_mmap() looks that way. I'm not sure how common this is.
> >
> > Though, uuuuuh, I guess if that's true, the existing
> > vma_is_anonymous() is broken, since that also just checks ->vm_ops?
> > I'm not sure what the consequences of that would be... Either way,
> > vma_is_anonymous() might be the better way to check for anonymous VMAs
> > here, and someone should figure out whether vma_is_anonymous() needs
> > to be fixed.
>
> Absolutely, vma_is_anonymous() should have been used here. Will fix it.
>
> >
> > > +       /*
> > > +        * Ensure the dst_vma has an anon_vma or this page
> > > +        * would get a NULL anon_vma when moved in the
> > > +        * dst_vma.
> > > +        */
> > > +       err = -ENOMEM;
> > > +       if (unlikely(anon_vma_prepare(dst_vma)))
> > > +               goto out;
> > > +
> > > +       for (src_addr = src_start, dst_addr = dst_start;
> > > +            src_addr < src_start + len;) {
> > > +               spinlock_t *ptl;
> > > +               pmd_t dst_pmdval;
> > > +
> > > +               BUG_ON(dst_addr >= dst_start + len);
> > > +               src_pmd = mm_find_pmd(src_mm, src_addr);
> >
> > (this would blow up pretty badly if we could have transparent huge PUD
> > in the region but I think that's limited to file VMAs so it's fine as
> > it currently is)
>
> Should I add a comment here as a warning if in the future we decide to
> implement support for file-backed pages?
>
> >
> > > +               if (unlikely(!src_pmd)) {
> > > +                       if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
> > > +                               err = -ENOENT;
> > > +                               break;
> > > +                       }
> > > +                       src_pmd = mm_alloc_pmd(src_mm, src_addr);
> > > +                       if (unlikely(!src_pmd)) {
> > > +                               err = -ENOMEM;
> > > +                               break;
> > > +                       }
> > > +               }
> > > +               dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
> > > +               if (unlikely(!dst_pmd)) {
> > > +                       err = -ENOMEM;
> > > +                       break;
> > > +               }
> > > +
> > > +               dst_pmdval = pmdp_get_lockless(dst_pmd);
> > > +               /*
> > > +                * If the dst_pmd is mapped as THP don't
> > > +                * override it and just be strict.
> > > +                */
> > > +               if (unlikely(pmd_trans_huge(dst_pmdval))) {
> > > +                       err = -EEXIST;
> > > +                       break;
> > > +               }
> >
> > This check is racy because the dst_pmd can still change at this point,
> > from previously pointing to a zeroed PMD to now pointing to a
> > hugepage, right? And we rely on remap_pages_pte() and
> > remap_pages_huge_pmd() to recheck for that?
> > If yes, maybe add a comment noting this and explaining why we want this check.
>
> Yes, you are right. remap_pages_huge_pmd() does check
> pmd_same(*dst_pmd, dst_pmdval) after doing double_pt_lock(), so it
> will retry and exit on this check if dst_pmd got mapped as THP from
> under us. remap_pages_pte() OTOH simply does
> BUG_ON(pmd_trans_huge(*dst_pmd)), which is wrong. Instead it should
> also check if PMD value changed from under us and retry in such a
> case. I'll fix that and will add a comment here about this racy check
> and the retry logic.

Actually, remap_pages_pte() also indirectly handles the case when
dst_pmd changes from under us with this code:

dst_pte = pte_offset_map_nolock(dst_mm, dst_pmd, dst_addr, &dst_ptl);
/* If an huge pmd materialized from under us fail */
if (unlikely(!dst_pte)) {
        err = -EFAULT;
        goto out;
}

So it would fail if someone concurrently mapped a THP at that
location. This race seems to be handled already; the only thing left
here is to add a comment clarifying it.
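
Something along these lines, maybe (the comment text is just a first stab):

        dst_pmdval = pmdp_get_lockless(dst_pmd);
        /*
         * This check is racy: dst_pmd can become a THP mapping right after
         * we read it. That's ok because remap_pages_huge_pmd() re-checks
         * pmd_same(*dst_pmd, dst_pmdval) under the PT lock, and
         * remap_pages_pte() fails when pte_offset_map_nolock() runs into a
         * huge pmd, so we only ever retry or bail out, never operate on a
         * stale value.
         */
        if (unlikely(pmd_trans_huge(dst_pmdval))) {
                err = -EEXIST;
                break;
        }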

>
> >
> > > +               ptl = pmd_trans_huge_lock(src_pmd, src_vma);
> > > +               if (ptl) {
> > > +                       /*
> > > +                        * Check if we can move the pmd without
> > > +                        * splitting it. First check the address
> > > +                        * alignment to be the same in src/dst.  These
> > > +                        * checks don't actually need the PT lock but
> > > +                        * it's good to do it here to optimize this
> > > +                        * block away at build time if
> > > +                        * CONFIG_TRANSPARENT_HUGEPAGE is not set.
> > > +                        */
> > > +                       if (thp_aligned == -1)
> > > +                               thp_aligned = ((src_addr & ~HPAGE_PMD_MASK) ==
> > > +                                              (dst_addr & ~HPAGE_PMD_MASK));
> > > +                       if (!thp_aligned || (src_addr & ~HPAGE_PMD_MASK) ||
> >
> > This seems overly complicated, the only case when you can move a huge
> > PMD is if both addresses are hugepage-aligned and you have enough
> > length for one hugepage:
> >
> > (src_addr & ~HPAGE_PMD_MASK) == 0 && (dst_addr & ~HPAGE_PMD_MASK) == 0
> > && (src_start + len - src_addr >= HPAGE_PMD_SIZE).
>
> Ack.
>
> >
> > > +                           !pmd_none(dst_pmdval) ||
> > > +                           src_start + len - src_addr < HPAGE_PMD_SIZE) {
> > > +                               spin_unlock(ptl);
> > > +                               /* Fall through */
> > > +                               split_huge_pmd(src_vma, src_pmd, src_addr);
> > > +                       } else {
> > > +                               err = remap_pages_huge_pmd(dst_mm,
> > > +                                                          src_mm,
> > > +                                                          dst_pmd,
> > > +                                                          src_pmd,
> > > +                                                          dst_pmdval,
> > > +                                                          dst_vma,
> > > +                                                          src_vma,
> > > +                                                          dst_addr,
> > > +                                                          src_addr);
> > > +                               cond_resched();
> > > +
> > > +                               if (!err) {
> > > +                                       dst_addr += HPAGE_PMD_SIZE;
> > > +                                       src_addr += HPAGE_PMD_SIZE;
> > > +                                       moved += HPAGE_PMD_SIZE;
> > > +                               }
> > > +
> > > +                               if ((!err || err == -EAGAIN) &&
> > > +                                   fatal_signal_pending(current))
> > > +                                       err = -EINTR;
> > > +
> > > +                               if (err && err != -EAGAIN)
> > > +                                       break;
> > > +
> > > +                               continue;
> > > +                       }
> > > +               }
> > > +
> > > +               if (pmd_none(*src_pmd)) {
> > > +                       if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
> > > +                               err = -ENOENT;
> > > +                               break;
> > > +                       }
> > > +                       if (unlikely(__pte_alloc(src_mm, src_pmd))) {
> > > +                               err = -ENOMEM;
> > > +                               break;
> > > +                       }
> > > +               }
> > > +
> > > +               if (unlikely(pmd_none(dst_pmdval)) &&
> > > +                   unlikely(__pte_alloc(dst_mm, dst_pmd))) {
> >
> > Maybe just use pte_alloc() here?
>
> Ack.
>
> >
> > > +                       err = -ENOMEM;
> > > +                       break;
> > > +               }
> > > +
> > > +               err = remap_pages_pte(dst_mm, src_mm,
> > > +                                     dst_pmd, src_pmd,
> > > +                                     dst_vma, src_vma,
> > > +                                     dst_addr, src_addr,
> > > +                                     mode);
> > > +
> > > +               cond_resched();
> > > +
> > > +               if (!err) {
> > > +                       dst_addr += PAGE_SIZE;
> > > +                       src_addr += PAGE_SIZE;
> > > +                       moved += PAGE_SIZE;
> > > +               }
> > > +
> > > +               if ((!err || err == -EAGAIN) &&
> > > +                   fatal_signal_pending(current))
> > > +                       err = -EINTR;
> > > +
> > > +               if (err && err != -EAGAIN)
> > > +                       break;
> > > +       }
> > > +
> > > +out:
> > > +       mmap_read_unlock(dst_mm);
> > > +       if (dst_mm != src_mm)
> > > +               mmap_read_unlock(src_mm);
> > > +       BUG_ON(moved < 0);
> > > +       BUG_ON(err > 0);
> > > +       BUG_ON(!moved && !err);
> > > +       return moved ? moved : err;
> > > +}
> >
> > Maybe you could try whether this function would look simpler with a
> > shape roughly like:
> >
> > for (src_addr = ...; src_addr < ...;) {
> >   unsigned long step_size;
> >
> >   if (hugepage case) {
> >     if (have to split) {
> >       split it;
> >       continue;
> >     }
> >     step_size = HPAGE_PMD_SIZE;
> >     ...
> >   } else {
> >     ... 4k case ...
> >     step_size = PAGE_SIZE;
> >   }
> >   ...
> >   cond_resched();
> >   if (!err) {
> >     dst_addr += step_size;
> >     src_addr += step_size;
> >     moved += step_size;
> >   }
>
> Yeah, that looks simpler. I'll try that, thanks!
>
> >   ...
> > }
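
FWIW, roughly the shape I have in mind for the rework (untested, details
elided):

        for (src_addr = src_start, dst_addr = dst_start;
             src_addr < src_start + len;) {
                unsigned long step_size;
                ...
                ptl = pmd_trans_huge_lock(src_pmd, src_vma);
                if (ptl) {
                        if (/* can't move the whole pmd */) {
                                spin_unlock(ptl);
                                split_huge_pmd(src_vma, src_pmd, src_addr);
                                continue;
                        }
                        err = remap_pages_huge_pmd(dst_mm, src_mm,
                                                   dst_pmd, src_pmd, dst_pmdval,
                                                   dst_vma, src_vma,
                                                   dst_addr, src_addr);
                        step_size = HPAGE_PMD_SIZE;
                } else {
                        /* pmd_none()/pte_alloc() handling as before */
                        ...
                        err = remap_pages_pte(dst_mm, src_mm, dst_pmd, src_pmd,
                                              dst_vma, src_vma,
                                              dst_addr, src_addr, mode);
                        step_size = PAGE_SIZE;
                }
                cond_resched();
                if (!err) {
                        dst_addr += step_size;
                        src_addr += step_size;
                        moved += step_size;
                }
                if ((!err || err == -EAGAIN) && fatal_signal_pending(current))
                        err = -EINTR;
                if (err && err != -EAGAIN)
                        break;
        }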
Jann Horn Sept. 19, 2023, 11:50 p.m. UTC | #16
On Wed, Sep 20, 2023 at 1:08 AM Suren Baghdasaryan <surenb@google.com> wrote:
> On Thu, Sep 14, 2023 at 7:28 PM Jann Horn <jannh@google.com> wrote:
> > On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > > From: Andrea Arcangeli <aarcange@redhat.com>
> > >
> > > This implements the uABI of UFFDIO_REMAP.
> > >
> > > Notably one mode bitflag is also forwarded (and in turn known) by the
> > > lowlevel remap_pages method.
[...]
> > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
[...]
> > > +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> > > +                        struct mm_struct *src_mm,
> > > +                        pmd_t *dst_pmd, pmd_t *src_pmd,
> > > +                        pmd_t dst_pmdval,
> > > +                        struct vm_area_struct *dst_vma,
> > > +                        struct vm_area_struct *src_vma,
> > > +                        unsigned long dst_addr,
> > > +                        unsigned long src_addr)
> > > +{
> > > +       pmd_t _dst_pmd, src_pmdval;
> > > +       struct page *src_page;
> > > +       struct anon_vma *src_anon_vma, *dst_anon_vma;
> > > +       spinlock_t *src_ptl, *dst_ptl;
> > > +       pgtable_t pgtable;
> > > +       struct mmu_notifier_range range;
> > > +
> > > +       src_pmdval = *src_pmd;
> > > +       src_ptl = pmd_lockptr(src_mm, src_pmd);
> > > +
> > > +       BUG_ON(!pmd_trans_huge(src_pmdval));
> > > +       BUG_ON(!pmd_none(dst_pmdval));
> >
> > Why can we assert that pmd_none(dst_pmdval) is true here? Can we not
> > have concurrent faults (or userfaultfd operations) populating that
> > PMD?
>
> IIUC dst_pmdval is a copy of the value from dst_pmd, so that local
> copy should not change even if some concurrent operation changes
> dst_pmd. We can assert that it's pmd_none because we checked for that
> before calling remap_pages_huge_pmd. Later on we check if dst_pmd
> changed from under us (see pmd_same(*dst_pmd, dst_pmdval) check) and
> retry if that happened.

Oh, right, I don't know what I was thinking when I typed that.

But now I wonder about the check directly above that: What does this
code do for swap PMDs? It looks like that might splat on the
BUG_ON(!pmd_trans_huge(src_pmdval)). All we've checked on the path to
here is that the virtual memory area is aligned, that the destination
PMD is empty, and that pmd_trans_huge_lock() succeeded; but
pmd_trans_huge_lock() explicitly permits swap PMDs (which is the
swapped-out version of transhuge PMDs):

static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}

> >
> > > +       BUG_ON(!spin_is_locked(src_ptl));
> > > +       mmap_assert_locked(src_mm);
> > > +       mmap_assert_locked(dst_mm);
> > > +       BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> > > +       BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> > > +
> > > +       src_page = pmd_page(src_pmdval);
> > > +       BUG_ON(!PageHead(src_page));
> > > +       BUG_ON(!PageAnon(src_page));
> > > +       if (unlikely(page_mapcount(src_page) != 1)) {
> > > +               spin_unlock(src_ptl);
> > > +               return -EBUSY;
> > > +       }
> > > +
> > > +       get_page(src_page);
> > > +       spin_unlock(src_ptl);
> > > +
> > > +       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm, src_addr,
> > > +                               src_addr + HPAGE_PMD_SIZE);
> > > +       mmu_notifier_invalidate_range_start(&range);
> > > +
> > > +       /* block all concurrent rmap walks */
> > > +       lock_page(src_page);
> > > +
> > > +       /*
> > > +        * split_huge_page walks the anon_vma chain without the page
> > > +        * lock. Serialize against it with the anon_vma lock, the page
> > > +        * lock is not enough.
> > > +        */
> > > +       src_anon_vma = folio_get_anon_vma(page_folio(src_page));
> > > +       if (!src_anon_vma) {
> > > +               unlock_page(src_page);
> > > +               put_page(src_page);
> > > +               mmu_notifier_invalidate_range_end(&range);
> > > +               return -EAGAIN;
> > > +       }
> > > +       anon_vma_lock_write(src_anon_vma);
> > > +
> > > +       dst_ptl = pmd_lockptr(dst_mm, dst_pmd);
> > > +       double_pt_lock(src_ptl, dst_ptl);
> > > +       if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
> > > +                    !pmd_same(*dst_pmd, dst_pmdval) ||
> > > +                    page_mapcount(src_page) != 1)) {
> > > +               double_pt_unlock(src_ptl, dst_ptl);
> > > +               anon_vma_unlock_write(src_anon_vma);
> > > +               put_anon_vma(src_anon_vma);
> > > +               unlock_page(src_page);
> > > +               put_page(src_page);
> > > +               mmu_notifier_invalidate_range_end(&range);
> > > +               return -EAGAIN;
> > > +       }
> > > +
> > > +       BUG_ON(!PageHead(src_page));
> > > +       BUG_ON(!PageAnon(src_page));
> > > +       /* the PT lock is enough to keep the page pinned now */
> > > +       put_page(src_page);
> > > +
> > > +       dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> > > +       WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
> > > +       WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));
> > > +
> > > +       if (!pmd_same(pmdp_huge_clear_flush(src_vma, src_addr, src_pmd),
> > > +                     src_pmdval))
> > > +               BUG_ON(1);
> >
> > I'm not sure we can assert that the PMDs are exactly equal; the CPU
> > might have changed the A/D bits under us?
>
> Yes. I wonder if I can simply remove the BUG_ON here like this:
>
> src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
>
> Technically we don't use src_pmdval after this but for the possible
> future use that would keep things correct. If A/D bits changed from
> under us we will still copy correct values into dst_pmd.

And when we set up the dst_pmd, we always mark it as dirty and
accessed... so I guess that's fine.

> > > +       _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
> > > +       _dst_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
> > > +       set_pmd_at(dst_mm, dst_addr, dst_pmd, _dst_pmd);
> > > +
> > > +       pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
> > > +       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
> >
> > Are we allowed to move page tables between mm_structs on all
> > architectures? The first example I found that looks a bit dodgy,
> > looking through various architectures' pte_alloc_one(), is s390's
> > page_table_alloc() which looks like page tables are tied to per-MM
> > lists sometimes.
> > If that's not allowed, we might have to allocate a new deposit table
> > and free the old one or something like that.
>
> Hmm. Yeah, looks like in the case of !CONFIG_PGSTE the table can be
> linked to mm->context.pgtable_list, so can't be moved to another mm. I
> guess I'll have to keep a pgtable allocated, ready to be deposited and
> free the old one. Maybe it's worth having an arch-specific function
> indicating whether moving a pgtable between MMs is supported? Or do it
> separately as an optimization. WDYT?

Hm, dunno. I guess you could have architectures opt in with some
config flag similar to how flags like
ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH are wired up - define it in
init/Kconfig, select it in the architectures that support it, and then
gate the fast version on that with #ifdef?
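
i.e. roughly (the config name is made up):

        /*
         * ARCH_SUPPORTS_MOVE_PGTABLES would be defined in init/Kconfig and
         * selected by architectures whose page tables aren't tied to the mm
         * they were allocated for.
         */
#ifdef CONFIG_ARCH_SUPPORTS_MOVE_PGTABLES
        pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
#else
        /* deposit a pre-allocated table for dst_mm, free the withdrawn one */
#endif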

> > > +       if (dst_mm != src_mm) {
> > > +               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
> > > +               add_mm_counter(src_mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> > > +       }
> > > +       double_pt_unlock(src_ptl, dst_ptl);
> > > +
> > > +       anon_vma_unlock_write(src_anon_vma);
> > > +       put_anon_vma(src_anon_vma);
> > > +
> > > +       /* unblock rmap walks */
> > > +       unlock_page(src_page);
> > > +
> > > +       mmu_notifier_invalidate_range_end(&range);
> > > +       return 0;
> > > +}
> > > +#endif /* CONFIG_USERFAULTFD */
> > > +
> > >  /*
> > >   * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
> > >   *
> > [...]
> > > diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> > > index 96d9eae5c7cc..0cca60dfa8f8 100644
> > > --- a/mm/userfaultfd.c
> > > +++ b/mm/userfaultfd.c
> > [...]
> > > +ssize_t remap_pages(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> > > +                   unsigned long dst_start, unsigned long src_start,
> > > +                   unsigned long len, __u64 mode)
> > > +{
> > [...]
> > > +
> > > +       if (pgprot_val(src_vma->vm_page_prot) !=
> > > +           pgprot_val(dst_vma->vm_page_prot))
> > > +               goto out;
> >
> > Does this check intentionally allow moving pages from a
> > PROT_READ|PROT_WRITE anonymous private VMA into a PROT_READ anonymous
> > private VMA (on architectures like x86 and arm64 where CoW memory has
> > the same protection flags as read-only memory), but forbid moving them
> > from a PROT_READ|PROT_EXEC VMA into a PROT_READ VMA? I think this
> > check needs at least a comment to explain what's going on here.
>
> The check is simply to ensure the VMAs have the same access
> permissions to prevent page copies that should have different
> permissions. The fact that x86 and arm64 have the same protection bits
> for R/O and COW memory is a "side-effect" IMHO. I'm not sure what
> comment would be good here but I'm open to suggestions.

I'm not sure if you can do a meaningful security check on the
->vm_page_prot. I also don't think it matters for anonymous VMAs.
I guess if you want to keep this check but make this behavior more
consistent, you could put another check in front of this that rejects
VMAs where vm_flags like VM_READ, VM_WRITE, VM_SHARED or VM_EXEC are
different?
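
Something like this, maybe (untested):

        if ((src_vma->vm_flags ^ dst_vma->vm_flags) &
            (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED))
                goto out;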

[...]
> > > +       /*
> > > +        * Ensure the dst_vma has a anon_vma or this page
> > > +        * would get a NULL anon_vma when moved in the
> > > +        * dst_vma.
> > > +        */
> > > +       err = -ENOMEM;
> > > +       if (unlikely(anon_vma_prepare(dst_vma)))
> > > +               goto out;
> > > +
> > > +       for (src_addr = src_start, dst_addr = dst_start;
> > > +            src_addr < src_start + len;) {
> > > +               spinlock_t *ptl;
> > > +               pmd_t dst_pmdval;
> > > +
> > > +               BUG_ON(dst_addr >= dst_start + len);
> > > +               src_pmd = mm_find_pmd(src_mm, src_addr);
> >
> > (this would blow up pretty badly if we could have transparent huge PUD
> > in the region but I think that's limited to file VMAs so it's fine as
> > it currently is)
>
> Should I add a comment here as a warning if in the future we decide to
> implement support for file-backed pages?

Hm, yeah, I guess that might be a good idea.
Suren Baghdasaryan Sept. 20, 2023, 1:49 a.m. UTC | #17
On Tue, Sep 19, 2023 at 4:51 PM Jann Horn <jannh@google.com> wrote:
>
> On Wed, Sep 20, 2023 at 1:08 AM Suren Baghdasaryan <surenb@google.com> wrote:
> > On Thu, Sep 14, 2023 at 7:28 PM Jann Horn <jannh@google.com> wrote:
> > > On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > > > From: Andrea Arcangeli <aarcange@redhat.com>
> > > >
> > > > This implements the uABI of UFFDIO_REMAP.
> > > >
> > > > Notably one mode bitflag is also forwarded (and in turn known) by the
> > > > lowlevel remap_pages method.
> [...]
> > > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> [...]
> > > > +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> > > > +                        struct mm_struct *src_mm,
> > > > +                        pmd_t *dst_pmd, pmd_t *src_pmd,
> > > > +                        pmd_t dst_pmdval,
> > > > +                        struct vm_area_struct *dst_vma,
> > > > +                        struct vm_area_struct *src_vma,
> > > > +                        unsigned long dst_addr,
> > > > +                        unsigned long src_addr)
> > > > +{
> > > > +       pmd_t _dst_pmd, src_pmdval;
> > > > +       struct page *src_page;
> > > > +       struct anon_vma *src_anon_vma, *dst_anon_vma;
> > > > +       spinlock_t *src_ptl, *dst_ptl;
> > > > +       pgtable_t pgtable;
> > > > +       struct mmu_notifier_range range;
> > > > +
> > > > +       src_pmdval = *src_pmd;
> > > > +       src_ptl = pmd_lockptr(src_mm, src_pmd);
> > > > +
> > > > +       BUG_ON(!pmd_trans_huge(src_pmdval));
> > > > +       BUG_ON(!pmd_none(dst_pmdval));
> > >
> > > Why can we assert that pmd_none(dst_pmdval) is true here? Can we not
> > > have concurrent faults (or userfaultfd operations) populating that
> > > PMD?
> >
> > IIUC dst_pmdval is a copy of the value from dst_pmd, so that local
> > copy should not change even if some concurrent operation changes
> > dst_pmd. We can assert that it's pmd_none because we checked for that
> > before calling remap_pages_huge_pmd. Later on we check if dst_pmd
> > changed from under us (see pmd_same(*dst_pmd, dst_pmdval) check) and
> > retry if that happened.
>
> Oh, right, I don't know what I was thinking when I typed that.
>
> But now I wonder about the check directly above that: What does this
> code do for swap PMDs? It looks like that might splat on the
> BUG_ON(!pmd_trans_huge(src_pmdval)). All we've checked on the path to
> here is that the virtual memory area is aligned, that the destination
> PMD is empty, and that pmd_trans_huge_lock() succeeded; but
> pmd_trans_huge_lock() explicitly permits swap PMDs (which is the
> swapped-out version of transhuge PMDs):
>
> static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
>                 struct vm_area_struct *vma)
> {
>         if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
>                 return __pmd_trans_huge_lock(pmd, vma);
>         else
>                 return NULL;
> }

Yeah... OK, I think I'm missing a check for pmd_trans_huge(*src_pmd)
after we lock it with pmd_trans_huge_lock(src_pmd, src_vma). And we
can remove the above BUG_ON(). Would that address your concern?
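
IOW, something like this in the remap_pages() loop (untested):

        ptl = pmd_trans_huge_lock(src_pmd, src_vma);
        if (ptl) {
                if (!pmd_trans_huge(*src_pmd)) {
                        /*
                         * Not a present huge pmd (e.g. a pmd migration
                         * entry): drop the lock and fall back to the
                         * split + pte path below.
                         */
                        spin_unlock(ptl);
                        split_huge_pmd(src_vma, src_pmd, src_addr);
                } else {
                        /* alignment checks + remap_pages_huge_pmd() as now,
                         * minus the BUG_ON(!pmd_trans_huge()) */
                        ...
                }
        }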

>
> > >
> > > > +       BUG_ON(!spin_is_locked(src_ptl));
> > > > +       mmap_assert_locked(src_mm);
> > > > +       mmap_assert_locked(dst_mm);
> > > > +       BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> > > > +       BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> > > > +
> > > > +       src_page = pmd_page(src_pmdval);
> > > > +       BUG_ON(!PageHead(src_page));
> > > > +       BUG_ON(!PageAnon(src_page));
> > > > +       if (unlikely(page_mapcount(src_page) != 1)) {
> > > > +               spin_unlock(src_ptl);
> > > > +               return -EBUSY;
> > > > +       }
> > > > +
> > > > +       get_page(src_page);
> > > > +       spin_unlock(src_ptl);
> > > > +
> > > > +       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm, src_addr,
> > > > +                               src_addr + HPAGE_PMD_SIZE);
> > > > +       mmu_notifier_invalidate_range_start(&range);
> > > > +
> > > > +       /* block all concurrent rmap walks */
> > > > +       lock_page(src_page);
> > > > +
> > > > +       /*
> > > > +        * split_huge_page walks the anon_vma chain without the page
> > > > +        * lock. Serialize against it with the anon_vma lock, the page
> > > > +        * lock is not enough.
> > > > +        */
> > > > +       src_anon_vma = folio_get_anon_vma(page_folio(src_page));
> > > > +       if (!src_anon_vma) {
> > > > +               unlock_page(src_page);
> > > > +               put_page(src_page);
> > > > +               mmu_notifier_invalidate_range_end(&range);
> > > > +               return -EAGAIN;
> > > > +       }
> > > > +       anon_vma_lock_write(src_anon_vma);
> > > > +
> > > > +       dst_ptl = pmd_lockptr(dst_mm, dst_pmd);
> > > > +       double_pt_lock(src_ptl, dst_ptl);
> > > > +       if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
> > > > +                    !pmd_same(*dst_pmd, dst_pmdval) ||
> > > > +                    page_mapcount(src_page) != 1)) {
> > > > +               double_pt_unlock(src_ptl, dst_ptl);
> > > > +               anon_vma_unlock_write(src_anon_vma);
> > > > +               put_anon_vma(src_anon_vma);
> > > > +               unlock_page(src_page);
> > > > +               put_page(src_page);
> > > > +               mmu_notifier_invalidate_range_end(&range);
> > > > +               return -EAGAIN;
> > > > +       }
> > > > +
> > > > +       BUG_ON(!PageHead(src_page));
> > > > +       BUG_ON(!PageAnon(src_page));
> > > > +       /* the PT lock is enough to keep the page pinned now */
> > > > +       put_page(src_page);
> > > > +
> > > > +       dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
> > > > +       WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
> > > > +       WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));
> > > > +
> > > > +       if (!pmd_same(pmdp_huge_clear_flush(src_vma, src_addr, src_pmd),
> > > > +                     src_pmdval))
> > > > +               BUG_ON(1);
> > >
> > > I'm not sure we can assert that the PMDs are exactly equal; the CPU
> > > might have changed the A/D bits under us?
> >
> > Yes. I wonder if I can simply remove the BUG_ON here like this:
> >
> > src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
> >
> > Technically we don't use src_pmdval after this but for the possible
> > future use that would keep things correct. If A/D bits changed from
> > under us we will still copy correct values into dst_pmd.
>
> And when we set up the dst_pmd, we always mark it as dirty and
> accessed... so I guess that's fine.

Ack.

>
> > > > +       _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
> > > > +       _dst_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
> > > > +       set_pmd_at(dst_mm, dst_addr, dst_pmd, _dst_pmd);
> > > > +
> > > > +       pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
> > > > +       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
> > >
> > > Are we allowed to move page tables between mm_structs on all
> > > architectures? The first example I found that looks a bit dodgy,
> > > looking through various architectures' pte_alloc_one(), is s390's
> > > page_table_alloc() which looks like page tables are tied to per-MM
> > > lists sometimes.
> > > If that's not allowed, we might have to allocate a new deposit table
> > > and free the old one or something like that.
> >
> > Hmm. Yeah, looks like in the case of !CONFIG_PGSTE the table can be
> > linked to mm->context.pgtable_list, so can't be moved to another mm. I
> > guess I'll have to keep a pgtable allocated, ready to be deposited and
> > free the old one. Maybe it's worth having an arch-specific function
> > indicating whether moving a pgtable between MMs is supported? Or do it
> > separately as an optimization. WDYT?
>
> Hm, dunno. I guess you could have architectures opt in with some
> config flag similar to how flags like
> ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH are wired up - define it in
> init/Kconfig, select it in the architectures that support it, and then
> gate the fast version on that with #ifdef?

Yeah, that sounds good to me. I can implement an unoptimized common
path first and then introduce this optimization in the follow-up
patches.
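
For the common path I'm thinking of something like this (rough, untested):

        /* allocated up front, before taking the PT locks */
        pgtable = pte_alloc_one(dst_mm);
        if (unlikely(!pgtable))
                return -ENOMEM;
        ...
        /* deposit a fresh table for dst_mm, free the one withdrawn from src_mm */
        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
        pte_free(src_mm, pgtable_trans_huge_withdraw(src_mm, src_pmd));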

>
> > > > +       if (dst_mm != src_mm) {
> > > > +               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
> > > > +               add_mm_counter(src_mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> > > > +       }
> > > > +       double_pt_unlock(src_ptl, dst_ptl);
> > > > +
> > > > +       anon_vma_unlock_write(src_anon_vma);
> > > > +       put_anon_vma(src_anon_vma);
> > > > +
> > > > +       /* unblock rmap walks */
> > > > +       unlock_page(src_page);
> > > > +
> > > > +       mmu_notifier_invalidate_range_end(&range);
> > > > +       return 0;
> > > > +}
> > > > +#endif /* CONFIG_USERFAULTFD */
> > > > +
> > > >  /*
> > > >   * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
> > > >   *
> > > [...]
> > > > diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> > > > index 96d9eae5c7cc..0cca60dfa8f8 100644
> > > > --- a/mm/userfaultfd.c
> > > > +++ b/mm/userfaultfd.c
> > > [...]
> > > > +ssize_t remap_pages(struct mm_struct *dst_mm, struct mm_struct *src_mm,
> > > > +                   unsigned long dst_start, unsigned long src_start,
> > > > +                   unsigned long len, __u64 mode)
> > > > +{
> > > [...]
> > > > +
> > > > +       if (pgprot_val(src_vma->vm_page_prot) !=
> > > > +           pgprot_val(dst_vma->vm_page_prot))
> > > > +               goto out;
> > >
> > > Does this check intentionally allow moving pages from a
> > > PROT_READ|PROT_WRITE anonymous private VMA into a PROT_READ anonymous
> > > private VMA (on architectures like x86 and arm64 where CoW memory has
> > > the same protection flags as read-only memory), but forbid moving them
> > > from a PROT_READ|PROT_EXEC VMA into a PROT_READ VMA? I think this
> > > check needs at least a comment to explain what's going on here.
> >
> > The check is simply to ensure the VMAs have the same access
> > permissions to prevent page copies that should have different
> > permissions. The fact that x86 and arm64 have the same protection bits
> > for R/O and COW memory is a "side-effect" IMHO. I'm not sure what
> > comment would be good here but I'm open to suggestions.
>
> I'm not sure if you can do a meaningful security check on the
> ->vm_page_prot. I also don't think it matters for anonymous VMAs.
> I guess if you want to keep this check but make this behavior more
> consistent, you could put another check in front of this that rejects
> VMAs where vm_flags like VM_READ, VM_WRITE, VM_SHARED or VM_EXEC are
> different?

Ok, I'll post that in the next version and we can decide if that's enough.

>
> [...]
> > > > +       /*
> > > > +        * Ensure the dst_vma has an anon_vma or this page
> > > > +        * would get a NULL anon_vma when moved in the
> > > > +        * dst_vma.
> > > > +        */
> > > > +       err = -ENOMEM;
> > > > +       if (unlikely(anon_vma_prepare(dst_vma)))
> > > > +               goto out;
> > > > +
> > > > +       for (src_addr = src_start, dst_addr = dst_start;
> > > > +            src_addr < src_start + len;) {
> > > > +               spinlock_t *ptl;
> > > > +               pmd_t dst_pmdval;
> > > > +
> > > > +               BUG_ON(dst_addr >= dst_start + len);
> > > > +               src_pmd = mm_find_pmd(src_mm, src_addr);
> > >
> > > (this would blow up pretty badly if we could have transparent huge PUD
> > > in the region but I think that's limited to file VMAs so it's fine as
> > > it currently is)
> >
> > Should I add a comment here as a warning if in the future we decide to
> > implement support for file-backed pages?
>
> Hm, yeah, I guess that might be a good idea.

Ack.

Thanks for the feedback!
Jann Horn Sept. 20, 2023, 4:11 p.m. UTC | #18
On Wed, Sep 20, 2023 at 3:49 AM Suren Baghdasaryan <surenb@google.com> wrote:
> On Tue, Sep 19, 2023 at 4:51 PM Jann Horn <jannh@google.com> wrote:
> > On Wed, Sep 20, 2023 at 1:08 AM Suren Baghdasaryan <surenb@google.com> wrote:
> > > On Thu, Sep 14, 2023 at 7:28 PM Jann Horn <jannh@google.com> wrote:
> > > > On Thu, Sep 14, 2023 at 5:26 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > > > > From: Andrea Arcangeli <aarcange@redhat.com>
> > > > >
> > > > > This implements the uABI of UFFDIO_REMAP.
> > > > >
> > > > > Notably one mode bitflag is also forwarded (and in turn known) by the
> > > > > lowlevel remap_pages method.
> > [...]
> > > > > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > [...]
> > > > > +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> > > > > +                        struct mm_struct *src_mm,
> > > > > +                        pmd_t *dst_pmd, pmd_t *src_pmd,
> > > > > +                        pmd_t dst_pmdval,
> > > > > +                        struct vm_area_struct *dst_vma,
> > > > > +                        struct vm_area_struct *src_vma,
> > > > > +                        unsigned long dst_addr,
> > > > > +                        unsigned long src_addr)
> > > > > +{
> > > > > +       pmd_t _dst_pmd, src_pmdval;
> > > > > +       struct page *src_page;
> > > > > +       struct anon_vma *src_anon_vma, *dst_anon_vma;
> > > > > +       spinlock_t *src_ptl, *dst_ptl;
> > > > > +       pgtable_t pgtable;
> > > > > +       struct mmu_notifier_range range;
> > > > > +
> > > > > +       src_pmdval = *src_pmd;
> > > > > +       src_ptl = pmd_lockptr(src_mm, src_pmd);
> > > > > +
> > > > > +       BUG_ON(!pmd_trans_huge(src_pmdval));
> > > > > +       BUG_ON(!pmd_none(dst_pmdval));
> > > >
> > > > Why can we assert that pmd_none(dst_pmdval) is true here? Can we not
> > > > have concurrent faults (or userfaultfd operations) populating that
> > > > PMD?
> > >
> > > IIUC dst_pmdval is a copy of the value from dst_pmd, so that local
> > > copy should not change even if some concurrent operation changes
> > > dst_pmd. We can assert that it's pmd_none because we checked for that
> > > before calling remap_pages_huge_pmd. Later on we check if dst_pmd
> > > changed from under us (see pmd_same(*dst_pmd, dst_pmdval) check) and
> > > retry if that happened.
> >
> > Oh, right, I don't know what I was thinking when I typed that.
> >
> > But now I wonder about the check directly above that: What does this
> > code do for swap PMDs? It looks like that might splat on the
> > BUG_ON(!pmd_trans_huge(src_pmdval)). All we've checked on the path to
> > here is that the virtual memory area is aligned, that the destination
> > PMD is empty, and that pmd_trans_huge_lock() succeeded; but
> > pmd_trans_huge_lock() explicitly permits swap PMDs (which is the
> > swapped-out version of transhuge PMDs):
> >
> > static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
> >                 struct vm_area_struct *vma)
> > {
> >         if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
> >                 return __pmd_trans_huge_lock(pmd, vma);
> >         else
> >                 return NULL;
> > }
>
> Yeah... Ok, I think I'm missing a check for  pmd_trans_huge(*src_pmd)
> after we lock it with pmd_trans_huge_lock(src_pmd, src_vma). And we
> can remove the above BUG_ON(). Would that address your concern?

Sounds good. It'll end up splitting huge swap entries but I guess the
extra code for moving huge swap entries might not be worth it.
Jann Horn Sept. 21, 2023, 4:59 p.m. UTC | #19
On Thu, Sep 14, 2023 at 9:28 PM Jann Horn <jannh@google.com> wrote:
> Though, uuuuuh, I guess if that's true, the existing
> vma_is_anonymous() is broken, since that also just checks ->vm_ops?
> I'm not sure what the consequences of that would be... Either way,
> vma_is_anonymous() might be the better way to check for anonymous VMAs
> here, and someone should figure out whether vma_is_anonymous() needs
> to be fixed.

(Not really relevant to the rest of the thread, but just as a
sidenote: Turns out this is not an issue; since commit bfd40eaff5ab
("mm: fix vma_is_anonymous() false-positives"), VMAs where the ->mmap
handler does not set an operations pointer end up with a dummy
operations pointer.)
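
(For reference, vma_is_anonymous() itself still just checks the ops
pointer, i.e. roughly:

        static inline bool vma_is_anonymous(struct vm_area_struct *vma)
        {
                return !vma->vm_ops;
        }

so the dummy ops pointer set at mmap time is what makes that check
reliable.)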
Suren Baghdasaryan Sept. 21, 2023, 6:04 p.m. UTC | #20
On Thu, Sep 14, 2023 at 6:45 PM David Hildenbrand <david@redhat.com> wrote:
>
> On 14.09.23 20:43, David Hildenbrand wrote:
> > On 14.09.23 20:11, Matthew Wilcox wrote:
> >> On Thu, Sep 14, 2023 at 08:26:12AM -0700, Suren Baghdasaryan wrote:
> >>> +++ b/include/linux/userfaultfd_k.h
> >>> @@ -93,6 +93,23 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
> >>>    extern long uffd_wp_range(struct vm_area_struct *vma,
> >>>                       unsigned long start, unsigned long len, bool enable_wp);
> >>>
> >>> +/* remap_pages */
> >>> +extern void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
> >>> +extern void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
> >>> +extern ssize_t remap_pages(struct mm_struct *dst_mm,
> >>> +                      struct mm_struct *src_mm,
> >>> +                      unsigned long dst_start,
> >>> +                      unsigned long src_start,
> >>> +                      unsigned long len, __u64 flags);
> >>> +extern int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> >>> +                           struct mm_struct *src_mm,
> >>> +                           pmd_t *dst_pmd, pmd_t *src_pmd,
> >>> +                           pmd_t dst_pmdval,
> >>> +                           struct vm_area_struct *dst_vma,
> >>> +                           struct vm_area_struct *src_vma,
> >>> +                           unsigned long dst_addr,
> >>> +                           unsigned long src_addr);
> >>
> >> Drop the 'extern' markers from function declarations.
> >>
> >>> +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> >>> +                    struct mm_struct *src_mm,
> >>> +                    pmd_t *dst_pmd, pmd_t *src_pmd,
> >>> +                    pmd_t dst_pmdval,
> >>> +                    struct vm_area_struct *dst_vma,
> >>> +                    struct vm_area_struct *src_vma,
> >>> +                    unsigned long dst_addr,
> >>> +                    unsigned long src_addr)
> >>> +{
> >>> +   pmd_t _dst_pmd, src_pmdval;
> >>> +   struct page *src_page;
> >>> +   struct anon_vma *src_anon_vma, *dst_anon_vma;
> >>> +   spinlock_t *src_ptl, *dst_ptl;
> >>> +   pgtable_t pgtable;
> >>> +   struct mmu_notifier_range range;
> >>> +
> >>> +   src_pmdval = *src_pmd;
> >>> +   src_ptl = pmd_lockptr(src_mm, src_pmd);
> >>> +
> >>> +   BUG_ON(!pmd_trans_huge(src_pmdval));
> >>> +   BUG_ON(!pmd_none(dst_pmdval));
> >>> +   BUG_ON(!spin_is_locked(src_ptl));
> >>> +   mmap_assert_locked(src_mm);
> >>> +   mmap_assert_locked(dst_mm);
> >>> +   BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> >>> +   BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> >>> +
> >>> +   src_page = pmd_page(src_pmdval);
> >>> +   BUG_ON(!PageHead(src_page));
> >>> +   BUG_ON(!PageAnon(src_page));
> >>
> >> Better to add a src_folio = page_folio(src_page);
> >> and then folio_test_anon() here.
> >>
> >>> +   if (unlikely(page_mapcount(src_page) != 1)) {
> >>
> >> Brr, this is going to miss PTE mappings of this folio.  I think you
> >> actually want folio_mapcount() instead, although it'd be more efficient
> >> to look at folio->_entire_mapcount == 1 and _nr_pages_mapped == 0.
> >> Not sure what a good name for that predicate would be.
> >
> > We have
> >
> >    * It only works on non shared anonymous pages because those can
> >    * be relocated without generating non linear anon_vmas in the rmap
> >    * code.
> >    *
> >    * It provides a zero copy mechanism to handle userspace page faults.
> >    * The source vma pages should have mapcount == 1, which can be
> >    * enforced by using madvise(MADV_DONTFORK) on src vma.
> >
> > Use PageAnonExclusive(). As long as KSM is not involved and you don't
> > use fork(), that flag should be good enough for that use case here.
> >
> ... and similarly don't do any of that swapcount stuff and only check if
> the swap pte is anon exclusive.

I'm preparing v2, and this is the only part left for me to address, but
I'm not clear on how. David, could you please clarify how I should be
checking that a swap pte is exclusive without looking at the swapcount?

>
> --
> Cheers,
>
> David / dhildenb
>
David Hildenbrand Sept. 21, 2023, 6:17 p.m. UTC | #21
On 21.09.23 20:04, Suren Baghdasaryan wrote:
> On Thu, Sep 14, 2023 at 6:45 PM David Hildenbrand <david@redhat.com> wrote:
>>
>> On 14.09.23 20:43, David Hildenbrand wrote:
>>> On 14.09.23 20:11, Matthew Wilcox wrote:
>>>> On Thu, Sep 14, 2023 at 08:26:12AM -0700, Suren Baghdasaryan wrote:
>>>>> +++ b/include/linux/userfaultfd_k.h
>>>>> @@ -93,6 +93,23 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
>>>>>     extern long uffd_wp_range(struct vm_area_struct *vma,
>>>>>                        unsigned long start, unsigned long len, bool enable_wp);
>>>>>
>>>>> +/* remap_pages */
>>>>> +extern void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
>>>>> +extern void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
>>>>> +extern ssize_t remap_pages(struct mm_struct *dst_mm,
>>>>> +                      struct mm_struct *src_mm,
>>>>> +                      unsigned long dst_start,
>>>>> +                      unsigned long src_start,
>>>>> +                      unsigned long len, __u64 flags);
>>>>> +extern int remap_pages_huge_pmd(struct mm_struct *dst_mm,
>>>>> +                           struct mm_struct *src_mm,
>>>>> +                           pmd_t *dst_pmd, pmd_t *src_pmd,
>>>>> +                           pmd_t dst_pmdval,
>>>>> +                           struct vm_area_struct *dst_vma,
>>>>> +                           struct vm_area_struct *src_vma,
>>>>> +                           unsigned long dst_addr,
>>>>> +                           unsigned long src_addr);
>>>>
>>>> Drop the 'extern' markers from function declarations.
>>>>
>>>>> +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
>>>>> +                    struct mm_struct *src_mm,
>>>>> +                    pmd_t *dst_pmd, pmd_t *src_pmd,
>>>>> +                    pmd_t dst_pmdval,
>>>>> +                    struct vm_area_struct *dst_vma,
>>>>> +                    struct vm_area_struct *src_vma,
>>>>> +                    unsigned long dst_addr,
>>>>> +                    unsigned long src_addr)
>>>>> +{
>>>>> +   pmd_t _dst_pmd, src_pmdval;
>>>>> +   struct page *src_page;
>>>>> +   struct anon_vma *src_anon_vma, *dst_anon_vma;
>>>>> +   spinlock_t *src_ptl, *dst_ptl;
>>>>> +   pgtable_t pgtable;
>>>>> +   struct mmu_notifier_range range;
>>>>> +
>>>>> +   src_pmdval = *src_pmd;
>>>>> +   src_ptl = pmd_lockptr(src_mm, src_pmd);
>>>>> +
>>>>> +   BUG_ON(!pmd_trans_huge(src_pmdval));
>>>>> +   BUG_ON(!pmd_none(dst_pmdval));
>>>>> +   BUG_ON(!spin_is_locked(src_ptl));
>>>>> +   mmap_assert_locked(src_mm);
>>>>> +   mmap_assert_locked(dst_mm);
>>>>> +   BUG_ON(src_addr & ~HPAGE_PMD_MASK);
>>>>> +   BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
>>>>> +
>>>>> +   src_page = pmd_page(src_pmdval);
>>>>> +   BUG_ON(!PageHead(src_page));
>>>>> +   BUG_ON(!PageAnon(src_page));
>>>>
>>>> Better to add a src_folio = page_folio(src_page);
>>>> and then folio_test_anon() here.
>>>>
>>>>> +   if (unlikely(page_mapcount(src_page) != 1)) {
>>>>
>>>> Brr, this is going to miss PTE mappings of this folio.  I think you
>>>> actually want folio_mapcount() instead, although it'd be more efficient
>>>> to look at folio->_entire_mapcount == 1 and _nr_pages_mapped == 0.
>>>> Not sure what a good name for that predicate would be.
>>>
>>> We have
>>>
>>>     * It only works on non shared anonymous pages because those can
>>>     * be relocated without generating non linear anon_vmas in the rmap
>>>     * code.
>>>     *
>>>     * It provides a zero copy mechanism to handle userspace page faults.
>>>     * The source vma pages should have mapcount == 1, which can be
>>>     * enforced by using madvise(MADV_DONTFORK) on src vma.
>>>
>>> Use PageAnonExclusive(). As long as KSM is not involved and you don't
>>> use fork(), that flag should be good enough for that use case here.
>>>
>> ... and similarly don't do any of that swapcount stuff and only check if
>> the swap pte is anon exclusive.
> 
> I'm preparing v2 and this is the only part left for me to address but
> I'm not clear how. David, could you please clarify how I should be
> checking swap pte to be exclusive without swapcount?

If you have a real swp pte (not a non-swap pte like migration entries) 
you should be able to just use pte_swp_exclusive().
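
i.e. in the swap-pte case, roughly (the surrounding variable names are
made up):

        swp_entry_t entry = pte_to_swp_entry(orig_src_pte);

        /* not a real swap entry (e.g. a migration entry), or maybe shared */
        if (non_swap_entry(entry) || !pte_swp_exclusive(orig_src_pte)) {
                err = -EBUSY;
                goto out;
        }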
Suren Baghdasaryan Sept. 22, 2023, 1:57 a.m. UTC | #22
On Thu, Sep 21, 2023 at 11:17 AM David Hildenbrand <david@redhat.com> wrote:
>
> On 21.09.23 20:04, Suren Baghdasaryan wrote:
> > On Thu, Sep 14, 2023 at 6:45 PM David Hildenbrand <david@redhat.com> wrote:
> >>
> >> On 14.09.23 20:43, David Hildenbrand wrote:
> >>> On 14.09.23 20:11, Matthew Wilcox wrote:
> >>>> On Thu, Sep 14, 2023 at 08:26:12AM -0700, Suren Baghdasaryan wrote:
> >>>>> +++ b/include/linux/userfaultfd_k.h
> >>>>> @@ -93,6 +93,23 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
> >>>>>     extern long uffd_wp_range(struct vm_area_struct *vma,
> >>>>>                        unsigned long start, unsigned long len, bool enable_wp);
> >>>>>
> >>>>> +/* remap_pages */
> >>>>> +extern void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
> >>>>> +extern void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
> >>>>> +extern ssize_t remap_pages(struct mm_struct *dst_mm,
> >>>>> +                      struct mm_struct *src_mm,
> >>>>> +                      unsigned long dst_start,
> >>>>> +                      unsigned long src_start,
> >>>>> +                      unsigned long len, __u64 flags);
> >>>>> +extern int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> >>>>> +                           struct mm_struct *src_mm,
> >>>>> +                           pmd_t *dst_pmd, pmd_t *src_pmd,
> >>>>> +                           pmd_t dst_pmdval,
> >>>>> +                           struct vm_area_struct *dst_vma,
> >>>>> +                           struct vm_area_struct *src_vma,
> >>>>> +                           unsigned long dst_addr,
> >>>>> +                           unsigned long src_addr);
> >>>>
> >>>> Drop the 'extern' markers from function declarations.
> >>>>
> >>>>> +int remap_pages_huge_pmd(struct mm_struct *dst_mm,
> >>>>> +                    struct mm_struct *src_mm,
> >>>>> +                    pmd_t *dst_pmd, pmd_t *src_pmd,
> >>>>> +                    pmd_t dst_pmdval,
> >>>>> +                    struct vm_area_struct *dst_vma,
> >>>>> +                    struct vm_area_struct *src_vma,
> >>>>> +                    unsigned long dst_addr,
> >>>>> +                    unsigned long src_addr)
> >>>>> +{
> >>>>> +   pmd_t _dst_pmd, src_pmdval;
> >>>>> +   struct page *src_page;
> >>>>> +   struct anon_vma *src_anon_vma, *dst_anon_vma;
> >>>>> +   spinlock_t *src_ptl, *dst_ptl;
> >>>>> +   pgtable_t pgtable;
> >>>>> +   struct mmu_notifier_range range;
> >>>>> +
> >>>>> +   src_pmdval = *src_pmd;
> >>>>> +   src_ptl = pmd_lockptr(src_mm, src_pmd);
> >>>>> +
> >>>>> +   BUG_ON(!pmd_trans_huge(src_pmdval));
> >>>>> +   BUG_ON(!pmd_none(dst_pmdval));
> >>>>> +   BUG_ON(!spin_is_locked(src_ptl));
> >>>>> +   mmap_assert_locked(src_mm);
> >>>>> +   mmap_assert_locked(dst_mm);
> >>>>> +   BUG_ON(src_addr & ~HPAGE_PMD_MASK);
> >>>>> +   BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
> >>>>> +
> >>>>> +   src_page = pmd_page(src_pmdval);
> >>>>> +   BUG_ON(!PageHead(src_page));
> >>>>> +   BUG_ON(!PageAnon(src_page));
> >>>>
> >>>> Better to add a src_folio = page_folio(src_page);
> >>>> and then folio_test_anon() here.
> >>>>
> >>>>> +   if (unlikely(page_mapcount(src_page) != 1)) {
> >>>>
> >>>> Brr, this is going to miss PTE mappings of this folio.  I think you
> >>>> actually want folio_mapcount() instead, although it'd be more efficient
> >>>> to look at folio->_entire_mapcount == 1 and _nr_pages_mapped == 0.
> >>>> Not sure what a good name for that predicate would be.
> >>>
> >>> We have
> >>>
> >>>     * It only works on non shared anonymous pages because those can
> >>>     * be relocated without generating non linear anon_vmas in the rmap
> >>>     * code.
> >>>     *
> >>>     * It provides a zero copy mechanism to handle userspace page faults.
> >>>     * The source vma pages should have mapcount == 1, which can be
> >>>     * enforced by using madvise(MADV_DONTFORK) on src vma.
> >>>
> >>> Use PageAnonExclusive(). As long as KSM is not involved and you don't
> >>> use fork(), that flag should be good enough for that use case here.
> >>>
> >> ... and similarly don't do any of that swapcount stuff and only check if
> >> the swap pte is anon exclusive.
> >
> > I'm preparing v2 and this is the only part left for me to address but
> > I'm not clear how. David, could you please clarify how I should be
> > checking swap pte to be exclusive without swapcount?
>
> If you have a real swp pte (not a non-swap pte like migration entries)
> you should be able to just use pte_swp_exclusive().

Got it. Thanks!

>
> --
> Cheers,
>
> David / dhildenb
>

Patch

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 56eaae9dac1a..7bf64e7541c1 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -2027,6 +2027,52 @@  static inline unsigned int uffd_ctx_features(__u64 user_features)
 	return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
 }
 
+static int userfaultfd_remap(struct userfaultfd_ctx *ctx,
+			     unsigned long arg)
+{
+	__s64 ret;
+	struct uffdio_remap uffdio_remap;
+	struct uffdio_remap __user *user_uffdio_remap;
+	struct userfaultfd_wake_range range;
+
+	user_uffdio_remap = (struct uffdio_remap __user *) arg;
+
+	ret = -EFAULT;
+	if (copy_from_user(&uffdio_remap, user_uffdio_remap,
+			   /* don't copy the last field, "remap" */
+			   sizeof(uffdio_remap)-sizeof(__s64)))
+		goto out;
+
+	ret = validate_range(ctx->mm, uffdio_remap.dst, uffdio_remap.len);
+	if (ret)
+		goto out;
+	ret = validate_range(current->mm, uffdio_remap.src, uffdio_remap.len);
+	if (ret)
+		goto out;
+	ret = -EINVAL;
+	if (uffdio_remap.mode & ~(UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES|
+				  UFFDIO_REMAP_MODE_DONTWAKE))
+		goto out;
+
+	ret = remap_pages(ctx->mm, current->mm,
+			  uffdio_remap.dst, uffdio_remap.src,
+			  uffdio_remap.len, uffdio_remap.mode);
+	if (unlikely(put_user(ret, &user_uffdio_remap->remap)))
+		return -EFAULT;
+	if (ret < 0)
+		goto out;
+	/* len == 0 would wake all */
+	BUG_ON(!ret);
+	range.len = ret;
+	if (!(uffdio_remap.mode & UFFDIO_REMAP_MODE_DONTWAKE)) {
+		range.start = uffdio_remap.dst;
+		wake_userfault(ctx, &range);
+	}
+	ret = range.len == uffdio_remap.len ? 0 : -EAGAIN;
+out:
+	return ret;
+}
+
 /*
  * userland asks for a certain API version and we return which bits
  * and ioctl commands are implemented in this kernel for such API
@@ -2113,6 +2159,9 @@  static long userfaultfd_ioctl(struct file *file, unsigned cmd,
 	case UFFDIO_ZEROPAGE:
 		ret = userfaultfd_zeropage(ctx, arg);
 		break;
+	case UFFDIO_REMAP:
+		ret = userfaultfd_remap(ctx, arg);
+		break;
 	case UFFDIO_WRITEPROTECT:
 		ret = userfaultfd_writeprotect(ctx, arg);
 		break;
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 51cc21ebb568..614c4b439907 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -121,6 +121,11 @@  static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 	down_write(&anon_vma->root->rwsem);
 }
 
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+	return down_write_trylock(&anon_vma->root->rwsem);
+}
+
 static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
 {
 	up_write(&anon_vma->root->rwsem);
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index ac8c6854097c..2bc807dc390b 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -93,6 +93,23 @@  extern int mwriteprotect_range(struct mm_struct *dst_mm,
 extern long uffd_wp_range(struct vm_area_struct *vma,
 			  unsigned long start, unsigned long len, bool enable_wp);
 
+/* remap_pages */
+extern void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
+extern void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
+extern ssize_t remap_pages(struct mm_struct *dst_mm,
+			   struct mm_struct *src_mm,
+			   unsigned long dst_start,
+			   unsigned long src_start,
+			   unsigned long len, __u64 flags);
+extern int remap_pages_huge_pmd(struct mm_struct *dst_mm,
+				struct mm_struct *src_mm,
+				pmd_t *dst_pmd, pmd_t *src_pmd,
+				pmd_t dst_pmdval,
+				struct vm_area_struct *dst_vma,
+				struct vm_area_struct *src_vma,
+				unsigned long dst_addr,
+				unsigned long src_addr);
+
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
 					struct vm_userfaultfd_ctx vm_ctx)
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 62151706c5a3..22d1c43e39f9 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -49,6 +49,7 @@ 
 	((__u64)1 << _UFFDIO_WAKE |		\
 	 (__u64)1 << _UFFDIO_COPY |		\
 	 (__u64)1 << _UFFDIO_ZEROPAGE |		\
+	 (__u64)1 << _UFFDIO_REMAP |		\
 	 (__u64)1 << _UFFDIO_WRITEPROTECT |	\
 	 (__u64)1 << _UFFDIO_CONTINUE |		\
 	 (__u64)1 << _UFFDIO_POISON)
@@ -72,6 +73,7 @@ 
 #define _UFFDIO_WAKE			(0x02)
 #define _UFFDIO_COPY			(0x03)
 #define _UFFDIO_ZEROPAGE		(0x04)
+#define _UFFDIO_REMAP			(0x05)
 #define _UFFDIO_WRITEPROTECT		(0x06)
 #define _UFFDIO_CONTINUE		(0x07)
 #define _UFFDIO_POISON			(0x08)
@@ -91,6 +93,8 @@ 
 				      struct uffdio_copy)
 #define UFFDIO_ZEROPAGE		_IOWR(UFFDIO, _UFFDIO_ZEROPAGE,	\
 				      struct uffdio_zeropage)
+#define UFFDIO_REMAP		_IOWR(UFFDIO, _UFFDIO_REMAP,	\
+				      struct uffdio_remap)
 #define UFFDIO_WRITEPROTECT	_IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
 				      struct uffdio_writeprotect)
 #define UFFDIO_CONTINUE		_IOWR(UFFDIO, _UFFDIO_CONTINUE,	\
@@ -340,6 +344,24 @@  struct uffdio_poison {
 	__s64 updated;
 };
 
+struct uffdio_remap {
+	__u64 dst;
+	__u64 src;
+	__u64 len;
+	/*
+	 * Especially if used to atomically remove memory from the
+	 * address space, the wake on the dst range is not needed.
+	 */
+#define UFFDIO_REMAP_MODE_DONTWAKE		((__u64)1<<0)
+#define UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES	((__u64)1<<1)
+	__u64 mode;
+	/*
+	 * "remap" is written by the ioctl and must be at the end: the
+	 * copy_from_user will not read the last 8 bytes.
+	 */
+	__s64 remap;
+};
+
 /*
  * Flags for the userfaultfd(2) system call itself.
  */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 064fbd90822b..c7a9880a1f6a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1932,6 +1932,124 @@  int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
+#ifdef CONFIG_USERFAULTFD
+/*
+ * The PT lock for src_pmd and the mmap_lock for reading are held by
+ * the caller, but this function must release the src PT lock before
+ * returning. We're guaranteed the src_pmd is a pmd_trans_huge until
+ * the PT lock of the src_pmd is released. Just move the page from
+ * src_pmd to dst_pmd if possible. Return zero if it succeeded in
+ * moving the page, -EAGAIN if it needs to be repeated by the caller,
+ * or another error in case of failure.
+ */
+int remap_pages_huge_pmd(struct mm_struct *dst_mm,
+			 struct mm_struct *src_mm,
+			 pmd_t *dst_pmd, pmd_t *src_pmd,
+			 pmd_t dst_pmdval,
+			 struct vm_area_struct *dst_vma,
+			 struct vm_area_struct *src_vma,
+			 unsigned long dst_addr,
+			 unsigned long src_addr)
+{
+	pmd_t _dst_pmd, src_pmdval;
+	struct page *src_page;
+	struct anon_vma *src_anon_vma, *dst_anon_vma;
+	spinlock_t *src_ptl, *dst_ptl;
+	pgtable_t pgtable;
+	struct mmu_notifier_range range;
+
+	src_pmdval = *src_pmd;
+	src_ptl = pmd_lockptr(src_mm, src_pmd);
+
+	BUG_ON(!pmd_trans_huge(src_pmdval));
+	BUG_ON(!pmd_none(dst_pmdval));
+	BUG_ON(!spin_is_locked(src_ptl));
+	mmap_assert_locked(src_mm);
+	mmap_assert_locked(dst_mm);
+	BUG_ON(src_addr & ~HPAGE_PMD_MASK);
+	BUG_ON(dst_addr & ~HPAGE_PMD_MASK);
+
+	src_page = pmd_page(src_pmdval);
+	BUG_ON(!PageHead(src_page));
+	BUG_ON(!PageAnon(src_page));
+	if (unlikely(page_mapcount(src_page) != 1)) {
+		spin_unlock(src_ptl);
+		return -EBUSY;
+	}
+
+	get_page(src_page);
+	spin_unlock(src_ptl);
+
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm, src_addr,
+				src_addr + HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+
+	/* block all concurrent rmap walks */
+	lock_page(src_page);
+
+	/*
+	 * split_huge_page walks the anon_vma chain without the page
+	 * lock. Serialize against it with the anon_vma lock, the page
+	 * lock is not enough.
+	 */
+	src_anon_vma = folio_get_anon_vma(page_folio(src_page));
+	if (!src_anon_vma) {
+		unlock_page(src_page);
+		put_page(src_page);
+		mmu_notifier_invalidate_range_end(&range);
+		return -EAGAIN;
+	}
+	anon_vma_lock_write(src_anon_vma);
+
+	dst_ptl = pmd_lockptr(dst_mm, dst_pmd);
+	double_pt_lock(src_ptl, dst_ptl);
+	if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
+		     !pmd_same(*dst_pmd, dst_pmdval) ||
+		     page_mapcount(src_page) != 1)) {
+		double_pt_unlock(src_ptl, dst_ptl);
+		anon_vma_unlock_write(src_anon_vma);
+		put_anon_vma(src_anon_vma);
+		unlock_page(src_page);
+		put_page(src_page);
+		mmu_notifier_invalidate_range_end(&range);
+		return -EAGAIN;
+	}
+
+	BUG_ON(!PageHead(src_page));
+	BUG_ON(!PageAnon(src_page));
+	/* the PT lock is enough to keep the page pinned now */
+	put_page(src_page);
+
+	dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
+	WRITE_ONCE(src_page->mapping, (struct address_space *) dst_anon_vma);
+	WRITE_ONCE(src_page->index, linear_page_index(dst_vma, dst_addr));
+
+	if (!pmd_same(pmdp_huge_clear_flush(src_vma, src_addr, src_pmd),
+		      src_pmdval))
+		BUG_ON(1);
+	_dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
+	_dst_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+	set_pmd_at(dst_mm, dst_addr, dst_pmd, _dst_pmd);
+
+	pgtable = pgtable_trans_huge_withdraw(src_mm, src_pmd);
+	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+	if (dst_mm != src_mm) {
+		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		add_mm_counter(src_mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+	}
+	double_pt_unlock(src_ptl, dst_ptl);
+
+	anon_vma_unlock_write(src_anon_vma);
+	put_anon_vma(src_anon_vma);
+
+	/* unblock rmap walks */
+	unlock_page(src_page);
+
+	mmu_notifier_invalidate_range_end(&range);
+	return 0;
+}
+#endif /* CONFIG_USERFAULTFD */
+
 /*
  * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
  *
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 88433cc25d8a..af23248b3551 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1135,6 +1135,9 @@  static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	 * Prevent all access to pagetables with the exception of
 	 * gup_fast later handled by the ptep_clear_flush and the VM
 	 * handled by the anon_vma lock + PG_lock.
+	 *
+	 * UFFDIO_REMAP is also prevented from racing thanks to the
+	 * mmap_lock.
 	 */
 	mmap_write_lock(mm);
 	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 96d9eae5c7cc..0cca60dfa8f8 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -842,3 +842,589 @@  int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
 	mmap_read_unlock(dst_mm);
 	return err;
 }
+
+
+void double_pt_lock(spinlock_t *ptl1,
+		    spinlock_t *ptl2)
+	__acquires(ptl1)
+	__acquires(ptl2)
+{
+	spinlock_t *ptl_tmp;
+
+	if (ptl1 > ptl2) {
+		/* exchange ptl1 and ptl2 */
+		ptl_tmp = ptl1;
+		ptl1 = ptl2;
+		ptl2 = ptl_tmp;
+	}
+	/* lock in virtual address order to avoid lock inversion */
+	spin_lock(ptl1);
+	if (ptl1 != ptl2)
+		spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);
+	else
+		__acquire(ptl2);
+}
+
+void double_pt_unlock(spinlock_t *ptl1,
+		      spinlock_t *ptl2)
+	__releases(ptl1)
+	__releases(ptl2)
+{
+	spin_unlock(ptl1);
+	if (ptl1 != ptl2)
+		spin_unlock(ptl2);
+	else
+		__release(ptl2);
+}
+
+/*
+ * The mmap_lock for reading is held by the caller. Just move the page
+ * from src_pmd to dst_pmd if possible, and return zero if the page
+ * was moved successfully.
+ */
+static int remap_pages_pte(struct mm_struct *dst_mm,
+			   struct mm_struct *src_mm,
+			   pmd_t *dst_pmd,
+			   pmd_t *src_pmd,
+			   struct vm_area_struct *dst_vma,
+			   struct vm_area_struct *src_vma,
+			   unsigned long dst_addr,
+			   unsigned long src_addr,
+			   __u64 mode)
+{
+	swp_entry_t entry;
+	pte_t orig_src_pte, orig_dst_pte;
+	spinlock_t *src_ptl, *dst_ptl;
+	pte_t *src_pte = NULL;
+	pte_t *dst_pte = NULL;
+
+	struct folio *src_folio = NULL;
+	struct anon_vma *src_anon_vma = NULL;
+	struct anon_vma *dst_anon_vma;
+	struct mmu_notifier_range range;
+	int err = 0;
+
+retry:
+	dst_pte = pte_offset_map_nolock(dst_mm, dst_pmd, dst_addr, &dst_ptl);
+
+	/* If a huge pmd materialized from under us, fail */
+	if (unlikely(!dst_pte)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	src_pte = pte_offset_map_nolock(src_mm, src_pmd, src_addr, &src_ptl);
+
+	/*
+	 * We held the mmap_lock for reading so MADV_DONTNEED
+	 * can zap transparent huge pages under us, or the
+	 * transparent huge page fault can establish new
+	 * transparent huge pages under us.
+	 */
+	if (unlikely(!src_pte)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	BUG_ON(pmd_none(*dst_pmd));
+	BUG_ON(pmd_none(*src_pmd));
+	BUG_ON(pmd_trans_huge(*dst_pmd));
+	BUG_ON(pmd_trans_huge(*src_pmd));
+
+	spin_lock(dst_ptl);
+	orig_dst_pte = *dst_pte;
+	spin_unlock(dst_ptl);
+	if (!pte_none(orig_dst_pte)) {
+		err = -EEXIST;
+		goto out;
+	}
+
+	spin_lock(src_ptl);
+	orig_src_pte = *src_pte;
+	spin_unlock(src_ptl);
+	if (pte_none(orig_src_pte)) {
+		if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES))
+			err = -ENOENT;
+		else /* nothing to do to remap a hole */
+			err = 0;
+		goto out;
+	}
+
+	if (pte_present(orig_src_pte)) {
+		if (!src_folio) {
+			struct folio *folio;
+
+			/*
+			 * Pin the page while holding the lock to be sure the
+			 * page isn't freed under us
+			 */
+			spin_lock(src_ptl);
+			if (!pte_same(orig_src_pte, *src_pte)) {
+				spin_unlock(src_ptl);
+				err = -EAGAIN;
+				goto out;
+			}
+
+			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
+			if (!folio || !folio_test_anon(folio) ||
+			    folio_estimated_sharers(folio) != 1) {
+				spin_unlock(src_ptl);
+				err = -EBUSY;
+				goto out;
+			}
+
+			src_folio = folio;
+			folio_get(src_folio);
+			spin_unlock(src_ptl);
+
+			/* try to block all concurrent rmap walks */
+			if (!folio_trylock(src_folio)) {
+				pte_unmap(&orig_src_pte);
+				pte_unmap(&orig_dst_pte);
+				src_pte = dst_pte = NULL;
+				folio_lock(src_folio);
+				goto retry;
+			}
+		}
+
+		if (!src_anon_vma) {
+			/*
+			 * folio_referenced walks the anon_vma chain
+			 * without the folio lock. Serialize against it with
+			 * the anon_vma lock, the folio lock is not enough.
+			 */
+			src_anon_vma = folio_get_anon_vma(src_folio);
+			if (!src_anon_vma) {
+				/* page was unmapped from under us */
+				err = -EAGAIN;
+				goto out;
+			}
+			if (!anon_vma_trylock_write(src_anon_vma)) {
+				pte_unmap(&orig_src_pte);
+				pte_unmap(&orig_dst_pte);
+				src_pte = dst_pte = NULL;
+				anon_vma_lock_write(src_anon_vma);
+				goto retry;
+			}
+		}
+
+		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_mm,
+					src_addr, src_addr + PAGE_SIZE);
+		mmu_notifier_invalidate_range_start_nonblock(&range);
+
+		double_pt_lock(dst_ptl, src_ptl);
+
+		if (!pte_same(*src_pte, orig_src_pte) ||
+		    !pte_same(*dst_pte, orig_dst_pte) ||
+		    folio_estimated_sharers(src_folio) != 1) {
+			double_pt_unlock(dst_ptl, src_ptl);
+			err = -EAGAIN;
+			goto out;
+		}
+
+		BUG_ON(!folio_test_anon(src_folio));
+		/* the PT lock is enough to keep the page pinned now */
+		folio_put(src_folio);
+
+		dst_anon_vma = (void *) dst_vma->anon_vma + PAGE_MAPPING_ANON;
+		WRITE_ONCE(src_folio->mapping,
+			   (struct address_space *) dst_anon_vma);
+		WRITE_ONCE(src_folio->index, linear_page_index(dst_vma,
+							      dst_addr));
+
+		if (!pte_same(ptep_clear_flush(src_vma, src_addr, src_pte),
+			      orig_src_pte))
+			BUG_ON(1);
+
+		orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
+		orig_dst_pte = maybe_mkwrite(pte_mkdirty(orig_dst_pte),
+					     dst_vma);
+
+		set_pte_at(dst_mm, dst_addr, dst_pte, orig_dst_pte);
+
+		if (dst_mm != src_mm) {
+			inc_mm_counter(dst_mm, MM_ANONPAGES);
+			dec_mm_counter(src_mm, MM_ANONPAGES);
+		}
+
+		double_pt_unlock(dst_ptl, src_ptl);
+
+		anon_vma_unlock_write(src_anon_vma);
+		mmu_notifier_invalidate_range_end(&range);
+		put_anon_vma(src_anon_vma);
+		src_anon_vma = NULL;
+
+		/* unblock rmap walks */
+		folio_unlock(src_folio);
+		src_folio = NULL;
+
+	} else {
+		struct swap_info_struct *si;
+		int swap_count;
+
+		entry = pte_to_swp_entry(orig_src_pte);
+		if (non_swap_entry(entry)) {
+			if (is_migration_entry(entry)) {
+				pte_unmap(&orig_src_pte);
+				pte_unmap(&orig_dst_pte);
+				src_pte = dst_pte = NULL;
+				migration_entry_wait(src_mm, src_pmd,
+						     src_addr);
+				err = -EAGAIN;
+			} else
+				err = -EFAULT;
+			goto out;
+		}
+
+		/*
+		 * COUNT_CONTINUE being returned is fine here; there is
+		 * no need to follow the whole swap continuation chain
+		 * to check against a count of 1.
+		 */
+		si = get_swap_device(entry);
+		if (!si) {
+			err = -EBUSY;
+			goto out;
+		}
+
+		swap_count = swap_swapcount(si, entry);
+		put_swap_device(si);
+		if (swap_count != 1) {
+			err = -EBUSY;
+			goto out;
+		}
+
+		double_pt_lock(dst_ptl, src_ptl);
+
+		if (!pte_same(*src_pte, orig_src_pte) ||
+		    !pte_same(*dst_pte, orig_dst_pte) ||
+		    swp_swapcount(entry) != 1) {
+			double_pt_unlock(dst_ptl, src_ptl);
+			err = -EAGAIN;
+			goto out;
+		}
+
+		if (pte_val(ptep_get_and_clear(src_mm, src_addr, src_pte)) !=
+		    pte_val(orig_src_pte))
+			BUG_ON(1);
+		set_pte_at(dst_mm, dst_addr, dst_pte, orig_src_pte);
+
+		if (dst_mm != src_mm) {
+			inc_mm_counter(dst_mm, MM_ANONPAGES);
+			dec_mm_counter(src_mm, MM_ANONPAGES);
+		}
+
+		double_pt_unlock(dst_ptl, src_ptl);
+	}
+
+out:
+	if (src_anon_vma) {
+		anon_vma_unlock_write(src_anon_vma);
+		put_anon_vma(src_anon_vma);
+	}
+	if (src_folio) {
+		folio_unlock(src_folio);
+		folio_put(src_folio);
+	}
+	if (dst_pte)
+		pte_unmap(dst_pte);
+	if (src_pte)
+		pte_unmap(src_pte);
+
+	return err;
+}
+
+/**
+ * remap_pages - remap arbitrary anonymous pages of an existing vma
+ * @dst_start: start of the destination virtual memory range
+ * @src_start: start of the source virtual memory range
+ * @len: length of the virtual memory range
+ *
+ * remap_pages() remaps arbitrary anonymous pages atomically in zero
+ * copy. It only works on non shared anonymous pages because those can
+ * be relocated without generating non linear anon_vmas in the rmap
+ * code.
+ *
+ * It provides a zero copy mechanism to handle userspace page faults.
+ * The source vma pages should have mapcount == 1, which can be
+ * enforced by using madvise(MADV_DONTFORK) on src vma.
+ *
+ * The thread handling the userland page fault will receive the
+ * faulting page in the source vma through the network, storage or any
+ * other I/O device (MADV_DONTFORK in the source vma prevents
+ * remap_pages() from failing with -EBUSY if the process forks before
+ * remap_pages() is called), and will then call remap_pages() to map
+ * the page at the faulting address in the destination vma.
+ *
+ * This userfaultfd command works purely via pagetables, so it's the
+ * most efficient way to move physical non shared anonymous pages
+ * across different virtual addresses. Unlike mremap()/mmap()/munmap()
+ * it does not create any new vmas. The mapping in the destination
+ * address is atomic.
+ *
+ * It only works if the vma protection bits are identical from the
+ * source and destination vma.
+ *
+ * It can remap non shared anonymous pages within the same vma too.
+ *
+ * If the source virtual memory range has any unmapped holes, or if
+ * the destination virtual memory range is not a whole unmapped hole,
+ * remap_pages() will fail respectively with -ENOENT or -EEXIST. This
+ * provides a very strict behavior to avoid any chance of memory
+ * corruption going unnoticed if there are userland race conditions.
+ * Only one thread should resolve the userland page fault at any given
+ * time for any given faulting address. This means that if two threads
+ * try to both call remap_pages() on the same destination address at the
+ * same time, the second thread will get an explicit error from this
+ * command.
+ *
+ * The command retval will be "len" if successful. The command
+ * however can be interrupted by fatal signals or errors. If
+ * interrupted it will return the number of bytes successfully
+ * remapped before the interruption if any, or the negative error if
+ * none. It will never return zero. Either it will return an error or
+ * an amount of bytes successfully moved. If the retval reports a
+ * "short" remap, the remap_pages() command should be repeated by
+ * userland with src+retval, dst+retval, len-retval if it wants to know
+ * about the error that interrupted it.
+ *
+ * The UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES flag can be specified to
+ * prevent -ENOENT errors from materializing if there are holes in the
+ * source virtual range that is being remapped. The holes will be
+ * accounted as successfully remapped in the retval of the
+ * command. This is mostly useful to remap hugepage-aligned virtual
+ * regions without knowing whether they contain transparent hugepages,
+ * while avoiding the risk of having to split the huge pmd during the
+ * remap.
+ *
+ * Any rmap walk that takes the anon_vma locks without first obtaining
+ * the folio lock (for example split_huge_page and folio_referenced)
+ * will have to verify whether the folio->mapping has changed after
+ * taking the anon_vma lock. If it changed, the walk should release
+ * the lock and retry obtaining a new anon_vma, because it means the
+ * anon_vma was changed by remap_pages() before the lock
+ * could be obtained. This is the only additional complexity added to
+ * the rmap code to provide this anonymous page remapping functionality.
+ */
+ssize_t remap_pages(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		    unsigned long dst_start, unsigned long src_start,
+		    unsigned long len, __u64 mode)
+{
+	struct vm_area_struct *src_vma, *dst_vma;
+	long err = -EINVAL;
+	pmd_t *src_pmd, *dst_pmd;
+	unsigned long src_addr, dst_addr;
+	int thp_aligned = -1;
+	ssize_t moved = 0;
+
+	/*
+	 * Sanitize the command parameters:
+	 */
+	BUG_ON(src_start & ~PAGE_MASK);
+	BUG_ON(dst_start & ~PAGE_MASK);
+	BUG_ON(len & ~PAGE_MASK);
+
+	/* Does the address range wrap, or is the span zero-sized? */
+	BUG_ON(src_start + len <= src_start);
+	BUG_ON(dst_start + len <= dst_start);
+
+	/*
+	 * Because these are read semaphores, there's no risk of lock
+	 * inversion.
+	 */
+	mmap_read_lock(dst_mm);
+	if (dst_mm != src_mm)
+		mmap_read_lock(src_mm);
+
+	/*
+	 * Make sure the vma is not shared, that the src and dst remap
+	 * ranges are both valid and fully within a single existing
+	 * vma.
+	 */
+	src_vma = find_vma(src_mm, src_start);
+	if (!src_vma || (src_vma->vm_flags & VM_SHARED))
+		goto out;
+	if (src_start < src_vma->vm_start ||
+	    src_start + len > src_vma->vm_end)
+		goto out;
+
+	dst_vma = find_vma(dst_mm, dst_start);
+	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+		goto out;
+	if (dst_start < dst_vma->vm_start ||
+	    dst_start + len > dst_vma->vm_end)
+		goto out;
+
+	if (pgprot_val(src_vma->vm_page_prot) !=
+	    pgprot_val(dst_vma->vm_page_prot))
+		goto out;
+
+	/* only allow remapping if both are mlocked or both aren't */
+	if ((src_vma->vm_flags & VM_LOCKED) ^ (dst_vma->vm_flags & VM_LOCKED))
+		goto out;
+
+	/*
+	 * Be strict and only allow remap_pages if either the src or
+	 * dst range is registered in the userfaultfd to prevent
+	 * userland errors going unnoticed. As far as the VM
+	 * consistency is concerned, it would be perfectly safe to
+	 * remove this check, but there's no useful usage for
+	 * remap_pages outside of userfaultfd registered ranges. This
+	 * is after all why it is an ioctl belonging to the
+	 * userfaultfd and not a syscall.
+	 *
+	 * Allow both vmas to be registered in the userfaultfd, just
+	 * in case somebody finds a way to make such a case useful.
+	 * Normally only one of the two vmas would be registered in
+	 * the userfaultfd.
+	 */
+	if (!dst_vma->vm_userfaultfd_ctx.ctx &&
+	    !src_vma->vm_userfaultfd_ctx.ctx)
+		goto out;
+
+	/*
+	 * FIXME: only allow remapping across anonymous vmas,
+	 * tmpfs should be added.
+	 */
+	if (src_vma->vm_ops || dst_vma->vm_ops)
+		goto out;
+
+	/*
+	 * Ensure the dst_vma has an anon_vma, or this page
+	 * would get a NULL anon_vma when moved into the
+	 * dst_vma.
+	 */
+	err = -ENOMEM;
+	if (unlikely(anon_vma_prepare(dst_vma)))
+		goto out;
+
+	for (src_addr = src_start, dst_addr = dst_start;
+	     src_addr < src_start + len;) {
+		spinlock_t *ptl;
+		pmd_t dst_pmdval;
+
+		BUG_ON(dst_addr >= dst_start + len);
+		src_pmd = mm_find_pmd(src_mm, src_addr);
+		if (unlikely(!src_pmd)) {
+			if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
+				err = -ENOENT;
+				break;
+			}
+			src_pmd = mm_alloc_pmd(src_mm, src_addr);
+			if (unlikely(!src_pmd)) {
+				err = -ENOMEM;
+				break;
+			}
+		}
+		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
+		if (unlikely(!dst_pmd)) {
+			err = -ENOMEM;
+			break;
+		}
+
+		dst_pmdval = pmdp_get_lockless(dst_pmd);
+		/*
+		 * If the dst_pmd is mapped as a THP, don't
+		 * override it; just be strict.
+		 */
+		if (unlikely(pmd_trans_huge(dst_pmdval))) {
+			err = -EEXIST;
+			break;
+		}
+		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
+		if (ptl) {
+			/*
+			 * Check if we can move the pmd without
+			 * splitting it. First check that the address
+			 * alignment is the same in src/dst.  These
+			 * checks don't actually need the PT lock, but
+			 * it's good to do them here to optimize this
+			 * block away at build time if
+			 * CONFIG_TRANSPARENT_HUGEPAGE is not set.
+			 */
+			if (thp_aligned == -1)
+				thp_aligned = ((src_addr & ~HPAGE_PMD_MASK) ==
+					       (dst_addr & ~HPAGE_PMD_MASK));
+			if (!thp_aligned || (src_addr & ~HPAGE_PMD_MASK) ||
+			    !pmd_none(dst_pmdval) ||
+			    src_start + len - src_addr < HPAGE_PMD_SIZE) {
+				spin_unlock(ptl);
+				/* Fall through */
+				split_huge_pmd(src_vma, src_pmd, src_addr);
+			} else {
+				err = remap_pages_huge_pmd(dst_mm,
+							   src_mm,
+							   dst_pmd,
+							   src_pmd,
+							   dst_pmdval,
+							   dst_vma,
+							   src_vma,
+							   dst_addr,
+							   src_addr);
+				cond_resched();
+
+				if (!err) {
+					dst_addr += HPAGE_PMD_SIZE;
+					src_addr += HPAGE_PMD_SIZE;
+					moved += HPAGE_PMD_SIZE;
+				}
+
+				if ((!err || err == -EAGAIN) &&
+				    fatal_signal_pending(current))
+					err = -EINTR;
+
+				if (err && err != -EAGAIN)
+					break;
+
+				continue;
+			}
+		}
+
+		if (pmd_none(*src_pmd)) {
+			if (!(mode & UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES)) {
+				err = -ENOENT;
+				break;
+			}
+			if (unlikely(__pte_alloc(src_mm, src_pmd))) {
+				err = -ENOMEM;
+				break;
+			}
+		}
+
+		if (unlikely(pmd_none(dst_pmdval)) &&
+		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
+			err = -ENOMEM;
+			break;
+		}
+
+		err = remap_pages_pte(dst_mm, src_mm,
+				      dst_pmd, src_pmd,
+				      dst_vma, src_vma,
+				      dst_addr, src_addr,
+				      mode);
+
+		cond_resched();
+
+		if (!err) {
+			dst_addr += PAGE_SIZE;
+			src_addr += PAGE_SIZE;
+			moved += PAGE_SIZE;
+		}
+
+		if ((!err || err == -EAGAIN) &&
+		    fatal_signal_pending(current))
+			err = -EINTR;
+
+		if (err && err != -EAGAIN)
+			break;
+	}
+
+out:
+	mmap_read_unlock(dst_mm);
+	if (dst_mm != src_mm)
+		mmap_read_unlock(src_mm);
+	BUG_ON(moved < 0);
+	BUG_ON(err > 0);
+	BUG_ON(!moved && !err);
+	return moved ? moved : err;
+}
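A minimal userspace sketch of how a monitor thread might drive the new ioctl
following the retval contract documented in remap_pages() above: on a "short"
remap it retries with src+retval, dst+retval, len-retval, and it treats a
negative "remap" field as a hard error. The remap_range() wrapper, the uffd
descriptor and the use of UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES are illustrative
assumptions, and the sketch presumes the updated <linux/userfaultfd.h> from
this patch is installed.

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdint.h>

/* hypothetical wrapper: move "len" bytes of anon memory from src to dst */
static int remap_range(int uffd, uint64_t src, uint64_t dst, uint64_t len)
{
	while (len) {
		struct uffdio_remap req = {
			.dst  = dst,
			.src  = src,
			.len  = len,
			.mode = UFFDIO_REMAP_MODE_ALLOW_SRC_HOLES,
		};

		if (ioctl(uffd, UFFDIO_REMAP, &req) == 0)
			return 0;	/* whole range remapped, dst woken */

		if (req.remap <= 0)	/* nothing was moved: hard error */
			return req.remap ? (int)req.remap : -errno;

		/* short remap: retry the remainder as documented above */
		src += (uint64_t)req.remap;
		dst += (uint64_t)req.remap;
		len -= (uint64_t)req.remap;
	}
	return 0;
}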