
[v2,4/5] userfaultfd: zero access/write hints

Message ID 20220718114748.2623-5-namit@vmware.com (mailing list archive)
State New
Series userfaultfd: support access/write hints

Commit Message

Nadav Amit July 18, 2022, 11:47 a.m. UTC
From: Nadav Amit <namit@vmware.com>

When userfaultfd provides a zeropage in response to the UFFDIO_ZEROPAGE
ioctl, it maps a read-only alias of the shared zero page. If the page is
later written, which is the likely scenario, a write fault occurs and the
fault handler allocates a new page and rewires the page tables.

This is an expensive flow for pages that are likely to be written to.
Users can instead use the UFFDIO_COPY ioctl to install a zeroed page (by
copying zeros), but this is also wasteful.
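
For illustration only (not part of this patch): a minimal userspace sketch
of the UFFDIO_COPY workaround described above. The helper name and the
assumption that uffd, dst_addr, page_size and a pre-zeroed buffer are
already set up are hypothetical.

  /*
   * Sketch of the UFFDIO_COPY workaround: resolve the fault by copying
   * from a buffer the caller has already zeroed. Helper name and calling
   * context are illustrative, not from this patch.
   */
  #include <sys/ioctl.h>
  #include <linux/userfaultfd.h>

  static int fill_zeros_with_copy(int uffd, unsigned long dst_addr,
                                  unsigned long page_size, void *zero_buf)
  {
          struct uffdio_copy copy = {
                  .dst = dst_addr,
                  .src = (unsigned long)zero_buf, /* pre-zeroed source */
                  .len = page_size,
                  .mode = 0,
          };

          return ioctl(uffd, UFFDIO_COPY, &copy);
  }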

Allow userfaultfd users to efficiently map zero-initialized pages that
are writable. If UFFDIO_ZEROPAGE_MODE_WRITE_LIKELY is provided,
UFFDIO_ZEROPAGE maps a freshly cleared page instead of an alias to the
zero page.
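
For illustration only (not part of this patch): with this series applied,
the same fault could instead be resolved with UFFDIO_ZEROPAGE and the new
mode flag. UFFDIO_ZEROPAGE_MODE_WRITE_LIKELY is introduced earlier in this
series and is not in mainline headers; the helper name is hypothetical.

  /*
   * Sketch of the proposed usage: request a writable, freshly cleared
   * page when the monitor expects it to be written soon.
   * UFFDIO_ZEROPAGE_MODE_WRITE_LIKELY comes from earlier patches in this
   * series; the helper name is illustrative.
   */
  #include <sys/ioctl.h>
  #include <linux/userfaultfd.h>

  static int fill_zeros_write_likely(int uffd, unsigned long dst_addr,
                                     unsigned long page_size)
  {
          struct uffdio_zeropage zp = {
                  .range = { .start = dst_addr, .len = page_size },
                  .mode  = UFFDIO_ZEROPAGE_MODE_WRITE_LIKELY,
          };

          return ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
  }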

Suggested-by: David Hildenbrand <david@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 mm/userfaultfd.c | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

Comments

David Hildenbrand July 22, 2022, 7:47 a.m. UTC | #1
On 18.07.22 13:47, Nadav Amit wrote:
> From: Nadav Amit <namit@vmware.com>
> 
> When userfaultfd provides a zeropage in response to the UFFDIO_ZEROPAGE
> ioctl, it maps a read-only alias of the shared zero page. If the page is
> later written, which is the likely scenario, a write fault occurs and the
> fault handler allocates a new page and rewires the page tables.
> 
> This is an expensive flow for pages that are likely to be written to.
> Users can instead use the UFFDIO_COPY ioctl to install a zeroed page (by
> copying zeros), but this is also wasteful.
> 
> Allow userfaultfd users to efficiently map zero-initialized pages that
> are writable. If UFFDIO_ZEROPAGE_MODE_WRITE_LIKELY is provided,
> UFFDIO_ZEROPAGE maps a freshly cleared page instead of an alias to the
> zero page.
> 
> Suggested-by: David Hildenbrand <david@redhat.com>
> Cc: Mike Kravetz <mike.kravetz@oracle.com>
> Cc: Hugh Dickins <hughd@google.com>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Axel Rasmussen <axelrasmussen@google.com>
> Cc: Mike Rapoport <rppt@linux.ibm.com>
> Acked-by: Peter Xu <peterx@redhat.com>
> Signed-off-by: Nadav Amit <namit@vmware.com>
> ---
>  mm/userfaultfd.c | 35 +++++++++++++++++++++++++++++++++++
>  1 file changed, 35 insertions(+)
> 
> diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
> index c15679f3eb6a..954c6980b29f 100644
> --- a/mm/userfaultfd.c
> +++ b/mm/userfaultfd.c
> @@ -241,6 +241,37 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
>  	return ret;
>  }
>  
> +static int mfill_clearpage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
> +			       struct vm_area_struct *dst_vma,
> +			       unsigned long dst_addr,
> +			       uffd_flags_t uffd_flags)
> +{
> +	struct page *page;
> +	int ret;
> +
> +	ret = -ENOMEM;
> +	page = alloc_zeroed_user_highpage_movable(dst_vma, dst_addr);
> +	if (!page)
> +		goto out;
> +
> +	/* The PTE is not marked as dirty unconditionally */
> +	SetPageDirty(page);
> +	__SetPageUptodate(page);
> +
> +	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
> +		goto out_release;
> +
> +	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
> +				       page, true, uffd_flags);
> +	if (ret)
> +		goto out_release;
> +out:
> +	return ret;
> +out_release:
> +	put_page(page);
> +	goto out;
> +}
> +
>  /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
>  static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
>  				pmd_t *dst_pmd,
> @@ -500,6 +531,10 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
>  			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
>  					       dst_addr, src_addr, page,
>  					       uffd_flags);
> +		else if (!(uffd_flags & UFFD_FLAGS_WP) &&
> +			 (uffd_flags & UFFD_FLAGS_WRITE_LIKELY))
> +			err = mfill_clearpage_pte(dst_mm, dst_pmd, dst_vma,
> +						  dst_addr, uffd_flags);
>  		else
>  			err = mfill_zeropage_pte(dst_mm, dst_pmd,
>  						 dst_vma, dst_addr, uffd_flags);

Reviewed-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index c15679f3eb6a..954c6980b29f 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -241,6 +241,37 @@  static int mfill_zeropage_pte(struct mm_struct *dst_mm,
 	return ret;
 }
 
+static int mfill_clearpage_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+			       struct vm_area_struct *dst_vma,
+			       unsigned long dst_addr,
+			       uffd_flags_t uffd_flags)
+{
+	struct page *page;
+	int ret;
+
+	ret = -ENOMEM;
+	page = alloc_zeroed_user_highpage_movable(dst_vma, dst_addr);
+	if (!page)
+		goto out;
+
+	/* The PTE is not marked as dirty unconditionally */
+	SetPageDirty(page);
+	__SetPageUptodate(page);
+
+	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
+		goto out_release;
+
+	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, true, uffd_flags);
+	if (ret)
+		goto out_release;
+out:
+	return ret;
+out_release:
+	put_page(page);
+	goto out;
+}
+
 /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
 static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
 				pmd_t *dst_pmd,
@@ -500,6 +531,10 @@  static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
 					       dst_addr, src_addr, page,
 					       uffd_flags);
+		else if (!(uffd_flags & UFFD_FLAGS_WP) &&
+			 (uffd_flags & UFFD_FLAGS_WRITE_LIKELY))
+			err = mfill_clearpage_pte(dst_mm, dst_pmd, dst_vma,
+						  dst_addr, uffd_flags);
 		else
 			err = mfill_zeropage_pte(dst_mm, dst_pmd,
 						 dst_vma, dst_addr, uffd_flags);