
[RFC,03/13] mm/memory: slightly simplify copy_present_pte()

Message ID: 20220224122614.94921-4-david@redhat.com
State: New
Series: mm: COW fixes part 2: reliable GUP pins of anonymous pages

Commit Message

David Hildenbrand Feb. 24, 2022, 12:26 p.m. UTC
Let's move the pinning check into the caller, to simplify return code
logic and prepare for further changes: relocating the
page_needs_cow_for_dma() into rmap handling code.

While at it, remove the unused pte parameter and simplify the comments a
bit.

No functional change intended.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/memory.c | 53 ++++++++++++++++-------------------------------------
 1 file changed, 16 insertions(+), 37 deletions(-)
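
For orientation, here is a condensed sketch of the control-flow change this
makes in copy_present_pte(). It paraphrases the diff below, with "..." eliding
the unchanged argument lists and the shared-page tail, so it is not a literal
excerpt:

        /* Old flow: copy_present_page() did the pin check itself and used a
         * tri-state return: > 0 "caller may reuse the PTE", 0 "page copied",
         * negative "preallocate a page and retry".
         */
        page = vm_normal_page(src_vma, addr, pte);
        if (page) {
                int retval = copy_present_page(..., pte, page);

                if (retval <= 0)
                        return retval;
                /* fall through and share the page */
        }

        /* New flow: the caller does the pin check; copy_present_page() only
         * ever copies, returning 0 on success or a negative error.
         */
        page = vm_normal_page(src_vma, addr, pte);
        if (page && unlikely(page_needs_cow_for_dma(src_vma, page))) {
                /* may be pinned: give the child its own copy right away */
                return copy_present_page(..., page);
        } else if (page) {
                get_page(page);
                page_dup_rmap(page, false);
                rss[mm_counter(page)]++;
        }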

Comments

Hillf Danton Feb. 25, 2022, 5:15 a.m. UTC | #1
On Thu, 24 Feb 2022 13:26:04 +0100 David Hildenbrand wrote:
> Let's move the pinning check into the caller, to simplify return code
> logic and prepare for further changes: relocating the
> page_needs_cow_for_dma() into rmap handling code.
> 
> While at it, remove the unused pte parameter and simplify the comments a
> bit.
> 
> No functional change intended.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  mm/memory.c | 53 ++++++++++++++++-------------------------------------
>  1 file changed, 16 insertions(+), 37 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index c6177d897964..accb72a3343d 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -865,19 +865,11 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>  }
>  
>  /*
> - * Copy a present and normal page if necessary.
> + * Copy a present and normal page.
>   *
> - * NOTE! The usual case is that this doesn't need to do
> - * anything, and can just return a positive value. That
> - * will let the caller know that it can just increase
> - * the page refcount and re-use the pte the traditional
> - * way.
> - *
> - * But _if_ we need to copy it because it needs to be
> - * pinned in the parent (and the child should get its own
> - * copy rather than just a reference to the same page),
> - * we'll do that here and return zero to let the caller
> - * know we're done.
> + * NOTE! The usual case is that this isn't required;
> + * instead, the caller can just increase the page refcount
> + * and re-use the pte the traditional way.
>   *
>   * And if we need a pre-allocated page but don't yet have
>   * one, return a negative error to let the preallocation
> @@ -887,25 +879,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>  static inline int
>  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>  		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
> -		  struct page **prealloc, pte_t pte, struct page *page)
> +		  struct page **prealloc, struct page *page)
>  {
>  	struct page *new_page;
> -
> -	/*
> -	 * What we want to do is to check whether this page may
> -	 * have been pinned by the parent process.  If so,
> -	 * instead of wrprotect the pte on both sides, we copy
> -	 * the page immediately so that we'll always guarantee
> -	 * the pinned page won't be randomly replaced in the
> -	 * future.
> -	 *
> -	 * The page pinning checks are just "has this mm ever
> -	 * seen pinning", along with the (inexact) check of
> -	 * the page count. That might give false positives for
> -	 * for pinning, but it will work correctly.
> -	 */
> -	if (likely(!page_needs_cow_for_dma(src_vma, page)))
> -		return 1;
> +	pte_t pte;
>  
>  	new_page = *prealloc;
>  	if (!new_page)
> @@ -947,14 +924,16 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>  	struct page *page;
>  
>  	page = vm_normal_page(src_vma, addr, pte);
> -	if (page) {
> -		int retval;
> -
> -		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
> -					   addr, rss, prealloc, pte, page);
> -		if (retval <= 0)
> -			return retval;
> -
> +	if (page && unlikely(page_needs_cow_for_dma(src_vma, page))) {
> +		/*
> +		 * If this page may have been pinned by the parent process,
> +		 * copy the page immediately for the child so that we'll always
> +		 * guarantee the pinned page won't be randomly replaced in the
> +		 * future.
> +		 */
> +		return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
> +					 addr, rss, prealloc, page);

Off topic question, is it likely that a DMA transfer from the device, running
in parallel with copying the page, ends up with different data between parent
and child?

Hillf

> +	} else if (page) {
>  		get_page(page);
>  		page_dup_rmap(page, false);
>  		rss[mm_counter(page)]++;
> -- 
> 2.35.1
David Hildenbrand Feb. 25, 2022, 8:01 a.m. UTC | #2
On 25.02.22 06:15, Hillf Danton wrote:
> On Thu, 24 Feb 2022 13:26:04 +0100 David Hildenbrand wrote:
>> Let's move the pinning check into the caller, to simplify return code
>> logic and prepare for further changes: relocating the
>> page_needs_cow_for_dma() into rmap handling code.
>>
>> While at it, remove the unused pte parameter and simplify the comments a
>> bit.
>>
>> No functional change intended.
>>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>  mm/memory.c | 53 ++++++++++++++++-------------------------------------
>>  1 file changed, 16 insertions(+), 37 deletions(-)
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index c6177d897964..accb72a3343d 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -865,19 +865,11 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>>  }
>>  
>>  /*
>> - * Copy a present and normal page if necessary.
>> + * Copy a present and normal page.
>>   *
>> - * NOTE! The usual case is that this doesn't need to do
>> - * anything, and can just return a positive value. That
>> - * will let the caller know that it can just increase
>> - * the page refcount and re-use the pte the traditional
>> - * way.
>> - *
>> - * But _if_ we need to copy it because it needs to be
>> - * pinned in the parent (and the child should get its own
>> - * copy rather than just a reference to the same page),
>> - * we'll do that here and return zero to let the caller
>> - * know we're done.
>> + * NOTE! The usual case is that this isn't required;
>> + * instead, the caller can just increase the page refcount
>> + * and re-use the pte the traditional way.
>>   *
>>   * And if we need a pre-allocated page but don't yet have
>>   * one, return a negative error to let the preallocation
>> @@ -887,25 +879,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>>  static inline int
>>  copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>>  		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
>> -		  struct page **prealloc, pte_t pte, struct page *page)
>> +		  struct page **prealloc, struct page *page)
>>  {
>>  	struct page *new_page;
>> -
>> -	/*
>> -	 * What we want to do is to check whether this page may
>> -	 * have been pinned by the parent process.  If so,
>> -	 * instead of wrprotect the pte on both sides, we copy
>> -	 * the page immediately so that we'll always guarantee
>> -	 * the pinned page won't be randomly replaced in the
>> -	 * future.
>> -	 *
>> -	 * The page pinning checks are just "has this mm ever
>> -	 * seen pinning", along with the (inexact) check of
>> -	 * the page count. That might give false positives for
>> -	 * for pinning, but it will work correctly.
>> -	 */
>> -	if (likely(!page_needs_cow_for_dma(src_vma, page)))
>> -		return 1;
>> +	pte_t pte;
>>  
>>  	new_page = *prealloc;
>>  	if (!new_page)
>> @@ -947,14 +924,16 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>>  	struct page *page;
>>  
>>  	page = vm_normal_page(src_vma, addr, pte);
>> -	if (page) {
>> -		int retval;
>> -
>> -		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
>> -					   addr, rss, prealloc, pte, page);
>> -		if (retval <= 0)
>> -			return retval;
>> -
>> +	if (page && unlikely(page_needs_cow_for_dma(src_vma, page))) {
>> +		/*
>> +		 * If this page may have been pinned by the parent process,
>> +		 * copy the page immediately for the child so that we'll always
>> +		 * guarantee the pinned page won't be randomly replaced in the
>> +		 * future.
>> +		 */
>> +		return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
>> +					 addr, rss, prealloc, page);
> 
> Off topic question, is it likely that a DMA transfer from the device, running
> in parallel with copying the page, ends up with different data between parent
> and child?

If the parent has a GUP pin on the page before fork(), the parent will
keep that page mapped exclusively and the child will receive a copy.

It's pretty much undefined which content that copy will have if there is
concurrent DMA via that GUP pin: we'll snapshot that page at some point
in time.

It's fully under the parent process's control when to start/stop I/O via a
GUP pin and when to call fork().

So yes, if there is a fork() with concurrent DMA via a GUP pin modifying the
page, the page content isn't well defined: it could be the content before DMA,
mid-DMA or post-DMA.
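
To make that concrete, here is a minimal userspace sketch. start_dma_into()
and stop_dma() are hypothetical placeholders for whatever takes a long-term
GUP pin and drives DMA into the buffer (io_uring fixed buffers, VFIO, RDMA
registration, ...); they are not real APIs:

        #include <string.h>
        #include <sys/mman.h>
        #include <sys/wait.h>
        #include <unistd.h>

        /* Hypothetical stand-ins for any long-term GUP pin + DMA engine. */
        void start_dma_into(void *buf, size_t len);
        void stop_dma(void);

        int main(void)
        {
                size_t len = 4096;
                char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                memset(buf, 0, len);        /* fault the anon page in */
                start_dma_into(buf, len);   /* parent now holds a GUP pin */

                if (fork() == 0) {
                        /*
                         * Child: the pinned page was copied eagerly at fork()
                         * (the copy_present_page() path), so buf is a private
                         * snapshot. With DMA still running in the parent, its
                         * content is undefined: pre-, mid- or post-DMA.
                         */
                        _exit(0);
                }
                wait(NULL);

                /*
                 * Parent: it still maps the pinned page itself, so DMA keeps
                 * landing in memory the parent can observe. Quiescing DMA
                 * before fork() is what makes the child's snapshot well
                 * defined.
                 */
                stop_dma();
                return 0;
        }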

Patch

diff --git a/mm/memory.c b/mm/memory.c
index c6177d897964..accb72a3343d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -865,19 +865,11 @@  copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 }
 
 /*
- * Copy a present and normal page if necessary.
+ * Copy a present and normal page.
  *
- * NOTE! The usual case is that this doesn't need to do
- * anything, and can just return a positive value. That
- * will let the caller know that it can just increase
- * the page refcount and re-use the pte the traditional
- * way.
- *
- * But _if_ we need to copy it because it needs to be
- * pinned in the parent (and the child should get its own
- * copy rather than just a reference to the same page),
- * we'll do that here and return zero to let the caller
- * know we're done.
+ * NOTE! The usual case is that this isn't required;
+ * instead, the caller can just increase the page refcount
+ * and re-use the pte the traditional way.
  *
  * And if we need a pre-allocated page but don't yet have
  * one, return a negative error to let the preallocation
@@ -887,25 +879,10 @@  copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static inline int
 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
-		  struct page **prealloc, pte_t pte, struct page *page)
+		  struct page **prealloc, struct page *page)
 {
 	struct page *new_page;
-
-	/*
-	 * What we want to do is to check whether this page may
-	 * have been pinned by the parent process.  If so,
-	 * instead of wrprotect the pte on both sides, we copy
-	 * the page immediately so that we'll always guarantee
-	 * the pinned page won't be randomly replaced in the
-	 * future.
-	 *
-	 * The page pinning checks are just "has this mm ever
-	 * seen pinning", along with the (inexact) check of
-	 * the page count. That might give false positives for
-	 * for pinning, but it will work correctly.
-	 */
-	if (likely(!page_needs_cow_for_dma(src_vma, page)))
-		return 1;
+	pte_t pte;
 
 	new_page = *prealloc;
 	if (!new_page)
@@ -947,14 +924,16 @@  copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	struct page *page;
 
 	page = vm_normal_page(src_vma, addr, pte);
-	if (page) {
-		int retval;
-
-		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
-					   addr, rss, prealloc, pte, page);
-		if (retval <= 0)
-			return retval;
-
+	if (page && unlikely(page_needs_cow_for_dma(src_vma, page))) {
+		/*
+		 * If this page may have been pinned by the parent process,
+		 * copy the page immediately for the child so that we'll always
+		 * guarantee the pinned page won't be randomly replaced in the
+		 * future.
+		 */
+		return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
+					 addr, rss, prealloc, page);
+	} else if (page) {
 		get_page(page);
 		page_dup_rmap(page, false);
 		rss[mm_counter(page)]++;