[v2,2/6] mm: userfaultfd: refactor hugetlb folio allocation / lookup code

Message ID 20230629205040.665834-2-axelrasmussen@google.com
State New
Series [v2,1/6] mm: userfaultfd: add new UFFDIO_POISON ioctl

Commit Message

Axel Rasmussen June 29, 2023, 8:50 p.m. UTC
At the top of `hugetlb_mfill_atomic_pte`, we need to get the folio we're
going to be mapping. There are three basic cases we're dealing with
here:

1. We're doing a UFFDIO_CONTINUE, in which case we look up an existing
   folio in the pagecache instead of allocating a new one.
2. We need to allocate a new folio.
3. We previously failed while populating our new folio, so we "returned"
   a temporary folio using `foliop` and had our caller retry.

In a future commit I'm going to add a fourth case for UFFDIO_POISON,
where we aren't going to map a folio at all (newly allocated or
otherwise). This end state will be simpler, and we can reuse a bit more
code, if we stop using `if (...)` to distinguish the cases.

So, refactor the cases so they share most of the same code, and instead
switch to `goto` to skip the parts that don't apply to the case at hand.
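
Concretely, the refactored function ends up with roughly the following
shape (a simplified sketch of the diff below; locking and most error
handling elided):

	if (is_continue) {
		/* Case 1: UFFDIO_CONTINUE found the folio in the pagecache. */
		goto ready;
	}

	/* Cases 2 and 3 now share the -EEXIST and allocation checks. */
	if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
		ret = -EEXIST;
		goto out;	/* also drops *foliop if the caller set it */
	}

	folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
	if (IS_ERR(folio)) {
		ret = -ENOMEM;
		goto out;	/* also drops *foliop if the caller set it */
	}

	if (!*foliop) {
		/* Case 2: populate the new folio from userspace. */
		ret = copy_folio_from_user(folio, (const void __user *)src_addr,
					   false);
	} else {
		/* Case 3: retry; copy from the temporary folio we returned. */
		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
	}

ready:
	/* `folio` is non-NULL and populated; map it. */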

Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
---
 mm/hugetlb.c | 53 +++++++++++++++++++++++++---------------------------
 1 file changed, 25 insertions(+), 28 deletions(-)

Comments

Peter Xu July 4, 2023, 8:57 p.m. UTC | #1
On Thu, Jun 29, 2023 at 01:50:36PM -0700, Axel Rasmussen wrote:
> At the top of `hugetlb_mfill_atomic_pte`, we need to get the folio we're
> going to be mapping. There are three basic cases we're dealing with
> here:
> 
> 1. We're doing a UFFDIO_CONTINUE, in which case we look up an existing
>    folio in the pagecache instead of allocating a new one.
> 2. We need to allocate a new folio.
> 3. We previously failed while populating our new folio, so we "returned"
>    a temporary folio using `foliop` and had our caller retry.
> 
> In a future commit I'm going to add a fourth case for UFFDIO_POISON,
> where we aren't going to map a folio at all (newly allocated or
> otherwise). This end state will be simpler, and we can reuse a bit more
> code, if we stop using `if (...)` to distinguish the cases.
> 
> So, refactor the cases so they share most of the same code, and instead
> switch to `goto` to skip the parts that don't apply to the case at hand.
> 
> Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>

I didn't get why this patch is needed...

IIUC you added MFILL_ATOMIC_POISON handling at the entry of
hugetlb_mfill_atomic_pte() anyway.  Maybe it can even have its own
hugetlb_mfill_atomic_poison()?  Did I miss something?
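
(For reference, a dedicated handler along those lines could be quite
small. The sketch below is hypothetical: hugetlb_mfill_atomic_poison()
does not exist in this series, and the PTE-marker name assumes the
approach from patch 1.)

	static int hugetlb_mfill_atomic_poison(struct mm_struct *dst_mm,
					       pte_t *dst_pte,
					       struct vm_area_struct *dst_vma,
					       unsigned long dst_addr,
					       struct hstate *h)
	{
		spinlock_t *ptl = huge_pte_lock(h, dst_mm, dst_pte);

		/* Don't overwrite any existing PTEs (even markers). */
		if (!huge_pte_none(huge_ptep_get(dst_pte))) {
			spin_unlock(ptl);
			return -EEXIST;
		}

		/* Install the poison marker introduced in patch 1. */
		set_huge_pte_at(dst_mm, dst_addr, dst_pte,
				make_pte_marker(PTE_MARKER_POISONED));

		/* No need to invalidate: the PTE was non-present before. */
		update_mmu_cache(dst_vma, dst_addr, dst_pte);

		spin_unlock(ptl);
		return 0;
	}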

> ---
>  mm/hugetlb.c | 53 +++++++++++++++++++++++++---------------------------
>  1 file changed, 25 insertions(+), 28 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index bce28cca73a1..38711d49e4db 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6259,22 +6259,32 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>  		if (IS_ERR(folio))
>  			goto out;
>  		folio_in_pagecache = true;
> -	} else if (!*foliop) {
> -		/* If a folio already exists, then it's UFFDIO_COPY for
> -		 * a non-missing case. Return -EEXIST.
> -		 */
> -		if (vm_shared &&
> -		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> -			ret = -EEXIST;
> -			goto out;
> +		goto ready;
> +	}
> +
> +	/* If a folio already exists, then it's UFFDIO_COPY for
> +	 * a non-missing case. Return -EEXIST.
> +	 */
> +	if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> +		ret = -EEXIST;
> +		if (*foliop) {
> +			folio_put(*foliop);
> +			*foliop = NULL;
>  		}
> +		goto out;
> +	}
>  
> -		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> -		if (IS_ERR(folio)) {
> -			ret = -ENOMEM;
> -			goto out;
> +	folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> +	if (IS_ERR(folio)) {
> +		ret = -ENOMEM;
> +		if (*foliop) {
> +			folio_put(*foliop);
> +			*foliop = NULL;
>  		}
> +		goto out;
> +	}
>  
> +	if (!*foliop) {
>  		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
>  					   false);
>  
> @@ -6302,22 +6312,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>  			 */
>  			goto out;
>  		}
> -	} else {
> -		if (vm_shared &&
> -		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
> -			folio_put(*foliop);
> -			ret = -EEXIST;
> -			*foliop = NULL;
> -			goto out;
> -		}
> -
> -		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
> -		if (IS_ERR(folio)) {
> -			folio_put(*foliop);
> -			ret = -ENOMEM;
> -			*foliop = NULL;
> -			goto out;
> -		}
> +	} else { /* Caller retried because we set *foliop previously */
>  		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
>  		folio_put(*foliop);
>  		*foliop = NULL;
> @@ -6327,6 +6322,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
>  		}
>  	}
>  
> +ready: /* `folio` ready to map (non-NULL, populated) */
> +
>  	/*
>  	 * The memory barrier inside __folio_mark_uptodate makes sure that
>  	 * preceding stores to the page contents become visible before
> -- 
> 2.41.0.255.g8b1d071c50-goog
>

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bce28cca73a1..38711d49e4db 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6259,22 +6259,32 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 		if (IS_ERR(folio))
 			goto out;
 		folio_in_pagecache = true;
-	} else if (!*foliop) {
-		/* If a folio already exists, then it's UFFDIO_COPY for
-		 * a non-missing case. Return -EEXIST.
-		 */
-		if (vm_shared &&
-		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
-			ret = -EEXIST;
-			goto out;
+		goto ready;
+	}
+
+	/* If a folio already exists, then it's UFFDIO_COPY for
+	 * a non-missing case. Return -EEXIST.
+	 */
+	if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+		ret = -EEXIST;
+		if (*foliop) {
+			folio_put(*foliop);
+			*foliop = NULL;
 		}
+		goto out;
+	}
 
-		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
-		if (IS_ERR(folio)) {
-			ret = -ENOMEM;
-			goto out;
+	folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
+	if (IS_ERR(folio)) {
+		ret = -ENOMEM;
+		if (*foliop) {
+			folio_put(*foliop);
+			*foliop = NULL;
 		}
+		goto out;
+	}
 
+	if (!*foliop) {
 		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
 					   false);
 
@@ -6302,22 +6312,7 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			 */
 			goto out;
 		}
-	} else {
-		if (vm_shared &&
-		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
-			folio_put(*foliop);
-			ret = -EEXIST;
-			*foliop = NULL;
-			goto out;
-		}
-
-		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
-		if (IS_ERR(folio)) {
-			folio_put(*foliop);
-			ret = -ENOMEM;
-			*foliop = NULL;
-			goto out;
-		}
+	} else { /* Caller retried because we set *foliop previously */
 		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
@@ -6327,6 +6322,8 @@  int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 		}
 	}
 
+ready: /* `folio` ready to map (non-NULL, populated) */
+
 	/*
 	 * The memory barrier inside __folio_mark_uptodate makes sure that
 	 * preceding stores to the page contents become visible before