[v1,1/4] lib/test_hmm: make dmirror_atomic_map() consume a single page

Message ID 20250129115803.2084769-2-david@redhat.com (mailing list archive)
State New
Series mm: cleanups for device-exclusive entries (hmm)

Commit Message

David Hildenbrand Jan. 29, 2025, 11:57 a.m. UTC
The caller now always passes a single page; let's simplify, and return
"0" on success.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 lib/test_hmm.c | 33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)
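
For context (editorial sketch, not part of the patch): dmirror->pt, the
simulated device page table in lib/test_hmm.c, is an XArray indexed by
PFN, and device-exclusive ("atomic") entries are distinguished by a tag
stored in the pointer's low bits via xa_tag_pointer(). Below is a
minimal sketch of the matching lookup side; the helper name
dmirror_pt_lookup() is hypothetical, while the XArray calls and
DPT_XA_TAG_ATOMIC (1UL) match what lib/test_hmm.c already uses:

	/*
	 * Hypothetical helper, for illustration only: fetch a page
	 * from the device page table and report whether it was
	 * stored as an atomic (device-exclusive) entry.
	 */
	static struct page *dmirror_pt_lookup(struct dmirror *dmirror,
					      unsigned long addr,
					      bool *atomic)
	{
		void *entry = xa_load(&dmirror->pt, addr >> PAGE_SHIFT);

		if (!entry)
			return NULL;
		*atomic = xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC;
		return xa_untag_pointer(entry);
	}

This is also why the simplified function can just return 0 on success:
xa_store() hands back the previous entry, or an xa_err()-encoded
pointer on failure, so the store itself carries all the error
reporting, and a mapped-pages count adds nothing for a single page.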

Comments

Alistair Popple Jan. 30, 2025, 12:29 a.m. UTC | #1
On Wed, Jan 29, 2025 at 12:57:59PM +0100, David Hildenbrand wrote:
> The caller now always passes a single page; let's simplify, and return
> "0" on success.

Thanks for cleaning that up.

Reviewed-by: Alistair Popple <apopple@nvidia.com>

Patch

diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 9e1b07a227a3..1c0a58279db9 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -706,34 +706,23 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
 	return 0;
 }
 
-static int dmirror_atomic_map(unsigned long start, unsigned long end,
-			      struct page **pages, struct dmirror *dmirror)
+static int dmirror_atomic_map(unsigned long addr, struct page *page,
+		struct dmirror *dmirror)
 {
-	unsigned long pfn, mapped = 0;
-	int i;
+	void *entry;
 
 	/* Map the migrated pages into the device's page tables. */
 	mutex_lock(&dmirror->mutex);
 
-	for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) {
-		void *entry;
-
-		if (!pages[i])
-			continue;
-
-		entry = pages[i];
-		entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC);
-		entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC);
-		if (xa_is_err(entry)) {
-			mutex_unlock(&dmirror->mutex);
-			return xa_err(entry);
-		}
-
-		mapped++;
+	entry = xa_tag_pointer(page, DPT_XA_TAG_ATOMIC);
+	entry = xa_store(&dmirror->pt, addr >> PAGE_SHIFT, entry, GFP_ATOMIC);
+	if (xa_is_err(entry)) {
+		mutex_unlock(&dmirror->mutex);
+		return xa_err(entry);
 	}
 
 	mutex_unlock(&dmirror->mutex);
-	return mapped;
+	return 0;
 }
 
 static int dmirror_migrate_finalize_and_map(struct migrate_vma *args,
@@ -803,9 +792,7 @@ static int dmirror_exclusive(struct dmirror *dmirror,
 			break;
 		}
 
-		ret = dmirror_atomic_map(addr, addr + PAGE_SIZE, &page, dmirror);
-		if (!ret)
-			ret = -EBUSY;
+		ret = dmirror_atomic_map(addr, page, dmirror);
 		folio_unlock(folio);
 		folio_put(folio);