
[V7,5/5] testing

Message ID 20190114095438.32470-7-aneesh.kumar@linux.ibm.com (mailing list archive)
State New, archived
Series mm/kvm/vfio/ppc64: Migrate compound pages out of CMA region

Commit Message

Aneesh Kumar K.V Jan. 14, 2019, 9:54 a.m. UTC
---
 mm/gup.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

Comments

Aneesh Kumar K.V Jan. 15, 2019, 11:25 a.m. UTC | #1
You can ignore this one.

On 1/14/19 3:24 PM, Aneesh Kumar K.V wrote:
> ---
>   mm/gup.c | 6 ++++--
>   1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/mm/gup.c b/mm/gup.c
> index 6e8152594e83..91849c39931a 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -1226,7 +1226,7 @@ static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
>   		 * be pinning these entries, we might as well move them out
>   		 * of the CMA zone if possible.
>   		 */
> -		if (is_migrate_cma_page(pages[i])) {
> +		if (true || is_migrate_cma_page(pages[i])) {
>   
>   			struct page *head = compound_head(pages[i]);
>   
> @@ -1256,6 +1256,7 @@ static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
>   		for (i = 0; i < nr_pages; i++)
>   			put_page(pages[i]);
>   
> +		pr_emerg("migrating nr_pages");
>   		if (migrate_pages(&cma_page_list, new_non_cma_page,
>   				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
>   			/*
> @@ -1274,10 +1275,11 @@ static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
>   		nr_pages = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
>   		if ((nr_pages > 0) && migrate_allow) {
>   			drain_allow = true;
> -			goto check_again;
> +			//goto check_again;
>   		}
>   	}
>   
> +	pr_emerg("Returning with %ld\n", nr_pages);
>   	return nr_pages;
>   }
>   #else
>
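
One side note on the instrumentation: pr_emerg() takes a printk-style format string, so the first trace prints the literal text "migrating nr_pages" with no trailing newline rather than the number of pages being migrated. A hypothetical variant of that one line (not part of the posted patch) that would log the actual count, shown with the surrounding context from the hunk:

		/*
		 * Hypothetical tweak only: pass nr_pages (a long) as a printk
		 * argument and terminate the line so the trace is complete.
		 */
		pr_emerg("migrating %ld pages\n", nr_pages);
		if (migrate_pages(&cma_page_list, new_non_cma_page,
				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {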

Patch

diff --git a/mm/gup.c b/mm/gup.c
index 6e8152594e83..91849c39931a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1226,7 +1226,7 @@ static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
 		 * be pinning these entries, we might as well move them out
 		 * of the CMA zone if possible.
 		 */
-		if (is_migrate_cma_page(pages[i])) {
+		if (true || is_migrate_cma_page(pages[i])) {
 
 			struct page *head = compound_head(pages[i]);
 
@@ -1256,6 +1256,7 @@ static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
+		pr_emerg("migrating nr_pages");
 		if (migrate_pages(&cma_page_list, new_non_cma_page,
 				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
 			/*
@@ -1274,10 +1275,11 @@ static long check_and_migrate_cma_pages(unsigned long start, long nr_pages,
 		nr_pages = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
 		if ((nr_pages > 0) && migrate_allow) {
 			drain_allow = true;
-			goto check_again;
+			//goto check_again;
 		}
 	}
 
+	pr_emerg("Returning with %ld\n", nr_pages);
 	return nr_pages;
 }
 #else
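
Taken together, the three hunks turn check_and_migrate_cma_pages() into a forced-migration test: the "true ||" short-circuit pushes every pinned page down the CMA migration path whether or not it sits in a CMA pageblock, the two pr_emerg() calls trace the migrate_pages() invocation and the final return value, and commenting out "goto check_again" limits the function to a single migration pass. A minimal sketch of how throwaway instrumentation like this could sit behind a compile-time switch instead of being reverted by hand; CONFIG_GUP_FORCE_CMA_MIGRATE and gup_cma_trace() are hypothetical names, not anything the posted series defines:

	/*
	 * Sketch only: CONFIG_GUP_FORCE_CMA_MIGRATE is a made-up Kconfig
	 * symbol.  When it is off, no_printk() swallows the traces (while
	 * still type-checking the format arguments) and IS_ENABLED()
	 * evaluates to 0, so only the normal is_migrate_cma_page() test
	 * remains.
	 */
	#if IS_ENABLED(CONFIG_GUP_FORCE_CMA_MIGRATE)
	#define gup_cma_trace(fmt, ...)	pr_emerg(fmt, ##__VA_ARGS__)
	#else
	#define gup_cma_trace(fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
	#endif

	/* In the per-page loop of check_and_migrate_cma_pages(): */
		if (IS_ENABLED(CONFIG_GUP_FORCE_CMA_MIGRATE) ||
		    is_migrate_cma_page(pages[i])) {

	/* At the migrate_pages() call site: */
		gup_cma_trace("migrating %ld pages\n", nr_pages);

With the option enabled this behaves like the hack above; with it disabled the debug path compiles away, so nothing has to be stripped out before the real series is posted.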