
[v3,5-fix/5] mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation fix

Message ID: 20240829150500.2599549-1-wangkefeng.wang@huawei.com (mailing list archive)
State: New
Series: [v3,5-fix/5] mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation fix

Commit Message

Kefeng Wang Aug. 29, 2024, 3:05 p.m. UTC
  - Use folio_try_get()/folio_put() unconditionally, per David
  - Print the current pfn when isolate_folio_to_list() fails
  - Memory hotremove test passed for free/used hugetlb folios

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/memory_hotplug.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

Comments

David Hildenbrand Aug. 29, 2024, 3:19 p.m. UTC | #1
On 29.08.24 17:05, Kefeng Wang wrote:
>    - Use folio_try_get()/folio_put() unconditionally, per David
>    - print current pfn when isolate_folio_to_list fails.
>    - memory hotremove test passed for free/used hugetlb folio
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>   mm/memory_hotplug.c | 20 +++++++++-----------
>   1 file changed, 9 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 5f09866d17cf..621ae1015106 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1780,7 +1780,6 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>   
>   	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
>   		struct page *page;
> -		bool hugetlb;
>   
>   		if (!pfn_valid(pfn))
>   			continue;
> @@ -1811,22 +1810,21 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
>   			continue;
>   		}
>   
> -		hugetlb = folio_test_hugetlb(folio);
> -		if (!hugetlb) {
> -			folio = folio_get_nontail_page(page);
> -			if (!folio)
> -				continue;
> -		}
> +		if (!folio_try_get(folio))
> +			continue;
> +
> +		if (unlikely(page_folio(page) != folio))
> +			goto put_folio;
>   
>   		if (!isolate_folio_to_list(folio, &source)) {
>   			if (__ratelimit(&migrate_rs)) {
> -				pr_warn("failed to isolate pfn %lx\n", pfn);
> +				pr_warn("failed to isolate pfn %lx\n",
> +					page_to_pfn(page));
>   				dump_page(page, "isolation failed");
>   			}
>   		}
> -
> -		if (!hugetlb)
> -			folio_put(folio);
> +put_folio:
> +		folio_put(folio);
>   	}
>   	if (!list_empty(&source)) {
>   		nodemask_t nmask = node_states[N_MEMORY];

LGTM, isolate_hugetlb() would not handle free hugetlb folios either, 
because of the folio_try_get().
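
[Editor's illustration] To make the point about free hugetlb folios concrete, here is a minimal userspace model of the try-get semantics, assuming the simplified invariant that a free folio sits on the free lists with a reference count of zero. The names model_folio and model_try_get are made up for illustration and are not kernel APIs; the real folio_try_get() additionally deals with RCU and tail-page handling.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for a folio: only the reference count matters here. */
	struct model_folio {
		atomic_int refcount;
	};

	/*
	 * Take a reference only if the count is already non-zero, i.e. never
	 * resurrect an object whose count has dropped to zero (a "free" folio).
	 */
	static bool model_try_get(struct model_folio *f)
	{
		int old = atomic_load(&f->refcount);

		while (old != 0) {
			if (atomic_compare_exchange_weak(&f->refcount, &old, old + 1))
				return true;
		}
		return false;
	}

	int main(void)
	{
		struct model_folio free_folio = { .refcount = 0 };	/* e.g. a free hugetlb folio */
		struct model_folio used_folio = { .refcount = 2 };	/* an in-use folio */

		printf("free folio (refcount 0): try_get %s\n",
		       model_try_get(&free_folio) ? "succeeded" : "failed, folio skipped");
		printf("used folio (refcount 2): try_get %s\n",
		       model_try_get(&used_folio) ? "succeeded, isolation attempted" : "failed");
		return 0;
	}

Built with e.g. "cc -std=c11 model.c", the first line reports a failed try-get (the folio is skipped), the second a successful one. This mirrors the review comment above: the unconditional folio_try_get() already skips free hugetlb folios, just as the folio_try_get() inside isolate_hugetlb() would have.
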
Kefeng Wang Aug. 30, 2024, 1:23 a.m. UTC | #2
On 2024/8/29 23:19, David Hildenbrand wrote:
> On 29.08.24 17:05, Kefeng Wang wrote:
>> [...]
> 
> LGTM, isolate_hugetlb() would not handle free hugetlb folios either, 
> because of the folio_try_get().

Exactly, thanks for your review :)

Patch

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 5f09866d17cf..621ae1015106 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1780,7 +1780,6 @@  static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		struct page *page;
-		bool hugetlb;
 
 		if (!pfn_valid(pfn))
 			continue;
@@ -1811,22 +1810,21 @@  static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			continue;
 		}
 
-		hugetlb = folio_test_hugetlb(folio);
-		if (!hugetlb) {
-			folio = folio_get_nontail_page(page);
-			if (!folio)
-				continue;
-		}
+		if (!folio_try_get(folio))
+			continue;
+
+		if (unlikely(page_folio(page) != folio))
+			goto put_folio;
 
 		if (!isolate_folio_to_list(folio, &source)) {
 			if (__ratelimit(&migrate_rs)) {
-				pr_warn("failed to isolate pfn %lx\n", pfn);
+				pr_warn("failed to isolate pfn %lx\n",
+					page_to_pfn(page));
 				dump_page(page, "isolation failed");
 			}
 		}
-
-		if (!hugetlb)
-			folio_put(folio);
+put_folio:
+		folio_put(folio);
 	}
 	if (!list_empty(&source)) {
 		nodemask_t nmask = node_states[N_MEMORY];
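
[Editor's illustration] For readability, the per-pfn isolation path in do_migrate_range() after this patch looks roughly like the sketch below. It is reconstructed from the hunks above; the pfn-to-page/folio lookup and the earlier checks are elided and only indicated by comments, so treat it as an approximation of the upstream function rather than a verbatim copy.

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		/* ... pfn -> page/folio lookup and earlier checks elided ... */

		/* Take a reference unconditionally; this fails for free folios. */
		if (!folio_try_get(folio))
			continue;

		/* Re-check that the page still belongs to the folio we pinned. */
		if (unlikely(page_folio(page) != folio))
			goto put_folio;

		/* One isolation helper for hugetlb, LRU and non-LRU movable folios. */
		if (!isolate_folio_to_list(folio, &source)) {
			if (__ratelimit(&migrate_rs)) {
				pr_warn("failed to isolate pfn %lx\n",
					page_to_pfn(page));
				dump_page(page, "isolation failed");
			}
		}
	put_folio:
		folio_put(folio);
	}

Compared with the previous code, no hugetlb special case remains: the reference is taken and dropped the same way for every folio, and the warning now prints page_to_pfn(page), so the reported pfn always matches the page passed to dump_page().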