
[-next,v2,4/7] mm: damon: paddr: convert damon_pa_*() to use folios

Message ID 20221227122714.161224-5-wangkefeng.wang@huawei.com
State New
Series mm: convert page_idle/damon to use folios

Commit Message

Kefeng Wang Dec. 27, 2022, 12:27 p.m. UTC
With damon_get_folio(), let's convert all of the damon_pa_*() functions to use folios.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/paddr.c | 59 +++++++++++++++++++++---------------------------
 1 file changed, 26 insertions(+), 33 deletions(-)
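
For context, damon_get_folio() is introduced by patch 3/7 of this series. A minimal sketch of such a helper, assuming the usual pfn-to-folio lookup pattern (an approximation, not the exact code from that patch):

/*
 * Sketch of the helper this patch builds on: look up the folio backing
 * a pfn and take a reference on it, or return NULL when the pfn is not
 * backed by a suitable folio. Approximates patch 3/7 of this series.
 */
static struct folio *damon_get_folio(unsigned long pfn)
{
	struct page *page = pfn_to_online_page(pfn);
	struct folio *folio;

	if (!page || PageTail(page))
		return NULL;

	folio = page_folio(page);
	if (!folio_try_get(folio))
		return NULL;
	if (unlikely(page_folio(page) != folio)) {
		/* The page moved to a different folio under us; drop and bail. */
		folio_put(folio);
		return NULL;
	}
	return folio;
}

Every caller that gets a non-NULL folio back is then responsible for a matching folio_put(), which is the pattern the conversion below follows.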

Comments

SeongJae Park Dec. 27, 2022, 7:50 p.m. UTC | #1
Hi Kefeng,

> With damon_get_folio(), let's convert all of the damon_pa_*() functions to use folios.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/damon/paddr.c | 59 +++++++++++++++++++++---------------------------
>  1 file changed, 26 insertions(+), 33 deletions(-)
> 
> diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
> index 6334c99e5152..728a96c929fc 100644
> --- a/mm/damon/paddr.c
> +++ b/mm/damon/paddr.c
> @@ -33,17 +33,15 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
>  
>  static void damon_pa_mkold(unsigned long paddr)
>  {
> -	struct folio *folio;
> -	struct page *page = damon_get_page(PHYS_PFN(paddr));
> +	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
>  	struct rmap_walk_control rwc = {
>  		.rmap_one = __damon_pa_mkold,
>  		.anon_lock = folio_lock_anon_vma_read,
>  	};
>  	bool need_lock;
>  
> -	if (!page)
> +	if (!folio)
>  		return;
> -	folio = page_folio(page);
>  
>  	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
>  		folio_set_idle(folio);
> @@ -58,7 +56,6 @@ static void damon_pa_mkold(unsigned long paddr)
>  
>  	if (need_lock)
>  		folio_unlock(folio);
> -

Seems unnecessary change?

>  out:
>  	folio_put(folio);
>  }
> @@ -122,8 +119,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
>  
>  static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
>  {
> -	struct folio *folio;
> -	struct page *page = damon_get_page(PHYS_PFN(paddr));
> +	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
>  	struct damon_pa_access_chk_result result = {
>  		.page_sz = PAGE_SIZE,
>  		.accessed = false,
> @@ -135,9 +131,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
>  	};
>  	bool need_lock;
>  
> -	if (!page)
> +	if (!folio)
>  		return false;
> -	folio = page_folio(page);
>  
>  	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
>  		if (folio_test_idle(folio))
> @@ -203,18 +198,18 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
>  }
>  
>  static bool __damos_pa_filter_out(struct damos_filter *filter,
> -		struct page *page)
> +		struct folio *folio)
>  {
>  	bool matched = false;
>  	struct mem_cgroup *memcg;
>  
>  	switch (filter->type) {
>  	case DAMOS_FILTER_TYPE_ANON:
> -		matched = PageAnon(page);
> +		matched = folio_test_anon(folio);
>  		break;
>  	case DAMOS_FILTER_TYPE_MEMCG:
>  		rcu_read_lock();
> -		memcg = page_memcg_check(page);
> +		memcg = page_memcg_check(folio_page(folio, 0));
>  		if (!memcg)
>  			matched = false;
>  		else
> @@ -231,12 +226,12 @@ static bool __damos_pa_filter_out(struct damos_filter *filter,
>  /*
>   * damos_pa_filter_out - Return true if the page should be filtered out.
>   */
> -static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
> +static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
>  {
>  	struct damos_filter *filter;
>  
>  	damos_for_each_filter(filter, scheme) {
> -		if (__damos_pa_filter_out(filter, page))
> +		if (__damos_pa_filter_out(filter, folio))
>  			return true;
>  	}
>  	return false;
> @@ -245,33 +240,33 @@ static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
>  static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
>  {
>  	unsigned long addr, applied;
> -	LIST_HEAD(page_list);
> +	LIST_HEAD(folio_list);
>  
>  	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
> -		struct page *page = damon_get_page(PHYS_PFN(addr));
> +		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
>  
> -		if (!page)
> +		if (!folio)
>  			continue;
>  
> -		if (damos_pa_filter_out(s, page)) {
> -			put_page(page);
> +		if (damos_pa_filter_out(s, folio)) {
> +			folio_put(folio);
>  			continue;
>  		}
>  
> -		ClearPageReferenced(page);
> -		test_and_clear_page_young(page);
> -		if (isolate_lru_page(page)) {
> -			put_page(page);
> +		folio_clear_referenced(folio);
> +		folio_test_clear_young(folio);
> +		if (folio_isolate_lru(folio)) {
> +			folio_put(folio);
>  			continue;
>  		}
> -		if (PageUnevictable(page)) {
> -			putback_lru_page(page);
> +		if (folio_test_unevictable(folio)) {
> +			folio_putback_lru(folio);
>  		} else {
> -			list_add(&page->lru, &page_list);
> -			put_page(page);
> +			list_add(&folio->lru, &folio_list);
> +			folio_put(folio);
>  		}
>  	}
> -	applied = reclaim_pages(&page_list);
> +	applied = reclaim_pages(&folio_list);
>  	cond_resched();
>  	return applied * PAGE_SIZE;
>  }
> @@ -282,14 +277,12 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
>  	unsigned long addr, applied = 0;
>  
>  	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
> -		struct page *page = damon_get_page(PHYS_PFN(addr));
> -		struct folio *folio;
> +		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
>  
> -		if (!page)
> +		if (!folio)
>  			continue;
> -		folio = page_folio(page);
>  
> -		if (damos_pa_filter_out(s, &folio->page)) {
> +		if (damos_pa_filter_out(s, folio)) {
>  			folio_put(folio);
>  			continue;
>  		}
> -- 
> 2.35.3


Thanks,
SJ
Kefeng Wang Dec. 28, 2022, 1:26 a.m. UTC | #2
On 2022/12/28 3:50, SeongJae Park wrote:
> Hi Kefeng,
> 
>> With damon_get_folio(), let's convert all of the damon_pa_*() functions to use folios.
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>> ---
>>   mm/damon/paddr.c | 59 +++++++++++++++++++++---------------------------
>>   1 file changed, 26 insertions(+), 33 deletions(-)
>>
>> diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
>> index 6334c99e5152..728a96c929fc 100644
>> --- a/mm/damon/paddr.c
>> +++ b/mm/damon/paddr.c
>> @@ -33,17 +33,15 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
>>   
>>   static void damon_pa_mkold(unsigned long paddr)
>>   {
>> -	struct folio *folio;
>> -	struct page *page = damon_get_page(PHYS_PFN(paddr));
>> +	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
>>   	struct rmap_walk_control rwc = {
>>   		.rmap_one = __damon_pa_mkold,
>>   		.anon_lock = folio_lock_anon_vma_read,
>>   	};
>>   	bool need_lock;
>>   
>> -	if (!page)
>> +	if (!folio)
>>   		return;
>> -	folio = page_folio(page);
>>   
>>   	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
>>   		folio_set_idle(folio);
>> @@ -58,7 +56,6 @@ static void damon_pa_mkold(unsigned long paddr)
>>   
>>   	if (need_lock)
>>   		folio_unlock(folio);
>> -
> 
> Seems unnecessary change?

oh, will drop this change, thanks

> 
> Thanks,
> SJ

Patch

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 6334c99e5152..728a96c929fc 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -33,17 +33,15 @@  static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 
 static void damon_pa_mkold(unsigned long paddr)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
 		.rmap_one = __damon_pa_mkold,
 		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		folio_set_idle(folio);
@@ -58,7 +56,6 @@  static void damon_pa_mkold(unsigned long paddr)
 
 	if (need_lock)
 		folio_unlock(folio);
-
 out:
 	folio_put(folio);
 }
@@ -122,8 +119,7 @@  static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 
 static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct damon_pa_access_chk_result result = {
 		.page_sz = PAGE_SIZE,
 		.accessed = false,
@@ -135,9 +131,8 @@  static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return false;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		if (folio_test_idle(folio))
@@ -203,18 +198,18 @@  static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 }
 
 static bool __damos_pa_filter_out(struct damos_filter *filter,
-		struct page *page)
+		struct folio *folio)
 {
 	bool matched = false;
 	struct mem_cgroup *memcg;
 
 	switch (filter->type) {
 	case DAMOS_FILTER_TYPE_ANON:
-		matched = PageAnon(page);
+		matched = folio_test_anon(folio);
 		break;
 	case DAMOS_FILTER_TYPE_MEMCG:
 		rcu_read_lock();
-		memcg = page_memcg_check(page);
+		memcg = page_memcg_check(folio_page(folio, 0));
 		if (!memcg)
 			matched = false;
 		else
@@ -231,12 +226,12 @@  static bool __damos_pa_filter_out(struct damos_filter *filter,
 /*
  * damos_pa_filter_out - Return true if the page should be filtered out.
  */
-static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
+static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
 {
 	struct damos_filter *filter;
 
 	damos_for_each_filter(filter, scheme) {
-		if (__damos_pa_filter_out(filter, page))
+		if (__damos_pa_filter_out(filter, folio))
 			return true;
 	}
 	return false;
@@ -245,33 +240,33 @@  static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
 static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 {
 	unsigned long addr, applied;
-	LIST_HEAD(page_list);
+	LIST_HEAD(folio_list);
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!page)
+		if (!folio)
 			continue;
 
-		if (damos_pa_filter_out(s, page)) {
-			put_page(page);
+		if (damos_pa_filter_out(s, folio)) {
+			folio_put(folio);
 			continue;
 		}
 
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
-		if (isolate_lru_page(page)) {
-			put_page(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
+		if (folio_isolate_lru(folio)) {
+			folio_put(folio);
 			continue;
 		}
-		if (PageUnevictable(page)) {
-			putback_lru_page(page);
+		if (folio_test_unevictable(folio)) {
+			folio_putback_lru(folio);
 		} else {
-			list_add(&page->lru, &page_list);
-			put_page(page);
+			list_add(&folio->lru, &folio_list);
+			folio_put(folio);
 		}
 	}
-	applied = reclaim_pages(&page_list);
+	applied = reclaim_pages(&folio_list);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
@@ -282,14 +277,12 @@  static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 	unsigned long addr, applied = 0;
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
-		struct folio *folio;
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!page)
+		if (!folio)
 			continue;
-		folio = page_folio(page);
 
-		if (damos_pa_filter_out(s, &folio->page)) {
+		if (damos_pa_filter_out(s, folio)) {
 			folio_put(folio);
 			continue;
 		}
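
With the conversion applied, every damon_pa_*() helper follows the same get/check/put discipline around the folio reference. A hypothetical caller illustrating that contract (example_check_idle() is illustrative only and not part of the patch):

/* Illustrative only: the reference discipline shared by the converted helpers. */
static void example_check_idle(unsigned long paddr)
{
	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));

	if (!folio)	/* no suitable folio backs this address */
		return;

	if (folio_test_idle(folio))
		pr_debug("folio at %#lx is idle\n", paddr);

	folio_put(folio);	/* pairs with the reference from damon_get_folio() */
}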