diff mbox series

[5/6] mm: memory: add vm_normal_pmd_folio()

Message ID 20230918103213.4166210-6-wangkefeng.wang@huawei.com (mailing list archive)
State New
Headers show
Series mm: convert numa balancing functions to use a folio | expand

Commit Message

Kefeng Wang Sept. 18, 2023, 10:32 a.m. UTC
The new vm_normal_pmd_folio() wrapper is similar to vm_normal_folio(),
which allows them to completely replace the struct page variables with
struct folio variables.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h |  2 ++
 mm/memory.c        | 10 ++++++++++
 2 files changed, 12 insertions(+)

Comments

Huang, Ying Sept. 20, 2023, 3:12 a.m. UTC | #1
Kefeng Wang <wangkefeng.wang@huawei.com> writes:

> The new vm_normal_pmd_folio() wrapper is similar to vm_normal_folio(),
> which allow them to completely replace the struct page variables with
> struct folio variables.
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  include/linux/mm.h |  2 ++
>  mm/memory.c        | 10 ++++++++++
>  2 files changed, 12 insertions(+)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 12335de50140..7d05ec047186 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2327,6 +2327,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
>  			     pte_t pte);
>  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
>  			     pte_t pte);
> +struct folio *vm_normal_pmd_folio(struct vm_area_struct *vma, unsigned long addr,
> +				  pmd_t pmd);
>  struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
>  				pmd_t pmd);

Why not follow the naming of the page counterpart (vm_normal_page_pmd()) and
call it vm_normal_folio_pmd()?

--
Best Regards,
Huang, Ying

> diff --git a/mm/memory.c b/mm/memory.c
> index ce3efe7255d2..d4296ee72730 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -689,6 +689,16 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
>  out:
>  	return pfn_to_page(pfn);
>  }
> +
> +struct folio *vm_normal_pmd_folio(struct vm_area_struct *vma, unsigned long addr,
> +				  pmd_t pmd)
> +{
> +	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
> +
> +	if (page)
> +		return page_folio(page);
> +	return NULL;
> +}
>  #endif
>  
>  static void restore_exclusive_pte(struct vm_area_struct *vma,
Kefeng Wang Sept. 20, 2023, 8:07 a.m. UTC | #2
On 2023/9/20 11:12, Huang, Ying wrote:
> Kefeng Wang <wangkefeng.wang@huawei.com> writes:
> 
>> The new vm_normal_pmd_folio() wrapper is similar to vm_normal_folio(),
>> which allow them to completely replace the struct page variables with
>> struct folio variables.
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>> ---
>>   include/linux/mm.h |  2 ++
>>   mm/memory.c        | 10 ++++++++++
>>   2 files changed, 12 insertions(+)
>>
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 12335de50140..7d05ec047186 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -2327,6 +2327,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
>>   			     pte_t pte);
>>   struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
>>   			     pte_t pte);
>> +struct folio *vm_normal_pmd_folio(struct vm_area_struct *vma, unsigned long addr,
>> +				  pmd_t pmd);
>>   struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
>>   				pmd_t pmd);
> 
> Why do not follow the counterpart of page (vm_normal_page_pmd()) to be
> vm_normal_folio_pmd()?

Personally, X_pmd_folio seems to get a folio from a pmd, but X_folio_pmd 
looks like "return the PMD of a folio". I could use 
vm_normal_folio_pmd() for consistency, thanks.
diff mbox series

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 12335de50140..7d05ec047186 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2327,6 +2327,8 @@  struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
+struct folio *vm_normal_pmd_folio(struct vm_area_struct *vma, unsigned long addr,
+				  pmd_t pmd);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 				pmd_t pmd);
 
diff --git a/mm/memory.c b/mm/memory.c
index ce3efe7255d2..d4296ee72730 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -689,6 +689,16 @@  struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 out:
 	return pfn_to_page(pfn);
 }
+
+struct folio *vm_normal_pmd_folio(struct vm_area_struct *vma, unsigned long addr,
+				  pmd_t pmd)
+{
+	struct page *page = vm_normal_page_pmd(vma, addr, pmd);
+
+	if (page)
+		return page_folio(page);
+	return NULL;
+}
 #endif
 
 static void restore_exclusive_pte(struct vm_area_struct *vma,