
mm: add more readable thp_vma_allowable_order_foo()

Message ID 20240424140715.5838-1-wangkefeng.wang@huawei.com (mailing list archive)
State New
Series mm: add more readable thp_vma_allowable_order_foo()

Commit Message

Kefeng Wang April 24, 2024, 2:07 p.m. UTC
There are too many bool arguments in thp_vma_allowable_orders(), so add
some more readable thp_vma_allowable_order_foo() wrappers:

  thp_vma_allowable_orders_insmaps() is used in smaps
  thp_vma_allowable_order[s]_inpf()  is used in the page fault path
  thp_vma_allowable_pmd_order_inhuge() is used in khugepaged scan and madvise

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 fs/proc/task_mmu.c      |  3 +--
 include/linux/huge_mm.h | 14 ++++++++++++--
 mm/khugepaged.c         | 20 ++++++++------------
 mm/memory.c             |  8 ++++----
 4 files changed, 25 insertions(+), 20 deletions(-)
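
For illustration, here is how a typical call site from the hunks below reads
before and after the change (a sketch extracted from the madvise_collapse()
hunk, not part of the patch itself):

	/* before: the bare bool arguments give no hint what they mean */
	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
				     PMD_ORDER))
		return -EINVAL;

	/* after: the caller's context is encoded in the wrapper name */
	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
		return -EINVAL;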

Comments

Ryan Roberts April 24, 2024, 2:05 p.m. UTC | #1
On 24/04/2024 15:07, Kefeng Wang wrote:
> There are too many bool arguments in thp_vma_allowable_orders(), so add
> some more readable thp_vma_allowable_order_foo() wrappers:
> 
>   thp_vma_allowable_orders_insmaps() is used in smaps
>   thp_vma_allowable_order[s]_inpf()  is used in the page fault path
>   thp_vma_allowable_pmd_order_inhuge() is used in khugepaged scan and madvise
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Just one nit below. With that addressed:

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
>  fs/proc/task_mmu.c      |  3 +--
>  include/linux/huge_mm.h | 14 ++++++++++++--
>  mm/khugepaged.c         | 20 ++++++++------------
>  mm/memory.c             |  8 ++++----
>  4 files changed, 25 insertions(+), 20 deletions(-)
> 
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index f4259b7edfde..1136aa97f143 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
>  	__show_smap(m, &mss, false);
>  
>  	seq_printf(m, "THPeligible:    %8u\n",
> -		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
> -					      true, THP_ORDERS_ALL));
> +		   thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
>  
>  	if (arch_pkeys_enabled())
>  		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 56c7ea73090b..345cf394480b 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
>   */
>  #define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
>  
> -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
> -	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
> +#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
> +	(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
> +
> +#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
> +	(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
> +
> +#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
> +	(!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
> +
> +#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
> +	(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))

nit: Personally I'd leave the order as an argument rather than encoding it in
the name. It's likely that khugepaged will grow support for non-PMD-size
collapse in future. The first part of the name "thp_vma_allowable_order" is then
consistent and easy to search for all variants. And perhaps "inkhuge" is more
precise?
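
For illustration, such a variant might look like this (a sketch based on the
pattern of the wrappers above, using Ryan's suggested name; not code from the
posted patch):

	#define thp_vma_allowable_order_inkhuge(vma, vm_flags, enforce_sysfs, order) \
		(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(order)))

Keeping the order as a parameter means future non-PMD-size collapse support
would only need new call sites, not new wrappers.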

> +
>  
>  #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
>  #define HPAGE_PMD_SHIFT PMD_SHIFT
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 2f73d2aa9ae8..5a27dccfda02 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -453,8 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
>  {
>  	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
>  	    hugepage_flags_enabled()) {
> -		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
> -					    PMD_ORDER))
> +		if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
>  			__khugepaged_enter(vma->vm_mm);
>  	}
>  }
> @@ -909,15 +908,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>  
>  	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
>  		return SCAN_ADDRESS_RANGE;
> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
> -				     cc->is_khugepaged, PMD_ORDER))
> +	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
> +						cc->is_khugepaged))
>  		return SCAN_VMA_CHECK;
>  	/*
>  	 * Anon VMA expected, the address may be unmapped then
>  	 * remapped to file after khugepaged reaquired the mmap_lock.
>  	 *
> -	 * thp_vma_allowable_order may return true for qualified file
> -	 * vmas.
> +	 * thp_vma_allowable_pmd_order_inhuge may return true for
> +	 * qualified file vmas.
>  	 */
>  	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
>  		return SCAN_PAGE_ANON;
> @@ -1493,8 +1492,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
>  	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
>  	 * analogously elide sysfs THP settings here.
>  	 */
> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
> -				     PMD_ORDER))
> +	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
>  		return SCAN_VMA_CHECK;
>  
>  	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
> @@ -2355,8 +2353,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>  			progress++;
>  			break;
>  		}
> -		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
> -					     true, PMD_ORDER)) {
> +		if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
>  skip:
>  			progress++;
>  			continue;
> @@ -2693,8 +2690,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>  
>  	*prev = vma;
>  
> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
> -				     PMD_ORDER))
> +	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
>  		return -EINVAL;
>  
>  	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
> diff --git a/mm/memory.c b/mm/memory.c
> index 09ed76e5b8c0..8507bfda461a 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>  	 * for this vma. Then filter out the orders that can't be allocated over
>  	 * the faulting address and still be fully contained in the vma.
>  	 */
> -	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
> -					  BIT(PMD_ORDER) - 1);
> +	orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
> +					       BIT(PMD_ORDER) - 1);
>  	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
>  
>  	if (!orders)
> @@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>  		return VM_FAULT_OOM;
>  retry_pud:
>  	if (pud_none(*vmf.pud) &&
> -	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
> +	    thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
>  		ret = create_huge_pud(&vmf);
>  		if (!(ret & VM_FAULT_FALLBACK))
>  			return ret;
> @@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>  		goto retry_pud;
>  
>  	if (pmd_none(*vmf.pmd) &&
> -	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
> +	    thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
>  		ret = create_huge_pmd(&vmf);
>  		if (!(ret & VM_FAULT_FALLBACK))
>  			return ret;
Kefeng Wang April 24, 2024, 2:11 p.m. UTC | #2
On 2024/4/24 22:05, Ryan Roberts wrote:
> On 24/04/2024 15:07, Kefeng Wang wrote:
>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>> some more readable thp_vma_allowable_order_foo() wrappers:
>>
>>    thp_vma_allowable_orders_insmaps() is used in smaps
>>    thp_vma_allowable_order[s]_inpf()  is used in the page fault path
>>    thp_vma_allowable_pmd_order_inhuge() is used in khugepaged scan and madvise
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> 
> Just one nit below. With that addressed:
> 
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> 
>> ---
>>   fs/proc/task_mmu.c      |  3 +--
>>   include/linux/huge_mm.h | 14 ++++++++++++--
>>   mm/khugepaged.c         | 20 ++++++++------------
>>   mm/memory.c             |  8 ++++----
>>   4 files changed, 25 insertions(+), 20 deletions(-)
>>
>> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
>> index f4259b7edfde..1136aa97f143 100644
>> --- a/fs/proc/task_mmu.c
>> +++ b/fs/proc/task_mmu.c
>> @@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
>>   	__show_smap(m, &mss, false);
>>   
>>   	seq_printf(m, "THPeligible:    %8u\n",
>> -		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
>> -					      true, THP_ORDERS_ALL));
>> +		   thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
>>   
>>   	if (arch_pkeys_enabled())
>>   		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index 56c7ea73090b..345cf394480b 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
>>    */
>>   #define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
>>   
>> -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
>> -	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
>> +#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
>> +	(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
>> +
>> +#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
>> +	(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
>> +
>> +#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
>> +	(!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
>> +
>> +#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
>> +	(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
> 
> nit: Personally I'd leave the order as an argument rather than encoding it in
> the name. It's likely that khugepaged will grow support for non-PMD-size
> collapse in future. The first part of the name "thp_vma_allowable_order" is then
> consistent and easy to search for all variants. And perhaps "inkhuge" is more
> precise?

Sure, I'll use thp_vma_allowable_order_inkhuge(vma, vm_flags, enforce_sysfs, order),
and maybe add a thp_vma_allowable_orders_inkhuge() like the inpf variant in the future.

Thanks.


> 
>> +
>>   
>>   #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
>>   #define HPAGE_PMD_SHIFT PMD_SHIFT
>> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
>> index 2f73d2aa9ae8..5a27dccfda02 100644
>> --- a/mm/khugepaged.c
>> +++ b/mm/khugepaged.c
>> @@ -453,8 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
>>   {
>>   	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
>>   	    hugepage_flags_enabled()) {
>> -		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
>> -					    PMD_ORDER))
>> +		if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
>>   			__khugepaged_enter(vma->vm_mm);
>>   	}
>>   }
>> @@ -909,15 +908,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>>   
>>   	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
>>   		return SCAN_ADDRESS_RANGE;
>> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
>> -				     cc->is_khugepaged, PMD_ORDER))
>> +	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
>> +						cc->is_khugepaged))
>>   		return SCAN_VMA_CHECK;
>>   	/*
>>   	 * Anon VMA expected, the address may be unmapped then
>>   	 * remapped to file after khugepaged reaquired the mmap_lock.
>>   	 *
>> -	 * thp_vma_allowable_order may return true for qualified file
>> -	 * vmas.
>> +	 * thp_vma_allowable_pmd_order_inhuge may return true for
>> +	 * qualified file vmas.
>>   	 */
>>   	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
>>   		return SCAN_PAGE_ANON;
>> @@ -1493,8 +1492,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
>>   	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
>>   	 * analogously elide sysfs THP settings here.
>>   	 */
>> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
>> -				     PMD_ORDER))
>> +	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
>>   		return SCAN_VMA_CHECK;
>>   
>>   	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
>> @@ -2355,8 +2353,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
>>   			progress++;
>>   			break;
>>   		}
>> -		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
>> -					     true, PMD_ORDER)) {
>> +		if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
>>   skip:
>>   			progress++;
>>   			continue;
>> @@ -2693,8 +2690,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
>>   
>>   	*prev = vma;
>>   
>> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
>> -				     PMD_ORDER))
>> +	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
>>   		return -EINVAL;
>>   
>>   	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 09ed76e5b8c0..8507bfda461a 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
>>   	 * for this vma. Then filter out the orders that can't be allocated over
>>   	 * the faulting address and still be fully contained in the vma.
>>   	 */
>> -	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
>> -					  BIT(PMD_ORDER) - 1);
>> +	orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
>> +					       BIT(PMD_ORDER) - 1);
>>   	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
>>   
>>   	if (!orders)
>> @@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>>   		return VM_FAULT_OOM;
>>   retry_pud:
>>   	if (pud_none(*vmf.pud) &&
>> -	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
>> +	    thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
>>   		ret = create_huge_pud(&vmf);
>>   		if (!(ret & VM_FAULT_FALLBACK))
>>   			return ret;
>> @@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
>>   		goto retry_pud;
>>   
>>   	if (pmd_none(*vmf.pmd) &&
>> -	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
>> +	    thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
>>   		ret = create_huge_pmd(&vmf);
>>   		if (!(ret & VM_FAULT_FALLBACK))
>>   			return ret;
>
David Hildenbrand April 24, 2024, 2:57 p.m. UTC | #3
On 24.04.24 16:07, Kefeng Wang wrote:
> There are too many bool arguments in thp_vma_allowable_orders(), so add
> some more readable thp_vma_allowable_order_foo() wrappers:

Good, I had something similar in mind when talking about that with Ryan 
in the past (during mTHP development).

> 
>    thp_vma_allowable_orders_insmaps() is used in smaps
>    thp_vma_allowable_order[s]_inpf()  is used in the page fault path
>    thp_vma_allowable_pmd_order_inhuge() is used in khugepaged scan and madvise

I really don't like the "_in" stuff. And "inhuge" doesn't add any clarity.

What about

thp_vma_allowable_orders_smaps()
thp_vma_allowable_order[s]_pf()
thp_vma_allowable_pmd_order()
David Hildenbrand April 24, 2024, 2:58 p.m. UTC | #4
On 24.04.24 16:05, Ryan Roberts wrote:
> On 24/04/2024 15:07, Kefeng Wang wrote:
>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>> some more readable thp_vma_allowable_order_foo() wrappers:
>>
>>    thp_vma_allowable_orders_insmaps() is used in smaps
>>    thp_vma_allowable_order[s]_inpf()  is used in the page fault path
>>    thp_vma_allowable_pmd_order_inhuge() is used in khugepaged scan and madvise
>>
>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> 
> Just one nit below. With that addressed:
> 
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> 
>> ---
>>   fs/proc/task_mmu.c      |  3 +--
>>   include/linux/huge_mm.h | 14 ++++++++++++--
>>   mm/khugepaged.c         | 20 ++++++++------------
>>   mm/memory.c             |  8 ++++----
>>   4 files changed, 25 insertions(+), 20 deletions(-)
>>
>> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
>> index f4259b7edfde..1136aa97f143 100644
>> --- a/fs/proc/task_mmu.c
>> +++ b/fs/proc/task_mmu.c
>> @@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
>>   	__show_smap(m, &mss, false);
>>   
>>   	seq_printf(m, "THPeligible:    %8u\n",
>> -		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
>> -					      true, THP_ORDERS_ALL));
>> +		   thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
>>   
>>   	if (arch_pkeys_enabled())
>>   		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>> index 56c7ea73090b..345cf394480b 100644
>> --- a/include/linux/huge_mm.h
>> +++ b/include/linux/huge_mm.h
>> @@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
>>    */
>>   #define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
>>   
>> -#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
>> -	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
>> +#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
>> +	(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
>> +
>> +#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
>> +	(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
>> +
>> +#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
>> +	(!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
>> +
>> +#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
>> +	(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
> 
> nit: Personally I'd leave the order as an argument rather than encoding it in
> the name. It's likely that khugepaged will grow support for non-PMD-size

Agreed.

> collapse in future. The first part of the name "thp_vma_allowable_order" is then
> consistent and easy to search for all variants. And perhaps "inkhuge" is more
> precise?

"_khugepaged" or something else that people can actually parse and 
understand.
Kefeng Wang April 25, 2024, 1:09 a.m. UTC | #5
On 2024/4/24 22:58, David Hildenbrand wrote:
> On 24.04.24 16:05, Ryan Roberts wrote:
>> On 24/04/2024 15:07, Kefeng Wang wrote:
>>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>>> some more readable thp_vma_allowable_order_foo() wrappers:
>>>
>>>    thp_vma_allowable_orders_insmaps() is used in smaps
>>>    thp_vma_allowable_order[s]_inpf()  is used in the page fault path
>>>    thp_vma_allowable_pmd_order_inhuge() is used in khugepaged scan and
>>> madvise
>>>
>>> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
>>
>> Just one nit below. With that addressed:
>>
>> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
>>

...
>>
>> nit: Personally I'd leave the order as an argument rather than 
>> encoding it in
>> the name. It's likely that khugepaged will grow support for non-PMD-size
> 
> Agreed.
> 
>> collapse in future. The first part of the name 
>> "thp_vma_allowable_order" is then
>> consistent and easy to search for all variants. And perhaps "inkhuge" 
>> is more
>> precise?
> 
> "_khugepaged" or something else that people can actually parse and 
> understand.
> 

I tried this before; _inkhugepaged is a bit long, so I chose inhuge, but I
couldn't find a better name. As you and Ryan suggested, I'll go with:

thp_vma_allowable_orders_smaps()
thp_vma_allowable_order[s]_pf()
thp_vma_allowable_order_khugepaged()

Thanks.
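
For reference, the wrappers with the naming the thread converged on might look
roughly like this (a sketch combining David's names with Ryan's
order-as-argument suggestion; no such revised patch appears in this thread):

	#define thp_vma_allowable_orders_smaps(vma, vm_flags) \
		(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))

	#define thp_vma_allowable_orders_pf(vma, vm_flags, orders) \
		(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))

	#define thp_vma_allowable_order_pf(vma, vm_flags, order) \
		(!!thp_vma_allowable_orders_pf(vma, vm_flags, BIT(order)))

	#define thp_vma_allowable_order_khugepaged(vma, vm_flags, enforce_sysfs, order) \
		(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(order)))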

Patch

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f4259b7edfde..1136aa97f143 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -871,8 +871,7 @@  static int show_smap(struct seq_file *m, void *v)
 	__show_smap(m, &mss, false);
 
 	seq_printf(m, "THPeligible:    %8u\n",
-		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
-					      true, THP_ORDERS_ALL));
+		   thp_vma_allowable_orders_insmaps(vma, vma->vm_flags));
 
 	if (arch_pkeys_enabled())
 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 56c7ea73090b..345cf394480b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -83,8 +83,18 @@  extern struct kobj_attribute shmem_enabled_attr;
  */
 #define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
 
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
-	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define thp_vma_allowable_orders_insmaps(vma, vm_flags) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
+
+#define thp_vma_allowable_orders_inpf(vma, vm_flags, orders) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
+
+#define thp_vma_allowable_order_inpf(vma, vm_flags, order) \
+	(!!thp_vma_allowable_orders_inpf(vma, vm_flags, BIT(order)))
+
+#define thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, enforce_sysfs) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(PMD_ORDER)))
+
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 #define HPAGE_PMD_SHIFT PMD_SHIFT
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2f73d2aa9ae8..5a27dccfda02 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,8 +453,7 @@  void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 	    hugepage_flags_enabled()) {
-		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
-					    PMD_ORDER))
+		if (thp_vma_allowable_pmd_order_inhuge(vma, vm_flags, true))
 			__khugepaged_enter(vma->vm_mm);
 	}
 }
@@ -909,15 +908,15 @@  static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
 		return SCAN_ADDRESS_RANGE;
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-				     cc->is_khugepaged, PMD_ORDER))
+	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags,
+						cc->is_khugepaged))
 		return SCAN_VMA_CHECK;
 	/*
 	 * Anon VMA expected, the address may be unmapped then
 	 * remapped to file after khugepaged reaquired the mmap_lock.
 	 *
-	 * thp_vma_allowable_order may return true for qualified file
-	 * vmas.
+	 * thp_vma_allowable_pmd_order_inhuge may return true for
+	 * qualified file vmas.
 	 */
 	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
 		return SCAN_PAGE_ANON;
@@ -1493,8 +1492,7 @@  int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
 	 * analogously elide sysfs THP settings here.
 	 */
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
 		return SCAN_VMA_CHECK;
 
 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2355,8 +2353,7 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			progress++;
 			break;
 		}
-		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-					     true, PMD_ORDER)) {
+		if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, true)) {
 skip:
 			progress++;
 			continue;
@@ -2693,8 +2690,7 @@  int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
 	*prev = vma;
 
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_pmd_order_inhuge(vma, vma->vm_flags, false))
 		return -EINVAL;
 
 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 09ed76e5b8c0..8507bfda461a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4329,8 +4329,8 @@  static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	 * for this vma. Then filter out the orders that can't be allocated over
 	 * the faulting address and still be fully contained in the vma.
 	 */
-	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
-					  BIT(PMD_ORDER) - 1);
+	orders = thp_vma_allowable_orders_inpf(vma, vma->vm_flags,
+					       BIT(PMD_ORDER) - 1);
 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 
 	if (!orders)
@@ -5433,7 +5433,7 @@  static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 retry_pud:
 	if (pud_none(*vmf.pud) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+	    thp_vma_allowable_order_inpf(vma, vm_flags, PUD_ORDER)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
@@ -5467,7 +5467,7 @@  static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		goto retry_pud;
 
 	if (pmd_none(*vmf.pmd) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+	    thp_vma_allowable_order_inpf(vma, vm_flags, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;