
[v3,2/8] mm: khugepaged: remove redundant check for VM_NO_KHUGEPAGED

Message ID 20220404200250.321455-3-shy828301@gmail.com
State New
Series Make khugepaged collapse readonly FS THP more consistent

Commit Message

Yang Shi April 4, 2022, 8:02 p.m. UTC
hugepage_vma_check(), which is called by khugepaged_enter_vma_merge(),
already checks VM_NO_KHUGEPAGED. Remove the redundant check from the
caller and move the check in hugepage_vma_check() up so unsuitable
vmas fail fast.

A few more checks may now run for VM_NO_KHUGEPAGED vmas before they
are rejected, but MADV_HUGEPAGE is definitely not a hot path, so the
cleaner code outweighs the extra work.

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: Song Liu <song@kernel.org>
Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 mm/khugepaged.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
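
To make the redundancy concrete, here is a minimal standalone sketch of
the pattern this patch applies. The names below are hypothetical; the
real code lives in mm/khugepaged.c, and VM_NO_KHUGEPAGED is defined as
(VM_SPECIAL | VM_HUGETLB) in the kernel headers:

#include <stdbool.h>

#define VM_NO_KHUGEPAGED 0x1	/* stand-in for (VM_SPECIAL | VM_HUGETLB) */

/* Callee: owns the eligibility test and runs it early. */
static bool vma_check(unsigned long vm_flags)
{
	if (vm_flags & VM_NO_KHUGEPAGED)	/* check hoisted to the top */
		return false;
	/* ... remaining suitability checks ... */
	return true;
}

/* Caller: no longer duplicates the VM_NO_KHUGEPAGED test. */
static int enter_on_madvise(unsigned long vm_flags)
{
	if (!vma_check(vm_flags))
		return 0;	/* ineligible vma, nothing to register */
	/* ... register the mm with khugepaged, may fail with -ENOMEM ... */
	return 0;
}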

Comments

Vlastimil Babka May 9, 2022, 12:45 p.m. UTC | #1
On 4/4/22 22:02, Yang Shi wrote:
> hugepage_vma_check(), which is called by khugepaged_enter_vma_merge(),
> already checks VM_NO_KHUGEPAGED. Remove the redundant check from the
> caller and move the check in hugepage_vma_check() up so unsuitable
> vmas fail fast.
> 
> A few more checks may now run for VM_NO_KHUGEPAGED vmas before they
> are rejected, but MADV_HUGEPAGE is definitely not a hot path, so the
> cleaner code outweighs the extra work.
> 
> Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
> Acked-by: Song Liu <song@kernel.org>
> Signed-off-by: Yang Shi <shy828301@gmail.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>


Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a4e5eaf3eb01..7d197d9e3258 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -365,8 +365,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
 		 * register it here without waiting a page fault that
 		 * may not happen any time soon.
 		 */
-		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
-				khugepaged_enter_vma_merge(vma, *vm_flags))
+		if (khugepaged_enter_vma_merge(vma, *vm_flags))
 			return -ENOMEM;
 		break;
 	case MADV_NOHUGEPAGE:
@@ -445,6 +444,9 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
 	if (!transhuge_vma_enabled(vma, vm_flags))
 		return false;
 
+	if (vm_flags & VM_NO_KHUGEPAGED)
+		return false;
+
 	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
 				vma->vm_pgoff, HPAGE_PMD_NR))
 		return false;
@@ -470,7 +472,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
 		return false;
 	if (vma_is_temporary_stack(vma))
 		return false;
-	return !(vm_flags & VM_NO_KHUGEPAGED);
+
+	return true;
 }
 
 int __khugepaged_enter(struct mm_struct *mm)
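
For reference, a sketch of how hugepage_vma_check() reads after this
patch, reconstructed from the two hunks above; the checks between the
hunks are elided:

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	/* Hoisted here by this patch; previously tested at the tail. */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/* ... file alignment and other suitability checks elided ... */

	if (vma_is_temporary_stack(vma))
		return false;

	/* Previously: return !(vm_flags & VM_NO_KHUGEPAGED); */
	return true;
}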