
[3/3] mm: shmem: remove __shmem_huge_global_enabled()

Message ID: 20241010061024.1846220-4-wangkefeng.wang@huawei.com
State: New
Series: mm: cleanup thp and shmem allowable order check

Commit Message

Kefeng Wang Oct. 10, 2024, 6:10 a.m. UTC
Remove __shmem_huge_global_enabled() since it has only one caller,
and drop the repeated VM_NOHUGEPAGE/MMF_DISABLE_THP checks, as they
are already performed in shmem_allowable_huge_orders(); also remove
the now-unnecessary vma parameter.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/shmem.c | 33 ++++++++++-----------------------
 1 file changed, 10 insertions(+), 23 deletions(-)
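
For context (not part of the patch itself): the VM_NOHUGEPAGE/MMF_DISABLE_THP
test removed here duplicates one that shmem_allowable_huge_orders() already
performs before calling into this helper; earlier in this series that test
was factored into a vma_thp_disabled() helper. A rough sketch of the relevant
check, reconstructed from the earlier patches in the series (treat the exact
shape as approximate):

/*
 * Sketch only: vma_thp_disabled() was introduced earlier in this series.
 * It rejects mappings disabled via MADV_NOHUGEPAGE on the VMA or
 * PR_SET_THP_DISABLE on the whole mm.
 */
static inline bool vma_thp_disabled(struct vm_area_struct *vma,
				    unsigned long vm_flags)
{
	return (vm_flags & VM_NOHUGEPAGE) ||
	       test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
}

/* In shmem_allowable_huge_orders(), ahead of the global-enabled check: */
	if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
		return 0;

Since every path into shmem_huge_global_enabled() goes through that filter,
or passes vm_flags == 0 with no mm at all as shmem_getattr() does, the
duplicate test inside the helper can safely go.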

Comments

Baolin Wang Oct. 12, 2024, 3:38 a.m. UTC | #1
On 2024/10/10 14:10, Kefeng Wang wrote:
> Remove __shmem_huge_global_enabled() since it has only one caller,
> and drop the repeated VM_NOHUGEPAGE/MMF_DISABLE_THP checks, as they
> are already performed in shmem_allowable_huge_orders(); also remove
> the now-unnecessary vma parameter.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

LGTM. Thanks.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>


Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index 34a31e7e527c..36ac51d55867 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -548,17 +548,15 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 
 static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-					loff_t write_end, bool shmem_huge_force,
-					struct vm_area_struct *vma,
-					unsigned long vm_flags)
+static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+				      loff_t write_end, bool shmem_huge_force,
+				      unsigned long vm_flags)
 {
-	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
 	loff_t i_size;
 
-	if (!S_ISREG(inode->i_mode))
+	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
 		return false;
-	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
+	if (!S_ISREG(inode->i_mode))
 		return false;
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return false;
@@ -576,7 +574,7 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
-		if (mm && (vm_flags & VM_HUGEPAGE))
+		if (vm_flags & VM_HUGEPAGE)
 			return true;
 		fallthrough;
 	default:
@@ -584,17 +582,6 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 	}
 }
 
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-		   loff_t write_end, bool shmem_huge_force,
-		   struct vm_area_struct *vma, unsigned long vm_flags)
-{
-	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-		return false;
-
-	return __shmem_huge_global_enabled(inode, index, write_end,
-					   shmem_huge_force, vma, vm_flags);
-}
-
 #if defined(CONFIG_SYSFS)
 static int shmem_parse_huge(const char *str)
 {
@@ -772,8 +759,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 }
 
 static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
-		loff_t write_end, bool shmem_huge_force,
-		struct vm_area_struct *vma, unsigned long vm_flags)
+				      loff_t write_end, bool shmem_huge_force,
+				      unsigned long vm_flags)
 {
 	return false;
 }
@@ -1170,7 +1157,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
 	generic_fillattr(idmap, request_mask, inode, stat);
 	inode_unlock_shared(inode);
 
-	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+	if (shmem_huge_global_enabled(inode, 0, 0, false, 0))
 		stat->blksize = HPAGE_PMD_SIZE;
 
 	if (request_mask & STATX_BTIME) {
@@ -1687,7 +1674,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 		return 0;
 
 	global_huge = shmem_huge_global_enabled(inode, index, write_end,
-					shmem_huge_force, vma, vm_flags);
+						shmem_huge_force, vm_flags);
 	if (!vma || !vma_is_anon_shmem(vma)) {
 		/*
 		 * For tmpfs, we now only support PMD sized THP if huge page
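
For reference, with the patch applied the consolidated function reads roughly
as below. The diff elides the middle of the function, so the SHMEM_HUGE_FORCE
shortcut and the within-size computation are filled in from the surrounding
mm/shmem.c context and should be read as a sketch rather than an authoritative
listing:

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      loff_t write_end, bool shmem_huge_force,
				      unsigned long vm_flags)
{
	loff_t i_size;

	/* Formerly the wrapper's only job, now folded in up front. */
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = max(write_end, i_size_read(inode));
		i_size = round_up(i_size, PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		/* No mm-null guard needed: vm_flags is 0 when no VMA exists. */
		if (vm_flags & VM_HUGEPAGE)
			return true;
		fallthrough;
	default:
		return false;
	}
}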