
[v3] mm/mempolicy,hugetlb: Checking hstate for hugetlbfs page in vma_migratable

Message ID 1579095541-32731-1-git-send-email-lixinhai.lxh@gmail.com (mailing list archive)
State New, archived
Series [v3] mm/mempolicy,hugetlb: Checking hstate for hugetlbfs page in vma_migratable

Commit Message

Li Xinhai Jan. 15, 2020, 1:39 p.m. UTC
Check the hstate at the early phase, when isolating the page, instead of
during the unmap and move phase, to avoid useless isolation.

Signed-off-by: Li Xinhai <lixinhai.lxh@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/hugetlb_inline.h | 7 +++++++
 include/linux/mempolicy.h      | 5 ++---
 mm/hugetlb.c                   | 9 +++++++++
 3 files changed, 18 insertions(+), 3 deletions(-)
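
For illustration, the early phase referred to above is the VMA walk that
decides whether pages should be isolated at all.  A minimal, hypothetical
sketch of such a walker (the function name and signature are illustrative,
not the actual mm/mempolicy.c code) shows where the check pays off:

static int queue_pages_sketch(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * Bail out before any per-page work: with this patch,
	 * vma_migratable() already reports whether the VMA's hstate
	 * supports migration, so hugetlb pages that could never be
	 * moved are not isolated in the first place.
	 */
	if (!vma_migratable(vma))
		return 1;

	/* ... walk page tables and isolate pages in [start, end) ... */
	return 0;
}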

Comments

Mike Kravetz Jan. 16, 2020, 12:09 a.m. UTC | #1
On 1/15/20 5:39 AM, Li Xinhai wrote:
> Checking hstate at early phase when isolating page, instead of during
> unmap and move phase, to avoid useless isolation.
> 
> Signed-off-by: Li Xinhai <lixinhai.lxh@gmail.com>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: Mike Kravetz <mike.kravetz@oracle.com>
> ---
>  include/linux/hugetlb_inline.h | 7 +++++++
>  include/linux/mempolicy.h      | 5 ++---
>  mm/hugetlb.c                   | 9 +++++++++
>  3 files changed, 18 insertions(+), 3 deletions(-)
> 
> diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
> index 0660a03..fc07139 100644
> --- a/include/linux/hugetlb_inline.h
> +++ b/include/linux/hugetlb_inline.h
> @@ -11,6 +11,8 @@ static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
>  	return !!(vma->vm_flags & VM_HUGETLB);
>  }
>  
> +extern bool vm_hugepage_migration_supported(struct vm_area_struct *vma);
> +
>  #else
>  
>  static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
> @@ -18,6 +20,11 @@ static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
>  	return false;
>  }
>  
> +static inline bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
> +{
> +	return false;
> +}
> +
>  #endif
>  
>  #endif
> diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
> index 5228c62..e9ed49e 100644
> --- a/include/linux/mempolicy.h
> +++ b/include/linux/mempolicy.h
> @@ -185,10 +185,9 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
>  	if (vma_is_dax(vma))
>  		return false;
>  
> -#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
> -	if (vma->vm_flags & VM_HUGETLB)
> +	if (is_vm_hugetlb_page(vma) &&
> +		!vm_hugepage_migration_supported(vma))
>  		return false;
> -#endif
>  
>  	/*
>  	 * Migration allocates pages in the highest zone. If we cannot

There are not many ways to work around the hugepage_migration_supported
dependency issue.  I suppose you could have moved vma_migratable() into
mm/mempolicy.c instead of leaving it as an inline in the header file.  However,
it is probably best to do what you have done and hide the ugly stuff in
the hugetlbfs files.
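
For illustration, that alternative might look roughly like the sketch
below (a sketch only, not code from this series), assuming mm/mempolicy.c
can see hugepage_migration_supported() and hstate_vma() via
<linux/hugetlb.h>:

/* include/linux/mempolicy.h: keep only a declaration */
extern bool vma_migratable(struct vm_area_struct *vma);

/* mm/mempolicy.c: out-of-line definition (sketch) */
bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma_is_dax(vma))
		return false;

	if (is_vm_hugetlb_page(vma) &&
	    !hugepage_migration_supported(hstate_vma(vma)))
		return false;

	/* ... remaining checks carried over from the inline version ... */
	return true;
}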

> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index dd8737a..8ce4a98 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1316,6 +1316,15 @@ int PageHeadHuge(struct page *page_head)
>  	return get_compound_page_dtor(page_head) == free_huge_page;
>  }
>  
> +bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
> +{
> +	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

This VM_BUG_ON_VMA() seems unnecessary.  The only caller is the one
added with this patch, and it only makes the call if
is_vm_hugetlb_page(vma) is true.
Even though VM_BUG_ON() is only enabled if CONFIG_DEBUG_VM, many distros
(including the one running my desktop) enable CONFIG_DEBUG_VM in their
default kernel.
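
With that feedback applied, the new helper could shrink to something like
the sketch below (illustrative only, not the posted patch): drop the
VM_BUG_ON_VMA() and return the result of hugepage_migration_supported()
directly instead of the if/return true/return false pattern.

/* mm/hugetlb.c (sketch) */
bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
{
	/* Callers have already checked is_vm_hugetlb_page(vma). */
	return hugepage_migration_supported(hstate_vma(vma));
}
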
Li Xinhai Jan. 16, 2020, 3:22 a.m. UTC | #2
On 2020-01-16 at 08:09 Mike Kravetz wrote:
>On 1/15/20 5:39 AM, Li Xinhai wrote:
>> Checking hstate at early phase when isolating page, instead of during
>> unmap and move phase, to avoid useless isolation.
>>
>> Signed-off-by: Li Xinhai <lixinhai.lxh@gmail.com>
>> Cc: Michal Hocko <mhocko@suse.com>
>> Cc: Mike Kravetz <mike.kravetz@oracle.com>
>> ---
>>  include/linux/hugetlb_inline.h | 7 +++++++
>>  include/linux/mempolicy.h      | 5 ++---
>>  mm/hugetlb.c                   | 9 +++++++++
>>  3 files changed, 18 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
>> index 0660a03..fc07139 100644
>> --- a/include/linux/hugetlb_inline.h
>> +++ b/include/linux/hugetlb_inline.h
>> @@ -11,6 +11,8 @@ static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
>>  	return !!(vma->vm_flags & VM_HUGETLB);
>>  }
>> 
>> +extern bool vm_hugepage_migration_supported(struct vm_area_struct *vma);
>> +
>>  #else
>> 
>>  static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
>> @@ -18,6 +20,11 @@ static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
>>  	return false;
>>  }
>> 
>> +static inline bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
>> +{
>> +	return false;
>> +}
>> +
>>  #endif
>> 
>>  #endif
>> diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
>> index 5228c62..e9ed49e 100644
>> --- a/include/linux/mempolicy.h
>> +++ b/include/linux/mempolicy.h
>> @@ -185,10 +185,9 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
>>  	if (vma_is_dax(vma))
>>  		return false;
>> 
>> -#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
>> -	if (vma->vm_flags & VM_HUGETLB)
>> +	if (is_vm_hugetlb_page(vma) &&
>> +		!vm_hugepage_migration_supported(vma))
>>  		return false;
>> -#endif
>> 
>>  	/*
>>  	 * Migration allocates pages in the highest zone. If we cannot
>
>There are not many ways to work around the hugepage_migration_supported
>dependency issue.  I suppose you could have moved vma_migratable() into
>mm/mempolicy.c instead of being an inline in the header file.  However,
>it is probably best to do what you have done and hide the ugly stuff in
>the hugetlbfs files. 
Yes, that looks better, thanks. I will prepare a new one.

>
>> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
>> index dd8737a..8ce4a98 100644
>> --- a/mm/hugetlb.c
>> +++ b/mm/hugetlb.c
>> @@ -1316,6 +1316,15 @@ int PageHeadHuge(struct page *page_head)
>>  	return get_compound_page_dtor(page_head) == free_huge_page;
>>  }
>> 
>> +bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
>> +{
>> +	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
>
>This VM_BUG_ON_VMA() seems unnecessary.  The only caller is the one
>added with this patch which is only called if is_vm_hugetlb_page(vma).
>Even though VM_BUG_ON() is only enabled if CONFIG_DEBUG_VM, many distros
>(including the one running my desktop) enable CONFIG_DEBUG_VM in their
>default kernel.
>
>--
>Mike Kravetz
>
>> +	if (hugepage_migration_supported(hstate_vma(vma)))
>> +		return true;
>> +
>> +	return false;
>> +}
>> +
>>  pgoff_t __basepage_index(struct page *page)
>>  {
>>  	struct page *page_head = compound_head(page);
>>

Patch

diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 0660a03..fc07139 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -11,6 +11,8 @@  static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 	return !!(vma->vm_flags & VM_HUGETLB);
 }
 
+extern bool vm_hugepage_migration_supported(struct vm_area_struct *vma);
+
 #else
 
 static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -18,6 +20,11 @@  static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 	return false;
 }
 
+static inline bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
+{
+	return false;
+}
+
 #endif
 
 #endif
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62..e9ed49e 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -185,10 +185,9 @@  static inline bool vma_migratable(struct vm_area_struct *vma)
 	if (vma_is_dax(vma))
 		return false;
 
-#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
-	if (vma->vm_flags & VM_HUGETLB)
+	if (is_vm_hugetlb_page(vma) &&
+		!vm_hugepage_migration_supported(vma))
 		return false;
-#endif
 
 	/*
 	 * Migration allocates pages in the highest zone. If we cannot
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dd8737a..8ce4a98 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1316,6 +1316,15 @@  int PageHeadHuge(struct page *page_head)
 	return get_compound_page_dtor(page_head) == free_huge_page;
 }
 
+bool vm_hugepage_migration_supported(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+	if (hugepage_migration_supported(hstate_vma(vma)))
+		return true;
+
+	return false;
+}
+
 pgoff_t __basepage_index(struct page *page)
 {
 	struct page *page_head = compound_head(page);