
[v2] mm/migrate: move common code to numa_migrate_check (was numa_migrate_prep)

Message ID 20240808233728.1477034-1-ziy@nvidia.com (mailing list archive)
State New
Series [v2] mm/migrate: move common code to numa_migrate_check (was numa_migrate_prep)

Commit Message

Zi Yan Aug. 8, 2024, 11:37 p.m. UTC
do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
reduce redundancy, move common code to numa_migrate_prep() and rename
the function to numa_migrate_check() to reflect its functionality.

Now do_huge_pmd_numa_page() also checks shared folios to set the
TNF_SHARED flag.

Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/huge_memory.c | 29 +++++++++-------------
 mm/internal.h    |  5 ++--
 mm/memory.c      | 63 +++++++++++++++++++++++++-----------------------
 3 files changed, 47 insertions(+), 50 deletions(-)
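
For illustration, after this change both fault handlers funnel through the
same call shape. A condensed sketch of that shared pattern (locking,
accounting, and the migration error paths are elided; this is not the
literal kernel code):

	int flags = 0, last_cpupid, target_nid;

	/* "writable" is derived from the original PTE/PMD before this call */
	target_nid = numa_migrate_check(folio, vmf, addr, &flags,
					writable, &last_cpupid);
	if (target_nid == NUMA_NO_NODE)
		goto out_map;	/* no better node: remap the entry in place */
	/* otherwise try to migrate the folio to target_nid */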

Comments

Huang, Ying Aug. 9, 2024, 1:09 a.m. UTC | #1
Zi Yan <ziy@nvidia.com> writes:

> do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
> reduce redundancy, move common code to numa_migrate_prep() and rename
> the function to numa_migrate_check() to reflect its functionality.
>
> Now do_huge_pmd_numa_page() also checks shared folios to set the
> TNF_SHARED flag.
>
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Zi Yan <ziy@nvidia.com>

LGTM, Thanks!

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

[snip]
Baolin Wang Aug. 9, 2024, 3:43 a.m. UTC | #2
On 2024/8/9 07:37, Zi Yan wrote:
> do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
> reduce redundancy, move common code to numa_migrate_prep() and rename
> the function to numa_migrate_check() to reflect its functionality.
> 
> Now do_huge_pmd_numa_page() also checks shared folios to set the
> TNF_SHARED flag.
> 
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Zi Yan <ziy@nvidia.com>

LGTM. Feel free to add:
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

> ---
>   mm/huge_memory.c | 29 +++++++++-------------
>   mm/internal.h    |  5 ++--
>   mm/memory.c      | 63 +++++++++++++++++++++++++-----------------------
>   3 files changed, 47 insertions(+), 50 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 4e4364a17e6d..96a52e71d167 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -1669,22 +1669,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
>   vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
>   {
>   	struct vm_area_struct *vma = vmf->vma;
> -	pmd_t oldpmd = vmf->orig_pmd;
> -	pmd_t pmd;
>   	struct folio *folio;
>   	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
>   	int nid = NUMA_NO_NODE;
> -	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
> +	int target_nid, last_cpupid;
> +	pmd_t pmd, old_pmd;
>   	bool writable = false;
>   	int flags = 0;
>   
>   	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
> -	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
> +	old_pmd = pmdp_get(vmf->pmd);
> +
> +	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
>   		spin_unlock(vmf->ptl);
>   		return 0;
>   	}
>   
> -	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
> +	pmd = pmd_modify(old_pmd, vma->vm_page_prot);
>   
>   	/*
>   	 * Detect now whether the PMD could be writable; this information
> @@ -1699,18 +1700,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
>   	if (!folio)
>   		goto out_map;
>   
> -	/* See similar comment in do_numa_page for explanation */
> -	if (!writable)
> -		flags |= TNF_NO_GROUP;
> -
>   	nid = folio_nid(folio);
> -	/*
> -	 * For memory tiering mode, cpupid of slow memory page is used
> -	 * to record page access time.  So use default value.
> -	 */
> -	if (!folio_use_access_time(folio))
> -		last_cpupid = folio_last_cpupid(folio);
> -	target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
> +
> +	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
> +					&last_cpupid);
>   	if (target_nid == NUMA_NO_NODE)
>   		goto out_map;
>   	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
> @@ -1728,7 +1721,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
>   	} else {
>   		flags |= TNF_MIGRATE_FAIL;
>   		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
> -		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
> +		if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
>   			spin_unlock(vmf->ptl);
>   			return 0;
>   		}
> @@ -1736,7 +1729,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
>   
>   out_map:
>   	/* Restore the PMD */
> -	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
> +	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
>   	pmd = pmd_mkyoung(pmd);
>   	if (writable)
>   		pmd = pmd_mkwrite(pmd, vma);
> diff --git a/mm/internal.h b/mm/internal.h
> index 52f7fc4e8ac3..fb16e18c9761 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1191,8 +1191,9 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
>   
>   void __vunmap_range_noflush(unsigned long start, unsigned long end);
>   
> -int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
> -		      unsigned long addr, int page_nid, int *flags);
> +int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
> +		      unsigned long addr, int *flags, bool writable,
> +		      int *last_cpupid);
>   
>   void free_zone_device_folio(struct folio *folio);
>   int migrate_device_coherent_page(struct page *page);
> diff --git a/mm/memory.c b/mm/memory.c
> index d9b1dff9dc57..3441f60d54ef 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5368,16 +5368,43 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
>   	return ret;
>   }
>   
> -int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
> -		      unsigned long addr, int page_nid, int *flags)
> +int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
> +		      unsigned long addr, int *flags,
> +		      bool writable, int *last_cpupid)
>   {
>   	struct vm_area_struct *vma = vmf->vma;
>   
> +	/*
> +	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
> +	 * much anyway since they can be in shared cache state. This misses
> +	 * the case where a mapping is writable but the process never writes
> +	 * to it but pte_write gets cleared during protection updates and
> +	 * pte_dirty has unpredictable behaviour between PTE scan updates,
> +	 * background writeback, dirty balancing and application behaviour.
> +	 */
> +	if (!writable)
> +		*flags |= TNF_NO_GROUP;
> +
> +	/*
> +	 * Flag if the folio is shared between multiple address spaces. This
> +	 * is later used when determining whether to group tasks together
> +	 */
> +	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
> +		*flags |= TNF_SHARED;
> +	/*
> +	 * For memory tiering mode, cpupid of slow memory page is used
> +	 * to record page access time.  So use default value.
> +	 */
> +	if (folio_use_access_time(folio))
> +		*last_cpupid = (-1 & LAST_CPUPID_MASK);
> +	else
> +		*last_cpupid = folio_last_cpupid(folio);
> +
>   	/* Record the current PID accessing VMA */
>   	vma_set_access_pid_bit(vma);
>   
>   	count_vm_numa_event(NUMA_HINT_FAULTS);
> -	if (page_nid == numa_node_id()) {
> +	if (folio_nid(folio) == numa_node_id()) {
>   		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
>   		*flags |= TNF_FAULT_LOCAL;
>   	}
> @@ -5479,35 +5506,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>   	if (!folio || folio_is_zone_device(folio))
>   		goto out_map;
>   
> -	/*
> -	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
> -	 * much anyway since they can be in shared cache state. This misses
> -	 * the case where a mapping is writable but the process never writes
> -	 * to it but pte_write gets cleared during protection updates and
> -	 * pte_dirty has unpredictable behaviour between PTE scan updates,
> -	 * background writeback, dirty balancing and application behaviour.
> -	 */
> -	if (!writable)
> -		flags |= TNF_NO_GROUP;
> -
> -	/*
> -	 * Flag if the folio is shared between multiple address spaces. This
> -	 * is later used when determining whether to group tasks together
> -	 */
> -	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
> -		flags |= TNF_SHARED;
> -
>   	nid = folio_nid(folio);
>   	nr_pages = folio_nr_pages(folio);
> -	/*
> -	 * For memory tiering mode, cpupid of slow memory page is used
> -	 * to record page access time.  So use default value.
> -	 */
> -	if (folio_use_access_time(folio))
> -		last_cpupid = (-1 & LAST_CPUPID_MASK);
> -	else
> -		last_cpupid = folio_last_cpupid(folio);
> -	target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
> +
> +	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
> +					writable, &last_cpupid);
>   	if (target_nid == NUMA_NO_NODE)
>   		goto out_map;
>   	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4364a17e6d..96a52e71d167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1669,22 +1669,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	pmd_t oldpmd = vmf->orig_pmd;
-	pmd_t pmd;
 	struct folio *folio;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int nid = NUMA_NO_NODE;
-	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+	int target_nid, last_cpupid;
+	pmd_t pmd, old_pmd;
 	bool writable = false;
 	int flags = 0;
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+	old_pmd = pmdp_get(vmf->pmd);
+
+	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
 		spin_unlock(vmf->ptl);
 		return 0;
 	}
 
-	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+	pmd = pmd_modify(old_pmd, vma->vm_page_prot);
 
 	/*
 	 * Detect now whether the PMD could be writable; this information
@@ -1699,18 +1700,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	if (!folio)
 		goto out_map;
 
-	/* See similar comment in do_numa_page for explanation */
-	if (!writable)
-		flags |= TNF_NO_GROUP;
-
 	nid = folio_nid(folio);
-	/*
-	 * For memory tiering mode, cpupid of slow memory page is used
-	 * to record page access time.  So use default value.
-	 */
-	if (!folio_use_access_time(folio))
-		last_cpupid = folio_last_cpupid(folio);
-	target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
+
+	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
+					&last_cpupid);
 	if (target_nid == NUMA_NO_NODE)
 		goto out_map;
 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -1728,7 +1721,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	} else {
 		flags |= TNF_MIGRATE_FAIL;
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+		if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
 			spin_unlock(vmf->ptl);
 			return 0;
 		}
@@ -1736,7 +1729,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 
 out_map:
 	/* Restore the PMD */
-	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
 	pmd = pmd_mkyoung(pmd);
 	if (writable)
 		pmd = pmd_mkwrite(pmd, vma);
diff --git a/mm/internal.h b/mm/internal.h
index 52f7fc4e8ac3..fb16e18c9761 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1191,8 +1191,9 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
 
 void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
-		      unsigned long addr, int page_nid, int *flags);
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+		      unsigned long addr, int *flags, bool writable,
+		      int *last_cpupid);
 
 void free_zone_device_folio(struct folio *folio);
 int migrate_device_coherent_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index d9b1dff9dc57..3441f60d54ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5368,16 +5368,43 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
 	return ret;
 }
 
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
-		      unsigned long addr, int page_nid, int *flags)
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+		      unsigned long addr, int *flags,
+		      bool writable, int *last_cpupid)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
+	/*
+	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+	 * much anyway since they can be in shared cache state. This misses
+	 * the case where a mapping is writable but the process never writes
+	 * to it but pte_write gets cleared during protection updates and
+	 * pte_dirty has unpredictable behaviour between PTE scan updates,
+	 * background writeback, dirty balancing and application behaviour.
+	 */
+	if (!writable)
+		*flags |= TNF_NO_GROUP;
+
+	/*
+	 * Flag if the folio is shared between multiple address spaces. This
+	 * is later used when determining whether to group tasks together
+	 */
+	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
+		*flags |= TNF_SHARED;
+	/*
+	 * For memory tiering mode, cpupid of slow memory page is used
+	 * to record page access time.  So use default value.
+	 */
+	if (folio_use_access_time(folio))
+		*last_cpupid = (-1 & LAST_CPUPID_MASK);
+	else
+		*last_cpupid = folio_last_cpupid(folio);
+
 	/* Record the current PID accessing VMA */
 	vma_set_access_pid_bit(vma);
 
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (page_nid == numa_node_id()) {
+	if (folio_nid(folio) == numa_node_id()) {
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 		*flags |= TNF_FAULT_LOCAL;
 	}
@@ -5479,35 +5506,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	if (!folio || folio_is_zone_device(folio))
 		goto out_map;
 
-	/*
-	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
-	 * much anyway since they can be in shared cache state. This misses
-	 * the case where a mapping is writable but the process never writes
-	 * to it but pte_write gets cleared during protection updates and
-	 * pte_dirty has unpredictable behaviour between PTE scan updates,
-	 * background writeback, dirty balancing and application behaviour.
-	 */
-	if (!writable)
-		flags |= TNF_NO_GROUP;
-
-	/*
-	 * Flag if the folio is shared between multiple address spaces. This
-	 * is later used when determining whether to group tasks together
-	 */
-	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
-		flags |= TNF_SHARED;
-
 	nid = folio_nid(folio);
 	nr_pages = folio_nr_pages(folio);
-	/*
-	 * For memory tiering mode, cpupid of slow memory page is used
-	 * to record page access time.  So use default value.
-	 */
-	if (folio_use_access_time(folio))
-		last_cpupid = (-1 & LAST_CPUPID_MASK);
-	else
-		last_cpupid = folio_last_cpupid(folio);
-	target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
+
+	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
+					writable, &last_cpupid);
 	if (target_nid == NUMA_NO_NODE)
 		goto out_map;
 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {