
mm/numa: do_numa_page() do_huge_pmd_numa_page() code clean up.

Message ID 20240809134502.1504111-1-ziy@nvidia.com (mailing list archive)
State New
Series mm/numa: do_numa_page() do_huge_pmd_numa_page() code clean up.

Commit Message

Zi Yan Aug. 9, 2024, 1:45 p.m. UTC
Remove the unnecessary else branches in do_huge_pmd_numa_page() and
do_numa_page() to reduce code indentation.

Suggested-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/huge_memory.c | 13 ++++++-------
 mm/memory.c      | 20 ++++++++++----------
 2 files changed, 16 insertions(+), 17 deletions(-)
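
For readers skimming the diff below, the change is purely mechanical: because the
successful-migration arm of each function already ends in a return, the failure
handling no longer needs to sit inside an else branch. The following stand-alone
sketch illustrates the pattern; the function names and the printf() fallback are
made up for illustration and are not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Before: the failure path is indented one level deeper than necessary. */
static int numa_fault_before(bool migrated)
{
	if (migrated) {
		return 0;
	} else {
		/* Everything here is indented only because of the else. */
		printf("migration failed, falling back\n");
		return -1;
	}
}

/*
 * After: the success arm ends in a return, so the else is redundant and
 * the fallback handling moves back to the outer indentation level.
 */
static int numa_fault_after(bool migrated)
{
	if (migrated)
		return 0;

	printf("migration failed, falling back\n");
	return -1;
}

int main(void)
{
	/* Both variants behave identically; only the code shape differs. */
	return (numa_fault_before(false) == numa_fault_after(false)) ? 0 : 1;
}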

Comments

Zi Yan Aug. 9, 2024, 2:17 p.m. UTC | #1
On 9 Aug 2024, at 9:45, Zi Yan wrote:

> Remove the unnecessary else branches in do_huge_pmd_numa_page() and
> do_numa_page() to reduce code indentation.
>
> Suggested-by: "Huang, Ying" <ying.huang@intel.com>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> ---

Please ignore this for now. I will send it with other patches.

Best Regards,
Yan, Zi

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 14f314bfce10..f2fd3aabb67b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1719,15 +1719,14 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 		nid = target_nid;
 		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
 		return 0;
-	} else {
-		flags |= TNF_MIGRATE_FAIL;
-		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-		if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
-			spin_unlock(vmf->ptl);
-			return 0;
-		}
 	}
 
+	flags |= TNF_MIGRATE_FAIL;
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
+		spin_unlock(vmf->ptl);
+		return 0;
+	}
 out_map:
 	/* Restore the PMD */
 	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
diff --git a/mm/memory.c b/mm/memory.c
index dd993fb3f654..e4f27c0696cb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5528,16 +5528,16 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		flags |= TNF_MIGRATED;
 		task_numa_fault(last_cpupid, nid, nr_pages, flags);
 		return 0;
-	} else {
-		flags |= TNF_MIGRATE_FAIL;
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					       vmf->address, &vmf->ptl);
-		if (unlikely(!vmf->pte))
-			return 0;
-		if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
-			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			return 0;
-		}
+	}
+
+	flags |= TNF_MIGRATE_FAIL;
+	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	if (unlikely(!vmf->pte))
+		return 0;
+	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		return 0;
 	}
 out_map:
 	/*