[28/45] mm/damon: Enable damon_mkold_pmd_entry to handle hugetlb vmas

Message ID 20240704043132.28501-29-osalvador@suse.de
State New
Series hugetlb pagewalk unification

Commit Message

Oscar Salvador July 4, 2024, 4:31 a.m. UTC
PMD-mapped hugetlb vmas will also reach damon_mkold_pmd_entry.
Add the required code so it knows how to handle them there.

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 mm/damon/ops-common.c | 19 +++++++++++++++----
 mm/damon/vaddr.c      | 15 +++++----------
 2 files changed, 20 insertions(+), 14 deletions(-)

Comments

David Hildenbrand July 4, 2024, 11:03 a.m. UTC | #1
On 04.07.24 06:31, Oscar Salvador wrote:
> PMD-mapped hugetlb vmas will also reach damon_mkold_pmd_entry.
> Add the required code so it knows how to handle them there.
> 
> Signed-off-by: Oscar Salvador <osalvador@suse.de>
> ---
>   mm/damon/ops-common.c | 19 +++++++++++++++----
>   mm/damon/vaddr.c      | 15 +++++----------
>   2 files changed, 20 insertions(+), 14 deletions(-)
> 

(besides a lot of this code needing cleanups and likely fixes)

> diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
> index d25d99cb5f2b..6727658a3ef5 100644
> --- a/mm/damon/ops-common.c
> +++ b/mm/damon/ops-common.c
> @@ -53,18 +53,29 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
>   
>   void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
>   {
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -	struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
> +#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
> +	struct folio *folio;
> +	unsigned long size;
> +
> +	if (is_vm_hugetlb_page(vma)) {
> +		folio = pfn_folio(pmd_pfn(*pmd));
> +		folio_get(folio);
> +		size = huge_page_size(hstate_vma(vma));
> +	} else {
> +		folio = damon_get_folio(pmd_pfn(*pmd));
> +		size = PMD_SIZE;
> +	}
>   
>   	if (!folio)
>   		return;
>   
> -	if (pmdp_clear_young_notify(vma, addr, pmd))
> +	if (pmdp_test_and_clear_young(vma, addr, pmd) ||
> +	    mmu_notifier_clear_young(vma->vm_mm, addr, addr + size))
>   		folio_set_young(folio);

And I think here is the issue for both the cont-PMD and cont-PTE case:

For hugetlb we *absolutely must* use the set_huge_pte_at()-style 
functions, otherwise we might suddenly lose the cont-pte/cont-pmd bit. 
We cannot arbitrarily replace these "huge_pte" functions by others that 
work on individual PTEs/PMDs.
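
For reference, the set_huge_pte_at()-style pattern meant here is the one
damon_hugetlb_mkold() in mm/damon/vaddr.c already uses. A condensed sketch
(helper signatures vary across kernel releases; the size argument to
set_huge_pte_at() is a recent addition):

	/* Condensed from damon_hugetlb_mkold(): huge_ptep_get() and
	 * set_huge_pte_at() operate on the hugetlb leaf as a whole, so a
	 * cont-PTE/cont-PMD mapping keeps its contiguous layout. */
	pte_t entry = huge_ptep_get(pte);
	unsigned long psize = huge_page_size(hstate_vma(vma));

	if (pte_young(entry)) {
		entry = pte_mkold(entry);
		set_huge_pte_at(mm, addr, pte, entry, psize);
	}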

(noting that the hugetlb code in damon_hugetlb_mkold() is likely not
correct, because we could be losing concurrently set dirty bits, I believe)
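
A hypothetical interleaving for that dirty-bit concern (illustration only,
names as in the sketch above, assuming hardware-managed access/dirty bits):

	entry = huge_ptep_get(pte);	/* CPU A reads: young, clean */
					/* CPU B's hardware walker now
					 * sets the dirty bit in *pte */
	entry = pte_mkold(entry);	/* A ages its stale, still-clean copy */
	set_huge_pte_at(mm, addr, pte, entry, psize);
					/* the stale copy is written back;
					 * the dirty bit set by B is lost */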

Patch

diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index d25d99cb5f2b..6727658a3ef5 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -53,18 +53,29 @@  void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
 
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+	struct folio *folio;
+	unsigned long size;
+
+	if (is_vm_hugetlb_page(vma)) {
+		folio = pfn_folio(pmd_pfn(*pmd));
+		folio_get(folio);
+		size = huge_page_size(hstate_vma(vma));
+	} else {
+		folio = damon_get_folio(pmd_pfn(*pmd));
+		size = PMD_SIZE;
+	}
 
 	if (!folio)
 		return;
 
-	if (pmdp_clear_young_notify(vma, addr, pmd))
+	if (pmdp_test_and_clear_young(vma, addr, pmd) ||
+	    mmu_notifier_clear_young(vma->vm_mm, addr, addr + size))
 		folio_set_young(folio);
 
 	folio_set_idle(folio);
 	folio_put(folio);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
 }
 
 #define DAMON_MAX_SUBSCORE	(100)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 2d5ad47b9dae..47c84cdda32c 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -304,21 +304,16 @@  static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 	pmd_t pmde;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(pmdp_get(pmd))) {
-		ptl = pmd_lock(walk->mm, pmd);
-		pmde = pmdp_get(pmd);
-
-		if (!pmd_present(pmde)) {
+	ptl = pmd_huge_lock(walk->vma, pmd);
+	if (ptl) {
+		if (!pmd_present(*pmd)) {
 			spin_unlock(ptl);
 			return 0;
 		}
 
-		if (pmd_trans_huge(pmde)) {
-			damon_pmdp_mkold(pmd, walk->vma, addr);
-			spin_unlock(ptl);
-			return 0;
-		}
+		damon_pmdp_mkold(pmd, walk->vma, addr);
 		spin_unlock(ptl);
+		return 0;
 	}
 
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);