[v3,07/16] mm/rmap: pass rmap flags to hugepage_add_anon_rmap()

Message ID: 20220329160440.193848-8-david@redhat.com
State: New
Series: mm: COW fixes part 2: reliable GUP pins of anonymous pages

Commit Message

David Hildenbrand March 29, 2022, 4:04 p.m. UTC
Let's prepare for passing RMAP_EXCLUSIVE, just as we now do for
page_add_anon_rmap(). RMAP_COMPOUND is implicit for hugetlb
pages and is ignored.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/rmap.h | 2 +-
 mm/migrate.c         | 3 ++-
 mm/rmap.c            | 9 ++++++---
 3 files changed, 9 insertions(+), 5 deletions(-)
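
For readers unfamiliar with the rmap_t flag convention this patch extends: the
callee masks the flags word and normalizes the result with !! before handing it
on as a 0/1 "exclusive" bit. Below is a minimal user-space model of that
pattern (plain C, not kernel code; the constants and helper names only mirror
the kernel's):

#include <stdio.h>

/* Mirrors the kernel's rmap_t flag bits from include/linux/rmap.h. */
typedef int rmap_t;
#define RMAP_NONE	0x00
#define RMAP_EXCLUSIVE	0x01
#define RMAP_COMPOUND	0x02

/* Stand-in for __page_set_anon_rmap(); expects exclusive to be 0 or 1. */
static void set_anon_rmap(int exclusive)
{
	printf("exclusive = %d\n", exclusive);
}

/*
 * Models the patched hugepage_add_anon_rmap(): RMAP_COMPOUND is implicit
 * for hugetlb pages and ignored; only RMAP_EXCLUSIVE is consumed, and
 * !! turns the masked bit into 0 or 1.
 */
static void hugetlb_add_anon_rmap_model(rmap_t flags)
{
	set_anon_rmap(!!(flags & RMAP_EXCLUSIVE));
}

int main(void)
{
	hugetlb_add_anon_rmap_model(RMAP_NONE);				/* prints 0 */
	hugetlb_add_anon_rmap_model(RMAP_EXCLUSIVE | RMAP_COMPOUND);	/* prints 1 */
	return 0;
}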

Comments

Vlastimil Babka April 12, 2022, 8:37 a.m. UTC | #1
On 3/29/22 18:04, David Hildenbrand wrote:
> Let's prepare for passing RMAP_EXCLUSIVE, just as we now do for
> page_add_anon_rmap(). RMAP_COMPOUND is implicit for hugetlb
> pages and is ignored.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index aa734d2e2b01..f47bc937c383 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -191,7 +191,7 @@ void page_add_file_rmap(struct page *, struct vm_area_struct *,
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long address);
+		unsigned long address, rmap_t flags);
 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 436f0ec2da03..48db9500d20e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -232,7 +232,8 @@ static bool remove_migration_pte(struct folio *folio,
 			pte = pte_mkhuge(pte);
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 			if (folio_test_anon(folio))
-				hugepage_add_anon_rmap(new, vma, pvmw.address);
+				hugepage_add_anon_rmap(new, vma, pvmw.address,
+						       RMAP_NONE);
 			else
 				page_dup_file_rmap(new, true);
 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
diff --git a/mm/rmap.c b/mm/rmap.c
index 71bf881da2a6..b972eb8f351b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2347,9 +2347,11 @@ void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc)
  * The following two functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
  * and no lru code, because we handle hugepages differently from common pages.
+ *
+ * RMAP_COMPOUND is ignored.
  */
-void hugepage_add_anon_rmap(struct page *page,
-			    struct vm_area_struct *vma, unsigned long address)
+void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+			    unsigned long address, rmap_t flags)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	int first;
@@ -2359,7 +2361,8 @@ void hugepage_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
-		__page_set_anon_rmap(page, vma, address, 0);
+		__page_set_anon_rmap(page, vma, address,
+				     !!(flags & RMAP_EXCLUSIVE));
 }
 
 void hugepage_add_new_anon_rmap(struct page *page,
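
A note on the "first" logic in hugepage_add_anon_rmap() above: the kernel
stores a page's compound mapcount biased by -1, so atomic_inc_and_test()
returns true exactly when the increment reaches 0, i.e., on the very first
mapping; only then is __page_set_anon_rmap() called. A minimal user-space
model of that convention (non-atomic plain C, for illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Kernel convention: an unmapped page's compound mapcount reads -1. */
static int compound_mapcount = -1;

/* Stand-in for atomic_inc_and_test(): increment, return true iff result is 0. */
static bool inc_and_test(int *v)
{
	return ++(*v) == 0;
}

int main(void)
{
	/* Only the first mapping triggers the anon rmap setup. */
	printf("first map  -> first = %d\n", inc_and_test(&compound_mapcount));	/* 1 */
	printf("second map -> first = %d\n", inc_and_test(&compound_mapcount));	/* 0 */
	return 0;
}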