[RFC,13/39] mm/rmap: factor out adding folio mappings into __folio_add_rmap()

Message ID: 20231204142146.91437-14-david@redhat.com (mailing list archive)
State: New
Series: mm/rmap: interface overhaul

Commit Message

David Hildenbrand Dec. 4, 2023, 2:21 p.m. UTC
Let's factor it out to prepare for reuse as we convert
page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd]().

Make the compiler always special-case on the granularity by using
__always_inline.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/rmap.c | 75 +++++++++++++++++++++++++++++++------------------------
 1 file changed, 42 insertions(+), 33 deletions(-)
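
The __always_inline annotation pays off because every caller passes a
compile-time-constant mode: the compiler inlines __folio_add_rmap() and
discards the untaken branch, so the PTE and PMD paths compile to
specialized code with no runtime dispatch. As a rough sketch of how the
wrappers introduced earlier in this series pin the mode (abridged for
illustration; not part of this patch):

void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
		unsigned int nr_pages, struct vm_area_struct *vma)
{
	/* mode is a constant here, so only the PTE branch survives inlining */
	__folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_MODE_PTE);
}

void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
		struct vm_area_struct *vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* likewise, only the PMD branch survives inlining */
	__folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_MODE_PMD);
#else
	WARN_ON_ONCE(true);
#endif
}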

Comments

Yin Fengwei Dec. 8, 2023, 1:44 a.m. UTC | #1
On 12/4/2023 10:21 PM, David Hildenbrand wrote:
> Let's factor it out to prepare for reuse as we convert
> page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd]().
> 
> Make the compiler always special-case on the granularity by using
> __always_inline.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>

Patch

diff --git a/mm/rmap.c b/mm/rmap.c
index 53e2c653be99a..c09b360402599 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1127,6 +1127,46 @@ int folio_total_mapcount(struct folio *folio)
 	return mapcount;
 }
 
+static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
+		struct page *page, unsigned int nr_pages, enum rmap_mode mode,
+		int *nr_pmdmapped)
+{
+	atomic_t *mapped = &folio->_nr_pages_mapped;
+	int first, nr = 0;
+
+	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
+
+	/* Is page being mapped by PTE? Is this its first map to be added? */
+	if (likely(mode == RMAP_MODE_PTE)) {
+		do {
+			first = atomic_inc_and_test(&page->_mapcount);
+			if (first && folio_test_large(folio)) {
+				first = atomic_inc_return_relaxed(mapped);
+				first = (first < COMPOUND_MAPPED);
+			}
+
+			if (first)
+				nr++;
+		} while (page++, --nr_pages > 0);
+	} else if (mode == RMAP_MODE_PMD) {
+		first = atomic_inc_and_test(&folio->_entire_mapcount);
+		if (first) {
+			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
+			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
+				*nr_pmdmapped = folio_nr_pages(folio);
+				nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+				/* Raced ahead of a remove and another add? */
+				if (unlikely(nr < 0))
+					nr = 0;
+			} else {
+				/* Raced ahead of a remove of COMPOUND_MAPPED */
+				nr = 0;
+			}
+		}
+	}
+	return nr;
+}
+
 /**
  * folio_move_anon_rmap - move a folio to our anon_vma
  * @folio:	The folio to move to our anon_vma
@@ -1338,42 +1378,11 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
 		struct page *page, unsigned int nr_pages,
 		struct vm_area_struct *vma, enum rmap_mode mode)
 {
-	atomic_t *mapped = &folio->_nr_pages_mapped;
-	unsigned int nr_pmdmapped = 0, first;
-	int nr = 0;
+	int nr, nr_pmdmapped = 0;
 
 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
-	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
-
-	/* Is page being mapped by PTE? Is this its first map to be added? */
-	if (likely(mode == RMAP_MODE_PTE)) {
-		do {
-			first = atomic_inc_and_test(&page->_mapcount);
-			if (first && folio_test_large(folio)) {
-				first = atomic_inc_return_relaxed(mapped);
-				first = (first < COMPOUND_MAPPED);
-			}
-
-			if (first)
-				nr++;
-		} while (page++, --nr_pages > 0);
-	} else if (mode == RMAP_MODE_PMD) {
-		first = atomic_inc_and_test(&folio->_entire_mapcount);
-		if (first) {
-			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
-			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-				nr_pmdmapped = folio_nr_pages(folio);
-				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
-				/* Raced ahead of a remove and another add? */
-				if (unlikely(nr < 0))
-					nr = 0;
-			} else {
-				/* Raced ahead of a remove of COMPOUND_MAPPED */
-				nr = 0;
-			}
-		}
-	}
 
+	nr = __folio_add_rmap(folio, page, nr_pages, mode, &nr_pmdmapped);
 	if (nr_pmdmapped)
 		__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
 			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
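
For readers new to the _nr_pages_mapped encoding that the PMD branch
relies on: the low bits (FOLIO_PAGES_MAPPED) count individually
PTE-mapped pages, and COMPOUND_MAPPED is added once when the folio gains
an entire (PMD) mapping. After the first PMD map, nr & FOLIO_PAGES_MAPPED
therefore recovers how many pages were already PTE-mapped, and only the
remainder becomes newly mapped. A standalone user-space model of that
arithmetic (constants as in mm/internal.h of this era; illustrative only,
not kernel code):

#include <stdio.h>

#define COMPOUND_MAPPED		0x800000	/* as in mm/internal.h */
#define FOLIO_PAGES_MAPPED	(COMPOUND_MAPPED - 1)

int main(void)
{
	int folio_nr_pages = 512;	/* a PMD-sized folio on x86-64 */
	int pte_mapped = 3;		/* pages already mapped by PTE */
	int nr;

	/* First entire mapping: add COMPOUND_MAPPED on top of PTE maps. */
	nr = pte_mapped + COMPOUND_MAPPED;

	if (nr < COMPOUND_MAPPED + COMPOUND_MAPPED) {
		/* No concurrent entire mapping raced with us. */
		nr = folio_nr_pages - (nr & FOLIO_PAGES_MAPPED);
		printf("newly mapped pages: %d\n", nr);	/* prints 509 */
	}
	return 0;
}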