[v1,13/39] mm/rmap: factor out adding folio mappings into __folio_add_rmap()

Message ID 20231211155652.131054-14-david@redhat.com (mailing list archive)
State New
Series mm/rmap: interface overhaul

Commit Message

David Hildenbrand Dec. 11, 2023, 3:56 p.m. UTC
Let's factor it out to prepare for reuse as we convert
page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd]().

Make the compiler always special-case on the granularity by using
__always_inline.

Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/rmap.c | 81 ++++++++++++++++++++++++++++++-------------------------
 1 file changed, 45 insertions(+), 36 deletions(-)
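
As a rough illustration (not part of this patch, and the helper name below is
made up), a later PMD-granularity caller could reuse the new helper along
these lines; because __folio_add_rmap() is __always_inline and the mode
argument is a compile-time constant at every call site, the compiler
specializes away the switch and keeps only the PMD (or PTE) path:

static void example_add_rmap_pmd(struct folio *folio, struct page *page)
{
	unsigned int nr, nr_pmdmapped = 0;

	/* Constant RMAP_MODE_PMD: only the PMD branch survives inlining. */
	nr = __folio_add_rmap(folio, page, HPAGE_PMD_NR, RMAP_MODE_PMD,
			      &nr_pmdmapped);

	/*
	 * The real callers then feed nr / nr_pmdmapped into the mapped-page
	 * statistics updates, as __folio_add_file_rmap() does after this
	 * patch.
	 */
}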

Comments

Ryan Roberts Dec. 18, 2023, 4:07 p.m. UTC | #1
On 11/12/2023 15:56, David Hildenbrand wrote:
> Let's factor it out to prepare for reuse as we convert
> page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd]().
> 
> Make the compiler always special-case on the granularity by using
> __always_inline.
> 
> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---
>  mm/rmap.c | 81 ++++++++++++++++++++++++++++++-------------------------
>  1 file changed, 45 insertions(+), 36 deletions(-)
> 
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 2ff2f11275e5..c5761986a411 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1157,6 +1157,49 @@ int folio_total_mapcount(struct folio *folio)
>  	return mapcount;
>  }
>  
> +static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
> +		struct page *page, int nr_pages, enum rmap_mode mode,
> +		unsigned int *nr_pmdmapped)
> +{
> +	atomic_t *mapped = &folio->_nr_pages_mapped;
> +	int first, nr = 0;
> +
> +	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
> +
> +	/* Is page being mapped by PTE? Is this its first map to be added? */

I suspect this comment is left over from the old version? It sounds a bit odd in
its new context.

> +	switch (mode) {
> +	case RMAP_MODE_PTE:
> +		do {
> +			first = atomic_inc_and_test(&page->_mapcount);
> +			if (first && folio_test_large(folio)) {
> +				first = atomic_inc_return_relaxed(mapped);
> +				first = (first < COMPOUND_MAPPED);
> +			}
> +
> +			if (first)
> +				nr++;
> +		} while (page++, --nr_pages > 0);
> +		break;
> +	case RMAP_MODE_PMD:
> +		first = atomic_inc_and_test(&folio->_entire_mapcount);
> +		if (first) {
> +			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
> +			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
> +				*nr_pmdmapped = folio_nr_pages(folio);
> +				nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
> +				/* Raced ahead of a remove and another add? */
> +				if (unlikely(nr < 0))
> +					nr = 0;
> +			} else {
> +				/* Raced ahead of a remove of COMPOUND_MAPPED */
> +				nr = 0;
> +			}
> +		}
> +		break;
> +	}
> +	return nr;
> +}
> +
>  /**
>   * folio_move_anon_rmap - move a folio to our anon_vma
>   * @folio:	The folio to move to our anon_vma
> @@ -1380,45 +1423,11 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
>  		struct page *page, int nr_pages, struct vm_area_struct *vma,
>  		enum rmap_mode mode)
>  {
> -	atomic_t *mapped = &folio->_nr_pages_mapped;
> -	unsigned int nr_pmdmapped = 0, first;
> -	int nr = 0;
> +	unsigned int nr, nr_pmdmapped = 0;

You're still being inconsistent with signed/unsigned here. Is there a reason
these can't be signed like nr_pages in the interface?

>  
>  	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
> -	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
> -
> -	/* Is page being mapped by PTE? Is this its first map to be added? */
> -	switch (mode) {
> -	case RMAP_MODE_PTE:
> -		do {
> -			first = atomic_inc_and_test(&page->_mapcount);
> -			if (first && folio_test_large(folio)) {
> -				first = atomic_inc_return_relaxed(mapped);
> -				first = (first < COMPOUND_MAPPED);
> -			}
> -
> -			if (first)
> -				nr++;
> -		} while (page++, --nr_pages > 0);
> -		break;
> -	case RMAP_MODE_PMD:
> -		first = atomic_inc_and_test(&folio->_entire_mapcount);
> -		if (first) {
> -			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
> -			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
> -				nr_pmdmapped = folio_nr_pages(folio);
> -				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
> -				/* Raced ahead of a remove and another add? */
> -				if (unlikely(nr < 0))
> -					nr = 0;
> -			} else {
> -				/* Raced ahead of a remove of COMPOUND_MAPPED */
> -				nr = 0;
> -			}
> -		}
> -		break;
> -	}
>  
> +	nr = __folio_add_rmap(folio, page, nr_pages, mode, &nr_pmdmapped);
>  	if (nr_pmdmapped)
>  		__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
>  			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
David Hildenbrand Dec. 18, 2023, 5:06 p.m. UTC | #2
On 18.12.23 17:07, Ryan Roberts wrote:
> On 11/12/2023 15:56, David Hildenbrand wrote:
>> Let's factor it out to prepare for reuse as we convert
>> page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd]().
>>
>> Make the compiler always special-case on the granularity by using
>> __always_inline.
>>
>> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>> ---
>>   mm/rmap.c | 81 ++++++++++++++++++++++++++++++-------------------------
>>   1 file changed, 45 insertions(+), 36 deletions(-)
>>
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index 2ff2f11275e5..c5761986a411 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1157,6 +1157,49 @@ int folio_total_mapcount(struct folio *folio)
>>   	return mapcount;
>>   }
>>   
>> +static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
>> +		struct page *page, int nr_pages, enum rmap_mode mode,
>> +		unsigned int *nr_pmdmapped)
>> +{
>> +	atomic_t *mapped = &folio->_nr_pages_mapped;
>> +	int first, nr = 0;
>> +
>> +	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
>> +
>> +	/* Is page being mapped by PTE? Is this its first map to be added? */
> 
> I suspect this comment is left over from the old version? It sounds a bit odd in
> its new context.

In this patch, I'm just moving the code, so it would have to be dropped 
in a previous patch.

I'm happy to drop all these comments in previous patches.

> 
>> +	switch (mode) {
>> +	case RMAP_MODE_PTE:
>> +		do {
>> +			first = atomic_inc_and_test(&page->_mapcount);
>> +			if (first && folio_test_large(folio)) {
>> +				first = atomic_inc_return_relaxed(mapped);
>> +				first = (first < COMPOUND_MAPPED);
>> +			}
>> +
>> +			if (first)
>> +				nr++;
>> +		} while (page++, --nr_pages > 0);
>> +		break;
>> +	case RMAP_MODE_PMD:
>> +		first = atomic_inc_and_test(&folio->_entire_mapcount);
>> +		if (first) {
>> +			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
>> +			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
>> +				*nr_pmdmapped = folio_nr_pages(folio);
>> +				nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
>> +				/* Raced ahead of a remove and another add? */
>> +				if (unlikely(nr < 0))
>> +					nr = 0;
>> +			} else {
>> +				/* Raced ahead of a remove of COMPOUND_MAPPED */
>> +				nr = 0;
>> +			}
>> +		}
>> +		break;
>> +	}
>> +	return nr;
>> +}
>> +
>>   /**
>>    * folio_move_anon_rmap - move a folio to our anon_vma
>>    * @folio:	The folio to move to our anon_vma
>> @@ -1380,45 +1423,11 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
>>   		struct page *page, int nr_pages, struct vm_area_struct *vma,
>>   		enum rmap_mode mode)
>>   {
>> -	atomic_t *mapped = &folio->_nr_pages_mapped;
>> -	unsigned int nr_pmdmapped = 0, first;
>> -	int nr = 0;
>> +	unsigned int nr, nr_pmdmapped = 0;
> 
> You're still being inconsistent with signed/unsigned here. Is there a reason
> these can't be signed like nr_pages in the interface?

I can turn them into signed values.

Personally, I think it's misleading to use "signed" for values where 
negative values have absolutely no meaning. But sure, we can be 
consistent, at least in rmap code.
Ryan Roberts Dec. 19, 2023, 8:40 a.m. UTC | #3
On 18/12/2023 17:06, David Hildenbrand wrote:
> On 18.12.23 17:07, Ryan Roberts wrote:
>> On 11/12/2023 15:56, David Hildenbrand wrote:
>>> Let's factor it out to prepare for reuse as we convert
>>> page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd]().
>>>
>>> Make the compiler always special-case on the granularity by using
>>> __always_inline.
>>>
>>> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
>>> Signed-off-by: David Hildenbrand <david@redhat.com>
>>> ---
>>>   mm/rmap.c | 81 ++++++++++++++++++++++++++++++-------------------------
>>>   1 file changed, 45 insertions(+), 36 deletions(-)
>>>
>>> diff --git a/mm/rmap.c b/mm/rmap.c
>>> index 2ff2f11275e5..c5761986a411 100644
>>> --- a/mm/rmap.c
>>> +++ b/mm/rmap.c
>>> @@ -1157,6 +1157,49 @@ int folio_total_mapcount(struct folio *folio)
>>>       return mapcount;
>>>   }
>>>   +static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
>>> +        struct page *page, int nr_pages, enum rmap_mode mode,
>>> +        unsigned int *nr_pmdmapped)
>>> +{
>>> +    atomic_t *mapped = &folio->_nr_pages_mapped;
>>> +    int first, nr = 0;
>>> +
>>> +    __folio_rmap_sanity_checks(folio, page, nr_pages, mode);
>>> +
>>> +    /* Is page being mapped by PTE? Is this its first map to be added? */
>>
>> I suspect this comment is left over from the old version? It sounds a bit odd in
>> its new context.
> 
> In this patch, I'm just moving the code, so it would have to be dropped in a
> previous patch.
> 
> I'm happy to drop all these comments in previous patches.

Well, it doesn't really mean much to me in this new context, so I would reword if
there is still something you need to convey to the reader, else just remove.

> 
>>
>>> +    switch (mode) {
>>> +    case RMAP_MODE_PTE:
>>> +        do {
>>> +            first = atomic_inc_and_test(&page->_mapcount);
>>> +            if (first && folio_test_large(folio)) {
>>> +                first = atomic_inc_return_relaxed(mapped);
>>> +                first = (first < COMPOUND_MAPPED);
>>> +            }
>>> +
>>> +            if (first)
>>> +                nr++;
>>> +        } while (page++, --nr_pages > 0);
>>> +        break;
>>> +    case RMAP_MODE_PMD:
>>> +        first = atomic_inc_and_test(&folio->_entire_mapcount);
>>> +        if (first) {
>>> +            nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
>>> +            if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
>>> +                *nr_pmdmapped = folio_nr_pages(folio);
>>> +                nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
>>> +                /* Raced ahead of a remove and another add? */
>>> +                if (unlikely(nr < 0))
>>> +                    nr = 0;
>>> +            } else {
>>> +                /* Raced ahead of a remove of COMPOUND_MAPPED */
>>> +                nr = 0;
>>> +            }
>>> +        }
>>> +        break;
>>> +    }
>>> +    return nr;
>>> +}
>>> +
>>>   /**
>>>    * folio_move_anon_rmap - move a folio to our anon_vma
>>>    * @folio:    The folio to move to our anon_vma
>>> @@ -1380,45 +1423,11 @@ static __always_inline void
>>> __folio_add_file_rmap(struct folio *folio,
>>>           struct page *page, int nr_pages, struct vm_area_struct *vma,
>>>           enum rmap_mode mode)
>>>   {
>>> -    atomic_t *mapped = &folio->_nr_pages_mapped;
>>> -    unsigned int nr_pmdmapped = 0, first;
>>> -    int nr = 0;
>>> +    unsigned int nr, nr_pmdmapped = 0;
>>
>> You're still being inconsistent with signed/unsigned here. Is there a reason
>> these can't be signed like nr_pages in the interface?
> 
> I can turn them into signed values.
> 
> Personally, I think it's misleading to use "signed" for values where
> negative values have absolutely no meaning. But sure, we can be consistent, at
> least in rmap code.
> 

Well, it's an easy way to detect overflow? But I know what you mean. There are
lots of other APIs that accept signed/unsigned 32/64 bits; it's a mess. It would
be a tiny step in the right direction if a series could at least be consistent
with itself though, IMHO. :)
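
To make the overflow point concrete, here is a tiny standalone userspace
sketch (plain C, not kernel code) of the clamp in the PMD path: with a signed
counter the "nr < 0" check catches the case where a racing remove got ahead of
us, whereas with an unsigned counter the same subtraction wraps around and the
check can never fire (compilers typically warn that the comparison is always
false).

#include <stdio.h>

int main(void)
{
	/* Pretend a racing remove left fewer pages mapped than expected. */
	int nr_signed = 4 - 6;            /* -2 */
	unsigned int nr_unsigned = 4 - 6; /* wraps to UINT_MAX - 1 */

	if (nr_signed < 0)                /* clamp works */
		nr_signed = 0;
	if (nr_unsigned < 0)              /* always false for unsigned */
		nr_unsigned = 0;

	printf("signed: %d, unsigned: %u\n", nr_signed, nr_unsigned);
	return 0;
}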

Patch

diff --git a/mm/rmap.c b/mm/rmap.c
index 2ff2f11275e5..c5761986a411 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1157,6 +1157,49 @@  int folio_total_mapcount(struct folio *folio)
 	return mapcount;
 }
 
+static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
+		struct page *page, int nr_pages, enum rmap_mode mode,
+		unsigned int *nr_pmdmapped)
+{
+	atomic_t *mapped = &folio->_nr_pages_mapped;
+	int first, nr = 0;
+
+	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
+
+	/* Is page being mapped by PTE? Is this its first map to be added? */
+	switch (mode) {
+	case RMAP_MODE_PTE:
+		do {
+			first = atomic_inc_and_test(&page->_mapcount);
+			if (first && folio_test_large(folio)) {
+				first = atomic_inc_return_relaxed(mapped);
+				first = (first < COMPOUND_MAPPED);
+			}
+
+			if (first)
+				nr++;
+		} while (page++, --nr_pages > 0);
+		break;
+	case RMAP_MODE_PMD:
+		first = atomic_inc_and_test(&folio->_entire_mapcount);
+		if (first) {
+			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
+			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
+				*nr_pmdmapped = folio_nr_pages(folio);
+				nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+				/* Raced ahead of a remove and another add? */
+				if (unlikely(nr < 0))
+					nr = 0;
+			} else {
+				/* Raced ahead of a remove of COMPOUND_MAPPED */
+				nr = 0;
+			}
+		}
+		break;
+	}
+	return nr;
+}
+
 /**
  * folio_move_anon_rmap - move a folio to our anon_vma
  * @folio:	The folio to move to our anon_vma
@@ -1380,45 +1423,11 @@  static __always_inline void __folio_add_file_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum rmap_mode mode)
 {
-	atomic_t *mapped = &folio->_nr_pages_mapped;
-	unsigned int nr_pmdmapped = 0, first;
-	int nr = 0;
+	unsigned int nr, nr_pmdmapped = 0;
 
 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
-	__folio_rmap_sanity_checks(folio, page, nr_pages, mode);
-
-	/* Is page being mapped by PTE? Is this its first map to be added? */
-	switch (mode) {
-	case RMAP_MODE_PTE:
-		do {
-			first = atomic_inc_and_test(&page->_mapcount);
-			if (first && folio_test_large(folio)) {
-				first = atomic_inc_return_relaxed(mapped);
-				first = (first < COMPOUND_MAPPED);
-			}
-
-			if (first)
-				nr++;
-		} while (page++, --nr_pages > 0);
-		break;
-	case RMAP_MODE_PMD:
-		first = atomic_inc_and_test(&folio->_entire_mapcount);
-		if (first) {
-			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
-			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-				nr_pmdmapped = folio_nr_pages(folio);
-				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
-				/* Raced ahead of a remove and another add? */
-				if (unlikely(nr < 0))
-					nr = 0;
-			} else {
-				/* Raced ahead of a remove of COMPOUND_MAPPED */
-				nr = 0;
-			}
-		}
-		break;
-	}
 
+	nr = __folio_add_rmap(folio, page, nr_pages, mode, &nr_pmdmapped);
 	if (nr_pmdmapped)
 		__lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
 			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);