
[RFC,4/6] mm: Implement folio_add_new_anon_rmap_range()

Message ID: 20230317105802.2634004-5-ryan.roberts@arm.com
State: New
Series: variable-order, large folios for anonymous memory

Commit Message

Ryan Roberts March 17, 2023, 10:58 a.m. UTC
Like folio_add_new_anon_rmap() but batch-rmaps all the pages belonging
to a folio, for efficiency.

Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 include/linux/rmap.h |  2 ++
 mm/rmap.c            | 43 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)

--
2.25.1
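
For context, a minimal sketch of how a caller might use the new helper.
This is hypothetical: map_new_anon_folio() and the allocation details are
illustrative, and only folio_add_new_anon_rmap_range() comes from this
patch.

/*
 * Hypothetical caller sketch: a fault handler that has just allocated a
 * large anonymous folio rmaps all of its pages with one call, instead
 * of nr calls to page_add_new_anon_rmap().
 */
static vm_fault_t map_new_anon_folio(struct vm_fault *vmf, int order)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, order, vma, addr, false);
	if (!folio)
		return VM_FAULT_OOM;

	/* One call initialises _mapcount for every subpage, head first. */
	folio_add_new_anon_rmap_range(folio, vma, addr);
	folio_add_lru_vma(folio, vma);

	/* ... install one PTE per subpage under the page table lock ... */
	return 0;
}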

Comments

Yin, Fengwei March 22, 2023, 6:59 a.m. UTC | #1
On 3/17/23 18:58, Ryan Roberts wrote:
> Like folio_add_new_anon_rmap() but batch-rmaps all the pages belonging
> to a folio, for efficiency.
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>   include/linux/rmap.h |  2 ++
>   mm/rmap.c            | 43 +++++++++++++++++++++++++++++++++++++++++++
>   2 files changed, 45 insertions(+)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index b87d01660412..d1d731650ce8 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -196,6 +196,8 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
>   		unsigned long address);
>   void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>   		unsigned long address);
> +void folio_add_new_anon_rmap_range(struct folio *folio,
> +		struct vm_area_struct *vma, unsigned long address);
>   void page_add_file_rmap(struct page *, struct vm_area_struct *,
>   		bool compound);
>   void page_remove_rmap(struct page *, struct vm_area_struct *,
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 8632e02661ac..05a0c0a700e7 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1302,6 +1302,49 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>   	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>   }
> 
> +/**
> + * folio_add_new_anon_rmap_range - Add mapping to a new anonymous potentially
> + * large but definitely non-THP folio.
> + * @folio:      The folio to add the mapping to.
> + * @vma:        the vm area in which the mapping is added
> + * @address:    the user virtual address of the first page in the folio
> + *
> + * Like folio_add_new_anon_rmap() but must only be called for new *non-THP*
> + * folios. Like folio_add_new_anon_rmap(), the inc-and-test is bypassed and the
> + * folio does not have to be locked. All pages in the folio are individually
> + * accounted.
> + *
> + * As the folio is new, it's assumed to be mapped exclusively by a single
> + * process.
> + */
> +void folio_add_new_anon_rmap_range(struct folio *folio,
> +			struct vm_area_struct *vma, unsigned long address)
> +{
> +	int i;
> +	int nr = folio_nr_pages(folio);
> +	struct page *page = &folio->page;
> +
> +	VM_BUG_ON_VMA(address < vma->vm_start ||
> +		      address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
> +	__folio_set_swapbacked(folio);
> +
> +	if (folio_test_large(folio)) {
> +		/* increment count (starts at 0) */
> +		atomic_set(&folio->_nr_pages_mapped, nr);
> +	}
> +
> +	for (i = 0; i < nr; i++) {
> +		/* increment count (starts at -1) */
> +		atomic_set(&page->_mapcount, 0);
> +		__page_set_anon_rmap(folio, page, vma, address, 1);
> +		page++;
> +		address += PAGE_SIZE;
> +	}
> +
> +	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
It looks like you missed the __page_set_anon_rmap() call here.


Regards
Yin, Fengwei

> +
> +}
> +
>   /**
>    * page_add_file_rmap - add pte mapping to a file page
>    * @page:	the page to add the mapping to
> --
> 2.25.1
>
Yin, Fengwei March 22, 2023, 7:10 a.m. UTC | #2
On 3/17/23 18:58, Ryan Roberts wrote:
> Like folio_add_new_anon_rmap() but batch-rmaps all the pages belonging
> to a folio, for efficiency.
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>   include/linux/rmap.h |  2 ++
>   mm/rmap.c            | 43 +++++++++++++++++++++++++++++++++++++++++++
>   2 files changed, 45 insertions(+)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index b87d01660412..d1d731650ce8 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -196,6 +196,8 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
>   		unsigned long address);
>   void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>   		unsigned long address);
> +void folio_add_new_anon_rmap_range(struct folio *folio,
> +		struct vm_area_struct *vma, unsigned long address);
>   void page_add_file_rmap(struct page *, struct vm_area_struct *,
>   		bool compound);
>   void page_remove_rmap(struct page *, struct vm_area_struct *,
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 8632e02661ac..05a0c0a700e7 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1302,6 +1302,49 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>   	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>   }
> 
> +/**
> + * folio_add_new_anon_rmap_range - Add mapping to a new anonymous potentially
> + * large but definitely non-THP folio.
> + * @folio:      The folio to add the mapping to.
> + * @vma:        the vm area in which the mapping is added
> + * @address:    the user virtual address of the first page in the folio
> + *
> + * Like folio_add_new_anon_rmap() but must only be called for new *non-THP*
> + * folios. Like folio_add_new_anon_rmap(), the inc-and-test is bypassed and the
> + * folio does not have to be locked. All pages in the folio are individually
> + * accounted.
> + *
> + * As the folio is new, it's assumed to be mapped exclusively by a single
> + * process.
> + */
> +void folio_add_new_anon_rmap_range(struct folio *folio,
> +			struct vm_area_struct *vma, unsigned long address)
> +{
> +	int i;
> +	int nr = folio_nr_pages(folio);
> +	struct page *page = &folio->page;
> +
> +	VM_BUG_ON_VMA(address < vma->vm_start ||
> +		      address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
> +	__folio_set_swapbacked(folio);
> +
> +	if (folio_test_large(folio)) {
> +		/* increment count (starts at 0) */
> +		atomic_set(&folio->_nr_pages_mapped, nr);
> +	}
> +
> +	for (i = 0; i < nr; i++) {
> +		/* increment count (starts at -1) */
> +		atomic_set(&page->_mapcount, 0);
> +		__page_set_anon_rmap(folio, page, vma, address, 1);
My bad. You did call it here.

Regards
Yin, Fengwei

> +		page++;
> +		address += PAGE_SIZE;
> +	}
> +
> +	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
> +
> +}
> +
>   /**
>    * page_add_file_rmap - add pte mapping to a file page
>    * @page:	the page to add the mapping to
> --
> 2.25.1
>
Ryan Roberts March 22, 2023, 7:42 a.m. UTC | #3
On 22/03/2023 07:10, Yin Fengwei wrote:
> On 3/17/23 18:58, Ryan Roberts wrote:
>> Like folio_add_new_anon_rmap() but batch-rmaps all the pages belonging
>> to a folio, for efficiency.
>>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> ---
>>   include/linux/rmap.h |  2 ++
>>   mm/rmap.c            | 43 +++++++++++++++++++++++++++++++++++++++++++
>>   2 files changed, 45 insertions(+)
>>
>> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
>> index b87d01660412..d1d731650ce8 100644
>> --- a/include/linux/rmap.h
>> +++ b/include/linux/rmap.h
>> @@ -196,6 +196,8 @@ void page_add_new_anon_rmap(struct page *, struct
>> vm_area_struct *,
>>           unsigned long address);
>>   void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>>           unsigned long address);
>> +void folio_add_new_anon_rmap_range(struct folio *folio,
>> +        struct vm_area_struct *vma, unsigned long address);
>>   void page_add_file_rmap(struct page *, struct vm_area_struct *,
>>           bool compound);
>>   void page_remove_rmap(struct page *, struct vm_area_struct *,
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index 8632e02661ac..05a0c0a700e7 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1302,6 +1302,49 @@ void folio_add_new_anon_rmap(struct folio *folio,
>> struct vm_area_struct *vma,
>>       __page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>>   }
>>
>> +/**
>> + * folio_add_new_anon_rmap_range - Add mapping to a new anonymous potentially
>> + * large but definitely non-THP folio.
>> + * @folio:      The folio to add the mapping to.
>> + * @vma:        the vm area in which the mapping is added
>> + * @address:    the user virtual address of the first page in the folio
>> + *
>> + * Like folio_add_new_anon_rmap() but must only be called for new *non-THP*
>> + * folios. Like folio_add_new_anon_rmap(), the inc-and-test is bypassed and the
>> + * folio does not have to be locked. All pages in the folio are individually
>> + * accounted.
>> + *
>> + * As the folio is new, it's assumed to be mapped exclusively by a single
>> + * process.
>> + */
>> +void folio_add_new_anon_rmap_range(struct folio *folio,
>> +            struct vm_area_struct *vma, unsigned long address)
>> +{
>> +    int i;
>> +    int nr = folio_nr_pages(folio);
>> +    struct page *page = &folio->page;
>> +
>> +    VM_BUG_ON_VMA(address < vma->vm_start ||
>> +              address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
>> +    __folio_set_swapbacked(folio);
>> +
>> +    if (folio_test_large(folio)) {
>> +        /* increment count (starts at 0) */
>> +        atomic_set(&folio->_nr_pages_mapped, nr);
>> +    }
>> +
>> +    for (i = 0; i < nr; i++) {
>> +        /* increment count (starts at -1) */
>> +        atomic_set(&page->_mapcount, 0);
>> +        __page_set_anon_rmap(folio, page, vma, address, 1);
> My bad. You did call it here.

Yes, calling it per subpage to ensure every subpage is marked AnonExclusive.
Although this does rely on calling it _first_ for the head page so that the
index is set correctly. I think that all works out though.

I did wonder whether the order of the calls (__page_set_anon_rmap() vs
__lruvec_stat_mod_folio()) might matter - I've swapped them. But I haven't
found any evidence that it does from reviewing the code.

> 
> Regards
> Yin, Fengwei
> 
>> +        page++;
>> +        address += PAGE_SIZE;
>> +    }
>> +
>> +    __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
>> +
>> +}
>> +
>>   /**
>>    * page_add_file_rmap - add pte mapping to a file page
>>    * @page:    the page to add the mapping to
>> -- 
>> 2.25.1
>>
>
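
For reference, the head-first ordering described above follows from the
shape of __page_set_anon_rmap() around this kernel version (a paraphrased
sketch; the real function in mm/rmap.c handles more cases): only the first
call on a not-yet-anon folio publishes folio->mapping and folio->index;
every later call takes the early exit and just marks its subpage
AnonExclusive.

/*
 * Paraphrased sketch of __page_set_anon_rmap(), as called with
 * exclusive=1 from the loop in folio_add_new_anon_rmap_range().
 */
static void __page_set_anon_rmap(struct folio *folio, struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (folio_test_anon(folio))
		goto out;	/* already initialised by the head-page call */

	/*
	 * First call for this folio: publish the anon_vma and record the
	 * index of the page passed in - which is why the head page must
	 * be processed first.
	 */
	anon_vma = (void *)anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(folio->mapping, (struct address_space *)anon_vma);
	folio->index = linear_page_index(vma, address);
out:
	if (exclusive)
		SetPageAnonExclusive(page);
}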

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..d1d731650ce8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -196,6 +196,8 @@  void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address);
 void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
+void folio_add_new_anon_rmap_range(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
diff --git a/mm/rmap.c b/mm/rmap.c
index 8632e02661ac..05a0c0a700e7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1302,6 +1302,49 @@  void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }

+/**
+ * folio_add_new_anon_rmap_range - Add mapping to a new anonymous potentially
+ * large but definitely non-THP folio.
+ * @folio:      The folio to add the mapping to.
+ * @vma:        the vm area in which the mapping is added
+ * @address:    the user virtual address of the first page in the folio
+ *
+ * Like folio_add_new_anon_rmap() but must only be called for new *non-THP*
+ * folios. Like folio_add_new_anon_rmap(), the inc-and-test is bypassed and the
+ * folio does not have to be locked. All pages in the folio are individually
+ * accounted.
+ *
+ * As the folio is new, it's assumed to be mapped exclusively by a single
+ * process.
+ */
+void folio_add_new_anon_rmap_range(struct folio *folio,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	int i;
+	int nr = folio_nr_pages(folio);
+	struct page *page = &folio->page;
+
+	VM_BUG_ON_VMA(address < vma->vm_start ||
+		      address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
+	__folio_set_swapbacked(folio);
+
+	if (folio_test_large(folio)) {
+		/* increment count (starts at 0) */
+		atomic_set(&folio->_nr_pages_mapped, nr);
+	}
+
+	for (i = 0; i < nr; i++) {
+		/* increment count (starts at -1) */
+		atomic_set(&page->_mapcount, 0);
+		__page_set_anon_rmap(folio, page, vma, address, 1);
+		page++;
+		address += PAGE_SIZE;
+	}
+
+	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
+
+}
+
 /**
  * page_add_file_rmap - add pte mapping to a file page
  * @page:	the page to add the mapping to
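
A note on the "increment count (starts at -1)" comments above the
atomic_set() calls: _mapcount is stored biased by -1, so -1 means
"unmapped" and 0 means "mapped once". A plain store is safe here only
because a brand-new folio is not yet visible to any other task, which is
also why the kernel-doc says the usual inc-and-test is bypassed. A minimal
userspace illustration (not kernel code):

#include <assert.h>
#include <stdatomic.h>

/*
 * The biased mapcount convention: -1 means "no mappings", 0 means
 * "mapped once". For a new, not-yet-visible page, a plain store of 0
 * is equivalent to an atomic increment from -1.
 */
int main(void)
{
	atomic_int mapcount = -1;	/* freshly allocated page */

	atomic_store(&mapcount, 0);	/* the patch's atomic_set() */
	assert(atomic_load(&mapcount) + 1 == 1);	/* one mapping */
	return 0;
}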