[RFC,3/5] rmap: add page_add_file_rmap_range()

Message ID: 20230130125504.2509710-4-fengwei.yin@intel.com (mailing list archive)
State: New
Series: folio based filemap_map_pages()

Commit Message

Yin, Fengwei Jan. 30, 2023, 12:55 p.m. UTC
page_add_file_rmap_range() allows adding rmap entries for a specific
page range of a large folio.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 include/linux/rmap.h |  2 ++
 mm/rmap.c            | 70 ++++++++++++++++++++++++++++----------------
 2 files changed, 46 insertions(+), 26 deletions(-)
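
For illustration, a hypothetical caller could establish rmap for a range of a
large folio with one call instead of per-page page_add_file_rmap() calls; the
names 'start' and 'nr' below are assumptions for the sketch, not code from
this series:

	/* Map 'nr' consecutive pages of the folio, starting at index 'start'. */
	struct page *page = folio_page(folio, start);

	page_add_file_rmap_range(folio, page, nr, vma, false);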

Comments

Matthew Wilcox Jan. 30, 2023, 1:50 p.m. UTC | #1
On Mon, Jan 30, 2023 at 08:55:02PM +0800, Yin Fengwei wrote:
> +++ b/include/linux/rmap.h
> @@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>  		unsigned long address);
>  void page_add_file_rmap(struct page *, struct vm_area_struct *,
>  		bool compound);
> +void page_add_file_rmap_range(struct folio *, struct page *, int len,
> +	struct vm_area_struct *, bool compound);

Again, two tabs please.

> -/**
> - * page_add_file_rmap - add pte mapping to a file page
> - * @page:	the page to add the mapping to
> - * @vma:	the vm area in which the mapping is added
> - * @compound:	charge the page as compound or small page
> - *
> - * The caller needs to hold the pte lock.
> - */

We really should have kernel-doc for this function.
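For instance, along the lines of the old comment (a sketch; wording is only a
suggestion, and it uses the nr_pages name suggested below):

	/**
	 * page_add_file_rmap_range - add pte mappings to a range of pages of a folio
	 * @folio:	the folio that contains the range
	 * @page:	first page of the range to add the mappings to
	 * @nr_pages:	number of pages in the range
	 * @vma:	the vm area in which the mappings are added
	 * @compound:	charge the folio as compound or small page
	 *
	 * The caller needs to hold the pte lock.
	 */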

> -void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
> -		bool compound)
> +void page_add_file_rmap_range(struct folio *folio, struct page *page, int len,
> +	struct vm_area_struct *vma, bool compound)

Indentation

>  {
> -	struct folio *folio = page_folio(page);
>  	atomic_t *mapped = &folio->_nr_pages_mapped;
> -	int nr = 0, nr_pmdmapped = 0;
> +	int nr = 0, nr_pmdmapped = 0, nr_pages = folio_nr_pages(folio);

Should be unsigned.

Also the 'len' parameter should be called nr_pages, and then do
something like:

	if (nr_pages > folio_nr_pages(folio))
		nr_pages = folio_nr_pages(folio);
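
Putting the rename, the unsigned type, and the clamp together, a sketch:

	void page_add_file_rmap_range(struct folio *folio, struct page *page,
			unsigned int nr_pages, struct vm_area_struct *vma,
			bool compound)
	{
		/* Never let the range run past the end of the folio. */
		if (nr_pages > folio_nr_pages(folio))
			nr_pages = folio_nr_pages(folio);
		...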

>  	bool first;
>  
> -	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
> -
> -	/* Is page being mapped by PTE? Is this its first map to be added? */
> -	if (likely(!compound)) {
> -		first = atomic_inc_and_test(&page->_mapcount);
> -		nr = first;
> -		if (first && folio_test_large(folio)) {
> -			nr = atomic_inc_return_relaxed(mapped);
> -			nr = (nr < COMPOUND_MAPPED);
> -		}
> -	} else if (folio_test_pmd_mappable(folio)) {
> -		/* That test is redundant: it's for safety or to optimize out */
> -
> +	if (compound) {

You've dropped the 'unlikely' marker here.  Also I'm not sure why you
switched the order of these two cases around; makes it harder to review.
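i.e. keeping the original shape, something like:

	if (likely(!compound)) {
		/* the new per-page loop over the range goes here */
	} else if (folio_test_pmd_mappable(folio)) {
		/* the existing compound path, unchanged */
	}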

>  		first = atomic_inc_and_test(&folio->_entire_mapcount);
>  		if (first) {
>  			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
>  			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
> -				nr_pmdmapped = folio_nr_pages(folio);
> +				nr_pmdmapped = nr_pages;
>  				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
>  				/* Raced ahead of a remove and another add? */
>  				if (unlikely(nr < 0))
> @@ -1344,6 +1323,20 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>  				nr = 0;
>  			}
>  		}
> +	} else {
> +		int i = 0, new_mapped = 0, count;
> +
> +		count = min_t(int, len, nr_pages - folio_page_idx(folio, page));
> +		do {
> +			first = atomic_inc_and_test(&page->_mapcount);
> +			new_mapped = first;
> +			if (first && folio_test_large(folio)) {
> +				new_mapped = atomic_inc_return_relaxed(mapped);
> +				new_mapped = (new_mapped < COMPOUND_MAPPED);
> +			}
> +			if  (new_mapped)

Only one space between 'if' and '(', please.

I'm not quite sure why you want to pass 'page' in rather than the
offset of the page within the folio, but perhaps that will become
obvious as I read along.
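
For comparison, an offset-based interface might look like this sketch (the
parameter names are only illustrative):

	void page_add_file_rmap_range(struct folio *folio, unsigned int idx,
			unsigned int nr_pages, struct vm_area_struct *vma,
			bool compound)
	{
		/* Derive the first page from the folio and the offset. */
		struct page *page = folio_page(folio, idx);
		...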
Huang, Ying Jan. 31, 2023, 7:24 a.m. UTC | #2
Yin Fengwei <fengwei.yin@intel.com> writes:

> page_add_file_rmap_range() allows to add specific range of
> large folio rmap.
>
> Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
> ---
>  include/linux/rmap.h |  2 ++
>  mm/rmap.c            | 70 ++++++++++++++++++++++++++++----------------
>  2 files changed, 46 insertions(+), 26 deletions(-)
>
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index a6bd1f0a183d..063e0addcbf8 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>  		unsigned long address);
>  void page_add_file_rmap(struct page *, struct vm_area_struct *,
>  		bool compound);
> +void page_add_file_rmap_range(struct folio *, struct page *, int len,
> +	struct vm_area_struct *, bool compound);
>  void page_remove_rmap(struct page *, struct vm_area_struct *,
>  		bool compound);
>  
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 948ca17a96ad..cc7fe3010330 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1301,40 +1301,19 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>  	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>  }
>  
> -/**
> - * page_add_file_rmap - add pte mapping to a file page
> - * @page:	the page to add the mapping to
> - * @vma:	the vm area in which the mapping is added
> - * @compound:	charge the page as compound or small page
> - *
> - * The caller needs to hold the pte lock.
> - */
> -void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
> -		bool compound)
> +void page_add_file_rmap_range(struct folio *folio, struct page *page, int len,
> +	struct vm_area_struct *vma, bool compound)
>  {
> -	struct folio *folio = page_folio(page);
>  	atomic_t *mapped = &folio->_nr_pages_mapped;
> -	int nr = 0, nr_pmdmapped = 0;
> +	int nr = 0, nr_pmdmapped = 0, nr_pages = folio_nr_pages(folio);
>  	bool first;
>  
> -	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
> -
> -	/* Is page being mapped by PTE? Is this its first map to be added? */
> -	if (likely(!compound)) {
> -		first = atomic_inc_and_test(&page->_mapcount);
> -		nr = first;
> -		if (first && folio_test_large(folio)) {
> -			nr = atomic_inc_return_relaxed(mapped);
> -			nr = (nr < COMPOUND_MAPPED);
> -		}
> -	} else if (folio_test_pmd_mappable(folio)) {
> -		/* That test is redundant: it's for safety or to optimize out */
> -
> +	if (compound) {
>  		first = atomic_inc_and_test(&folio->_entire_mapcount);
>  		if (first) {
>  			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
>  			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
> -				nr_pmdmapped = folio_nr_pages(folio);
> +				nr_pmdmapped = nr_pages;
>  				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
>  				/* Raced ahead of a remove and another add? */
>  				if (unlikely(nr < 0))
> @@ -1344,6 +1323,20 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>  				nr = 0;
>  			}
>  		}
> +	} else {
> +		int i = 0, new_mapped = 0, count;
> +
> +		count = min_t(int, len, nr_pages - folio_page_idx(folio, page));

It appears that count isn't used after assignment.

Best Regards,
Huang, Ying

> +		do {
> +			first = atomic_inc_and_test(&page->_mapcount);
> +			new_mapped = first;
> +			if (first && folio_test_large(folio)) {
> +				new_mapped = atomic_inc_return_relaxed(mapped);
> +				new_mapped = (new_mapped < COMPOUND_MAPPED);
> +			}
> +			if  (new_mapped)
> +				nr++;
> +		} while (page++, ++i < len);
>  	}
>  
>  	if (nr_pmdmapped)
> @@ -1355,6 +1348,31 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>  	mlock_vma_folio(folio, vma, compound);
>  }
>  
> +/**
> + * page_add_file_rmap - add pte mapping to a file page
> + * @page:	the page to add the mapping to
> + * @vma:	the vm area in which the mapping is added
> + * @compound:	charge the page as compound or small page
> + *
> + * The caller needs to hold the pte lock.
> + */
> +void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
> +		bool compound)
> +{
> +	struct folio *folio = page_folio(page);
> +	int nr_pages;
> +
> +	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
> +
> +	if (likely(!compound))
> +		nr_pages = 1;
> +	else
> +		nr_pages = folio_nr_pages(folio);
> +
> +	page_add_file_rmap_range(folio, page, nr_pages, vma, compound);
> +}
> +
>  /**
>   * page_remove_rmap - take down pte mapping from a page
>   * @page:	page to remove mapping from
Yin, Fengwei Jan. 31, 2023, 7:48 a.m. UTC | #3
On 1/31/2023 3:24 PM, Huang, Ying wrote:
> Yin Fengwei <fengwei.yin@intel.com> writes:
> 
>> page_add_file_rmap_range() allows to add specific range of
>> large folio rmap.
>>
>> Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
>> ---
>>  include/linux/rmap.h |  2 ++
>>  mm/rmap.c            | 70 ++++++++++++++++++++++++++++----------------
>>  2 files changed, 46 insertions(+), 26 deletions(-)
>>
>> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
>> index a6bd1f0a183d..063e0addcbf8 100644
>> --- a/include/linux/rmap.h
>> +++ b/include/linux/rmap.h
>> @@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
>>  		unsigned long address);
>>  void page_add_file_rmap(struct page *, struct vm_area_struct *,
>>  		bool compound);
>> +void page_add_file_rmap_range(struct folio *, struct page *, int len,
>> +	struct vm_area_struct *, bool compound);
>>  void page_remove_rmap(struct page *, struct vm_area_struct *,
>>  		bool compound);
>>  
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index 948ca17a96ad..cc7fe3010330 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1301,40 +1301,19 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>>  	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>>  }
>>  
>> -/**
>> - * page_add_file_rmap - add pte mapping to a file page
>> - * @page:	the page to add the mapping to
>> - * @vma:	the vm area in which the mapping is added
>> - * @compound:	charge the page as compound or small page
>> - *
>> - * The caller needs to hold the pte lock.
>> - */
>> -void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>> -		bool compound)
>> +void page_add_file_rmap_range(struct folio *folio, struct page *page, int len,
>> +	struct vm_area_struct *vma, bool compound)
>>  {
>> -	struct folio *folio = page_folio(page);
>>  	atomic_t *mapped = &folio->_nr_pages_mapped;
>> -	int nr = 0, nr_pmdmapped = 0;
>> +	int nr = 0, nr_pmdmapped = 0, nr_pages = folio_nr_pages(folio);
>>  	bool first;
>>  
>> -	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
>> -
>> -	/* Is page being mapped by PTE? Is this its first map to be added? */
>> -	if (likely(!compound)) {
>> -		first = atomic_inc_and_test(&page->_mapcount);
>> -		nr = first;
>> -		if (first && folio_test_large(folio)) {
>> -			nr = atomic_inc_return_relaxed(mapped);
>> -			nr = (nr < COMPOUND_MAPPED);
>> -		}
>> -	} else if (folio_test_pmd_mappable(folio)) {
>> -		/* That test is redundant: it's for safety or to optimize out */
>> -
>> +	if (compound) {
>>  		first = atomic_inc_and_test(&folio->_entire_mapcount);
>>  		if (first) {
>>  			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
>>  			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
>> -				nr_pmdmapped = folio_nr_pages(folio);
>> +				nr_pmdmapped = nr_pages;
>>  				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
>>  				/* Raced ahead of a remove and another add? */
>>  				if (unlikely(nr < 0))
>> @@ -1344,6 +1323,20 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>>  				nr = 0;
>>  			}
>>  		}
>> +	} else {
>> +		int i = 0, new_mapped = 0, count;
>> +
>> +		count = min_t(int, len, nr_pages - folio_page_idx(folio, page));
> 
> It appears that count isn't used after assignment.
Oops. This should be used as the loop bound: while (page++, ++i < count);
Maybe this line can be removed if we are sure the range never extends
beyond the folio size? Thanks.
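
With count as the loop bound, the corrected loop would read:

	count = min_t(int, len, nr_pages - folio_page_idx(folio, page));
	do {
		first = atomic_inc_and_test(&page->_mapcount);
		new_mapped = first;
		if (first && folio_test_large(folio)) {
			new_mapped = atomic_inc_return_relaxed(mapped);
			new_mapped = (new_mapped < COMPOUND_MAPPED);
		}
		if (new_mapped)
			nr++;
	} while (page++, ++i < count);	/* bound by count, not len */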

Regards
Yin, Fengwei

> 
> Best Regards,
> Huang, Ying
> 
>> +		do {
>> +			first = atomic_inc_and_test(&page->_mapcount);
>> +			new_mapped = first;
>> +			if (first && folio_test_large(folio)) {
>> +				new_mapped = atomic_inc_return_relaxed(mapped);
>> +				new_mapped = (new_mapped < COMPOUND_MAPPED);
>> +			}
>> +			if  (new_mapped)
>> +				nr++;
>> +		} while (page++, ++i < len);
>>  	}
>>  
>>  	if (nr_pmdmapped)
>> @@ -1355,6 +1348,31 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>>  	mlock_vma_folio(folio, vma, compound);
>>  }
>>  
>> +/**
>> + * page_add_file_rmap - add pte mapping to a file page
>> + * @page:	the page to add the mapping to
>> + * @vma:	the vm area in which the mapping is added
>> + * @compound:	charge the page as compound or small page
>> + *
>> + * The caller needs to hold the pte lock.
>> + */
>> +void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
>> +		bool compound)
>> +{
>> +	struct folio *folio = page_folio(page);
>> +	int nr_pages;
>> +
>> +	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
>> +
>> +	if (likely(!compound))
>> +		nr_pages = 1;
>> +	else
>> +		nr_pages = folio_nr_pages(folio);
>> +
>> +	page_add_file_rmap_range(folio, page, nr_pages, vma, compound);
>> +}
>> +
>>  /**
>>   * page_remove_rmap - take down pte mapping from a page
>>   * @page:	page to remove mapping from

Patch

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a6bd1f0a183d..063e0addcbf8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
 void page_add_file_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
+void page_add_file_rmap_range(struct folio *, struct page *, int len,
+	struct vm_area_struct *, bool compound);
 void page_remove_rmap(struct page *, struct vm_area_struct *,
 		bool compound);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 948ca17a96ad..cc7fe3010330 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1301,40 +1301,19 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
-/**
- * page_add_file_rmap - add pte mapping to a file page
- * @page:	the page to add the mapping to
- * @vma:	the vm area in which the mapping is added
- * @compound:	charge the page as compound or small page
- *
- * The caller needs to hold the pte lock.
- */
-void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
-		bool compound)
+void page_add_file_rmap_range(struct folio *folio, struct page *page, int len,
+	struct vm_area_struct *vma, bool compound)
 {
-	struct folio *folio = page_folio(page);
 	atomic_t *mapped = &folio->_nr_pages_mapped;
-	int nr = 0, nr_pmdmapped = 0;
+	int nr = 0, nr_pmdmapped = 0, nr_pages = folio_nr_pages(folio);
 	bool first;
 
-	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
-
-	/* Is page being mapped by PTE? Is this its first map to be added? */
-	if (likely(!compound)) {
-		first = atomic_inc_and_test(&page->_mapcount);
-		nr = first;
-		if (first && folio_test_large(folio)) {
-			nr = atomic_inc_return_relaxed(mapped);
-			nr = (nr < COMPOUND_MAPPED);
-		}
-	} else if (folio_test_pmd_mappable(folio)) {
-		/* That test is redundant: it's for safety or to optimize out */
-
+	if (compound) {
 		first = atomic_inc_and_test(&folio->_entire_mapcount);
 		if (first) {
 			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
 			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-				nr_pmdmapped = folio_nr_pages(folio);
+				nr_pmdmapped = nr_pages;
 				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
 				/* Raced ahead of a remove and another add? */
 				if (unlikely(nr < 0))
@@ -1344,6 +1323,20 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
 				nr = 0;
 			}
 		}
+	} else {
+		int i = 0, new_mapped = 0, count;
+
+		count = min_t(int, len, nr_pages - folio_page_idx(folio, page));
+		do {
+			first = atomic_inc_and_test(&page->_mapcount);
+			new_mapped = first;
+			if (first && folio_test_large(folio)) {
+				new_mapped = atomic_inc_return_relaxed(mapped);
+				new_mapped = (new_mapped < COMPOUND_MAPPED);
+			}
+			if  (new_mapped)
+				nr++;
+		} while (page++, ++i < len);
 	}
 
 	if (nr_pmdmapped)
@@ -1355,6 +1348,31 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
 	mlock_vma_folio(folio, vma, compound);
 }
 
+/**
+ * page_add_file_rmap - add pte mapping to a file page
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @compound:	charge the page as compound or small page
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
+		bool compound)
+{
+	struct folio *folio = page_folio(page);
+	int nr_pages;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
+
+	if (likely(!compound))
+		nr_pages = 1;
+	else
+		nr_pages = folio_nr_pages(folio);
+
+	page_add_file_rmap_range(folio, page, nr_pages, vma, compound);
+}
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page:	page to remove mapping from
-- 
2.30.2