
[RFC,v3,1/4] filemap: add function filemap_map_folio_range()

Message ID 20230203131636.1648662-2-fengwei.yin@intel.com (mailing list archive)
State New
Series folio based filemap_map_pages()

Commit Message

Yin Fengwei Feb. 3, 2023, 1:16 p.m. UTC
filemap_map_folio_range() maps a partial or full folio. Compared to
the original filemap_map_pages(), it batches the refcount updates and
gets a minor performance improvement for large folios.

A self-cooked will-it-scale.page_fault3-like app (with the file write
fault changed to a read fault) on an xfs filesystem got a 2%
performance gain.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 mm/filemap.c | 88 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 54 insertions(+), 34 deletions(-)
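
For context, a minimal sketch of the batching idea (simplified,
illustrative code only, not the actual patch; the PTE-marker check,
HWPoison handling and pte/addr bookkeeping of the real code are omitted):

	/* Before: one atomic refcount bump per mapped page of the folio. */
	for (i = 0; i < nr_pages; i++) {
		do_set_pte(vmf, folio_page(folio, start + i),
			   addr + i * PAGE_SIZE);
		folio_ref_inc(folio);		/* per-page atomic op */
	}

	/* After: count the pages actually mapped, add the refs in one go. */
	for (i = 0; i < nr_pages; i++) {
		do_set_pte(vmf, folio_page(folio, start + i),
			   addr + i * PAGE_SIZE);
		ref_count++;
	}
	folio_ref_add(folio, ref_count);	/* one atomic op per folio */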

Comments

Matthew Wilcox (Oracle) Feb. 3, 2023, 1:53 p.m. UTC | #1
On Fri, Feb 03, 2023 at 09:16:33PM +0800, Yin Fengwei wrote:
> filemap_map_folio_range() maps a partial or full folio. Compared to
> the original filemap_map_pages(), it batches the refcount updates and
> gets a minor performance improvement for large folios.
> 
> A self-cooked will-it-scale.page_fault3-like app (with the file write
> fault changed to a read fault) on an xfs filesystem got a 2%
> performance gain.

Please delete folio_more_pages() as part of this patch; this was the
only caller.

> +		ret |=	filemap_map_folio_range(vmf, folio,
> +				xas.xa_index - folio->index, addr, nr_pages);

Sorry to nitpick, but there's an extra space between |= and
filemap_map_folio_range() here.
Kirill A. Shutemov Feb. 3, 2023, 2:17 p.m. UTC | #2
On Fri, Feb 03, 2023 at 09:16:33PM +0800, Yin Fengwei wrote:
> @@ -3378,45 +3425,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
>  	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
>  	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
>  	do {
> -again:
> -		page = folio_file_page(folio, xas.xa_index);
> -		if (PageHWPoison(page))
> -			goto unlock;
> -
> -		if (mmap_miss > 0)
> -			mmap_miss--;
> +		unsigned long end;
>  
>  		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
>  		vmf->pte += xas.xa_index - last_pgoff;
>  		last_pgoff = xas.xa_index;
> +		end = folio->index + folio_nr_pages(folio) - 1;
> +		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
>  
> -		/*
> -		 * NOTE: If there're PTE markers, we'll leave them to be
> -		 * handled in the specific fault path, and it'll prohibit the
> -		 * fault-around logic.
> -		 */
> -		if (!pte_none(*vmf->pte))
> -			goto unlock;
> -
> -		/* We're about to handle the fault */
> -		if (vmf->address == addr)
> -			ret = VM_FAULT_NOPAGE;
> +		ret |=	filemap_map_folio_range(vmf, folio,
> +				xas.xa_index - folio->index, addr, nr_pages);
> +		xas.xa_index = end;

IIRC, end here can be beyond end_pgoff. Can it cause an issue? I don't see
it, but just in case.
Yin Fengwei Feb. 4, 2023, 3:25 a.m. UTC | #3
On 2/3/2023 9:53 PM, Matthew Wilcox wrote:
> On Fri, Feb 03, 2023 at 09:16:33PM +0800, Yin Fengwei wrote:
>> filemap_map_folio_range() maps a partial or full folio. Compared to
>> the original filemap_map_pages(), it batches the refcount updates and
>> gets a minor performance improvement for large folios.
>>
>> A self-cooked will-it-scale.page_fault3-like app (with the file write
>> fault changed to a read fault) on an xfs filesystem got a 2%
>> performance gain.
> 
> Please delete folio_more_pages() as part of this patch; this was the
> only caller.
OK. Will update in next version.

> 
>> +		ret |=	filemap_map_folio_range(vmf, folio,
>> +				xas.xa_index - folio->index, addr, nr_pages);
> 
> Sorry to nitpick, but there's an extra space between |= and
> filemap_map_folio_range() here.
OK. Will update in next version.


Regards
Yin, Fengwei

>
Yin Fengwei Feb. 4, 2023, 3:31 a.m. UTC | #4
On 2/3/2023 10:17 PM, Kirill A. Shutemov wrote:
> On Fri, Feb 03, 2023 at 09:16:33PM +0800, Yin Fengwei wrote:
>> @@ -3378,45 +3425,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
>>  	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
>>  	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
>>  	do {
>> -again:
>> -		page = folio_file_page(folio, xas.xa_index);
>> -		if (PageHWPoison(page))
>> -			goto unlock;
>> -
>> -		if (mmap_miss > 0)
>> -			mmap_miss--;
>> +		unsigned long end;
>>  
>>  		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
>>  		vmf->pte += xas.xa_index - last_pgoff;
>>  		last_pgoff = xas.xa_index;
>> +		end = folio->index + folio_nr_pages(folio) - 1;
>> +		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
>>  
>> -		/*
>> -		 * NOTE: If there're PTE markers, we'll leave them to be
>> -		 * handled in the specific fault path, and it'll prohibit the
>> -		 * fault-around logic.
>> -		 */
>> -		if (!pte_none(*vmf->pte))
>> -			goto unlock;
>> -
>> -		/* We're about to handle the fault */
>> -		if (vmf->address == addr)
>> -			ret = VM_FAULT_NOPAGE;
>> +		ret |=	filemap_map_folio_range(vmf, folio,
>> +				xas.xa_index - folio->index, addr, nr_pages);
>> +		xas.xa_index = end;
> 
> IIRC, end here can be beyond end_pgoff. Can it cause an issue? I don't see
> it, but just in case.
Yes, end can go beyond end_pgoff. And it's fine because that will end the
loop and xas.xa_index is not accessed after that. But let me change the
line to:
   xas.xa_index += nr_pages;
to keep the same behavior as before in the next version. Thanks.
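
For example (hypothetical numbers): with a 16-page folio covering indices
16-31 and end_pgoff == 20, if xas.xa_index == 18 then end == 31 and
nr_pages == min(31, 20) - 18 + 1 == 3. Whether xas.xa_index is then set to
31 or to 18 + 3 == 21, it ends up past end_pgoff either way, so
next_map_page() returns NULL and the loop exits as before.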

Regards
Yin, Fengwei

> 
>

Patch

diff --git a/mm/filemap.c b/mm/filemap.c
index 992554c18f1f..f444684db9f2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3351,6 +3351,53 @@  static inline struct folio *next_map_page(struct address_space *mapping,
 				  mapping, xas, end_pgoff);
 }
 
+/*
+ * Map sub-pages range [start_page, start_page + nr_pages) of folio.
+ * start_page is gotten from start by folio_page(folio, start)
+ */
+static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
+			struct folio *folio, unsigned long start,
+			unsigned long addr, unsigned int nr_pages)
+{
+	vm_fault_t ret = 0;
+	struct vm_area_struct *vma = vmf->vma;
+	struct file *file = vma->vm_file;
+	struct page *page = folio_page(folio, start);
+	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+	unsigned int ref_count = 0, count = 0;
+
+	do {
+		if (PageHWPoison(page))
+			continue;
+
+		if (mmap_miss > 0)
+			mmap_miss--;
+
+		/*
+		 * NOTE: If there're PTE markers, we'll leave them to be
+		 * handled in the specific fault path, and it'll prohibit the
+		 * fault-around logic.
+		 */
+		if (!pte_none(*vmf->pte))
+			continue;
+
+		if (vmf->address == addr)
+			ret = VM_FAULT_NOPAGE;
+
+		ref_count++;
+		do_set_pte(vmf, page, addr);
+		update_mmu_cache(vma, addr, vmf->pte);
+	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
+
+	/* Restore the vmf->pte */
+	vmf->pte -= nr_pages;
+
+	folio_ref_add(folio, ref_count);
+	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+	return ret;
+}
+
 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 			     pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
@@ -3361,9 +3408,9 @@  vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
-	struct page *page;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
+	int nr_pages = 0;
 
 	rcu_read_lock();
 	folio = first_map_page(mapping, &xas, end_pgoff);
@@ -3378,45 +3425,18 @@  vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
-again:
-		page = folio_file_page(folio, xas.xa_index);
-		if (PageHWPoison(page))
-			goto unlock;
-
-		if (mmap_miss > 0)
-			mmap_miss--;
+		unsigned long end;
 
 		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		vmf->pte += xas.xa_index - last_pgoff;
 		last_pgoff = xas.xa_index;
+		end = folio->index + folio_nr_pages(folio) - 1;
+		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
-		/*
-		 * NOTE: If there're PTE markers, we'll leave them to be
-		 * handled in the specific fault path, and it'll prohibit the
-		 * fault-around logic.
-		 */
-		if (!pte_none(*vmf->pte))
-			goto unlock;
-
-		/* We're about to handle the fault */
-		if (vmf->address == addr)
-			ret = VM_FAULT_NOPAGE;
+		ret |=	filemap_map_folio_range(vmf, folio,
+				xas.xa_index - folio->index, addr, nr_pages);
+		xas.xa_index = end;
 
-		do_set_pte(vmf, page, addr);
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, addr, vmf->pte);
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			folio_ref_inc(folio);
-			goto again;
-		}
-		folio_unlock(folio);
-		continue;
-unlock:
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			goto again;
-		}
 		folio_unlock(folio);
 		folio_put(folio);
 	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);