
[3/3] mm: Allow find_get_page to be used for large pages

Message ID 20190905182348.5319-4-willy@infradead.org (mailing list archive)
State New, archived
Headers show
Series Large pages in the page cache

Commit Message

Matthew Wilcox Sept. 5, 2019, 6:23 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Add FGP_PMD to indicate that we're trying to find-or-create a page that
is at least PMD_ORDER in size.  The internal 'conflict' entry usage
is modelled after that in DAX, but the implementations are different
due to DAX using multi-order entries and the page cache using multiple
order-0 entries.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h |  9 +++++
 mm/filemap.c            | 82 +++++++++++++++++++++++++++++++++++++----
 2 files changed, 84 insertions(+), 7 deletions(-)

Comments

Matthew Wilcox Sept. 5, 2019, 10:12 p.m. UTC | #1
On Fri, Sep 06, 2019 at 06:04:05AM +0800, kbuild test robot wrote:
> Hi Matthew,
> 
> Thank you for the patch! Yet something to improve:
> 
> [auto build test ERROR on linus/master]
> [cannot apply to v5.3-rc7 next-20190904]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

It looks like you're not applying these to the -mm tree?  I thought that
was included in -next.
Kirill A. Shutemov Sept. 6, 2019, 12:59 p.m. UTC | #2
On Thu, Sep 05, 2019 at 11:23:48AM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Add FGP_PMD to indicate that we're trying to find-or-create a page that
> is at least PMD_ORDER in size.  The internal 'conflict' entry usage
> is modelled after that in DAX, but the implementations are different
> due to DAX using multi-order entries and the page cache using multiple
> order-0 entries.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  include/linux/pagemap.h |  9 +++++
>  mm/filemap.c            | 82 +++++++++++++++++++++++++++++++++++++----
>  2 files changed, 84 insertions(+), 7 deletions(-)
> 
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index d2147215d415..72101811524c 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -248,6 +248,15 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
>  #define FGP_NOFS		0x00000010
>  #define FGP_NOWAIT		0x00000020
>  #define FGP_FOR_MMAP		0x00000040
> +/*
> + * If you add more flags, increment FGP_ORDER_SHIFT (no further than 25).

Maybe some BUILD_BUG_ON()s to ensure FGP_ORDER_SHIFT is sane?

> + * Do not insert flags above the FGP order bits.
> + */
> +#define FGP_ORDER_SHIFT		7
> +#define FGP_PMD			((PMD_SHIFT - PAGE_SHIFT) << FGP_ORDER_SHIFT)
> +#define FGP_PUD			((PUD_SHIFT - PAGE_SHIFT) << FGP_ORDER_SHIFT)
> +
> +#define fgp_order(fgp)		((fgp) >> FGP_ORDER_SHIFT)
>  
>  struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
>  		int fgp_flags, gfp_t cache_gfp_mask);
> diff --git a/mm/filemap.c b/mm/filemap.c
> index ae3c0a70a8e9..904dfabbea52 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -1572,7 +1572,71 @@ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
>  
>  	return page;
>  }
> -EXPORT_SYMBOL(find_get_entry);
> +
> +static bool pagecache_is_conflict(struct page *page)
> +{
> +	return page == XA_RETRY_ENTRY;
> +}
> +
> +/**
> + * __find_get_page - Find and get a page cache entry.
> + * @mapping: The address_space to search.
> + * @offset: The page cache index.
> + * @order: The minimum order of the entry to return.
> + *
> + * Looks up the page cache entries at @mapping between @offset and
> + * @offset + 2^@order.  If there is a page cache page, it is returned with

Off by one? :P

> + * an increased refcount unless it is smaller than @order.
> + *
> + * If the slot holds a shadow entry of a previously evicted page, or a
> + * swap entry from shmem/tmpfs, it is returned.
> + *
> + * Return: the found page, a value indicating a conflicting page or %NULL if
> + * there are no pages in this range.
> + */
> +static struct page *__find_get_page(struct address_space *mapping,
> +		unsigned long offset, unsigned int order)
> +{
> +	XA_STATE(xas, &mapping->i_pages, offset);
> +	struct page *page;
> +
> +	rcu_read_lock();
> +repeat:
> +	xas_reset(&xas);
> +	page = xas_find(&xas, offset | ((1UL << order) - 1));

Hm. '|' is confusing. What is the expectation about offset?
Is round_down(offset, 1UL << order) expected to be equal to offset?
If so, please use '+' instead of '|'.

> +	if (xas_retry(&xas, page))
> +		goto repeat;
> +	/*
> +	 * A shadow entry of a recently evicted page, or a swap entry from
> +	 * shmem/tmpfs.  Skip it; keep looking for pages.
> +	 */
> +	if (xa_is_value(page))
> +		goto repeat;
> +	if (!page)
> +		goto out;
> +	if (compound_order(page) < order) {
> +		page = XA_RETRY_ENTRY;
> +		goto out;
> +	}

compound_order() is not stable if you don't have a pin on the page.
Check it after page_cache_get_speculative().

> +
> +	if (!page_cache_get_speculative(page))
> +		goto repeat;
> +
> +	/*
> +	 * Has the page moved or been split?
> +	 * This is part of the lockless pagecache protocol. See
> +	 * include/linux/pagemap.h for details.
> +	 */
> +	if (unlikely(page != xas_reload(&xas))) {
> +		put_page(page);
> +		goto repeat;
> +	}
> +	page = find_subpage(page, offset);
> +out:
> +	rcu_read_unlock();
> +
> +	return page;
> +}
>  
>  /**
>   * find_lock_entry - locate, pin and lock a page cache entry
> @@ -1614,12 +1678,12 @@ EXPORT_SYMBOL(find_lock_entry);
>   * pagecache_get_page - find and get a page reference
>   * @mapping: the address_space to search
>   * @offset: the page index
> - * @fgp_flags: PCG flags
> + * @fgp_flags: FGP flags
>   * @gfp_mask: gfp mask to use for the page cache data page allocation
>   *
>   * Looks up the page cache slot at @mapping & @offset.
>   *
> - * PCG flags modify how the page is returned.
> + * FGP flags modify how the page is returned.
>   *
>   * @fgp_flags can be:
>   *
> @@ -1632,6 +1696,10 @@ EXPORT_SYMBOL(find_lock_entry);
>   * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
>   *   its own locking dance if the page is already in cache, or unlock the page
>   *   before returning if we had to add the page to pagecache.
> + * - FGP_PMD: We're only interested in pages at PMD granularity.  If there
> + *   is no page here (and FGP_CREAT is set), we'll create one large enough.
> + *   If there is a smaller page in the cache that overlaps the PMD page, we
> + *   return %NULL and do not attempt to create a page.

Is it really the best interface?

Maybe allow the user to ask for a bitmask of allowed orders? For THP,
order-0 is fine if order-9 has failed.
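
A purely hypothetical sketch of that alternative, for illustration only
(pagecache_get_page_orders() and FGP_ORDER() are invented names, not part
of this series):

	/* accept a PMD-sized page, or fall back to order-0 if that fails */
	#define FGP_ORDER(n)	(1UL << (n))

	page = pagecache_get_page_orders(mapping, index, FGP_CREAT | FGP_LOCK,
			FGP_ORDER(PMD_SHIFT - PAGE_SHIFT) | FGP_ORDER(0),
			gfp_mask);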

>   *
>   * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
>   * if the GFP flags specified for FGP_CREAT are atomic.
> @@ -1646,9 +1714,9 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
>  	struct page *page;
>  
>  repeat:
> -	page = find_get_entry(mapping, offset);
> -	if (xa_is_value(page))
> -		page = NULL;
> +	page = __find_get_page(mapping, offset, fgp_order(fgp_flags));
> +	if (pagecache_is_conflict(page))
> +		return NULL;
>  	if (!page)
>  		goto no_page;
>  
> @@ -1682,7 +1750,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
>  		if (fgp_flags & FGP_NOFS)
>  			gfp_mask &= ~__GFP_FS;
>  
> -		page = __page_cache_alloc(gfp_mask);
> +		page = __page_cache_alloc_order(gfp_mask, fgp_order(fgp_flags));
>  		if (!page)
>  			return NULL;
>  
> -- 
> 2.23.0.rc1
>
Matthew Wilcox Sept. 6, 2019, 1:41 p.m. UTC | #3
On Fri, Sep 06, 2019 at 03:59:28PM +0300, Kirill A. Shutemov wrote:
> > @@ -248,6 +248,15 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
> >  #define FGP_NOFS		0x00000010
> >  #define FGP_NOWAIT		0x00000020
> >  #define FGP_FOR_MMAP		0x00000040
> > +/*
> > + * If you add more flags, increment FGP_ORDER_SHIFT (no further than 25).
> 
> Maybe some BUILD_BUG_ON()s to ensure FGP_ORDER_SHIFT is sane?

Yeah, probably a good idea.
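
Something along these lines, for instance -- a sketch of the suggested
checks, with the placement (e.g. at the top of pagecache_get_page()) being
an assumption:

	/* flag bits must stay below the order field, and a PUD order must
	 * survive the round trip through fgp_order()
	 */
	BUILD_BUG_ON(FGP_FOR_MMAP >= (1 << FGP_ORDER_SHIFT));
	BUILD_BUG_ON(fgp_order(FGP_PUD) != PUD_SHIFT - PAGE_SHIFT);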

> > +/**
> > + * __find_get_page - Find and get a page cache entry.
> > + * @mapping: The address_space to search.
> > + * @offset: The page cache index.
> > + * @order: The minimum order of the entry to return.
> > + *
> > + * Looks up the page cache entries at @mapping between @offset and
> > + * @offset + 2^@order.  If there is a page cache page, it is returned with
> 
> Off by one? :P

Hah!  I thought it reasonable to be ambiguous in the English description
...  it's not entirely uncommon to describe something being 'between A
and B' when meaning ">= A and < B".

> > +static struct page *__find_get_page(struct address_space *mapping,
> > +		unsigned long offset, unsigned int order)
> > +{
> > +	XA_STATE(xas, &mapping->i_pages, offset);
> > +	struct page *page;
> > +
> > +	rcu_read_lock();
> > +repeat:
> > +	xas_reset(&xas);
> > +	page = xas_find(&xas, offset | ((1UL << order) - 1));
> 
> Hm. '|' is confusing. What is the expectation about offset?
> Is round_down(offset, 1UL << order) expected to be equal to offset?
> If so, please use '+' instead of '|'.

Might make sense to put in ...

	VM_BUG_ON(offset & ((1UL << order) - 1));

> > +	if (xas_retry(&xas, page))
> > +		goto repeat;
> > +	/*
> > +	 * A shadow entry of a recently evicted page, or a swap entry from
> > +	 * shmem/tmpfs.  Skip it; keep looking for pages.
> > +	 */
> > +	if (xa_is_value(page))
> > +		goto repeat;
> > +	if (!page)
> > +		goto out;
> > +	if (compound_order(page) < order) {
> > +		page = XA_RETRY_ENTRY;
> > +		goto out;
> > +	}
> 
> compound_order() is not stable if you don't have a pin on the page.
> Check it after page_cache_get_speculative().

Maybe check both before and after?  If we check it before, we don't bother
to bump the refcount on a page which is too small.
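
Roughly this shape, as a sketch of the idea rather than the final code:

	if (compound_order(page) < order) {	/* cheap pre-check, no ref held */
		page = XA_RETRY_ENTRY;
		goto out;
	}

	if (!page_cache_get_speculative(page))
		goto repeat;

	/* re-check now that we hold a reference and the order is stable */
	if (compound_order(page) < order) {
		put_page(page);
		page = XA_RETRY_ENTRY;
		goto out;
	}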

> > @@ -1632,6 +1696,10 @@ EXPORT_SYMBOL(find_lock_entry);
> >   * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
> >   *   its own locking dance if the page is already in cache, or unlock the page
> >   *   before returning if we had to add the page to pagecache.
> > + * - FGP_PMD: We're only interested in pages at PMD granularity.  If there
> > + *   is no page here (and FGP_CREAT is set), we'll create one large enough.
> > + *   If there is a smaller page in the cache that overlaps the PMD page, we
> > + *   return %NULL and do not attempt to create a page.
> 
> Is it really the best inteface?
> 
> Maybe allow user to ask bitmask of allowed orders? For THP order-0 is fine
> if order-9 has failed.

That's the semantics that filemap_huge_fault() wants.  If the page isn't
available at order-9, it needs to return VM_FAULT_FALLBACK (and the VM
will call into filemap_fault() to handle the regular sized fault).

Now, maybe there are other users who want to specify "create a page of
this size if you can, but if there's already something there smaller,
return that".  We can add another FGP flag when those show up ;-)
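
So the caller side reduces to something like this -- a sketch of the
intended filemap_huge_fault() flow, not code from this series (index is
assumed to be PMD-aligned):

	page = pagecache_get_page(mapping, index,
				  FGP_PMD | FGP_CREAT | FGP_LOCK,
				  mapping_gfp_mask(mapping));
	if (!page)
		return VM_FAULT_FALLBACK;	/* conflicting small page or no memory */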


Thanks for the review.
Kirill A. Shutemov Sept. 6, 2019, 1:52 p.m. UTC | #4
On Fri, Sep 06, 2019 at 06:41:45AM -0700, Matthew Wilcox wrote:
> On Fri, Sep 06, 2019 at 03:59:28PM +0300, Kirill A. Shutemov wrote:
> > > +/**
> > > + * __find_get_page - Find and get a page cache entry.
> > > + * @mapping: The address_space to search.
> > > + * @offset: The page cache index.
> > > + * @order: The minimum order of the entry to return.
> > > + *
> > > + * Looks up the page cache entries at @mapping between @offset and
> > > + * @offset + 2^@order.  If there is a page cache page, it is returned with
> > 
> > Off by one? :P
> 
> Hah!  I thought it reasonable to be ambiguous in the English description
> ...  it's not entirely uncommon to describe something being 'between A
> and B' when meaning ">= A and < B".

It is reasonable. I was just nitpicking.

> > > +	if (compound_order(page) < order) {
> > > +		page = XA_RETRY_ENTRY;
> > > +		goto out;
> > > +	}
> > 
> > compound_order() is not stable if you don't have a pin on the page.
> > Check it after page_cache_get_speculative().
> 
> Maybe check both before and after?  If we check it before, we don't bother
> to bump the refcount on a page which is too small.

Makes sense. False-positives should be rare enough to ignore them.

> > > @@ -1632,6 +1696,10 @@ EXPORT_SYMBOL(find_lock_entry);
> > >   * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
> > >   *   its own locking dance if the page is already in cache, or unlock the page
> > >   *   before returning if we had to add the page to pagecache.
> > > + * - FGP_PMD: We're only interested in pages at PMD granularity.  If there
> > > + *   is no page here (and FGP_CREAT is set), we'll create one large enough.
> > > + *   If there is a smaller page in the cache that overlaps the PMD page, we
> > > + *   return %NULL and do not attempt to create a page.
> > 
> > Is it really the best interface?
> > 
> > Maybe allow the user to ask for a bitmask of allowed orders? For THP,
> > order-0 is fine if order-9 has failed.
> 
> That's the semantics that filemap_huge_fault() wants.  If the page isn't
> available at order-9, it needs to return VM_FAULT_FALLBACK (and the VM
> will call into filemap_fault() to handle the regular sized fault).

Ideally, we should not have a division between ->fault and ->huge_fault.
Integrating them would give a shorter fallback loop, and a more flexible
interface here would be beneficial.

But I guess that's out of scope for this patchset.
Matthew Wilcox Sept. 6, 2019, 3:22 p.m. UTC | #5
On Fri, Sep 06, 2019 at 04:52:15PM +0300, Kirill A. Shutemov wrote:
> On Fri, Sep 06, 2019 at 06:41:45AM -0700, Matthew Wilcox wrote:
> > On Fri, Sep 06, 2019 at 03:59:28PM +0300, Kirill A. Shutemov wrote:
> > > > + * - FGP_PMD: We're only interested in pages at PMD granularity.  If there
> > > > + *   is no page here (and FGP_CREAT is set), we'll create one large enough.
> > > > + *   If there is a smaller page in the cache that overlaps the PMD page, we
> > > > + *   return %NULL and do not attempt to create a page.
> > > 
> > > Is it really the best interface?
> > > 
> > > Maybe allow the user to ask for a bitmask of allowed orders? For THP,
> > > order-0 is fine if order-9 has failed.
> > 
> > That's the semantics that filemap_huge_fault() wants.  If the page isn't
> > available at order-9, it needs to return VM_FAULT_FALLBACK (and the VM
> > will call into filemap_fault() to handle the regular sized fault).
> 
> Ideally, we should not have a division between ->fault and ->huge_fault.
> Integrating them would give a shorter fallback loop, and a more flexible
> interface here would be beneficial.
> 
> But I guess that's out of scope for this patchset.

Heh, just a little bit ... there are about 150 occurrences of
vm_operations_struct in the kernel, and I don't fancy one bit converting
them all to use ->huge_fault instead of ->fault!
Chen, Rong A Sept. 9, 2019, 12:42 a.m. UTC | #6
On 9/6/19 6:12 AM, Matthew Wilcox wrote:
> On Fri, Sep 06, 2019 at 06:04:05AM +0800, kbuild test robot wrote:
>> Hi Matthew,
>>
>> Thank you for the patch! Yet something to improve:
>>
>> [auto build test ERROR on linus/master]
>> [cannot apply to v5.3-rc7 next-20190904]
>> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
> It looks like you're not applying these to the -mm tree?  I thought that
> was included in -next.

Hi,

Sorry for the inconvenience, we'll look into it. 0day-CI introduced the
'--base' option to record base tree info in format-patch.
Could you kindly add it to help the robot pick the right base tree? Please
see https://stackoverflow.com/a/37406982

Best Regards,
Rong Chen

Matthew Wilcox Sept. 9, 2019, 1:12 a.m. UTC | #7
On Mon, Sep 09, 2019 at 08:42:03AM +0800, Rong Chen wrote:
> 
> 
> On 9/6/19 6:12 AM, Matthew Wilcox wrote:
> > On Fri, Sep 06, 2019 at 06:04:05AM +0800, kbuild test robot wrote:
> > > Hi Matthew,
> > > 
> > > Thank you for the patch! Yet something to improve:
> > > 
> > > [auto build test ERROR on linus/master]
> > > [cannot apply to v5.3-rc7 next-20190904]
> > > [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
> > It looks like you're not applying these to the -mm tree?  I thought that
> > was included in -next.
> 
> Hi,
> 
> Sorry for the inconvenience, we'll look into it. 0day-CI introduced the
> '--base' option to record base tree info in format-patch.
> Could you kindly add it to help the robot pick the right base tree? Please see
> https://stackoverflow.com/a/37406982

There isn't a stable git base tree to work from with mmotm:

https://www.ozlabs.org/~akpm/mmotm/mmotm-readme.txt

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d2147215d415..72101811524c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -248,6 +248,15 @@  pgoff_t page_cache_prev_miss(struct address_space *mapping,
 #define FGP_NOFS		0x00000010
 #define FGP_NOWAIT		0x00000020
 #define FGP_FOR_MMAP		0x00000040
+/*
+ * If you add more flags, increment FGP_ORDER_SHIFT (no further than 25).
+ * Do not insert flags above the FGP order bits.
+ */
+#define FGP_ORDER_SHIFT		7
+#define FGP_PMD			((PMD_SHIFT - PAGE_SHIFT) << FGP_ORDER_SHIFT)
+#define FGP_PUD			((PUD_SHIFT - PAGE_SHIFT) << FGP_ORDER_SHIFT)
+
+#define fgp_order(fgp)		((fgp) >> FGP_ORDER_SHIFT)
 
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 		int fgp_flags, gfp_t cache_gfp_mask);
diff --git a/mm/filemap.c b/mm/filemap.c
index ae3c0a70a8e9..904dfabbea52 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1572,7 +1572,71 @@  struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 
 	return page;
 }
-EXPORT_SYMBOL(find_get_entry);
+
+static bool pagecache_is_conflict(struct page *page)
+{
+	return page == XA_RETRY_ENTRY;
+}
+
+/**
+ * __find_get_page - Find and get a page cache entry.
+ * @mapping: The address_space to search.
+ * @offset: The page cache index.
+ * @order: The minimum order of the entry to return.
+ *
+ * Looks up the page cache entries at @mapping between @offset and
+ * @offset + 2^@order.  If there is a page cache page, it is returned with
+ * an increased refcount unless it is smaller than @order.
+ *
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
+ *
+ * Return: the found page, a value indicating a conflicting page or %NULL if
+ * there are no pages in this range.
+ */
+static struct page *__find_get_page(struct address_space *mapping,
+		unsigned long offset, unsigned int order)
+{
+	XA_STATE(xas, &mapping->i_pages, offset);
+	struct page *page;
+
+	rcu_read_lock();
+repeat:
+	xas_reset(&xas);
+	page = xas_find(&xas, offset | ((1UL << order) - 1));
+	if (xas_retry(&xas, page))
+		goto repeat;
+	/*
+	 * A shadow entry of a recently evicted page, or a swap entry from
+	 * shmem/tmpfs.  Skip it; keep looking for pages.
+	 */
+	if (xa_is_value(page))
+		goto repeat;
+	if (!page)
+		goto out;
+	if (compound_order(page) < order) {
+		page = XA_RETRY_ENTRY;
+		goto out;
+	}
+
+	if (!page_cache_get_speculative(page))
+		goto repeat;
+
+	/*
+	 * Has the page moved or been split?
+	 * This is part of the lockless pagecache protocol. See
+	 * include/linux/pagemap.h for details.
+	 */
+	if (unlikely(page != xas_reload(&xas))) {
+		put_page(page);
+		goto repeat;
+	}
+	page = find_subpage(page, offset);
+out:
+	rcu_read_unlock();
+
+	return page;
+}
 
 /**
  * find_lock_entry - locate, pin and lock a page cache entry
@@ -1614,12 +1678,12 @@  EXPORT_SYMBOL(find_lock_entry);
  * pagecache_get_page - find and get a page reference
  * @mapping: the address_space to search
  * @offset: the page index
- * @fgp_flags: PCG flags
+ * @fgp_flags: FGP flags
  * @gfp_mask: gfp mask to use for the page cache data page allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
- * PCG flags modify how the page is returned.
+ * FGP flags modify how the page is returned.
  *
  * @fgp_flags can be:
  *
@@ -1632,6 +1696,10 @@  EXPORT_SYMBOL(find_lock_entry);
  * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do
  *   its own locking dance if the page is already in cache, or unlock the page
  *   before returning if we had to add the page to pagecache.
+ * - FGP_PMD: We're only interested in pages at PMD granularity.  If there
+ *   is no page here (and FGP_CREAT is set), we'll create one large enough.
+ *   If there is a smaller page in the cache that overlaps the PMD page, we
+ *   return %NULL and do not attempt to create a page.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1646,9 +1714,9 @@  struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 	struct page *page;
 
 repeat:
-	page = find_get_entry(mapping, offset);
-	if (xa_is_value(page))
-		page = NULL;
+	page = __find_get_page(mapping, offset, fgp_order(fgp_flags));
+	if (pagecache_is_conflict(page))
+		return NULL;
 	if (!page)
 		goto no_page;
 
@@ -1682,7 +1750,7 @@  struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 		if (fgp_flags & FGP_NOFS)
 			gfp_mask &= ~__GFP_FS;
 
-		page = __page_cache_alloc(gfp_mask);
+		page = __page_cache_alloc_order(gfp_mask, fgp_order(fgp_flags));
 		if (!page)
 			return NULL;