diff mbox series

[v4,36/36] mm: Align THP mappings for non-DAX

Message ID 20200515131656.12890-37-willy@infradead.org (mailing list archive)
State New, archived
Headers show
Series Large pages in the page cache | expand

Commit Message

Matthew Wilcox May 15, 2020, 1:16 p.m. UTC
From: William Kucharski <william.kucharski@oracle.com>

When we have the opportunity to use transparent huge pages to map a
file, we want to follow the same rules as DAX.

Signed-off-by: William Kucharski <william.kucharski@oracle.com>
[Inline __thp_get_unmapped_area() into thp_get_unmapped_area()]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/huge_memory.c | 40 +++++++++++++---------------------------
 1 file changed, 13 insertions(+), 27 deletions(-)

Comments

William Kucharski May 26, 2020, 10:05 p.m. UTC | #1
Thinking about this, if the intent is to make THP usable for any
greater than PAGESIZE page size, this routine should probably go back
to taking a size or perhaps order parameter so it could be called to
align addresses accordingly rather than hard code PMD_SIZE.


> On May 15, 2020, at 7:16 AM, Matthew Wilcox <willy@infradead.org> wrote:
> 
> From: William Kucharski <william.kucharski@oracle.com>
> 
> When we have the opportunity to use transparent huge pages to map a
> file, we want to follow the same rules as DAX.
> 
> Signed-off-by: William Kucharski <william.kucharski@oracle.com>
> [Inline __thp_get_unmapped_area() into thp_get_unmapped_area()]
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> mm/huge_memory.c | 40 +++++++++++++---------------------------
> 1 file changed, 13 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 15a86b06befc..e78686b628ae 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -535,30 +535,30 @@ bool is_transparent_hugepage(struct page *page)
> }
> EXPORT_SYMBOL_GPL(is_transparent_hugepage);
> 
> -static unsigned long __thp_get_unmapped_area(struct file *filp,
> -		unsigned long addr, unsigned long len,
> -		loff_t off, unsigned long flags, unsigned long size)
> +unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
> +		unsigned long len, unsigned long pgoff, unsigned long flags)
> {
> +	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
> 	loff_t off_end = off + len;
> -	loff_t off_align = round_up(off, size);
> +	loff_t off_align = round_up(off, PMD_SIZE);
> 	unsigned long len_pad, ret;
> 
> -	if (off_end <= off_align || (off_end - off_align) < size)
> -		return 0;
> +	if (off_end <= off_align || (off_end - off_align) < PMD_SIZE)
> +		goto regular;
> 
> -	len_pad = len + size;
> +	len_pad = len + PMD_SIZE;
> 	if (len_pad < len || (off + len_pad) < off)
> -		return 0;
> +		goto regular;
> 
> 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
> 					      off >> PAGE_SHIFT, flags);
> 
> 	/*
> -	 * The failure might be due to length padding. The caller will retry
> -	 * without the padding.
> +	 * The failure might be due to length padding.  Retry without
> +	 * the padding.
> 	 */
> 	if (IS_ERR_VALUE(ret))
> -		return 0;
> +		goto regular;
> 
> 	/*
> 	 * Do not try to align to THP boundary if allocation at the address
> @@ -567,23 +567,9 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
> 	if (ret == addr)
> 		return addr;
> 
> -	ret += (off - ret) & (size - 1);
> +	ret += (off - ret) & (PMD_SIZE - 1);
> 	return ret;
> -}
> -
> -unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
> -		unsigned long len, unsigned long pgoff, unsigned long flags)
> -{
> -	unsigned long ret;
> -	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
> -
> -	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
> -		goto out;
> -
> -	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
> -	if (ret)
> -		return ret;
> -out:
> +regular:
> 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
> }
> EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
> -- 
> 2.26.2
>
Matthew Wilcox May 26, 2020, 10:20 p.m. UTC | #2
On Tue, May 26, 2020 at 04:05:58PM -0600, William Kucharski wrote:
> Thinking about this, if the intent is to make THP usable for any
> greater than PAGESIZE page size, this routine should probably go back
> to taking a size or perhaps order parameter so it could be called to
> align addresses accordingly rather than hard code PMD_SIZE.

Yes, that's a good point.  For example, on ARM, we'd want to 64kB-align
files for which we could use 64kB pages, but there would be no point doing
that on x86.  I'll revert to the earlier version of this patch that
you sent.  Not sure how best to allow the architecture to tell us what
page sizes are useful to align to, but that earlier patch is a better
base to build on than this version.
diff mbox series

Patch

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 15a86b06befc..e78686b628ae 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -535,30 +535,30 @@  bool is_transparent_hugepage(struct page *page)
 }
 EXPORT_SYMBOL_GPL(is_transparent_hugepage);
 
-static unsigned long __thp_get_unmapped_area(struct file *filp,
-		unsigned long addr, unsigned long len,
-		loff_t off, unsigned long flags, unsigned long size)
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
 	loff_t off_end = off + len;
-	loff_t off_align = round_up(off, size);
+	loff_t off_align = round_up(off, PMD_SIZE);
 	unsigned long len_pad, ret;
 
-	if (off_end <= off_align || (off_end - off_align) < size)
-		return 0;
+	if (off_end <= off_align || (off_end - off_align) < PMD_SIZE)
+		goto regular;
 
-	len_pad = len + size;
+	len_pad = len + PMD_SIZE;
 	if (len_pad < len || (off + len_pad) < off)
-		return 0;
+		goto regular;
 
 	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
 					      off >> PAGE_SHIFT, flags);
 
 	/*
-	 * The failure might be due to length padding. The caller will retry
-	 * without the padding.
+	 * The failure might be due to length padding.  Retry without
+	 * the padding.
 	 */
 	if (IS_ERR_VALUE(ret))
-		return 0;
+		goto regular;
 
 	/*
 	 * Do not try to align to THP boundary if allocation at the address
@@ -567,23 +567,9 @@  static unsigned long __thp_get_unmapped_area(struct file *filp,
 	if (ret == addr)
 		return addr;
 
-	ret += (off - ret) & (size - 1);
+	ret += (off - ret) & (PMD_SIZE - 1);
 	return ret;
-}
-
-unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-	unsigned long ret;
-	loff_t off = (loff_t)pgoff << PAGE_SHIFT;
-
-	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
-		goto out;
-
-	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
-	if (ret)
-		return ret;
-out:
+regular:
 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);