[v4,04/36] mm: Introduce thp_size

Message ID: 20200515131656.12890-5-willy@infradead.org (mailing list archive)
State: New, archived
Series: Large pages in the page cache

Commit Message

Matthew Wilcox May 15, 2020, 1:16 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

This is like page_size(), but compiles down to just PAGE_SIZE if
transparent hugepages are disabled.  Convert the users of
hpage_nr_pages() that would prefer this interface.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 drivers/nvdimm/btt.c    | 4 +---
 drivers/nvdimm/pmem.c   | 6 ++----
 include/linux/huge_mm.h | 7 +++++++
 mm/internal.h           | 2 +-
 mm/page_io.c            | 2 +-
 mm/page_vma_mapped.c    | 4 ++--
 6 files changed, 14 insertions(+), 11 deletions(-)
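
For readers skimming the series: a condensed sketch of the two
definitions this patch adds, plus a hypothetical call site (the len
assignment below is illustrative, not taken from this series), showing
why the converted callers get simpler:

    /* CONFIG_TRANSPARENT_HUGEPAGE=y: a real inline that honours the
     * compound order, so a THP reports its full size in bytes.
     */
    static inline unsigned long thp_size(struct page *page)
    {
            return page_size(page); /* PAGE_SIZE << compound_order(page) */
    }

    /* THP disabled: a plain macro, so the result is a compile-time
     * constant and the old hpage_nr_pages(page) * PAGE_SIZE multiply
     * folds away entirely.
     */
    #define thp_size(x)     PAGE_SIZE

    /* Hypothetical caller, before and after the conversion: */
    len = hpage_nr_pages(page) * PAGE_SIZE;   /* before */
    len = thp_size(page);                     /* after  */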

Comments

David Hildenbrand May 15, 2020, 1:38 p.m. UTC | #1
On 15.05.20 15:16, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> This is like page_size(), but compiles down to just PAGE_SIZE if THP
> are disabled.  Convert the users of hpage_nr_pages() which would prefer
> this interface.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  drivers/nvdimm/btt.c    | 4 +---
>  drivers/nvdimm/pmem.c   | 6 ++----
>  include/linux/huge_mm.h | 7 +++++++
>  mm/internal.h           | 2 +-
>  mm/page_io.c            | 2 +-
>  mm/page_vma_mapped.c    | 4 ++--
>  6 files changed, 14 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
> index 3b09419218d6..78e8d972d45a 100644
> --- a/drivers/nvdimm/btt.c
> +++ b/drivers/nvdimm/btt.c
> @@ -1488,10 +1488,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
>  {
>  	struct btt *btt = bdev->bd_disk->private_data;
>  	int rc;
> -	unsigned int len;
>  
> -	len = hpage_nr_pages(page) * PAGE_SIZE;
> -	rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
> +	rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
>  	if (rc == 0)
>  		page_endio(page, op_is_write(op), 0);
>  
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index 2df6994acf83..d511504d07af 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -235,11 +235,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
>  	blk_status_t rc;
>  
>  	if (op_is_write(op))
> -		rc = pmem_do_write(pmem, page, 0, sector,
> -				   hpage_nr_pages(page) * PAGE_SIZE);
> +		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
>  	else
> -		rc = pmem_do_read(pmem, page, 0, sector,
> -				   hpage_nr_pages(page) * PAGE_SIZE);
> +		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
>  	/*
>  	 * The ->rw_page interface is subtle and tricky.  The core
>  	 * retries on any error, so we can only invoke page_endio() in
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 6bec4b5b61e1..e944f9757349 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -271,6 +271,11 @@ static inline int hpage_nr_pages(struct page *page)
>  	return compound_nr(page);
>  }
>  
> +static inline unsigned long thp_size(struct page *page)
> +{
> +	return page_size(page);
> +}
> +
>  struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
>  		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
>  struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
> @@ -329,6 +334,8 @@ static inline int hpage_nr_pages(struct page *page)
>  	return 1;
>  }
>  
> +#define thp_size(x)		PAGE_SIZE
> +
>  static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
>  {
>  	return false;
> diff --git a/mm/internal.h b/mm/internal.h
> index f762a34b0c57..5efb13d5c226 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -386,7 +386,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
>  	unsigned long start, end;
>  
>  	start = __vma_address(page, vma);
> -	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
> +	end = start + thp_size(page) - PAGE_SIZE;
>  
>  	/* page should be within @vma mapping range */
>  	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
> diff --git a/mm/page_io.c b/mm/page_io.c
> index 76965be1d40e..dd935129e3cb 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -41,7 +41,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
>  		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
>  		bio->bi_end_io = end_io;
>  
> -		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
> +		bio_add_page(bio, page, thp_size(page), 0);
>  	}
>  	return bio;
>  }
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index 719c35246cfa..e65629c056e8 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -227,7 +227,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  			if (pvmw->address >= pvmw->vma->vm_end ||
>  			    pvmw->address >=
>  					__vma_address(pvmw->page, pvmw->vma) +
> -					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
> +					thp_size(pvmw->page))
>  				return not_found(pvmw);
>  			/* Did we cross page table boundary? */
>  			if (pvmw->address % PMD_SIZE == 0) {
> @@ -268,7 +268,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
>  	unsigned long start, end;
>  
>  	start = __vma_address(page, vma);
> -	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
> +	end = start + thp_size(page) - PAGE_SIZE;
>  
>  	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
>  		return 0;
> 

Reviewed-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 3b09419218d6..78e8d972d45a 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1488,10 +1488,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 	int rc;
-	unsigned int len;
 
-	len = hpage_nr_pages(page) * PAGE_SIZE;
-	rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
+	rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
 	if (rc == 0)
 		page_endio(page, op_is_write(op), 0);
 
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2df6994acf83..d511504d07af 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -235,11 +235,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	blk_status_t rc;
 
 	if (op_is_write(op))
-		rc = pmem_do_write(pmem, page, 0, sector,
-				   hpage_nr_pages(page) * PAGE_SIZE);
+		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
 	else
-		rc = pmem_do_read(pmem, page, 0, sector,
-				   hpage_nr_pages(page) * PAGE_SIZE);
+		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
 	/*
 	 * The ->rw_page interface is subtle and tricky.  The core
 	 * retries on any error, so we can only invoke page_endio() in
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 6bec4b5b61e1..e944f9757349 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -271,6 +271,11 @@ static inline int hpage_nr_pages(struct page *page)
 	return compound_nr(page);
 }
 
+static inline unsigned long thp_size(struct page *page)
+{
+	return page_size(page);
+}
+
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
@@ -329,6 +334,8 @@ static inline int hpage_nr_pages(struct page *page)
 	return 1;
 }
 
+#define thp_size(x)		PAGE_SIZE
+
 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
 	return false;
diff --git a/mm/internal.h b/mm/internal.h
index f762a34b0c57..5efb13d5c226 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -386,7 +386,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 	unsigned long start, end;
 
 	start = __vma_address(page, vma);
-	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+	end = start + thp_size(page) - PAGE_SIZE;
 
 	/* page should be within @vma mapping range */
 	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
diff --git a/mm/page_io.c b/mm/page_io.c
index 76965be1d40e..dd935129e3cb 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -41,7 +41,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
 		bio->bi_end_io = end_io;
 
-		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
+		bio_add_page(bio, page, thp_size(page), 0);
 	}
 	return bio;
 }
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 719c35246cfa..e65629c056e8 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -227,7 +227,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			if (pvmw->address >= pvmw->vma->vm_end ||
 			    pvmw->address >=
 					__vma_address(pvmw->page, pvmw->vma) +
-					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
+					thp_size(pvmw->page))
 				return not_found(pvmw);
 			/* Did we cross page table boundary? */
 			if (pvmw->address % PMD_SIZE == 0) {
@@ -268,7 +268,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	unsigned long start, end;
 
 	start = __vma_address(page, vma);
-	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
+	end = start + thp_size(page) - PAGE_SIZE;
 
 	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
 		return 0;