mm: Introduce page_size()

Message ID 20190510181242.24580-1-willy@infradead.org (mailing list archive)
State New, archived
Series mm: Introduce page_size()

Commit Message

Matthew Wilcox May 10, 2019, 6:12 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

It's unnecessarily hard to find out the size of a potentially large page.
Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 arch/arm/mm/flush.c                           | 3 +--
 arch/arm64/mm/flush.c                         | 3 +--
 arch/ia64/mm/init.c                           | 2 +-
 drivers/staging/android/ion/ion_system_heap.c | 4 ++--
 drivers/target/tcm_fc/tfc_io.c                | 3 +--
 fs/io_uring.c                                 | 2 +-
 include/linux/hugetlb.h                       | 2 +-
 include/linux/mm.h                            | 9 +++++++++
 lib/iov_iter.c                                | 2 +-
 mm/kasan/common.c                             | 8 +++-----
 mm/nommu.c                                    | 2 +-
 mm/page_vma_mapped.c                          | 3 +--
 mm/rmap.c                                     | 6 ++----
 mm/slob.c                                     | 2 +-
 mm/slub.c                                     | 4 ++--
 net/xdp/xsk.c                                 | 2 +-
 16 files changed, 29 insertions(+), 28 deletions(-)

Comments

Michal Hocko May 13, 2019, 10:56 a.m. UTC | #1
On Fri 10-05-19 11:12:42, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> It's unnecessarily hard to find out the size of a potentially large page.
> Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.

I like the new helper. The conversion looks like something for
coccinelle.
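
Something like this should do it (untested; just the rule, sketched out):

@@
expression page;
@@
- PAGE_SIZE << compound_order(page)
+ page_size(page)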

> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

I haven't checked for other potential places to convert but the ones in
the patch look ok to me.

Acked-by: Michal Hocko <mhocko@suse.com>

Kirill Tkhai May 13, 2019, 12:43 p.m. UTC | #2
Hi, Matthew,

On 10.05.2019 21:12, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> It's unnecessarily hard to find out the size of a potentially large page.
> Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> [...]
> +/*
> + * Returns the number of bytes in this potentially compound page.
> + * Must be called with the head page, not a tail page.
> + */
> +static inline unsigned long page_size(struct page *page)
> +{

Maybe we should enforce the commented head-page limitation with a VM_BUG_ON()?

Kirill
William Kucharski May 14, 2019, 11:53 a.m. UTC | #3
> On May 13, 2019, at 6:43 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> 
> Hi, Matthew,
> 
> Maybe we should enforce the commented head-page limitation with a VM_BUG_ON()?
> 
> Kirill

I like that idea as well; even if all the present callers are well-vetted, it's
inevitable someone will come along and call page_size() without reading the
head-only comment first.
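
Something along these lines, just as a sketch (VM_BUG_ON_PAGE() would also
give us the page dump):

static inline unsigned long page_size(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return (unsigned long)PAGE_SIZE << compound_order(page);
}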
Andrew Morton May 22, 2019, 8:03 p.m. UTC | #4
On Mon, 13 May 2019 15:43:08 +0300 Kirill Tkhai <ktkhai@virtuozzo.com> wrote:

> > +/*
> > + * Returns the number of bytes in this potentially compound page.
> > + * Must be called with the head page, not a tail page.
> > + */
> > +static inline unsigned long page_size(struct page *page)
> > +{
> 
> Maybe we should enforce the commented head-page limitation with a VM_BUG_ON()?

VM_WARN_ONCE() if possible, please.

The code bloat from that is likely to be distressing.  Perhaps
adding an out-of-line compound_order_head_only() for this reason would
help.  In which case, just uninline the whole thing...

> +	return (unsigned long)PAGE_SIZE << compound_order(page);
> + }

Also, I suspect the cast here is unneeded.  Architectures used to
differ in the type of PAGE_SIZE but please tell me that's been fixed
for a long time...
Matthew Wilcox May 23, 2019, 1:55 a.m. UTC | #5
On Wed, May 22, 2019 at 01:03:18PM -0700, Andrew Morton wrote:
> On Mon, 13 May 2019 15:43:08 +0300 Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> > > +/*
> > > + * Returns the number of bytes in this potentially compound page.
> > > + * Must be called with the head page, not a tail page.
> > > + */
> > > +static inline unsigned long page_size(struct page *page)
> > > +{
> > 
> > Maybe we should enforce the commented head-page limitation with a VM_BUG_ON()?
> 
> VM_WARN_ONCE() if possible, please.
> 
> The code bloat from that is likely to be distressing.  Perhaps
> adding an out-of-line compound_order_head_only() for this reason would
> help.  In which case, just uninline the whole thing...

I think this is unnecessary.  Nobody's currently calling the code it
replaces on a tail page, and the plan is to reduce or eliminate the
number of places where parts of the system see tail pages.  I strongly
oppose adding any kind of check here.

> > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > + }
> 
> Also, I suspect the cast here is unneeded.  Architectures used to
> > differ in the type of PAGE_SIZE but please tell me that's been fixed
> > for a long time...

It's an unsigned int for most, if not all, architectures.  On PowerPC,
for example, a PUD page is larger than 4GB.  So let's just include the cast
and not have to worry about undefined semantics screwing us over.
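
To make that concrete (illustrative numbers only, not from any particular
config): with PAGE_SIZE defined as a plain (1 << 16) and an order-18
(16GB) PUD page,

	size = PAGE_SIZE << compound_order(page);

shifts a 32-bit int past bit 31, which is undefined behaviour, while

	size = (unsigned long)PAGE_SIZE << compound_order(page);

widens first and yields the full 16GB.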
Andrew Morton May 23, 2019, 9:33 p.m. UTC | #6
On Wed, 22 May 2019 18:55:11 -0700 Matthew Wilcox <willy@infradead.org> wrote:

> > > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > > + }
> > 
> > Also, I suspect the cast here is unneeded.  Architectures used to
> > differ in the type of PAGE_SIZE but please tell me that's been fixed
> > for a long time...
> 
> > It's an unsigned int for most, if not all, architectures.  On PowerPC,
> > for example, a PUD page is larger than 4GB.  So let's just include the cast
> and not have to worry about undefined semantics screwing us over.

I think you'll find that PAGE_SIZE is unsigned long on all
architectures.
Matthew Wilcox May 23, 2019, 9:44 p.m. UTC | #7
On Thu, May 23, 2019 at 02:33:15PM -0700, Andrew Morton wrote:
> On Wed, 22 May 2019 18:55:11 -0700 Matthew Wilcox <willy@infradead.org> wrote:
> 
> > > > +	return (unsigned long)PAGE_SIZE << compound_order(page);
> > > > + }
> > > 
> > > Also, I suspect the cast here is unneeded.  Architectures used to
> > > differ in the type of PAGE_SIZE but please tell me that's been fixed
> > > for a long time...
> > 
> > It's an unsigned int for most, if not all, architectures.  On PowerPC,
> > for example, a PUD page is larger than 4GB.  So let's just include the cast
> > and not have to worry about undefined semantics screwing us over.
> 
> I think you'll find that PAGE_SIZE is unsigned long on all
> architectures.

arch/openrisc/include/asm/page.h:#define PAGE_SIZE       (1 << PAGE_SHIFT)

The others are a miscellany of different defines, but I think you're
right for every other architecture.
Christoph Hellwig May 24, 2019, 6:34 a.m. UTC | #8
On Thu, May 23, 2019 at 02:44:02PM -0700, Matthew Wilcox wrote:
> > I think you'll find that PAGE_SIZE is unsigned long on all
> > architectures.
> 
> arch/openrisc/include/asm/page.h:#define PAGE_SIZE       (1 << PAGE_SHIFT)

Well, the whole context is:

#ifdef __ASSEMBLY__
#define PAGE_SIZE       (1 << PAGE_SHIFT)
#else
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#endif

Which reminds me that there is absolutely no point in letting
architectures even define this.

Add a Kconfig PAGE_SHIFT symbol, and let common code define
PAGE_SHIFT/PAGE_SIZE/PAGE_MASK.
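
Something like this, as a rough sketch (the symbol name is invented here):

config PAGE_SHIFT
	int
	default 12

with each architecture supplying its default, and then in a generic header:

#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

_AC() already expands to a plain 1 under __ASSEMBLY__, so the ifdef above
goes away as well.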

Patch

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 58469623b015..c68a120de28b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -207,8 +207,7 @@  void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 5c9073bace83..280fdbc3bfa5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -67,8 +67,7 @@  void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d28e29103bdb..cc4061cd9899 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -63,7 +63,7 @@  __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa8d8425be25..b83a1d16bd89 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@  static int ion_system_heap_allocate(struct ion_heap *heap,
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@  static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1eb1f58e00e4..83c1ec65dbcc 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -148,8 +148,7 @@  int ft_queue_data_in(struct se_cmd *se_cmd)
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fdc18321d70c..2c37da095517 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2891,7 +2891,7 @@  static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index edf476c8cfb9..2e909072a41f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -472,7 +472,7 @@  static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e8834ac32b7..0208f77bab63 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -772,6 +772,15 @@  static inline void set_compound_order(struct page *page, unsigned int order)
 	page[1].compound_order = order;
 }
 
+/*
+ * Returns the number of bytes in this potentially compound page.
+ * Must be called with the head page, not a tail page.
+ */
+static inline unsigned long page_size(struct page *page)
+{
+	return (unsigned long)PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f74fa832f3aa..d4349c9d0c7e 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -877,7 +877,7 @@  static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= page_size(head)))
 		return true;
 	WARN_ON(1);
 	return false;
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 36afcf64e016..dd1d3d88ac9e 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -323,8 +323,7 @@  void kasan_poison_slab(struct page *page)
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -520,7 +519,7 @@  void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -556,8 +555,7 @@  void kasan_poison_kfree(void *ptr, unsigned long ip)
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
diff --git a/mm/nommu.c b/mm/nommu.c
index b492fd1fcf9f..6dbd5251b366 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -107,7 +107,7 @@  unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 11df03e71288..eff4b4520c8d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2ae6b0d..09ce05c481fc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -898,8 +898,7 @@  static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1374,8 +1373,7 @@  static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
diff --git a/mm/slob.c b/mm/slob.c
index 510f0941d032..e7104d1ce92b 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -539,7 +539,7 @@  size_t ksize(const void *block)
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
diff --git a/mm/slub.c b/mm/slub.c
index 51453216a1ed..fe2098f95e05 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -829,7 +829,7 @@  static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -3912,7 +3912,7 @@  static size_t __ksize(const void *object)
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a14e8864e4fa..1e7f5dcaefad 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -685,7 +685,7 @@  static int xsk_mmap(struct file *file, struct socket *sock,
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;