@@ -123,6 +123,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
* to override the version in mm/hugetlb.c
*/
#define vma_mmu_pagesize vma_mmu_pagesize
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
/*
* If the arch doesn't supply something else, assume that hugepage
@@ -568,10 +568,7 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
if (!radix_enabled())
return 1UL << mmu_psize_to_shift(psize);
#endif
- if (!is_vm_hugetlb_page(vma))
- return PAGE_SIZE;
-
- return huge_page_size(hstate_vma(vma));
+ return vma_kernel_pagesize(vma);
}
static inline bool is_power_of_4(unsigned long x)
@@ -394,10 +394,6 @@ static inline unsigned long huge_page_size(struct hstate *h)
return (unsigned long)PAGE_SIZE << h->order;
}
-extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
-
-extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
-
static inline unsigned long huge_page_mask(struct hstate *h)
{
return h->mask;
@@ -430,6 +426,30 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
#include <asm/hugetlb.h>
+/*
+ * Return the size of the pages allocated when backing a VMA. In the majority
+ * of cases this will be the same size as that used by the page table entries.
+ */
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+ if (vma->vm_ops && vma->vm_ops->pagesize)
+ return vma->vm_ops->pagesize(vma);
+ return PAGE_SIZE;
+}
+
+/*
+ * Return the page size being used by the MMU to back a VMA. In the majority
+ * of cases, the page size used by the kernel matches the MMU size. On
+ * architectures where it differs, an architecture-specific version of this
+ * function is required.
+ */
+#ifndef vma_mmu_pagesize
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ return vma_kernel_pagesize(vma);
+}
+#endif
+
#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
struct page *page, int writable)
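
For reference, the main consumers of these two helpers are the per-VMA
"KernelPageSize:" and "MMUPageSize:" fields in /proc/<pid>/smaps. A minimal
sketch of such a caller, assuming a struct seq_file *m and a struct
vm_area_struct *vma are in scope (modelled on fs/proc/task_mmu.c; not part
of this patch):

	/* report both per-VMA page sizes in kB, as smaps does */
	seq_printf(m, "KernelPageSize: %8lu kB\n",
		   vma_kernel_pagesize(vma) >> 10);
	seq_printf(m, "MMUPageSize:    %8lu kB\n",
		   vma_mmu_pagesize(vma) >> 10);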
@@ -533,8 +553,6 @@ struct hstate {};
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
@@ -383,6 +383,7 @@ struct vm_operations_struct {
int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
+ unsigned long (*pagesize)(struct vm_area_struct * area);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
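
To illustrate the new hook: a driver whose VMAs are always backed by
PMD-sized pages could report that through ->pagesize(). The names below
(foo_vm_pagesize, foo_vm_ops) are hypothetical and only sketch the intended
usage; they are not taken from this patch:

	/* hypothetical driver: every mapping is backed by PMD-sized pages */
	static unsigned long foo_vm_pagesize(struct vm_area_struct *vma)
	{
		return PMD_SIZE;	/* e.g. 2 MiB on x86-64 */
	}

	static const struct vm_operations_struct foo_vm_ops = {
		.pagesize = foo_vm_pagesize,
	};

With this in place, vma_kernel_pagesize() on such a VMA returns PMD_SIZE
rather than PAGE_SIZE, with no driver-specific check in generic code.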
@@ -629,36 +629,6 @@ pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);
-/*
- * Return the size of the pages allocated when backing a VMA. In the majority
- * cases this will be same size as used by the page table entries.
- */
-unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
-{
- struct hstate *hstate;
-
- if (!is_vm_hugetlb_page(vma))
- return PAGE_SIZE;
-
- hstate = hstate_vma(vma);
-
- return 1UL << huge_page_shift(hstate);
-}
-EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
-
-/*
- * Return the page size being used by the MMU to back a VMA. In the majority
- * of cases, the page size used by the kernel matches the MMU size. On
- * architectures where it differs, an architecture-specific version of this
- * function is required.
- */
-#ifndef vma_mmu_pagesize
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
- return vma_kernel_pagesize(vma);
-}
-#endif
-
/*
* Flags for MAP_PRIVATE reservations. These are stored in the bottom
* bits of the reservation map pointer, which are always clear due to
@@ -3142,6 +3112,13 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
return 0;
}
+static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
+{
+ struct hstate *hstate = hstate_vma(vma);
+
+ return 1UL << huge_page_shift(hstate);
+}
+
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3159,6 +3136,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
.split = hugetlb_vm_op_split,
+ .pagesize = hugetlb_vm_op_pagesize,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
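
Putting the pieces together, the hugetlb case now resolves entirely through
the generic helper. A hand-inlined sketch of the resulting call chain for a
hugetlbfs VMA (for illustration only; not code from this patch):

	/* vma->vm_ops == &hugetlb_vm_ops for a hugetlbfs mapping, so: */
	unsigned long size = vma_kernel_pagesize(vma);
		/* -> vma->vm_ops->pagesize(vma)              */
		/* -> hugetlb_vm_op_pagesize(vma)             */
		/* -> 1UL << huge_page_shift(hstate_vma(vma)) */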