diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
@@ -261,7 +261,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
- return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+ return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP|_PAGE_SPECIAL)) == _PAGE_PSE;
}
#endif
@@ -300,6 +300,18 @@ static inline int pmd_special(pmd_t pmd)
{
return !!(pmd_flags(pmd) & _PAGE_SPECIAL);
}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline int pud_special(pud_t pud)
+{
+ return !!(pud_flags(pud) & _PAGE_SPECIAL);
+}
+#else
+static inline int pud_special(pud_t pud)
+{
+ return 0;
+}
+#endif
#endif
#endif
@@ -487,6 +498,11 @@ static inline pud_t pud_mkhuge(pud_t pud)
return pud_set_flags(pud, _PAGE_PSE);
}
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_SPECIAL);
+}
+
static inline pud_t pud_mkyoung(pud_t pud)
{
return pud_set_flags(pud, _PAGE_ACCESSED);
diff --git a/mm/gup.c b/mm/gup.c
@@ -2123,6 +2123,9 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
}
+ if (pud_special(orig))
+ return 0;
+
refs = 0;
page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
@@ -879,6 +879,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
+ else if (pfn_t_special(pfn))
+ entry = pud_mkspecial(entry);
if (write) {
entry = pud_mkyoung(pud_mkdirty(entry));
entry = maybe_pud_mkwrite(entry, vma);
@@ -901,8 +903,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
* but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit.
*/
- BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
- !pfn_t_devmap(pfn));
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
@@ -2031,7 +2032,8 @@ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
spinlock_t *ptl;
ptl = pud_lock(vma->vm_mm, pud);
- if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
+ if (likely(pud_trans_huge(*pud) || pud_devmap(*pud) ||
+            pud_special(*pud)))
return ptl;
spin_unlock(ptl);
return NULL;
diff --git a/mm/memory.c b/mm/memory.c
@@ -1201,7 +1201,8 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
- if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
+ if (pud_trans_huge(*pud) || pud_devmap(*pud) ||
+ pud_special(*pud)) {
if (next - addr != HPAGE_PUD_SIZE) {
VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
split_huge_pud(vma, pud, addr);
Currently vmf_insert_pfn_pud() only works with devmap pfns and hits a
BUG_ON() otherwise. Add support for special pages, i.e. pfns that the
pfn_t marks with PFN_SPECIAL.

Pages of this kind are not expected to be GUPed, so return no pages
from gup_huge_pud(), much like gup_pte_range() does for ptes and
gup_huge_pmd() does for pmds.

This allows device-dax to handle 1G hugepages without struct pages.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 arch/x86/include/asm/pgtable.h | 19 ++++++++++++++++++-
 mm/gup.c                       |  3 +++
 mm/huge_memory.c               |  8 +++++---
 mm/memory.c                    |  3 ++-
 4 files changed, 28 insertions(+), 5 deletions(-)
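
For illustration only, not part of the patch: below is a minimal sketch of
how a driver's huge_fault handler could exercise this path, in the spirit
of device-dax. The my_dev structure and my_dev_huge_fault() are hypothetical
names; vmf_insert_pfn_pud(), phys_to_pfn_t(), PFN_SPECIAL and the
huge_fault() signature are the existing kernel interfaces of this era.

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pfn_t.h>

/* Hypothetical driver state; 'base' is the physical start of the range. */
struct my_dev {
	phys_addr_t base;
};

static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	struct my_dev *dev = vmf->vma->vm_private_data;
	unsigned long off;
	pfn_t pfn;

	if (pe_size != PE_SIZE_PUD)
		return VM_FAULT_FALLBACK;

	/* Offset of the faulting, PUD-aligned address within the VMA. */
	off = (vmf->address & PUD_MASK) - vmf->vma->vm_start;

	/*
	 * No struct pages back this range: mark the pfn PFN_SPECIAL
	 * instead of PFN_DEV|PFN_MAP, so insert_pfn_pud() applies
	 * pud_mkspecial() and gup_huge_pud() refuses to pin it.
	 */
	pfn = phys_to_pfn_t(dev->base + off, PFN_SPECIAL);
	return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}

A handler like this would be wired up through vm_operations_struct's
huge_fault hook, with the VMA flagged VM_PFNMAP or VM_MIXEDMAP so the
reworked BUG_ON() in vmf_insert_pfn_pud() is satisfied.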