@@ -507,7 +507,7 @@ int dmemfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- vma->vm_flags |= VM_PFNMAP;
+ vma->vm_flags |= VM_PFNMAP | VM_DMEM | VM_IO;
file_accessed(file);
vma->vm_ops = &dmemfs_vm_ops;
@@ -311,6 +311,8 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
+#define VM_DMEM		BIT(38)	/* Dmem page VM; NOTE(review): bit 38 assumes 64-bit vm_flags — gate behind CONFIG_64BIT / CONFIG_ARCH_USES_HIGH_VMA_FLAGS like the VM_HIGH_ARCH_* bits, or 32-bit builds break */
+
#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
@@ -666,6 +668,11 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline bool vma_is_dmem(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & VM_DMEM);
+}
+
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
@@ -492,8 +492,11 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
goto no_page;
} else if (unlikely(!page)) {
if (flags & FOLL_DUMP) {
- /* Avoid special (like zero) pages in core dumps */
- page = ERR_PTR(-EFAULT);
+ if (vma_is_dmem(vma))
+ page = ERR_PTR(-EEXIST);
+ else
+ /* Avoid special (like zero) pages in core dumps */
+ page = ERR_PTR(-EFAULT);
goto out;
}
@@ -78,8 +78,12 @@ static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
pgoff_t pgoff;
pgoff = linear_page_index(vma, addr);
- for (i = 0; i < nr; i++, pgoff++)
- vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+		if (vma_is_dmem(vma)) {
+			memset(vec, 1, nr);	/* dmem mappings are reported as resident */
+		} else {
+			for (i = 0; i < nr; i++, pgoff++)
+				vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
+		}
} else {
for (i = 0; i < nr; i++)
vec[i] = 0;
@@ -236,7 +236,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
* for all the checks.
*/
if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
- pmd_none_or_clear_bad_unless_trans_huge(pmd))
+		    !pmd_special(*pmd) && pmd_none_or_clear_bad_unless_trans_huge(pmd))
goto next;
/* invoke the mmu notifier if the pmd is populated */
@@ -412,6 +412,9 @@ static int prot_none_test(unsigned long addr, unsigned long next,
return 0;
}
+ if (vma_is_dmem(vma))
+ return -EINVAL;
+
/*
* Do PROT_NONE PFN permission checks here when we can still
* bail out without undoing a lot of state. This is a rather
@@ -482,6 +482,9 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
if (!vma || vma->vm_start > addr)
return ERR_PTR(-EFAULT);
+ if (vma_is_dmem(vma))
+ return ERR_PTR(-EINVAL);
+
/*
* !old_len is a special case where an attempt is made to 'duplicate'
* a mapping. This makes no sense for private mappings as it will