Special handling is needed when unmapping a hugetlb vma and will be
needed when unmapping an msharefs vma once support is added for
handling faults in an mshare region.

Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 include/linux/mm.h | 10 ++++++++++
 ipc/shm.c          | 17 +++++++++++++++++
 mm/hugetlb.c       | 25 +++++++++++++++++++++++++
 mm/memory.c        | 36 +++++++++++++-----------------------
 4 files changed, 65 insertions(+), 23 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -39,6 +39,7 @@ struct anon_vma_chain;
struct user_struct;
struct pt_regs;
struct folio_batch;
+struct zap_details;
extern int sysctl_page_lock_unfairness;
@@ -687,8 +688,17 @@ struct vm_operations_struct {
*/
struct page *(*find_special_page)(struct vm_area_struct *vma,
unsigned long addr);
+ void (*unmap_page_range)(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details);
};
+void __unmap_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details);
+
#ifdef CONFIG_NUMA_BALANCING
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
diff --git a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -585,6 +585,22 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
}
#endif
+static void shm_unmap_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details)
+{
+ struct file *file = vma->vm_file;
+ struct shm_file_data *sfd = shm_file_data(file);
+
+ if (sfd->vm_ops->unmap_page_range) {
+ sfd->vm_ops->unmap_page_range(tlb, vma, addr, end, details);
+ return;
+ }
+
+ __unmap_page_range(tlb, vma, addr, end, details);
+}
+
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
struct shm_file_data *sfd = shm_file_data(file);
@@ -685,6 +701,7 @@ static const struct vm_operations_struct shm_vm_ops = {
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,
#endif
+ .unmap_page_range = shm_unmap_page_range,
};
/**
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5147,6 +5147,30 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
return 0;
}
+static void hugetlb_vm_op_unmap_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details)
+{
+ zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+ /*
+ * It is undesirable to test vma->vm_file as it
+ * should be non-null for valid hugetlb area.
+ * However, vm_file will be NULL in the error
+ * cleanup path of mmap_region. When
+ * hugetlbfs ->mmap method fails,
+ * mmap_region() nullifies vma->vm_file
+ * before calling this function to clean up.
+ * Since no pte has actually been setup, it is
+ * safe to do nothing in this case.
+ */
+ if (!vma->vm_file)
+ return;
+
+ __unmap_hugepage_range(tlb, vma, addr, end, NULL, zap_flags);
+}
+
/*
* When a new function is introduced to vm_operations_struct and added
* to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
@@ -5160,6 +5184,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
.close = hugetlb_vm_op_close,
.may_split = hugetlb_vm_op_split,
.pagesize = hugetlb_vm_op_pagesize,
+ .unmap_page_range = hugetlb_vm_op_unmap_page_range,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1875,7 +1875,7 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
return addr;
}
-void unmap_page_range(struct mmu_gather *tlb,
+void __unmap_page_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end,
struct zap_details *details)
@@ -1895,6 +1895,16 @@ void unmap_page_range(struct mmu_gather *tlb,
tlb_end_vma(tlb, vma);
}
+void unmap_page_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct zap_details *details)
+{
+ if (vma->vm_ops && vma->vm_ops->unmap_page_range)
+ vma->vm_ops->unmap_page_range(tlb, vma, addr, end, details);
+ else
+ __unmap_page_range(tlb, vma, addr, end, details);
+}
static void unmap_single_vma(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr,
@@ -1916,28 +1926,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (unlikely(vma->vm_flags & VM_PFNMAP))
untrack_pfn(vma, 0, 0, mm_wr_locked);
- if (start != end) {
- if (unlikely(is_vm_hugetlb_page(vma))) {
- /*
- * It is undesirable to test vma->vm_file as it
- * should be non-null for valid hugetlb area.
- * However, vm_file will be NULL in the error
- * cleanup path of mmap_region. When
- * hugetlbfs ->mmap method fails,
- * mmap_region() nullifies vma->vm_file
- * before calling this function to clean up.
- * Since no pte has actually been setup, it is
- * safe to do nothing in this case.
- */
- if (vma->vm_file) {
- zap_flags_t zap_flags = details ?
- details->zap_flags : 0;
- __unmap_hugepage_range(tlb, vma, start, end,
- NULL, zap_flags);
- }
- } else
- unmap_page_range(tlb, vma, start, end, details);
- }
+ if (start != end)
+ unmap_page_range(tlb, vma, start, end, details);
}
/**
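A note for reviewers, not part of the patch: below is a minimal sketch of
how a future user such as msharefs might wire up the new hook. The
msharefs_* names are hypothetical; only the hook signature and
__unmap_page_range(), which this patch declares in include/linux/mm.h,
come from the patch itself.

static void msharefs_unmap_page_range(struct mmu_gather *tlb,
				      struct vm_area_struct *vma,
				      unsigned long addr, unsigned long end,
				      struct zap_details *details)
{
	/*
	 * Hypothetical: custom teardown for an mshare region would go
	 * here (e.g. leaving page tables shared with the host mm
	 * intact). Until such handling exists, fall back to the
	 * default zap path.
	 */
	__unmap_page_range(tlb, vma, addr, end, details);
}

static const struct vm_operations_struct msharefs_vm_ops = {
	.unmap_page_range = msharefs_unmap_page_range,
};

Since unmap_page_range() takes the hook path only when
vma->vm_ops->unmap_page_range is non-NULL, vmas that do not set the new
field continue through __unmap_page_range() unchanged.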