diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1395,16 +1395,13 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
if (unlikely(!can_modify_mm(mm, addr, end)))
return -EPERM;

- /* arch_unmap() might do unmaps itself. */
- arch_unmap(mm, addr, end);
-
/* Find the first overlapping VMA */
vma = vma_find(&vmi, end);
+ init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
if (vma) {
mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
mt_on_stack(mt_detach);
mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
- init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
/* Prepare to unmap any existing mapping in the area */
if (vms_gather_munmap_vmas(&vms, &mas_detach))
return -ENOMEM;
diff --git a/mm/vma.c b/mm/vma.c
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -352,16 +352,22 @@ static void __vma_link_file(struct vm_area_struct *vma,
* @uf: The userfaultfd list_head
* @unlock: Unlock after the operation. Only unlocked on success
*/
-static inline void init_vma_munmap(struct vma_munmap_struct *vms,
+void
+init_vma_munmap(struct vma_munmap_struct *vms,
struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, struct list_head *uf,
- bool unlock)
+ unsigned long start, unsigned long end,
+ struct list_head *uf, bool unlock)
{
vms->vmi = vmi;
vms->vma = vma;
- vms->mm = vma->vm_mm;
- vms->start = start;
- vms->end = end;
+ if (vma) {
+ vms->mm = vma->vm_mm;
+ vms->start = start;
+ vms->end = end;
+ } else {
+ vms->mm = NULL;
+ vms->start = vms->end = 0;
+ }
vms->unlock = unlock;
vms->uf = uf;
vms->vma_count = 0;
@@ -699,8 +705,8 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
* This updates the mm_struct, unmaps the region, frees the resources
* needed to be done once the vma maple tree is updated.
*/
-static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
- struct ma_state *mas_detach)
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
{
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -748,8 +754,8 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
*
* Return: 0 on success
*/
-static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
- struct ma_state *mas_detach)
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach)
{
struct vm_area_struct *next = NULL;
int error = -ENOMEM;
diff --git a/mm/vma.h b/mm/vma.h
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -63,6 +63,12 @@ void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma);
/* Required for do_brk_flags(). */
void vma_prepare(struct vma_prepare *vp);

+/* Required for mmap_region() */
+void init_vma_munmap(struct vma_munmap_struct *vms,
+ struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf, bool unlock);
+
/* Required for do_brk_flags(). */
void init_vma_prep(struct vma_prepare *vp,
struct vm_area_struct *vma);
@@ -78,6 +84,14 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff);

+/* Required for mmap_region() */
+void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
+/* Required for mmap_region() */
+int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
+ struct ma_state *mas_detach);
+
int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
Adding support for a NULL vma means init_vma_munmap() can always be
called to set up the vma_munmap_struct, which makes the later call to
vms_complete_munmap_vmas() less error-prone.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/mmap.c |  5 +----
 mm/vma.c  | 26 ++++++++++++++++----------
 mm/vma.h  | 14 ++++++++++++++
 3 files changed, 31 insertions(+), 14 deletions(-)
--
2.45.2
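To see the shape of the change in isolation, here is a minimal
standalone sketch of the pattern, assuming simplified stand-in types:
struct munmap_state and the helpers below are invented for
illustration and are not the kernel's vma_munmap_struct API.

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel types; the names and layout are
 * invented for illustration, not the real vma_munmap_struct.
 */
struct vma { unsigned long vm_start, vm_end; };

struct munmap_state {
	struct vma *vma;		/* first overlapping vma, may be NULL */
	unsigned long start, end;	/* range being unmapped */
	int vma_count;			/* vmas gathered for removal */
};

/* Tolerating vma == NULL lets callers initialize unconditionally. */
static void init_munmap_state(struct munmap_state *st, struct vma *vma,
			      unsigned long start, unsigned long end)
{
	st->vma = vma;
	if (vma) {
		st->start = start;
		st->end = end;
	} else {
		st->start = st->end = 0;	/* nothing to tear down */
	}
	st->vma_count = 0;
}

/* The completion step no longer depends on whether a vma was found. */
static void complete_munmap(const struct munmap_state *st)
{
	if (!st->vma_count) {
		puts("no vmas gathered; nothing to complete");
		return;
	}
	printf("completed unmap of [%#lx, %#lx)\n", st->start, st->end);
}

int main(void)
{
	struct vma v = { 0x1000, 0x2000 };
	struct munmap_state st;

	init_munmap_state(&st, NULL, 0x1000, 0x2000);	/* no overlap found */
	complete_munmap(&st);				/* safe no-op */

	init_munmap_state(&st, &v, v.vm_start, v.vm_end);
	st.vma_count = 1;	/* stand-in for the gather step */
	complete_munmap(&st);
	return 0;
}

Because the initializer always leaves the struct in a defined state,
the completion step can run unconditionally instead of being guarded
by the same if (vma) test that selects the gather path.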