@@ -266,7 +266,8 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
"file %px anon_vma %px policy %px\n"
"uffd_ctx %px\n"
"anon_name %px\n"
- "merge_flags %x state %x\n",
+ "state %x\n"
+ "just_expand %d __remove_middle %d __remove_next %d\n",
vmg, vmg->mm, vmg->pgoff,
vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
@@ -279,7 +280,8 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
(void *)0,
#endif
vmg->anon_name,
- (int)vmg->merge_flags, (int)vmg->state);
+ (int)vmg->state,
+ vmg->just_expand, vmg->__remove_middle, vmg->__remove_next);
if (vmg->mm) {
pr_warn("vmg %px mm:\n", vmg);
@@ -55,7 +55,6 @@ struct mmap_state {
.middle = vma_, \
.next = (vma_) ? NULL : (map_)->next, \
.state = VMA_MERGE_START, \
- .merge_flags = VMG_FLAG_DEFAULT, \
}
static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
@@ -120,8 +119,8 @@ static void init_multi_vma_prep(struct vma_prepare *vp,
memset(vp, 0, sizeof(struct vma_prepare));
vp->vma = vma;
vp->anon_vma = vma->anon_vma;
- vp->remove = remove;
- vp->remove2 = remove2;
+ vp->remove = remove ? remove : remove2;
+ vp->remove2 = remove ? remove2 : NULL;
vp->adj_next = next;
if (!vp->anon_vma && next)
vp->anon_vma = next->anon_vma;
@@ -129,7 +128,6 @@ static void init_multi_vma_prep(struct vma_prepare *vp,
vp->file = vma->vm_file;
if (vp->file)
vp->mapping = vma->vm_file->f_mapping;
-
}
/*
@@ -629,22 +627,40 @@ void validate_mm(struct mm_struct *mm)
}
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
-/* Actually perform the VMA merge operation. */
-static int commit_merge(struct vma_merge_struct *vmg,
- struct vm_area_struct *adjust,
- struct vm_area_struct *remove,
- struct vm_area_struct *remove2,
- long adj_start,
- bool expanded)
+/*
+ * Actually perform the VMA merge operation.
+ *
+ * On success, returns the merged VMA. Otherwise returns NULL.
+ */
+static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg,
+ long adj_start)
{
struct vma_prepare vp;
+ struct vm_area_struct *remove = NULL;
+ struct vm_area_struct *remove2 = NULL;
+ struct vm_area_struct *adjust = NULL;
+ /*
+ * In all cases but that of merge right, shrink next, we write
+ * vmg->target to the maple tree and return this as the merged VMA.
+ */
+ bool merge_target = adj_start >= 0;
+
+ if (vmg->__remove_middle)
+ remove = vmg->middle;
+ if (vmg->__remove_next)
+ remove2 = vmg->next;
+
+ if (adj_start > 0)
+ adjust = vmg->middle;
+ else if (adj_start < 0)
+ adjust = vmg->next;
init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2);
VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
vp.anon_vma != adjust->anon_vma);
- if (expanded) {
+ if (merge_target) {
/* Note: vma iterator must be pointing to 'start'. */
vma_iter_config(vmg->vmi, vmg->start, vmg->end);
} else {
@@ -653,27 +669,26 @@ static int commit_merge(struct vma_merge_struct *vmg,
}
if (vma_iter_prealloc(vmg->vmi, vmg->target))
- return -ENOMEM;
+ return NULL;
vma_prepare(&vp);
vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start);
vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff);
- if (expanded)
+ if (merge_target)
vma_iter_store(vmg->vmi, vmg->target);
if (adj_start) {
adjust->vm_start += adj_start;
adjust->vm_pgoff += PHYS_PFN(adj_start);
- if (adj_start < 0) {
- WARN_ON(expanded);
+
+ if (!merge_target)
vma_iter_store(vmg->vmi, adjust);
- }
}
vma_complete(&vp, vmg->vmi, vmg->target->vm_mm);
- return 0;
+ return merge_target ? vmg->target : vmg->next;
}
/* We can only remove VMAs when merging if they do not have a close hook. */
@@ -718,16 +733,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
struct vm_area_struct *prev = vmg->prev;
struct vm_area_struct *next, *res;
struct vm_area_struct *anon_dup = NULL;
- struct vm_area_struct *adjust = NULL;
unsigned long start = vmg->start;
unsigned long end = vmg->end;
bool left_side = middle && start == middle->vm_start;
bool right_side = middle && end == middle->vm_end;
int err = 0;
long adj_start = 0;
- bool merge_will_delete_middle, merge_will_delete_next;
bool merge_left, merge_right, merge_both;
- bool expanded;
mmap_assert_write_locked(vmg->mm);
VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
@@ -779,27 +791,27 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
merge_both = merge_left && merge_right;
/* If we span the entire VMA, a merge implies it will be deleted. */
- merge_will_delete_middle = left_side && right_side;
+ vmg->__remove_middle = left_side && right_side;
/*
* If we need to remove middle in its entirety but are unable to do so,
* we have no sensible recourse but to abort the merge.
*/
- if (merge_will_delete_middle && !can_merge_remove_vma(middle))
+ if (vmg->__remove_middle && !can_merge_remove_vma(middle))
return NULL;
/*
* If we merge both VMAs, then next is also deleted. This implies
* merge_will_delete_vma also.
*/
- merge_will_delete_next = merge_both;
+ vmg->__remove_next = merge_both;
/*
* If we cannot delete next, then we can reduce the operation to merging
* prev and middle (thereby deleting middle).
*/
- if (merge_will_delete_next && !can_merge_remove_vma(next)) {
- merge_will_delete_next = false;
+ if (vmg->__remove_next && !can_merge_remove_vma(next)) {
+ vmg->__remove_next = false;
merge_right = false;
merge_both = false;
}
@@ -846,10 +858,11 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
vmg->start = prev->vm_start;
vmg->pgoff = prev->vm_pgoff;
- if (!merge_will_delete_middle) {
- adjust = middle;
+ /*
+ * We both expand prev and shrink middle.
+ */
+ if (!vmg->__remove_middle)
adj_start = vmg->end - middle->vm_start;
- }
err = dup_anon_vma(prev, middle, &anon_dup);
} else { /* merge_right */
@@ -867,7 +880,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
/* If we are offset into a VMA, then prev must be middle. */
VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
- if (merge_will_delete_middle) {
+ if (vmg->__remove_middle) {
vmg->target = next;
vmg->end = next->vm_end;
vmg->pgoff = next->vm_pgoff - pglen;
@@ -883,7 +896,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
vmg->end = start;
vmg->pgoff = middle->vm_pgoff;
- adjust = next;
adj_start = -(middle->vm_end - start);
}
@@ -893,17 +905,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
if (err)
goto abort;
- /*
- * In nearly all cases, we expand vmg->middle. There is one exception -
- * merge_right where we partially span the VMA. In this case we shrink
- * the end of vmg->middle and adjust the start of vmg->next accordingly.
- */
- expanded = !merge_right || merge_will_delete_middle;
-
- if (commit_merge(vmg, adjust,
- merge_will_delete_middle ? middle : NULL,
- merge_will_delete_next ? next : NULL,
- adj_start, expanded)) {
+ res = commit_merge(vmg, adj_start);
+ if (!res) {
if (anon_dup)
unlink_anon_vmas(anon_dup);
@@ -911,9 +914,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
return NULL;
}
- res = merge_left ? prev : next;
khugepaged_enter_vma(res, vmg->flags);
-
vmg->state = VMA_MERGE_SUCCESS;
return res;
@@ -974,7 +975,6 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
struct vm_area_struct *next = vmg->next;
unsigned long end = vmg->end;
bool can_merge_left, can_merge_right;
- bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
mmap_assert_write_locked(vmg->mm);
VM_WARN_ON_VMG(vmg->middle, vmg);
@@ -988,7 +988,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
return NULL;
can_merge_left = can_vma_merge_left(vmg);
- can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);
+ can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left);
/* If we can merge with the next VMA, adjust vmg accordingly. */
if (can_merge_right) {
@@ -1011,7 +1011,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
vmg->end = end;
/* In expand-only case we are already positioned at prev. */
- if (!just_expand) {
+ if (!vmg->just_expand) {
/* Equivalent to going to the previous range. */
vma_prev(vmg->vmi);
}
@@ -1076,7 +1076,10 @@ int vma_expand(struct vma_merge_struct *vmg)
middle->vm_end > vmg->end, vmg);
vmg->target = middle;
- if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
+ if (remove_next)
+ vmg->__remove_next = true;
+
+ if (!commit_merge(vmg, 0))
goto nomem;
return 0;
@@ -2588,7 +2591,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
vmg.prev = vma;
/* vmi is positioned at prev, which this mode expects. */
- vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+ vmg.just_expand = true;
if (vma_merge_new_range(&vmg))
goto out;
@@ -58,17 +58,6 @@ enum vma_merge_state {
VMA_MERGE_SUCCESS,
};
-enum vma_merge_flags {
- VMG_FLAG_DEFAULT = 0,
- /*
- * If we can expand, simply do so. We know there is nothing to merge to
- * the right. Does not reset state upon failure to merge. The VMA
- * iterator is assumed to be positioned at the previous VMA, rather than
- * at the gap.
- */
- VMG_FLAG_JUST_EXPAND = 1 << 0,
-};
-
/*
* Describes a VMA merge operation and is threaded throughout it.
*
@@ -117,8 +106,31 @@ struct vma_merge_struct {
struct mempolicy *policy;
struct vm_userfaultfd_ctx uffd_ctx;
struct anon_vma_name *anon_name;
- enum vma_merge_flags merge_flags;
enum vma_merge_state state;
+
+ /* Flags which callers can use to modify merge behaviour: */
+
+ /*
+ * If we can expand, simply do so. We know there is nothing to merge to
+ * the right. Does not reset state upon failure to merge. The VMA
+ * iterator is assumed to be positioned at the previous VMA, rather than
+ * at the gap.
+ */
+ bool just_expand :1;
+
+ /* Internal flags set during merge process: */
+
+ /*
+ * Internal flag used during the merge operation to indicate we will
+ * remove vmg->middle.
+ */
+ bool __remove_middle :1;
+ /*
+ * Internal flag used during the merge operation to indicate we will
+ * remove vmg->next.
+ */
+ bool __remove_next :1;
+
};
static inline bool vmg_nomem(struct vma_merge_struct *vmg)
@@ -142,7 +154,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.flags = flags_, \
.pgoff = pgoff_, \
.state = VMA_MERGE_START, \
- .merge_flags = VMG_FLAG_DEFAULT, \
}
#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \
@@ -162,7 +173,6 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.uffd_ctx = vma_->vm_userfaultfd_ctx, \
.anon_name = anon_vma_name(vma_), \
.state = VMA_MERGE_START, \
- .merge_flags = VMG_FLAG_DEFAULT, \
}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
@@ -149,11 +149,16 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
vmg->prev = NULL;
vmg->middle = NULL;
vmg->next = NULL;
+ vmg->target = NULL;
vmg->start = start;
vmg->end = end;
vmg->pgoff = pgoff;
vmg->flags = flags;
+
+ vmg->just_expand = false;
+ vmg->__remove_middle = false;
+ vmg->__remove_next = false;
}
/*
@@ -1546,7 +1551,7 @@ static bool test_expand_only_mode(void)
/*
* Place a VMA prior to the one we're expanding so we assert that we do
* not erroneously try to traverse to the previous VMA even though we
- * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
+ * have, through the use of the just_expand flag, indicated we do not
* need to do so.
*/
alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
@@ -1558,7 +1563,7 @@ static bool test_expand_only_mode(void)
vma_iter_set(&vmi, 0x3000);
vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+ vmg.just_expand = true;
vma = vma_merge_new_range(&vmg);
ASSERT_NE(vma, NULL);