@@ -261,7 +261,7 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
pr_warn("vmg %px state: mm %px pgoff %lx\n"
"vmi %px [%lx,%lx)\n"
- "prev %px next %px vma %px\n"
+ "prev %px middle %px next %px target %px\n"
"start %lx end %lx flags %lx\n"
"file %px anon_vma %px policy %px\n"
"uffd_ctx %px\n"
@@ -270,7 +270,7 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
vmg, vmg->mm, vmg->pgoff,
vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0,
vmg->vmi ? vma_iter_end(vmg->vmi) : 0,
- vmg->prev, vmg->next, vmg->vma,
+ vmg->prev, vmg->middle, vmg->next, vmg->target,
vmg->start, vmg->end, vmg->flags,
vmg->file, vmg->anon_vma, vmg->policy,
#ifdef CONFIG_USERFAULTFD
@@ -288,13 +288,6 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
pr_warn("vmg %px mm: (NULL)\n", vmg);
}
- if (vmg->vma) {
- pr_warn("vmg %px vma:\n", vmg);
- dump_vma(vmg->vma);
- } else {
- pr_warn("vmg %px vma: (NULL)\n", vmg);
- }
-
if (vmg->prev) {
pr_warn("vmg %px prev:\n", vmg);
dump_vma(vmg->prev);
@@ -302,6 +295,13 @@ void dump_vmg(const struct vma_merge_struct *vmg, const char *reason)
pr_warn("vmg %px prev: (NULL)\n", vmg);
}
+ if (vmg->middle) {
+ pr_warn("vmg %px middle:\n", vmg);
+ dump_vma(vmg->middle);
+ } else {
+ pr_warn("vmg %px middle: (NULL)\n", vmg);
+ }
+
if (vmg->next) {
pr_warn("vmg %px next:\n", vmg);
dump_vma(vmg->next);
@@ -1707,7 +1707,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
/*
* cover the whole range: [new_start, old_end)
*/
- vmg.vma = vma;
+ vmg.middle = vma;
if (vma_expand(&vmg))
return -ENOMEM;
@@ -52,7 +52,7 @@ struct mmap_state {
.pgoff = (map_)->pgoff, \
.file = (map_)->file, \
.prev = (map_)->prev, \
- .vma = vma_, \
+ .middle = vma_, \
.next = (vma_) ? NULL : (map_)->next, \
.state = VMA_MERGE_START, \
.merge_flags = VMG_FLAG_DEFAULT, \
@@ -639,7 +639,7 @@ static int commit_merge(struct vma_merge_struct *vmg,
{
struct vma_prepare vp;
- init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
+ init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2);
VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
vp.anon_vma != adjust->anon_vma);
@@ -652,15 +652,15 @@ static int commit_merge(struct vma_merge_struct *vmg,
adjust->vm_end);
}
- if (vma_iter_prealloc(vmg->vmi, vmg->vma))
+ if (vma_iter_prealloc(vmg->vmi, vmg->target))
return -ENOMEM;
vma_prepare(&vp);
- vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
- vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
+ vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start);
+ vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff);
if (expanded)
- vma_iter_store(vmg->vmi, vmg->vma);
+ vma_iter_store(vmg->vmi, vmg->target);
if (adj_start) {
adjust->vm_start += adj_start;
@@ -671,7 +671,7 @@ static int commit_merge(struct vma_merge_struct *vmg,
}
}
- vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
+ vma_complete(&vp, vmg->vmi, vmg->target->vm_mm);
return 0;
}
@@ -694,8 +694,9 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
* identical properties.
*
* This function checks for the existence of any such mergeable VMAs and updates
- * the maple tree describing the @vmg->vma->vm_mm address space to account for
- * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
+ * the maple tree describing the @vmg->middle->vm_mm address space to account
+ * for this, as well as any VMAs shrunk/expanded/deleted as a result of this
+ * merge.
*
* As part of this operation, if a merge occurs, the @vmg object will have its
* vma, start, end, and pgoff fields modified to execute the merge. Subsequent
@@ -704,45 +705,47 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma)
* Returns: The merged VMA if merge succeeds, or NULL otherwise.
*
* ASSUMPTIONS:
- * - The caller must assign the VMA to be modifed to @vmg->vma.
+ * - The caller must assign the VMA to be modified to @vmg->middle.
* - The caller must have set @vmg->prev to the previous VMA, if there is one.
* - The caller must not set @vmg->next, as we determine this.
* - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
- * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
+ * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end).
*/
static __must_check struct vm_area_struct *vma_merge_existing_range(
struct vma_merge_struct *vmg)
{
- struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *middle = vmg->middle;
struct vm_area_struct *prev = vmg->prev;
struct vm_area_struct *next, *res;
struct vm_area_struct *anon_dup = NULL;
struct vm_area_struct *adjust = NULL;
unsigned long start = vmg->start;
unsigned long end = vmg->end;
- bool left_side = vma && start == vma->vm_start;
- bool right_side = vma && end == vma->vm_end;
+ bool left_side = middle && start == middle->vm_start;
+ bool right_side = middle && end == middle->vm_end;
int err = 0;
long adj_start = 0;
- bool merge_will_delete_vma, merge_will_delete_next;
+ bool merge_will_delete_middle, merge_will_delete_next;
bool merge_left, merge_right, merge_both;
bool expanded;
mmap_assert_write_locked(vmg->mm);
- VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */
+ VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */
VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */
VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg);
VM_WARN_ON_VMG(start >= end, vmg);
/*
- * If vma == prev, then we are offset into a VMA. Otherwise, if we are
+ * If middle == prev, then we are offset into a VMA. If we are
* not, we must span a portion of the VMA.
*/
- VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) ||
- vmg->end > vma->vm_end), vmg);
- /* The vmi must be positioned within vmg->vma. */
- VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
- vma_iter_addr(vmg->vmi) < vma->vm_end), vmg);
+ VM_WARN_ON_VMG(middle &&
+ ((middle != prev && vmg->start != middle->vm_start) ||
+ vmg->end > middle->vm_end), vmg);
+ /* The vmi must be positioned within vmg->middle. */
+ VM_WARN_ON_VMG(middle &&
+ !(vma_iter_addr(vmg->vmi) >= middle->vm_start &&
+ vma_iter_addr(vmg->vmi) < middle->vm_end), vmg);
vmg->state = VMA_MERGE_NOMERGE;
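To make the ASSUMPTIONS above concrete, here is a minimal sketch in the style of the userland tests from tools/testing/vma updated later in this patch. It assumes the harness context those tests use (mm, vmg, vmi and flags already set up); alloc_and_link_vma(), vmg_set_range() and merge_existing() are the harness helpers:

        /* Modify the head of an existing VMA so it can merge into prev. */
        struct vm_area_struct *prev, *middle;

        prev   = alloc_and_link_vma(&mm, 0x0000, 0x3000, 0, flags);
        middle = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);

        /* Propose modifying [0x3000, 0x5000), the head of middle. */
        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        vmg.prev = prev;     /* Caller supplies prev... */
        vmg.middle = middle; /* ...and the VMA being modified. */
        /* vmg.next stays NULL - vma_merge_existing_range() finds it. */

        /* prev expands to [0, 0x5000); middle shrinks to [0x5000, 0x7000). */
        ASSERT_EQ(merge_existing(&vmg), prev);

Note that vmg_set_range() also positions the iterator at the start address, satisfying the vmi positioning assumption.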
@@ -776,13 +779,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
merge_both = merge_left && merge_right;
/* If we span the entire VMA, a merge implies it will be deleted. */
- merge_will_delete_vma = left_side && right_side;
+ merge_will_delete_middle = left_side && right_side;
/*
- * If we need to remove vma in its entirety but are unable to do so,
+ * If we need to remove middle in its entirety but are unable to do so,
* we have no sensible recourse but to abort the merge.
*/
- if (merge_will_delete_vma && !can_merge_remove_vma(vma))
+ if (merge_will_delete_middle && !can_merge_remove_vma(middle))
return NULL;
/*
@@ -793,7 +796,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
/*
* If we cannot delete next, then we can reduce the operation to merging
- * prev and vma (thereby deleting vma).
+ * prev and middle (thereby deleting middle).
*/
if (merge_will_delete_next && !can_merge_remove_vma(next)) {
merge_will_delete_next = false;
@@ -801,8 +804,8 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
merge_both = false;
}
- /* No matter what happens, we will be adjusting vma. */
- vma_start_write(vma);
+ /* No matter what happens, we will be adjusting middle. */
+ vma_start_write(middle);
if (merge_left)
vma_start_write(prev);
@@ -812,13 +815,13 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
if (merge_both) {
/*
- * |<----->|
- * |-------*********-------|
- * prev vma next
- * extend delete delete
+ * |<-------------------->|
+ * |-------********-------|
+ * prev middle next
+ * extend delete delete
*/
- vmg->vma = prev;
+ vmg->target = prev;
vmg->start = prev->vm_start;
vmg->end = next->vm_end;
vmg->pgoff = prev->vm_pgoff;
@@ -826,78 +829,79 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
/*
* We already ensured anon_vma compatibility above, so now it's
* simply a case of, if prev has no anon_vma object, which of
- * next or vma contains the anon_vma we must duplicate.
+ * next or middle contains the anon_vma we must duplicate.
*/
- err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
+ err = dup_anon_vma(prev, next->anon_vma ? next : middle,
+ &anon_dup);
} else if (merge_left) {
/*
- * |<----->| OR
- * |<--------->|
+ * |<------------>| OR
+ * |<----------------->|
* |-------*************
- * prev vma
+ * prev middle
* extend shrink/delete
*/
- vmg->vma = prev;
+ vmg->target = prev;
vmg->start = prev->vm_start;
vmg->pgoff = prev->vm_pgoff;
- if (!merge_will_delete_vma) {
- adjust = vma;
- adj_start = vmg->end - vma->vm_start;
+ if (!merge_will_delete_middle) {
+ adjust = middle;
+ adj_start = vmg->end - middle->vm_start;
}
- err = dup_anon_vma(prev, vma, &anon_dup);
+ err = dup_anon_vma(prev, middle, &anon_dup);
} else { /* merge_right */
/*
- * |<----->| OR
- * |<--------->|
+ * |<------------->| OR
+ * |<----------------->|
* *************-------|
- * vma next
+ * middle next
* shrink/delete extend
*/
pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
VM_WARN_ON_VMG(!merge_right, vmg);
- /* If we are offset into a VMA, then prev must be vma. */
- VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg);
+ /* If we are offset into a VMA, then prev must be middle. */
+ VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
- if (merge_will_delete_vma) {
- vmg->vma = next;
+ if (merge_will_delete_middle) {
+ vmg->target = next;
vmg->end = next->vm_end;
vmg->pgoff = next->vm_pgoff - pglen;
} else {
/*
- * We shrink vma and expand next.
+ * We shrink middle and expand next.
*
* IMPORTANT: This is the ONLY case where the final
- * merged VMA is NOT vmg->vma, but rather vmg->next.
+ * merged VMA is NOT vmg->target, but rather vmg->next.
*/
-
- vmg->start = vma->vm_start;
+ vmg->target = middle;
+ vmg->start = middle->vm_start;
vmg->end = start;
- vmg->pgoff = vma->vm_pgoff;
+ vmg->pgoff = middle->vm_pgoff;
adjust = next;
- adj_start = -(vma->vm_end - start);
+ adj_start = -(middle->vm_end - start);
}
- err = dup_anon_vma(next, vma, &anon_dup);
+ err = dup_anon_vma(next, middle, &anon_dup);
}
if (err)
goto abort;
/*
- * In nearly all cases, we expand vmg->vma. There is one exception -
+ * In nearly all cases, we expand vmg->target. There is one exception -
* merge_right where we partially span the VMA. In this case we shrink
- * the end of vmg->vma and adjust the start of vmg->next accordingly.
+ * the end of vmg->middle and adjust the start of vmg->next accordingly.
*/
- expanded = !merge_right || merge_will_delete_vma;
+ expanded = !merge_right || merge_will_delete_middle;
if (commit_merge(vmg, adjust,
- merge_will_delete_vma ? vma : NULL,
+ merge_will_delete_middle ? middle : NULL,
merge_will_delete_next ? next : NULL,
adj_start, expanded)) {
if (anon_dup)
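The 'merge right, shrink middle' exception called out in the comments above is easiest to see with concrete addresses. A sketch in the same assumed harness style, mirroring one of the test_merge_existing() cases later in this patch:

        /*
         * before:  |- middle [0x2000,0x6000) -|- next [0x6000,0x9000) -|
         * request: modify [0x3000, 0x6000)
         * after:   |- middle [0x2000,0x3000) -|- next [0x3000,0x9000) -|
         */
        middle = alloc_and_link_vma(&mm, 0x2000, 0x6000, 2, flags);
        next   = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);

        vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
        vmg.prev = middle;   /* Offset into middle, so prev == middle. */
        vmg.middle = middle;

        /* vmg->target is middle (which shrinks), yet the merged VMA is next. */
        ASSERT_EQ(merge_existing(&vmg), next);

Here left_side is false and right_side is true, so merge_will_delete_middle is false: middle survives as the shrunken VMA, next is the adjusted one, and the function returns next rather than vmg->target.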
@@ -973,7 +977,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
mmap_assert_write_locked(vmg->mm);
- VM_WARN_ON_VMG(vmg->vma, vmg);
+ VM_WARN_ON_VMG(vmg->middle, vmg);
/* vmi must point at or before the gap. */
VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg);
@@ -989,13 +993,13 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
/* If we can merge with the next VMA, adjust vmg accordingly. */
if (can_merge_right) {
vmg->end = next->vm_end;
- vmg->vma = next;
+ vmg->middle = next;
}
/* If we can merge with the previous VMA, adjust vmg accordingly. */
if (can_merge_left) {
vmg->start = prev->vm_start;
- vmg->vma = prev;
+ vmg->middle = prev;
vmg->pgoff = prev->vm_pgoff;
/*
@@ -1017,10 +1021,10 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
* Now try to expand adjacent VMA(s). This takes care of removing the
* following VMA if we have VMAs on both sides.
*/
- if (vmg->vma && !vma_expand(vmg)) {
- khugepaged_enter_vma(vmg->vma, vmg->flags);
+ if (vmg->middle && !vma_expand(vmg)) {
+ khugepaged_enter_vma(vmg->middle, vmg->flags);
vmg->state = VMA_MERGE_SUCCESS;
- return vmg->vma;
+ return vmg->middle;
}
return NULL;
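For the new-range side, when a proposed mapping exactly fills the gap between two compatible VMAs, vmg->middle is pointed at prev and the expansion swallows next. A sketch in the same assumed harness style; merge_new() is assumed here as the harness wrapper around vma_merge_new_range(), by analogy with merge_existing():

        prev = alloc_and_link_vma(&mm, 0x0000, 0x3000, 0, flags);
        next = alloc_and_link_vma(&mm, 0x5000, 0x7000, 5, flags);

        /* Propose a new VMA over the gap [0x3000, 0x5000). */
        vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
        vmg.prev = prev;
        /* vmg.middle stays NULL: no existing VMA occupies this range. */

        /* prev absorbs both the gap and next, spanning [0, 0x7000). */
        ASSERT_EQ(merge_new(&vmg), prev);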
@@ -1032,44 +1036,46 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
* @vmg: Describes a VMA expansion operation.
*
* Expand @vma to vmg->start and vmg->end. Can expand off the start and end.
- * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
- * vmg->next->vm_end. Checking if the vmg->vma can expand and merge with
+ * Will expand over vmg->next if it's different from vmg->middle and vmg->end ==
+ * vmg->next->vm_end. Checking whether vmg->middle can expand and merge with
* vmg->next needs to be handled by the caller.
*
* Returns: 0 on success.
*
* ASSUMPTIONS:
- * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
- * - The caller must have set @vmg->vma and @vmg->next.
+ * - The caller must hold a WRITE lock on vmg->middle->mm->mmap_lock.
+ * - The caller must have set @vmg->middle and @vmg->next.
*/
int vma_expand(struct vma_merge_struct *vmg)
{
struct vm_area_struct *anon_dup = NULL;
bool remove_next = false;
- struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *middle = vmg->middle;
struct vm_area_struct *next = vmg->next;
mmap_assert_write_locked(vmg->mm);
- vma_start_write(vma);
- if (next && (vma != next) && (vmg->end == next->vm_end)) {
+ vma_start_write(middle);
+ if (next && (middle != next) && (vmg->end == next->vm_end)) {
int ret;
remove_next = true;
/* This should already have been checked by this point. */
VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg);
vma_start_write(next);
- ret = dup_anon_vma(vma, next, &anon_dup);
+ ret = dup_anon_vma(middle, next, &anon_dup);
if (ret)
return ret;
}
/* Not merging but overwriting any part of next is not handled. */
VM_WARN_ON_VMG(next && !remove_next &&
- next != vma && vmg->end > next->vm_start, vmg);
+ next != middle && vmg->end > next->vm_start, vmg);
/* Only handles expanding */
- VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg);
+ VM_WARN_ON_VMG(middle->vm_start < vmg->start ||
+ middle->vm_end > vmg->end, vmg);
+ vmg->target = middle;
if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
goto nomem;
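The expand path reads similarly: per the ASSUMPTIONS the caller sets both middle and next, and vma_expand() now records the target itself. A sketch mirroring the expand_existing() tests later in this patch:

        middle = alloc_and_link_vma(&mm, 0x0000, 0x3000, 0, flags);
        next   = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);

        vmg_set_range(&vmg, 0x0000, 0x5000, 0, flags);
        vmg.middle = middle; /* The VMA to expand... */
        vmg.next = next;     /* ...over next, which is deleted. */

        /* middle now spans [0, 0x5000). */
        ASSERT_EQ(expand_existing(&vmg), 0);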
@@ -1508,7 +1514,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
*/
static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
{
- struct vm_area_struct *vma = vmg->vma;
+ struct vm_area_struct *vma = vmg->middle;
struct vm_area_struct *merged;
/* First, try to merge. */
@@ -1605,7 +1611,7 @@ struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
vmg.next = vma_iter_next_rewind(vmi, NULL);
- vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
+ vmg.middle = NULL; /* We use the VMA to populate VMG fields only. */
return vma_merge_new_range(&vmg);
}
@@ -1726,7 +1732,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (new_vma && new_vma->vm_start < addr + len)
return NULL; /* should never get here */
- vmg.vma = NULL; /* New VMA range. */
+ vmg.middle = NULL; /* New VMA range. */
vmg.pgoff = pgoff;
vmg.next = vma_iter_next_rewind(&vmi, NULL);
new_vma = vma_merge_new_range(&vmg);
@@ -69,16 +69,48 @@ enum vma_merge_flags {
VMG_FLAG_JUST_EXPAND = 1 << 0,
};
-/* Represents a VMA merge operation. */
+/*
+ * Describes a VMA merge operation and is threaded throughout it.
+ *
+ * Any of the fields may be mutated by the merge operation, so no guarantees
+ * are made about the contents of this structure after a merge has completed.
+ */
struct vma_merge_struct {
struct mm_struct *mm;
struct vma_iterator *vmi;
- pgoff_t pgoff;
+ /*
+ * Adjacent VMAs, any of which may be NULL if not present:
+ *
+ * |------|--------|------|
+ * | prev | middle | next |
+ * |------|--------|------|
+ *
+ * middle may not yet exist in the case of a proposed new VMA being
+ * merged, or it may be an existing VMA.
+ *
+ * next may be assigned by the caller.
+ */
struct vm_area_struct *prev;
- struct vm_area_struct *next; /* Modified by vma_merge(). */
- struct vm_area_struct *vma; /* Either a new VMA or the one being modified. */
+ struct vm_area_struct *middle;
+ struct vm_area_struct *next;
+ /*
+ * This is the VMA we ultimately target to become the merged VMA, with one
+ * exception - the 'merge right, shrink next' case (for details of this
+ * scenario see vma_merge_existing_range()).
+ */
+ struct vm_area_struct *target;
+ /*
+ * Initially, the start, end, pgoff fields are provided by the caller
+ * and describe the proposed new VMA range, whether modifying an
+ * existing VMA (which will be 'middle'), or adding a new one.
+ *
+ * During the merge process these fields are updated to describe the new
+ * range _including those VMAs which will be merged_.
+ */
unsigned long start;
unsigned long end;
+ pgoff_t pgoff;
+
unsigned long flags;
struct file *file;
struct anon_vma *anon_vma;
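Putting the field comments together, a caller describing a proposed new VMA might initialize the structure roughly as follows. This is a sketch only: prev, mm, vmi and flags are assumed declarations, and target is deliberately absent since only the merge logic assigns it:

        struct vma_merge_struct vmg = {
                .mm = &mm,
                .vmi = &vmi,
                .prev = prev,   /* NULL if there is no prior VMA. */
                .middle = NULL, /* New range: no existing VMA here. */
                .next = NULL,   /* Determined by the merge logic. */
                .start = 0x3000,
                .end = 0x5000,
                .pgoff = 3,
                .flags = flags,
                .state = VMA_MERGE_START,
                .merge_flags = VMG_FLAG_DEFAULT,
        };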
@@ -118,8 +150,8 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.mm = vma_->vm_mm, \
.vmi = vmi_, \
.prev = prev_, \
+ .middle = vma_, \
.next = NULL, \
- .vma = vma_, \
.start = start_, \
.end = end_, \
.flags = vma_->vm_flags, \
@@ -147,8 +147,8 @@ static void vmg_set_range(struct vma_merge_struct *vmg, unsigned long start,
vma_iter_set(vmg->vmi, start);
vmg->prev = NULL;
+ vmg->middle = NULL;
vmg->next = NULL;
- vmg->vma = NULL;
vmg->start = start;
vmg->end = end;
@@ -338,7 +338,7 @@ static bool test_simple_expand(void)
VMA_ITERATOR(vmi, &mm, 0);
struct vma_merge_struct vmg = {
.vmi = &vmi,
- .vma = vma,
+ .middle = vma,
.start = 0,
.end = 0x3000,
.pgoff = 0,
@@ -631,7 +631,7 @@ static bool test_vma_merge_special_flags(void)
*/
vma = alloc_and_link_vma(&mm, 0x3000, 0x4000, 3, flags);
ASSERT_NE(vma, NULL);
- vmg.vma = vma;
+ vmg.middle = vma;
for (i = 0; i < ARRAY_SIZE(special_flags); i++) {
vm_flags_t special_flag = special_flags[i];
@@ -760,7 +760,7 @@ static bool test_vma_merge_with_close(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
/*
* The VMA being modified in a way that would otherwise merge should
@@ -787,7 +787,7 @@ static bool test_vma_merge_with_close(void)
vma->vm_ops = &vm_ops;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
/*
* Initially this is misapprehended as an out of memory report, as the
@@ -817,7 +817,7 @@ static bool test_vma_merge_with_close(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -843,7 +843,7 @@ static bool test_vma_merge_with_close(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -940,7 +940,7 @@ static bool test_merge_existing(void)
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
- vmg.vma = vma;
+ vmg.middle = vma;
vmg.prev = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
@@ -973,7 +973,7 @@ static bool test_merge_existing(void)
vma_next = alloc_and_link_vma(&mm, 0x6000, 0x9000, 6, flags);
vma_next->vm_ops = &vm_ops; /* This should have no impact. */
vmg_set_range(&vmg, 0x2000, 0x6000, 2, flags);
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1003,7 +1003,7 @@ static bool test_merge_existing(void)
vma->vm_ops = &vm_ops; /* This should have no impact. */
vmg_set_range(&vmg, 0x3000, 0x6000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
@@ -1037,7 +1037,7 @@ static bool test_merge_existing(void)
vma = alloc_and_link_vma(&mm, 0x3000, 0x7000, 3, flags);
vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1067,7 +1067,7 @@ static bool test_merge_existing(void)
vma_next = alloc_and_link_vma(&mm, 0x7000, 0x9000, 7, flags);
vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
vma->anon_vma = &dummy_anon_vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1102,37 +1102,37 @@ static bool test_merge_existing(void)
vmg_set_range(&vmg, 0x4000, 0x5000, 4, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x6000, 0x7000, 6, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x4000, 0x7000, 4, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x4000, 0x6000, 4, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
vmg_set_range(&vmg, 0x5000, 0x6000, 5, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), NULL);
ASSERT_EQ(vmg.state, VMA_MERGE_NOMERGE);
@@ -1197,7 +1197,7 @@ static bool test_anon_vma_non_mergeable(void)
vmg_set_range(&vmg, 0x3000, 0x7000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1277,7 +1277,7 @@ static bool test_dup_anon_vma(void)
vma_next->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0, 0x5000, 0, flags);
- vmg.vma = vma_prev;
+ vmg.middle = vma_prev;
vmg.next = vma_next;
ASSERT_EQ(expand_existing(&vmg), 0);
@@ -1309,7 +1309,7 @@ static bool test_dup_anon_vma(void)
vma_next->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1338,7 +1338,7 @@ static bool test_dup_anon_vma(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1366,7 +1366,7 @@ static bool test_dup_anon_vma(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_prev);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1394,7 +1394,7 @@ static bool test_dup_anon_vma(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma;
- vmg.vma = vma;
+ vmg.middle = vma;
ASSERT_EQ(merge_existing(&vmg), vma_next);
ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
@@ -1432,7 +1432,7 @@ static bool test_vmi_prealloc_fail(void)
vmg_set_range(&vmg, 0x3000, 0x5000, 3, flags);
vmg.prev = vma_prev;
- vmg.vma = vma;
+ vmg.middle = vma;
fail_prealloc = true;
@@ -1458,7 +1458,7 @@ static bool test_vmi_prealloc_fail(void)
vma->anon_vma = &dummy_anon_vma;
vmg_set_range(&vmg, 0, 0x5000, 3, flags);
- vmg.vma = vma_prev;
+ vmg.middle = vma_prev;
vmg.next = vma;
fail_prealloc = true;