The vma_iter_store() and vma_iter_store_gfp() functions can be used
both when adding a new vma and when updating an existing one. However,
for existing ones we do not need to mark them attached, as they are
already marked that way. Add a new_vma parameter to distinguish the two
cases and skip vma_mark_attached() when it is not needed.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/mm.h | 12 ++++++++++++
 mm/nommu.c         |  4 ++--
 mm/vma.c           | 16 ++++++++--------
 mm/vma.h           | 13 +++++++++----
 4 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -821,6 +821,16 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
vma_assert_write_locked(vma);
}
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+ VM_BUG_ON_VMA(vma->detached, vma);
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+ VM_BUG_ON_VMA(!vma->detached, vma);
+}
+
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
vma->detached = false;
@@ -866,6 +876,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
static inline void vma_mark_attached(struct vm_area_struct *vma) {}
static inline void vma_mark_detached(struct vm_area_struct *vma) {}
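
[For reviewers, a self-contained userspace sketch of the contract the new
asserts enforce; illustrative only, not part of the patch. assert() stands
in for VM_BUG_ON_VMA(), and the structure is pared down to the one flag
these helpers inspect.]

	#include <assert.h>
	#include <stdbool.h>

	/* Toy stand-in for the kernel struct; only ->detached matters here. */
	struct vm_area_struct { bool detached; };

	static inline void vma_assert_attached(struct vm_area_struct *vma)
	{
		assert(!vma->detached);	/* VM_BUG_ON_VMA(vma->detached, vma) */
	}

	static inline void vma_assert_detached(struct vm_area_struct *vma)
	{
		assert(vma->detached);	/* VM_BUG_ON_VMA(!vma->detached, vma) */
	}

	static inline void vma_mark_attached(struct vm_area_struct *vma)
	{
		vma->detached = false;
	}

	int main(void)
	{
		struct vm_area_struct vma = { .detached = true };

		vma_assert_detached(&vma);	/* ok: not yet in the VMA tree */
		vma_mark_attached(&vma);
		vma_assert_attached(&vma);	/* ok: attached after insertion */
		return 0;
	}
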
diff --git a/mm/nommu.c b/mm/nommu.c
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1191,7 +1191,7 @@ unsigned long do_mmap(struct file *file,
setup_vma_to_mm(vma, current->mm);
current->mm->map_count++;
/* add the VMA to the tree */
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, true);
/* we flush the region from the icache only when the first executable
* mapping of it is made */
@@ -1356,7 +1356,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
setup_vma_to_mm(vma, mm);
setup_vma_to_mm(new, mm);
- vma_iter_store(vmi, new);
+ vma_iter_store(vmi, new, true);
mm->map_count++;
return 0;
diff --git a/mm/vma.c b/mm/vma.c
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -306,7 +306,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- vma_iter_store(vmi, vp->insert);
+ vma_iter_store(vmi, vp->insert, true);
mm->map_count++;
}
@@ -660,14 +660,14 @@ static int commit_merge(struct vma_merge_struct *vmg,
vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
if (expanded)
- vma_iter_store(vmg->vmi, vmg->vma);
+ vma_iter_store(vmg->vmi, vmg->vma, false);
if (adj_start) {
adjust->vm_start += adj_start;
adjust->vm_pgoff += PHYS_PFN(adj_start);
if (adj_start < 0) {
WARN_ON(expanded);
- vma_iter_store(vmg->vmi, adjust);
+ vma_iter_store(vmg->vmi, adjust, false);
}
}
@@ -1689,7 +1689,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
return -ENOMEM;
vma_start_write(vma);
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, true);
vma_link_file(vma);
mm->map_count++;
validate_mm(mm);
@@ -2368,7 +2368,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
/* Lock the VMA since it is modified after insertion into VMA tree */
vma_start_write(vma);
- vma_iter_store(vmi, vma);
+ vma_iter_store(vmi, vma, true);
map->mm->map_count++;
vma_link_file(vma);
@@ -2542,7 +2542,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
vm_flags_init(vma, flags);
vma->vm_page_prot = vm_get_page_prot(flags);
vma_start_write(vma);
- if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
+ if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL, true))
goto mas_store_fail;
mm->map_count++;
@@ -2785,7 +2785,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
/* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, false);
anon_vma_interval_tree_post_update_vma(vma);
perf_event_mmap(vma);
@@ -2865,7 +2865,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
vma->vm_start = address;
vma->vm_pgoff -= grow;
/* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, false);
anon_vma_interval_tree_post_update_vma(vma);
perf_event_mmap(vma);
diff --git a/mm/vma.h b/mm/vma.h
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -145,7 +145,7 @@ __must_check int vma_shrink(struct vma_iterator *vmi,
unsigned long start, unsigned long end, pgoff_t pgoff);
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
- struct vm_area_struct *vma, gfp_t gfp)
+ struct vm_area_struct *vma, gfp_t gfp, bool new_vma)
{
if (vmi->mas.status != ma_start &&
@@ -157,7 +157,10 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
- vma_mark_attached(vma);
+ if (new_vma)
+ vma_mark_attached(vma);
+ vma_assert_attached(vma);
+
return 0;
}
@@ -366,7 +369,7 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma, bool new_vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
@@ -390,7 +393,9 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
- vma_mark_attached(vma);
+ if (new_vma)
+ vma_mark_attached(vma);
+ vma_assert_attached(vma);
}
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
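
[A closing illustrative note, again not part of the patch: the new_vma
convention can be exercised end to end in a small userspace sketch.
mas_store_prealloc() is reduced to a plain pointer slot here; everything
else mirrors the logic added above.]

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct vm_area_struct { bool detached; };
	/* The maple tree is reduced to a single slot in this sketch. */
	struct vma_iterator { struct vm_area_struct *slot; };

	static void vma_mark_attached(struct vm_area_struct *vma)
	{
		vma->detached = false;
	}

	static void vma_assert_attached(struct vm_area_struct *vma)
	{
		assert(!vma->detached);	/* VM_BUG_ON_VMA() stand-in */
	}

	/* Mirrors the updated helper: mark attached only on first insertion. */
	static void vma_iter_store(struct vma_iterator *vmi,
				   struct vm_area_struct *vma, bool new_vma)
	{
		vmi->slot = vma;	/* stands in for mas_store_prealloc() */
		if (new_vma)
			vma_mark_attached(vma);
		vma_assert_attached(vma);	/* must hold in both cases */
	}

	int main(void)
	{
		struct vma_iterator vmi = { NULL };
		struct vm_area_struct vma = { .detached = true };

		/* New VMA: do_mmap()/vma_link()-style call sites pass true. */
		vma_iter_store(&vmi, &vma, true);
		/* In-place update: expand_upwards()-style sites pass false. */
		vma_iter_store(&vmi, &vma, false);
		return 0;
	}

If a first insertion mistakenly passed false, vma_assert_attached() would
fire, which is exactly the extra debugging coverage this change buys.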