@@ -820,12 +820,21 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
vma_assert_write_locked(vma);
}
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma->detached = false;
+}
+
+static inline void vma_mark_detached(struct vm_area_struct *vma)
{
/* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
+ vma_assert_write_locked(vma);
+ vma->detached = true;
+}
+
+static inline bool is_vma_detached(struct vm_area_struct *vma)
+{
+ return vma->detached;
}
static inline void release_fault_lock(struct vm_fault *vmf)
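The hunk above replaces the two-way vma_mark_detached(vma, detached) with single-purpose helpers: vma_mark_attached(), vma_mark_detached() (which now unconditionally asserts the vma write lock), and an is_vma_detached() accessor. A minimal userspace sketch of the resulting contract follows; struct vma_stub and its write_locked flag are hypothetical stand-ins for struct vm_area_struct and vma_assert_write_locked(), used only to model the invariant, not the kernel implementation.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct vm_area_struct; the write_locked flag
 * models what vma_assert_write_locked() checks in the kernel. */
struct vma_stub {
	bool detached;
	bool write_locked;
};

static void vma_mark_attached(struct vma_stub *vma)
{
	vma->detached = false;
}

static void vma_mark_detached(struct vma_stub *vma)
{
	/* Detaching requires the vma write lock, as the new helper asserts. */
	assert(vma->write_locked);
	vma->detached = true;
}

static bool is_vma_detached(struct vma_stub *vma)
{
	return vma->detached;
}

int main(void)
{
	/* A freshly initialized vma starts out detached (see vma_init() below). */
	struct vma_stub vma = { .detached = true, .write_locked = false };

	vma_mark_attached(&vma);	/* done when the vma is stored in the tree */
	printf("attached: %d\n", !is_vma_detached(&vma));

	vma.write_locked = true;	/* models vma_start_write() */
	vma_mark_detached(&vma);	/* done when the vma is removed */
	printf("detached: %d\n", is_vma_detached(&vma));
	return 0;
}

Built with a plain cc, the sketch prints "attached: 1" then "detached: 1"; leaving write_locked false makes the assert fire, mirroring the assertion the patch makes unconditional when detaching.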
@@ -856,8 +865,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
-static inline void vma_mark_detached(struct vm_area_struct *vma,
- bool detached) {}
+static inline void vma_mark_attached(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma) {}
static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address)
@@ -890,7 +899,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
+#ifdef CONFIG_PER_VMA_LOCK
+ /* vma is not locked, can't use vma_mark_detached() */
+ vma->detached = true;
+#endif
vma_numab_state_init(vma);
vma_lock_init(vma);
}
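Note that vma_init() (and vm_area_dup() below) set vma->detached = true by hand rather than through a helper: a vma under construction is not write-locked, so calling the new vma_mark_detached() here would trip its vma_assert_write_locked(); the #ifdef is needed because the detached field only exists under CONFIG_PER_VMA_LOCK. In the same illustrative vma_stub model as above, the difference is just a direct assignment:

#include <assert.h>
#include <stdbool.h>

struct vma_stub { bool detached; bool write_locked; };	/* illustrative stand-in */

/* Models the CONFIG_PER_VMA_LOCK branch of vma_init(): assign the flag
 * directly, because the helper would assert a write lock that a vma under
 * construction cannot hold yet. */
static void vma_init_sketch(struct vma_stub *vma)
{
	vma->detached = true;
}

int main(void)
{
	struct vma_stub vma = { 0 };

	vma_init_sketch(&vma);
	assert(vma.detached && !vma.write_locked);
	return 0;
}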
@@ -1085,6 +1097,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
@@ -465,6 +465,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
data_race(memcpy(new, orig, sizeof(*new)));
vma_lock_init(new);
INIT_LIST_HEAD(&new->anon_vma_chain);
+#ifdef CONFIG_PER_VMA_LOCK
+ /* vma is not locked, can't use vma_mark_detached() */
+ new->detached = true;
+#endif
vma_numab_state_init(new);
dup_anon_vma_name(orig, new);
@@ -6372,7 +6372,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
goto inval;
/* Check if the VMA got isolated after we found it */
- if (vma->detached) {
+ if (is_vma_detached(vma)) {
vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_MISS);
/* The area was replaced with another one */
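The reader-side logic in lock_vma_under_rcu() is unchanged; only the open-coded vma->detached test becomes is_vma_detached(). The shape of that lock-then-recheck pattern, reduced to a userspace sketch where try_read_lock() and read_unlock() are hypothetical stand-ins for vma_start_read() and vma_end_read(), looks roughly like this:

#include <stdbool.h>
#include <stddef.h>

/* All names below are illustrative stand-ins, not kernel APIs. */
struct vma_stub { bool detached; int readers; };

static bool try_read_lock(struct vma_stub *vma)
{
	vma->readers++;		/* models a successful vma_start_read() */
	return true;
}

static void read_unlock(struct vma_stub *vma)
{
	vma->readers--;		/* models vma_end_read() */
}

static bool is_vma_detached(struct vma_stub *vma)
{
	return vma->detached;
}

/* Lock first, then recheck: the vma may have been detached (isolated from
 * the tree) between the lockless lookup and taking the read lock. */
static struct vma_stub *lock_vma_sketch(struct vma_stub *vma)
{
	if (!vma || !try_read_lock(vma))
		return NULL;		/* caller falls back to mmap_lock */

	if (is_vma_detached(vma)) {
		read_unlock(vma);
		return NULL;		/* area was replaced; retry */
	}
	return vma;
}

int main(void)
{
	struct vma_stub live = { .detached = false };
	struct vma_stub gone = { .detached = true };

	return (lock_vma_sketch(&live) && !lock_vma_sketch(&gone)) ? 0 : 1;
}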
@@ -327,7 +327,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
if (vp->remove) {
again:
- vma_mark_detached(vp->remove, true);
+ vma_mark_detached(vp->remove);
if (vp->file) {
uprobe_munmap(vp->remove, vp->remove->vm_start,
vp->remove->vm_end);
@@ -1220,7 +1220,7 @@ static void reattach_vmas(struct ma_state *mas_detach)
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
- vma_mark_detached(vma, false);
+ vma_mark_attached(vma);
__mt_destroy(mas_detach->tree);
}
@@ -1295,7 +1295,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
if (error)
goto munmap_gather_failed;
- vma_mark_detached(next, true);
+ vma_mark_detached(next);
nrpages = vma_pages(next);
vms->nr_pages += nrpages;
@@ -156,6 +156,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
+ vma_mark_attached(vma);
return 0;
}
@@ -385,6 +386,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
+ vma_mark_attached(vma);
}
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
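With the boolean parameter gone, every path that inserts a vma into the maple tree (vma_iter_store(), vma_iter_store_gfp(), vma_iter_bulk_store() above) follows the same store-then-attach pattern, so a vma only stops being detached once it is actually reachable from the tree. A compressed sketch of that pattern, where a fixed array is a hypothetical stand-in for the maple tree and vma_stub again stands in for struct vm_area_struct:

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins only: a fixed array plays the role of the maple tree. */
struct vma_stub { bool detached; };

#define NR_SLOTS 8
static struct vma_stub *tree[NR_SLOTS];

static void vma_mark_attached(struct vma_stub *vma)
{
	vma->detached = false;
}

/* Mirrors vma_iter_store(): publish the vma in the tree, then mark it attached. */
static void store_and_attach(unsigned int slot, struct vma_stub *vma)
{
	tree[slot] = vma;
	vma_mark_attached(vma);
}

int main(void)
{
	struct vma_stub vma = { .detached = true };	/* fresh vmas start detached */

	store_and_attach(0, &vma);

	/* Anything reachable from the tree must not be detached. */
	for (unsigned int i = 0; i < NR_SLOTS; i++)
		assert(!tree[i] || !tree[i]->detached);
	return 0;
}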
@@ -465,13 +465,17 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
vma->vm_lock_seq = UINT_MAX;
}
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma->detached = false;
+}
+
static inline void vma_assert_write_locked(struct vm_area_struct *);
-static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+static inline void vma_mark_detached(struct vm_area_struct *vma)
{
/* When detaching vma should be write-locked */
- if (detached)
- vma_assert_write_locked(vma);
- vma->detached = detached;
+ vma_assert_write_locked(vma);
+ vma->detached = true;
}
extern const struct vm_operations_struct vma_dummy_vm_ops;
@@ -484,7 +488,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma_mark_detached(vma, false);
+ /* vma is not locked, can't use vma_mark_detached() */
+ vma->detached = true;
vma_lock_init(vma);
}
@@ -510,6 +515,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
memcpy(new, orig, sizeof(*new));
vma_lock_init(new);
INIT_LIST_HEAD(&new->anon_vma_chain);
+ /* vma is not locked, can't use vma_mark_detached() */
+ new->detached = true;
return new;
}