@@ -658,6 +658,37 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}

+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses vma_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return vma_find(vmi, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+#define for_each_vma(vmi, vma) while ((vma = vma_next(&(vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(vmi, vma, end) \
+ while ((vma = vma_find(&(vmi), (end) - 1)) != NULL)
+
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
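This hunk adds the iterator-style lookup helpers next to vma_is_accessible(): vma_find() and vma_next() are thin wrappers around mas_find(), vma_prev() wraps mas_prev(), and the two for_each macros simply drive vma_next()/vma_find() in a loop until they return NULL, visiting VMAs in ascending address order. A minimal usage sketch follows; it is not part of the patch, dump_vmas() is a hypothetical helper, and it relies on the VMA_ITERATOR() macro introduced in the next hunk plus the existing mmap_read_lock()/mmap_read_unlock() and pr_info() kernel APIs:

static void dump_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);	/* iterator seeded at address 0; defined in the next hunk */
	struct vm_area_struct *vma;

	mmap_read_lock(mm);		/* walking the VMAs requires the mmap lock */
	for_each_vma(vmi, vma)
		pr_info("vma %#lx-%#lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
}
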
@@ -691,6 +691,27 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}

+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, mm, addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &mm->mm_mt, \
+ .index = addr, \
+ .node = MAS_START, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ vmi->mas.tree = &mm->mm_mt;
+ vmi->mas.index = addr;
+ vmi->mas.node = MAS_START;
+}
+
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
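The iterator itself is just a wrapper around a maple-tree ma_state: VMA_ITERATOR() and vma_iter_init() only seed the tree pointer, the starting index and MAS_START, so the first vma_find()/vma_next() call performs the actual tree walk from addr, and vma_iter_init() can also re-seed an existing iterator at a new address. As a sketch of a bounded range walk, again not part of the patch (nr_vmas_in_range() is a hypothetical helper and the caller is assumed to already hold the mmap lock):

static unsigned long nr_vmas_in_range(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	/* end is exclusive; for_each_vma_range() passes end - 1 as the maximum index */
	for_each_vma_range(vmi, vma, end)
		nr++;

	return nr;
}
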
@@ -632,7 +632,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
}

/*
- * vma_next() - Get the next VMA.
+ * __vma_next() - Get the next VMA.
* @mm: The mm_struct.
* @vma: The current vma.
*
@@ -640,7 +640,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
*
* Returns: The next VMA after @vma.
*/
-static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
+static inline struct vm_area_struct *__vma_next(struct mm_struct *mm,
struct vm_area_struct *vma)
{
if (!vma)
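The remaining hunks free up the vma_next name for the new iterator API: the old helper, which still walks the vm_next linked list, is renamed to __vma_next(), and its callers in vma_merge(), unmap_region() and __do_munmap() are updated below. For comparison, here is a rough sketch of what converting a plain list walk to the new API looks like; frob() is a hypothetical per-VMA callback and neither function is part of this patch:

/* Before: the legacy vm_next linked-list walk. */
static void walk_old(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		frob(vma);
}

/* After: the same walk through the maple-tree iterator. */
static void walk_new(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma)
		frob(vma);
}
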
@@ -1333,7 +1333,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (vm_flags & VM_SPECIAL)
return NULL;

- next = vma_next(mm, prev);
+ next = __vma_next(mm, prev);
area = next;
if (area && area->vm_end == end) /* cases 6, 7, 8 */
next = next->vm_next;
@@ -2856,7 +2856,7 @@ static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end)
{
- struct vm_area_struct *next = vma_next(mm, prev);
+ struct vm_area_struct *next = __vma_next(mm, prev);
struct mmu_gather tlb;

lru_add_drain();
@@ -3061,7 +3061,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
if (error)
goto split_failed;
}
- vma = vma_next(mm, prev);
+ vma = __vma_next(mm, prev);
if (unlikely(uf)) {
/*