--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2249,7 +2249,7 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
vma; vma = vma_interval_tree_iter_next(vma, start, last))
#define vma_interval_tree_foreach_stab(vma, root, start) \
- vma_interval_tree_foreach(vma, root, start, start)
+ vma_interval_tree_foreach(vma, root, start, start + 1)
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
struct rb_root_cached *root);
@@ -2269,7 +2269,7 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
#define anon_vma_interval_tree_foreach_stab(vma, root, start) \
- anon_vma_interval_tree_foreach(vma, root, start, start)
+ anon_vma_interval_tree_foreach(vma, root, start, start + 1)
/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
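A note on the _stab change above: under half-open semantics a point lookup at
pgoff start becomes the degenerate one-page interval [start, start + 1). A
minimal userspace sketch of the overlap test this relies on (the function
names here are illustrative, not kernel APIs):

	#include <assert.h>
	#include <stdbool.h>

	/* Two half-open intervals [a1, b1) and [a2, b2) overlap iff
	 * each one starts before the other ends. */
	static bool overlaps(unsigned long a1, unsigned long b1,
			     unsigned long a2, unsigned long b2)
	{
		return a1 < b2 && a2 < b1;
	}

	int main(void)
	{
		/* A stab at pgoff 5 is the one-page interval [5, 5 + 1):
		 * it hits [0, 6) but not [6, 10), exactly matching the
		 * point test "a <= 5 && 5 < b". */
		assert(overlaps(0, 6, 5, 5 + 1));
		assert(!overlaps(6, 10, 5, 5 + 1));
		return 0;
	}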
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -8,7 +8,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
-#include <linux/interval_tree_generic.h>
+#include <linux/interval_tree_gen.h>
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
@@ -17,7 +17,7 @@ static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
- return v->vm_pgoff + vma_pages(v) - 1;
+ return v->vm_pgoff + vma_pages(v);
}
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
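With the half-open convention, vma_last_pgoff() now returns the first page
offset past the mapping rather than the last one inside it, which is why the
rest of the conversion is straightforward: callers can pass range ends through
unadjusted. A toy model of the new arithmetic (stand-in names, not kernel
code):

	#include <assert.h>

	/* Stand-ins for v->vm_pgoff and vma_pages(v). */
	static unsigned long last_pgoff(unsigned long vm_pgoff,
					unsigned long npages)
	{
		return vm_pgoff + npages;	/* exclusive end under [a, b) */
	}

	int main(void)
	{
		/* A VMA mapping 4 file pages starting at pgoff 10 covers
		 * pages 10..13; the exclusive end is 14 (the old fully
		 * closed code returned 13, the last covered page). */
		assert(last_pgoff(10, 4) == 14);
		return 0;
	}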
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2679,7 +2679,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
details.check_mapping = even_cows ? NULL : mapping;
details.first_index = start;
- details.last_index = start + nr - 1;
+ details.last_index = start + nr;
if (details.last_index < details.first_index)
details.last_index = ULONG_MAX;
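The pre-existing wrap-around clamp still works with an exclusive last_index:
if start + nr overflows, the sum compares below start and is clamped. A
standalone check of that arithmetic (assuming unsigned long wrap-around, as
on any Linux target):

	#include <assert.h>
	#include <limits.h>

	int main(void)
	{
		unsigned long first = ULONG_MAX - 1, nr = 4;
		unsigned long last = first + nr;	/* wraps around to 2 */

		if (last < first)	/* the same test as in the hunk above */
			last = ULONG_MAX;

		assert(last == ULONG_MAX);
		return 0;
	}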
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1793,7 +1793,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
size_t r_size, r_top;
low = newsize >> PAGE_SHIFT;
- high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ high = (size + PAGE_SIZE) >> PAGE_SHIFT;
down_write(&nommu_region_sem);
i_mmap_lock_read(inode->i_mapping);
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1826,7 +1826,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
return;
pgoff_start = page_to_pgoff(page);
- pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+ pgoff_end = pgoff_start + hpage_nr_pages(page);
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
@@ -1879,11 +1879,11 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
return;
pgoff_start = page_to_pgoff(page);
- pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
+ pgoff_end = pgoff_start + hpage_nr_pages(page);
if (!locked)
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap,
- pgoff_start, pgoff_end) {
+ pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);
cond_resched();
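In both rmap walkers the interval for a compound page is now
[pgoff_start, pgoff_start + hpage_nr_pages(page)), i.e. the end is the first
subpage offset past the page. A small userspace sketch of that bookkeeping
(the 2 MiB THP numbers are illustrative):

	#include <assert.h>

	int main(void)
	{
		/* Model a 2 MiB THP at file pgoff 1024 spanning 512 base
		 * pages, i.e. what hpage_nr_pages() would report. */
		unsigned long pgoff_start = 1024, nr_pages = 512;
		unsigned long pgoff_end = pgoff_start + nr_pages; /* exclusive */

		/* Subpages 1024..1535 are covered; 1536 is the first
		 * one past the page. */
		assert(pgoff_end == 1536);
		return 0;
	}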
The vma and anon vma interval trees really want [a, b) intervals, not fully
closed ones. As such, convert them to use the new interval_tree_gen.h.
Because of vma_last_pgoff(), the conversion is quite straightforward.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
---
 include/linux/mm.h | 4 ++--
 mm/interval_tree.c | 4 ++--
 mm/memory.c        | 2 +-
 mm/nommu.c         | 2 +-
 mm/rmap.c          | 6 +++---
 5 files changed, 9 insertions(+), 9 deletions(-)
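One way to see why [a, b) is the better fit, beyond matching how VMAs
themselves are described by vm_start/vm_end: adjacent ranges tile without
off-by-one adjustments, and lengths fall out as simple subtraction. A small
illustration in plain C (nothing kernel-specific):

	#include <assert.h>
	#include <stdbool.h>

	static bool overlaps(unsigned long a1, unsigned long b1,
			     unsigned long a2, unsigned long b2)
	{
		return a1 < b2 && a2 < b1;
	}

	int main(void)
	{
		/* Adjacent [0, 4) and [4, 8) tile the pgoff space: they
		 * share a boundary but no pages. */
		assert(!overlaps(0, 4, 4, 8));

		/* The length of [a, b) is simply b - a, no +1 needed. */
		assert(4 - 0 + (8 - 4) == 8 - 0);
		return 0;
	}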