@@ -1223,6 +1223,8 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
info.length = len;
info.low_limit = addr;
info.high_limit = limit;
+ info.hint = addr;
+ info.mmap_flags = flags;
return vm_unmapped_area(&info);
}
@@ -29,6 +29,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/*
* We enforce the MAP_FIXED case.
*/
@@ -36,6 +36,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/*
* We only need to do colour alignment if either the I or D
* caches alias.
@@ -56,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (len > TASK_SIZE)
return -ENOMEM;
+
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
@@ -88,6 +92,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/*
* We only need to do colour alignment if either the I or D
* caches alias.
@@ -35,6 +35,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
.align_offset = pgoff << PAGE_SHIFT
};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/*
* We only need to do colour alignment if either the I or D
* caches alias.
@@ -27,6 +27,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
int do_color_align;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -36,6 +36,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
int do_color_align;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -108,6 +108,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
.length = len
};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -286,6 +286,10 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
.length = len,
.align_mask = PAGE_MASK & ((1ul << pshift) - 1),
};
+
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/*
* Check till the allow max value for this mmap request
*/
@@ -331,6 +335,9 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
};
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/*
* If we are trying to allocate above DEFAULT_MAP_WINDOW
* Add the different to the mmap_base.
@@ -254,6 +254,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.low_limit = current->mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.hint = addr;
+ info.mmap_flags = flags;
return vm_unmapped_area(&info);
}
@@ -270,6 +272,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.low_limit = PAGE_SIZE;
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.hint = addr;
+ info.mmap_flags = flags;
addr = vm_unmapped_area(&info);
/*
@@ -88,6 +88,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_area_struct *vma;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
@@ -123,6 +126,9 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
@@ -59,6 +59,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
int do_colour_align;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -107,6 +110,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
int do_colour_align;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -43,6 +43,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
{
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -95,6 +95,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
int do_color_align;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -155,6 +158,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
int do_color_align;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/* This should only ever run for 32-bit processes. */
BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -40,6 +40,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = min(task_size, VA_EXCLUDE_START);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.hint = addr;
+ info.mmap_flags = flags;
addr = vm_unmapped_area(&info);
if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
@@ -71,6 +73,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.hint = addr;
+ info.mmap_flags = flags;
addr = vm_unmapped_area(&info);
/*
@@ -129,6 +129,9 @@ arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned l
struct vm_unmapped_area_info info = {};
unsigned long begin, end;
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (flags & MAP_FIXED)
return addr;
@@ -167,6 +170,9 @@ arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr0,
unsigned long addr = addr0;
struct vm_unmapped_area_info info = {};
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
@@ -29,6 +29,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.length = len;
info.low_limit = get_mmap_base(1);
+ info.hint = addr;
+ info.mmap_flags = flags;
/*
* If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
@@ -52,6 +54,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = get_mmap_base(0);
+ info.hint = addr;
+ info.mmap_flags = flags;
/*
* If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
@@ -182,6 +182,8 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
info.low_limit = current->mm->mmap_base;
info.high_limit = arch_get_mmap_end(addr, len, flags);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.hint = addr;
+ info.mmap_flags = flags;
return vm_unmapped_area(&info);
}
@@ -197,6 +199,8 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.hint = addr;
+ info.mmap_flags = flags;
addr = vm_unmapped_area(&info);
/*
@@ -3445,6 +3445,8 @@ struct vm_unmapped_area_info {
unsigned long align_mask;
unsigned long align_offset;
unsigned long start_gap;
+ unsigned long hint;
+ unsigned long mmap_flags;
};
extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
@@ -1796,6 +1796,9 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_unmapped_area_info info = {};
const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+ info.hint = addr;
+ info.mmap_flags = flags;
+
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
@@ -1841,6 +1844,9 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
struct vm_unmapped_area_info info = {};
const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
+ info.hint = addr;
+ info.mmap_flags = flags;
+
/* requested length too big for entire address space */
if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
The hint address and mmap_flags are necessary to determine whether MAP_BELOW_HINT requirements are satisfied. Signed-off-by: Charlie Jenkins <charlie@rivosinc.com> --- arch/alpha/kernel/osf_sys.c | 2 ++ arch/arc/mm/mmap.c | 3 +++ arch/arm/mm/mmap.c | 7 +++++++ arch/csky/abiv1/mmap.c | 3 +++ arch/loongarch/mm/mmap.c | 3 +++ arch/mips/mm/mmap.c | 3 +++ arch/parisc/kernel/sys_parisc.c | 3 +++ arch/powerpc/mm/book3s64/slice.c | 7 +++++++ arch/s390/mm/hugetlbpage.c | 4 ++++ arch/s390/mm/mmap.c | 6 ++++++ arch/sh/mm/mmap.c | 6 ++++++ arch/sparc/kernel/sys_sparc_32.c | 3 +++ arch/sparc/kernel/sys_sparc_64.c | 6 ++++++ arch/sparc/mm/hugetlbpage.c | 4 ++++ arch/x86/kernel/sys_x86_64.c | 6 ++++++ arch/x86/mm/hugetlbpage.c | 4 ++++ fs/hugetlbfs/inode.c | 4 ++++ include/linux/mm.h | 2 ++ mm/mmap.c | 6 ++++++ 19 files changed, 82 insertions(+)