@@ -73,6 +73,8 @@ static inline unsigned long mmap_base(unsigned long rnd,
static int get_align_mask(struct file *filp, unsigned long flags)
{
+ if (filp && is_file_hugepages(filp))
+ return huge_page_mask_align(filp);
if (!(current->flags & PF_RANDOMIZE))
return 0;
if (filp || (flags & MAP_SHARED))
@@ -106,7 +108,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = get_align_mask(filp, flags);
- info.align_offset = pgoff << PAGE_SHIFT;
+ if (!(filp && is_file_hugepages(filp)))
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
if (offset_in_page(addr))
return addr;
@@ -144,7 +147,8 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
info.align_mask = get_align_mask(filp, flags);
- info.align_offset = pgoff << PAGE_SHIFT;
+ if (!(filp && is_file_hugepages(filp)))
+ info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
/*
We want to stop special casing hugetlb mappings and make them go through
generic channels, so teach arch_get_unmapped_area{_topdown} to handle those.

The s390-specific hugetlb function does not set info.align_offset, so do the
same here for compatibility.

Signed-off-by: Oscar Salvador <osalvador@suse.de>
---
 arch/s390/mm/mmap.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
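For illustration only, not part of the patch: a stand-alone user-space sketch of
the alignment arithmetic this change relies on. The PAGE_SHIFT/HPAGE_SHIFT
values and the local hpage_mask_align() helper are assumptions standing in for
the kernel's huge_page_mask_align() (include/linux/hugetlb.h), and the rounding
step mirrors what the generic unmapped_area() search is assumed to do with
align_mask/align_offset. It shows why leaving align_offset at 0 for hugetlb
files yields a huge-page-aligned address, matching the old s390 hugetlb helper.

/* Stand-alone sketch, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_SHIFT	20	/* assume 1 MiB huge pages for the example */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

/*
 * Local stand-in for the kernel's huge_page_mask_align(): the address bits
 * between PAGE_SHIFT and the huge page shift.
 */
static unsigned long hpage_mask_align(void)
{
	return PAGE_MASK & ~HPAGE_MASK;
}

int main(void)
{
	unsigned long align_mask = hpage_mask_align();
	unsigned long align_offset = 0;		/* what the patch enforces for hugetlb */
	unsigned long gap = 0x3ff5a000UL;	/* some page-aligned gap start */

	/*
	 * Rounding as the generic bottom-up search is assumed to do it:
	 * advance gap to the next addr with (addr - align_offset) & align_mask == 0.
	 * With align_offset == 0 the result is huge-page aligned.
	 */
	unsigned long addr = gap + ((align_offset - gap) & align_mask);

	printf("align_mask = %#lx\n", align_mask);
	printf("gap %#lx -> addr %#lx, huge-page aligned: %s\n",
	       gap, addr, (addr & ~HPAGE_MASK) ? "no" : "yes");
	return 0;
}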