@@ -1364,7 +1364,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = NULL;
struct vm_area_struct *next, *prev, *merge;
- pgoff_t pglen = len >> PAGE_SHIFT;
+ pgoff_t pglen = PHYS_PFN(len);
unsigned long charged = 0;
struct vma_munmap_struct vms;
struct ma_state mas_detach;
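Note: PHYS_PFN() is defined in include/linux/pfn.h as ((unsigned long)((x) >> PAGE_SHIFT)), so pglen holds exactly the same value as the open-coded shift did; the conversion is about readability and reuse, not behavior. A minimal user-space sketch of the equivalence (the PAGE_SHIFT value of 12, i.e. 4 KiB pages, is an assumption for illustration; the kernel takes it from asm/page.h):

/* Standalone demo: PHYS_PFN(len) and len >> PAGE_SHIFT agree. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	unsigned long len = 5UL << PAGE_SHIFT;	/* a 5-page mapping length */

	assert(PHYS_PFN(len) == (len >> PAGE_SHIFT));
	printf("pglen = %lu pages\n", PHYS_PFN(len));
	return 0;
}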
@@ -1384,7 +1384,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* MAP_FIXED may remove pages of mappings that intersect with the
* requested mapping. Account for the pages it would unmap.
*/
- if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages))
+ if (!may_expand_vm(mm, vm_flags, pglen - nr_pages))
return -ENOMEM;

if (unlikely(!can_modify_mm(mm, addr, end)))
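With pglen computed once up front, the may_expand_vm() check reads as intended: a MAP_FIXED request that unmaps nr_pages existing pages only grows the address space by the difference. A hedged sketch of that arithmetic (the helper and parameter names below are illustrative stand-ins, not kernel API):

#include <stdbool.h>

/* Sketch only: mirrors may_expand_vm(mm, vm_flags, pglen - nr_pages).
 * total_vm and limit_pages are hypothetical stand-ins for mm state. */
static bool may_expand(unsigned long total_vm, unsigned long limit_pages,
		       unsigned long pglen, unsigned long nr_pages)
{
	/* Only the net growth counts against the address-space limit. */
	return total_vm + (pglen - nr_pages) <= limit_pages;
}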
@@ -1415,7 +1415,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* Private writable mapping: check memory availability
*/
if (accountable_mapping(file, vm_flags)) {
- charged = len >> PAGE_SHIFT;
+ charged = pglen;
charged -= nr_accounted;
if (security_vm_enough_memory_mm(mm, charged))
goto abort_munmap;
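The same substitution applies to the overcommit charge: a private writable mapping is charged pglen pages, minus whatever was already accounted in the region being replaced, before security_vm_enough_memory_mm() is consulted. An illustrative-only sketch of the computation (the helper name is hypothetical):

/* Sketch of the charge computed above; names are hypothetical. */
static unsigned long mapping_charge(unsigned long pglen,
				    unsigned long nr_accounted)
{
	unsigned long charged = pglen;	/* was: len >> PAGE_SHIFT */

	charged -= nr_accounted;	/* replaced pages stay accounted */
	return charged;			/* fed to security_vm_enough_memory_mm() */
}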
@@ -1575,14 +1575,14 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
if (vms.nr_pages)
vms_complete_munmap_vmas(&vms, &mas_detach);

- vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+ vm_stat_account(mm, vm_flags, pglen);
if (vm_flags & VM_LOCKED) {
if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm))
vm_flags_clear(vma, VM_LOCKED_MASK);
else
- mm->locked_vm += (len >> PAGE_SHIFT);
+ mm->locked_vm += pglen;
}

if (file)
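The last two conversions cover the statistics and mlock paths: vm_stat_account() and mm->locked_vm both want a page count, which pglen now provides directly. A sketch of the mlock bookkeeping under assumed semantics (is_special condenses the VM_SPECIAL/DAX/hugetlb/gate-VMA tests above; the flag value is illustrative, not the real mask):

#include <stdbool.h>

#define VM_LOCKED_DEMO 0x2000UL		/* illustrative bit for the demo */

/* Special mappings cannot stay mlocked, so the flag is dropped;
 * otherwise the locked page count grows by the mapping's pglen. */
static void account_locked(unsigned long *locked_vm, unsigned long *vm_flags,
			   unsigned long pglen, bool is_special)
{
	if (is_special)
		*vm_flags &= ~VM_LOCKED_DEMO;	/* mirrors vm_flags_clear() */
	else
		*locked_vm += pglen;		/* mirrors mm->locked_vm += pglen */
}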