
mm: remove unnecessary wrapper function do_mmap_pgoff()

Message ID: 20200727194109.1371462-1-pcc@google.com
State: New, archived
Series: mm: remove unnecessary wrapper function do_mmap_pgoff()

Commit Message

Peter Collingbourne July 27, 2020, 7:41 p.m. UTC
The current split between do_mmap() and do_mmap_pgoff() was introduced
in commit 1fcfd8db7f82 ("mm, mpx: add "vm_flags_t vm_flags"
arg to do_mmap_pgoff()") to support MPX. The wrapper function
do_mmap_pgoff() always passed 0 as the value of the vm_flags argument
to do_mmap(). However, MPX support has subsequently been removed from
the kernel, leaving no direct callers of do_mmap(): every remaining
call goes via do_mmap_pgoff(). Simplify the code by removing
do_mmap_pgoff() and changing all callers to call do_mmap() directly,
which no longer takes a vm_flags argument.
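
As a sketch of the resulting call-site change (argument names taken
from the mm/util.c caller; vm_flags is now derived inside do_mmap()
from prot, flags and mm->def_flags, as the mm/mmap.c hunk below shows):

	/* before: the wrapper forwarded to do_mmap() with vm_flags = 0 */
	ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
			    &populate, &uf);

	/* after: call do_mmap() directly; no vm_flags parameter */
	ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate, &uf);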

Signed-off-by: Peter Collingbourne <pcc@google.com>
---
 fs/aio.c             |  6 +++---
 fs/hugetlbfs/inode.c |  2 +-
 include/linux/fs.h   |  2 +-
 include/linux/mm.h   | 12 +-----------
 ipc/shm.c            |  2 +-
 mm/mmap.c            | 16 ++++++++--------
 mm/nommu.c           |  6 +++---
 mm/shmem.c           |  2 +-
 mm/util.c            |  4 ++--
 9 files changed, 21 insertions(+), 31 deletions(-)

Comments

David Hildenbrand July 29, 2020, 9:05 a.m. UTC | #1
On 27.07.20 21:41, Peter Collingbourne wrote:
> The current split between do_mmap() and do_mmap_pgoff() was introduced
> in commit 1fcfd8db7f82 ("mm, mpx: add "vm_flags_t vm_flags"
> arg to do_mmap_pgoff()") to support MPX. The wrapper function
> do_mmap_pgoff() always passed 0 as the value of the vm_flags argument
> to do_mmap(). However, MPX support has subsequently been removed from
> the kernel, leaving no direct callers of do_mmap(): every remaining
> call goes via do_mmap_pgoff(). Simplify the code by removing
> do_mmap_pgoff() and changing all callers to call do_mmap() directly,
> which no longer takes a vm_flags argument.
> 
> Signed-off-by: Peter Collingbourne <pcc@google.com>
> [...]

LGTM and the history makes sense

Reviewed-by: David Hildenbrand <david@redhat.com>

Patch

diff --git a/fs/aio.c b/fs/aio.c
index 91e7cc4a9f17..5736bff48e9e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -525,9 +525,9 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
 		return -EINTR;
 	}
 
-	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
-				       PROT_READ | PROT_WRITE,
-				       MAP_SHARED, 0, &unused, NULL);
+	ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
+				 PROT_READ | PROT_WRITE,
+				 MAP_SHARED, 0, &unused, NULL);
 	mmap_write_unlock(mm);
 	if (IS_ERR((void *)ctx->mmap_base)) {
 		ctx->mmap_size = 0;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ef5313f9c78f..523954d00dff 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -140,7 +140,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	 * already been checked by prepare_hugepage_range.  If you add
 	 * any error returns here, do so after setting VM_HUGETLB, so
 	 * is_vm_hugetlb_page tests below unmap_region go the right
-	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * way when do_mmap unwinds (may be important on powerpc
 	 * and ia64).
 	 */
 	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f5abba86107d..47bdfb907158 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -559,7 +559,7 @@ static inline int mapping_mapped(struct address_space *mapping)
 
 /*
  * Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
  * marks vma as VM_SHARED if it is shared, and the file was opened for
  * writing i.e. vma may be mprotected writable even if now readonly.
  *
diff --git a/include/linux/mm.h b/include/linux/mm.h
index dc7b87310c10..256e1bc83460 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2579,23 +2579,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct list_head *uf);
 extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
-	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf);
+	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
 		       struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
 extern int do_madvise(unsigned long start, size_t len_in, int behavior);
 
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot, unsigned long flags,
-	unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf)
-{
-	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
-
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
 			 int ignore_errors);
diff --git a/ipc/shm.c b/ipc/shm.c
index 0a6dd94afa21..bf38d7e2fbe9 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1558,7 +1558,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
 			goto invalid;
 	}
 
-	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
+	addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
 	*raddr = addr;
 	err = 0;
 	if (IS_ERR_VALUE(addr))
diff --git a/mm/mmap.c b/mm/mmap.c
index 59a4682ebf3f..d43cc3b0187c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1030,7 +1030,7 @@ static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
  *
  * We don't check here for the merged mmap wrapping around the end of pagecache
- * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
+ * indices (16TB on ia32) because do_mmap() does not permit mmap's which
  * wrap, nor mmaps which cover the final page at index -1UL.
  */
 static int
@@ -1365,11 +1365,11 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
  */
 unsigned long do_mmap(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
-			unsigned long flags, vm_flags_t vm_flags,
-			unsigned long pgoff, unsigned long *populate,
-			struct list_head *uf)
+			unsigned long flags, unsigned long pgoff,
+			unsigned long *populate, struct list_head *uf)
 {
 	struct mm_struct *mm = current->mm;
+	vm_flags_t vm_flags;
 	int pkey = 0;
 
 	*populate = 0;
@@ -1431,7 +1431,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	 * to. we assume access permissions have been handled by the open
 	 * of the memory object, so we don't do any here.
 	 */
-	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
 	if (flags & MAP_LOCKED)
@@ -2209,7 +2209,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		/*
 		 * mmap_region() will call shmem_zero_setup() to create a file,
 		 * so use shmem's get_unmapped_area in case it can be huge.
-		 * do_mmap_pgoff() will clear pgoff, so match alignment.
+		 * do_mmap() will clear pgoff, so match alignment.
 		 */
 		pgoff = 0;
 		get_area = shmem_get_unmapped_area;
@@ -2970,7 +2970,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	}
 
 	file = get_file(vma->vm_file);
-	ret = do_mmap_pgoff(vma->vm_file, start, size,
+	ret = do_mmap(vma->vm_file, start, size,
 			prot, flags, pgoff, &populate, NULL);
 	fput(file);
 out:
@@ -3189,7 +3189,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 	 * By setting it to reflect the virtual start address of the
 	 * vma, merges and splits can happen in a seamless way, just
 	 * using the existing file pgoff checks and manipulations.
-	 * Similarly in do_mmap_pgoff and in do_brk.
+	 * Similarly in do_mmap and in do_brk.
 	 */
 	if (vma_is_anonymous(vma)) {
 		BUG_ON(vma->anon_vma);
diff --git a/mm/nommu.c b/mm/nommu.c
index f32a69095d50..11a27e5770ba 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1078,7 +1078,6 @@ unsigned long do_mmap(struct file *file,
 			unsigned long len,
 			unsigned long prot,
 			unsigned long flags,
-			vm_flags_t vm_flags,
 			unsigned long pgoff,
 			unsigned long *populate,
 			struct list_head *uf)
@@ -1086,6 +1085,7 @@ unsigned long do_mmap(struct file *file,
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *rb;
+	vm_flags_t vm_flags;
 	unsigned long capabilities, result;
 	int ret;
 
@@ -1104,7 +1104,7 @@ unsigned long do_mmap(struct file *file,
 
 	/* we've determined that we can make the mapping, now translate what we
 	 * now know into VMA flags */
-	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
+	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
 	/* we're going to need to record the mapping */
 	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ EXPORT_SYMBOL_GPL(access_process_vm);
  *
  * Check the shared mappings on an inode on behalf of a shrinking truncate to
  * make sure that that any outstanding VMAs aren't broken and then shrink the
- * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
+ * vm_regions that extend that beyond so that do_mmap() doesn't
  * automatically grant mappings that are too large.
  */
 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
diff --git a/mm/shmem.c b/mm/shmem.c
index a0dbe62f8042..0ef2eb83c2df 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4128,7 +4128,7 @@ EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
- * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
+ * @vma: the vma to be mmapped is prepared by do_mmap
  */
 int shmem_zero_setup(struct vm_area_struct *vma)
 {
diff --git a/mm/util.c b/mm/util.c
index c63c8e47be57..329150d35dfe 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -503,8 +503,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	if (!ret) {
 		if (mmap_write_lock_killable(mm))
 			return -EINTR;
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
-				    &populate, &uf);
+		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
+			      &uf);
 		mmap_write_unlock(mm);
 		userfaultfd_unmap_complete(mm, &uf);
 		if (populate)