[4/8] mmap locking API: convert mmap_sem call sites missed by coccinelle
diff mbox series

Message ID 20200326070236.235835-5-walken@google.com
State New
Headers show
Series
  • Add a new mmap locking API wrapping mmap_sem calls
Related show

Commit Message

Michel Lespinasse March 26, 2020, 7:02 a.m. UTC
Convert the last few remaining mmap_sem rwsem calls to use the new
mmap locking API. These were missed by coccinelle for some reason
(I think coccinelle does not support some of the preprocessor
constructs in these files?)

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/mips/mm/fault.c           | 10 +++++-----
 arch/x86/kvm/mmu/paging_tmpl.h |  8 ++++----
 drivers/android/binder_alloc.c |  4 ++--
 fs/proc/base.c                 |  6 +++---
 4 files changed, 14 insertions(+), 14 deletions(-)

Comments

kernel test robot March 26, 2020, 11:13 p.m. UTC | #1
Hi Michel,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on rdma/for-next]
[also build test ERROR on linus/master v5.6-rc7]
[cannot apply to powerpc/next next-20200326]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Michel-Lespinasse/Add-a-new-mmap-locking-API-wrapping-mmap_sem-calls/20200327-035215
base:   https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git for-next
config: mips-allyesconfig (attached as .config)
compiler: mips-linux-gcc (GCC) 9.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=9.2.0 make.cross ARCH=mips 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   arch/mips/mm/fault.c: In function '__do_page_fault':
>> arch/mips/mm/fault.c:202:2: error: implicit declaration of function 'mm_read_unlock'; did you mean 'mmap_read_unlock'? [-Werror=implicit-function-declaration]
     202 |  mm_read_unlock(mm);
         |  ^~~~~~~~~~~~~~
         |  mmap_read_unlock
   cc1: all warnings being treated as errors

vim +202 arch/mips/mm/fault.c

    89	
    90		/*
    91		 * If we're in an interrupt or have no user
    92		 * context, we must not take the fault..
    93		 */
    94		if (faulthandler_disabled() || !mm)
    95			goto bad_area_nosemaphore;
    96	
    97		if (user_mode(regs))
    98			flags |= FAULT_FLAG_USER;
    99	retry:
   100		mmap_read_lock(mm);
   101		vma = find_vma(mm, address);
   102		if (!vma)
   103			goto bad_area;
   104		if (vma->vm_start <= address)
   105			goto good_area;
   106		if (!(vma->vm_flags & VM_GROWSDOWN))
   107			goto bad_area;
   108		if (expand_stack(vma, address))
   109			goto bad_area;
   110	/*
   111	 * Ok, we have a good vm_area for this memory access, so
   112	 * we can handle it..
   113	 */
   114	good_area:
   115		si_code = SEGV_ACCERR;
   116	
   117		if (write) {
   118			if (!(vma->vm_flags & VM_WRITE))
   119				goto bad_area;
   120			flags |= FAULT_FLAG_WRITE;
   121		} else {
   122			if (cpu_has_rixi) {
   123				if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
   124	#if 0
   125					pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
   126						  raw_smp_processor_id(),
   127						  current->comm, current->pid,
   128						  field, address, write,
   129						  field, regs->cp0_epc);
   130	#endif
   131					goto bad_area;
   132				}
   133				if (!(vma->vm_flags & VM_READ) &&
   134				    exception_epc(regs) != address) {
   135	#if 0
   136					pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
   137						  raw_smp_processor_id(),
   138						  current->comm, current->pid,
   139						  field, address, write,
   140						  field, regs->cp0_epc);
   141	#endif
   142					goto bad_area;
   143				}
   144			} else {
   145				if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
   146					goto bad_area;
   147			}
   148		}
   149	
   150		/*
   151		 * If for any reason at all we couldn't handle the fault,
   152		 * make sure we exit gracefully rather than endlessly redo
   153		 * the fault.
   154		 */
   155		fault = handle_mm_fault(vma, address, flags);
   156	
   157		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
   158			return;
   159	
   160		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
   161		if (unlikely(fault & VM_FAULT_ERROR)) {
   162			if (fault & VM_FAULT_OOM)
   163				goto out_of_memory;
   164			else if (fault & VM_FAULT_SIGSEGV)
   165				goto bad_area;
   166			else if (fault & VM_FAULT_SIGBUS)
   167				goto do_sigbus;
   168			BUG();
   169		}
   170		if (flags & FAULT_FLAG_ALLOW_RETRY) {
   171			if (fault & VM_FAULT_MAJOR) {
   172				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
   173							  regs, address);
   174				tsk->maj_flt++;
   175			} else {
   176				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
   177							  regs, address);
   178				tsk->min_flt++;
   179			}
   180			if (fault & VM_FAULT_RETRY) {
   181				flags &= ~FAULT_FLAG_ALLOW_RETRY;
   182				flags |= FAULT_FLAG_TRIED;
   183	
   184				/*
   185				 * No need to up_read(&mm->mmap_sem) as we would
   186				 * have already released it in __lock_page_or_retry
   187				 * in mm/filemap.c.
   188				 */
   189	
   190				goto retry;
   191			}
   192		}
   193	
   194		mmap_read_unlock(mm);
   195		return;
   196	
   197	/*
   198	 * Something tried to access memory that isn't in our memory map..
   199	 * Fix it, but check if it's kernel or user first..
   200	 */
   201	bad_area:
 > 202		mm_read_unlock(mm);
   203	
   204	bad_area_nosemaphore:
   205		/* User mode accesses just cause a SIGSEGV */
   206		if (user_mode(regs)) {
   207			tsk->thread.cp0_badvaddr = address;
   208			tsk->thread.error_code = write;
   209			if (show_unhandled_signals &&
   210			    unhandled_signal(tsk, SIGSEGV) &&
   211			    __ratelimit(&ratelimit_state)) {
   212				pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
   213					tsk->comm,
   214					write ? "write access to" : "read access from",
   215					field, address);
   216				pr_info("epc = %0*lx in", field,
   217					(unsigned long) regs->cp0_epc);
   218				print_vma_addr(KERN_CONT " ", regs->cp0_epc);
   219				pr_cont("\n");
   220				pr_info("ra  = %0*lx in", field,
   221					(unsigned long) regs->regs[31]);
   222				print_vma_addr(KERN_CONT " ", regs->regs[31]);
   223				pr_cont("\n");
   224			}
   225			current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
   226			force_sig_fault(SIGSEGV, si_code, (void __user *)address);
   227			return;
   228		}
   229	
   230	no_context:
   231		/* Are we prepared to handle this kernel fault?	 */
   232		if (fixup_exception(regs)) {
   233			current->thread.cp0_baduaddr = address;
   234			return;
   235		}
   236	
   237		/*
   238		 * Oops. The kernel tried to access some bad page. We'll have to
   239		 * terminate things with extreme prejudice.
   240		 */
   241		bust_spinlocks(1);
   242	
   243		printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
   244		       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
   245		       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
   246		       field,  regs->regs[31]);
   247		die("Oops", regs);
   248	
   249	out_of_memory:
   250		/*
   251		 * We ran out of memory, call the OOM killer, and return the userspace
   252		 * (which will retry the fault, or kill us if we got oom-killed).
   253		 */
   254		mm_read_unlock(mm);
   255		if (!user_mode(regs))
   256			goto no_context;
   257		pagefault_out_of_memory();
   258		return;
   259	
   260	do_sigbus:
   261		mm_read_unlock(mm);
   262	
   263		/* Kernel mode? Handle exceptions or die */
   264		if (!user_mode(regs))
   265			goto no_context;
   266	
   267		/*
   268		 * Send a sigbus, regardless of whether we were in kernel
   269		 * or user mode.
   270		 */
   271	#if 0
   272		printk("do_page_fault() #3: sending SIGBUS to %s for "
   273		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
   274		       tsk->comm,
   275		       write ? "write access to" : "read access from",
   276		       field, address,
   277		       field, (unsigned long) regs->cp0_epc,
   278		       field, (unsigned long) regs->regs[31]);
   279	#endif
   280		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
   281		tsk->thread.cp0_badvaddr = address;
   282		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
   283	
   284		return;
   285	#ifndef CONFIG_64BIT
   286	vmalloc_fault:
   287		{
   288			/*
   289			 * Synchronize this task's top level page-table
   290			 * with the 'reference' page table.
   291			 *
   292			 * Do _not_ use "tsk" here. We might be inside
   293			 * an interrupt in the middle of a task switch..
   294			 */
   295			int offset = pgd_index(address);
   296			pgd_t *pgd, *pgd_k;
   297			p4d_t *p4d, *p4d_k;
   298			pud_t *pud, *pud_k;
   299			pmd_t *pmd, *pmd_k;
   300			pte_t *pte_k;
   301	
   302			pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
   303			pgd_k = init_mm.pgd + offset;
   304	
   305			if (!pgd_present(*pgd_k))
   306				goto no_context;
   307			set_pgd(pgd, *pgd_k);
   308	
   309			p4d = p4d_offset(pgd, address);
   310			p4d_k = p4d_offset(pgd_k, address);
   311			if (!p4d_present(*p4d_k))
   312				goto no_context;
   313	
   314			pud = pud_offset(p4d, address);
   315			pud_k = pud_offset(p4d_k, address);
   316			if (!pud_present(*pud_k))
   317				goto no_context;
   318	
   319			pmd = pmd_offset(pud, address);
   320			pmd_k = pmd_offset(pud_k, address);
   321			if (!pmd_present(*pmd_k))
   322				goto no_context;
   323			set_pmd(pmd, *pmd_k);
   324	
   325			pte_k = pte_offset_kernel(pmd_k, address);
   326			if (!pte_present(*pte_k))
   327				goto no_context;
   328			return;
   329		}
   330	#endif
   331	}
   332	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
kernel test robot March 26, 2020, 11:27 p.m. UTC | #2
Hi Michel,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on rdma/for-next]
[also build test ERROR on linus/master v5.6-rc7]
[cannot apply to powerpc/next next-20200326]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Michel-Lespinasse/Add-a-new-mmap-locking-API-wrapping-mmap_sem-calls/20200327-035215
base:   https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git for-next
config: mips-randconfig-a001-20200326 (attached as .config)
compiler: mips64el-linux-gcc (GCC) 5.5.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=5.5.0 make.cross ARCH=mips 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   arch/mips/mm/fault.c: In function '__do_page_fault':
>> arch/mips/mm/fault.c:202:2: error: implicit declaration of function 'mm_read_unlock' [-Werror=implicit-function-declaration]
     mm_read_unlock(mm);
     ^
   cc1: all warnings being treated as errors

vim +/mm_read_unlock +202 arch/mips/mm/fault.c

    89	
    90		/*
    91		 * If we're in an interrupt or have no user
    92		 * context, we must not take the fault..
    93		 */
    94		if (faulthandler_disabled() || !mm)
    95			goto bad_area_nosemaphore;
    96	
    97		if (user_mode(regs))
    98			flags |= FAULT_FLAG_USER;
    99	retry:
   100		mmap_read_lock(mm);
   101		vma = find_vma(mm, address);
   102		if (!vma)
   103			goto bad_area;
   104		if (vma->vm_start <= address)
   105			goto good_area;
   106		if (!(vma->vm_flags & VM_GROWSDOWN))
   107			goto bad_area;
   108		if (expand_stack(vma, address))
   109			goto bad_area;
   110	/*
   111	 * Ok, we have a good vm_area for this memory access, so
   112	 * we can handle it..
   113	 */
   114	good_area:
   115		si_code = SEGV_ACCERR;
   116	
   117		if (write) {
   118			if (!(vma->vm_flags & VM_WRITE))
   119				goto bad_area;
   120			flags |= FAULT_FLAG_WRITE;
   121		} else {
   122			if (cpu_has_rixi) {
   123				if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
   124	#if 0
   125					pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
   126						  raw_smp_processor_id(),
   127						  current->comm, current->pid,
   128						  field, address, write,
   129						  field, regs->cp0_epc);
   130	#endif
   131					goto bad_area;
   132				}
   133				if (!(vma->vm_flags & VM_READ) &&
   134				    exception_epc(regs) != address) {
   135	#if 0
   136					pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
   137						  raw_smp_processor_id(),
   138						  current->comm, current->pid,
   139						  field, address, write,
   140						  field, regs->cp0_epc);
   141	#endif
   142					goto bad_area;
   143				}
   144			} else {
   145				if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
   146					goto bad_area;
   147			}
   148		}
   149	
   150		/*
   151		 * If for any reason at all we couldn't handle the fault,
   152		 * make sure we exit gracefully rather than endlessly redo
   153		 * the fault.
   154		 */
   155		fault = handle_mm_fault(vma, address, flags);
   156	
   157		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
   158			return;
   159	
   160		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
   161		if (unlikely(fault & VM_FAULT_ERROR)) {
   162			if (fault & VM_FAULT_OOM)
   163				goto out_of_memory;
   164			else if (fault & VM_FAULT_SIGSEGV)
   165				goto bad_area;
   166			else if (fault & VM_FAULT_SIGBUS)
   167				goto do_sigbus;
   168			BUG();
   169		}
   170		if (flags & FAULT_FLAG_ALLOW_RETRY) {
   171			if (fault & VM_FAULT_MAJOR) {
   172				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
   173							  regs, address);
   174				tsk->maj_flt++;
   175			} else {
   176				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
   177							  regs, address);
   178				tsk->min_flt++;
   179			}
   180			if (fault & VM_FAULT_RETRY) {
   181				flags &= ~FAULT_FLAG_ALLOW_RETRY;
   182				flags |= FAULT_FLAG_TRIED;
   183	
   184				/*
   185				 * No need to up_read(&mm->mmap_sem) as we would
   186				 * have already released it in __lock_page_or_retry
   187				 * in mm/filemap.c.
   188				 */
   189	
   190				goto retry;
   191			}
   192		}
   193	
   194		mmap_read_unlock(mm);
   195		return;
   196	
   197	/*
   198	 * Something tried to access memory that isn't in our memory map..
   199	 * Fix it, but check if it's kernel or user first..
   200	 */
   201	bad_area:
 > 202		mm_read_unlock(mm);
   203	
   204	bad_area_nosemaphore:
   205		/* User mode accesses just cause a SIGSEGV */
   206		if (user_mode(regs)) {
   207			tsk->thread.cp0_badvaddr = address;
   208			tsk->thread.error_code = write;
   209			if (show_unhandled_signals &&
   210			    unhandled_signal(tsk, SIGSEGV) &&
   211			    __ratelimit(&ratelimit_state)) {
   212				pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
   213					tsk->comm,
   214					write ? "write access to" : "read access from",
   215					field, address);
   216				pr_info("epc = %0*lx in", field,
   217					(unsigned long) regs->cp0_epc);
   218				print_vma_addr(KERN_CONT " ", regs->cp0_epc);
   219				pr_cont("\n");
   220				pr_info("ra  = %0*lx in", field,
   221					(unsigned long) regs->regs[31]);
   222				print_vma_addr(KERN_CONT " ", regs->regs[31]);
   223				pr_cont("\n");
   224			}
   225			current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
   226			force_sig_fault(SIGSEGV, si_code, (void __user *)address);
   227			return;
   228		}
   229	
   230	no_context:
   231		/* Are we prepared to handle this kernel fault?	 */
   232		if (fixup_exception(regs)) {
   233			current->thread.cp0_baduaddr = address;
   234			return;
   235		}
   236	
   237		/*
   238		 * Oops. The kernel tried to access some bad page. We'll have to
   239		 * terminate things with extreme prejudice.
   240		 */
   241		bust_spinlocks(1);
   242	
   243		printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
   244		       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
   245		       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
   246		       field,  regs->regs[31]);
   247		die("Oops", regs);
   248	
   249	out_of_memory:
   250		/*
   251		 * We ran out of memory, call the OOM killer, and return the userspace
   252		 * (which will retry the fault, or kill us if we got oom-killed).
   253		 */
   254		mm_read_unlock(mm);
   255		if (!user_mode(regs))
   256			goto no_context;
   257		pagefault_out_of_memory();
   258		return;
   259	
   260	do_sigbus:
   261		mm_read_unlock(mm);
   262	
   263		/* Kernel mode? Handle exceptions or die */
   264		if (!user_mode(regs))
   265			goto no_context;
   266	
   267		/*
   268		 * Send a sigbus, regardless of whether we were in kernel
   269		 * or user mode.
   270		 */
   271	#if 0
   272		printk("do_page_fault() #3: sending SIGBUS to %s for "
   273		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
   274		       tsk->comm,
   275		       write ? "write access to" : "read access from",
   276		       field, address,
   277		       field, (unsigned long) regs->cp0_epc,
   278		       field, (unsigned long) regs->regs[31]);
   279	#endif
   280		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
   281		tsk->thread.cp0_badvaddr = address;
   282		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
   283	
   284		return;
   285	#ifndef CONFIG_64BIT
   286	vmalloc_fault:
   287		{
   288			/*
   289			 * Synchronize this task's top level page-table
   290			 * with the 'reference' page table.
   291			 *
   292			 * Do _not_ use "tsk" here. We might be inside
   293			 * an interrupt in the middle of a task switch..
   294			 */
   295			int offset = pgd_index(address);
   296			pgd_t *pgd, *pgd_k;
   297			p4d_t *p4d, *p4d_k;
   298			pud_t *pud, *pud_k;
   299			pmd_t *pmd, *pmd_k;
   300			pte_t *pte_k;
   301	
   302			pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
   303			pgd_k = init_mm.pgd + offset;
   304	
   305			if (!pgd_present(*pgd_k))
   306				goto no_context;
   307			set_pgd(pgd, *pgd_k);
   308	
   309			p4d = p4d_offset(pgd, address);
   310			p4d_k = p4d_offset(pgd_k, address);
   311			if (!p4d_present(*p4d_k))
   312				goto no_context;
   313	
   314			pud = pud_offset(p4d, address);
   315			pud_k = pud_offset(p4d_k, address);
   316			if (!pud_present(*pud_k))
   317				goto no_context;
   318	
   319			pmd = pmd_offset(pud, address);
   320			pmd_k = pmd_offset(pud_k, address);
   321			if (!pmd_present(*pmd_k))
   322				goto no_context;
   323			set_pmd(pmd, *pmd_k);
   324	
   325			pte_k = pte_offset_kernel(pmd_k, address);
   326			if (!pte_present(*pte_k))
   327				goto no_context;
   328			return;
   329		}
   330	#endif
   331	}
   332	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org

Patch
diff mbox series

diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 1e8d00793784..0e7c41fb36ba 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,7 @@  static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma(mm, address);
 	if (!vma)
 		goto bad_area;
@@ -191,7 +191,7 @@  static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
 /*
@@ -199,7 +199,7 @@  static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm);
 
 bad_area_nosemaphore:
 	/* User mode accesses just cause a SIGSEGV */
@@ -251,14 +251,14 @@  static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm);
 	if (!user_mode(regs))
 		goto no_context;
 	pagefault_out_of_memory();
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mm_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index e4c8a4cbf407..8cbfe6c96d82 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -165,22 +165,22 @@  static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		unsigned long pfn;
 		unsigned long paddr;
 
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
 		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			return -EFAULT;
 		}
 		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 		paddr = pfn << PAGE_SHIFT;
 		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
 		if (!table) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			return -EFAULT;
 		}
 		ret = CMPXCHG(&table[index], orig_pte, new_pte);
 		memunmap(table);
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 	}
 
 	return (ret != orig_pte);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5e063739a3a8..cbdc43ed0f9f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -932,7 +932,7 @@  enum lru_status binder_alloc_free_page(struct list_head *item,
 	mm = alloc->vma_vm_mm;
 	if (!mmget_not_zero(mm))
 		goto err_mmget;
-	if (!down_read_trylock(&mm->mmap_sem))
+	if (!mmap_read_trylock(mm))
 		goto err_down_read_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
 
@@ -946,7 +946,7 @@  enum lru_status binder_alloc_free_page(struct list_head *item,
 
 		trace_binder_unmap_user_end(alloc, index);
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 479efdba60b9..0dc54b5d75f2 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2280,7 +2280,7 @@  proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 	if (!mm)
 		goto out_put_task;
 
-	ret = down_read_killable(&mm->mmap_sem);
+	ret = mmap_read_lock_killable(mm);
 	if (ret) {
 		mmput(mm);
 		goto out_put_task;
@@ -2307,7 +2307,7 @@  proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 		p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
 		if (!p) {
 			ret = -ENOMEM;
-			up_read(&mm->mmap_sem);
+			mmap_read_unlock(mm);
 			mmput(mm);
 			goto out_put_task;
 		}
@@ -2316,7 +2316,7 @@  proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 		p->end = vma->vm_end;
 		p->mode = vma->vm_file->f_mode;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	mmput(mm);
 
 	for (i = 0; i < nr_files; i++) {