
[v5.5,10/10] mmap locking API: rename mmap_sem to mmap_lock

Message ID 20200424013958.GC158937@google.com (mailing list archive)
State New, archived

Commit Message

Michel Lespinasse April 24, 2020, 1:39 a.m. UTC
Rename the mmap_sem field to mmap_lock. Any new uses of this lock
should now go through the new mmap locking api. The mmap_lock is
still implemented as a rwsem, though this could change in the future.

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/ia64/mm/fault.c                  |  4 +--
 arch/x86/mm/fault.c                   |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c |  2 +-
 include/linux/mm_types.h              |  2 +-
 include/linux/mmap_lock.h             | 38 +++++++++++++--------------
 mm/memory.c                           |  2 +-
 mm/mmap.c                             |  4 +--
 mm/mmu_notifier.c                     |  2 +-
 8 files changed, 28 insertions(+), 28 deletions(-)
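
For illustration, a minimal sketch of what the conversion means for a caller.
The call site below is hypothetical; the helpers are the ones defined in
include/linux/mmap_lock.h by this series:

        /* before: direct use of the rwsem field */
        down_read(&mm->mmap_sem);
        /* ... walk the VMAs, handle the fault, ... */
        up_read(&mm->mmap_sem);

        /* after: go through the mmap locking API */
        mmap_read_lock(mm);
        /* ... */
        mmap_read_unlock(mm);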

Comments

Vlastimil Babka May 18, 2020, 11:07 a.m. UTC | #1
On 4/24/20 3:39 AM, Michel Lespinasse wrote:
> Rename the mmap_sem field to mmap_lock. Any new uses of this lock
> should now go through the new mmap locking api. The mmap_lock is
> still implemented as a rwsem, though this could change in the future.
> 
> Signed-off-by: Michel Lespinasse <walken@google.com>

Except for gpu/drm/etnaviv/etnaviv_gem.c, all direct users remain in mm or arch fault
code, so that seems fine.


Reviewed-by: Vlastimil Babka <vbabka@suse.cz>


Any plan about all the code comments mentioning mmap_sem? :) Not urgent.
Laurent Dufour May 18, 2020, 1:45 p.m. UTC | #2
On 24/04/2020 at 03:39, Michel Lespinasse wrote:
> Rename the mmap_sem field to mmap_lock. Any new uses of this lock
> should now go through the new mmap locking api. The mmap_lock is
> still implemented as a rwsem, though this could change in the future.
> 
> Signed-off-by: Michel Lespinasse <walken@google.com>
> ---
>   arch/ia64/mm/fault.c                  |  4 +--
>   arch/x86/mm/fault.c                   |  2 +-
>   drivers/gpu/drm/etnaviv/etnaviv_gem.c |  2 +-
>   include/linux/mm_types.h              |  2 +-
>   include/linux/mmap_lock.h             | 38 +++++++++++++--------------
>   mm/memory.c                           |  2 +-
>   mm/mmap.c                             |  4 +--
>   mm/mmu_notifier.c                     |  2 +-
>   8 files changed, 28 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
> index 693f00b117e1..9b95050c2048 100644
> --- a/arch/ia64/mm/fault.c
> +++ b/arch/ia64/mm/fault.c
> @@ -70,8 +70,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
>   	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
>   		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
>   
> -	/* mmap_sem is performance critical.... */
> -	prefetchw(&mm->mmap_sem);
> +	/* mmap_lock is performance critical.... */
> +	prefetchw(&mm->mmap_lock);
>   
>   	/*
>   	 * If we're in an interrupt or have no user context, we must not take the fault..
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index 181f66b9049f..35f530f9dfc0 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -1522,7 +1522,7 @@ dotraplinkage void
>   do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
>   		unsigned long address)
>   {
> -	prefetchw(&current->mm->mmap_sem);
> +	prefetchw(&current->mm->mmap_lock);
>   	trace_page_fault_entries(regs, hw_error_code, address);
>   
>   	if (unlikely(kmmio_fault(regs, address)))
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> index dc9ef302f517..701f3995f621 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
>   	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
>   	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
>   
> -	might_lock_read(&current->mm->mmap_sem);
> +	might_lock_read(&current->mm->mmap_lock);

Why not a mm_might_lock_read() new API to hide the mmap_lock, and add it to the 
previous patch?
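
For concreteness, such a wrapper would presumably look something like the
following (hypothetical sketch, not part of this series):

        static inline void mm_might_lock_read(struct mm_struct *mm)
        {
                might_lock_read(&mm->mmap_lock);
        }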

>   
>   	if (userptr->mm != current->mm)
>   		return -EPERM;
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 4aba6c0c2ba8..d13b90399c16 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -436,7 +436,7 @@ struct mm_struct {
>   		spinlock_t page_table_lock; /* Protects page tables and some
>   					     * counters
>   					     */
> -		struct rw_semaphore mmap_sem;
> +		struct rw_semaphore mmap_lock;
>   
>   		struct list_head mmlist; /* List of maybe swapped mm's.	These
>   					  * are globally strung together off
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 5bf7cee5d93b..9dc632add390 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -4,67 +4,67 @@
>   #include <linux/mmdebug.h>
>   
>   #define MMAP_LOCK_INITIALIZER(name) \
> -	.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem),
> +	.mmap_lock = __RWSEM_INITIALIZER(name.mmap_lock),
>   
>   static inline void mmap_init_lock(struct mm_struct *mm)
>   {
> -	init_rwsem(&mm->mmap_sem);
> +	init_rwsem(&mm->mmap_lock);
>   }
>   
>   static inline void mmap_write_lock(struct mm_struct *mm)
>   {
> -	down_write(&mm->mmap_sem);
> +	down_write(&mm->mmap_lock);
>   }
>   
>   static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
>   {
> -	down_write_nested(&mm->mmap_sem, subclass);
> +	down_write_nested(&mm->mmap_lock, subclass);
>   }
>   
>   static inline int mmap_write_lock_killable(struct mm_struct *mm)
>   {
> -	return down_write_killable(&mm->mmap_sem);
> +	return down_write_killable(&mm->mmap_lock);
>   }
>   
>   static inline bool mmap_write_trylock(struct mm_struct *mm)
>   {
> -	return down_write_trylock(&mm->mmap_sem) != 0;
> +	return down_write_trylock(&mm->mmap_lock) != 0;
>   }
>   
>   static inline void mmap_write_unlock(struct mm_struct *mm)
>   {
> -	up_write(&mm->mmap_sem);
> +	up_write(&mm->mmap_lock);
>   }
>   
>   static inline void mmap_write_downgrade(struct mm_struct *mm)
>   {
> -	downgrade_write(&mm->mmap_sem);
> +	downgrade_write(&mm->mmap_lock);
>   }
>   
>   static inline void mmap_read_lock(struct mm_struct *mm)
>   {
> -	down_read(&mm->mmap_sem);
> +	down_read(&mm->mmap_lock);
>   }
>   
>   static inline int mmap_read_lock_killable(struct mm_struct *mm)
>   {
> -	return down_read_killable(&mm->mmap_sem);
> +	return down_read_killable(&mm->mmap_lock);
>   }
>   
>   static inline bool mmap_read_trylock(struct mm_struct *mm)
>   {
> -	return down_read_trylock(&mm->mmap_sem) != 0;
> +	return down_read_trylock(&mm->mmap_lock) != 0;
>   }
>   
>   static inline void mmap_read_unlock(struct mm_struct *mm)
>   {
> -	up_read(&mm->mmap_sem);
> +	up_read(&mm->mmap_lock);
>   }
>   
>   static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
>   {
> -	if (down_read_trylock(&mm->mmap_sem)) {
> -		rwsem_release(&mm->mmap_sem.dep_map, _RET_IP_);
> +	if (down_read_trylock(&mm->mmap_lock)) {
> +		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
>   		return true;
>   	}
>   	return false;
> @@ -72,19 +72,19 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
>   
>   static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
>   {
> -	up_read_non_owner(&mm->mmap_sem);
> +	up_read_non_owner(&mm->mmap_lock);
>   }
>   
>   static inline void mmap_assert_locked(struct mm_struct *mm)
>   {
> -	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_sem, -1), mm);
> -	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> +	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_lock, -1), mm);
> +	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
>   }
>   
>   static inline void mmap_assert_write_locked(struct mm_struct *mm)
>   {
> -	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_sem, 0), mm);
> -	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
> +	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_lock, 0), mm);
> +	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
>   }
>   
>   #endif /* _LINUX_MMAP_LOCK_H */
> diff --git a/mm/memory.c b/mm/memory.c
> index 20f98ea8968e..c2963e7affa9 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4811,7 +4811,7 @@ void __might_fault(const char *file, int line)
>   	__might_sleep(file, line, 0);
>   #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
>   	if (current->mm)
> -		might_lock_read(&current->mm->mmap_sem);
> +		might_lock_read(&current->mm->mmap_lock);
>   #endif
>   }
>   EXPORT_SYMBOL(__might_fault);
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 2f4ffccc5972..80a47031d5db 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -3474,7 +3474,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
>   		 * The LSB of head.next can't change from under us
>   		 * because we hold the mm_all_locks_mutex.
>   		 */
> -		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
> +		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
>   		/*
>   		 * We can safely modify head.next after taking the
>   		 * anon_vma->root->rwsem. If some other vma in this mm shares
> @@ -3504,7 +3504,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
>   		 */
>   		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
>   			BUG();
> -		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
> +		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
>   	}
>   }
>   
> diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
> index 24eb9d1ed0a7..2f348b6c9c9a 100644
> --- a/mm/mmu_notifier.c
> +++ b/mm/mmu_notifier.c
> @@ -983,7 +983,7 @@ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
>   	struct mmu_notifier_subscriptions *subscriptions;
>   	int ret;
>   
> -	might_lock(&mm->mmap_sem);
> +	might_lock(&mm->mmap_lock);

Same here with a new mm_might_lock()?

>   
>   	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
>   	if (!subscriptions || !subscriptions->has_itree) {
>
Michel Lespinasse May 19, 2020, 1:10 p.m. UTC | #3
On Mon, May 18, 2020 at 03:45:22PM +0200, Laurent Dufour wrote:
> On 24/04/2020 at 03:39, Michel Lespinasse wrote:
> > Rename the mmap_sem field to mmap_lock. Any new uses of this lock
> > should now go through the new mmap locking api. The mmap_lock is
> > still implemented as a rwsem, though this could change in the future.
> > 
> > diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > index dc9ef302f517..701f3995f621 100644
> > --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
> >   	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
> >   	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> > -	might_lock_read(&current->mm->mmap_sem);
> > +	might_lock_read(&current->mm->mmap_lock);
> 
> Why not a mm_might_lock_read() new API to hide the mmap_lock, and add it to
> the previous patch?

I'm not sure why this is needed - we may rework the lock to be
something else than rwsem, but might_lock_read should still apply to
it and make sense ? I'm not sure what the extra API would bring...
Michel Lespinasse May 19, 2020, 1:12 p.m. UTC | #4
On Mon, May 18, 2020 at 01:07:26PM +0200, Vlastimil Babka wrote:
> Any plan about all the code comments mentioning mmap_sem? :) Not urgent.

It's mostly a sed job; I'll add it in the next version, as it seems
the patchset is getting ready for inclusion.
Laurent Dufour May 19, 2020, 1:20 p.m. UTC | #5
On 19/05/2020 at 15:10, Michel Lespinasse wrote:
> On Mon, May 18, 2020 at 03:45:22PM +0200, Laurent Dufour wrote:
>> On 24/04/2020 at 03:39, Michel Lespinasse wrote:
>>> Rename the mmap_sem field to mmap_lock. Any new uses of this lock
>>> should now go through the new mmap locking api. The mmap_lock is
>>> still implemented as a rwsem, though this could change in the future.
>>>
>>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>>> index dc9ef302f517..701f3995f621 100644
>>> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>>> @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
>>>    	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
>>>    	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
>>> -	might_lock_read(&current->mm->mmap_sem);
>>> +	might_lock_read(&current->mm->mmap_lock);
>>
>> Why not a mm_might_lock_read() new API to hide the mmap_lock, and add it to
>> the previous patch?
> 
> I'm not sure why this is needed - we may rework the lock to be
> something else than rwsem, but might_lock_read should still apply to
> it and make sense ? I'm not sure what the extra API would bring...

I guess at some point the API would become might_lock_read_a_range(), wouldn't it?
Furthermore, this would hide the lock's name, which is the goal of this series.
Matthew Wilcox May 19, 2020, 3:32 p.m. UTC | #6
On Tue, May 19, 2020 at 03:20:40PM +0200, Laurent Dufour wrote:
> On 19/05/2020 at 15:10, Michel Lespinasse wrote:
> > On Mon, May 18, 2020 at 03:45:22PM +0200, Laurent Dufour wrote:
> > > On 24/04/2020 at 03:39, Michel Lespinasse wrote:
> > > > Rename the mmap_sem field to mmap_lock. Any new uses of this lock
> > > > should now go through the new mmap locking api. The mmap_lock is
> > > > still implemented as a rwsem, though this could change in the future.
> > > > 
> > > > diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > > > index dc9ef302f517..701f3995f621 100644
> > > > --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > > > +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> > > > @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
> > > >    	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
> > > >    	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> > > > -	might_lock_read(&current->mm->mmap_sem);
> > > > +	might_lock_read(&current->mm->mmap_lock);
> > > 
> > > Why not a mm_might_lock_read() new API to hide the mmap_lock, and add it to
> > > the previous patch?
> > 
> > I'm not sure why this is needed - we may rework the lock to be
> > something else than rwsem, but might_lock_read should still apply to
> > it and make sense ? I'm not sure what the extra API would bring...
> 
> I guess at some point the API would become might_lock_read_a_range(), wouldn't it?
> Furthermore, this would hide the lock's name, which is the goal of this series.

I think this assertion should be deleted from this driver.  It's there
in case get_user_pages_fast() takes the mmap sem.  It would make sense to
have this assertion in get_user_pages_fast() in case we take the fast path
which doesn't acquire the mmap_sem.  Something like this:

+++ b/mm/gup.c
@@ -2754,6 +2754,7 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
                                       FOLL_FORCE | FOLL_PIN | FOLL_GET)))
                return -EINVAL;
 
+       might_lock_read(&current->mm->mmap_lock);
        start = untagged_addr(start) & PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
John Hubbard May 19, 2020, 6:14 p.m. UTC | #7
On 2020-05-19 08:32, Matthew Wilcox wrote:
> On Tue, May 19, 2020 at 03:20:40PM +0200, Laurent Dufour wrote:
>> On 19/05/2020 at 15:10, Michel Lespinasse wrote:
>>> On Mon, May 18, 2020 at 03:45:22PM +0200, Laurent Dufour wrote:
>>>> On 24/04/2020 at 03:39, Michel Lespinasse wrote:
>>>>> Rename the mmap_sem field to mmap_lock. Any new uses of this lock
>>>>> should now go through the new mmap locking api. The mmap_lock is
>>>>> still implemented as a rwsem, though this could change in the future.
>>>>>
>>>>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>>>>> index dc9ef302f517..701f3995f621 100644
>>>>> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>>>>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
>>>>> @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
>>>>>     	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
>>>>>     	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
>>>>> -	might_lock_read(&current->mm->mmap_sem);
>>>>> +	might_lock_read(&current->mm->mmap_lock);
>>>>
>>>> Why not a mm_might_lock_read() new API to hide the mmap_lock, and add it to
>>>> the previous patch?
>>>
>>> I'm not sure why this is needed - we may rework the lock to be
>>> something else than rwsem, but might_lock_read should still apply to
>>> it and make sense ? I'm not sure what the extra API would bring...
>>
>> I guess at some point the API would become might_lock_read_a_range(), wouldn't it?
>> Furthermore, this would hide the lock's name, which is the goal of this series.
> 
> I think this assertion should be deleted from this driver.  It's there
> in case get_user_pages_fast() takes the mmap sem.  It would make sense to
> have this assertion in get_user_pages_fast() in case we take the fast path
> which doesn't acquire the mmap_sem.  Something like this:
> 
> +++ b/mm/gup.c
> @@ -2754,6 +2754,7 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
>                                         FOLL_FORCE | FOLL_PIN | FOLL_GET)))
>                  return -EINVAL;
>   
> +       might_lock_read(&current->mm->mmap_lock);
>          start = untagged_addr(start) & PAGE_MASK;
>          addr = start;
>          len = (unsigned long) nr_pages << PAGE_SHIFT;
> 
> 

Hi Michel and Matthew and all,

There are a couple of recent developments in this code to keep in mind. I don't
*think* either one is a problem here, but just in case:

a) The latest version of the above routine [1] is on its way to mmotm as of
yesterday, and that version more firmly divides the fast and slow parts,
via a new FOLL_FAST_ONLY flag. The fall-back to slow/regular gup only occurs
if the caller does not set FOLL_FAST_ONLY. (Note that it's a gup.c internal
flag, btw.)

That gives you additional options inside internal_get_user_pages_fast(), such
as, approximately:

if (!(gup_flags & FOLL_FAST_ONLY))
	might_lock_read(&current->mm->mmap_lock);

...not that that is necessarily a great idea, seeing as how it merely changes
"might lock" into "maybe might lock".  :)

b) I've posted a small patch to that same etnaviv_gem.c file [2] over the
weekend, to convert from get_user_pages()/put_page() to
pin_user_pages()/unpin_user_pages(). It hasn't been merged yet, and it
shouldn't conflict either, but it's one more reason to hope that the
etnaviv list can do some run-time testing on the whole lot.

[1] https://lore.kernel.org/r/20200519002124.2025955-3-jhubbard@nvidia.com

[2] https://lore.kernel.org/r/20200518054315.2407093-1-jhubbard@nvidia.com


thanks,
Michel Lespinasse May 20, 2020, 2:39 a.m. UTC | #8
On Tue, May 19, 2020 at 11:15 AM John Hubbard <jhubbard@nvidia.com> wrote:
> On 2020-05-19 08:32, Matthew Wilcox wrote:
> > On Tue, May 19, 2020 at 03:20:40PM +0200, Laurent Dufour wrote:
> >> On 19/05/2020 at 15:10, Michel Lespinasse wrote:
> >>> On Mon, May 18, 2020 at 03:45:22PM +0200, Laurent Dufour wrote:
> >>>> On 24/04/2020 at 03:39, Michel Lespinasse wrote:
> >>>>> Rename the mmap_sem field to mmap_lock. Any new uses of this lock
> >>>>> should now go through the new mmap locking api. The mmap_lock is
> >>>>> still implemented as a rwsem, though this could change in the future.
> >>>>>
> >>>>> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> >>>>> index dc9ef302f517..701f3995f621 100644
> >>>>> --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> >>>>> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
> >>>>> @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
> >>>>>           struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
> >>>>>           int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> >>>>> - might_lock_read(&current->mm->mmap_sem);
> >>>>> + might_lock_read(&current->mm->mmap_lock);
> >>>>
> >>>> Why not a mm_might_lock_read() new API to hide the mmap_lock, and add it to
> >>>> the previous patch?
> >>>
> >>> I'm not sure why this is needed - we may rework the lock to be
> >>> something else than rwsem, but might_lock_read should still apply to
> >>> it and make sense ? I'm not sure what the extra API would bring...
> >>
> >> I guess at some point the API would become might_lock_read_a_range(), wouldn't it?

I don't think this should be necessary - from lockdep perspective,
there should not be a difference between locking a range vs the entire
address space.

> >> Furthermore, this would hide the lock's name, which is the goal of this series.

Actually, to me the name is secondary - the goal of the series is
to add an API abstracting the mmap locking. The lock only gets renamed
because mmap_sem was too specific (if we are to change the mmap
locking) and to ensure we convert all direct uses to the API instead.

> > I think this assertion should be deleted from this driver.  It's there
> > in case get_user_pages_fast() takes the mmap sem.  It would make sense to
> > have this assertion in get_user_pages_fast() in case we take the fast path
> > which doesn't acquire the mmap_sem.  Something like this:

I like this idea a lot - having might_lock assertions in
get_user_pages_fast makes a lot more sense than doing the same at the
call sites.

> There are a couple of recent developments in this code to keep in mind. I don't
> *think* either one is a problem here, but just in case:
>
> a) The latest version of the above routine [1] is on its way to mmotm as of
> yesterday, and that version more firmly divides the fast and slow parts,
> via a new FOLL_FAST_ONLY flag. The fall-back to slow/regular gup only occurs
> if the caller does not set FOLL_FAST_ONLY. (Note that it's a gup.c internal
> flag, btw.)
>
> That gives you additional options inside internal_get_user_pages_fast(), such
> as, approximately:
>
> if (!(gup_flags & FOLL_FAST_ONLY))
>         might_lock_read(&current->mm->mmap_lock);
>
> ...not that that is necessarily a great idea, seeing as how it merely changes
> "might lock" into "maybe might lock".  :)

I think that is completely fine, makes sure everyone not using
FOLL_FAST_ONLY realizes that the call could block.

Can I ask you to add that assertion in your patchset ? Based on
Matthew's feedback, I would do it in my patchset, but it doesn't seem
worth doing if we know this will conflict with your changes.
John Hubbard May 20, 2020, 7:32 a.m. UTC | #9
On 2020-05-19 19:39, Michel Lespinasse wrote:
...
>> That gives you additional options inside internal_get_user_pages_fast(), such
>> as, approximately:
>>
>> if (!(gup_flags & FOLL_FAST_ONLY))
>>          might_lock_read(&current->mm->mmap_lock);
>>
>> ...not that that is necessarily a great idea, seeing as how it merely changes
>> "might lock" into "maybe might lock".  :)
> 
> I think that is completely fine, makes sure everyone not using
> FOLL_FAST_ONLY realizes that the call could block.
> 
> Can I ask you to add that assertion in your patchset ? Based on
> Matthew's feedback, I would do it in my patchset, but it doesn't seem
> worth doing if we know this will conflict with your changes.
> 

Sure, that's no problem. Although it looks like my changes may land
in mmotm first, and then your patchset, so maybe the right move is to
make this change *after* both of those things happen, yes?


thanks,
Michel Lespinasse May 20, 2020, 8:02 a.m. UTC | #10
On Wed, May 20, 2020 at 12:32 AM John Hubbard <jhubbard@nvidia.com> wrote:
> On 2020-05-19 19:39, Michel Lespinasse wrote:
> >> That gives you additional options inside internal_get_user_pages_fast(), such
> >> as, approximately:
> >>
> >> if (!(gup_flags & FOLL_FAST_ONLY))
> >>          might_lock_read(&current->mm->mmap_lock);
> >>
> >> ...not that that is necessarily a great idea, seeing as how it merely changes
> >> "might lock" into "maybe might lock".  :)
> >
> > I think that is completely fine, makes sure everyone not using
> > FOLL_FAST_ONLY realizes that the call could block.
> >
> > Can I ask you to add that assertion in your patchset ? Based on
> > Matthew's feedback, I would do it in my patchset, but it doesn't seem
> > worth doing if we know this will conflict with your changes.
>
> Sure, that's no problem. Although it looks like my changes may land
> in mmotm first, and then your patchset, so maybe the right move is to
> make this change *after* both of those things happen, yes?

I don't have a strong opinion on this. I would suggest you add the
might_lock_read() assertion and whoever comes in second will deal with
the conflict by changing mmap_sem to mmap_lock in that assertion.
Jason Gunthorpe May 20, 2020, 12:48 p.m. UTC | #11
On Tue, May 19, 2020 at 07:39:30PM -0700, Michel Lespinasse wrote:
> > > I think this assertion should be deleted from this driver.  It's there
> > > in case get_user_pages_fast() takes the mmap sem.  It would make sense to
> > > have this assertion in get_user_pages_fast() in case we take the fast path
> > > which doesn't acquire the mmap_sem.  Something like this:
> 
> I like this idea a lot - having might_lock assertions in
> get_user_pages_fast makes a lot more sense than doing the same at the
> call sites.

+1. I've wanted to see more complete lockdep annotations in gup.c for a
while now. There have been a number of bugs this would have caught.
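
As a rough sketch of the kind of annotation being discussed - placement is
hypothetical, using lockdep's might_lock_read() and the mmap_assert_locked()
helper added by this series:

        /* at the gup_fast entry point: the slow-path fallback may take the lock */
        might_lock_read(&current->mm->mmap_lock);

        /* in a slow-path helper that expects the caller to hold the lock */
        mmap_assert_locked(mm);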

Jason

Patch

diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 693f00b117e1..9b95050c2048 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -70,8 +70,8 @@  ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
 		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
-	/* mmap_sem is performance critical.... */
-	prefetchw(&mm->mmap_sem);
+	/* mmap_lock is performance critical.... */
+	prefetchw(&mm->mmap_lock);
 
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 181f66b9049f..35f530f9dfc0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1522,7 +1522,7 @@  dotraplinkage void
 do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
 		unsigned long address)
 {
-	prefetchw(&current->mm->mmap_sem);
+	prefetchw(&current->mm->mmap_lock);
 	trace_page_fault_entries(regs, hw_error_code, address);
 
 	if (unlikely(kmmio_fault(regs, address)))
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index dc9ef302f517..701f3995f621 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -661,7 +661,7 @@  static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
 	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
-	might_lock_read(&current->mm->mmap_sem);
+	might_lock_read(&current->mm->mmap_lock);
 
 	if (userptr->mm != current->mm)
 		return -EPERM;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba8..d13b90399c16 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -436,7 +436,7 @@  struct mm_struct {
 		spinlock_t page_table_lock; /* Protects page tables and some
 					     * counters
 					     */
-		struct rw_semaphore mmap_sem;
+		struct rw_semaphore mmap_lock;
 
 		struct list_head mmlist; /* List of maybe swapped mm's.	These
 					  * are globally strung together off
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 5bf7cee5d93b..9dc632add390 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -4,67 +4,67 @@ 
 #include <linux/mmdebug.h>
 
 #define MMAP_LOCK_INITIALIZER(name) \
-	.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem),
+	.mmap_lock = __RWSEM_INITIALIZER(name.mmap_lock),
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
-	init_rwsem(&mm->mmap_sem);
+	init_rwsem(&mm->mmap_lock);
 }
 
 static inline void mmap_write_lock(struct mm_struct *mm)
 {
-	down_write(&mm->mmap_sem);
+	down_write(&mm->mmap_lock);
 }
 
 static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
 {
-	down_write_nested(&mm->mmap_sem, subclass);
+	down_write_nested(&mm->mmap_lock, subclass);
 }
 
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
-	return down_write_killable(&mm->mmap_sem);
+	return down_write_killable(&mm->mmap_lock);
 }
 
 static inline bool mmap_write_trylock(struct mm_struct *mm)
 {
-	return down_write_trylock(&mm->mmap_sem) != 0;
+	return down_write_trylock(&mm->mmap_lock) != 0;
 }
 
 static inline void mmap_write_unlock(struct mm_struct *mm)
 {
-	up_write(&mm->mmap_sem);
+	up_write(&mm->mmap_lock);
 }
 
 static inline void mmap_write_downgrade(struct mm_struct *mm)
 {
-	downgrade_write(&mm->mmap_sem);
+	downgrade_write(&mm->mmap_lock);
 }
 
 static inline void mmap_read_lock(struct mm_struct *mm)
 {
-	down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_lock);
 }
 
 static inline int mmap_read_lock_killable(struct mm_struct *mm)
 {
-	return down_read_killable(&mm->mmap_sem);
+	return down_read_killable(&mm->mmap_lock);
 }
 
 static inline bool mmap_read_trylock(struct mm_struct *mm)
 {
-	return down_read_trylock(&mm->mmap_sem) != 0;
+	return down_read_trylock(&mm->mmap_lock) != 0;
 }
 
 static inline void mmap_read_unlock(struct mm_struct *mm)
 {
-	up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_lock);
 }
 
 static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
 {
-	if (down_read_trylock(&mm->mmap_sem)) {
-		rwsem_release(&mm->mmap_sem.dep_map, _RET_IP_);
+	if (down_read_trylock(&mm->mmap_lock)) {
+		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
 		return true;
 	}
 	return false;
@@ -72,19 +72,19 @@  static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
 
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
-	up_read_non_owner(&mm->mmap_sem);
+	up_read_non_owner(&mm->mmap_lock);
 }
 
 static inline void mmap_assert_locked(struct mm_struct *mm)
 {
-	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_sem, -1), mm);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_lock, -1), mm);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
 }
 
 static inline void mmap_assert_write_locked(struct mm_struct *mm)
 {
-	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_sem, 0), mm);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_lock, 0), mm);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
 }
 
 #endif /* _LINUX_MMAP_LOCK_H */
diff --git a/mm/memory.c b/mm/memory.c
index 20f98ea8968e..c2963e7affa9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4811,7 +4811,7 @@  void __might_fault(const char *file, int line)
 	__might_sleep(file, line, 0);
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+		might_lock_read(&current->mm->mmap_lock);
 #endif
 }
 EXPORT_SYMBOL(__might_fault);
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f4ffccc5972..80a47031d5db 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3474,7 +3474,7 @@  static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
+		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->rwsem. If some other vma in this mm shares
@@ -3504,7 +3504,7 @@  static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
+		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
 	}
 }
 
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 24eb9d1ed0a7..2f348b6c9c9a 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -983,7 +983,7 @@  int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
 	struct mmu_notifier_subscriptions *subscriptions;
 	int ret;
 
-	might_lock(&mm->mmap_sem);
+	might_lock(&mm->mmap_lock);
 
 	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
 	if (!subscriptions || !subscriptions->has_itree) {