
[RFC,05/11] mm/hmm: Improve locking around hmm->dead

Message ID 20190523153436.19102-6-jgg@ziepe.ca (mailing list archive)
State New, archived
Series mm/hmm: Various revisions from a locking/code review

Commit Message

Jason Gunthorpe May 23, 2019, 3:34 p.m. UTC
From: Jason Gunthorpe <jgg@mellanox.com>

The hmm->dead value is read without any locking, so it is only an
unreliable hint; however, in many cases we need certainty that code is
not racing with mmput()/hmm_release().

For the two functions doing find_vma(), document that the caller is
expected to hold mmap_sem and thus also have a mmget().

For hmm_range_register(), acquire a mmget() internally, as it must not
race with hmm_release() when it sets range->valid.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
 mm/hmm.c | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)
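
Roughly, the caller contract being documented for hmm_range_snapshot()
and hmm_range_fault() looks like the sketch below. The driver function
name and its error handling are hypothetical and not part of the patch;
the point is only the required mmget()/mmap_sem pairing:

/*
 * Hypothetical driver-side caller: it holds a mmget() on the mm, so it
 * cannot race with mmput()/hmm_release(), and it takes mmap_sem for read
 * around the fault, which is what the patch's lockdep_assert_held()
 * checks.
 */
static long example_fault_range(struct hmm_range *range,
				struct mm_struct *mm, bool block)
{
	long ret;

	if (!mmget_not_zero(mm))	/* mm_users must still be elevated */
		return -EFAULT;

	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range, block);
	up_read(&mm->mmap_sem);

	mmput(mm);
	return ret;
}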

Comments

Jason Gunthorpe May 24, 2019, 1:40 p.m. UTC | #1
On Thu, May 23, 2019 at 12:34:30PM -0300, Jason Gunthorpe wrote:
> From: Jason Gunthorpe <jgg@mellanox.com>
> 
> The hmm->dead value is read without any locking, so it is only an
> unreliable hint; however, in many cases we need certainty that code is
> not racing with mmput()/hmm_release().
> 
> For the two functions doing find_vma(), document that the caller is
> expected to hold mmap_sem and thus also have a mmget().
> 
> For hmm_range_register(), acquire a mmget() internally, as it must not
> race with hmm_release() when it sets range->valid.
> 
> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
>  mm/hmm.c | 27 +++++++++++++++++++--------
>  1 file changed, 19 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/hmm.c b/mm/hmm.c
> index ec54be54d81135..d97ec293336ea5 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -909,8 +909,10 @@ int hmm_range_register(struct hmm_range *range,
>  	range->start = start;
>  	range->end = end;
>  
> -	/* Check if hmm_mm_destroy() was call. */
> -	if (mirror->hmm->mm == NULL || mirror->hmm->dead)
> +	/*
> +	 * We cannot set range->valid to true if hmm_release() has already run.
> +	 */
> +	if (!mmget_not_zero(mirror->hmm->mm))
>  		return -EFAULT;
>  
>  	range->hmm = mirror->hmm;
> @@ -928,6 +930,7 @@ int hmm_range_register(struct hmm_range *range,
>  	if (!range->hmm->notifiers)
>  		range->valid = true;
>  	mutex_unlock(&range->hmm->lock);
> +	mmput(mirror->hmm->mm);

Hi Jerome, when you revised this patch to move the mmput() to
hmm_range_unregister(), it means hmm_release() cannot run while a range
exists, and thus we can have this further simplification rolled into
this patch. Can you update your git? Thanks:

diff --git a/mm/hmm.c b/mm/hmm.c
index 2a08b78550b90d..ddd05f2ebe739a 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -128,17 +128,17 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
-	struct hmm_range *range;
 
 	/* hmm is in progress to free */
 	if (!kref_get_unless_zero(&hmm->kref))
 		return;
 
-	/* Wake-up everyone waiting on any range. */
 	mutex_lock(&hmm->lock);
-	list_for_each_entry(range, &hmm->ranges, list)
-		range->valid = false;
-	wake_up_all(&hmm->wq);
+	/*
+	 * Since hmm_range_register() holds a mmget(), hmm_release() is
+	 * prevented as long as a range exists.
+	 */
+	WARN_ON(!list_empty(&hmm->ranges));
 	mutex_unlock(&hmm->lock);
 
 	down_write(&hmm->mirrors_sem);
@@ -908,9 +908,7 @@ int hmm_range_register(struct hmm_range *range,
 	range->hmm = mm->hmm;
 	kref_get(&range->hmm->kref);
 
-	/*
-	 * We cannot set range->valid to true if hmm_release() has already run.
-	 */
+	/* Prevent hmm_release() from running while the range is valid */
 	if (!mmget_not_zero(mm))
 		return -EFAULT;

Patch

diff --git a/mm/hmm.c b/mm/hmm.c
index ec54be54d81135..d97ec293336ea5 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -909,8 +909,10 @@  int hmm_range_register(struct hmm_range *range,
 	range->start = start;
 	range->end = end;
 
-	/* Check if hmm_mm_destroy() was call. */
-	if (mirror->hmm->mm == NULL || mirror->hmm->dead)
+	/*
+	 * We cannot set range->valid to true if hmm_release() has already run.
+	 */
+	if (!mmget_not_zero(mirror->hmm->mm))
 		return -EFAULT;
 
 	range->hmm = mirror->hmm;
@@ -928,6 +930,7 @@  int hmm_range_register(struct hmm_range *range,
 	if (!range->hmm->notifiers)
 		range->valid = true;
 	mutex_unlock(&range->hmm->lock);
+	mmput(mirror->hmm->mm);
 
 	return 0;
 }
@@ -979,9 +982,13 @@  long hmm_range_snapshot(struct hmm_range *range)
 	struct vm_area_struct *vma;
 	struct mm_walk mm_walk;
 
-	/* Check if hmm_mm_destroy() was call. */
-	if (hmm->mm == NULL || hmm->dead)
-		return -EFAULT;
+	/*
+	 * Caller must hold the mmap_sem, and that requires the caller to have
+	 * a mmget.
+	 */
+	lockdep_assert_held(&hmm->mm->mmap_sem);
+	if (WARN_ON(!atomic_read(&hmm->mm->mm_users)))
+		return -EINVAL;
 
 	do {
 		/* If range is no longer valid force retry. */
@@ -1077,9 +1084,13 @@  long hmm_range_fault(struct hmm_range *range, bool block)
 	struct mm_walk mm_walk;
 	int ret;
 
-	/* Check if hmm_mm_destroy() was call. */
-	if (hmm->mm == NULL || hmm->dead)
-		return -EFAULT;
+	/*
+	 * Caller must hold the mmap_sem, and that requires the caller to have
+	 * a mmget.
+	 */
+	lockdep_assert_held(&hmm->mm->mmap_sem);
+	if (WARN_ON(!atomic_read(&hmm->mm->mm_users)))
+		return -EINVAL;
 
 	do {
 		/* If range is no longer valid force retry. */