@@ -82,7 +82,6 @@
* @mirrors_sem: read/write semaphore protecting the mirrors list
* @wq: wait queue for user waiting on a range invalidation
* @notifiers: count of active mmu notifiers
- * @dead: is the mm dead ?
*/
struct hmm {
struct mm_struct *mm;
@@ -95,7 +94,6 @@ struct hmm {
wait_queue_head_t wq;
struct rcu_head rcu;
long notifiers;
- bool dead;
};
/*
@@ -459,30 +457,6 @@ struct hmm_mirror {
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);
-/*
- * hmm_mirror_mm_is_alive() - test if mm is still alive
- * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
- * Return: false if the mm is dead, true otherwise
- *
- * This is an optimization, it will not always accurately return false if the
- * mm is dead; i.e., there can be false negatives (process is being killed but
- * HMM is not yet informed of that). It is only intended to be used to optimize
- * out cases where the driver is about to do something time consuming and it
- * would be better to skip it if the mm is dead.
- */
-static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
-{
- struct mm_struct *mm;
-
- if (!mirror || !mirror->hmm)
- return false;
- mm = READ_ONCE(mirror->hmm->mm);
- if (mirror->hmm->dead || !mm)
- return false;
-
- return true;
-}
-
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
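
With hmm_mirror_mm_is_alive() removed, a driver that wants the same
"skip expensive work if the process is dying" optimization can probe the
mm refcount directly. A minimal sketch, assuming the driver holds its own
struct mm_struct pointer; drv_mm_maybe_alive() is a hypothetical helper,
not part of this patch:

#include <linux/sched/mm.h>	/* mmget_not_zero(), mmput() */

/* Hypothetical driver helper; inherently racy, like the helper it replaces. */
static bool drv_mm_maybe_alive(struct mm_struct *mm)
{
	/* mmget_not_zero() fails once mm_users has dropped to zero. */
	if (!mmget_not_zero(mm))
		return false;
	mmput(mm);
	/* The mm may still die immediately after mmput(). */
	return true;
}

As with the removed helper, the result is only a hint and must not be
used for correctness decisions.
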
@@ -70,7 +70,6 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
mutex_init(&hmm->lock);
kref_init(&hmm->kref);
hmm->notifiers = 0;
- hmm->dead = false;
hmm->mm = mm;
hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
@@ -125,20 +124,17 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
struct hmm_mirror *mirror;
- struct hmm_range *range;
/* Bail out if hmm is in the process of being freed */
if (!kref_get_unless_zero(&hmm->kref))
return;
- /* Report this HMM as dying. */
- hmm->dead = true;
-
- /* Wake-up everyone waiting on any range. */
mutex_lock(&hmm->lock);
- list_for_each_entry(range, &hmm->ranges, list)
- range->valid = false;
- wake_up_all(&hmm->wq);
+ /*
+  * Since hmm_range_register() holds an mmget() reference, hmm_release()
+  * cannot run as long as any range exists.
+  */
+ WARN_ON(!list_empty(&hmm->ranges));
mutex_unlock(&hmm->lock);
down_write(&hmm->mirrors_sem);
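
The WARN_ON() above encodes the new lifetime rule: hmm_release() is the
mmu_notifier ->release() callback, and in this tree that callback only
fires on the __mmput() path once mm_users reaches zero. Because every
registered range holds an mm_users reference via mmget_not_zero(), the
ranges list must already be empty by the time hmm_release() runs. A rough
ordering sketch (comments only, assuming the usual exit_mmap() call chain):

/*
 * hmm_range_register()                 hmm_range_unregister()
 *     mmget_not_zero(hmm->mm)              mmput(hmm->mm)
 *          \                                   /
 *           +------- mm_users > 0 ------------+
 *
 * last mmput() -> __mmput() -> exit_mmap() -> mmu_notifier_release()
 *              -> hmm_release(): reachable only after every range has
 *                 dropped its reference, so hmm->ranges is empty.
 */
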
@@ -908,8 +904,8 @@ int hmm_range_register(struct hmm_range *range,
range->start = start;
range->end = end;
- /* Check if hmm_mm_destroy() was call. */
- if (hmm->mm == NULL || hmm->dead)
+ /* Prevent hmm_release() from running while the range is valid */
+ if (!mmget_not_zero(hmm->mm))
return -EFAULT;
/* Initialize range to track CPU page table updates. */
@@ -952,6 +948,7 @@ void hmm_range_unregister(struct hmm_range *range)
/* Drop reference taken by hmm_range_register() */
range->valid = false;
+ mmput(hmm->mm);
hmm_put(hmm);
range->hmm = NULL;
}
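
Every successful hmm_range_register() now pins the mm until the matching
hmm_range_unregister(), so callers must keep the two strictly paired. A
hedged driver-side sketch, assuming the 5.2-era signature
hmm_range_register(range, mm, start, end, page_shift); drv_use_range()
is a hypothetical caller, not part of this patch:

/* Hypothetical caller; error paths trimmed to the essentials. */
static int drv_use_range(struct hmm_range *range, struct mm_struct *mm,
			 unsigned long start, unsigned long end)
{
	int ret;

	/* Takes an mm_users reference on success; -EFAULT if mm is dead. */
	ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
	if (ret)
		return ret;
	/* ... snapshot or fault the range here ... */
	hmm_range_unregister(range);	/* drops the mmget() reference */
	return 0;
}
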
@@ -979,10 +976,7 @@ long hmm_range_snapshot(struct hmm_range *range)
struct vm_area_struct *vma;
struct mm_walk mm_walk;
- /* Check if hmm_mm_destroy() was call. */
- if (hmm->mm == NULL || hmm->dead)
- return -EFAULT;
-
+ lockdep_assert_held(&hmm->mm->mmap_sem);
do {
/* If range is no longer valid force retry. */
if (!range->valid)
@@ -1077,9 +1071,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
struct mm_walk mm_walk;
int ret;
- /* Check if hmm_mm_destroy() was call. */
- if (hmm->mm == NULL || hmm->dead)
- return -EFAULT;
+ lockdep_assert_held(&hmm->mm->mmap_sem);
do {
/* If range is no longer valid force retry. */
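
Both hmm_range_snapshot() and hmm_range_fault() now assert that the
caller holds mmap_sem instead of re-checking a dead flag, which makes the
locking contract explicit under lockdep. A hedged sketch of the expected
calling pattern (drv_fault_range() is hypothetical):

static long drv_fault_range(struct hmm_range *range, struct mm_struct *mm)
{
	long ret;

	down_read(&mm->mmap_sem);	/* satisfies lockdep_assert_held() */
	ret = hmm_range_fault(range, true /* allow blocking */);
	up_read(&mm->mmap_sem);
	return ret;
}
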