@@ -91,7 +91,6 @@
* @mirrors_sem: read/write semaphore protecting the mirrors list
* @wq: wait queue for user waiting on a range invalidation
* @notifiers: count of active mmu notifiers
- * @dead: is the mm dead ?
*/
struct hmm {
struct mm_struct *mm;
@@ -104,7 +103,6 @@ struct hmm {
wait_queue_head_t wq;
struct rcu_head rcu;
long notifiers;
- bool dead;
};

/*
@@ -466,31 +464,6 @@ struct hmm_mirror {
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

-/*
- * hmm_mirror_mm_is_alive() - test if mm is still alive
- * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
- * Returns: false if the mm is dead, true otherwise
- *
- * This is an optimization it will not accurately always return -EINVAL if the
- * mm is dead ie there can be false negative (process is being kill but HMM is
- * not yet inform of that). It is only intented to be use to optimize out case
- * where driver is about to do something time consuming and it would be better
- * to skip it if the mm is dead.
- */
-static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
-{
- struct mm_struct *mm;
-
- if (!mirror || !mirror->hmm)
- return false;
- mm = READ_ONCE(mirror->hmm->mm);
- if (mirror->hmm->dead || !mm)
- return false;
-
- return true;
-}
-
-
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
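Aside on the block removed above: the deleted kernel-doc itself says the check was only an optimization with possible false negatives. A hypothetical pre-patch caller (the function name below is made up for illustration and is not taken from this series) would have looked roughly like this; after this patch such a caller simply drops the check and lets the normal range/notifier paths cope with a dying mm.

#include <linux/hmm.h>

/*
 * Illustrative sketch only: hmm_mirror_mm_is_alive() could return a stale
 * "alive" answer, so the time-consuming work below still had to tolerate
 * the mm going away underneath it.
 */
static int example_mirror_populate(struct hmm_mirror *mirror)
{
	if (!hmm_mirror_mm_is_alive(mirror))
		return -EINVAL;	/* cheap early-out, nothing more */

	/* ... time-consuming device page table update would go here ... */
	return 0;
}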
@@ -80,7 +80,6 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
mutex_init(&hmm->lock);
kref_init(&hmm->kref);
hmm->notifiers = 0;
- hmm->dead = false;
hmm->mm = mm;
mmgrab(hmm->mm);

@@ -130,9 +129,6 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
if (!kref_get_unless_zero(&hmm->kref))
return;

- /* Report this HMM as dying. */
- hmm->dead = true;
-
/* Wake-up everyone waiting on any range. */
mutex_lock(&hmm->lock);
list_for_each_entry(range, &hmm->ranges, list) {