Message ID | 20190614004450.20252-3-jgg@ziepe.ca (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | mm/hmm: Various revisions from a locking/code review | expand |
On Sat, Jun 15, 2019 at 06:59:06AM -0700, Christoph Hellwig wrote: > On Thu, Jun 13, 2019 at 09:44:40PM -0300, Jason Gunthorpe wrote: > > From: Jason Gunthorpe <jgg@mellanox.com> > > > > Ralph observes that hmm_range_register() can only be called by a driver > > while a mirror is registered. Make this clear in the API by passing in the > > mirror structure as a parameter. > > > > This also simplifies understanding the lifetime model for struct hmm, as > > the hmm pointer must be valid as part of a registered mirror so all we > > need in hmm_register_range() is a simple kref_get. > > Looks good, at least as an intermediate step: > > Reviewed-by: Christoph Hellwig <hch@lst.de> > > > index f6956d78e3cb25..22a97ada108b4e 100644 > > +++ b/mm/hmm.c > > @@ -914,13 +914,13 @@ static void hmm_pfns_clear(struct hmm_range *range, > > * Track updates to the CPU page table see include/linux/hmm.h > > */ > > int hmm_range_register(struct hmm_range *range, > > - struct mm_struct *mm, > > + struct hmm_mirror *mirror, > > unsigned long start, > > unsigned long end, > > unsigned page_shift) > > { > > unsigned long mask = ((1UL << page_shift) - 1UL); > > - struct hmm *hmm; > > + struct hmm *hmm = mirror->hmm; > > > > range->valid = false; > > range->hmm = NULL; > > @@ -934,20 +934,15 @@ int hmm_range_register(struct hmm_range *range, > > range->start = start; > > range->end = end; > > But while you're at it: the calling conventions of hmm_range_register > are still rather odd, as the start, end and page_shift arguments are > only used to fill out fields in the range structure passed in. Might > be worth cleaning up as well if we change the calling convention. I'm thinking to tackle that as part of the mmu notifier invlock idea.. Once the range loses the lock then we don't really need to register it at all. Thanks, Jason
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 93ed43c413f0bb..8c92374afcf227 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -649,7 +649,7 @@ nouveau_svm_fault(struct nvif_notify *notify) range.values = nouveau_svm_pfn_values; range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; again: - ret = hmm_vma_fault(&range, true); + ret = hmm_vma_fault(&svmm->mirror, &range, true); if (ret == 0) { mutex_lock(&svmm->mutex); if (!hmm_vma_range_done(&range)) { diff --git a/include/linux/hmm.h b/include/linux/hmm.h index cb01cf1fa3c08b..1fba6979adf460 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -496,7 +496,7 @@ static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror) * Please see Documentation/vm/hmm.rst for how to use the range API. */ int hmm_range_register(struct hmm_range *range, - struct mm_struct *mm, + struct hmm_mirror *mirror, unsigned long start, unsigned long end, unsigned page_shift); @@ -532,7 +532,8 @@ static inline bool hmm_vma_range_done(struct hmm_range *range) } /* This is a temporary helper to avoid merge conflict between trees. 
*/ -static inline int hmm_vma_fault(struct hmm_range *range, bool block) +static inline int hmm_vma_fault(struct hmm_mirror *mirror, + struct hmm_range *range, bool block) { long ret; @@ -545,7 +546,7 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block) range->default_flags = 0; range->pfn_flags_mask = -1UL; - ret = hmm_range_register(range, range->vma->vm_mm, + ret = hmm_range_register(range, mirror, range->start, range->end, PAGE_SHIFT); if (ret) diff --git a/mm/hmm.c b/mm/hmm.c index f6956d78e3cb25..22a97ada108b4e 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -914,13 +914,13 @@ static void hmm_pfns_clear(struct hmm_range *range, * Track updates to the CPU page table see include/linux/hmm.h */ int hmm_range_register(struct hmm_range *range, - struct mm_struct *mm, + struct hmm_mirror *mirror, unsigned long start, unsigned long end, unsigned page_shift) { unsigned long mask = ((1UL << page_shift) - 1UL); - struct hmm *hmm; + struct hmm *hmm = mirror->hmm; range->valid = false; range->hmm = NULL; @@ -934,20 +934,15 @@ int hmm_range_register(struct hmm_range *range, range->start = start; range->end = end; - hmm = hmm_get_or_create(mm); - if (!hmm) - return -EFAULT; - /* Check if hmm_mm_destroy() was call. */ - if (hmm->mm == NULL || hmm->dead) { - hmm_put(hmm); + if (hmm->mm == NULL || hmm->dead) return -EFAULT; - } /* Initialize range to track CPU page table updates. */ mutex_lock(&hmm->lock); range->hmm = hmm; + kref_get(&hmm->kref); list_add_rcu(&range->list, &hmm->ranges); /*