
[1/2] RDMA/umem: Minor optimizations

Message ID: d0df1b5b0fefb8a86c1d83f205dff6f97717d7a2.1537543687.git.dledford@redhat.com (mailing list archive)
State: Accepted
Delegated to: Jason Gunthorpe
Series: Minor umem cleanups

Commit Message

Doug Ledford Sept. 21, 2018, 3:30 p.m. UTC
Noticed while reviewing the d4b4dd1b9706 ("RDMA/umem: Do not use
current->tgid to track the mm_struct") patch.  Why would we take a lock,
adjust a protected variable, drop the lock, and *then* check the input
to our protected variable adjustment?  Then we have to take the lock
again on our error unwind.  Let's just check the input early and skip
taking the locks needlessly if the input isn't valid.

It was also noticed that we set mm = current->mm and then never modify
mm, but we still go back and reference current->mm a number of times
needlessly.  Be consistent in using the stored reference in mm.

Signed-off-by: Doug Ledford <dledford@redhat.com>
---
 drivers/infiniband/core/umem.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
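
The first cleanup boils down to an ordering rule: validate the caller's
input before touching the counter that lives under the lock, so the error
path never has to re-acquire the lock just to undo the adjustment.  Below
is a minimal sketch of that pattern with hypothetical names, not the
actual ib_umem_get() code; the limit is passed in to keep the sketch
self-contained.

/*
 * Minimal sketch of the "validate before locking" pattern described in
 * the commit message above.  Hypothetical helper, not the real
 * ib_umem_get().
 */
#include <linux/kernel.h>
#include <linux/mm.h>

static int pin_account_sketch(struct mm_struct *mm, unsigned long npages,
			      unsigned long lock_limit)
{
	/* Reject bad input first -- no lock held yet, nothing to unwind. */
	if (npages == 0 || npages > UINT_MAX)
		return -EINVAL;

	down_write(&mm->mmap_sem);
	if (mm->pinned_vm + npages > lock_limit) {
		up_write(&mm->mmap_sem);
		return -ENOMEM;
	}
	mm->pinned_vm += npages;
	up_write(&mm->mmap_sem);

	return 0;
}

In the patch below, the hoisted check correspondingly jumps to the plain
"out:" label instead of "vma:", since nothing has been accounted yet when
it fires.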

Comments

Leon Romanovsky Sept. 25, 2018, 5:31 p.m. UTC | #1
On Fri, Sep 21, 2018 at 11:30:12AM -0400, Doug Ledford wrote:
> Noticed while reviewing the d4b4dd1b9706 ("RDMA/umem: Do not use
> current->tgid to track the mm_struct") patch.  Why would we take a lock,
> adjust a protected variable, drop the lock, and *then* check the input
> to our protected variable adjustment?  Then we have to take the lock
> again on our error unwind.  Let's just check the input early and skip
> taking the locks needlessly if the input isn't valid.
>
> It was also noticed that we set mm = current->mm and then never modify
> mm, but we still go back and reference current->mm a number of times
> needlessly.  Be consistent in using the stored reference in mm.
>
> Signed-off-by: Doug Ledford <dledford@redhat.com>
> ---
>  drivers/infiniband/core/umem.c | 17 ++++++++---------
>  1 file changed, 8 insertions(+), 9 deletions(-)
>

Thanks,
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>

Patch

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c32a3e27a896..e54e50dd854b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -147,6 +147,10 @@  struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		umem->hugetlb = 0;
 
 	npages = ib_umem_num_pages(umem);
+	if (npages == 0 || npages > UINT_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
@@ -161,11 +165,6 @@  struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	cur_base = addr & PAGE_MASK;
 
-	if (npages == 0 || npages > UINT_MAX) {
-		ret = -EINVAL;
-		goto vma;
-	}
-
 	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
 	if (ret)
 		goto vma;
@@ -219,16 +218,16 @@  struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 umem_release:
 	__ib_umem_release(context->device, umem, 0);
 vma:
-	down_write(&current->mm->mmap_sem);
-	current->mm->pinned_vm -= ib_umem_num_pages(umem);
-	up_write(&current->mm->mmap_sem);
+	down_write(&mm->mmap_sem);
+	mm->pinned_vm -= ib_umem_num_pages(umem);
+	up_write(&mm->mmap_sem);
 out:
 	if (vma_list)
 		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 umem_kfree_drop:
 	if (ret)
-		mmdrop(umem->owning_mm);
+		mmdrop(mm);
 umem_kfree:
 	if (ret)
 		kfree(umem);
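
The last two hunks are the whole of the second cleanup: the function has
already cached current->mm in a local mm and holds a reference on it (the
mmdrop() in the unwind pairs with an earlier mmgrab()), so the unwind
should go through that stored pointer instead of chasing current->mm
again.  A sketch of the overall shape follows, assuming a hypothetical
do_the_real_pinning() step and reusing the accounting helper from the
earlier sketch.

/*
 * Sketch of keeping a single cached mm pointer for the whole call,
 * including the error unwind.  do_the_real_pinning() is hypothetical,
 * and pin_account_sketch() is the helper from the earlier sketch.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

static int do_the_real_pinning(struct mm_struct *mm, unsigned long npages); /* hypothetical */

static int pin_with_cached_mm_sketch(unsigned long npages)
{
	struct mm_struct *mm = current->mm;	/* read current->mm exactly once */
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

	mmgrab(mm);	/* keep the mm_struct alive until we are done */

	ret = pin_account_sketch(mm, npages, lock_limit);
	if (ret)
		goto drop;

	ret = do_the_real_pinning(mm, npages);
	if (ret)
		goto unaccount;

	return 0;

unaccount:
	/* Unwind through the cached pointer, never through current->mm. */
	down_write(&mm->mmap_sem);
	mm->pinned_vm -= npages;
	up_write(&mm->mmap_sem);
drop:
	mmdrop(mm);
	return ret;
}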