diff mbox

[v1,for-rc,5/8] RDMA/vmw_pvrdma: Use refcount_t instead of atomic_t

Message ID 20171214002402.GA16082@bryantan-devbox.prom.eng.vmware.com.prom.eng.vmware.com (mailing list archive)
State Superseded
Headers show

Commit Message

Bryan Tan Dec. 14, 2017, 12:24 a.m. UTC
refcount_t is the preferred type for refcounts. Change the
QP and CQ refcnt fields to use refcount_t.

Reviewed-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Signed-off-by: Bryan Tan <bryantan@vmware.com>
---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h      |  4 ++--
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c   |  6 +++---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 15 ++++++---------
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c   |  6 +++---
 4 files changed, 14 insertions(+), 17 deletions(-)

Comments

Leon Romanovsky Dec. 19, 2017, 5:32 a.m. UTC | #1
On Wed, Dec 13, 2017 at 04:24:12PM -0800, Bryan Tan wrote:
> refcount_t is the preferred type for refcounts. Change the
> QP and CQ refcnt fields to use refcount_t.
>
> Reviewed-by: Adit Ranadive <aditr@vmware.com>
> Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
> Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
> Signed-off-by: Bryan Tan <bryantan@vmware.com>
> ---
>  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h      |  4 ++--
>  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c   |  6 +++---
>  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 15 ++++++---------
>  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c   |  6 +++---
>  4 files changed, 14 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
> index 63bc2ef..07d287e 100644
> --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
> +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
> @@ -93,7 +93,7 @@ struct pvrdma_cq {
>  	struct pvrdma_page_dir pdir;
>  	u32 cq_handle;
>  	bool is_kernel;
> -	atomic_t refcnt;
> +	refcount_t refcnt;
>  	wait_queue_head_t wait;
>  };
>
> @@ -196,7 +196,7 @@ struct pvrdma_qp {
>  	u8 state;
>  	bool is_kernel;
>  	struct mutex mutex; /* QP state mutex. */
> -	atomic_t refcnt;
> +	refcount_t refcnt;
>  	wait_queue_head_t wait;
>  };
>
> diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
> index ea8db5e6..9dba949 100644
> --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
> +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
> @@ -177,7 +177,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
>  	else
>  		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
>
> -	atomic_set(&cq->refcnt, 1);
> +	refcount_set(&cq->refcnt, 1);
>  	init_waitqueue_head(&cq->wait);
>  	spin_lock_init(&cq->cq_lock);
>
> @@ -229,8 +229,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
>
>  static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
>  {
> -	atomic_dec(&cq->refcnt);
> -	wait_event(cq->wait, !atomic_read(&cq->refcnt));
> +	if (!refcount_dec_and_test(&cq->refcnt))
> +		wait_event(cq->wait, !refcount_read(&cq->refcnt));

Aren't you supposed to call wait_event without a condition on refcnt and
sleep until refcnt == 0?

>
>  	if (!cq->is_kernel)
>  		ib_umem_release(cq->umem);
> diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
> index 941e324..5cff9fa 100644
> --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
> +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
> @@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
>  	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
>  	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
>  	if (qp)
> -		atomic_inc(&qp->refcnt);
> +		refcount_inc(&qp->refcnt);
>  	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
>
>  	if (qp && qp->ibqp.event_handler) {
> @@ -346,8 +346,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
>  		ibqp->event_handler(&e, ibqp->qp_context);
>  	}
>  	if (qp) {
> -		atomic_dec(&qp->refcnt);
> -		if (atomic_read(&qp->refcnt) == 0)
> +		if (refcount_dec_and_test(&qp->refcnt))
>  			wake_up(&qp->wait);
>  	}
>  }
> @@ -360,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
>  	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
>  	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
>  	if (cq)
> -		atomic_inc(&cq->refcnt);
> +		refcount_inc(&cq->refcnt);
>  	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
>
>  	if (cq && cq->ibcq.event_handler) {
> @@ -373,8 +372,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
>  		ibcq->event_handler(&e, ibcq->cq_context);
>  	}
>  	if (cq) {
> -		atomic_dec(&cq->refcnt);
> -		if (atomic_read(&cq->refcnt) == 0)
> +		if (refcount_dec_and_test(&cq->refcnt))
>  			wake_up(&cq->wait);
>  	}
>  }
> @@ -533,14 +531,13 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
>  		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
>  		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
>  		if (cq)
> -			atomic_inc(&cq->refcnt);
> +			refcount_inc(&cq->refcnt);
>  		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
>
>  		if (cq && cq->ibcq.comp_handler)
>  			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
>  		if (cq) {
> -			atomic_dec(&cq->refcnt);
> -			if (atomic_read(&cq->refcnt))
> +			if (refcount_dec_and_test(&cq->refcnt))
>  				wake_up(&cq->wait);
>  		}
>  		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
> diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
> index 77e7e57..9745cb1 100644
> --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
> +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
> @@ -245,7 +245,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
>  		spin_lock_init(&qp->sq.lock);
>  		spin_lock_init(&qp->rq.lock);
>  		mutex_init(&qp->mutex);
> -		atomic_set(&qp->refcnt, 1);
> +		refcount_set(&qp->refcnt, 1);
>  		init_waitqueue_head(&qp->wait);
>
>  		qp->state = IB_QPS_RESET;
> @@ -427,8 +427,8 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
>
>  	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
>
> -	atomic_dec(&qp->refcnt);
> -	wait_event(qp->wait, !atomic_read(&qp->refcnt));
> +	if (!refcount_dec_and_test(&qp->refcnt))
> +		wait_event(qp->wait, !refcount_read(&qp->refcnt));
>
>  	if (!qp->is_kernel) {
>  		if (qp->rumem)
> --
> 1.8.5.6
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
Bryan Tan Dec. 20, 2017, 8:02 p.m. UTC | #2
On Tue, Dec 19, 2017 at 07:32:01AM +0200, Leon Romanovsky wrote:
> On Wed, Dec 13, 2017 at 04:24:12PM -0800, Bryan Tan wrote:
> > @@ -229,8 +229,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
> >
> >  static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
> >  {
> > -	atomic_dec(&cq->refcnt);
> > -	wait_event(cq->wait, !atomic_read(&cq->refcnt));
> > +	if (!refcount_dec_and_test(&cq->refcnt))
> > +		wait_event(cq->wait, !refcount_read(&cq->refcnt));
> 
> Aren't you supposed to call wait_event without a condition on refcnt and
> sleep until refcnt == 0?

By the time we check this condition, there cannot be any new references
to this CQ. If refcnt is zero, there isn't any reason to call
wait_event.

However, we've changed this to use completions instead of wait queues,
because there is still a possibility of a use-after-free here. For SRQs,
we first apply this pattern to fix the problem with refcount_dec, and
then, a few commits later in the new patch series, the wait queues are
switched to completions. Please take a look at the v2 patch series.

Thanks!
Bryan
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 63bc2ef..07d287e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -93,7 +93,7 @@  struct pvrdma_cq {
 	struct pvrdma_page_dir pdir;
 	u32 cq_handle;
 	bool is_kernel;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	wait_queue_head_t wait;
 };
 
@@ -196,7 +196,7 @@  struct pvrdma_qp {
 	u8 state;
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
-	atomic_t refcnt;
+	refcount_t refcnt;
 	wait_queue_head_t wait;
 };
 
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index ea8db5e6..9dba949 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -177,7 +177,7 @@  struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 	else
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
-	atomic_set(&cq->refcnt, 1);
+	refcount_set(&cq->refcnt, 1);
 	init_waitqueue_head(&cq->wait);
 	spin_lock_init(&cq->cq_lock);
 
@@ -229,8 +229,8 @@  struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	atomic_dec(&cq->refcnt);
-	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+	if (!refcount_dec_and_test(&cq->refcnt))
+		wait_event(cq->wait, !refcount_read(&cq->refcnt));
 
 	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 941e324..5cff9fa 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -333,7 +333,7 @@  static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
 	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
 	if (qp)
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
 
 	if (qp && qp->ibqp.event_handler) {
@@ -346,8 +346,7 @@  static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		atomic_dec(&qp->refcnt);
-		if (atomic_read(&qp->refcnt) == 0)
+		if (refcount_dec_and_test(&qp->refcnt))
 			wake_up(&qp->wait);
 	}
 }
@@ -360,7 +359,7 @@  static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
 	if (cq)
-		atomic_inc(&cq->refcnt);
+		refcount_inc(&cq->refcnt);
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 	if (cq && cq->ibcq.event_handler) {
@@ -373,8 +372,7 @@  static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		atomic_dec(&cq->refcnt);
-		if (atomic_read(&cq->refcnt) == 0)
+		if (refcount_dec_and_test(&cq->refcnt))
 			wake_up(&cq->wait);
 	}
 }
@@ -533,14 +531,13 @@  static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
 		if (cq)
-			atomic_inc(&cq->refcnt);
+			refcount_inc(&cq->refcnt);
 		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			atomic_dec(&cq->refcnt);
-			if (atomic_read(&cq->refcnt))
+			if (refcount_dec_and_test(&cq->refcnt))
 				wake_up(&cq->wait);
 		}
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 77e7e57..9745cb1 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -245,7 +245,7 @@  struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		spin_lock_init(&qp->sq.lock);
 		spin_lock_init(&qp->rq.lock);
 		mutex_init(&qp->mutex);
-		atomic_set(&qp->refcnt, 1);
+		refcount_set(&qp->refcnt, 1);
 		init_waitqueue_head(&qp->wait);
 
 		qp->state = IB_QPS_RESET;
@@ -427,8 +427,8 @@  static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	atomic_dec(&qp->refcnt);
-	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+	if (!refcount_dec_and_test(&qp->refcnt))
+		wait_event(qp->wait, !refcount_read(&qp->refcnt));
 
 	if (!qp->is_kernel) {
 		if (qp->rumem)