
[rdma-next] RDMA/uverbs: Don't do double free of allocated PD

Message ID: 20190213170705.18702-1-leon@kernel.org (mailing list archive)
State: Mainlined
Commit: bb618451544ca9152a1a213d0a2b93d231c4cce1
Delegated to: Jason Gunthorpe
Series: [rdma-next] RDMA/uverbs: Don't do double free of allocated PD

Commit Message

Leon Romanovsky Feb. 13, 2019, 5:07 p.m. UTC
From: Leon Romanovsky <leonro@mellanox.com>

There is no need to call kfree(pd) because ib_dealloc_pd()
internally frees PD.

Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/core/uverbs_cmd.c | 1 +
 1 file changed, 1 insertion(+)

Comments

Jason Gunthorpe Feb. 14, 2019, 5:50 a.m. UTC | #1
On Wed, Feb 13, 2019 at 07:07:05PM +0200, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@mellanox.com>
> 
> There is no need to call kfree(pd) because ib_dealloc_pd()
> internally frees PD.
> 
> Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core")
> Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> ---
>  drivers/infiniband/core/uverbs_cmd.c | 1 +
>  1 file changed, 1 insertion(+)

> diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
> index 4f70b34dc3fa..5425e0e534e2 100644
> --- a/drivers/infiniband/core/uverbs_cmd.c
> +++ b/drivers/infiniband/core/uverbs_cmd.c
> @@ -440,6 +440,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
>  
>  err_copy:
>  	ib_dealloc_pd(pd);
> +	pd = NULL;
>  err_alloc:
>  	kfree(pd);
>  err:
>	uobj_alloc_abort(uobj);

Hum. This becomes quite ugly.. I had left it in this weird state
because it was too big to fix at the time, but maybe we should have a
go at it now...

After the HW object is created it should immediately be stored in
uobj->object, and from then on the uobj system owns it and is
responsible for destroying it, i.e. rdma_alloc_abort() will fully
destroy the thing.

This makes all the old-style flows pretty clean, but trades a sketchy
pd = NULL for a sketchy out-of-order goto for the new flows, though I
think we could tidy that further and reduce more repetitive code with
some small effort.

Something like this as a first pass:

 drivers/infiniband/core/rdma_core.c           |  1 -
 drivers/infiniband/core/uverbs_cmd.c          | 85 ++++++-------------
 .../core/uverbs_std_types_counters.c          | 10 +--
 drivers/infiniband/core/uverbs_std_types_cq.c | 10 +--
 4 files changed, 28 insertions(+), 78 deletions(-)

diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index a260d2f8e0b771..3599b97b00b59b 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -679,7 +679,6 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
 {
 	struct ib_uverbs_file *ufile = uobj->ufile;
 
-	uobj->object = NULL;
 	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
 
 	/* Matches the down_read in rdma_alloc_begin_uobject */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5ac143f22df009..15f31c915cd1fd 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -430,12 +430,10 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_copy;
+		goto err;
 
 	return uobj_alloc_commit(uobj);
 
-err_copy:
-	ib_dealloc_pd(pd);
 err_alloc:
 	kfree(pd);
 err:
@@ -609,20 +607,20 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
 	memset(&resp, 0, sizeof resp);
 	resp.xrcd_handle = obj->uobject.id;
 
+	ret = uverbs_response(attrs, &resp, sizeof(resp));
+	if (ret)
+		goto err;
+
 	if (inode) {
 		if (new_xrcd) {
 			/* create new inode/xrcd table entry */
 			ret = xrcd_table_insert(ibudev, inode, xrcd);
 			if (ret)
-				goto err_dealloc_xrcd;
+				goto err;
 		}
 		atomic_inc(&xrcd->usecnt);
 	}
 
-	ret = uverbs_response(attrs, &resp, sizeof(resp));
-	if (ret)
-		goto err_copy;
-
 	if (f.file)
 		fdput(f);
 
@@ -630,16 +628,6 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
 
 	return uobj_alloc_commit(&obj->uobject);
 
-err_copy:
-	if (inode) {
-		if (new_xrcd)
-			xrcd_table_delete(ibudev, inode);
-		atomic_dec(&xrcd->usecnt);
-	}
-
-err_dealloc_xrcd:
-	ib_dealloc_xrcd(xrcd);
-
 err:
 	uobj_alloc_abort(&obj->uobject);
 
@@ -754,15 +742,12 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_copy;
+		goto err_put;
 
 	uobj_put_obj_read(pd);
 
 	return uobj_alloc_commit(uobj);
 
-err_copy:
-	ib_dereg_mr(mr);
-
 err_put:
 	uobj_put_obj_read(pd);
 
@@ -905,13 +890,11 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_copy;
+		goto err_put;
 
 	uobj_put_obj_read(pd);
 	return uobj_alloc_commit(uobj);
 
-err_copy:
-	uverbs_dealloc_mw(mw);
 err_put:
 	uobj_put_obj_read(pd);
 err_free:
@@ -1025,16 +1008,13 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_cb;
+		goto err;
 
 	ret = uobj_alloc_commit(&obj->uobject);
 	if (ret)
 		return ERR_PTR(ret);
 	return obj;
 
-err_cb:
-	ib_destroy_cq(cq);
-
 err_file:
 	if (ev_file)
 		ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);
@@ -1404,12 +1384,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 		ret = PTR_ERR(qp);
 		goto err_put;
 	}
+	obj->uevent.uobject.object = qp;
 
 	if (cmd->qp_type != IB_QPT_XRC_TGT) {
-		ret = ib_create_qp_security(qp, device);
-		if (ret)
-			goto err_cb;
-
 		qp->real_qp	  = qp;
 		qp->pd		  = pd;
 		qp->send_cq	  = attr.send_cq;
@@ -1430,13 +1407,15 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 			atomic_inc(&attr.srq->usecnt);
 		if (ind_tbl)
 			atomic_inc(&ind_tbl->usecnt);
+
+		ret = ib_create_qp_security(qp, device);
+		if (ret)
+			goto err_put;
 	} else {
 		/* It is done in _ib_create_qp for other QP types */
 		qp->uobject = &obj->uevent.uobject;
 	}
 
-	obj->uevent.uobject.object = qp;
-
 	memset(&resp, 0, sizeof resp);
 	resp.base.qpn             = qp->qp_num;
 	resp.base.qp_handle       = obj->uevent.uobject.id;
@@ -1449,7 +1428,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_cb;
+		goto err_put;
 
 	if (xrcd) {
 		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
@@ -1470,8 +1449,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
 		uobj_put_obj_read(ind_tbl);
 
 	return uobj_alloc_commit(&obj->uevent.uobject);
-err_cb:
-	ib_destroy_qp(qp);
 
 err_put:
 	if (!IS_ERR(xrcd_uobj))
@@ -1594,7 +1571,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_destroy;
+		goto err_xrcd;
 
 	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
 	atomic_inc(&obj->uxrcd->refcnt);
@@ -1603,8 +1580,6 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
 
 	return uobj_alloc_commit(&obj->uevent.uobject);
 
-err_destroy:
-	ib_destroy_qp(qp);
 err_xrcd:
 	uobj_put_read(xrcd_uobj);
 err_put:
@@ -2440,14 +2415,11 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_copy;
+		goto err_put;
 
 	uobj_put_obj_read(pd);
 	return uobj_alloc_commit(uobj);
 
-err_copy:
-	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
-
 err_put:
 	uobj_put_obj_read(pd);
 
@@ -2950,14 +2922,12 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
 	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
 	err = uverbs_response(attrs, &resp, sizeof(resp));
 	if (err)
-		goto err_copy;
+		goto err_put_cq;
 
 	uobj_put_obj_read(pd);
 	uobj_put_obj_read(cq);
 	return uobj_alloc_commit(&obj->uevent.uobject);
 
-err_copy:
-	ib_destroy_wq(wq);
 err_put_cq:
 	uobj_put_obj_read(cq);
 err_put_pd:
@@ -3105,6 +3075,7 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 		goto err_uobj;
 	}
 
+	wqs = NULL;
 	rwq_ind_tbl->ind_tbl = wqs;
 	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
 	rwq_ind_tbl->uobject = uobj;
@@ -3121,7 +3092,7 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 
 	err = uverbs_response(attrs, &resp, sizeof(resp));
 	if (err)
-		goto err_copy;
+		goto err_uobj;
 
 	kfree(wqs_handles);
 
@@ -3130,8 +3101,6 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
 
 	return uobj_alloc_commit(uobj);
 
-err_copy:
-	ib_destroy_rwq_ind_table(rwq_ind_tbl);
 err_uobj:
 	uobj_alloc_abort(uobj);
 put_wqs:
@@ -3293,23 +3262,20 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
 		goto err_free;
 	}
 
-	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
-
 	memset(&resp, 0, sizeof(resp));
 	resp.flow_handle = uobj->id;
 
 	err = uverbs_response(attrs, &resp, sizeof(resp));
 	if (err)
-		goto err_copy;
+		goto err_free;
+
+	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
 
 	uobj_put_obj_read(qp);
 	kfree(flow_attr);
 	if (cmd.flow_attr.num_of_specs)
 		kfree(kern_flow_attr);
 	return uobj_alloc_commit(uobj);
-err_copy:
-	if (!qp->device->ops.destroy_flow(flow_id))
-		atomic_dec(&qp->usecnt);
 err_free:
 	ib_uverbs_flow_resources_free(uflow_res);
 err_free_flow_attr:
@@ -3441,7 +3407,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
-		goto err_copy;
+		goto err;
 
 	if (cmd->srq_type == IB_SRQT_XRC)
 		uobj_put_read(xrcd_uobj);
@@ -3452,9 +3418,6 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
 	uobj_put_obj_read(pd);
 	return uobj_alloc_commit(&obj->uevent.uobject);
 
-err_copy:
-	ib_destroy_srq(srq);
-
 err_put:
 	uobj_put_obj_read(pd);
 
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 309c5e80988d2e..9b096d7e36cf59 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -54,7 +54,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
 		attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
 	struct ib_device *ib_dev = uobj->context->device;
 	struct ib_counters *counters;
-	int ret;
 
 	/*
 	 * This check should be removed once the infrastructure
@@ -65,10 +64,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
 		return -EOPNOTSUPP;
 
 	counters = ib_dev->ops.create_counters(ib_dev, attrs);
-	if (IS_ERR(counters)) {
-		ret = PTR_ERR(counters);
-		goto err_create_counters;
-	}
+	if (IS_ERR(counters))
+		return PTR_ERR(counters);
 
 	counters->device = ib_dev;
 	counters->uobject = uobj;
@@ -76,9 +73,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
 	atomic_set(&counters->usecnt, 0);
 
 	return 0;
-
-err_create_counters:
-	return ret;
 }
 
 static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index a59ea89e3f2b06..f9f4c1460c7a58 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -128,14 +128,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
 	cq->res.type = RDMA_RESTRACK_CQ;
 	rdma_restrack_uadd(&cq->res);
 
-	ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
-			     sizeof(cq->cqe));
-	if (ret)
-		goto err_cq;
-
-	return 0;
-err_cq:
-	ib_destroy_cq(cq);
+	return uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
+			      sizeof(cq->cqe));
 
 err_event_file:
 	if (ev_file)
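
For reference, the ownership rule above boils down to a flow like the
following. This is a rough sketch, not actual uverbs code: create_hw_pd()
and the resp/ret setup are illustrative stand-ins, while uobj->object,
uverbs_response(), uobj_alloc_commit() and uobj_alloc_abort() are the real
pieces the diff relies on:

	uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = create_hw_pd(ib_dev, attrs);	/* stand-in for the driver call */
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;			/* nothing to destroy yet */
	}
	uobj->object = pd;			/* from here the uobj owns pd */

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err;			/* abort now destroys pd as well */

	return uobj_alloc_commit(uobj);

err:
	uobj_alloc_abort(uobj);	/* ends up in uverbs_destroy_uobject(RDMA_REMOVE_ABORT) */
	return ret;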
Leon Romanovsky Feb. 14, 2019, 6:27 a.m. UTC | #2
On Wed, Feb 13, 2019 at 10:50:41PM -0700, Jason Gunthorpe wrote:
> On Wed, Feb 13, 2019 at 07:07:05PM +0200, Leon Romanovsky wrote:
> > From: Leon Romanovsky <leonro@mellanox.com>
> >
> > There is no need to call kfree(pd) because ib_dealloc_pd()
> > internally frees PD.
> >
> > Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core")
> > Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> > Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> > ---
> >  drivers/infiniband/core/uverbs_cmd.c | 1 +
> >  1 file changed, 1 insertion(+)
>
> > diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
> > index 4f70b34dc3fa..5425e0e534e2 100644
> > --- a/drivers/infiniband/core/uverbs_cmd.c
> > +++ b/drivers/infiniband/core/uverbs_cmd.c
> > @@ -440,6 +440,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
> >
> >  err_copy:
> >  	ib_dealloc_pd(pd);
> > +	pd = NULL;
> >  err_alloc:
> >  	kfree(pd);
> >  err:
> >	uobj_alloc_abort(uobj);
>
> Hum. This becomes quite ugly.. I had left it in this weird state
> because it was too big to fix at the time, but maybe we should have a
> go at it now...
>
> After the HW object is created it should immediately be stored in
> uobj->object, and from then on the uobj system owns it and is
> responsible for destroying it, i.e. rdma_alloc_abort() will fully
> destroy the thing.
>
> This makes all the old-style flows pretty clean, but trades a sketchy
> pd = NULL for a sketchy out-of-order goto for the new flows, though I
> think we could tidy that further and reduce more repetitive code with
> some small effort.
>
> Something like this as a first pass:
>
>  drivers/infiniband/core/rdma_core.c           |  1 -
>  drivers/infiniband/core/uverbs_cmd.c          | 85 ++++++-------------
>  .../core/uverbs_std_types_counters.c          | 10 +--
>  drivers/infiniband/core/uverbs_std_types_cq.c | 10 +--
>  4 files changed, 28 insertions(+), 78 deletions(-)
>
> diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
> index a260d2f8e0b771..3599b97b00b59b 100644
> --- a/drivers/infiniband/core/rdma_core.c
> +++ b/drivers/infiniband/core/rdma_core.c
> @@ -679,7 +679,6 @@ void rdma_alloc_abort_uobject(struct ib_uobject *uobj)
>  {
>  	struct ib_uverbs_file *ufile = uobj->ufile;
>
> -	uobj->object = NULL;
>  	uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT);
>
>  	/* Matches the down_read in rdma_alloc_begin_uobject */
> diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
> index 5ac143f22df009..15f31c915cd1fd 100644
> --- a/drivers/infiniband/core/uverbs_cmd.c
> +++ b/drivers/infiniband/core/uverbs_cmd.c
> @@ -430,12 +430,10 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_copy;
> +		goto err;
>
>  	return uobj_alloc_commit(uobj);
>
> -err_copy:
> -	ib_dealloc_pd(pd);
>  err_alloc:
>  	kfree(pd);


It looks even stranger: you are doing out-of-order cleanup. IMHO, it is
worse than "pd = NULL".

>  err:
> @@ -609,20 +607,20 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
>  	memset(&resp, 0, sizeof resp);
>  	resp.xrcd_handle = obj->uobject.id;
>
> +	ret = uverbs_response(attrs, &resp, sizeof(resp));
> +	if (ret)
> +		goto err;
> +
>  	if (inode) {
>  		if (new_xrcd) {
>  			/* create new inode/xrcd table entry */
>  			ret = xrcd_table_insert(ibudev, inode, xrcd);
>  			if (ret)
> -				goto err_dealloc_xrcd;
> +				goto err;
>  		}
>  		atomic_inc(&xrcd->usecnt);
>  	}
>
> -	ret = uverbs_response(attrs, &resp, sizeof(resp));
> -	if (ret)
> -		goto err_copy;
> -
>  	if (f.file)
>  		fdput(f);
>
> @@ -630,16 +628,6 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
>
>  	return uobj_alloc_commit(&obj->uobject);
>
> -err_copy:
> -	if (inode) {
> -		if (new_xrcd)
> -			xrcd_table_delete(ibudev, inode);
> -		atomic_dec(&xrcd->usecnt);
> -	}
> -
> -err_dealloc_xrcd:
> -	ib_dealloc_xrcd(xrcd);
> -
>  err:
>  	uobj_alloc_abort(&obj->uobject);
>
> @@ -754,15 +742,12 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_copy;
> +		goto err_put;
>
>  	uobj_put_obj_read(pd);
>
>  	return uobj_alloc_commit(uobj);
>
> -err_copy:
> -	ib_dereg_mr(mr);
> -
>  err_put:
>  	uobj_put_obj_read(pd);
>
> @@ -905,13 +890,11 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_copy;
> +		goto err_put;
>
>  	uobj_put_obj_read(pd);
>  	return uobj_alloc_commit(uobj);
>
> -err_copy:
> -	uverbs_dealloc_mw(mw);
>  err_put:
>  	uobj_put_obj_read(pd);
>  err_free:
> @@ -1025,16 +1008,13 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_cb;
> +		goto err;
>
>  	ret = uobj_alloc_commit(&obj->uobject);
>  	if (ret)
>  		return ERR_PTR(ret);
>  	return obj;
>
> -err_cb:
> -	ib_destroy_cq(cq);
> -
>  err_file:
>  	if (ev_file)
>  		ib_uverbs_release_ucq(attrs->ufile, ev_file, obj);
> @@ -1404,12 +1384,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
>  		ret = PTR_ERR(qp);
>  		goto err_put;
>  	}
> +	obj->uevent.uobject.object = qp;
>
>  	if (cmd->qp_type != IB_QPT_XRC_TGT) {
> -		ret = ib_create_qp_security(qp, device);
> -		if (ret)
> -			goto err_cb;
> -
>  		qp->real_qp	  = qp;
>  		qp->pd		  = pd;
>  		qp->send_cq	  = attr.send_cq;
> @@ -1430,13 +1407,15 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
>  			atomic_inc(&attr.srq->usecnt);
>  		if (ind_tbl)
>  			atomic_inc(&ind_tbl->usecnt);
> +
> +		ret = ib_create_qp_security(qp, device);
> +		if (ret)
> +			goto err_put;
>  	} else {
>  		/* It is done in _ib_create_qp for other QP types */
>  		qp->uobject = &obj->uevent.uobject;
>  	}
>
> -	obj->uevent.uobject.object = qp;
> -
>  	memset(&resp, 0, sizeof resp);
>  	resp.base.qpn             = qp->qp_num;
>  	resp.base.qp_handle       = obj->uevent.uobject.id;
> @@ -1449,7 +1428,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_cb;
> +		goto err_put;
>
>  	if (xrcd) {
>  		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
> @@ -1470,8 +1449,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
>  		uobj_put_obj_read(ind_tbl);
>
>  	return uobj_alloc_commit(&obj->uevent.uobject);
> -err_cb:
> -	ib_destroy_qp(qp);
>
>  err_put:
>  	if (!IS_ERR(xrcd_uobj))
> @@ -1594,7 +1571,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_destroy;
> +		goto err_xrcd;
>
>  	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
>  	atomic_inc(&obj->uxrcd->refcnt);
> @@ -1603,8 +1580,6 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
>
>  	return uobj_alloc_commit(&obj->uevent.uobject);
>
> -err_destroy:
> -	ib_destroy_qp(qp);
>  err_xrcd:
>  	uobj_put_read(xrcd_uobj);
>  err_put:
> @@ -2440,14 +2415,11 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_copy;
> +		goto err_put;
>
>  	uobj_put_obj_read(pd);
>  	return uobj_alloc_commit(uobj);
>
> -err_copy:
> -	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
> -
>  err_put:
>  	uobj_put_obj_read(pd);
>
> @@ -2950,14 +2922,12 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
>  	resp.response_length = uverbs_response_length(attrs, sizeof(resp));
>  	err = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (err)
> -		goto err_copy;
> +		goto err_put_cq;
>
>  	uobj_put_obj_read(pd);
>  	uobj_put_obj_read(cq);
>  	return uobj_alloc_commit(&obj->uevent.uobject);
>
> -err_copy:
> -	ib_destroy_wq(wq);
>  err_put_cq:
>  	uobj_put_obj_read(cq);
>  err_put_pd:
> @@ -3105,6 +3075,7 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
>  		goto err_uobj;
>  	}
>
> +	wqs = NULL;
>  	rwq_ind_tbl->ind_tbl = wqs;
>  	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
>  	rwq_ind_tbl->uobject = uobj;
> @@ -3121,7 +3092,7 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
>
>  	err = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (err)
> -		goto err_copy;
> +		goto err_uobj;
>
>  	kfree(wqs_handles);
>
> @@ -3130,8 +3101,6 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs)
>
>  	return uobj_alloc_commit(uobj);
>
> -err_copy:
> -	ib_destroy_rwq_ind_table(rwq_ind_tbl);
>  err_uobj:
>  	uobj_alloc_abort(uobj);
>  put_wqs:
> @@ -3293,23 +3262,20 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs)
>  		goto err_free;
>  	}
>
> -	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
> -
>  	memset(&resp, 0, sizeof(resp));
>  	resp.flow_handle = uobj->id;
>
>  	err = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (err)
> -		goto err_copy;
> +		goto err_free;
> +
> +	ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
>
>  	uobj_put_obj_read(qp);
>  	kfree(flow_attr);
>  	if (cmd.flow_attr.num_of_specs)
>  		kfree(kern_flow_attr);
>  	return uobj_alloc_commit(uobj);
> -err_copy:
> -	if (!qp->device->ops.destroy_flow(flow_id))
> -		atomic_dec(&qp->usecnt);
>  err_free:
>  	ib_uverbs_flow_resources_free(uflow_res);
>  err_free_flow_attr:
> @@ -3441,7 +3407,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
>
>  	ret = uverbs_response(attrs, &resp, sizeof(resp));
>  	if (ret)
> -		goto err_copy;
> +		goto err;
>
>  	if (cmd->srq_type == IB_SRQT_XRC)
>  		uobj_put_read(xrcd_uobj);
> @@ -3452,9 +3418,6 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
>  	uobj_put_obj_read(pd);
>  	return uobj_alloc_commit(&obj->uevent.uobject);
>
> -err_copy:
> -	ib_destroy_srq(srq);
> -
>  err_put:
>  	uobj_put_obj_read(pd);
>
> diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
> index 309c5e80988d2e..9b096d7e36cf59 100644
> --- a/drivers/infiniband/core/uverbs_std_types_counters.c
> +++ b/drivers/infiniband/core/uverbs_std_types_counters.c
> @@ -54,7 +54,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
>  		attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE);
>  	struct ib_device *ib_dev = uobj->context->device;
>  	struct ib_counters *counters;
> -	int ret;
>
>  	/*
>  	 * This check should be removed once the infrastructure
> @@ -65,10 +64,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
>  		return -EOPNOTSUPP;
>
>  	counters = ib_dev->ops.create_counters(ib_dev, attrs);
> -	if (IS_ERR(counters)) {
> -		ret = PTR_ERR(counters);
> -		goto err_create_counters;
> -	}
> +	if (IS_ERR(counters))
> +		return PTR_ERR(counters);
>
>  	counters->device = ib_dev;
>  	counters->uobject = uobj;
> @@ -76,9 +73,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)(
>  	atomic_set(&counters->usecnt, 0);
>
>  	return 0;
> -
> -err_create_counters:
> -	return ret;
>  }
>
>  static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
> diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
> index a59ea89e3f2b06..f9f4c1460c7a58 100644
> --- a/drivers/infiniband/core/uverbs_std_types_cq.c
> +++ b/drivers/infiniband/core/uverbs_std_types_cq.c
> @@ -128,14 +128,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
>  	cq->res.type = RDMA_RESTRACK_CQ;
>  	rdma_restrack_uadd(&cq->res);
>
> -	ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
> -			     sizeof(cq->cqe));
> -	if (ret)
> -		goto err_cq;
> -
> -	return 0;
> -err_cq:
> -	ib_destroy_cq(cq);
> +	return uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
> +			      sizeof(cq->cqe));
>
>  err_event_file:
>  	if (ev_file)
> --
> 2.20.1
>
Jason Gunthorpe Feb. 14, 2019, 5:47 p.m. UTC | #3
On Thu, Feb 14, 2019 at 08:27:56AM +0200, Leon Romanovsky wrote:
> > diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
> > index 5ac143f22df009..15f31c915cd1fd 100644
> > +++ b/drivers/infiniband/core/uverbs_cmd.c
> > @@ -430,12 +430,10 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
> >
> >  	ret = uverbs_response(attrs, &resp, sizeof(resp));
> >  	if (ret)
> > -		goto err_copy;
> > +		goto err;
> >
> >  	return uobj_alloc_commit(uobj);
> >
> > -err_copy:
> > -	ib_dealloc_pd(pd);
> >  err_alloc:
> >  	kfree(pd);
> 
> 
> It looks even stranger: you are doing out-of-order cleanup. IMHO, it is
> worse than "pd = NULL".

Like I said, we can at least fix this with some more work...

The sensible way to handle this is to make the uobject allocation
always come along with the necessary driver memory:

	pd = uobj_alloc_drv(&uobj, UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(pd))
		return PTR_ERR(pd);
	// i.e. pd == uobj->object == rdma_zalloc_drv_obj()

	ret = ib_dev->ops.alloc_pd(pd, ...);
	if (ret) {
		uobj_alloc_abort(uobj);	// does kfree
		return ret;
	}
	uobj_completed_hw_setup(uobj);
	...
	if (ret) {
		uobj_alloc_abort(uobj);	// does hw_destroy
		return ret;
	}

This is now really quite a clean pattern - the uobject always owns the
driver HW object and always frees it as part of uobj cleanup.

Jason
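
Spelled out as a thought experiment (every helper here is one of the ones
proposed above, none of them exists in the tree yet, and the surrounding
resp/ret handling is only sketched), ib_uverbs_alloc_pd would then shrink
to roughly:

	pd = uobj_alloc_drv(&uobj, UVERBS_OBJECT_PD, attrs, &ib_dev);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	ret = ib_dev->ops.alloc_pd(pd, ...);
	if (ret)
		goto err;		/* abort does the kfree */
	uobj_completed_hw_setup(uobj);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
	if (ret)
		goto err;		/* abort now does the hw destroy */

	return uobj_alloc_commit(uobj);

err:
	uobj_alloc_abort(uobj);
	return ret;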
Leon Romanovsky Feb. 23, 2019, 8:39 a.m. UTC | #4
On Wed, Feb 13, 2019 at 07:07:05PM +0200, Leon Romanovsky wrote:
> From: Leon Romanovsky <leonro@mellanox.com>
>
> There is no need to call kfree(pd) because ib_dealloc_pd()
> internally frees PD.
>
> Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core")
> Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> ---
>  drivers/infiniband/core/uverbs_cmd.c | 1 +
>  1 file changed, 1 insertion(+)
>

Jason, Doug,

I see that this patch was accepted
(https://patchwork.kernel.org/patch/10810421/), but I don't see it in the tree:
https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git/tree/drivers/infiniband/core/uverbs_cmd.c?h=wip/jgg-for-next#n444

Thanks
Jason Gunthorpe Feb. 25, 2019, 10:03 p.m. UTC | #5
On Sat, Feb 23, 2019 at 01:39:11AM -0700, Leon Romanovsky wrote:
> On Wed, Feb 13, 2019 at 07:07:05PM +0200, Leon Romanovsky wrote:
> > From: Leon Romanovsky <leonro@mellanox.com>
> >
> > There is no need to call kfree(pd) because ib_dealloc_pd()
> > internally frees PD.
> >
> > Fixes: 21a428a019c9 ("RDMA: Handle PD allocations by IB/core")
> > Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
> > Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
> >  drivers/infiniband/core/uverbs_cmd.c | 1 +
> >  1 file changed, 1 insertion(+)
> >
> 
> Jason, Doug,
> 
> I see that this patch is accepted
> https://patchwork.kernel.org/patch/10810421/, but don't see it in the tree.
> https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git/tree/drivers/infiniband/core/uverbs_cmd.c?h=wip/jgg-for-next#n444

Ah, my bad, I had intended to apply it and try some more in this area
later.

Jason

Patch

diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4f70b34dc3fa..5425e0e534e2 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -440,6 +440,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
 
 err_copy:
 	ib_dealloc_pd(pd);
+	pd = NULL;
 err_alloc:
 	kfree(pd);
 err: