
[3/3] nvme-rdma: Fix device removal handling

Message ID: 0cb1ccaa920b3ec48dd94ea49fa0f0b7c5520d38.1468879135.git.swise@opengridcomputing.com
State: Superseded

Commit Message

Sagi Grimberg July 18, 2016, 8:44 p.m. UTC
The device removal sequence could crash because the
controller (and the admin queue memory) was freed before
we destroyed the admin queue resources. Thus we want to
destroy the admin queue first, and only then queue the
controller deletion and wait for it to complete.

More specifically we:
1. own the controller deletion (make sure we are not
   competing with another deletion).
2. get rid of in-flight reconnects, if any exist (these
   also destroy and create queues).
3. destroy the queue.
4. safely queue the controller deletion (and wait for it
   to complete); the resulting function is reassembled
   after the diffstat below.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
---
 drivers/nvme/host/rdma.c | 49 ++++++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 22 deletions(-)
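
Reassembled from the diff at the bottom of this page, the unplug
path after this patch reads:

	static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
	{
		struct nvme_rdma_ctrl *ctrl = queue->ctrl;
		int ret;

		/* Own the controller deletion */
		if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
			return 0;

		dev_warn(ctrl->ctrl.device,
			"Got rdma device removal event, deleting ctrl\n");

		/* Get rid of reconnect work if its running */
		cancel_delayed_work_sync(&ctrl->reconnect_work);

		/* Disable the queue so ctrl delete won't free it */
		if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
			ret = 0;
			goto queue_delete;
		}

		/* Free this queue ourselves */
		nvme_rdma_stop_queue(queue);
		nvme_rdma_destroy_queue_ib(queue);

		/* Return non-zero so the cm_id will destroy implicitly */
		ret = 1;

	queue_delete:
		/* queue controller deletion */
		queue_work(nvme_rdma_wq, &ctrl->delete_work);
		flush_work(&ctrl->delete_work);
		return ret;
	}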

Comments

Christoph Hellwig July 21, 2016, 8:15 a.m. UTC | #1
On Mon, Jul 18, 2016 at 01:44:50PM -0700, Sagi Grimberg wrote:
> Device removal sequence may have crashed because the
> controller (and admin queue space) was freed before
> we destroyed the admin queue resources. Thus we
> want to destroy the admin queue and only then queue
> controller deletion and wait for it to complete.
> 
> More specifically we:
> 1. own the controller deletion (make sure we are not
>    competing with another deletion).
> 2. get rid of inflight reconnects if exists (which
>    also destroy and create queues).
> 3. destroy the queue.
> 4. safely queue controller deletion (and wait for it
>    to complete).
> 
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
>  drivers/nvme/host/rdma.c | 49 ++++++++++++++++++++++++++----------------------
>  1 file changed, 27 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index 3e3ce2b..0e58450 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -169,7 +169,6 @@ MODULE_PARM_DESC(register_always,
>  static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
>  		struct rdma_cm_event *event);
>  static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
> -static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
>  
>  /* XXX: really should move to a generic header sooner or later.. */
>  static inline void put_unaligned_le24(u32 val, u8 *p)
> @@ -1318,37 +1317,43 @@ out_destroy_queue_ib:
>   * that caught the event. Since we hold the callout until the controller
>   * deletion is completed, we'll deadlock if the controller deletion will
>   * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
> - * of destroying this queue before-hand, destroy the queue resources
> - * after the controller deletion completed with the exception of destroying
> - * the cm_id implicitely by returning a non-zero rc to the callout.
> + * of destroying this queue before-hand, destroy the queue resources,
> + * then queue the controller deletion which won't destroy this queue and
> + * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
>   */
>  static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
>  {
>  	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
> +	int ret;
>  
> +	/* Own the controller deletion */
> +	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
> +		return 0;
>  
> +	dev_warn(ctrl->ctrl.device,
> +		"Got rdma device removal event, deleting ctrl\n");
>  
> +	/* Get rid of reconnect work if its running */
> +	cancel_delayed_work_sync(&ctrl->reconnect_work);
>  
> +	/* Disable the queue so ctrl delete won't free it */
> +	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
> +		ret = 0;
> +		goto queue_delete;
>  	}
>  
> +	/* Free this queue ourselves */
> +	nvme_rdma_stop_queue(queue);
> +	nvme_rdma_destroy_queue_ib(queue);
> +
> +	/* Return non-zero so the cm_id will destroy implicitly */
> +	ret = 1;
> +
> +queue_delete:
> +	/* queue controller deletion */
> +	queue_work(nvme_rdma_wq, &ctrl->delete_work);
> +	flush_work(&ctrl->delete_work);
> +	return ret;

Seems like we should be able to just skip the goto here:

	/* Disable the queue so ctrl delete won't free it */
	if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
		/* Free this queue ourselves */
		nvme_rdma_stop_queue(queue);
		nvme_rdma_destroy_queue_ib(queue);

		/* Return non-zero so the cm_id will destroy implicitly */
		ret = 1;
	}

	/* queue controller deletion */
	queue_work(nvme_rdma_wq, &ctrl->delete_work);
	flush_work(&ctrl->delete_work);
	return ret;
}


And that opportunity could be used to improve the comment for that
if-connected stop-queue case a bit as well.

Otherwise this looks reasonable to me.
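
A possible wording for that improved comment, sketched under the
restructuring above (not text from the thread):

	/*
	 * The queue is still connected: stop it and free its resources
	 * ourselves, because the controller deletion queued below skips
	 * any queue whose NVME_RDMA_Q_CONNECTED bit is already cleared.
	 */
	if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {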
Steve Wise July 22, 2016, 6:37 p.m. UTC | #2
> Device removal sequence may have crashed because the
> controller (and admin queue space) was freed before
> we destroyed the admin queue resources. Thus we
> want to destroy the admin queue and only then queue
> controller deletion and wait for it to complete.
> 
> More specifically we:
> 1. own the controller deletion (make sure we are not
>    competing with another deletion).
> 2. get rid of inflight reconnects if exists (which
>    also destroy and create queues).
> 3. destroy the queue.
> 4. safely queue controller deletion (and wait for it
>    to complete).
> 
> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
> ---
>  drivers/nvme/host/rdma.c | 49 ++++++++++++++++++++++++++----------------------
>  1 file changed, 27 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index 3e3ce2b..0e58450 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -169,7 +169,6 @@ MODULE_PARM_DESC(register_always,
>  static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
>  		struct rdma_cm_event *event);
>  static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
> -static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
> 
>  /* XXX: really should move to a generic header sooner or later.. */
>  static inline void put_unaligned_le24(u32 val, u8 *p)
> @@ -1318,37 +1317,43 @@ out_destroy_queue_ib:
>   * that caught the event. Since we hold the callout until the controller
>   * deletion is completed, we'll deadlock if the controller deletion will
>   * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
> - * of destroying this queue before-hand, destroy the queue resources
> - * after the controller deletion completed with the exception of destroying
> - * the cm_id implicitely by returning a non-zero rc to the callout.
> + * of destroying this queue before-hand, destroy the queue resources,
> + * then queue the controller deletion which won't destroy this queue and
> + * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
>   */
>  static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
>  {
>  	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
> -	int ret, ctrl_deleted = 0;
> +	int ret;
> 
> -	/* First disable the queue so ctrl delete won't free it */
> -	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
> -		goto out;
> +	/* Own the controller deletion */
> +	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
> +		return 0;
> 
> -	/* delete the controller */
> -	ret = __nvme_rdma_del_ctrl(ctrl);
> -	if (!ret) {
> -		dev_warn(ctrl->ctrl.device,
> -			"Got rdma device removal event, deleting ctrl\n");
> -		flush_work(&ctrl->delete_work);
> +	dev_warn(ctrl->ctrl.device,
> +		"Got rdma device removal event, deleting ctrl\n");
> 
> -		/* Return non-zero so the cm_id will destroy implicitly */
> -		ctrl_deleted = 1;
> +	/* Get rid of reconnect work if its running */
> +	cancel_delayed_work_sync(&ctrl->reconnect_work);
> 
> -		/* Free this queue ourselves */
> -		rdma_disconnect(queue->cm_id);
> -		ib_drain_qp(queue->qp);
> -		nvme_rdma_destroy_queue_ib(queue);
> +	/* Disable the queue so ctrl delete won't free it */
> +	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
> +		ret = 0;
> +		goto queue_delete;
>  	}
> 
> -out:
> -	return ctrl_deleted;
> +	/* Free this queue ourselves */
> +	nvme_rdma_stop_queue(queue);
> +	nvme_rdma_destroy_queue_ib(queue);
> +
> +	/* Return non-zero so the cm_id will destroy implicitly */
> +	ret = 1;
> +
> +queue_delete:
> +	/* queue controller deletion */
> +	queue_work(nvme_rdma_wq, &ctrl->delete_work);
> +	flush_work(&ctrl->delete_work);

Actually, since the queue_work() fires off the workqueue thread to delete the
controller and its resources (potentially on another CPU), I think the
flush_work() could end up being a touch-after-free, because it accesses *ctrl,
no?
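
A minimal sketch of one way to close that window, assuming the 4.7-era
nvme core where struct nvme_ctrl embeds a kref and nvme_put_ctrl()
drops it (whether the delete work really drops the final reference
here is an assumption):

	/*
	 * Pin the controller so flush_work() cannot touch freed memory
	 * even if the delete work drops what would otherwise be the
	 * last reference.
	 */
	kref_get(&ctrl->ctrl.kref);
	queue_work(nvme_rdma_wq, &ctrl->delete_work);
	flush_work(&ctrl->delete_work);
	nvme_put_ctrl(&ctrl->ctrl);
	return ret;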



Patch

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e3ce2b..0e58450 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -169,7 +169,6 @@  MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -1318,37 +1317,43 @@  out_destroy_queue_ib:
  * that caught the event. Since we hold the callout until the controller
  * deletion is completed, we'll deadlock if the controller deletion will
  * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue before-hand, destroy the queue resources,
+ * then queue the controller deletion which won't destroy this queue and
+ * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
  */
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret, ctrl_deleted = 0;
+	int ret;
 
-	/* First disable the queue so ctrl delete won't free it */
-	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-		goto out;
+	/* Own the controller deletion */
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+		return 0;
 
-	/* delete the controller */
-	ret = __nvme_rdma_del_ctrl(ctrl);
-	if (!ret) {
-		dev_warn(ctrl->ctrl.device,
-			"Got rdma device removal event, deleting ctrl\n");
-		flush_work(&ctrl->delete_work);
+	dev_warn(ctrl->ctrl.device,
+		"Got rdma device removal event, deleting ctrl\n");
 
-		/* Return non-zero so the cm_id will destroy implicitly */
-		ctrl_deleted = 1;
+	/* Get rid of reconnect work if its running */
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
 
-		/* Free this queue ourselves */
-		rdma_disconnect(queue->cm_id);
-		ib_drain_qp(queue->qp);
-		nvme_rdma_destroy_queue_ib(queue);
+	/* Disable the queue so ctrl delete won't free it */
+	if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
+		ret = 0;
+		goto queue_delete;
 	}
 
-out:
-	return ctrl_deleted;
+	/* Free this queue ourselves */
+	nvme_rdma_stop_queue(queue);
+	nvme_rdma_destroy_queue_ib(queue);
+
+	/* Return non-zero so the cm_id will destroy implicitly */
+	ret = 1;
+
+queue_delete:
+	/* queue controller deletion */
+	queue_work(nvme_rdma_wq, &ctrl->delete_work);
+	flush_work(&ctrl->delete_work);
+	return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,