diff mbox

[24/28] bnxt_re: Handling dispatching of events to IB stack and cleanup during unload

Message ID 1480919912-1079-25-git-send-email-selvin.xavier@broadcom.com (mailing list archive)
State Superseded
Headers show

Commit Message

Selvin Xavier Dec. 5, 2016, 6:38 a.m. UTC
This patch implements dispatching of the appropriate events to the IB stack
based on the NETDEV events received.
It also implements cleanup of the resources during driver unload.

Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
---
 drivers/infiniband/hw/bnxtre/bnxt_re_main.c | 77 +++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
diff mbox

Patch

diff --git a/drivers/infiniband/hw/bnxtre/bnxt_re_main.c b/drivers/infiniband/hw/bnxtre/bnxt_re_main.c
index a138741..3f55548 100644
--- a/drivers/infiniband/hw/bnxtre/bnxt_re_main.c
+++ b/drivers/infiniband/hw/bnxtre/bnxt_re_main.c
@@ -714,6 +714,60 @@  static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 	return rc;
 }
 
+/* Build an ib_event and hand it to the IB core. A non-NULL qp fills the
+ * QP member of the event union; otherwise the port number is used.
+ */
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+				   u8 port_num, enum ib_event_type event)
+{
+	struct ib_event ev;
+
+	ev.device = ibdev;
+	ev.event = event;
+	if (qp)
+		ev.element.qp = qp;
+	else
+		ev.element.port_num = port_num;
+	ib_dispatch_event(&ev);
+}
+
+/* True for the GSI (QP1) QP or the driver's internal shadow QP. */
+static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
+					struct bnxt_re_qp *qp)
+{
+	if (qp->ib_qp.qp_type == IB_QPT_GSI)
+		return true;
+	return qp == rdev->qp1_sqp;
+}
+
+static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev, bool qp_wait)
+{
+	int mask = IB_QP_STATE, qp_count, count = 1;
+	struct ib_qp_attr qp_attr;
+	struct bnxt_re_qp *qp;
+
+	qp_attr.qp_state = IB_QPS_ERR;
+	mutex_lock(&rdev->qp_lock);
+	list_for_each_entry(qp, &rdev->qp_list, list) {
+		/* Modify the state of all QPs except QP1/Shadow QP */
+		if (qp && !bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
+			if (qp->qplib_qp.state !=
+			    CMDQ_MODIFY_QP_NEW_STATE_RESET ||
+			    qp->qplib_qp.state !=
+			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
+						       1, IB_EVENT_QP_FATAL);
+				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
+						  NULL);
+			}
+		}
+	}
+
+	mutex_unlock(&rdev->qp_lock);
+	if (qp_wait) {
+		/* Give the application some time to clean up */
+		do {
+			qp_count = atomic_read(&rdev->qp_count);
+			msleep(100);
+		} while ((qp_count != atomic_read(&rdev->qp_count)) &&
+			  count--);
+	}
+}
+
 static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
 {
 	int i, rc;
@@ -872,6 +926,8 @@  static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 		}
 	}
 	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
+	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
 
 	return 0;
 free_sctx:
@@ -950,10 +1006,18 @@  static void bnxt_re_task(struct work_struct *work)
 				"Failed to register with IB: %#x", rc);
 			break;
 	case NETDEV_UP:
+		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+				       IB_EVENT_PORT_ACTIVE);
 		break;
 	case NETDEV_DOWN:
+		bnxt_re_dev_stop(rdev, false);
 		break;
 	case NETDEV_CHANGE:
+		if (!netif_carrier_ok(rdev->netdev))
+			bnxt_re_dev_stop(rdev, false);
+		else if (netif_carrier_ok(rdev->netdev))
+			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+					       IB_EVENT_PORT_ACTIVE);
 		break;
 	default:
 		break;
@@ -1071,6 +1135,7 @@  static int __init bnxt_re_mod_init(void)
 }
 static void __exit bnxt_re_mod_exit(void)
 {
+	struct bnxt_re_dev *rdev, *next;
 	LIST_HEAD(to_be_deleted);
 
 	/* Free all adapter allocated resources */
@@ -1079,6 +1144,18 @@  static void __exit bnxt_re_mod_exit(void)
 		list_splice_init_rcu(&bnxt_re_dev_list, &to_be_deleted,
 				     synchronize_rcu);
 	mutex_unlock(&bnxt_re_dev_lock);
+
+	/* Can use the new list without protection */
+	/* Cleanup the devices in reverse order so that the VF device
+	 * cleanup is done before PF cleanup
+	 */
+	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
+		bnxt_re_dev_stop(rdev, true);
+		bnxt_re_ib_unreg(rdev, true);
+		bnxt_re_remove_one(rdev);
+		bnxt_re_dev_unreg(rdev);
+	}
+
 	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
 	if (bnxt_re_wq)
 		destroy_workqueue(bnxt_re_wq);