
[v3,for-next,11/11] RDMA/hns: Simplify AEQE process for different types of queue

Message ID: 1607606572-11968-12-git-send-email-liweihang@huawei.com (mailing list archive)
State: Superseded
Series: RDMA/hns: Updates for 5.11

Commit Message

Weihang Li Dec. 10, 2020, 1:22 p.m. UTC
From: Yixian Liu <liuyixian@huawei.com>

There is no need to get the queue number repeatedly for different queues
from an AEQE entity, as they are the same. Furthermore, redefine the AEQE
structure to make the code more readable.

In addition, HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW is removed because the
hardware never reports this event.
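
As a minimal illustration of why the merge is safe (a standalone userspace
sketch with simplified u32 types rather than the kernel's __le32, not the
kernel code itself): the old qp_event, srq_event and cq_event members of
the AEQE union all begin with a single 32-bit queue-number word at the
same offset, so one queue_event.num field can serve QP, SRQ and CQ events
alike.

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Layout of the old per-type union members (simplified). */
  struct old_aeqe_event {
  	union {
  		struct { uint32_t qp;  uint32_t rsv0; uint32_t rsv1; } qp_event;
  		struct { uint32_t srq; uint32_t rsv0; uint32_t rsv1; } srq_event;
  		struct { uint32_t cq;  uint32_t rsv0; uint32_t rsv1; } cq_event;
  	};
  };

  /* Layout after consolidation into a single queue_event member. */
  struct new_aeqe_event {
  	union {
  		struct { uint32_t num; uint32_t rsv0; uint32_t rsv1; } queue_event;
  	};
  };

  int main(void)
  {
  	/* All three legacy fields alias the same leading word ... */
  	assert(offsetof(struct old_aeqe_event, qp_event.qp) ==
  	       offsetof(struct old_aeqe_event, srq_event.srq));
  	assert(offsetof(struct old_aeqe_event, qp_event.qp) ==
  	       offsetof(struct old_aeqe_event, cq_event.cq));
  	/* ... and the new single field sits at that same offset. */
  	assert(offsetof(struct old_aeqe_event, qp_event.qp) ==
  	       offsetof(struct new_aeqe_event, queue_event.num));
  	return 0;
  }

Compiled with any C11 toolchain, the assertions hold, so reading the queue
number once through queue_event.num loses no information relative to the
three per-type reads the patch removes.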

Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
---
 drivers/infiniband/hw/hns/hns_roce_device.h | 26 ++---------------
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c  | 16 ++++-------
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 43 ++++++++++-------------------
 3 files changed, 23 insertions(+), 62 deletions(-)

Patch

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 89c0c74..55d5386 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -160,7 +160,6 @@  enum hns_roce_event {
 	/* 0x10 and 0x11 is unused in currently application case */
 	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
 	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
-	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW              = 0x14,
 	HNS_ROCE_EVENT_TYPE_FLR			      = 0x15,
 };
 
@@ -636,10 +635,9 @@  enum {
 struct hns_roce_work {
 	struct hns_roce_dev *hr_dev;
 	struct work_struct work;
-	u32 qpn;
-	u32 cqn;
 	int event_type;
 	int sub_type;
+	u32 queue_num;
 };
 
 struct hns_roce_qp {
@@ -707,28 +705,10 @@  struct hns_roce_aeqe {
 	__le32 asyn;
 	union {
 		struct {
-			__le32 qp;
+			__le32 num;
 			u32 rsv0;
 			u32 rsv1;
-		} qp_event;
-
-		struct {
-			__le32 srq;
-			u32 rsv0;
-			u32 rsv1;
-		} srq_event;
-
-		struct {
-			__le32 cq;
-			u32 rsv0;
-			u32 rsv1;
-		} cq_event;
-
-		struct {
-			__le32 ceqe;
-			u32 rsv0;
-			u32 rsv1;
-		} ce_event;
+		} queue_event;
 
 		struct {
 			__le64  out_param;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index b7dd867..cc20231 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -3683,10 +3683,10 @@  static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
 	int phy_port;
 	int qpn;
 
-	qpn = roce_get_field(aeqe->event.qp_event.qp,
+	qpn = roce_get_field(aeqe->event.queue_event.num,
 			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
 			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
-	phy_port = roce_get_field(aeqe->event.qp_event.qp,
+	phy_port = roce_get_field(aeqe->event.queue_event.num,
 				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
 				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
 	if (qpn <= 1)
@@ -3717,9 +3717,9 @@  static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
 	struct device *dev = &hr_dev->pdev->dev;
 	u32 cqn;
 
-	cqn = roce_get_field(aeqe->event.cq_event.cq,
-			  HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-			  HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
+	cqn = roce_get_field(aeqe->event.queue_event.num,
+			     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+			     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
 
 	switch (event_type) {
 	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
@@ -3848,12 +3848,6 @@  static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
 		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
 			hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
 			break;
-		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
-			dev_warn(dev, "CEQ 0x%lx overflow.\n",
-			roce_get_field(aeqe->event.ce_event.ceqe,
-				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
-				     HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
-			break;
 		default:
 			dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
 				 event_type, eq->eqn, eq->cons_index);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 546f77d..2ab0c0a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5451,8 +5451,6 @@  static void hns_roce_irq_work_handle(struct work_struct *work)
 	struct hns_roce_work *irq_work =
 				container_of(work, struct hns_roce_work, work);
 	struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
-	u32 qpn = irq_work->qpn;
-	u32 cqn = irq_work->cqn;
 
 	switch (irq_work->event_type) {
 	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -5468,15 +5466,15 @@  static void hns_roce_irq_work_handle(struct work_struct *work)
 		break;
 	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
 		ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
-			  qpn, irq_work->sub_type);
+			  irq_work->queue_num, irq_work->sub_type);
 		break;
 	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
 		ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
-			  qpn);
+			  irq_work->queue_num);
 		break;
 	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
 		ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
-			  qpn, irq_work->sub_type);
+			  irq_work->queue_num, irq_work->sub_type);
 		break;
 	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
 		ibdev_warn(ibdev, "SRQ limit reach.\n");
@@ -5488,10 +5486,10 @@  static void hns_roce_irq_work_handle(struct work_struct *work)
 		ibdev_err(ibdev, "SRQ catas error.\n");
 		break;
 	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
-		ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
+		ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num);
 		break;
 	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
-		ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
+		ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num);
 		break;
 	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
 		ibdev_warn(ibdev, "DB overflow.\n");
@@ -5507,8 +5505,7 @@  static void hns_roce_irq_work_handle(struct work_struct *work)
 }
 
 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
-				      struct hns_roce_eq *eq,
-				      u32 qpn, u32 cqn)
+				      struct hns_roce_eq *eq, u32 queue_num)
 {
 	struct hns_roce_work *irq_work;
 
@@ -5518,10 +5515,9 @@  static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
 
 	INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
 	irq_work->hr_dev = hr_dev;
-	irq_work->qpn = qpn;
-	irq_work->cqn = cqn;
 	irq_work->event_type = eq->event_type;
 	irq_work->sub_type = eq->sub_type;
+	irq_work->queue_num = queue_num;
 	queue_work(hr_dev->irq_workq, &(irq_work->work));
 }
 
@@ -5573,10 +5569,8 @@  static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
 	int aeqe_found = 0;
 	int event_type;
+	u32 queue_num;
 	int sub_type;
-	u32 srqn;
-	u32 qpn;
-	u32 cqn;
 
 	while (aeqe) {
 		/* Make sure we read AEQ entry after we have checked the
@@ -5590,15 +5584,9 @@  static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 		sub_type = roce_get_field(aeqe->asyn,
 					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
 					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
-		qpn = roce_get_field(aeqe->event.qp_event.qp,
-				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
-				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
-		cqn = roce_get_field(aeqe->event.cq_event.cq,
-				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
-				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
-		srqn = roce_get_field(aeqe->event.srq_event.srq,
-				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
-				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+		queue_num = roce_get_field(aeqe->event.queue_event.num,
+					   HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+					   HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
 
 		switch (event_type) {
 		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
@@ -5609,15 +5597,15 @@  static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
-			hns_roce_qp_event(hr_dev, qpn, event_type);
+			hns_roce_qp_event(hr_dev, queue_num, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
 		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
-			hns_roce_srq_event(hr_dev, srqn, event_type);
+			hns_roce_srq_event(hr_dev, queue_num, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
 		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
-			hns_roce_cq_event(hr_dev, cqn, event_type);
+			hns_roce_cq_event(hr_dev, queue_num, event_type);
 			break;
 		case HNS_ROCE_EVENT_TYPE_MB:
 			hns_roce_cmd_event(hr_dev,
@@ -5626,7 +5614,6 @@  static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 					le64_to_cpu(aeqe->event.cmd.out_param));
 			break;
 		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
-		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
 		case HNS_ROCE_EVENT_TYPE_FLR:
 			break;
 		default:
@@ -5643,7 +5630,7 @@  static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
 		if (eq->cons_index > (2 * eq->entries - 1))
 			eq->cons_index = 0;
 
-		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
+		hns_roce_v2_init_irq_work(hr_dev, eq, queue_num);
 
 		aeqe = next_aeqe_sw_v2(eq);
 	}