[10/16] lpfc: Revise interrupt coalescing for missing scenarios

Message ID 20191018211832.7917-11-jsmart2021@gmail.com (mailing list archive)
State Mainlined
Commit 8156d378c4cbf8ca19df5d8f0c610ce6923b61e2
Series lpfc: Update lpfc to revision 12.6.0.0

Commit Message

James Smart Oct. 18, 2019, 9:18 p.m. UTC
The existing "auto eq delay" mechanism sometimes skipped over an EQ,
did not ramp coalescing back down quickly enough under light load, and
in other cases never kicked in at all because the accounting for CPUs
shared by multiple vectors didn't quite add up right.

Tweak the interrupt mechanism such that:
- Add a flag to the EQ to force checking for coalescing values
  when it is serviced in the interrupt handler.  The flag will
  be set by any CQ bound to the EQ whenever the number of CQ
  elements processed in a single scan meets or exceeds the hardware
  queue notify level, i.e. a significant number of completions is
  occurring.
- In the heartbeat work item that checks coalescing:
 - Replace the structure that was counting the number of EQs
   that interrupted on a single cpu with a new structure that
   looks at the EQ to see whether the EQ currently has a coalescing
   value (thus it should be re-evaluated) or was marked by the
   new flag indicating heavy completions.
 - When a cpu, which may be servicing multiple vectors, has
   at least one EQ that should be checked, a new coalescing delay
   is calculated based on the number of interrupts that occurred
   on the cpu (see the delay-calculation sketch after this list).
 - The new coalescing value is then applied to the EQs that
   had interrupted on the cpu.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
---
 drivers/scsi/lpfc/lpfc_hw4.h  |  1 -
 drivers/scsi/lpfc/lpfc_init.c | 51 ++++++++++++++++++-------------------------
 drivers/scsi/lpfc/lpfc_sli.c  |  3 ++-
 drivers/scsi/lpfc/lpfc_sli4.h |  1 +
 4 files changed, 24 insertions(+), 32 deletions(-)

Patch

diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1cd3016f7783..ac86b80230e7 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -210,7 +210,6 @@  struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX          5000000
 #define LPFC_DEF_IMAX          0
 
-#define LPFC_IMAX_THRESHOLD    1000
 #define LPFC_MAX_AUTO_EQ_DELAY 120
 #define LPFC_EQ_DELAY_STEP     15
 #define LPFC_EQD_ISR_TRIGGER   20000
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 8292b66e4b07..316a2c2beb0c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1235,10 +1235,9 @@  lpfc_hb_eq_delay_work(struct work_struct *work)
 					     struct lpfc_hba, eq_delay_work);
 	struct lpfc_eq_intr_info *eqi, *eqi_new;
 	struct lpfc_queue *eq, *eq_next;
-	unsigned char *eqcnt = NULL;
+	unsigned char *ena_delay = NULL;
 	uint32_t usdelay;
 	int i;
-	bool update = false;
 
 	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
 		return;
@@ -1247,44 +1246,36 @@  lpfc_hb_eq_delay_work(struct work_struct *work)
 	    phba->pport->fc_flag & FC_OFFLINE_MODE)
 		goto requeue;
 
-	eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char),
-			GFP_KERNEL);
-	if (!eqcnt)
+	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
+			    GFP_KERNEL);
+	if (!ena_delay)
 		goto requeue;
 
-	if (phba->cfg_irq_chann > 1) {
-		/* Loop thru all IRQ vectors */
-		for (i = 0; i < phba->cfg_irq_chann; i++) {
-			/* Get the EQ corresponding to the IRQ vector */
-			eq = phba->sli4_hba.hba_eq_hdl[i].eq;
-			if (!eq)
-				continue;
-			if (eq->q_mode) {
-				update = true;
-				break;
-			}
-			if (eqcnt[eq->last_cpu] < 2)
-				eqcnt[eq->last_cpu]++;
+	for (i = 0; i < phba->cfg_irq_chann; i++) {
+		/* Get the EQ corresponding to the IRQ vector */
+		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+		if (!eq)
+			continue;
+		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
+			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
+			ena_delay[eq->last_cpu] = 1;
 		}
-	} else
-		update = true;
+	}
 
 	for_each_present_cpu(i) {
 		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
-		if (!update && eqcnt[i] < 2) {
-			eqi->icnt = 0;
-			continue;
+		if (ena_delay[i]) {
+			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
+			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
+				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
+		} else {
+			usdelay = 0;
 		}
 
-		usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
-			   LPFC_EQ_DELAY_STEP;
-		if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
-			usdelay = LPFC_MAX_AUTO_EQ_DELAY;
-
 		eqi->icnt = 0;
 
 		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
-			if (eq->last_cpu != i) {
+			if (unlikely(eq->last_cpu != i)) {
 				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
 						      eq->last_cpu);
 				list_move_tail(&eq->cpu_list, &eqi_new->list);
@@ -1296,7 +1287,7 @@  lpfc_hb_eq_delay_work(struct work_struct *work)
 		}
 	}
 
-	kfree(eqcnt);
+	kfree(ena_delay);
 
 requeue:
 	queue_delayed_work(phba->wq, &phba->eq_delay_work,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index bb0e155eb32c..0e6674bd15c6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13640,6 +13640,7 @@  __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
 						LPFC_QUEUE_NOARM);
 			consumed = 0;
+			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
 		}
 
 		if (count == LPFC_NVMET_CQ_NOTIFY)
@@ -14278,7 +14279,7 @@  lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 	fpeq->last_cpu = raw_smp_processor_id();
 
 	if (icnt > LPFC_EQD_ISR_TRIGGER &&
-	    phba->cfg_irq_chann == 1 &&
+	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
 	    phba->cfg_auto_imax &&
 	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
 	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0d4882a9e634..c9e068ca0fec 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -199,6 +199,7 @@  struct lpfc_queue {
 	uint8_t q_flag;
 #define HBA_NVMET_WQFULL	0x1 /* We hit WQ Full condition for NVMET */
 #define HBA_NVMET_CQ_NOTIFY	0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */
+#define HBA_EQ_DELAY_CHK	0x2 /* EQ is a candidate for coalescing */
 #define LPFC_NVMET_CQ_NOTIFY	4
 	void __iomem *db_regaddr;
 	uint16_t dpp_enable;