diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
bnxt_re_ib_unreg(rdev, false);
}
+static void bnxt_re_stop_irq(void *handle)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_qplib_nq *nq;
+ int indx;
+
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ bnxt_qplib_nq_stop_irq(nq, false);
+ }
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_qplib_nq *nq;
+ int indx, rc;
+
+ if (!ent) {
+		/* Not setting the f/w timeout bit in rcfw.
+		 * During driver unload, the first command to
+		 * f/w will time out and that will set the
+		 * timeout bit.
+		 */
+ dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+ return;
+ }
+
+	/* Vectors may change after restart, so update with new vectors
+	 * in the device structure.
+	 */
+ for (indx = 0; indx < rdev->num_msix; indx++)
+ rdev->msix_entries[indx].vector = ent[indx].vector;
+
+ bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+ false);
+	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
+ nq = &rdev->nq[indx - 1];
+ rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
+ msix_ent[indx].vector, false);
+ if (rc)
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to reinit NQ index %d\n", indx - 1);
+ }
+}
+
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
.ulp_async_notifier = NULL,
.ulp_stop = bnxt_re_stop,
.ulp_start = bnxt_re_start,
.ulp_sriov_config = bnxt_re_sriov_config,
- .ulp_shutdown = bnxt_re_shutdown
+ .ulp_shutdown = bnxt_re_shutdown,
+ .ulp_irq_stop = bnxt_re_stop_irq,
+ .ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -336,14 +336,15 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-static void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq)
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
tasklet_disable(&nq->worker);
/* Mask h/w interrupt */
NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
/* Sync with last running IRQ handler */
synchronize_irq(nq->vector);
- tasklet_kill(&nq->worker);
+ if (kill)
+ tasklet_kill(&nq->worker);
if (nq->requested) {
irq_set_affinity_hint(nq->vector, NULL);
free_irq(nq->vector, nq);
@@ -359,7 +360,7 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
/* Make sure the HW is stopped! */
- bnxt_qplib_nq_stop_irq(nq);
+ bnxt_qplib_nq_stop_irq(nq, true);
if (nq->bar_reg_iomem)
iounmap(nq->bar_reg_iomem);
@@ -370,8 +371,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
nq->vector = 0;
}
-static int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq,
- int nq_indx, int msix_vector)
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init)
{
int rc;
@@ -379,8 +380,11 @@ static int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq,
return -EFAULT;
nq->vector = msix_vector;
- tasklet_init(&nq->worker, bnxt_qplib_service_nq,
- (unsigned long)nq);
+ if (need_init)
+ tasklet_init(&nq->worker, bnxt_qplib_service_nq,
+ (unsigned long)nq);
+ else
+ tasklet_enable(&nq->worker);
memset(nq->name, 0, 32);
sprintf(nq->name, "bnxt_qplib_nq-%d", nq_indx);
@@ -437,7 +441,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
goto fail;
}
- rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector);
+ rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
if (rc) {
dev_err(&nq->pdev->dev,
"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
struct bnxt_qplib_cq *cq;
};
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -582,7 +582,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
return -ENOMEM;
}
-static void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw)
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
tasklet_disable(&rcfw->worker);
/* Mask h/w interrupts */
@@ -590,7 +590,8 @@ static void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw)
rcfw->creq.max_elements);
/* Sync with last running IRQ-handler */
synchronize_irq(rcfw->vector);
- tasklet_kill(&rcfw->worker);
+ if (kill)
+ tasklet_kill(&rcfw->worker);
if (rcfw->requested) {
free_irq(rcfw->vector, rcfw);
@@ -602,7 +603,7 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
unsigned long indx;
- bnxt_qplib_rcfw_stop_irq(rcfw);
+ bnxt_qplib_rcfw_stop_irq(rcfw, true);
if (rcfw->cmdq_bar_reg_iomem)
iounmap(rcfw->cmdq_bar_reg_iomem);
@@ -623,16 +624,20 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
rcfw->vector = 0;
}
-static int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw,
- int msix_vector)
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init)
{
int rc;
if (rcfw->requested)
return -EFAULT;
+
rcfw->vector = msix_vector;
- tasklet_init(&rcfw->worker,
- bnxt_qplib_service_creq, (unsigned long)rcfw);
+ if (need_init)
+ tasklet_init(&rcfw->worker,
+ bnxt_qplib_service_creq, (unsigned long)rcfw);
+ else
+ tasklet_enable(&rcfw->worker);
rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
"bnxt_qplib_creq", rcfw);
if (rc)
@@ -640,6 +645,7 @@ static int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw,
rcfw->requested = true;
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
rcfw->creq.max_elements);
+
return 0;
}
@@ -708,7 +714,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rcfw->aeq_handler = aeq_handler;
init_waitqueue_head(&rcfw->waitq);
- rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector);
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
dev_err(&rcfw->pdev->dev,
"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
When the L2 driver needs to change the number of available IRQs, it
informs the RoCE driver via a new API to free all of its IRQs so that
it can proceed with disabling MSI-X on all the vectors. Once the L2
driver is done reshuffling the IRQs, it tells the RoCE driver to
resume all the IRQs it was using. The L2 driver guarantees that none
of the Ring-ID to vector mappings change as a result of the
reshuffling; the RoCE driver gets back the same vectors it was using
prior to the change and just has to re-enable them when the L2 driver
tells it to do so via the new API.

Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
---
 drivers/infiniband/hw/bnxt_re/main.c       | 55 +++++++++++++++++++++++++++++-
 drivers/infiniband/hw/bnxt_re/qplib_fp.c   | 20 ++++++-----
 drivers/infiniband/hw/bnxt_re/qplib_fp.h   |  3 ++
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 22 +++++++-----
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h |  3 ++
 5 files changed, 86 insertions(+), 17 deletions(-)
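For reference, below is a minimal sketch (not part of this patch) of
the L2-side sequence described above. The helper bnxt_reshuffle_msix()
and the msix bookkeeping fields on struct bnxt are illustrative
assumptions; only the ulp_irq_stop()/ulp_irq_restart() hooks and
struct bnxt_msix_entry come from the ULP interface this patch extends.

/* Illustrative sketch only; names marked below are assumptions. */
static int bnxt_reshuffle_msix(struct bnxt *bp, int new_nvecs)
{
	struct bnxt_ulp_ops *ops = bp->ulp_ops;	/* assumed field */
	int rc;

	/* 1. RoCE driver frees all of its IRQs (bnxt_re_stop_irq()). */
	if (ops && ops->ulp_irq_stop)
		ops->ulp_irq_stop(bp->ulp_handle);	/* assumed field */

	/* 2. MSI-X can now be disabled and re-enabled with the new count. */
	pci_disable_msix(bp->pdev);
	rc = pci_enable_msix_range(bp->pdev, bp->msix_ent_pci,	/* assumed field */
				   new_nvecs, new_nvecs);
	if (rc < 0)
		return rc;

	/* 3. Hand the vectors back.  The Ring-ID to vector mapping is
	 * guaranteed unchanged, so the RoCE driver only re-requests its
	 * IRQs and re-enables its tasklets (bnxt_re_start_irq()).
	 */
	if (ops && ops->ulp_irq_restart)
		ops->ulp_irq_restart(bp->ulp_handle, bp->msix_ent);	/* assumed field */

	return 0;
}

The property that matters is the ordering: the ULP must have freed its
vectors before pci_disable_msix() is called, and it is only handed
vectors again once MSI-X is back up.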