@@ -144,6 +144,10 @@ extern struct list_head mrioc_list;
/* Default target device queue depth */
#define MPI3MR_DEFAULT_SDEV_QD 32
+/* Definitions for threaded IRQ poll */
+#define MPI3MR_IRQ_POLL_SLEEP 2
+#define MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT 8
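+/*
+ * MPI3MR_IRQ_POLL_SLEEP is the base value passed to usleep_range() (in
+ * microseconds) between poll iterations; MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT
+ * is the per reply queue pending IO count above which IRQ polling is armed.
+ */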
+
/* SGE Flag definition */
#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
(MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
@@ -295,6 +299,9 @@ struct op_req_qinfo {
* @q_segment_list: Segment list base virtual address
* @q_segment_list_dma: Segment list base DMA address
* @ephase: Expected phased identifier for the reply queue
+ * @pend_ios: Number of IOs pending in HW for this queue
+ * @enable_irq_poll: Flag to indicate IRQ polling is enabled for this queue
+ * @in_use: Set while the queue is being processed by the ISR or poll thread
*/
struct op_reply_qinfo {
u16 ci;
@@ -306,6 +313,9 @@ struct op_reply_qinfo {
void *q_segment_list;
dma_addr_t q_segment_list_dma;
u8 ephase;
+ atomic_t pend_ios;
+ bool enable_irq_poll;
+ atomic_t in_use;
};
/**
@@ -557,6 +567,7 @@ struct scmd_priv {
* @shost: Scsi_Host pointer
* @id: Controller ID
* @cpu_count: Number of online CPUs
+ * @irqpoll_sleep: usleep interval (in microseconds) used by the threaded ISR IRQ poll
* @name: Controller ASCII name
* @driver_name: Driver ASCII name
* @sysif_regs: System interface registers virtual address
@@ -658,6 +669,7 @@ struct mpi3mr_ioc {
u8 id;
int cpu_count;
bool enable_segqueue;
+ u32 irqpoll_sleep;
char name[MPI3MR_NAME_LENGTH];
char driver_name[MPI3MR_NAME_LENGTH];
@@ -345,12 +345,16 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
reply_qidx = op_reply_q->qid - 1;
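+ /*
+ * Only one context (the primary ISR or the IRQ poll thread) may
+ * process this reply queue at a time; bail out if it is already
+ * being drained elsewhere.
+ */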
+ if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
+ return 0;
+
exp_phase = op_reply_q->ephase;
reply_ci = op_reply_q->ci;
reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
if ((le16_to_cpu(reply_desc->ReplyFlags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
+ atomic_dec(&op_reply_q->in_use);
return 0;
}
@@ -361,6 +365,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->RequestQueueCI));
mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
reply_qidx);
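+ /* One outstanding IO has completed on this reply queue */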
+ atomic_dec(&op_reply_q->pend_ios);
if (reply_dma)
mpi3mr_repost_reply_buf(mrioc, reply_dma);
num_op_reply++;
@@ -375,6 +380,14 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if ((le16_to_cpu(reply_desc->ReplyFlags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+ /*
+ * Exit the completion loop to avoid a CPU lockup.
+ * Remaining completions are handled by the threaded ISR.
+ */
+ if (num_op_reply > mrioc->max_host_ios) {
+ intr_info->op_reply_q->enable_irq_poll = true;
+ break;
+ }
} while (1);
@@ -383,6 +396,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
&mrioc->sysif_regs->OperQueueIndexes[reply_qidx].ConsumerIndex);
op_reply_q->ci = reply_ci;
op_reply_q->ephase = exp_phase;
+ atomic_dec(&op_reply_q->in_use);
return num_op_reply;
}
@@ -392,7 +406,7 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
struct mpi3mr_intr_info *intr_info = privdata;
struct mpi3mr_ioc *mrioc;
u16 midx;
- u32 num_admin_replies = 0;
+ u32 num_admin_replies = 0, num_op_reply = 0;
if (!intr_info)
return IRQ_NONE;
@@ -406,8 +420,10 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
if (!midx)
num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
- if (num_admin_replies)
+ if (num_admin_replies || num_op_reply)
return IRQ_HANDLED;
else
return IRQ_NONE;
@@ -428,7 +444,20 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
/* Call primary ISR routine */
ret = mpi3mr_isr_primary(irq, privdata);
- return ret;
+ /*
+ * If more IOs are expected, wake the IRQ polling thread;
+ * otherwise exit from the ISR.
+ */
+ if (!intr_info->op_reply_q)
+ return ret;
+
+ if (!intr_info->op_reply_q->enable_irq_poll ||
+ !atomic_read(&intr_info->op_reply_q->pend_ios))
+ return ret;
+
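+ /*
+ * Mask this vector while the IRQ poll thread drains the queue;
+ * mpi3mr_isr_poll() re-enables it once polling finishes.
+ */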
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+
+ return IRQ_WAKE_THREAD;
}
/**
@@ -443,6 +472,36 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
*/
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
+ struct mpi3mr_intr_info *intr_info = privdata;
+ struct mpi3mr_ioc *mrioc;
+ u16 midx;
+ u32 num_admin_replies = 0, num_op_reply = 0;
+
+ if (!intr_info || !intr_info->op_reply_q)
+ return IRQ_NONE;
+
+ mrioc = intr_info->mrioc;
+ midx = intr_info->msix_index;
+
+ /* Poll for pending IO completions */
+ do {
+ if (!mrioc->intr_enabled)
+ break;
+
+ if (!midx)
+ num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
+ if (intr_info->op_reply_q)
+ num_op_reply +=
+ mpi3mr_process_op_reply_q(mrioc, intr_info);
+
+ usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+
+ } while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
+ (num_op_reply < mrioc->max_host_ios));
+
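+ /* Queue drained or budget exhausted: fall back to interrupt mode */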
+ intr_info->op_reply_q->enable_irq_poll = false;
+ enable_irq(pci_irq_vector(mrioc->pdev, midx));
+
return IRQ_HANDLED;
}
@@ -1158,6 +1217,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
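+ /* Start in interrupt mode with no pending IOs and the queue not in use */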
+ atomic_set(&op_reply_q->pend_ios, 0);
+ atomic_set(&op_reply_q->in_use, 0);
+ op_reply_q->enable_irq_poll = false;
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -1479,6 +1541,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
pi = 0;
op_req_q->pi = pi;
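+ /*
+ * Track outstanding IOs per reply queue; once the count exceeds
+ * MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT, arm IRQ polling so the threaded
+ * ISR takes over completion processing.
+ */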
+ if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
+ > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
+ mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+
writel(op_req_q->pi,
&mrioc->sysif_regs->OperQueueIndexes[reply_qidx].ProducerIndex);
@@ -2804,6 +2870,7 @@ int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 re_init)
u32 ioc_status, ioc_config, i;
Mpi3IOCFactsData_t facts_data;
+ mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
mrioc->change_count = 0;
if (!re_init) {
mrioc->cpu_count = num_online_cpus();
@@ -3089,6 +3156,8 @@ static void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
mrioc->op_reply_qinfo[i].ci = 0;
mrioc->op_reply_qinfo[i].num_replies = 0;
mrioc->op_reply_qinfo[i].ephase = 0;
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
mpi3mr_memset_op_reply_q_buffers(mrioc, i);
mrioc->req_qinfo[i].ci = 0;