@@ -3770,6 +3770,16 @@ struct qla_fw_resources {
u16 pad;
};
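+/* Adapter-wide (per struct qla_hw_data) firmware resource accounting,
+ * used when ql2xenforce_iocb_limit == 2.
+ */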
+struct qla_fw_res {
+ u16 iocb_total;
+ u16 iocb_limit;
+ atomic_t iocb_used;
+
+ u16 exch_total;
+ u16 exch_limit;
+ atomic_t exch_used;
+};
+
#define QLA_IOCB_PCT_LIMIT 95
struct qla_buf_pool {
@@ -4829,6 +4839,7 @@ struct qla_hw_data {
u8 edif_post_stop_cnt_down;
struct qla_vp_map *vp_map;
struct qla_nvme_fc_rjt lsrjt;
+ struct qla_fw_res fwres ____cacheline_aligned;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
exch_used, ha->base_qpair->fwres.exch_limit);
+
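+	/* Adapter-wide counters are only maintained when ql2xenforce_iocb_limit == 2 */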
+ if (ql2xenforce_iocb_limit == 2) {
+ iocbs_used = atomic_read(&ha->fwres.iocb_used);
+ exch_used = atomic_read(&ha->fwres.exch_used);
+ seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+ iocbs_used, ha->fwres.iocb_limit);
+
+ seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+ exch_used, ha->fwres.exch_limit);
+ }
}
return 0;
@@ -4214,6 +4214,14 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
+
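+	/* Adapter-wide limits: cap estimated usage at QLA_IOCB_PCT_LIMIT (95%)
+	 * of the firmware IOCB/exchange totals, mirroring the per-queue-pair
+	 * limits set up above.
+	 */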
+ ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+ ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ atomic_set(&ha->fwres.iocb_used, 0);
+ atomic_set(&ha->fwres.exch_used, 0);
}
void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
@@ -386,6 +386,7 @@ enum {
RESOURCE_IOCB = BIT_0,
RESOURCE_EXCH = BIT_1, /* exchange */
RESOURCE_FORCE = BIT_2,
+	RESOURCE_HA = BIT_3, /* adapter-wide (ha) accounting */
};
static inline int
@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
u16 exch_used;
- struct qla_hw_data *ha = qp->vha->hw;
+ struct qla_hw_data *ha = qp->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
return -ENOSPC;
}
}
+
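+	/* In mode 2, also check the adapter-wide counters before taking
+	 * the per-queue-pair reservation.
+	 */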
+ if (ql2xenforce_iocb_limit == 2) {
+ if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+ ha->fwres.iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+ ha->fwres.exch_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+ }
+
force:
qp->fwres.iocbs_used += iores->iocb_cnt;
qp->fwres.exch_used += iores->exch_cnt;
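+	/* Account adapter-wide as well and flag the reservation so
+	 * qla_put_fw_resources() releases it too.
+	 */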
+ if (ql2xenforce_iocb_limit == 2) {
+ atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+ atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+ iores->res_type |= RESOURCE_HA;
+ }
return 0;
}
+/*
+ * qla_atomic_dtz() - decrement to zero: decrement @v by @amount, but never below zero.
+ * @v: pointer of type atomic_t
+ * @amount: amount to decrement from v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+ int c, old, dec;
+
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - amount;
+ if (unlikely(dec < 0))
+ dec = 0;
+
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+}
+
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
+ struct qla_hw_data *ha = qp->hw;
+
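+	/* Release the adapter-wide reservation first; qla_atomic_dtz() clamps
+	 * at zero so the counters cannot underflow.
+	 */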
+ if (iores->res_type & RESOURCE_HA) {
+ if (iores->res_type & RESOURCE_IOCB)
+ qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+ if (iores->res_type & RESOURCE_EXCH)
+ qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+ }
+
if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
-int ql2xenforce_iocb_limit = 1;
+int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
+ "1: track usage per queue, 2: track usage per adapter");
/*
* CT6 CTX allocation cache
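
Usage note (assuming the standard module-parameter sysfs layout): because ql2xenforce_iocb_limit is declared S_IRUGO | S_IWUSR and is read on each request, per-queue tracking can still be selected at load time, e.g. "modprobe qla2xxx ql2xenforce_iocb_limit=1", or switched at runtime via /sys/module/qla2xxx/parameters/ql2xenforce_iocb_limit.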