--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -496,6 +496,12 @@ struct hns_roce_bank {
u32 next; /* Next ID to allocate. */
};

+struct hns_roce_idx_table {
+ u32 *spare_idx;
+ u32 head;
+ u32 tail;
+};
+
struct hns_roce_qp_table {
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
@@ -504,6 +510,7 @@ struct hns_roce_qp_table {
struct mutex scc_mutex;
struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
struct mutex bank_mutex;
+ struct hns_roce_idx_table idx_table;
};

struct hns_roce_cq_table {
@@ -1144,7 +1151,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
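The new idx_table is a fixed-size FIFO ring over QP numbers: entries are enqueued at tail, consumed at head, and both indices wrap at caps.num_qps. A minimal userspace sketch of that discipline follows; the names (idx_ring, idx_ring_push, idx_ring_pop) are illustrative only, not driver code.

#include <stdint.h>

struct idx_ring {
	uint32_t *spare_idx;
	uint32_t head;
	uint32_t tail;
	uint32_t size;		/* caps.num_qps in the driver */
};

/* Enqueue a QPN at tail, wrapping exactly as the hunks below do. */
static void idx_ring_push(struct idx_ring *r, uint32_t qpn)
{
	r->spare_idx[r->tail] = qpn;
	r->tail = (r->tail == r->size - 1) ? 0 : r->tail + 1;
}

/* Dequeue the oldest QPN from head, with the same wrap rule. */
static uint32_t idx_ring_pop(struct idx_ring *r)
{
	uint32_t qpn = r->spare_idx[r->head];

	r->head = (r->head == r->size - 1) ? 0 : r->head + 1;
	return qpn;
}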
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4504,12 +4504,18 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
{
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
+ u32 *head = &hr_dev->qp_table.idx_table.head;
+ u32 *tail = &hr_dev->qp_table.idx_table.tail;
struct hns_roce_dip *hr_dip;
unsigned long flags;
int ret = 0;

spin_lock_irqsave(&hr_dev->dip_list_lock, flags);

+ spare_idx[*tail] = ibqp->qp_num;
+ *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
+
list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) {
*dip_idx = hr_dip->dip_idx;
@@ -4527,7 +4533,8 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}

memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
- hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
+ hr_dip->dip_idx = *dip_idx = spare_idx[*head];
+ *head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
list_add_tail(&hr_dip->node, &hr_dev->dip_list);

out:
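Two properties fall out of the ordering in get_dip_ctx_idx(): the caller's own QPN is always enqueued (under dip_list_lock) before the list walk, and an index is dequeued only on a dgid miss. Every pop is therefore preceded by at least as many pushes, so the slot read at *head has always been written, and dip_idx values are recycled in FIFO order instead of being pinned to the caller's qp_num as before. In terms of the earlier sketch (get_dip_idx and its parameters are placeholders):

/* Same order as the hunk above; assumes one lock held around both steps. */
static uint32_t get_dip_idx(struct idx_ring *r, uint32_t qpn, int dgid_found,
			    uint32_t existing_idx)
{
	idx_ring_push(r, qpn);		/* unconditional enqueue */
	if (dgid_found)
		return existing_idx;	/* hit: reuse the mapped index */
	return idx_ring_pop(r);		/* miss: take the oldest spare QPN */
}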
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -746,6 +746,12 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_table_free;
}

+ ret = hns_roce_init_qp_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "Failed to init qp_table.\n");
+ goto err_uar_table_free;
+ }
+
hns_roce_init_pd_table(hr_dev);

if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
@@ -755,8 +761,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_cq_table(hr_dev);

- hns_roce_init_qp_table(hr_dev);
-
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
hns_roce_init_srq_table(hr_dev);
}
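hns_roce_init_qp_table() moves ahead of the other table inits, presumably because it is now the only call in this group that can fail: per the prototypes in the hns_roce_device.h hunk above, the pd/mr/cq/srq/xrcd inits all return void, so a failure here can unwind through the existing err_uar_table_free label before any of them have run.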
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1411,12 +1411,17 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
return cur + nreq >= hr_wq->wqe_cnt;
}

-void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
+int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
unsigned int reserved_from_bot;
unsigned int i;

+ qp_table->idx_table.spare_idx = kcalloc(hr_dev->caps.num_qps,
+ sizeof(u32), GFP_KERNEL);
+ if (!qp_table->idx_table.spare_idx)
+ return -ENOMEM;
+
mutex_init(&qp_table->scc_mutex);
mutex_init(&qp_table->bank_mutex);
xa_init(&hr_dev->qp_table_xa);
@@ -1434,6 +1439,8 @@ void hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
HNS_ROCE_QP_BANK_NUM - 1;
hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
}
+
+ return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
@@ -1442,4 +1449,5 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)

for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
+ kfree(hr_dev->qp_table.idx_table.spare_idx);
}
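The matching lifecycle in the sketch's terms is below. Note that the patch itself never assigns head or tail, apparently relying on the containing hns_roce_dev being zero-allocated; the hypothetical idx_ring_init/idx_ring_destroy here zero them by hand.

#include <errno.h>
#include <stdlib.h>

static int idx_ring_init(struct idx_ring *r, uint32_t num_qps)
{
	r->spare_idx = calloc(num_qps, sizeof(uint32_t));
	if (!r->spare_idx)
		return -ENOMEM;
	r->size = num_qps;
	r->head = 0;
	r->tail = 0;
	return 0;
}

static void idx_ring_destroy(struct idx_ring *r)
{
	free(r->spare_idx);
	r->spare_idx = NULL;
}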