@@ -729,6 +729,16 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp *qp, *real_qp;
struct ib_device *device;
+ if (qp_init_attr->rx_hash_conf) {
+ /* RSS QPs take CQs from the indirection table's WQs; only UD and raw QP types support RX hashing */
+ if (qp_init_attr->send_cq || qp_init_attr->recv_cq)
+ return ERR_PTR(-EINVAL);
+ if ((qp_init_attr->qp_type != IB_QPT_UD) &&
+ (qp_init_attr->qp_type != IB_QPT_RAW_ETHERTYPE) &&
+ (qp_init_attr->qp_type != IB_QPT_RAW_PACKET))
+ return ERR_PTR(-EINVAL);
+ }
+
device = pd ? pd->device : qp_init_attr->xrcd->device;
qp = device->create_qp(pd, qp_init_attr, NULL);
@@ -737,6 +747,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
+ qp->rwq_ind_tbl = NULL;
atomic_set(&qp->usecnt, 0);
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
@@ -764,7 +775,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->srq = NULL;
} else {
qp->recv_cq = qp_init_attr->recv_cq;
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
+ if (!qp_init_attr->rx_hash_conf)
+ atomic_inc(&qp_init_attr->recv_cq->usecnt);
qp->srq = qp_init_attr->srq;
if (qp->srq)
atomic_inc(&qp_init_attr->srq->usecnt);
@@ -775,7 +787,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->xrcd = NULL;
atomic_inc(&pd->usecnt);
- atomic_inc(&qp_init_attr->send_cq->usecnt);
+ if (qp_init_attr->rx_hash_conf) {
+ qp->rwq_ind_tbl = qp_init_attr->rx_hash_conf->rwq_ind_tbl;
+ atomic_inc(&qp->rwq_ind_tbl->usecnt);
+ } else {
+ atomic_inc(&qp_init_attr->send_cq->usecnt);
+ }
}
}
@@ -1248,6 +1265,7 @@ int ib_destroy_qp(struct ib_qp *qp)
struct ib_pd *pd;
struct ib_cq *scq, *rcq;
struct ib_srq *srq;
+ struct ib_rwq_ind_table *ind_tbl;
int ret;
if (atomic_read(&qp->usecnt))
@@ -1260,6 +1278,7 @@ int ib_destroy_qp(struct ib_qp *qp)
scq = qp->send_cq;
rcq = qp->recv_cq;
srq = qp->srq;
+ ind_tbl = qp->rwq_ind_tbl;
ret = qp->device->destroy_qp(qp);
if (!ret) {
@@ -1271,6 +1290,8 @@ int ib_destroy_qp(struct ib_qp *qp)
atomic_dec(&rcq->usecnt);
if (srq)
atomic_dec(&srq->usecnt);
+ if (ind_tbl)
+ atomic_dec(&ind_tbl->usecnt);
}
return ret;
@@ -1005,6 +1005,7 @@ struct ib_qp_init_attr {
enum ib_qp_type qp_type;
enum ib_qp_create_flags create_flags;
u8 port_num; /* special QP types only */
+ struct ib_rx_hash_conf *rx_hash_conf;
};
struct ib_qp_open_attr {
@@ -1494,6 +1495,40 @@ struct ib_qp {
void *qp_context;
u32 qp_num;
enum ib_qp_type qp_type;
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+};
+
+/* RX Hash function flags */
+enum ib_rx_hash_function_flags {
+ IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+ IB_RX_HASH_FUNC_XOR = 1 << 1
+};
+
+/*
+ * RX Hash flags: these flags select which fields of an incoming packet
+ * participate in the RX Hash calculation. Each flag represents one packet
+ * field; when a flag is set, the field it represents is included in the
+ * RX Hash computation.
+ * Note: the *IPV4 and *IPV6 flags can't be enabled together on the same QP,
+ * and the *TCP and *UDP flags can't be enabled together on the same QP.
+ */
+enum ib_rx_hash_fields {
+ IB_RX_HASH_SRC_IPV4 = 1 << 0,
+ IB_RX_HASH_DST_IPV4 = 1 << 1,
+ IB_RX_HASH_SRC_IPV6 = 1 << 2,
+ IB_RX_HASH_DST_IPV6 = 1 << 3,
+ IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
+ IB_RX_HASH_DST_PORT_TCP = 1 << 5,
+ IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
+ IB_RX_HASH_DST_PORT_UDP = 1 << 7
+};
+
+struct ib_rx_hash_conf {
+ enum ib_rx_hash_function_flags rx_hash_function;
+ u8 rx_key_len; /* valid only for Toeplitz */
+ u8 *rx_hash_key;
+ u64 rx_hash_fields_mask; /* enum ib_rx_hash_fields */
+ struct ib_rwq_ind_table *rwq_ind_tbl;
};
struct ib_mr {