--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -606,8 +606,8 @@ struct efa_admin_feature_queue_attr_desc {
 	/* Number of sub-CQs to be created for each CQ */
 	u16 sub_cqs_per_cq;
 
-	/* MBZ */
-	u16 reserved;
+	/* Minimum number of WQEs per SQ */
+	u16 min_sq_depth;
 
 	/* Maximum number of SGEs (buffers) allowed for a single send WQE */
 	u16 max_wr_send_sges;
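The former MBZ word is repurposed in place, so the admin response layout is
unchanged: devices that predate the attribute still report zero there, which
degrades naturally to "no minimum enforced". A minimal sketch of how a
consumer might apply the value (efa_effective_sq_depth is a hypothetical
helper, not part of this patch; max_t is the kernel macro from
linux/minmax.h):

	/* Sketch only, not driver code: zero from older devices means
	 * "no minimum", so max_t() leaves the request untouched. */
	static u32 efa_effective_sq_depth(u32 requested, u16 min_sq_depth)
	{
		return max_t(u32, requested, min_sq_depth);
	}
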
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -481,6 +481,7 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
 	result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
 	result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
 	result->max_tx_batch = resp.u.queue_attr.max_tx_batch;
+	result->min_sq_depth = resp.u.queue_attr.min_sq_depth;
 
 	err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
 	if (err) {
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -128,6 +128,7 @@ struct efa_com_get_device_attr_result {
 	u16 max_rq_sge;
 	u16 max_wr_rdma_sge;
 	u16 max_tx_batch;
+	u16 min_sq_depth;
 	u8 db_bar;
 };
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1526,6 +1526,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
 	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
 	resp.max_llq_size = dev->dev_attr.max_llq_size;
 	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
+	resp.min_sq_wr = dev->dev_attr.min_sq_depth;
 
 	if (udata && udata->outlen) {
 		err = ib_copy_to_udata(udata, &resp,
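The copy above is what makes the new field safe to expose without a new
comp_mask bit. The continuation line (assumed from the surrounding driver
code, as the hunk ends mid-call) caps the copy at the caller's buffer:

	/* Continuation assumed from the surrounding source: the length is
	 * capped at udata->outlen, so an older provider that allocated a
	 * smaller response struct never reads min_sq_wr, and a newer
	 * provider on an older kernel sees the field as zero. */
	err = ib_copy_to_udata(udata, &resp,
			       min(sizeof(resp), udata->outlen));
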
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -32,7 +32,8 @@ struct efa_ibv_alloc_ucontext_resp {
 	__u16 inline_buf_size;
 	__u32 max_llq_size; /* bytes */
 	__u16 max_tx_batch; /* units of 64 bytes */
-	__u8 reserved_90[6];
+	__u16 min_sq_wr;
+	__u8 reserved_a0[4];
 };
 
 struct efa_ibv_alloc_pd_resp {
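In the uapi struct, min_sq_wr consumes two of the six formerly reserved
bytes and the remaining four are renamed to reserved_a0 (the suffix is the
field's bit offset: 0x90 = 144, 0xa0 = 160), so the struct size and the
offsets of all existing fields are unchanged. A hypothetical sketch of how
a userspace provider might consume the value (names are illustrative, not
rdma-core's actual code):

	#include <stdint.h>

	/* Hypothetical userspace helper: round a requested SQ depth up to
	 * the kernel-reported minimum. A min_sq_wr of 0, as reported by
	 * older kernels, leaves the request unchanged. */
	static uint32_t efa_clamp_sq_depth(uint32_t requested_wr,
					   uint16_t min_sq_wr)
	{
		return requested_wr < min_sq_wr ? min_sq_wr : requested_wr;
	}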