@@ -55,7 +55,11 @@ struct mlx5_alloc_ucontext {
__u32 total_num_uuars;
__u32 num_low_latency_uuars;
__u32 flags;
- __u32 reserved;
+ __u32 comp_mask;
+ __u8 cqe_version;
+ __u8 reserved0;
+ __u16 reserved1;
+ __u32 reserved2;
};

struct mlx5_alloc_ucontext_resp {
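The request side of the handshake grows by eight bytes: comp_mask takes over the slot of the old reserved word, and the single-byte cqe_version is padded back out with explicit reserved fields. A compile-time check along these lines (hypothetical, not part of the patch, and assuming the usual convention that the driver-specific command payload stays a multiple of 8 bytes) would document those layout expectations next to the struct definition:

/* Hypothetical layout checks for struct mlx5_alloc_ucontext:
 * comp_mask must reuse the offset of the old "reserved" member, and the
 * command payload should remain 64-bit aligned (hence reserved0..reserved2). */
#include <assert.h>
#include <stddef.h>

static_assert(offsetof(struct mlx5_alloc_ucontext, comp_mask) ==
	      offsetof(struct mlx5_alloc_ucontext, flags) + sizeof(__u32),
	      "comp_mask must sit where the old reserved field was");
static_assert(sizeof(struct mlx5_alloc_ucontext) % 8 == 0,
	      "command payload should stay 64-bit aligned");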
@@ -70,7 +74,12 @@ struct mlx5_alloc_ucontext_resp {
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
- __u16 reserved;
+ __u16 reserved1;
+ __u32 comp_mask;
+ __u32 response_length;
+ __u8 cqe_version;
+ __u8 reserved2;
+ __u16 reserved3;
};

struct mlx5_alloc_pd_resp {
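On the response side, comp_mask and response_length make the struct extensible: a newer library running on an older kernel gets back fewer bytes, so trailing members may be read only when the kernel says it wrote them. A helper along these lines (hypothetical name, and assuming response_length counts the bytes of mlx5_alloc_ucontext_resp the kernel actually filled in) captures that rule:

/* Hypothetical guard: only trust resp->cqe_version if the kernel reported
 * writing at least that far into the response. */
#include <stddef.h>

static inline int mlx5_resp_has_cqe_version(const struct mlx5_alloc_ucontext_resp *resp)
{
	return resp->response_length >=
	       offsetof(struct mlx5_alloc_ucontext_resp, cqe_version) +
	       sizeof(resp->cqe_version);
}

Together with the memset of resp added below, this also behaves sanely on kernels that predate the field entirely: the untouched bytes stay zero, so the helper reports the version as unavailable and cqe_version reads as 0.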
@@ -590,8 +590,11 @@ static int mlx5_init_context(struct verbs_device *vdev,
}
memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
req.total_num_uuars = tot_uuars;
req.num_low_latency_uuars = low_lat_uuars;
+ req.cqe_version = MLX5_CQE_VERSION_V1;
if (ibv_cmd_get_context(&context->ibv_ctx, &req.ibv_req, sizeof req,
&resp.ibv_resp, sizeof resp))
goto err_free_bf;
@@ -608,6 +611,7 @@ static int mlx5_init_context(struct verbs_device *vdev,
context->max_recv_wr = resp.max_recv_wr;
context->max_srq_recv_wr = resp.max_srq_recv_wr;
+ context->cqe_version = resp.cqe_version;
if (context->cqe_version) {
if (context->cqe_version == 1)
mlx5_ctx_ops.poll_cq = mlx5_poll_cq_v1;
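None of this is visible to applications: the request always asks for version 1, an older kernel leaves the zero-initialised response at version 0, and the only observable effect is which CQE parser ends up behind the verbs ops table. A usage sketch from the application side (standard libibverbs calls, unchanged by this series; drain_cq is an illustrative helper, not part of the patch):

#include <stdio.h>
#include <infiniband/verbs.h>

/* Drain a CQ; on a context that negotiated CQE version 1 this transparently
 * dispatches to mlx5_poll_cq_v1 through the ops table. */
static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int n;

	while ((n = ibv_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IBV_WC_SUCCESS)
			fprintf(stderr, "completion error: %s\n",
				ibv_wc_status_str(wc.status));
	}
	return n;	/* 0 when the CQ is empty, negative on error */
}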
@@ -120,6 +120,11 @@ enum {
MLX5_MMAP_GET_CONTIGUOUS_PAGES_CMD = 1
};

+enum {
+ MLX5_CQE_VERSION_V0 = 0,
+ MLX5_CQE_VERSION_V1 = 1,
+};
+
#define MLX5_CQ_PREFIX "MLX_CQ"
#define MLX5_QP_PREFIX "MLX_QP"
#define MLX5_MR_PREFIX "MLX_MR"
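The two constants mirror the single-byte cqe_version fields added to the ABI structs above. If further layouts appear later, the if/else chain in mlx5_init_context could become a lookup table; a hypothetical sketch (not part of this series, assuming the existing mlx5_poll_cq and the new mlx5_poll_cq_v1 both use the standard poll_cq signature from struct ibv_context_ops):

/* Hypothetical dispatch table keyed by the negotiated CQE version. */
static int (*const mlx5_poll_cq_table[])(struct ibv_cq *, int, struct ibv_wc *) = {
	[MLX5_CQE_VERSION_V0] = mlx5_poll_cq,
	[MLX5_CQE_VERSION_V1] = mlx5_poll_cq_v1,
};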