@@ -8,8 +8,10 @@ libmlx5.so.1 ibverbs-providers #MINVER#
MLX5_1.0@MLX5_1.0 13
MLX5_1.1@MLX5_1.1 14
MLX5_1.2@MLX5_1.2 15
+ MLX5_1.3@MLX5_1.3 16
mlx5dv_init_obj@MLX5_1.0 13
mlx5dv_init_obj@MLX5_1.2 15
mlx5dv_query_device@MLX5_1.0 13
mlx5dv_create_cq@MLX5_1.1 14
mlx5dv_set_context_attr@MLX5_1.2 15
+ mlx5dv_create_wq@MLX5_1.3 16
@@ -11,7 +11,7 @@ if (MLX5_MW_DEBUG)
endif()
rdma_shared_provider(mlx5 libmlx5.map
- 1 1.2.${PACKAGE_VERSION}
+ 1 1.3.${PACKAGE_VERSION}
buf.c
cq.c
dbrec.c
@@ -17,3 +17,8 @@ MLX5_1.2 {
mlx5dv_init_obj;
mlx5dv_set_context_attr;
} MLX5_1.1;
+
+MLX5_1.3 {
+ global:
+ mlx5dv_create_wq;
+} MLX5_1.2;
@@ -210,6 +210,10 @@ struct mlx5_create_qp_resp {
__u32 uuar_index;
};
+enum mlx5_create_wq_comp_mask {
+ MLX5_IB_CREATE_WQ_STRIDING_RQ = 1 << 0,
+};
+
struct mlx5_drv_create_wq {
__u64 buf_addr;
__u64 db_addr;
@@ -218,7 +222,9 @@ struct mlx5_drv_create_wq {
__u32 user_index;
__u32 flags;
__u32 comp_mask;
- __u32 reserved;
+ __u32 single_stride_log_num_of_bytes;
+ __u32 single_wqe_log_num_of_strides;
+ __u32 two_byte_shift_en;
};
struct mlx5_create_wq {
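The three fields that replace the reserved word are log2-encoded: single_stride_log_num_of_bytes is the log2 of the stride size in bytes, and single_wqe_log_num_of_strides is the log2 of the number of strides in one WQE. A standalone sketch of the resulting buffer arithmetic (the helper name and example values are illustrative, not part of the ABI):

#include <stdint.h>

/* Illustrative only: total receive buffer described by one striding-RQ WQE,
 * derived from the two log2-encoded parameters above. */
static inline uint64_t striding_rq_wqe_buf_bytes(uint32_t stride_log_bytes,
						 uint32_t wqe_log_strides)
{
	/* e.g. stride_log_bytes = 6 (64-byte strides) and wqe_log_strides = 9
	 * (512 strides) describe a 32 KB buffer per WQE. */
	return UINT64_C(1) << (stride_log_bytes + wqe_log_strides);
}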
@@ -213,6 +213,43 @@ enum mlx5dv_obj_type {
MLX5DV_OBJ_RWQ = 1 << 3,
};
+enum mlx5dv_wq_init_attr_mask {
+ MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ = 1 << 0,
+};
+
+struct mlx5dv_striding_rq_init_attr {
+ uint32_t single_stride_log_num_of_bytes;
+ uint32_t single_wqe_log_num_of_strides;
+ uint8_t two_byte_shift_en;
+};
+
+struct mlx5dv_wq_init_attr {
+ uint64_t comp_mask; /* Use enum mlx5dv_wq_init_attr_mask */
+ struct mlx5dv_striding_rq_init_attr striding_rq_attrs;
+};
+
+/*
+ * This function creates a work queue object with the extra properties
+ * defined by the mlx5dv_wq_init_attr struct.
+ *
+ * For each bit set in comp_mask, the corresponding field of
+ * mlx5dv_wq_init_attr must be filled in.
+ *
+ * MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ: Create a work queue with
+ * striding RQ capabilities.
+ * - single_stride_log_num_of_bytes is the log2 size, in bytes, of each
+ *   stride in the WQE. Its value must be between
+ *   min_single_stride_log_num_of_bytes and max_single_stride_log_num_of_bytes
+ *   as reported by mlx5dv_query_device.
+ * - single_wqe_log_num_of_strides is the log2 number of strides in each WQE.
+ *   Its value must be between min_single_wqe_log_num_of_strides and
+ *   max_single_wqe_log_num_of_strides as reported by mlx5dv_query_device.
+ * - two_byte_shift_en: when enabled, the hardware pads 2 bytes of zeroes
+ *   before writing the message to memory (e.g. for IP alignment).
+ */
+struct ibv_wq *mlx5dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr);
/*
* This function will initialize mlx5dv_xxx structs based on supplied type.
* The information for initialization is taken from ibv_xx structs supplied
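The declaration above is the whole of the new direct-verbs entry point; a short usage sketch follows. The PD/CQ setup, the queue depth, and the stride values are illustrative placeholders, and valid stride values must fall within the limits reported by mlx5dv_query_device as described in the comment above.

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Sketch: create a striding RQ on an already-opened mlx5 device.
 * 'pd' and 'cq' must already exist; the stride parameters are examples. */
static struct ibv_wq *create_striding_rq(struct ibv_context *ctx,
					 struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_wq_init_attr wq_attr = {
		.wq_type = IBV_WQT_RQ,
		.max_wr	 = 64,	/* number of multi-packet WQEs */
		.max_sge = 1,
		.pd	 = pd,
		.cq	 = cq,
	};
	struct mlx5dv_wq_init_attr dv_attr = {
		.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ,
		.striding_rq_attrs = {
			/* Example values only; they must lie within the
			 * min/max caps reported by mlx5dv_query_device. */
			.single_stride_log_num_of_bytes = 6,	/* 64B strides */
			.single_wqe_log_num_of_strides = 9,	/* 512 strides/WQE */
			.two_byte_shift_en = 0,
		},
	};

	return mlx5dv_create_wq(ctx, &wq_attr, &dv_attr);
}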
@@ -328,7 +365,9 @@ struct mlx5_tm_cqe {
struct mlx5_cqe64 {
union {
struct {
- uint8_t rsvd0[17];
+ uint8_t rsvd0[2];
+ __be16 wqe_id;
+ uint8_t rsvd4[13];
uint8_t ml_path;
uint8_t rsvd20[4];
__be16 slid;
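Carving two of the seventeen reserved bytes out for wqe_id lets a consumer of raw CQEs tell which receive WQE a completion belongs to, which matters for a striding RQ where many packets complete against the same multi-packet WQE. A minimal, hedged sketch of reading the field (the surrounding polling logic is assumed):

#include <endian.h>
#include <stdint.h>
#include <infiniband/mlx5dv.h>

/* Sketch: extract the WQE index from a raw 64-byte mlx5 CQE.
 * Only meaningful for completions produced by a striding RQ. */
static inline uint16_t cqe_wqe_id(const struct mlx5_cqe64 *cqe)
{
	return be16toh(cqe->wqe_id);
}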
@@ -455,6 +494,11 @@ struct mlx5_wqe_ctrl_seg {
__be32 imm;
};
+struct mlx5_mprq_wqe {
+ struct mlx5_wqe_srq_next_seg nseg;
+ struct mlx5_wqe_data_seg dseg;
+};
+
struct mlx5_wqe_av {
union {
struct {
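struct mlx5_mprq_wqe captures the layout of a multi-packet receive WQE: a next segment followed by a single data segment describing the multi-stride buffer. A compile-time sketch of that layout, assuming these definitions are picked up from the provider's mlx5dv.h as in the hunks above:

#include <assert.h>
#include <infiniband/mlx5dv.h>

/* Both segments are 16 bytes, so a multi-packet receive WQE occupies
 * 32 bytes of the ring regardless of how many strides it covers. */
static_assert(sizeof(struct mlx5_mprq_wqe) ==
	      sizeof(struct mlx5_wqe_srq_next_seg) +
	      sizeof(struct mlx5_wqe_data_seg),
	      "MPRQ WQE = next segment + one data segment");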
@@ -952,21 +952,36 @@ static int mlx5_calc_sq_size(struct mlx5_context *ctx,
return wq_size;
}
+enum {
+ DV_CREATE_WQ_SUPPORTED_COMP_MASK = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ
+};
+
static int mlx5_calc_rwq_size(struct mlx5_context *ctx,
struct mlx5_rwq *rwq,
- struct ibv_wq_init_attr *attr)
+ struct ibv_wq_init_attr *attr,
+ struct mlx5dv_wq_init_attr *mlx5wq_attr)
{
size_t wqe_size;
int wq_size;
uint32_t num_scatter;
+ int is_mprq = 0;
int scat_spc;
if (!attr->max_wr)
return -EINVAL;
+ if (mlx5wq_attr) {
+ if (!check_comp_mask(mlx5wq_attr->comp_mask,
+ DV_CREATE_WQ_SUPPORTED_COMP_MASK))
+ return -EINVAL;
+
+ is_mprq = !!(mlx5wq_attr->comp_mask &
+ MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ);
+ }
/* TBD: check caps for RQ */
num_scatter = max_t(uint32_t, attr->max_sge, 1);
- wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;
+ wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter +
+ sizeof(struct mlx5_wqe_srq_next_seg) * is_mprq;
if (rwq->wq_sig)
wqe_size += sizeof(struct mlx5_rwqe_sig);
@@ -981,7 +996,8 @@ static int mlx5_calc_rwq_size(struct mlx5_context *ctx,
rwq->rq.wqe_shift = mlx5_ilog2(wqe_size);
rwq->rq.max_post = 1 << mlx5_ilog2(wq_size / wqe_size);
scat_spc = wqe_size -
- ((rwq->wq_sig) ? sizeof(struct mlx5_rwqe_sig) : 0);
+ ((rwq->wq_sig) ? sizeof(struct mlx5_rwqe_sig) : 0) -
+ is_mprq * sizeof(struct mlx5_wqe_srq_next_seg);
rwq->rq.max_gs = scat_spc / sizeof(struct mlx5_wqe_data_seg);
return wq_size;
}
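The size calculation above shows the cost of the striding layout: with is_mprq set, each receive WQE pays for one extra mlx5_wqe_srq_next_seg, and the same amount is subtracted from the scatter space before max_gs is derived. A standalone sketch of that accounting, using the public segment definitions (the helper is illustrative, not provider code):

#include <stddef.h>
#include <stdint.h>
#include <infiniband/mlx5dv.h>

/* Sketch of the wqe_size term computed by mlx5_calc_rwq_size: a striding-RQ
 * WQE prepends one 16-byte srq_next_seg to the num_scatter data segments,
 * so for a given (power-of-two-rounded) wqe_size it fits one data segment
 * less of scatter space than a regular RQ would. */
static size_t striding_rwq_wqe_size(uint32_t num_scatter)
{
	return sizeof(struct mlx5_wqe_srq_next_seg) +
	       sizeof(struct mlx5_wqe_data_seg) * num_scatter;
}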
@@ -2226,8 +2242,9 @@ static int mlx5_alloc_rwq_buf(struct ibv_context *context,
return 0;
}
-struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
- struct ibv_wq_init_attr *attr)
+static struct ibv_wq *create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *attr,
+ struct mlx5dv_wq_init_attr *mlx5wq_attr)
{
struct mlx5_create_wq cmd;
struct mlx5_create_wq_resp resp;
@@ -2252,7 +2269,7 @@ struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
if (rwq->wq_sig)
cmd.drv.flags = MLX5_RWQ_FLAG_SIGNATURE;
- ret = mlx5_calc_rwq_size(ctx, rwq, attr);
+ ret = mlx5_calc_rwq_size(ctx, rwq, attr, mlx5wq_attr);
if (ret < 0) {
errno = -ret;
goto err;
@@ -2286,6 +2303,35 @@ struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
}
cmd.drv.user_index = usr_idx;
+
+ if (mlx5wq_attr) {
+ if (mlx5wq_attr->comp_mask & MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ) {
+ if ((mlx5wq_attr->striding_rq_attrs.single_stride_log_num_of_bytes <
+ ctx->striding_rq_caps.min_single_stride_log_num_of_bytes) ||
+ (mlx5wq_attr->striding_rq_attrs.single_stride_log_num_of_bytes >
+ ctx->striding_rq_caps.max_single_stride_log_num_of_bytes)) {
+ errno = EINVAL;
+ goto err_create;
+ }
+
+ if ((mlx5wq_attr->striding_rq_attrs.single_wqe_log_num_of_strides <
+ ctx->striding_rq_caps.min_single_wqe_log_num_of_strides) ||
+ (mlx5wq_attr->striding_rq_attrs.single_wqe_log_num_of_strides >
+ ctx->striding_rq_caps.max_single_wqe_log_num_of_strides)) {
+ errno = EINVAL;
+ goto err_create;
+ }
+
+ cmd.drv.single_stride_log_num_of_bytes =
+ mlx5wq_attr->striding_rq_attrs.single_stride_log_num_of_bytes;
+ cmd.drv.single_wqe_log_num_of_strides =
+ mlx5wq_attr->striding_rq_attrs.single_wqe_log_num_of_strides;
+ cmd.drv.two_byte_shift_en =
+ mlx5wq_attr->striding_rq_attrs.two_byte_shift_en;
+ cmd.drv.comp_mask |= MLX5_IB_CREATE_WQ_STRIDING_RQ;
+ }
+ }
+
err = ibv_cmd_create_wq(context, attr, &rwq->wq, &cmd.ibv_cmd,
sizeof(cmd.ibv_cmd),
sizeof(cmd),
@@ -2311,6 +2357,19 @@ err:
return NULL;
}
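Because create_wq rejects stride values outside the device limits, a caller would normally query those limits first. A hedged sketch follows; the comp-mask and caps field names on the mlx5dv_context side are assumptions based on the header comment stating that the min/max values are reported by mlx5dv_query_device:

#include <stdio.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Sketch: read the striding RQ limits before choosing the log2 stride
 * parameters passed to mlx5dv_create_wq. The MLX5DV_CONTEXT_MASK_STRIDING_RQ
 * mask and the striding_rq_caps field are assumed to come from the
 * companion mlx5dv_query_device support. */
static int print_striding_rq_caps(struct ibv_context *ctx)
{
	struct mlx5dv_context dv_ctx = {
		.comp_mask = MLX5DV_CONTEXT_MASK_STRIDING_RQ,
	};

	if (mlx5dv_query_device(ctx, &dv_ctx))
		return -1;

	printf("stride bytes (log2): %u..%u, strides per WQE (log2): %u..%u\n",
	       dv_ctx.striding_rq_caps.min_single_stride_log_num_of_bytes,
	       dv_ctx.striding_rq_caps.max_single_stride_log_num_of_bytes,
	       dv_ctx.striding_rq_caps.min_single_wqe_log_num_of_strides,
	       dv_ctx.striding_rq_caps.max_single_wqe_log_num_of_strides);
	return 0;
}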
+struct ibv_wq *mlx5_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *attr)
+{
+ return create_wq(context, attr, NULL);
+}
+
+struct ibv_wq *mlx5dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr)
+{
+ return create_wq(context, attr, mlx5_wq_attr);
+}
+
int mlx5_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *attr)
{
struct mlx5_modify_wq cmd = {};