@@ -85,6 +85,8 @@ DECLARE_DRV_CMD(mlx5_query_device_ex, IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
empty, mlx5_ib_query_device_resp);
DECLARE_DRV_CMD(mlx5_modify_qp_ex, IB_USER_VERBS_EX_CMD_MODIFY_QP,
empty, mlx5_ib_modify_qp_resp);
+DECLARE_DRV_CMD(mlx5_import_pd, IB_USER_VERBS_CMD_IMPORT_PD,
+ empty, mlx5_ib_alloc_pd_resp);

struct mlx5_modify_qp {
struct ibv_modify_qp_ex ibv_cmd;
@@ -91,6 +91,7 @@ static const struct verbs_context_ops mlx5_ctx_common_ops = {
.alloc_pd = mlx5_alloc_pd,
.async_event = mlx5_async_event,
.dealloc_pd = mlx5_free_pd,
+ .import_pd = mlx5_import_pd,
.reg_mr = mlx5_reg_mr,
.rereg_mr = mlx5_rereg_mr,
.dereg_mr = mlx5_dereg_mr,
@@ -816,6 +816,8 @@ int mlx5_query_port(struct ibv_context *context, uint8_t port,

struct ibv_pd *mlx5_alloc_pd(struct ibv_context *context);
int mlx5_free_pd(struct ibv_pd *pd);
+struct ibv_pd *mlx5_import_pd(struct ibv_context *context, uint32_t fd,
+ uint32_t handle);

void mlx5_async_event(struct ibv_context *context,
struct ibv_async_event *event);
@@ -178,6 +178,34 @@ struct ibv_pd *mlx5_alloc_pd(struct ibv_context *context)
return &pd->ibv_pd;
}

+struct ibv_pd *mlx5_import_pd(struct ibv_context *context, uint32_t fd,
+ uint32_t handle)
+{
+ struct ibv_import_pd cmd = {
+ .handle = handle,
+ .type = UVERBS_OBJECT_PD,
+ .fd = fd,
+ };
+ struct mlx5_import_pd_resp resp;
+ struct mlx5_pd *pd;
+ int ret;
+
+ pd = calloc(1, sizeof(*pd));
+ if (!pd)
+ return NULL;
+
+ ret = ibv_cmd_import_pd(context, &pd->ibv_pd, &cmd, sizeof(cmd),
+ &resp.ibv_resp, sizeof(resp));
+ if (ret) {
+ free(pd);
+ return NULL;
+ }
+
+ pd->pdn = resp.pdn;
+
+ return &pd->ibv_pd;
+}
+
static void mlx5_put_bfreg_index(struct mlx5_context *ctx, uint32_t bfreg_dyn_index)
{
pthread_mutex_lock(&ctx->dyn_bfregs_mutex);
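
For reference, a minimal sketch of how a consuming process might exercise the new verb once the series is in place. Only the provider-side mlx5_import_pd() path above is part of this patch; the three-argument ibv_import_pd() wrapper on the importer's side, the helper names export_pd()/import_peer_pd(), and the way the exporter's command FD and PD handle reach the importer (e.g. SCM_RIGHTS over a Unix socket) are assumptions made for illustration.

```c
/*
 * Illustrative sketch, not part of the patch: share a PD between two
 * processes via the new import path. The exporter side uses only
 * existing verbs fields (ibv_context.cmd_fd, ibv_pd.handle); how the
 * two values travel to the importer is omitted. The three-argument
 * ibv_import_pd() call is an assumption mirroring the (fd, handle)
 * pair taken by the provider op added in this patch; the public
 * wrapper itself is not shown in these hunks.
 */
#include <infiniband/verbs.h>
#include <stdio.h>

/* Exporter: collect what the peer needs in order to import our PD. */
static void export_pd(struct ibv_context *ctx, struct ibv_pd *pd,
		      int *cmd_fd_out, uint32_t *handle_out)
{
	*cmd_fd_out = ctx->cmd_fd;   /* uverbs FD, to be passed via SCM_RIGHTS */
	*handle_out = pd->handle;    /* kernel object handle of the PD */
}

/* Importer: materialize the peer's PD in this process. */
static struct ibv_pd *import_peer_pd(struct ibv_context *ctx,
				     uint32_t peer_cmd_fd,
				     uint32_t peer_pd_handle)
{
	/* Assumed wrapper; dispatches to .import_pd, i.e. mlx5_import_pd(). */
	struct ibv_pd *pd = ibv_import_pd(ctx, peer_cmd_fd, peer_pd_handle);

	if (!pd)
		fprintf(stderr, "importing PD handle %u failed\n",
			peer_pd_handle);
	return pd;
}
```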