@@ -81,6 +81,23 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};
+/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+	MLX5_IB_CLOCK_INFO_V1 = 0,
+};
+
+struct mlx5_ib_clock_info {
+	__u32 sig;
+	__u32 resv;
+	__u64 nsec;
+	__u64 last_cycles;
+	__u64 frac;
+	__u32 mult;
+	__u32 shift;
+	__u64 mask;
+	__u64 overflow_period;
+};
+
struct mlx5_alloc_ucontext_resp {
struct ibv_get_context_resp ibv_resp;
__u32 qp_tab_size;
@@ -98,7 +115,8 @@ struct mlx5_alloc_ucontext_resp {
__u32 response_length;
__u8 cqe_version;
__u8 cmds_supp_uhw;
-	__u16 reserved2;
+	__u8 reserved2;
+	__u8 clock_info_versions;
__u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
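
The new struct mlx5_ib_clock_info is a read-only snapshot of the kernel's timecounter state: nsec and last_cycles anchor one point in time, mult, shift and frac scale a cycle delta into nanoseconds, mask gives the width of the free-running counter, and sig lets a reader detect that the kernel is rewriting the page. As a rough illustration (not part of this patch, and ignoring counter wrap-around and timestamps older than last_cycles), a consumer holding one consistent snapshot could convert a raw HCA timestamp along these lines:

/*
 * Sketch only: cycles-to-nanoseconds conversion from a consistent snapshot
 * of the fields above, mirroring the kernel timecounter math.
 */
static uint64_t clock_info_cycles_to_ns(const struct mlx5_ib_clock_info *ci,
					uint64_t cycles)
{
	uint64_t delta = (cycles - ci->last_cycles) & ci->mask;

	return ci->nsec + (((delta * ci->mult) + ci->frac) >> ci->shift);
}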
@@ -607,6 +607,23 @@ static int mlx5_map_internal_clock(struct mlx5_device *mdev,
return 0;
}
+static void mlx5_map_clock_info(struct mlx5_device *mdev,
+				struct ibv_context *ibv_ctx)
+{
+	struct mlx5_context *context = to_mctx(ibv_ctx);
+	void *clock_info_page;
+	off_t offset = 0;
+
+	set_command(MLX5_MMAP_GET_CLOCK_INFO_CMD, &offset);
+	set_index(MLX5_IB_CLOCK_INFO_V1, &offset);
+	clock_info_page = mmap(NULL, mdev->page_size,
+			       PROT_READ, MAP_SHARED, ibv_ctx->cmd_fd,
+			       offset * mdev->page_size);
+
+	if (clock_info_page != MAP_FAILED)
+		context->clock_info_page = clock_info_page;
+}
+
int mlx5dv_query_device(struct ibv_context *ctx_in,
struct mlx5dv_context *attrs_out)
{
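
mlx5_map_clock_info() reuses the provider's existing trick of encoding a command and an index in the mmap page offset, which the kernel decodes in its mmap handler; mapping is best-effort, so on failure the function returns without touching context->clock_info_page, which the caller initializes to NULL. Assuming the usual mlx5 packing of the command above an 8-bit index field (an assumption about the set_command()/set_index() helpers, not something this patch changes), the request above boils down to:

/*
 * Illustration only: the pseudo page offset built by set_command()/set_index()
 * above, assuming the command sits above an 8-bit index field.
 */
off_t offset = ((off_t)MLX5_MMAP_GET_CLOCK_INFO_CMD << 8) |	/* command 7 */
	       MLX5_IB_CLOCK_INFO_V1;				/* index 0   */
/* mmap() is then given offset * mdev->page_size as its file offset. */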
@@ -1027,6 +1044,14 @@ static int mlx5_init_context(struct verbs_device *vdev,
mlx5_map_internal_clock(mdev, ctx);
}
+	context->clock_info_page = NULL;
+	if (resp.response_length + sizeof(resp.ibv_resp) >=
+	    offsetof(struct mlx5_alloc_ucontext_resp, clock_info_versions) +
+	    sizeof(resp.clock_info_versions) &&
+	    (resp.clock_info_versions & (1 << MLX5_IB_CLOCK_INFO_V1))) {
+		mlx5_map_clock_info(mdev, ctx);
+	}
+
mlx5_read_env(&vdev->device, context);
mlx5_spinlock_init(&context->hugetlb_lock);
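
The guard in mlx5_init_context() follows the extensible-response convention: a new field in mlx5_alloc_ucontext_resp may only be read if the kernel's reported response_length actually covers it, and only then is the MLX5_IB_CLOCK_INFO_V1 bit tested. A hypothetical helper (not part of this patch) makes the pattern explicit:

/* Hypothetical helper expressing the same test: did the kernel's response
 * extend far enough to cover this field? */
#define MLX5_UCTX_RESP_COVERS(resp, field)				\
	((resp)->response_length + sizeof((resp)->ibv_resp) >=		\
	 offsetof(struct mlx5_alloc_ucontext_resp, field) +		\
	 sizeof((resp)->field))

/* The check above then reads:
 *	if (MLX5_UCTX_RESP_COVERS(&resp, clock_info_versions) &&
 *	    (resp.clock_info_versions & (1 << MLX5_IB_CLOCK_INFO_V1)))
 *		mlx5_map_clock_info(mdev, ctx);
 */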
@@ -1107,6 +1132,8 @@ static void mlx5_cleanup_context(struct verbs_device *device,
if (context->hca_core_clock)
munmap(context->hca_core_clock - context->core_clock.offset,
page_size);
+	if (context->clock_info_page)
+		munmap((void *)context->clock_info_page, page_size);
close_debug_file(context);
}
@@ -60,6 +60,7 @@ enum {
MLX5_MMAP_GET_CONTIGUOUS_PAGES_CMD = 1,
MLX5_MMAP_GET_CORE_CLOCK_CMD = 5,
MLX5_MMAP_ALLOC_WC = 6,
+	MLX5_MMAP_GET_CLOCK_INFO_CMD = 7,
};
enum {
@@ -287,6 +288,7 @@ struct mlx5_context {
uint64_t mask;
} core_clock;
void *hca_core_clock;
+	const struct mlx5_ib_clock_info *clock_info_page;
struct ibv_tso_caps cached_tso_caps;
int cmds_supp_uhw;
uint32_t uar_size;
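
clock_info_page is stored const because user space only ever reads the shared page while the kernel keeps updating it; sig is the consistency marker a reader can use to notice a concurrent update. The exact kernel-side protocol (for example an "updating" flag or an odd/even counter in sig) is not visible in this patch, so the following seqlock-style read is only a sketch of how a consumer might take a stable snapshot:

/*
 * Sketch only: retry-based snapshot of the shared page, assuming the kernel
 * changes 'sig' around every update of the other fields.
 */
static int clock_info_snapshot(const volatile struct mlx5_ib_clock_info *ci,
			       struct mlx5_ib_clock_info *snap)
{
	int retries = 10;
	uint32_t sig;

	do {
		sig = ci->sig;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		snap->nsec        = ci->nsec;
		snap->last_cycles = ci->last_cycles;
		snap->frac        = ci->frac;
		snap->mult        = ci->mult;
		snap->shift       = ci->shift;
		snap->mask        = ci->mask;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (sig != ci->sig && --retries);

	return retries ? 0 : EBUSY;
}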