@@ -47,6 +47,7 @@ uint64_t flags;
uint64_t comp_mask; /* Use enum mlx5dv_context_comp_mask */
struct mlx5dv_cqe_comp_caps cqe_comp_caps;
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5dv_striding_rq_caps striding_rq_caps;
+uint32_t tunnel_offloads_caps;
.in -8
};
@@ -71,7 +72,8 @@ enum mlx5dv_context_comp_mask {
.in +8
MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
-MLX5DV_CONTEXT_MASK_RESERVED = 1 << 3,
+MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
+MLX5DV_CONTEXT_MASK_RESERVED = 1 << 4,
.in -8
};
@@ -84,6 +86,17 @@ MLX5DV_SW_PARSING_CSUM = 1 << 1,
MLX5DV_SW_PARSING_LSO = 1 << 2,
.in -8
};
+
+.PP
+.nf
+enum mlx5dv_tunnel_offloads {
+.in +8
+MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
+MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
+MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
+.in -8
+};
+
.fi
.SH "RETURN VALUE"
0 on success or the value of errno on failure (which indicates the failure reason).
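For reference, a minimal caller-side sketch of the comp_mask handshake this man page documents: the caller sets MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS on input, and the library echoes the bit back only when it actually fills tunnel_offloads_caps, so an older library is detected by the bit staying clear. The query_tunnel_caps() helper name is an assumption, not code from this patch, and it expects an already-opened mlx5 device context.

/* Hypothetical helper, not part of this patch; assumes ctx is an
 * already-opened mlx5 device context. */
#include <stdio.h>
#include <infiniband/mlx5dv.h>

static void query_tunnel_caps(struct ibv_context *ctx)
{
	struct mlx5dv_context attrs = {0};

	/* Request the new field on input... */
	attrs.comp_mask = MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;

	if (mlx5dv_query_device(ctx, &attrs))
		return;

	/* ...and trust it on output only if the bit was echoed back,
	 * which a library predating this patch will not do. */
	if (!(attrs.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS))
		return;

	if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN)
		printf("VXLAN\n");
	if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)
		printf("GRE\n");
	if (attrs.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE)
		printf("GENEVE\n");
}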
@@ -312,6 +312,8 @@ struct mlx5_query_device_ex_resp {
__u32 flags; /* Use enum mlx5_query_dev_resp_flags */
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5_striding_rq_caps striding_rq_caps;
+ __u32 tunnel_offloads_caps;
+ __u32 reserved;
};

#endif /* MLX5_ABI_H */
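Worth noting on the hunk above: the kernel/user response grows by exactly two __u32 fields, the capability word plus an explicit pad, which presumably keeps sizeof(struct mlx5_query_device_ex_resp) a multiple of 8 bytes. That rationale is inferred from the "reserved" field, not stated in the patch; a hedged compile-time check, compiled somewhere the provider-internal mlx5-abi.h is visible:

/* Sketch only: verifies the inferred alignment invariant behind the
 * added "reserved" pad word. */
#include <assert.h>
static_assert(sizeof(struct mlx5_query_device_ex_resp) % 8 == 0,
	      "query_device_ex response should stay 8-byte aligned");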
@@ -646,6 +646,11 @@ int mlx5dv_query_device(struct ibv_context *ctx_in,
comp_mask_out |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
}

+ if (attrs_out->comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ attrs_out->tunnel_offloads_caps = mctx->tunnel_offloads_caps;
+ comp_mask_out |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+ }
+
attrs_out->comp_mask = comp_mask_out;

return 0;
@@ -293,6 +293,7 @@ struct mlx5_context {
struct mlx5dv_ctx_allocators extern_alloc;
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5dv_striding_rq_caps striding_rq_caps;
+ uint32_t tunnel_offloads_caps;
};

struct mlx5_bitmap {
@@ -61,7 +61,8 @@ enum mlx5dv_context_comp_mask {
MLX5DV_CONTEXT_MASK_CQE_COMPRESION = 1 << 0,
MLX5DV_CONTEXT_MASK_SWP = 1 << 1,
MLX5DV_CONTEXT_MASK_STRIDING_RQ = 1 << 2,
- MLX5DV_CONTEXT_MASK_RESERVED = 1 << 3,
+ MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
+ MLX5DV_CONTEXT_MASK_RESERVED = 1 << 4,
};

struct mlx5dv_cqe_comp_caps {
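The RESERVED entry in the hunk above is not an ABI bit of its own but a sentinel marking the first unused comp_mask bit, which is why it moves from 1 << 3 to 1 << 4 once TUNNEL_OFFLOADS takes its old value. A sketch of the usual validation idiom built on such a sentinel (hypothetical helper, not code from this patch):

#include <errno.h>
#include <stdint.h>
#include <infiniband/mlx5dv.h>

/* Hypothetical: reject any comp_mask bit at or above the sentinel,
 * i.e. anything this library version does not know about. */
static int check_comp_mask(uint64_t user_mask)
{
	return (user_mask & ~(uint64_t)(MLX5DV_CONTEXT_MASK_RESERVED - 1)) ?
	       EINVAL : 0;
}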
@@ -82,6 +83,12 @@ struct mlx5dv_striding_rq_caps {
uint32_t supported_qpts;
};

+enum mlx5dv_tunnel_offloads {
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0,
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1,
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
+};
+
/*
* Direct verbs device-specific attributes
*/
@@ -92,6 +99,7 @@ struct mlx5dv_context {
struct mlx5dv_cqe_comp_caps cqe_comp_caps;
struct mlx5dv_sw_parsing_caps sw_parsing_caps;
struct mlx5dv_striding_rq_caps striding_rq_caps;
+ uint32_t tunnel_offloads_caps;
};

enum mlx5dv_context_flags {
@@ -2214,6 +2214,7 @@ int mlx5_query_device_ex(struct ibv_context *context,
mctx->cqe_comp_caps = resp.cqe_comp_caps;
mctx->sw_parsing_caps = resp.sw_parsing_caps;
mctx->striding_rq_caps = resp.striding_rq_caps.caps;
+ mctx->tunnel_offloads_caps = resp.tunnel_offloads_caps;

if (resp.flags & MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP)
mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_CQE_128B_COMP;