@@ -99,6 +99,18 @@ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
.in -8
};
+.PP
+.nf
+enum mlx5dv_flow_action_cap_flags {
+.in +8
+MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0, /* Flow action ESP (with AES_GCM keymat) is supported */
+MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1, /* Flow action ESP always returns metadata in the payload */
+MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2, /* ESP (with AES_GCM keymat) supports matching by SPI (rather than hashing against SPI) */
+MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3, /* Flow action ESP supports full offload (with AES_GCM keymat) */
+MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4, /* Flow action ESP (with AES_GCM keymat): the ESN is taken implicitly from the IV */
+.in -8
+};
+
.fi
.SH "RETURN VALUE"
0 on success or the value of errno on failure (which indicates the failure reason).
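For context (not part of the patch itself), a minimal sketch of how an application might consume the new capability field through mlx5dv_query_device(), mirroring the comp_mask handling added in the provider below; the helper name check_esp_offload_caps() is illustrative only:

#include <stdio.h>
#include <infiniband/mlx5dv.h>

/* Sketch: query the ESP flow action capability flags on an already
 * opened mlx5 ibv_context.
 */
static int check_esp_offload_caps(struct ibv_context *ctx)
{
	struct mlx5dv_context dv_attrs = {};

	/* Ask the provider to fill in the new field. */
	dv_attrs.comp_mask = MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS;

	if (mlx5dv_query_device(ctx, &dv_attrs))
		return -1;

	/* The provider reports back only the masks it actually filled. */
	if (!(dv_attrs.comp_mask & MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS))
		return 0; /* flow action caps not reported by this provider/kernel */

	if (dv_attrs.flow_action_flags & MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM)
		printf("ESP with AES_GCM keymat is supported\n");

	return 0;
}
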
@@ -116,7 +116,7 @@ struct mlx5_alloc_ucontext_resp {
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
- __u16 reserved1;
+ __u16 flow_action_flags;
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
@@ -712,6 +712,11 @@ int mlx5dv_query_device(struct ibv_context *ctx_in,
}
}
+ if (attrs_out->comp_mask & MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS) {
+ attrs_out->flow_action_flags = mctx->flow_action_flags;
+ comp_mask_out |= MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS;
+ }
+
attrs_out->comp_mask = comp_mask_out;
return 0;
@@ -1122,6 +1127,8 @@ static struct verbs_context *mlx5_alloc_context(struct ibv_device *ibdev,
mlx5_map_clock_info(mdev, &v_ctx->context);
}
+ context->flow_action_flags = resp.flow_action_flags;
+
mlx5_read_env(ibdev, context);
mlx5_spinlock_init(&context->hugetlb_lock);
@@ -303,6 +303,7 @@ struct mlx5_context {
uint32_t num_dyn_bfregs;
uint32_t *count_dyn_bfregs;
uint32_t start_dyn_bfregs_index;
+ uint16_t flow_action_flags;
};
struct mlx5_bitmap {
@@ -70,6 +70,7 @@ enum mlx5dv_context_comp_mask {
MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS = 1 << 3,
MLX5DV_CONTEXT_MASK_DYN_BFREGS = 1 << 4,
MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE = 1 << 5,
+ MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS = 1 << 6,
};
struct mlx5dv_cqe_comp_caps {
@@ -96,6 +97,14 @@ enum mlx5dv_tunnel_offloads {
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE = 1 << 2,
};
+enum mlx5dv_flow_action_cap_flags {
+ MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
+ MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
+ MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
+ MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
+ MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
+};
+
/*
* Direct verbs device-specific attributes
*/
@@ -109,6 +118,7 @@ struct mlx5dv_context {
uint32_t tunnel_offloads_caps;
uint32_t max_dynamic_bfregs;
uint64_t max_clock_info_update_nsec;
+ uint32_t flow_action_flags; /* use enum mlx5dv_flow_action_cap_flags */
};
enum mlx5dv_context_flags {