@@ -274,10 +274,6 @@ enum packet_merge {
struct mlx5e_packet_merge_param {
enum packet_merge type;
u32 timeout;
- struct {
- u8 match_criteria_type;
- u8 alignment_granularity;
- } shampo;
};

struct mlx5e_params {
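For reference, this is what struct mlx5e_packet_merge_param looks like once the shampo sub-struct is removed (a sketch reconstructed from this hunk, not quoted from the tree):

struct mlx5e_packet_merge_param {
	enum packet_merge type;
	u32 timeout;
};

The SHAMPO match criteria type and alignment granularity are no longer carried in the per-netdev parameters; they are programmed as constants when the RQ context is built, as the next hunk shows.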
@@ -930,9 +930,9 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(rqc, rqc, reservation_timeout,
mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
MLX5_SET(rqc, rqc, shampo_match_criteria_type,
- params->packet_merge.shampo.match_criteria_type);
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
- params->packet_merge.shampo.alignment_granularity);
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);
}
break;
}
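With this hunk, mlx5e_build_rq_param() always programs the extended match criteria and stride alignment granularity for SHAMPO instead of reading them back from params->packet_merge. A sketch of the resulting MLX5_SET() sequence on the SHAMPO path (the surrounding guard is elided here and assumed, not quoted):

	MLX5_SET(rqc, rqc, reservation_timeout,
		 mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
	MLX5_SET(rqc, rqc, shampo_match_criteria_type,
		 MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED);
	MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
		 MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE);

As far as these hunks show, these two constants were the only values ever written into the removed shampo fields, so hardcoding them here is behaviorally equivalent.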
@@ -4047,10 +4047,6 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)

if (enable) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
- new_params.packet_merge.shampo.match_criteria_type =
- MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
- new_params.packet_merge.shampo.alignment_granularity =
- MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
} else {
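After this hunk, enabling or disabling hardware GRO only toggles packet_merge.type; the fixed SHAMPO constants are applied in mlx5e_build_rq_param() instead. A reconstructed sketch of the resulting branch (the final else body lies outside the quoted context and is left elided):

	if (enable) {
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
	} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
		new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
	} else {
		...
	}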