--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -254,6 +254,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
if (rx == ipsec->rx_esw) {
mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
} else {
@@ -357,6 +359,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
goto err_add;
/* Create FT */
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
@@ -411,6 +415,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
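Read together, the rx_create() hunks keep the encap block/unblock strictly paired inside create/teardown: mlx5_eswitch_block_encap() is called immediately before the SA flow table is created, and the err_fs_ft unwind label, which is only reachable once the block has happened, releases it. A condensed sketch of the resulting sequence; the IS_ERR()/PTR_ERR() handling is the usual mlx5 pattern and is not shown in the hunks, and unrelated setup is elided:

	/* Sketch condensed from the hunks above; other labels elided. */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);	/* assumed from the usual pattern */
		goto err_fs_ft;
	}
	/* ... remaining table/group setup ... */
err_fs_ft:
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(rx->status.rule);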
@@ -428,26 +434,19 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (rx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = rx_create(mdev, ipsec, rx, family);
- mlx5_eswitch_block_mode_unlock(mdev, err);
- if (err)
- goto err_out;
+ if (err) {
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
+ }
skip:
rx->ft.refcnt++;
return 0;
-
-err_out:
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
@@ -456,12 +455,8 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
if (--rx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
rx_destroy(ipsec->mdev, ipsec, rx, family);
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
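With the trylock/unlock pair gone, the whole rx_get()/rx_put() flow reduces to block/create on get and destroy/unblock on put. Reassembled from the new-side lines above; the int err; declaration and the second lines of the signatures are inferred from the call sites, since the hunk headers truncate them:

static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}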
@@ -581,6 +576,8 @@ static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
mlx5_destroy_flow_group(tx->sa.group);
}
mlx5_destroy_flow_table(tx->ft.sa);
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(ipsec->mdev);
mlx5_del_flow_rules(tx->status.rule);
mlx5_destroy_flow_table(tx->ft.status);
}
@@ -621,6 +618,8 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
if (err)
goto err_status_rule;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
@@ -687,6 +686,8 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
err_sa_miss:
mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
mlx5_destroy_flow_table(tx->ft.status);
@@ -720,32 +721,22 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (tx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = tx_create(ipsec, tx, ipsec->roce);
if (err) {
- mlx5_eswitch_block_mode_unlock(mdev, err);
- goto err_out;
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
}
if (tx == ipsec->tx_esw)
ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
- mlx5_eswitch_block_mode_unlock(mdev, err);
-
skip:
tx->ft.refcnt++;
return 0;
-
-err_out:
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
@@ -753,19 +744,13 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
if (--tx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
-
if (tx == ipsec->tx_esw) {
mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
}
tx_destroy(ipsec, tx, ipsec->roce);
-
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
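The TX side mirrors RX: tx_create()/tx_destroy() own the encap block/unblock, and tx_put() collapses to destroy-then-unblock just like rx_put(). One point worth noting in tx_get(): ipsec_esw_tx_ft_policy_set() no longer runs with the eswitch mode lock held; it is the elevated num_block_mode count (see the mlx5_eswitch_block_mode() hunks at the end of this patch) that keeps the mode from changing underneath it. The resulting tx_get(), reassembled from the hunk above, with the truncated second signature line and int err; inferred from context:

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}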
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -785,10 +785,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err);
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev);
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
@@ -872,13 +870,8 @@ static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; }
-
-static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {}
-
-static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {}
-
-static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {}
+static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
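At the header level, four lock-style entry points collapse into a symmetric pair, and the !CONFIG_MLX5_ESWITCH stubs shrink to match (blocking trivially succeeds when there is no eswitch). A minimal caller sketch of the intended pairing; my_setup_resources() and my_create() are hypothetical names for illustration, not functions from this patch:

/* Hypothetical caller, illustrating the block/unblock pairing only. */
static int my_create(struct mlx5_core_dev *mdev)
{
	return 0;	/* stands in for real resource creation */
}

static int my_setup_resources(struct mlx5_core_dev *mdev)
{
	int err;

	/* Pin the current eswitch mode for the lifetime of the resources. */
	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = my_create(mdev);
	if (err)
		mlx5_eswitch_unblock_mode(mdev);	/* drop the pin on failure */
	return err;
}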
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3617,65 +3617,32 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
return net_eq(devl_net, netdev_net);
}
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev)
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- /* Failure means no eswitch => not possible to change eswitch mode */
- devl_unlock(devlink);
+ if (!mlx5_esw_allowed(esw))
return 0;
- }
+ /* Take TC into account */
err = mlx5_esw_try_lock(esw);
- if (err < 0) {
- devl_unlock(devlink);
+ if (err < 0)
return err;
- }
- return 0;
-}
-
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
- if (!err)
- esw->offloads.num_block_mode++;
+ esw->offloads.num_block_mode++;
mlx5_esw_unlock(esw);
- devl_unlock(devlink);
+ return 0;
}
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev)
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
-}
-
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
esw->offloads.num_block_mode--;
up_write(&esw->mode_lock);
}
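A subtlety behind the if (err < 0) test in mlx5_eswitch_block_mode(): mlx5_esw_try_lock() returns the current eswitch mode (a non-negative value) on success and -EBUSY when a mode change is in flight, so a plain if (err) would misread success as failure. For reference, its definition in eswitch.c of this era looks roughly like the following (quoted from memory; verify against the tree):

int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock))
		return esw->mode;	/* success: current mode, >= 0 */

	return -EBUSY;			/* mode change in progress */
}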