@@ -42,6 +42,25 @@ static struct net_device *felix_classify_db(struct dsa_db db)
}
}
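+/* Resolve a DSA master, which may be a physical DSA master or a LAG of DSA
+ * masters, to the CPU port index (or the logical LAG port ID) that faces it.
+ */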
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
@@ -422,6 +441,39 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
return BIT(ocelot->num_phys_ports);
}
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
+
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack, "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
+}
+
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
@@ -433,6 +485,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
};
static int felix_tag_8021q_setup(struct dsa_switch *ds)
@@ -501,10 +554,24 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
return dsa_cpu_ports(ds);
}
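+/* Reassign the tag_8021q CPU port of this user port to the CPU port (or
+ * logical LAG port) behind the new DSA master, and recompute the trapping
+ * destinations, which depend on that assignment.
+ */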
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
+
+ return felix_update_trapping_destinations(ds, true);
+}
+
static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
};
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
@@ -664,6 +731,16 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
!!felix->host_flood_mc_mask, true);
}
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -855,8 +932,17 @@ static int felix_lag_join(struct dsa_switch *ds, int port,
struct netdev_lag_upper_info *info)
{
struct ocelot *ocelot = ds->priv;
+ int err;
- return ocelot_port_lag_join(ocelot, port, lag.dev, info);
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info);
+ if (err)
+ return err;
+
+	/* Update the logical LAG port that serves as the tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
@@ -866,7 +952,11 @@ static int felix_lag_leave(struct dsa_switch *ds, int port,
ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+	/* Update the logical LAG port that serves as the tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -1004,6 +1094,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
felix->info->port_sched_speed_set(ocelot, port, speed);
}
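+/* The NPI port can steer host traffic towards a single DSA master, so when
+ * NPI tagging is in use, deny bringing up user ports that are assigned to a
+ * DSA master other than the one behind the NPI port.
+ */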
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -1839,6 +1950,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
@@ -1894,6 +2006,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
@@ -70,6 +70,9 @@ struct felix_tag_proto_ops {
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
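+	/* Reconfigure host connectivity for a port whose DSA master changed */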
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -2036,7 +2036,7 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
/* The logical port number of a LAG is equal to the lowest numbered physical
* port ID present in that LAG. It may change if that port ever leaves the LAG.
*/
-static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
{
int bond_mask = ocelot_get_bond_mask(ocelot, bond);
@@ -2045,6 +2045,7 @@ static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
return __ffs(bond_mask);
}
+EXPORT_SYMBOL_GPL(ocelot_bond_get_id);
static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
struct ocelot_port *cpu)
@@ -948,6 +948,7 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port,
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond);
void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active);
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond);
int ocelot_devlink_sb_register(struct ocelot *ocelot);
void ocelot_devlink_sb_unregister(struct ocelot *ocelot);