@@ -64,13 +64,19 @@ ice_eswitch_br_netdev_to_port(struct net_device *dev)
static void
ice_eswitch_br_ingress_rule_setup(struct ice_adv_lkup_elem *list,
struct ice_adv_rule_info *rule_info,
- const unsigned char *mac,
+ const unsigned char *mac, u16 vid,
u8 pf_id, u16 vf_vsi_idx)
{
list[0].type = ICE_MAC_OFOS;
ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
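+ /* With a valid vid (see ice_eswitch_is_vid_valid()), add a second
+ * lookup that matches the VLAN ID exactly; otherwise the rule
+ * matches on destination MAC alone.
+ */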
+ if (ice_eswitch_is_vid_valid(vid)) {
+ list[1].type = ICE_VLAN_OFOS;
+ list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+ list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ }
+
rule_info->sw_act.vsi_handle = vf_vsi_idx;
rule_info->sw_act.flag |= ICE_FLTR_RX;
rule_info->sw_act.src = pf_id;
@@ -80,13 +86,19 @@ ice_eswitch_br_ingress_rule_setup(struct ice_adv_lkup_elem *list,
static void
ice_eswitch_br_egress_rule_setup(struct ice_adv_lkup_elem *list,
struct ice_adv_rule_info *rule_info,
- const unsigned char *mac,
+ const unsigned char *mac, u16 vid,
u16 pf_vsi_idx)
{
list[0].type = ICE_MAC_OFOS;
ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
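+ /* As on ingress, match the exact VLAN ID when a valid vid is given */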
+ if (ice_eswitch_is_vid_valid(vid)) {
+ list[1].type = ICE_VLAN_OFOS;
+ list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+ list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ }
+
rule_info->sw_act.vsi_handle = pf_vsi_idx;
rule_info->sw_act.flag |= ICE_FLTR_TX;
rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
@@ -110,14 +122,19 @@ ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
static struct ice_rule_query_data *
ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
- const unsigned char *mac)
+ const unsigned char *mac, u16 vid)
{
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data *rule;
struct ice_adv_lkup_elem *list;
- u16 lkups_cnt = 1;
+ u16 lkups_cnt;
int err;
+
+ lkups_cnt = ice_eswitch_is_vid_valid(vid) ? 2 : 1;
+
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return ERR_PTR(-ENOMEM);
@@ -131,11 +148,11 @@ ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
switch (port_type) {
case ICE_ESWITCH_BR_UPLINK_PORT:
ice_eswitch_br_egress_rule_setup(list, &rule_info, mac,
- vsi_idx);
+ vid, vsi_idx);
break;
case ICE_ESWITCH_BR_VF_REPR_PORT:
ice_eswitch_br_ingress_rule_setup(list, &rule_info, mac,
- hw->pf_id, vsi_idx);
+ vid, hw->pf_id, vsi_idx);
break;
default:
err = -EINVAL;
@@ -164,13 +181,18 @@ ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
static struct ice_rule_query_data *
ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
- const unsigned char *mac)
+ const unsigned char *mac, u16 vid)
{
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data *rule;
struct ice_adv_lkup_elem *list;
- const u16 lkups_cnt = 1;
int err = -ENOMEM;
+ u16 lkups_cnt;
+
+ lkups_cnt = ice_eswitch_is_vid_valid(vid) ? 2 : 1;
+
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
@@ -184,6 +206,12 @@ ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
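+ /* Guard rules match on source MAC; add the exact VLAN ID as a second
+ * lookup when a valid vid is supplied.
+ */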
+ if (ice_eswitch_is_vid_valid(vid)) {
+ list[1].type = ICE_VLAN_OFOS;
+ list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+ list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ }
+
rule_info.allow_pass_l2 = true;
rule_info.sw_act.vsi_handle = vsi_idx;
rule_info.sw_act.fltr_act = ICE_NOP;
@@ -205,7 +233,7 @@ ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
static struct ice_esw_br_flow *
ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
- int port_type, const unsigned char *mac)
+ int port_type, const unsigned char *mac, u16 vid)
{
struct ice_rule_query_data *fwd_rule, *guard_rule;
struct ice_esw_br_flow *flow;
@@ -215,7 +243,8 @@ ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
if (!flow)
return ERR_PTR(-ENOMEM);
- fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac);
+ fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
+ vid);
err = PTR_ERR_OR_ZERO(fwd_rule);
if (err) {
dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
@@ -224,7 +253,7 @@ ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
goto err_fwd_rule;
}
- guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac);
+ guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
err = PTR_ERR_OR_ZERO(guard_rule);
if (err) {
dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
@@ -278,6 +307,30 @@ ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
kfree(flow);
}
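+/* Look up the VLAN metadata configured on a bridge port. Returns
+ * ERR_PTR(-EINVAL) when the port or the vid is not offloaded on it.
+ */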
+static struct ice_esw_br_vlan *
+ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port) {
+ dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan) {
+ dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
+ vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vlan;
+}
+
static void
ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
struct ice_esw_br_fdb_entry *fdb_entry)
@@ -347,10 +400,25 @@ ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
struct device *dev = ice_pf_to_dev(pf);
struct ice_esw_br_fdb_entry *fdb_entry;
struct ice_esw_br_flow *flow;
+ struct ice_esw_br_vlan *vlan;
struct ice_hw *hw = &pf->hw;
unsigned long event;
int err;
+ /* Tagged FDB entries can only be offloaded when VLAN filtering is
+ * enabled on the bridge.
+ */
+ if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
+ return;
+
+ if ((bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING)) {
+ vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
+ vid);
+ if (IS_ERR(vlan)) {
+ dev_err(dev, "Failed to find vlan lookup, err: %ld\n",
+ PTR_ERR(vlan));
+ return;
+ }
+ }
+
fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
if (fdb_entry)
ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
@@ -362,7 +430,7 @@ ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
}
flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
- br_port->type, mac);
+ br_port->type, mac, vid);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto err_add_flow;
@@ -521,6 +589,207 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+}
+
+static void
+ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
+{
+ if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
+ return;
+
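+ /* Rules programmed under the previous mode are stale once filtering
+ * is toggled; flush them so subsequent FDB add events can re-create
+ * the offloads under the new mode.
+ */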
+ ice_eswitch_br_fdb_flush(bridge);
+ if (enable)
+ bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
+ else
+ bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
+}
+
+static void
+ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ xa_erase(&port->vlans, vlan->vid);
+ kfree(vlan);
+}
+
+static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
+{
+ struct ice_esw_br_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
+static struct ice_esw_br_vlan *
+ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
+{
+ struct ice_esw_br_vlan *vlan;
+ int err;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+
+ err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
+ if (err) {
+ kfree(vlan);
+ return ERR_PTR(err);
+ }
+
+ return vlan;
+}
+
+static int
+ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return -EINVAL;
+
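+ /* Keep an already-offloaded VLAN if the flags match, otherwise
+ * replace it.
+ */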
+ vlan = xa_load(&port->vlans, vid);
+ if (vlan) {
+ if (vlan->flags == flags)
+ return 0;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+ }
+
+ vlan = ice_eswitch_br_vlan_create(vid, flags, port);
+ if (IS_ERR(vlan)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
+ vid, vsi_idx);
+ return PTR_ERR(vlan);
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return;
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan)
+ return;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
+static int
+ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ return ice_eswitch_br_port_vlan_add(br_port->bridge,
+ br_port->vsi_idx, vlan->vid,
+ vlan->flags, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
+ vlan->vid);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ice_eswitch_br_vlan_filtering_set(br_port->bridge,
+ attr->u.vlan_filtering);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_attr_set);
+ break;
+ default:
+ err = 0;
+ }
+
+ return notifier_from_errno(err);
+}
+
static void
ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
struct ice_esw_br_port *br_port)
@@ -539,6 +808,7 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
vsi->vf->repr->br_port = NULL;
xa_erase(&bridge->ports, br_port->vsi_idx);
+ ice_eswitch_br_port_vlans_flush(br_port);
kfree(br_port);
}
@@ -551,6 +821,8 @@ ice_eswitch_br_port_init(struct ice_esw_br *bridge)
if (!br_port)
return ERR_PTR(-ENOMEM);
+ xa_init(&br_port->vlans);
+
br_port->bridge = bridge;
return br_port;
@@ -858,6 +1130,7 @@ ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
return;
unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
unregister_switchdev_notifier(&br_offloads->switchdev_nb);
destroy_workqueue(br_offloads->wq);
/* Although notifier block is unregistered just before,
@@ -901,6 +1174,15 @@ ice_eswitch_br_offloads_init(struct ice_pf *pf)
goto err_reg_switchdev_nb;
}
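+ /* Port object (VLAN) additions/removals and attribute changes are
+ * delivered on the blocking switchdev chain, which takes a separate
+ * notifier.
+ */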
+ br_offloads->switchdev_blk.notifier_call =
+ ice_eswitch_br_event_blocking;
+ err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge blocking switchdev notifier\n");
+ goto err_reg_switchdev_blk;
+ }
+
br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
err = register_netdevice_notifier(&br_offloads->netdev_nb);
if (err) {
@@ -912,6 +1194,8 @@ ice_eswitch_br_offloads_init(struct ice_pf *pf)
return 0;
err_reg_netdev_nb:
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+err_reg_switchdev_blk:
unregister_switchdev_notifier(&br_offloads->switchdev_nb);
err_reg_switchdev_nb:
destroy_workqueue(br_offloads->wq);
@@ -42,6 +42,11 @@ struct ice_esw_br_port {
struct ice_vsi *vsi;
enum ice_esw_br_port_type type;
u16 vsi_idx;
+ struct xarray vlans;
+};
+
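+/* Bridge-wide flags kept in ice_esw_br::flags */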
+enum {
+ ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0),
};
struct ice_esw_br {
@@ -52,12 +57,14 @@ struct ice_esw_br {
struct list_head fdb_list;
int ifindex;
+ u32 flags;
};
struct ice_esw_br_offloads {
struct ice_pf *pf;
struct ice_esw_br *bridge;
struct notifier_block netdev_nb;
+ struct notifier_block switchdev_blk;
struct notifier_block switchdev_nb;
struct workqueue_struct *wq;
@@ -71,6 +78,11 @@ struct ice_esw_br_fdb_work {
unsigned long event;
};
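+/* Per-port VLAN metadata, stored in ice_esw_br_port::vlans keyed by vid */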
+struct ice_esw_br_vlan {
+ u16 vid;
+ u16 flags;
+};
+
#define ice_nb_to_br_offloads(nb, nb_name) \
container_of(nb, \
struct ice_esw_br_offloads, \
@@ -81,6 +93,15 @@ struct ice_esw_br_fdb_work {
struct ice_esw_br_fdb_work, \
work)
+static inline bool ice_eswitch_is_vid_valid(u16 vid)
+{
+ /* In trunk VLAN mode, for untagged traffic the bridge sends requests
+ * to offload VLAN 1 with pvid and untagged flags set. Since these
+ * flags are not supported, add a MAC filter instead.
+ */
+ return vid > 1;
+}
+
void
ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
int