@@ -16,9 +16,32 @@ struct sparx5_tc_flower_parse_usage {
struct flow_cls_offload *fco;
struct flow_rule *frule;
struct vcap_rule *vrule;
+ u16 l3_proto;
+ u8 l4_proto;
unsigned int used_keys;
};
+/* These protocols have dedicated keysets in IS2 and a TC dissector.
+ * ETH_P_ARP does not have a TC dissector.
+ */
+static u16 sparx5_tc_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+static bool sparx5_tc_is_known_etype(u16 etype)
+{
+ int idx;
+
+ /* For now this only knows about IS2 traffic classification */
+ for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
+ if (sparx5_tc_known_etypes[idx] == etype)
+ return true;
+
+ return false;
+}
+
static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
{
enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
@@ -54,9 +77,368 @@ static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_
return err;
}
+static int
+sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
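+ /* Only add IPv4 address keys when the rule matches on ETH_P_IP;
+  * the dissector is still marked as handled below
+  */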
+ if (st->l3_proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs mt;
+
+ flow_rule_match_ipv4_addrs(st->frule, &mt);
+ if (mt.mask->src) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_SIP,
+ be32_to_cpu(mt.key->src),
+ be32_to_cpu(mt.mask->src));
+ if (err)
+ goto out;
+ }
+ if (mt.mask->dst) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_DIP,
+ be32_to_cpu(mt.key->dst),
+ be32_to_cpu(mt.mask->dst));
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IPV6) {
+ struct flow_match_ipv6_addrs mt;
+ struct vcap_u128_key sip;
+ struct vcap_u128_key dip;
+
+ flow_rule_match_ipv6_addrs(st->frule, &mt);
+ /* Check if address masks are non-zero */
+ if (!ipv6_addr_any(&mt.mask->src)) {
+ vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
+ vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_SIP, &sip);
+ if (err)
+ goto out;
+ }
+ if (!ipv6_addr_any(&mt.mask->dst)) {
+ vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
+ vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_DIP, &dip);
+ if (err)
+ goto out;
+ }
+ }
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_control mt;
+ u32 value, mask;
+ int err = 0;
+
+ flow_rule_match_control(st->frule, &mt);
+
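+ /* Derive the 2-bit L3_FRAGMENT_TYPE value from the dissector flags:
+  * 0: not fragmented, 1: initial fragment, 3: follow-up fragment
+  */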
+ if (mt.mask->flags) {
+ if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+ value = 1; /* initial fragment */
+ mask = 0x3;
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_ports mt;
+ u16 value, mask;
+ int err = 0;
+
+ flow_rule_match_ports(st->frule, &mt);
+
+ if (mt.mask->src) {
+ value = be16_to_cpu(mt.key->src);
+ mask = be16_to_cpu(mt.mask->src);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->dst) {
+ value = be16_to_cpu(mt.key->dst);
+ mask = be16_to_cpu(mt.mask->dst);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic mt;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &mt);
+
+ if (mt.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(mt.key->n_proto);
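+ /* Unknown etypes are matched on the ETYPE key; IPv4 and IPv6 have
+  * dedicated IS2 keysets and are selected via the IP4_IS bit
+  */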
+ if (!sparx5_tc_is_known_etype(st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IPV6) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ }
+ }
+
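+ /* TCP and UDP are distinguished by the TCP_IS bit; any other
+  * protocol is matched on the full L3_IP_PROTO value
+  */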
+ if (mt.mask->ip_proto) {
+ st->l4_proto = mt.key->ip_proto;
+ if (st->l4_proto == IPPROTO_TCP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+ struct flow_match_vlan mt;
+ int err = 0;
+
+ flow_rule_match_vlan(st->frule, &mt);
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
+
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_tcp mt;
+ u16 tcp_flags_mask;
+ u16 tcp_flags_key;
+ enum vcap_bit val;
+ int err = 0;
+
+ flow_rule_match_tcp(st->frule, &mt);
+ tcp_flags_key = be16_to_cpu(mt.key->flags);
+ tcp_flags_mask = be16_to_cpu(mt.mask->flags);
+
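+ /* Each TCP flag selected by the mask is encoded as its own VCAP key bit */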
+ if (tcp_flags_mask & TCPHDR_FIN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_FIN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_SYN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_SYN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_RST) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_RST)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_PSH) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_PSH)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_ACK) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_ACK)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_URG) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_URG)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ return err;
+}
+
+static int
+sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
+{
+ struct flow_match_ip mt;
+ int err = 0;
+
+ flow_rule_match_ip(st->frule, &mt);
+
+ if (mt.mask->tos) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
+ mt.key->tos,
+ mt.mask->tos);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
+ return err;
+}
+
static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
- /* More dissector handlers will be added here later */
[FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
};
static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
@@ -1073,6 +1073,17 @@ int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
}
EXPORT_SYMBOL_GPL(vcap_rule_add_key_u72);
+/* Add a 128 bit key with value and mask to the rule */
+int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u128_key *fieldval)
+{
+ struct vcap_client_keyfield_data data;
+
+ memcpy(&data.u128, fieldval, sizeof(data.u128));
+ return vcap_rule_add_key(rule, key, VCAP_FIELD_U128, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_key_u128);
+
static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
struct vcap_client_actionfield *field,
struct vcap_client_actionfield_data *data)
@@ -176,6 +176,8 @@ int vcap_rule_add_key_u48(struct vcap_rule *rule, enum vcap_key_field key,
struct vcap_u48_key *fieldval);
int vcap_rule_add_key_u72(struct vcap_rule *rule, enum vcap_key_field key,
struct vcap_u72_key *fieldval);
+int vcap_rule_add_key_u128(struct vcap_rule *rule, enum vcap_key_field key,
+ struct vcap_u128_key *fieldval);
int vcap_rule_add_action_bit(struct vcap_rule *rule,
enum vcap_action_field action, enum vcap_bit val);
int vcap_rule_add_action_u32(struct vcap_rule *rule,