@@ -3,6 +3,32 @@
#include "conntrack.h"
+/**
+ * get_hashentry() - Wrapper around hashtable lookup.
+ * @ht: hashtable where entry could be found
+ * @key: key to lookup
+ * @params: hashtable params
+ * @size: size of entry to allocate if not in table
+ *
+ * Returns an entry from a hashtable. If the entry does not exist
+ * yet, allocate memory for it and return the new entry; an ERR_PTR
+ * is returned if the allocation fails.
+ */
+static void *get_hashentry(struct rhashtable *ht, void *key,
+			   const struct rhashtable_params params, size_t size)
+{
+	void *result;
+
+	result = rhashtable_lookup_fast(ht, key, params);
+
+	if (result)
+		return result;
+
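+	/* Not found: allocate a zeroed entry. The caller is responsible for
+	 * initialising it and, if needed, inserting it into the table.
+	 */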
+	result = kzalloc(size, GFP_KERNEL);
+	if (!result)
+		return ERR_PTR(-ENOMEM);
+
+	return result;
+}
+
bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
struct flow_action_entry *act;
@@ -29,11 +55,88 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
return false;
}
+static struct
+nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
+					 u16 zone, bool wildcarded)
+{
+	struct nfp_fl_ct_zone_entry *zt;
+	int err;
+
+	if (wildcarded && priv->ct_zone_wc)
+		return priv->ct_zone_wc;
+
+	if (!wildcarded) {
+		zt = get_hashentry(&priv->ct_zone_table, &zone,
+				   nfp_zone_table_params, sizeof(*zt));
+
+		/* If priv is set this is an existing entry, just return it */
+		if (IS_ERR(zt) || zt->priv)
+			return zt;
+	} else {
+		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
+		if (!zt)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	zt->zone = zone;
+	zt->priv = priv;
+	zt->nft = NULL;
+
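+	/* Wildcarded zone matches share a single dedicated entry which is
+	 * kept outside of the zone hashtable.
+	 */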
+	if (wildcarded) {
+		priv->ct_zone_wc = zt;
+	} else {
+		err = rhashtable_insert_fast(&priv->ct_zone_table,
+					     &zt->hash_node,
+					     nfp_zone_table_params);
+		if (err)
+			goto err_zone_insert;
+	}
+
+	return zt;
+
+err_zone_insert:
+	kfree(zt);
+	return ERR_PTR(err);
+}
+
+static struct flow_action_entry *get_flow_act(struct flow_cls_offload *flow,
+					      enum flow_action_id act_id)
+{
+	struct flow_action_entry *act = NULL;
+	int i;
+
+	flow_action_for_each(i, act, &flow->rule->action) {
+		if (act->id == act_id)
+			return act;
+	}
+	return NULL;
+}
+
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
struct net_device *netdev,
struct flow_cls_offload *flow,
struct netlink_ext_ack *extack)
{
+	struct flow_action_entry *ct_act;
+	struct nfp_fl_ct_zone_entry *zt;
+
+	ct_act = get_flow_act(flow, FLOW_ACTION_CT);
+	if (!ct_act) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "unsupported offload: Conntrack action empty in conntrack offload");
+		return -EOPNOTSUPP;
+	}
+
+	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
+	if (IS_ERR(zt)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "offload error: Could not create zone table entry");
+		return PTR_ERR(zt);
+	}
+
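+	/* Save the nft flowtable referenced by the ct action; only the
+	 * first one seen for this zone is kept.
+	 */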
+	if (!zt->nft)
+		zt->nft = ct_act->ct.flow_table;
+
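+	/* Only the zone bookkeeping is done at this point; the flow itself
+	 * is still rejected.
+	 */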
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack action not supported");
return -EOPNOTSUPP;
}
@@ -43,6 +146,27 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
struct flow_cls_offload *flow,
struct netlink_ext_ack *extack)
{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+	struct nfp_fl_ct_zone_entry *zt;
+	bool wildcarded = false;
+	struct flow_match_ct ct;
+
+	flow_rule_match_ct(rule, &ct);
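+	/* An all-zero ct_zone mask means the zone is fully wildcarded;
+	 * anything other than an exact match is not supported.
+	 */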
+	if (!ct.mask->ct_zone) {
+		wildcarded = true;
+	} else if (ct.mask->ct_zone != U16_MAX) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "unsupported offload: partially wildcarded ct_zone is not supported");
+		return -EOPNOTSUPP;
+	}
+
+	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
+	if (IS_ERR(zt)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "offload error: Could not create zone table entry");
+		return PTR_ERR(zt);
+	}
+
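+	/* As in the pre_ct handler, only the zone bookkeeping is done here;
+	 * the conntrack match itself is still rejected.
+	 */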
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: Conntrack match not supported");
return -EOPNOTSUPP;
}
@@ -194,6 +194,7 @@ struct nfp_fl_internal_ports {
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
* @ct_zone_table: Hash table used to store the different zones
+ * @ct_zone_wc: Special zone entry for wildcarded zone matches
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -229,6 +230,7 @@ struct nfp_flower_priv {
int pre_tun_rule_cnt;
struct rhashtable merge_table;
struct rhashtable ct_zone_table;
+	struct nfp_fl_ct_zone_entry *ct_zone_wc;
};
/**
@@ -585,6 +585,9 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
static void nfp_free_zone_table_entry(void *ptr, void *arg)
{
+	struct nfp_fl_ct_zone_entry *zt = ptr;
+
+	kfree(zt);
}
void nfp_flower_metadata_cleanup(struct nfp_app *app)
@@ -602,6 +605,7 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->ct_zone_table,
nfp_free_zone_table_entry, NULL);
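+	/* The wildcarded zone entry is not part of the zone hashtable, so
+	 * free it separately.
+	 */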
+	kfree(priv->ct_zone_wc);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);