@@ -44,6 +44,7 @@ struct tc_action {
u8 hw_stats;
u8 used_hw_stats;
bool used_hw_stats_valid;
+ u32 in_hw_count;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -236,6 +237,12 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
spin_unlock(&a->tcfa_lock);
}

+static inline void flow_action_hw_count_set(struct tc_action *act,
+ u32 hw_count)
+{
+ act->in_hw_count = hw_count;
+}
+
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
u64 drops, bool hw);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -261,6 +261,29 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
#define tcf_act_for_each_action(i, a, actions) \
for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)

+static inline bool tc_act_skip_hw(u32 flags)
+{
+ return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
+}
+
+static inline bool tc_act_skip_sw(u32 flags)
+{
+ return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
+}
+
+static inline bool tc_act_in_hw(struct tc_action *act)
+{
+ return !!act->in_hw_count;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static inline bool tc_act_flags_valid(u32 flags)
+{
+ flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
+
+ return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
+}
+
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
u64 bytes, u64 packets, u64 drops, u64 lastuse,
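
The tc_act_flags_valid() helper added above is easy to misread: after masking, the XOR only evaluates to zero (false) when both skip bits are present, so any single flag, or none at all, passes. A minimal standalone sketch of the same check, reusing the bit values from the uapi hunk below (the local SKIP_HW/SKIP_SW macros and the test harness are illustrative, not part of the patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SKIP_HW (1 << 1)	/* mirrors TCA_ACT_FLAGS_SKIP_HW */
#define SKIP_SW (1 << 2)	/* mirrors TCA_ACT_FLAGS_SKIP_SW */

/* Same shape as tc_act_flags_valid(): false only when both bits are set. */
static bool flags_valid(uint32_t flags)
{
	flags &= SKIP_HW | SKIP_SW;

	return flags ^ (SKIP_HW | SKIP_SW);
}

int main(void)
{
	assert(flags_valid(0));			/* neither set: offload and run in SW */
	assert(flags_valid(SKIP_HW));		/* software only */
	assert(flags_valid(SKIP_SW));		/* hardware only */
	assert(!flags_valid(SKIP_HW | SKIP_SW));	/* both set: rejected */
	return 0;
}
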
@@ -19,13 +19,16 @@ enum {
TCA_ACT_FLAGS,
TCA_ACT_HW_STATS,
TCA_ACT_USED_HW_STATS,
+ TCA_ACT_IN_HW_COUNT,
__TCA_ACT_MAX
};

/* See other TCA_ACT_FLAGS_ * flags in include/net/act_api.h. */
-#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
- * actions stats.
- */
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS (1 << 0) /* Don't use percpu allocator for
+ * actions stats.
+ */
+#define TCA_ACT_FLAGS_SKIP_HW (1 << 1) /* don't offload action to HW */
+#define TCA_ACT_FLAGS_SKIP_SW (1 << 2) /* don't use action in SW */

/* tca HW stats type
* When user does not pass the attribute, he does not care.
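
On the userspace side, the new bits travel in the existing TCA_ACT_FLAGS attribute, which is a bitfield32: only bits named in the selector are acted on by the kernel. A rough sketch of how a caller might build that payload to request hardware-only execution; the build_act_flags() helper is hypothetical, only struct nla_bitfield32 and the flag name come from the kernel headers:

#include <linux/netlink.h>	/* struct nla_bitfield32 */
#include <linux/pkt_cls.h>	/* TCA_ACT_FLAGS_SKIP_SW (after this patch) */

/* Hypothetical helper: payload of a TCA_ACT_FLAGS attribute asking the
 * kernel to install the action in hardware only (skip the SW datapath). */
static struct nla_bitfield32 build_act_flags(void)
{
	struct nla_bitfield32 flags = {
		.value    = TCA_ACT_FLAGS_SKIP_SW,
		.selector = TCA_ACT_FLAGS_SKIP_SW,
	};

	return flags;
}

In iproute2 this is expected to surface as skip_sw / skip_hw keywords on tc actions, assuming a matching iproute2 release.
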
@@ -751,6 +751,9 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
jmp_prgcnt -= 1;
continue;
}
+
+ if (tc_act_skip_sw(a->tcfa_flags))
+ continue;
repeat:
ret = a->ops->act(skb, a, res);
if (ret == TC_ACT_REPEAT)
@@ -856,6 +859,9 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
a->tcfa_flags, a->tcfa_flags))
goto nla_put_failure;

+ if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
+ goto nla_put_failure;
+
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
@@ -935,7 +941,9 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
.len = TC_COOKIE_MAX_SIZE },
[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
- [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
+ [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
+ TCA_ACT_FLAGS_SKIP_HW |
+ TCA_ACT_FLAGS_SKIP_SW),
[TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};
@@ -1048,8 +1056,13 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
}
}
hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
- if (tb[TCA_ACT_FLAGS])
+ if (tb[TCA_ACT_FLAGS]) {
userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
+ if (!tc_act_flags_valid(userflags.value)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ }

err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
userflags.value | flags, extack);
@@ -1161,6 +1174,7 @@ static int flow_action_init(struct flow_offload_action *fl_action,
}

static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
+ u32 *hw_count,
struct netlink_ext_ack *extack)
{
int err;
@@ -1173,6 +1187,9 @@ static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
if (err < 0)
return err;

+ if (hw_count)
+ *hw_count = err;
+
return 0;
}
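
The reworked tcf_action_offload_cmd() assumes a convention where the underlying offload call returns a negative errno on failure and otherwise the number of devices that accepted the action; that count is handed back through *hw_count and later stored as in_hw_count. A toy model of that contract (toy_offload_cmd() and the hard-coded device count are purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Toy model: a negative return is an error, a non-negative return is the
 * number of devices that installed the action; the caller keeps it as
 * in_hw_count. */
static int toy_offload_cmd(int devices_that_accepted, uint32_t *hw_count)
{
	int err = devices_that_accepted;	/* stand-in for the real setup call */

	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = err;

	return 0;
}

int main(void)
{
	uint32_t in_hw_count = 0;

	if (!toy_offload_cmd(2, &in_hw_count))
		printf("action installed on %u device(s)\n", in_hw_count);

	return 0;
}
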
@@ -1180,12 +1197,17 @@ static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
static int tcf_action_offload_add(struct tc_action *action,
struct netlink_ext_ack *extack)
{
+ bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
[0] = action,
};
struct flow_offload_action *fl_action;
+ u32 in_hw_count = 0;
int err = 0;

+ if (tc_act_skip_hw(action->tcfa_flags))
+ return 0;
+
fl_action = flow_action_alloc(tcf_act_num_actions_single(action));
if (!fl_action)
return -EINVAL;
@@ -1201,7 +1223,13 @@ static int tcf_action_offload_add(struct tc_action *action,
goto fl_err;
}

- err = tcf_action_offload_cmd(fl_action, extack);
+ err = tcf_action_offload_cmd(fl_action, &in_hw_count, extack);
+ if (!err)
+ flow_action_hw_count_set(action, in_hw_count);
+
+ if (skip_sw && !tc_act_in_hw(action))
+ err = -EINVAL;
+
tc_cleanup_flow_action(&fl_action->action);

fl_err:
@@ -1213,16 +1241,27 @@ static int tcf_action_offload_add(struct tc_action *action,
int tcf_action_offload_del(struct tc_action *action)
{
struct flow_offload_action fl_act;
+ u32 in_hw_count = 0;
int err = 0;

if (!action)
return -EINVAL;

+ if (!tc_act_in_hw(action))
+ return 0;
+
err = flow_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
if (err)
return err;

- return tcf_action_offload_cmd(&fl_act, NULL);
+ err = tcf_action_offload_cmd(&fl_act, &in_hw_count, NULL);
+ if (err)
+ return err;
+
+ if (action->in_hw_count != in_hw_count)
+ return -EINVAL;
+
+ return 0;
}

/* Returns numbers of initialized actions or negative error. */
@@ -1267,8 +1306,11 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
sz += tcf_action_fill_size(act);
/* Start from index 0 */
actions[i - 1] = act;
- if (!(flags & TCA_ACT_FLAGS_BIND))
- tcf_action_offload_add(act, extack);
+ if (!(flags & TCA_ACT_FLAGS_BIND)) {
+ err = tcf_action_offload_add(act, extack);
+ if (tc_act_skip_sw(act->tcfa_flags) && err)
+ goto err;
+ }
}

/* We have to commit them all together, because if any error happened in