@@ -68,6 +68,8 @@ struct tc_action {
#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
#define TCA_ACT_FLAGS_AT_INGRESS (1U << (TCA_ACT_FLAGS_USER_BITS + 4))
+#define TCA_ACT_FLAGS_PREALLOC (1U << (TCA_ACT_FLAGS_USER_BITS + 5))
+#define TCA_ACT_FLAGS_UNREFERENCED (1U << (TCA_ACT_FLAGS_USER_BITS + 6))
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
@@ -201,6 +203,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
const struct tc_action_ops *ops, int bind,
u32 flags);
void tcf_idr_insert_many(struct tc_action *actions[], int init_res[]);
+void tcf_idr_insert_n(struct tc_action *actions[], const u32 n);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
@@ -560,6 +560,8 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
continue;
if (IS_ERR(p))
continue;
+ if (p->tcfa_flags & TCA_ACT_FLAGS_UNREFERENCED)
+ continue;
if (jiffy_since &&
time_after(jiffy_since,
@@ -640,6 +642,9 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
idr_for_each_entry_ul(idr, p, tmp, id) {
if (IS_ERR(p))
continue;
+ if (p->tcfa_flags & TCA_ACT_FLAGS_PREALLOC)
+ continue;
+
ret = tcf_idr_release_unsafe(p);
if (ret == ACT_P_DELETED)
module_put(ops->owner);
@@ -1398,25 +1403,40 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};
+static void tcf_idr_insert_1(struct tc_action *a)
+{
+ struct tcf_idrinfo *idrinfo;
+
+ idrinfo = a->idrinfo;
+ mutex_lock(&idrinfo->lock);
+	/* Replace the ERR_PTR(-EBUSY) set up by tcf_idr_check_alloc()
+	 * for a newly created action; otherwise this is a no-op.
+	 */
+ idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
+ mutex_unlock(&idrinfo->lock);
+}
+
void tcf_idr_insert_many(struct tc_action *actions[], int init_res[])
{
struct tc_action *a;
int i;
tcf_act_for_each_action(i, a, actions) {
- struct tcf_idrinfo *idrinfo;
-
if (init_res[i] == ACT_P_BOUND)
continue;
- idrinfo = a->idrinfo;
- mutex_lock(&idrinfo->lock);
- /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
- idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
- mutex_unlock(&idrinfo->lock);
+ tcf_idr_insert_1(a);
}
}
+void tcf_idr_insert_n(struct tc_action *actions[], const u32 n)
+{
+	u32 i;
+
+ for (i = 0; i < n; i++)
+ tcf_idr_insert_1(actions[i]);
+}
+
struct tc_action_ops *
tc_action_load_ops(struct net *net, struct nlattr *nla,
u32 flags, struct netlink_ext_ack *extack)
@@ -2093,8 +2113,17 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
ret = PTR_ERR(act);
goto err;
}
- attr_size += tcf_action_fill_size(act);
actions[i - 1] = act;
+
+ if (event == RTM_DELACTION &&
+ act->tcfa_flags & TCA_ACT_FLAGS_PREALLOC) {
+ ret = -EINVAL;
+ NL_SET_ERR_MSG_FMT(extack,
+ "Unable to delete preallocated action %s",
+ act->ops->kind);
+ goto err;
+ }
+ attr_size += tcf_action_fill_size(act);
}
attr_size = tcf_action_full_attrs_size(attr_size);
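
For illustration, a minimal sketch (not part of the patch) of how a caller might combine the new helper with the two internal flags: it preallocates a batch of actions, marks them with TCA_ACT_FLAGS_PREALLOC and TCA_ACT_FLAGS_UNREFERENCED so that the dump and flush walkers above skip them, and then publishes them all with tcf_idr_insert_n(). The function name example_prealloc_actions, the exact creation flow, and the direct write to tcfa_flags are assumptions made for this sketch; unwinding of already-created actions on error is omitted for brevity.

/* Illustrative sketch only: preallocate n actions of the given kind,
 * hide them from dumps (TCA_ACT_FLAGS_UNREFERENCED) and protect them
 * from flush (TCA_ACT_FLAGS_PREALLOC), then publish them with
 * tcf_idr_insert_n().
 */
static int example_prealloc_actions(struct tc_action_net *tn,
				    const struct tc_action_ops *ops,
				    struct tc_action *actions[], u32 n)
{
	u32 i;
	int err;

	for (i = 0; i < n; i++) {
		u32 index = 0;	/* 0: let the IDR pick a free index */

		/* Reserve the index with an ERR_PTR(-EBUSY) placeholder. */
		err = tcf_idr_check_alloc(tn, &index, &actions[i], 0);
		if (err < 0)
			return err;

		err = tcf_idr_create_from_flags(tn, index, NULL, &actions[i],
						ops, 0, 0);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}

		/* Assumed here: the caller sets the internal bits directly. */
		actions[i]->tcfa_flags |= TCA_ACT_FLAGS_PREALLOC |
					  TCA_ACT_FLAGS_UNREFERENCED;
	}

	/* Replace all n placeholders, making the actions visible at once. */
	tcf_idr_insert_n(actions, n);
	return 0;
}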