@@ -74,15 +74,6 @@ static inline bool tcf_block_non_null_shared(struct tcf_block *block)
return block && block->index;
}
-#ifdef CONFIG_NET_CLS_ACT
-DECLARE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
-
-static inline bool tcf_block_bypass_sw(struct tcf_block *block)
-{
- return block && block->bypass_wanted;
-}
-#endif
-
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
WARN_ON(tcf_block_shared(block));
@@ -760,6 +751,15 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
cls_common->extack = extack;
}
+static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags)
+{
+ if (tp->usesw)
+ return;
+ if (tc_skip_sw(flags) && tc_in_hw(flags))
+ return;
+ tp->usesw = true;
+}
+
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
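A note on the new helper above (my reading, not wording from the patch): tcf_proto_update_usesw() latches tp->usesw once a classifier instance gets a filter that is not fully handled in hardware, and never clears it again for the lifetime of the tcf_proto. The per-filter condition can be spelled out with the existing tc_skip_sw()/tc_in_hw() flag helpers from pkt_cls.h; the helper name below is hypothetical and only illustrates the predicate:

static inline bool tcf_filter_needs_sw(u32 flags)
{
	/* Only a filter that both requested skip_sw and was confirmed
	 * offloaded (in_hw) can stay out of the software datapath; in
	 * every other flag combination the owning tcf_proto must keep
	 * being visited by tc_run()/tcf_classify().
	 */
	return !(tc_skip_sw(flags) && tc_in_hw(flags));
}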
@@ -425,6 +425,7 @@ struct tcf_proto {
spinlock_t lock;
bool deleting;
bool counted;
+ bool usesw;
refcount_t refcnt;
struct rcu_head rcu;
struct hlist_node destroy_ht_node;
@@ -474,9 +475,7 @@ struct tcf_block {
struct flow_block flow_block;
struct list_head owner_list;
bool keep_dst;
- bool bypass_wanted;
- atomic_t filtercnt; /* Number of filters */
- atomic_t skipswcnt; /* Number of skip_sw filters */
+ atomic_t useswcnt;
atomic_t offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
@@ -2103,11 +2103,6 @@ void net_dec_egress_queue(void)
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif
-#ifdef CONFIG_NET_CLS_ACT
-DEFINE_STATIC_KEY_FALSE(tcf_bypass_check_needed_key);
-EXPORT_SYMBOL(tcf_bypass_check_needed_key);
-#endif
-
DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
EXPORT_SYMBOL(netstamp_needed_key);
#ifdef CONFIG_JUMP_LABEL
@@ -3998,10 +3993,8 @@ static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
if (!miniq)
return ret;
- if (static_branch_unlikely(&tcf_bypass_check_needed_key)) {
- if (tcf_block_bypass_sw(miniq->block))
- return ret;
- }
+ if (miniq->block && !atomic_read(&miniq->block->useswcnt))
+ return ret;
tc_skb_cb(skb)->mru = 0;
tc_skb_cb(skb)->post_ct = false;
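For context on the net/core/dev.c hunk above (hedged, illustrative): the tcf_bypass_check_needed_key static key and the cached block->bypass_wanted bool are replaced by a direct read of block->useswcnt, so the fast path asks a single question, "does any tcf_proto on this block still need software processing?". Written out as a hypothetical helper (not something this patch adds):

static inline bool tc_block_can_bypass_sw(struct tcf_block *block)
{
	/* Bypass only when a block is attached and no classifier instance
	 * on it is counted as needing software; a NULL block falls through
	 * to the normal classifier walk, as before.
	 */
	return block && !atomic_read(&block->useswcnt);
}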
@@ -390,6 +390,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->protocol = protocol;
tp->prio = prio;
tp->chain = chain;
+ tp->usesw = !tp->ops->reoffload;
spin_lock_init(&tp->lock);
refcount_set(&tp->refcnt, 1);
@@ -410,48 +411,16 @@ static void tcf_proto_get(struct tcf_proto *tp)
refcount_inc(&tp->refcnt);
}
-static void tcf_maintain_bypass(struct tcf_block *block)
-{
- int filtercnt = atomic_read(&block->filtercnt);
- int skipswcnt = atomic_read(&block->skipswcnt);
- bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;
-
- if (bypass_wanted != block->bypass_wanted) {
-#ifdef CONFIG_NET_CLS_ACT
- if (bypass_wanted)
- static_branch_inc(&tcf_bypass_check_needed_key);
- else
- static_branch_dec(&tcf_bypass_check_needed_key);
-#endif
- block->bypass_wanted = bypass_wanted;
- }
-}
-
-static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
-{
- lockdep_assert_not_held(&block->cb_lock);
-
- down_write(&block->cb_lock);
- if (*counted != add) {
- if (add) {
- atomic_inc(&block->filtercnt);
- *counted = true;
- } else {
- atomic_dec(&block->filtercnt);
- *counted = false;
- }
- }
- tcf_maintain_bypass(block);
- up_write(&block->cb_lock);
-}
-
static void tcf_chain_put(struct tcf_chain *chain);
static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
bool sig_destroy, struct netlink_ext_ack *extack)
{
tp->ops->destroy(tp, rtnl_held, extack);
- tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
+ if (tp->usesw && tp->counted) {
+ tp->counted = false;
+ atomic_dec(&tp->chain->block->useswcnt);
+ }
if (sig_destroy)
tcf_proto_signal_destroyed(tp->chain, tp);
tcf_chain_put(tp->chain);
@@ -2409,7 +2378,12 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held, extack);
tfilter_put(tp, fh);
- tcf_block_filter_cnt_update(block, &tp->counted, true);
+ spin_lock(&tp->lock);
+ if (tp->usesw && !tp->counted) {
+ tp->counted = true;
+ atomic_inc(&block->useswcnt);
+ }
+ spin_unlock(&tp->lock);
/* q pointer is NULL for shared blocks */
if (q)
q->flags &= ~TCQ_F_CAN_BYPASS;
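A hedged reading of the tc_new_tfilter() hunk above: tp->counted now means "this tcf_proto has contributed one unit to block->useswcnt", the increment is taken under tp->lock so concurrent filter additions on the same instance count it only once, and it is paired with the decrement in the tcf_proto_destroy() hunk earlier in the series. Condensed into one place (names hypothetical, purely illustrative):

static void tcf_proto_count_usesw(struct tcf_proto *tp, struct tcf_block *block)
{
	spin_lock(&tp->lock);
	if (tp->usesw && !tp->counted) {	/* first filter successfully added */
		tp->counted = true;
		atomic_inc(&block->useswcnt);
	}
	spin_unlock(&tp->lock);
}

static void tcf_proto_uncount_usesw(struct tcf_proto *tp, struct tcf_block *block)
{
	/* Mirrors the tcf_proto_destroy() hunk: by the time the instance is
	 * torn down the last reference is gone, so tp->lock is not taken.
	 */
	if (tp->usesw && tp->counted) {
		tp->counted = false;
		atomic_dec(&block->useswcnt);
	}
}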
@@ -3532,8 +3506,6 @@ static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
if (*flags & TCA_CLS_FLAGS_IN_HW)
return;
*flags |= TCA_CLS_FLAGS_IN_HW;
- if (tc_skip_sw(*flags))
- atomic_inc(&block->skipswcnt);
atomic_inc(&block->offloadcnt);
}
@@ -3542,8 +3514,6 @@ static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
if (!(*flags & TCA_CLS_FLAGS_IN_HW))
return;
*flags &= ~TCA_CLS_FLAGS_IN_HW;
- if (tc_skip_sw(*flags))
- atomic_dec(&block->skipswcnt);
atomic_dec(&block->offloadcnt);
}
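For readability, the two cls_api.c hunks above leave tcf_block_offload_inc() looking roughly like this (reconstructed on the assumption the hunks apply as shown); the skip_sw bookkeeping disappears because "needs software" is now tracked per tcf_proto through usesw/useswcnt rather than per offloaded filter:

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);	/* only hardware-offload accounting remains */
}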
@@ -509,6 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(prog->gen_flags))
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, prog->gen_flags);
+
if (oldprog) {
idr_replace(&head->handle_idr, prog, handle);
list_replace_rcu(&oldprog->link, &prog->link);
@@ -2503,6 +2503,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, fnew->flags);
+
spin_lock(&tp->lock);
/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
@@ -228,6 +228,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
*arg = head;
rcu_assign_pointer(tp->root, new);
return 0;
@@ -1164,6 +1164,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(n->flags))
n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, n->flags);
+
ins = &ht->ht[TC_U32_HASH(handle)];
for (pins = rtnl_dereference(*ins); pins;
ins = &pins->next, pins = rtnl_dereference(*ins))