diff mbox series

[RFC,net-next,1/3] net/sched: add retpoline wrapper for tc

Message ID 20221125175207.473866-2-pctammela@mojatatu.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series net/sched: retpoline wrappers for tc | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 1 maintainers not CCed: bpf@vger.kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch fail ERROR: trailing statements should be on next line WARNING: From:/Signed-off-by: email address mismatch: 'From: Pedro Tammela <pctammela@gmail.com>' != 'Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>' WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: braces {} are not necessary for single statement blocks
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Pedro Tammela Nov. 25, 2022, 5:52 p.m. UTC
On kernels compiled with CONFIG_RETPOLINE and CONFIG_NET_TC_INDIRECT_WRAPPER,
optimize actions and filters that are compiled as built-ins into a direct call.
The calls are ordered alphabetically, but new ones should ideally
be added last.

On subsequent patches we expose the classifiers and actions functions
and wire up the wrapper into tc.

Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
---
 include/net/tc_wrapper.h | 274 +++++++++++++++++++++++++++++++++++++++
 net/sched/Kconfig        |  13 ++
 2 files changed, 287 insertions(+)
 create mode 100644 include/net/tc_wrapper.h

Comments

Kuniyuki Iwashima Nov. 27, 2022, 1:47 a.m. UTC | #1
From:   Pedro Tammela <pctammela@gmail.com>
Date:   Fri, 25 Nov 2022 14:52:05 -0300
> On kernels compiled with CONFIG_RETPOLINE and CONFIG_NET_TC_INDIRECT_WRAPPER,
> optimize actions and filters that are compiled as built-ins into a direct call.
> The calls are ordered alphabetically, but new ones should be ideally
> added last.
> 
> On subsequent patches we expose the classifiers and actions functions
> and wire up the wrapper into tc.
> 
> Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
> Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
> ---
>  include/net/tc_wrapper.h | 274 +++++++++++++++++++++++++++++++++++++++
>  net/sched/Kconfig        |  13 ++
>  2 files changed, 287 insertions(+)
>  create mode 100644 include/net/tc_wrapper.h
> 
> diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
> new file mode 100644
> index 000000000000..7890d2810148
> --- /dev/null
> +++ b/include/net/tc_wrapper.h
> @@ -0,0 +1,274 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef __NET_TC_WRAPPER_H
> +#define __NET_TC_WRAPPER_H
> +
> +#include <linux/indirect_call_wrapper.h>
> +#include <net/pkt_cls.h>
> +
> +#if IS_ENABLED(CONFIG_RETPOLINE) && IS_ENABLED(CONFIG_NET_TC_INDIRECT_WRAPPER)
> +
> +#define TC_INDIRECT_SCOPE
> +
> +/* TC Actions */
> +INDIRECT_CALLABLE_DECLARE(int tcf_bpf_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));

I prefer writing this like below than repeating INDIRECT_CALLABLE_DECLARE()
as all action have the same args.

  #define TC_INDIRECT_ACTION_DECLARE(func) \
  	INDIRECT_CALLABLE_DECLARE(int func(...))

  TC_INDIRECT_ACTION_DECLARE(tcf_bpf_act);
  TC_INDIRECT_ACTION_DECLARE(tcf_csum_act);
  ...


> +INDIRECT_CALLABLE_DECLARE(int tcf_connmark_act(struct sk_buff *skb,
> +					       const struct tc_action *a,
> +					       struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_csum_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ct_act(struct sk_buff *skb,
> +					 const struct tc_action *a,
> +					 struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ctinfo_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_gact_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_gate_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ife_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_ipt_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_mirred_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_mpls_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_nat_act(struct sk_buff *skb,
> +					  const struct tc_action *a,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_pedit_act(struct sk_buff *skb,
> +					    const struct tc_action *a,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_police_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_sample_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_simp_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_skbedit_act(struct sk_buff *skb,
> +					      const struct tc_action *a,
> +					      struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_skbmod_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcf_vlan_act(struct sk_buff *skb,
> +					   const struct tc_action *a,
> +					   struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tunnel_key_act(struct sk_buff *skb,
> +					     const struct tc_action *a,
> +					     struct tcf_result *res));
> +
> +/* TC Filters */
> +INDIRECT_CALLABLE_DECLARE(int basic_classify(struct sk_buff *skb,
> +					     const struct tcf_proto *tp,
> +					     struct tcf_result *res));

Same here.


> +INDIRECT_CALLABLE_DECLARE(int cls_bpf_classify(struct sk_buff *skb,
> +					       const struct tcf_proto *tp,
> +					       struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int cls_cgroup_classify(struct sk_buff *skb,
> +						  const struct tcf_proto *tp,
> +						  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int fl_classify(struct sk_buff *skb,
> +					  const struct tcf_proto *tp,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int flow_classify(struct sk_buff *skb,
> +					    const struct tcf_proto *tp,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int fw_classify(struct sk_buff *skb,
> +					  const struct tcf_proto *tp,
> +					  struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int mall_classify(struct sk_buff *skb,
> +					    const struct tcf_proto *tp,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int route4_classify(struct sk_buff *skb,
> +					      const struct tcf_proto *tp,
> +					      struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int rsvp_classify(struct sk_buff *skb,
> +					    const struct tcf_proto *tp,
> +					    struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int rsvp6_classify(struct sk_buff *skb,
> +					     const struct tcf_proto *tp,
> +					     struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int tcindex_classify(struct sk_buff *skb,
> +					       const struct tcf_proto *tp,
> +					       struct tcf_result *res));
> +INDIRECT_CALLABLE_DECLARE(int u32_classify(struct sk_buff *skb,
> +					   const struct tcf_proto *tp,
> +					   struct tcf_result *res));
> +
> +static inline int __tc_act(struct sk_buff *skb, const struct tc_action *a,
> +			   struct tcf_result *res)
> +{
> +	if (0) { /* noop */ }
> +#if IS_BUILTIN(CONFIG_NET_ACT_BPF)
> +	else if (a->ops->act == tcf_bpf_act)
> +		return tcf_bpf_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CONNMARK)
> +	else if (a->ops->act == tcf_connmark_act)
> +		return tcf_connmark_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CSUM)
> +	else if (a->ops->act == tcf_csum_act)
> +		return tcf_csum_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CT)
> +	else if (a->ops->act == tcf_ct_act)
> +		return tcf_ct_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_CTINFO)
> +	else if (a->ops->act == tcf_ctinfo_act)
> +		return tcf_ctinfo_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_GACT)
> +	else if (a->ops->act == tcf_gact_act)
> +		return tcf_gact_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_GATE)
> +	else if (a->ops->act == tcf_gate_act)
> +		return tcf_gate_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_IFE)
> +	else if (a->ops->act == tcf_ife_act)
> +		return tcf_ife_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_IPT)
> +	else if (a->ops->act == tcf_ipt_act)
> +		return tcf_ipt_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_MIRRED)
> +	else if (a->ops->act == tcf_mirred_act)
> +		return tcf_mirred_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_MPLS)
> +	else if (a->ops->act == tcf_mpls_act)
> +		return tcf_mpls_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_NAT)
> +	else if (a->ops->act == tcf_nat_act)
> +		return tcf_nat_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_PEDIT)
> +	else if (a->ops->act == tcf_pedit_act)
> +		return tcf_pedit_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_POLICE)
> +	else if (a->ops->act == tcf_police_act)
> +		return tcf_police_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SAMPLE)
> +	else if (a->ops->act == tcf_sample_act)
> +		return tcf_sample_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
> +	else if (a->ops->act == tcf_simp_act)
> +		return tcf_simp_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SKBEDIT)
> +	else if (a->ops->act == tcf_skbedit_act)
> +		return tcf_skbedit_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_SKBMOD)
> +	else if (a->ops->act == tcf_skbmod_act)
> +		return tcf_skbmod_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_TUNNEL_KEY)
> +	else if (a->ops->act == tunnel_key_act)
> +		return tunnel_key_act(skb, a, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_ACT_VLAN)
> +	else if (a->ops->act == tcf_vlan_act)
> +		return tcf_vlan_act(skb, a, res);
> +#endif
> +	else
> +		return a->ops->act(skb, a, res);
> +}
> +
> +static inline int __tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
> +				struct tcf_result *res)
> +{
> +	if (0) { /* noop */ }
> +#if IS_BUILTIN(CONFIG_NET_CLS_BASIC)
> +	else if (tp->classify == basic_classify)
> +		return basic_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_BPF)
> +	else if (tp->classify == cls_bpf_classify)
> +		return cls_bpf_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
> +	else if (tp->classify == cls_cgroup_classify)
> +		return cls_cgroup_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_FLOW)
> +	else if (tp->classify == flow_classify)
> +		return flow_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_FLOWER)
> +	else if (tp->classify == fl_classify)
> +		return fl_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_FW)
> +	else if (tp->classify == fw_classify)
> +		return fw_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_MATCHALL)
> +	else if (tp->classify == mall_classify)
> +		return mall_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_ROUTE4)
> +	else if (tp->classify == route4_classify)
> +		return route4_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_RSVP)
> +	else if (tp->classify == rsvp_classify)
> +		return rsvp_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_RSVP6)
> +	else if (tp->classify == rsvp6_classify)
> +		return rsvp6_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
> +	else if (tp->classify == tcindex_classify)
> +		return tcindex_classify(skb, tp, res);
> +#endif
> +#if IS_BUILTIN(CONFIG_NET_CLS_U32)
> +	else if (tp->classify == u32_classify)
> +		return u32_classify(skb, tp, res);
> +#endif
> +	else
> +		return tp->classify(skb, tp, res);
> +}
> +
> +#else
> +
> +#define TC_INDIRECT_SCOPE static
> +
> +static inline int __tc_act(struct sk_buff *skb, const struct tc_action *a,
> +			   struct tcf_result *res)
> +{
> +	return a->ops->act(skb, a, res);
> +}
> +
> +static inline int __tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
> +				struct tcf_result *res)
> +{
> +	return tp->classify(skb, tp, res);
> +}
> +
> +#endif
> +
> +#endif /* __NET_TC_WRAPPER_H */
> diff --git a/net/sched/Kconfig b/net/sched/Kconfig
> index 1e8ab4749c6c..9bc055f8013e 100644
> --- a/net/sched/Kconfig
> +++ b/net/sched/Kconfig
> @@ -1021,6 +1021,19 @@ config NET_TC_SKB_EXT
>  
>  	  Say N here if you won't be using tc<->ovs offload or tc chains offload.
>  
> +config NET_TC_INDIRECT_WRAPPER
> +	bool "TC indirect call wrapper"
> +	depends on NET_SCHED
> +	depends on RETPOLINE
> +
> +	help
> +	  Say Y here to skip indirect calls in the TC datapath for known
> +	  builtin classifiers/actions under CONFIG_RETPOLINE kernels.
> +
> +	  TC may run slower on CPUs with hardware based mitigations.
> +
> +	  If unsure, say N.
> +
>  endif # NET_SCHED
>  
>  config NET_SCH_FIFO
> -- 
> 2.34.1
diff mbox series

Patch

diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
new file mode 100644
index 000000000000..7890d2810148
--- /dev/null
+++ b/include/net/tc_wrapper.h
@@ -0,0 +1,274 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_WRAPPER_H
+#define __NET_TC_WRAPPER_H
+
+#include <linux/indirect_call_wrapper.h>
+#include <net/pkt_cls.h>
+
+#if IS_ENABLED(CONFIG_RETPOLINE) && IS_ENABLED(CONFIG_NET_TC_INDIRECT_WRAPPER)
+
+#define TC_INDIRECT_SCOPE
+
+/* TC Actions */
+INDIRECT_CALLABLE_DECLARE(int tcf_bpf_act(struct sk_buff *skb,
+					  const struct tc_action *a,
+					  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_connmark_act(struct sk_buff *skb,
+					       const struct tc_action *a,
+					       struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_csum_act(struct sk_buff *skb,
+					   const struct tc_action *a,
+					   struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_ct_act(struct sk_buff *skb,
+					 const struct tc_action *a,
+					 struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_ctinfo_act(struct sk_buff *skb,
+					     const struct tc_action *a,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_gact_act(struct sk_buff *skb,
+					   const struct tc_action *a,
+					   struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_gate_act(struct sk_buff *skb,
+					   const struct tc_action *a,
+					   struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_ife_act(struct sk_buff *skb,
+					  const struct tc_action *a,
+					  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_ipt_act(struct sk_buff *skb,
+					  const struct tc_action *a,
+					  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_mirred_act(struct sk_buff *skb,
+					     const struct tc_action *a,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_mpls_act(struct sk_buff *skb,
+					   const struct tc_action *a,
+					   struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_nat_act(struct sk_buff *skb,
+					  const struct tc_action *a,
+					  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_pedit_act(struct sk_buff *skb,
+					    const struct tc_action *a,
+					    struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_police_act(struct sk_buff *skb,
+					     const struct tc_action *a,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_sample_act(struct sk_buff *skb,
+					     const struct tc_action *a,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_simp_act(struct sk_buff *skb,
+					   const struct tc_action *a,
+					   struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_skbedit_act(struct sk_buff *skb,
+					      const struct tc_action *a,
+					      struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_skbmod_act(struct sk_buff *skb,
+					     const struct tc_action *a,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcf_vlan_act(struct sk_buff *skb,
+					   const struct tc_action *a,
+					   struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tunnel_key_act(struct sk_buff *skb,
+					     const struct tc_action *a,
+					     struct tcf_result *res));
+
+/* TC Filters */
+INDIRECT_CALLABLE_DECLARE(int basic_classify(struct sk_buff *skb,
+					     const struct tcf_proto *tp,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int cls_bpf_classify(struct sk_buff *skb,
+					       const struct tcf_proto *tp,
+					       struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int cls_cgroup_classify(struct sk_buff *skb,
+						  const struct tcf_proto *tp,
+						  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int fl_classify(struct sk_buff *skb,
+					  const struct tcf_proto *tp,
+					  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int flow_classify(struct sk_buff *skb,
+					    const struct tcf_proto *tp,
+					    struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int fw_classify(struct sk_buff *skb,
+					  const struct tcf_proto *tp,
+					  struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int mall_classify(struct sk_buff *skb,
+					    const struct tcf_proto *tp,
+					    struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int route4_classify(struct sk_buff *skb,
+					      const struct tcf_proto *tp,
+					      struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int rsvp_classify(struct sk_buff *skb,
+					    const struct tcf_proto *tp,
+					    struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int rsvp6_classify(struct sk_buff *skb,
+					     const struct tcf_proto *tp,
+					     struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int tcindex_classify(struct sk_buff *skb,
+					       const struct tcf_proto *tp,
+					       struct tcf_result *res));
+INDIRECT_CALLABLE_DECLARE(int u32_classify(struct sk_buff *skb,
+					   const struct tcf_proto *tp,
+					   struct tcf_result *res));
+
+static inline int __tc_act(struct sk_buff *skb, const struct tc_action *a,
+			   struct tcf_result *res)
+{
+	if (0) { /* noop */ }
+#if IS_BUILTIN(CONFIG_NET_ACT_BPF)
+	else if (a->ops->act == tcf_bpf_act)
+		return tcf_bpf_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CONNMARK)
+	else if (a->ops->act == tcf_connmark_act)
+		return tcf_connmark_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CSUM)
+	else if (a->ops->act == tcf_csum_act)
+		return tcf_csum_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CT)
+	else if (a->ops->act == tcf_ct_act)
+		return tcf_ct_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CTINFO)
+	else if (a->ops->act == tcf_ctinfo_act)
+		return tcf_ctinfo_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_GACT)
+	else if (a->ops->act == tcf_gact_act)
+		return tcf_gact_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_GATE)
+	else if (a->ops->act == tcf_gate_act)
+		return tcf_gate_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_IFE)
+	else if (a->ops->act == tcf_ife_act)
+		return tcf_ife_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_IPT)
+	else if (a->ops->act == tcf_ipt_act)
+		return tcf_ipt_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_MIRRED)
+	else if (a->ops->act == tcf_mirred_act)
+		return tcf_mirred_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_MPLS)
+	else if (a->ops->act == tcf_mpls_act)
+		return tcf_mpls_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_NAT)
+	else if (a->ops->act == tcf_nat_act)
+		return tcf_nat_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_PEDIT)
+	else if (a->ops->act == tcf_pedit_act)
+		return tcf_pedit_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_POLICE)
+	else if (a->ops->act == tcf_police_act)
+		return tcf_police_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SAMPLE)
+	else if (a->ops->act == tcf_sample_act)
+		return tcf_sample_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
+	else if (a->ops->act == tcf_simp_act)
+		return tcf_simp_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SKBEDIT)
+	else if (a->ops->act == tcf_skbedit_act)
+		return tcf_skbedit_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SKBMOD)
+	else if (a->ops->act == tcf_skbmod_act)
+		return tcf_skbmod_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_TUNNEL_KEY)
+	else if (a->ops->act == tunnel_key_act)
+		return tunnel_key_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_VLAN)
+	else if (a->ops->act == tcf_vlan_act)
+		return tcf_vlan_act(skb, a, res);
+#endif
+	else
+		return a->ops->act(skb, a, res);
+}
+
+static inline int __tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+				struct tcf_result *res)
+{
+	if (0) { /* noop */ }
+#if IS_BUILTIN(CONFIG_NET_CLS_BASIC)
+	else if (tp->classify == basic_classify)
+		return basic_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_BPF)
+	else if (tp->classify == cls_bpf_classify)
+		return cls_bpf_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
+	else if (tp->classify == cls_cgroup_classify)
+		return cls_cgroup_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FLOW)
+	else if (tp->classify == flow_classify)
+		return flow_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FLOWER)
+	else if (tp->classify == fl_classify)
+		return fl_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FW)
+	else if (tp->classify == fw_classify)
+		return fw_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_MATCHALL)
+	else if (tp->classify == mall_classify)
+		return mall_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_ROUTE4)
+	else if (tp->classify == route4_classify)
+		return route4_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_RSVP)
+	else if (tp->classify == rsvp_classify)
+		return rsvp_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_RSVP6)
+	else if (tp->classify == rsvp6_classify)
+		return rsvp6_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
+	else if (tp->classify == tcindex_classify)
+		return tcindex_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_U32)
+	else if (tp->classify == u32_classify)
+		return u32_classify(skb, tp, res);
+#endif
+	else
+		return tp->classify(skb, tp, res);
+}
+
+#else
+
+#define TC_INDIRECT_SCOPE static
+
+static inline int __tc_act(struct sk_buff *skb, const struct tc_action *a,
+			   struct tcf_result *res)
+{
+	return a->ops->act(skb, a, res);
+}
+
+static inline int __tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+				struct tcf_result *res)
+{
+	return tp->classify(skb, tp, res);
+}
+
+#endif
+
+#endif /* __NET_TC_WRAPPER_H */
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 1e8ab4749c6c..9bc055f8013e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -1021,6 +1021,19 @@  config NET_TC_SKB_EXT
 
 	  Say N here if you won't be using tc<->ovs offload or tc chains offload.
 
+config NET_TC_INDIRECT_WRAPPER
+	bool "TC indirect call wrapper"
+	depends on NET_SCHED
+	depends on RETPOLINE
+
+	help
+	  Say Y here to skip indirect calls in the TC datapath for known
+	  builtin classifiers/actions under CONFIG_RETPOLINE kernels.
+
+	  TC may run slower on CPUs with hardware based mitigations.
+
+	  If unsure, say N.
+
 endif # NET_SCHED
 
 config NET_SCH_FIFO