@@ -89,6 +89,11 @@ enum flow_offload_tuple_dir {
FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
};
+enum flow_offload_xmit_type {
+ FLOW_OFFLOAD_XMIT_NEIGH = 0,
+ FLOW_OFFLOAD_XMIT_XFRM,
+};
+
struct flow_offload_tuple {
union {
struct in_addr src_v4;
@@ -111,7 +116,8 @@ struct flow_offload_tuple {
/* All members above are keys for lookups, see flow_offload_hash(). */
struct { } __hash;
- u8 dir;
+ u8 dir:6,
+ xmit_type:2;
u16 mtu;
@@ -158,7 +164,8 @@ static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
struct nf_flow_route {
struct {
- struct dst_entry *dst;
+ struct dst_entry *dst;
+ enum flow_offload_xmit_type xmit_type;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
@@ -95,6 +95,7 @@ static int flow_offload_fill_route(struct flow_offload *flow,
}
flow_tuple->iifidx = other_dst->dev->ifindex;
+ flow_tuple->xmit_type = route->tuple[dir].xmit_type;
flow_tuple->dst_cache = dst;
return 0;
@@ -220,10 +220,20 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
return true;
}
-static int nf_flow_offload_dst_check(struct dst_entry *dst)
+static inline struct dst_entry *
+nft_flow_dst(struct flow_offload_tuple_rhash *tuplehash)
{
- if (unlikely(dst_xfrm(dst)))
+ return tuplehash->tuple.dst_cache;
+}
+
+static int nf_flow_offload_dst_check(struct flow_offload_tuple_rhash *tuplehash)
+{
+ struct dst_entry *dst;
+
+ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ dst = nft_flow_dst(tuplehash);
return dst_check(dst, 0) ? 0 : -1;
+ }
return 0;
}
@@ -265,8 +275,6 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
- outdev = rt->dst.dev;
if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
return NF_ACCEPT;
@@ -280,7 +288,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
flow_offload_refresh(flow_table, flow);
- if (nf_flow_offload_dst_check(&rt->dst)) {
+ if (nf_flow_offload_dst_check(tuplehash)) {
flow_offload_teardown(flow);
return NF_ACCEPT;
}
@@ -295,13 +303,16 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (flow_table->flags & NF_FLOWTABLE_COUNTER)
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
- if (unlikely(dst_xfrm(&rt->dst))) {
+ rt = (struct rtable *)tuplehash->tuple.dst_cache;
+
+ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
IPCB(skb)->flags = IPSKB_FORWARDED;
return nf_flow_xmit_xfrm(skb, state, &rt->dst);
}
+ outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
skb_dst_set_noref(skb, &rt->dst);
@@ -506,8 +517,6 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
- rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
- outdev = rt->dst.dev;
if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
return NF_ACCEPT;
@@ -518,7 +527,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
flow_offload_refresh(flow_table, flow);
- if (nf_flow_offload_dst_check(&rt->dst)) {
+ if (nf_flow_offload_dst_check(tuplehash)) {
flow_offload_teardown(flow);
return NF_ACCEPT;
}
@@ -536,13 +545,16 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
if (flow_table->flags & NF_FLOWTABLE_COUNTER)
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
- if (unlikely(dst_xfrm(&rt->dst))) {
+ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+
+ if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
IP6CB(skb)->flags = IP6SKB_FORWARDED;
return nf_flow_xmit_xfrm(skb, state, &rt->dst);
}
+ outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
skb_dst_set_noref(skb, &rt->dst);
@@ -19,6 +19,22 @@ struct nft_flow_offload {
struct nft_flowtable *flowtable;
};
+static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
+{
+ if (dst_xfrm(dst))
+ return FLOW_OFFLOAD_XMIT_XFRM;
+
+ return FLOW_OFFLOAD_XMIT_NEIGH;
+}
+
+static void nft_default_forward_path(struct nf_flow_route *route,
+ struct dst_entry *dst_cache,
+ enum ip_conntrack_dir dir)
+{
+ route->tuple[dir].dst = dst_cache;
+ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
+}
+
static int nft_flow_route(const struct nft_pktinfo *pkt,
const struct nf_conn *ct,
struct nf_flow_route *route,
@@ -44,8 +60,8 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
if (!other_dst)
return -ENOENT;
- route->tuple[dir].dst = this_dst;
- route->tuple[!dir].dst = other_dst;
+ nft_default_forward_path(route, this_dst, dir);
+ nft_default_forward_path(route, other_dst, !dir);
return 0;
}
Add the xmit_type field that defines the two supported xmit paths in the
flowtable data plane, which are the neighbour and the xfrm xmit paths.
This patch prepares for new flowtable xmit path types to come.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
v4: no changes

 include/net/netfilter/nf_flow_table.h | 11 +++++++--
 net/netfilter/nf_flow_table_core.c    |  1 +
 net/netfilter/nf_flow_table_ip.c      | 32 ++++++++++++++++++---------
 net/netfilter/nft_flow_offload.c      | 20 +++++++++++++++---
 4 files changed, 50 insertions(+), 14 deletions(-)
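For context, here is a minimal sketch (not part of this patch) of how a transmit
dispatch could branch on the new xmit_type field once further path types are
added. nf_flow_xmit() and nf_flow_xmit_neigh() are hypothetical helpers standing
in for the neighbour output code that currently lives inline in
nf_flow_offload_ip_hook(); only nf_flow_xmit_xfrm() exists in the tree.

static int nf_flow_xmit(struct sk_buff *skb,
			const struct nf_hook_state *state,
			struct flow_offload_tuple_rhash *tuplehash)
{
	struct dst_entry *dst = tuplehash->tuple.dst_cache;

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_XFRM:
		/* hand the packet over to the xfrm output path,
		 * as the IPv4/IPv6 hooks above do.
		 */
		return nf_flow_xmit_xfrm(skb, state, dst);
	case FLOW_OFFLOAD_XMIT_NEIGH:
		/* hypothetical helper: resolve the neighbour from the
		 * cached route and transmit directly.
		 */
		return nf_flow_xmit_neigh(skb, dst);
	}

	return NF_DROP;
}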