Message ID | 160216616276.882446.17894852306425732310.stgit@firesoul (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | BPF |
Headers | show |
Series | bpf: New approach for BPF MTU handling | expand |
On Thu, Oct 8, 2020 at 7:09 AM Jesper Dangaard Brouer <brouer@redhat.com> wrote: > > The use-case for dropping the MTU check when TC-BPF does redirect to > ingress, is described by Eyal Birger in email[0]. The summary is the > ability to increase packet size (e.g. with IPv6 headers for NAT64) and > ingress redirect packet and let normal netstack fragment packet as needed. > > [0] https://lore.kernel.org/netdev/CAHsH6Gug-hsLGHQ6N0wtixdOa85LDZ3HNRHVd0opR=19Qo4W4Q@mail.gmail.com/ > > Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> > --- > include/linux/netdevice.h | 5 +++-- > net/core/dev.c | 2 +- > net/core/filter.c | 12 ++++++++++-- > 3 files changed, 14 insertions(+), 5 deletions(-) > > diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h > index 28cfa53daf72..58fb7b4869ba 100644 > --- a/include/linux/netdevice.h > +++ b/include/linux/netdevice.h > @@ -3866,10 +3866,11 @@ bool is_skb_forwardable(const struct net_device *dev, > const struct sk_buff *skb); > > static __always_inline int ____dev_forward_skb(struct net_device *dev, > - struct sk_buff *skb) > + struct sk_buff *skb, > + const bool mtu_check) check_mtu might be a better arg name than 'mtu_check' > { > if (skb_orphan_frags(skb, GFP_ATOMIC) || > - unlikely(!is_skb_forwardable(dev, skb))) { > + (mtu_check && unlikely(!is_skb_forwardable(dev, skb)))) { > atomic_long_inc(&dev->rx_dropped); > kfree_skb(skb); > return NET_RX_DROP; > diff --git a/net/core/dev.c b/net/core/dev.c > index b433098896b2..96b455f15872 100644 > --- a/net/core/dev.c > +++ b/net/core/dev.c > @@ -2209,7 +2209,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); > > int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) > { > - int ret = ____dev_forward_skb(dev, skb); > + int ret = ____dev_forward_skb(dev, skb, true); > > if (likely(!ret)) { > skb->protocol = eth_type_trans(skb, dev); > diff --git a/net/core/filter.c b/net/core/filter.c > index 5986156e700e..a8e24092e4f5 100644 > --- a/net/core/filter.c > +++ 
b/net/core/filter.c > @@ -2083,13 +2083,21 @@ static const struct bpf_func_proto bpf_csum_level_proto = { > > static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) > { > - return dev_forward_skb(dev, skb); > + int ret = ____dev_forward_skb(dev, skb, false); > + > + if (likely(!ret)) { > + skb->protocol = eth_type_trans(skb, dev); > + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); this blindly assumes eth header size in a function that doesn't (by name) seem ethernet specific... could this use dev->hard_header_len? or change func name to be __bpf_ethernet_rx_skb or something > + ret = netif_rx(skb); > + } > + > + return ret; > } > > static inline int __bpf_rx_skb_no_mac(struct net_device *dev, > struct sk_buff *skb) > { > - int ret = ____dev_forward_skb(dev, skb); > + int ret = ____dev_forward_skb(dev, skb, false); > > if (likely(!ret)) { > skb->dev = dev;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 28cfa53daf72..58fb7b4869ba 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3866,10 +3866,11 @@ bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); static __always_inline int ____dev_forward_skb(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + const bool mtu_check) { if (skb_orphan_frags(skb, GFP_ATOMIC) || - unlikely(!is_skb_forwardable(dev, skb))) { + (mtu_check && unlikely(!is_skb_forwardable(dev, skb)))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; diff --git a/net/core/dev.c b/net/core/dev.c index b433098896b2..96b455f15872 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2209,7 +2209,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - int ret = ____dev_forward_skb(dev, skb); + int ret = ____dev_forward_skb(dev, skb, true); if (likely(!ret)) { skb->protocol = eth_type_trans(skb, dev); diff --git a/net/core/filter.c b/net/core/filter.c index 5986156e700e..a8e24092e4f5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2083,13 +2083,21 @@ static const struct bpf_func_proto bpf_csum_level_proto = { static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) { - return dev_forward_skb(dev, skb); + int ret = ____dev_forward_skb(dev, skb, false); + + if (likely(!ret)) { + skb->protocol = eth_type_trans(skb, dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + ret = netif_rx(skb); + } + + return ret; } static inline int __bpf_rx_skb_no_mac(struct net_device *dev, struct sk_buff *skb) { - int ret = ____dev_forward_skb(dev, skb); + int ret = ____dev_forward_skb(dev, skb, false); if (likely(!ret)) { skb->dev = dev;
The use-case for dropping the MTU check when TC-BPF does redirect to ingress, is described by Eyal Birger in email[0]. The summary is the ability to increase packet size (e.g. with IPv6 headers for NAT64) and ingress redirect packet and let normal netstack fragment packet as needed. [0] https://lore.kernel.org/netdev/CAHsH6Gug-hsLGHQ6N0wtixdOa85LDZ3HNRHVd0opR=19Qo4W4Q@mail.gmail.com/ Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> --- include/linux/netdevice.h | 5 +++-- net/core/dev.c | 2 +- net/core/filter.c | 12 ++++++++++-- 3 files changed, 14 insertions(+), 5 deletions(-)