
[net] net: ipv6: Validate GSO SKB before finish IPv6 processing

Message ID: 1609331028-2566-1-git-send-email-ayal@nvidia.com
State: Superseded
Delegated to: Netdev Maintainers
Series: [net] net: ipv6: Validate GSO SKB before finish IPv6 processing

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for net
netdev/subject_prefix success
netdev/cc_maintainers warning 4 maintainers not CCed: jengelh@medozas.de kaber@trash.net yoshfuji@linux-ipv6.org kuznet@ms2.inr.ac.ru
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 55 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success
netdev/stable success Stable not CCed

Commit Message

Aya Levin Dec. 30, 2020, 12:23 p.m. UTC
There are cases where a GSO segment's length exceeds the egress MTU.
In such cases:
 - Consume the SKB and its segments.
 - Issue an ICMPv6 'Packet Too Big' message containing the MTU,
   allowing the source host to reduce its Path MTU appropriately.

Note: These cases are handled in the same manner in IPv4 output finish.
This patch aligns the behavior of IPv6 and IPv4.

Fixes: 9e50849054a4 ("netfilter: ipv6: move POSTROUTING invocation before fragmentation")
Signed-off-by: Aya Levin <ayal@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
---
 net/ipv6/ip6_output.c | 41 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

Hi,

Please queue to -stable >= v2.6.35.

Daniel Axtens, does this solve the issue referred to in your commit?
8914a595110a bnx2x: disable GSO where gso_size is too big for hardware
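
For reviewers, a quick note on the helper the new check keys off:
skb_gso_validate_network_len(skb, mtu) reports whether every segment
carved out of a GSO skb would still fit the egress MTU at the network
layer. For the TCP case it boils down to the sketch below (a
simplification; the in-tree helper in net/core/skbuff.c also covers
UDP GSO and the GSO_BY_FRAGS case, and gso_fits_network_mtu() is a
made-up name, not an in-tree function):

static bool gso_fits_network_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	/* IPv6 header plus extension headers, replicated on every segment */
	unsigned int l3_len = skb_transport_header(skb) - skb_network_header(skb);
	/* TCP header, also replicated on every segment */
	unsigned int l4_len = tcp_hdrlen(skb);

	/* gso_size is the payload each resulting segment will carry */
	return l3_len + l4_len + skb_shinfo(skb)->gso_size <= mtu;
}

When this check fails in __ip6_finish_output(), the skb is
software-segmented and each segment is handed to ip6_fragment(), which
emits the ICMPv6 'Packet Too Big' and drops segments that still exceed
the MTU.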

Thanks,
Aya

Comments

Daniel Axtens Dec. 31, 2020, 1:49 p.m. UTC | #1
Hi Aya,

> Daniel Axtens, does this solve the issue referred to in your commit?
> 8914a595110a bnx2x: disable GSO where gso_size is too big for hardware

No, because:
> Note: These cases are handled in the same manner in IPv4 output finish.
> This patch aligns the behavior of IPv6 and IPv4.
and the issue my patch addresses was hit on IPv4, not IPv6.

In my case, the issue was that a packet came in on an ibmveth interface
as GSO, but with a gso_size that's very, very large. Open vSwitch then
forwarded it to the bnx2x interface, where the firmware promptly
panicked.

There's a nice long description at
https://patchwork.ozlabs.org/project/netdev/patch/20180111235905.10110-1-dja@axtens.net/
but the description wasn't included in the commit message.
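
(For anyone reading along: the driver-side mitigation is an
.ndo_features_check hook that strips the GSO feature bits when a
segment would be too big for the firmware, so the stack falls back to
software segmentation. Roughly this shape -- a sketch rather than the
exact bnx2x code, with example_features_check() and HW_GSO_MAX_LEN as
made-up stand-ins for the driver's names and limit:)

#define HW_GSO_MAX_LEN	9700	/* stand-in for the device's real limit */

static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	/* Segments larger than the hardware limit would make the
	 * firmware fall over, so disable hardware GSO for this skb and
	 * let the stack segment it in software instead.
	 */
	if (skb_is_gso(skb) &&
	    !skb_gso_validate_mac_len(skb, HW_GSO_MAX_LEN))
		features &= ~NETIF_F_GSO_MASK;

	return features;
}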

Kind regards,
Daniel


Patch

diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 749ad72386b2..36466f2211bd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -125,8 +125,43 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 	return -EINVAL;
 }
 
+static int
+ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+				    struct sk_buff *skb, unsigned int mtu)
+{
+	struct sk_buff *segs, *nskb;
+	netdev_features_t features;
+	int ret = 0;
+
+	/* Please see corresponding comment in ip_finish_output_gso
+	 * describing the cases where GSO segment length exceeds the
+	 * egress MTU.
+	 */
+	features = netif_skb_features(skb);
+	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+	if (IS_ERR_OR_NULL(segs)) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	consume_skb(skb);
+
+	skb_list_walk_safe(segs, segs, nskb) {
+		int err;
+
+		skb_mark_not_on_list(segs);
+		err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+		if (err && ret == 0)
+			ret = err;
+	}
+
+	return ret;
+}
+
 static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	unsigned int mtu;
+
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
 	if (skb_dst(skb)->xfrm) {
@@ -135,7 +170,11 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
 	}
 #endif
 
-	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+	mtu = ip6_skb_dst_mtu(skb);
+	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
+		return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+
+	if ((skb->len > mtu && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
 		return ip6_fragment(net, sk, skb, ip6_finish_output2);