@@ -151,7 +151,8 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+#define XGBE_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
* - Maximum number of SKB frags
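For reference, the worst-case split that XGBE_TX_MAX_SPLIT encodes, worked through as a standalone userspace sketch (constants copied from the hunk above; the main() harness is illustrative only):

#include <stdio.h>

#define GSO_LEGACY_MAX_SIZE	65536u
/* Largest ring buffer: 0x3fff rounded down to 64-byte alignment */
#define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))	/* 16320 bytes */
#define XGBE_TX_MAX_SPLIT \
	((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)

int main(void)
{
	/* 65536 / 16320 = 4 full buffers, plus one for the remainder */
	printf("descriptors per contiguous TSO payload: %u\n",
	       (unsigned int)XGBE_TX_MAX_SPLIT);	/* 5 */
	return 0;
}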
@@ -2038,7 +2038,7 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
{
int nr_frags = skb_shinfo(skb)->nr_frags;
- return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+ return PAGE_SIZE * nr_frags + data_bcnt <= GRO_MAX_SIZE;
}
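The switch to GRO_MAX_SIZE is deliberate: this helper guards receive-side aggregation, which keeps the 64 KiB legacy ceiling even once GSO_MAX_SIZE grows. A minimal userspace model of the bound, assuming 4 KiB pages (hw_gro_has_space() is an illustrative stand-in for the mlx5 helper):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define GRO_MAX_SIZE	65536u	/* receive-side aggregation limit */

/* Worst case: every existing fragment may fill out to a whole page */
static bool hw_gro_has_space(unsigned int nr_frags, unsigned int data_bcnt)
{
	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_MAX_SIZE;
}

int main(void)
{
	printf("%d\n", hw_gro_has_space(15, 1400));	/* 1: still fits */
	printf("%d\n", hw_gro_has_space(16, 1400));	/* 0: would overflow */
	return 0;
}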
static void
@@ -1008,7 +1008,8 @@ static int ef100_process_design_param(struct efx_nic *efx,
}
return 0;
case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
- nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
+ nic_data->tso_max_payload_len = min_t(u64, reader->value,
+ GSO_LEGACY_MAX_SIZE);
netif_set_tso_max_size(efx->net_dev,
nic_data->tso_max_payload_len);
return 0;
@@ -98,7 +98,8 @@ unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EF4_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
+ DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+ EF4_PAGE_SIZE));
return max_descs;
}
@@ -416,7 +416,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
/* Possibly more for PCIe page boundaries within input fragments */
if (PAGE_SIZE > EFX_PAGE_SIZE)
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
- DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+ DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
+ EFX_PAGE_SIZE));
return max_descs;
}
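Both the legacy ef4 and current efx variants reserve the same worst-case headroom: when the kernel page size exceeds the controller's DMA page size, a 64 KiB TSO payload can be split at every controller-page boundary. A standalone sketch of the bound, assuming a 4 KiB EFX_PAGE_SIZE and the typical MAX_SKB_FRAGS of 17:

#include <stdio.h>

#define GSO_LEGACY_MAX_SIZE	65536u
#define EFX_PAGE_SIZE		4096u	/* controller DMA page, assumed */
#define MAX_SKB_FRAGS		17u	/* typical kernel configuration */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int splits = DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE, EFX_PAGE_SIZE);
	unsigned int extra = splits > MAX_SKB_FRAGS ? splits : MAX_SKB_FRAGS;

	/* 16 page-boundary splits vs. 17 frags: the frag count wins here */
	printf("extra descriptors reserved: %u\n", extra);	/* 17 */
	return 0;
}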
@@ -38,7 +38,8 @@
#define XLGMAC_RX_DESC_MAX_DIRTY (XLGMAC_RX_DESC_CNT >> 3)
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XLGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
+#define XLGMAC_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XLGMAC_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for a SKB */
#define XLGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + XLGMAC_TX_MAX_SPLIT + 2)
@@ -1349,7 +1349,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_MAX_SIZE;
+ unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
/* Find HW offload capabilities */
@@ -667,7 +667,7 @@ static void fcoe_netdev_features_change(struct fc_lport *lport,
if (netdev->features & NETIF_F_FSO) {
lport->seq_offload = 1;
- lport->lso_max = netdev->gso_max_size;
+ lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE);
FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
lport->lso_max);
} else {
@@ -2262,7 +2262,11 @@ struct net_device {
const struct rtnl_link_ops *rtnl_link_ops;
/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE 65536
+#define GSO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
unsigned int gso_max_size;
#define TSO_LEGACY_MAX_SIZE 65536
#define TSO_MAX_SIZE UINT_MAX
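The replacement GSO_MAX_SIZE is not arbitrary: shinfo->gso_segs is a 16-bit count and the minimal TCP MSS is 8 bytes (TCP_MIN_GSO_SIZE), so 8 * GSO_MAX_SEGS is the largest payload the segment counter can still describe. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

#define GSO_MAX_SEGS		65535u	/* 16-bit shinfo->gso_segs */
#define GSO_LEGACY_MAX_SIZE	65536u
#define GSO_MAX_SIZE		(8 * GSO_MAX_SEGS)

int main(void)
{
	/* 8 * 65535 = 524280: anything larger cannot be segmented */
	printf("GSO_MAX_SIZE = %u\n", GSO_MAX_SIZE);	/* 524280 */
	assert(GSO_MAX_SIZE / 8 == GSO_MAX_SEGS);
	assert(GSO_MAX_SIZE > GSO_LEGACY_MAX_SIZE);
	return 0;
}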
@@ -1001,7 +1001,7 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
cb->pkt_len = skb->len;
} else {
if (__skb->wire_len < skb->len ||
- __skb->wire_len > GSO_MAX_SIZE)
+ __skb->wire_len > GSO_LEGACY_MAX_SIZE)
return -EINVAL;
cb->pkt_len = __skb->wire_len;
}
@@ -2998,7 +2998,8 @@ EXPORT_SYMBOL(netif_set_real_num_queues);
* @size: max skb->len of a TSO frame
*
* Set the limit on the size of TSO super-frames the device can handle.
- * Unless explicitly set the stack will assume the value of %GSO_MAX_SIZE.
+ * Unless explicitly set the stack will assume the value of
+ * %GSO_LEGACY_MAX_SIZE.
*/
void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
{
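A userspace model of the documented default (fake_netdev and its setter are illustrative stand-ins, not the kernel API): a driver that never calls the setter keeps the legacy 64 KiB assumption, while hardware with a larger DMA engine can raise it:

#include <stdio.h>

#define GSO_LEGACY_MAX_SIZE	65536u

struct fake_netdev {
	unsigned int tso_max_size;
};

/* Stand-in for netif_set_tso_max_size() */
static void fake_set_tso_max_size(struct fake_netdev *dev, unsigned int size)
{
	dev->tso_max_size = size;
}

int main(void)
{
	/* The stack's assumption when the driver stays silent */
	struct fake_netdev dev = { .tso_max_size = GSO_LEGACY_MAX_SIZE };

	printf("default: %u\n", dev.tso_max_size);	/* 65536 */
	fake_set_tso_max_size(&dev, 262144);		/* hypothetical HW limit */
	printf("driver-set: %u\n", dev.tso_max_size);	/* 262144 */
	return 0;
}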
@@ -10602,7 +10603,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
- dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
dev->gso_max_segs = GSO_MAX_SEGS;
dev->gro_max_size = GRO_MAX_SIZE;
dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
@@ -2809,7 +2809,7 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_GSO_MAX_SIZE]) {
u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
- if (max_size > GSO_MAX_SIZE || max_size > dev->tso_max_size) {
+ if (max_size > dev->tso_max_size) {
err = -EINVAL;
goto errout;
}
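With the fixed 64 KiB bound gone, the device's advertised tso_max_size becomes the only ceiling an IFLA_GSO_MAX_SIZE request must respect. A compact userspace model of the remaining check:

#include <stdio.h>

/* Illustrative stand-in for the do_setlink() validation */
static int check_gso_max_size(unsigned int requested, unsigned int tso_max)
{
	return requested > tso_max ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
	printf("%d\n", check_gso_max_size(131072, 65536));	/* -22: rejected */
	printf("%d\n", check_gso_max_size(131072, 262144));	/* 0: accepted */
	return 0;
}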
@@ -2312,6 +2312,10 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
/* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
+ if (sk->sk_gso_max_size > GSO_LEGACY_MAX_SIZE &&
+ (!IS_ENABLED(CONFIG_IPV6) || sk->sk_family != AF_INET6 ||
+ !sk_is_tcp(sk) || ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)))
+ sk->sk_gso_max_size = GSO_LEGACY_MAX_SIZE;
sk->sk_gso_max_size -= (MAX_TCP_HEADER + 1);
/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
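The added condition reads more easily inverted: a socket keeps a gso_max_size above the legacy limit only when it is a TCP socket over real (non-v4-mapped) IPv6, because BIG TCP frames are carried via the IPv6 jumbogram hop-by-hop option. A truth-table sketch of the predicate (userspace, illustrative names):

#include <stdbool.h>
#include <stdio.h>

/* True when sk_gso_max_size must be clamped to GSO_LEGACY_MAX_SIZE */
static bool must_cap_to_legacy(bool ipv6_enabled, bool af_inet6,
			       bool is_tcp, bool saddr_v4mapped)
{
	return !ipv6_enabled || !af_inet6 || !is_tcp || saddr_v4mapped;
}

int main(void)
{
	/* IPv6 TCP, native address: the only case allowed to stay big */
	printf("%d\n", must_cap_to_legacy(true, true, true, false)); /* 0 */
	/* v4-mapped IPv6 TCP: capped, no jumbogram option on the wire */
	printf("%d\n", must_cap_to_legacy(true, true, true, true));  /* 1 */
	return 0;
}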
@@ -310,7 +310,7 @@ static u32 bbr_tso_segs_goal(struct sock *sk)
*/
bytes = min_t(unsigned long,
sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
- GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
+ GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER);
segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
return min(segs, 0x7FU);
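Worked example of the capped goal, using an assumed MAX_TCP_HEADER of 320 bytes (the real value is configuration dependent) and an MSS of 1448:

#include <stdio.h>

#define GSO_LEGACY_MAX_SIZE	65536ul
#define MAX_TCP_HEADER		320ul	/* illustrative only */

int main(void)
{
	unsigned long bytes = 3000000;	/* hypothetical pacing_rate >> pacing_shift */
	unsigned long cap = GSO_LEGACY_MAX_SIZE - 1 - MAX_TCP_HEADER;
	unsigned int mss = 1448, min_segs = 2, segs;

	if (bytes > cap)		/* the min_t() in bbr_tso_segs_goal() */
		bytes = cap;		/* 65215 */
	segs = bytes / mss;		/* 65215 / 1448 = 45 */
	if (segs < min_segs)		/* the max_t() against bbr_min_tso_segs() */
		segs = min_segs;
	printf("tso segs goal: %u\n", segs > 0x7F ? 0x7F : segs);	/* 45 */
	return 0;
}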
@@ -1553,7 +1553,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
* SO_SNDBUF values.
* Also allow first and last skb in retransmit queue to be split.
*/
- limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
+ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
skb != tcp_rtx_queue_head(sk) &&
@@ -134,7 +134,8 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
dst_hold(tp->dst);
sk_setup_caps(sk, tp->dst);
}
- packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size)
+ packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
+ GSO_LEGACY_MAX_SIZE)
: asoc->pathmtu;
rcu_read_unlock();
}
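SCTP's GSO support was written around the legacy 64 KiB limit, so the transport clamps whatever larger value a BIG TCP capable device now advertises. A small model of the resulting max_size selection (packet_max_size() is an illustrative reduction of sctp_packet_config()):

#include <stdio.h>

#define GSO_LEGACY_MAX_SIZE	65536u

static unsigned int packet_max_size(int can_gso, unsigned int dev_gso_max,
				    unsigned int pathmtu)
{
	unsigned int capped = dev_gso_max < GSO_LEGACY_MAX_SIZE ?
			      dev_gso_max : GSO_LEGACY_MAX_SIZE;

	return can_gso ? capped : pathmtu;
}

int main(void)
{
	printf("%u\n", packet_max_size(1, 524280, 1500));	/* 65536 */
	printf("%u\n", packet_max_size(0, 524280, 1500));	/* 1500 */
	return 0;
}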