@@ -240,4 +240,107 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
return 0;
}
+static inline unsigned int virtio_l3min(bool is_ipv6)
+{
+ return is_ipv6 ? sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+}
+
+static inline int virtio_net_hdr_tnl_to_skb(struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ unsigned int tnl_hdr_offset,
+ bool little_endian,
+ bool has_mac)
+{
+ u8 gso_tunnel_type = hdr->gso_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL;
+ unsigned int inner_nh, outer_th, inner_th;
+ unsigned int inner_l3min, outer_l3min;
+ struct virtio_net_hdr_tunnel *tnl;
+ u8 gso_inner_type;
+ bool outer_isv6;
+ int ret;
+
+ if (!gso_tunnel_type)
+ return virtio_net_hdr_to_skb(skb, hdr, little_endian);
+
+ /* Tunnel not supported/negotiated, but the hdr asks for it. */
+ if (!tnl_hdr_offset)
+ return -EINVAL;
+
+ /* The tunnel is over either IPv4 or IPv6, never both. */
+ if (gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 &&
+ gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6)
+ return -EINVAL;
+
+ /* No UDP fragments over UDP tunnel. */
+ hdr->gso_type &= ~gso_tunnel_type;
+ gso_inner_type = hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
+ if (!gso_inner_type || gso_inner_type == VIRTIO_NET_HDR_GSO_UDP_L4)
+ return -EINVAL;
+
+ /* Rely on the csum being present. */
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+
+ /* Validate offsets. */
+ outer_isv6 = gso_tunnel_type & VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ inner_l3min = virtio_l3min(gso_inner_type == VIRTIO_NET_HDR_GSO_TCPV6);
+ outer_l3min = virtio_l3min(outer_isv6);
+ if (has_mac)
+ outer_l3min += ETH_HLEN;
+
+ tnl = ((void *)hdr) + tnl_hdr_offset;
+ inner_th = __virtio16_to_cpu(little_endian, hdr->csum_start);
+ inner_nh = __virtio16_to_cpu(little_endian, tnl->inner_nh_offset);
+ outer_th = __virtio16_to_cpu(little_endian, tnl->outer_th_offset);
+ if (outer_th < outer_l3min + sizeof(struct udphdr) ||
+ outer_th > inner_nh ||
+ inner_th < inner_nh + inner_l3min)
+ return -EINVAL;
+
+ /* Let the basic parsing deal with plain GSO features. */
+ ret = virtio_net_hdr_to_skb(skb, hdr, little_endian);
+ if (ret)
+ return ret;
+
+ skb_set_inner_protocol(skb, outer_isv6 ? htons(ETH_P_IPV6) :
+ htons(ETH_P_IP));
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ skb->inner_transport_header = inner_th;
+ skb->inner_network_header = inner_nh;
+ skb->transport_header = outer_th;
+ return 0;
+}
+
+static inline int virtio_net_hdr_tnl_from_skb(struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ unsigned int tnl_hdr_offset,
+ bool little_endian,
+ int vlan_hlen)
+{
+ struct virtio_net_hdr_tunnel *tnl;
+ int ret;
+
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL))
+ return virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
+ vlan_hlen);
+
+ skb_shinfo(skb)->gso_type &= ~SKB_GSO_UDP_TUNNEL;
+ ret = virtio_net_hdr_from_skb(skb, hdr, little_endian, false,
+ vlan_hlen);
+ if (ret)
+ return ret;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
+ else
+ hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4;
+
+ tnl = ((void *)hdr) + tnl_hdr_offset;
+ tnl->inner_nh_offset = __cpu_to_virtio16(little_endian,
+ skb->inner_network_header);
+ tnl->outer_th_offset = __cpu_to_virtio16(little_endian,
+ skb->transport_header);
+ return 0;
+}
+
#endif /* _LINUX_VIRTIO_NET_H */
@@ -56,6 +56,8 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO 47 /* Guest can handle GSO over UDP tunnel */
+#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO 49 /* Host can handle GSO over UDP tunnel */
#define VIRTIO_NET_F_DEVICE_STATS 50 /* Device can provide device-level statistics. */
#define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */
#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
@@ -137,6 +139,10 @@ struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4& IPv6 UDP (USO) */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 0x20 /* UDP over IPv4 tunnel present */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6 0x40 /* UDP over IPv6 tunnel present */
+#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL (VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 | \
+ VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6)
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
__u8 gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
@@ -181,6 +187,12 @@ struct virtio_net_hdr_v1_hash {
__le16 padding;
};
+/* This header comes after the hashing information */
+struct virtio_net_tunnel_hdr {
+ __virtio16 outer_th_offset;
+ __virtio16 inner_nh_offset;
+};
+
#ifndef VIRTIO_NET_NO_LEGACY
/* This header comes first in the scatter-gather list.
* For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must
The virtio specification is introducing support for GSO over UDP tunnel.
This patch brings in the needed defines and the additional virtio hdr
parsing/building helpers.

The UDP tunnel support uses additional fields in the virtio hdr, and the
location of such fields can change depending on other negotiated
features, specifically VIRTIO_NET_F_HASH_REPORT.

Try to be as conservative as possible with the new field validation.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 include/linux/virtio_net.h      | 103 ++++++++++++++++++++++++++++++++
 include/uapi/linux/virtio_net.h |  12 ++++
 2 files changed, 115 insertions(+)
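
Note: as a minimal, hypothetical sketch of the offset handling described
above (not part of this patch), a driver could pick the tunnel header
offset based on whether VIRTIO_NET_F_HASH_REPORT was negotiated and then
call the new helper. build_tnl_tx_hdr() and its arguments are
illustrative names only, and the sketch assumes the tunnel fields sit
directly after either the v1 or the v1_hash header:

/*
 * Hypothetical caller sketch. hdr is assumed to point at a buffer large
 * enough to also hold the tunnel fields at the computed offset.
 */
#include <linux/skbuff.h>
#include <linux/virtio_net.h>

static int build_tnl_tx_hdr(struct sk_buff *skb, struct virtio_net_hdr *hdr,
			    bool has_hash_report, bool little_endian)
{
	/* Tunnel fields follow the hash header when hash report is on. */
	unsigned int tnl_off = has_hash_report ?
			       sizeof(struct virtio_net_hdr_v1_hash) :
			       sizeof(struct virtio_net_hdr_v1);

	return virtio_net_hdr_tnl_from_skb(skb, hdr, tnl_off,
					   little_endian, 0);
}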