
[net-next,v3,2/3] net: ethernet: rmnet: Support for downlink MAPv5 checksum offload

Message ID 1614110571-11604-3-git-send-email-sharathv@codeaurora.org (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series net: qualcomm: rmnet: Enable Mapv5

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for net-next
netdev/subject_prefix success Link
netdev/cc_maintainers warning 1 maintainers not CCed: stranche@codeaurora.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 6876 this patch: 6876
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 214 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 7270 this patch: 7270
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Sharath Chandra Vurukala Feb. 23, 2021, 8:02 p.m. UTC
Add support for processing of MAPv5 downlink packets.
This involves parsing the MAPv5 packet and checking its checksum
header to determine whether the hardware has already validated the
checksum.

Based on the checksum-valid bit, the corresponding stats are
incremented and skb->ip_summed is either marked CHECKSUM_UNNECESSARY
or left as CHECKSUM_NONE so that the network stack revalidates the
checksum and updates the respective SNMP stats.
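
For reference, a minimal sketch of that per-packet decision (it mirrors
rmnet_map_process_next_hdr_packet() from the patch below for the
checksum-offload header type; the helper itself is purely illustrative):

static void rmnet_map_v5_csum_sketch(struct sk_buff *skb,
				     struct rmnet_priv *priv,
				     struct rmnet_map_v5_csum_header *hdr)
{
	if (!(skb->dev->features & NETIF_F_RXCSUM)) {
		/* Offload disabled on this netdev: count as software csum */
		priv->stats.csum_sw++;
	} else if (hdr->csum_valid_required) {
		/* Hardware already validated the checksum */
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/* Leave CHECKSUM_NONE so the stack revalidates and bumps
		 * the usual SNMP counters on failure
		 */
		priv->stats.csum_valid_unset++;
	}
}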

The current MAPv1 header has been modified: the reserved field in the
MAPv1 header is now used for the next-header indication.

Signed-off-by: Sharath Chandra Vurukala <sharathv@codeaurora.org>
---
 .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c   | 19 ++++---
 drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h    | 14 ++++-
 .../net/ethernet/qualcomm/rmnet/rmnet_map_data.c   | 60 +++++++++++++++++++++-
 include/linux/if_rmnet.h                           | 24 +++++++--
 include/uapi/linux/if_link.h                       |  1 +
 5 files changed, 106 insertions(+), 12 deletions(-)

Comments

Jakub Kicinski Feb. 24, 2021, 6:23 p.m. UTC | #1
On Wed, 24 Feb 2021 01:32:50 +0530 Sharath Chandra Vurukala wrote:
> +/* MAP CSUM headers */
> +struct rmnet_map_v5_csum_header {
> +#if defined(__LITTLE_ENDIAN_BITFIELD)
> +	u8  next_hdr:1;
> +	u8  header_type:7;
> +	u8  hw_reserved:7;
> +	u8  csum_valid_required:1;
> +#elif defined(__BIG_ENDIAN_BITFIELD)
> +	u8  header_type:7;
> +	u8  next_hdr:1;
> +	u8  csum_valid_required:1;
> +	u8  hw_reserved:7;
> +#else
> +#error	"Please fix <asm/byteorder.h>"
> +#endif
> +	__be16 reserved;
> +} __aligned(1);

This seems to be your first contribution so let me spell it out.

In Linux when maintainers ask you to do something you are expected 
to do it.

You can leave the existing bitfields for later, but don't add another.
Alex Elder Feb. 25, 2021, 3:29 p.m. UTC | #2
On 2/24/21 12:23 PM, Jakub Kicinski wrote:
> On Wed, 24 Feb 2021 01:32:50 +0530 Sharath Chandra Vurukala wrote:
>> +/* MAP CSUM headers */
>> +struct rmnet_map_v5_csum_header {
>> +#if defined(__LITTLE_ENDIAN_BITFIELD)
>> +	u8  next_hdr:1;
>> +	u8  header_type:7;
>> +	u8  hw_reserved:7;
>> +	u8  csum_valid_required:1;
>> +#elif defined(__BIG_ENDIAN_BITFIELD)
>> +	u8  header_type:7;
>> +	u8  next_hdr:1;
>> +	u8  csum_valid_required:1;
>> +	u8  hw_reserved:7;
>> +#else
>> +#error	"Please fix <asm/byteorder.h>"
>> +#endif
>> +	__be16 reserved;
>> +} __aligned(1);
> 
> This seems to be your first contribution so let me spell it out.
> 
> In Linux when maintainers ask you to do something you are expected
> to do it.
> 
> You can leave the existing bitfields for later, but don't add another.

As I offered, I will implement changes to the existing
code to use masks in place of these C bit-fields.

I will try to complete this within the next week.  If it
looks good it could serve as an example of how to go
about it.

					-Alex
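
For illustration, the same two bytes could be described with plain u8
fields plus explicit masks instead of C bit-fields, along the lines Alex
describes. This is only a sketch using BIT()/GENMASK() and FIELD_GET()
from <linux/bits.h> and <linux/bitfield.h>; the eventual conversion may
look different:

#include <linux/bits.h>
#include <linux/bitfield.h>

struct rmnet_map_v5_csum_header {
	u8 header_info;
	u8 csum_info;
	__be16 reserved;
} __aligned(1);

/* header_info field masks (bit 0 is the least significant bit) */
#define MAPV5_HDRINFO_NEXT_HDR_FMASK	BIT(0)
#define MAPV5_HDRINFO_HDR_TYPE_FMASK	GENMASK(7, 1)
/* csum_info field masks */
#define MAPV5_CSUMINFO_VALID_FLAG	BIT(7)

static u8 rmnet_map_v5_header_type(const struct rmnet_map_v5_csum_header *hdr)
{
	return FIELD_GET(MAPV5_HDRINFO_HDR_TYPE_FMASK, hdr->header_info);
}

static bool rmnet_map_v5_csum_valid(const struct rmnet_map_v5_csum_header *hdr)
{
	return !!(hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG);
}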

Patch

diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 3d7d3ab..70ad6a7 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -1,5 +1,5 @@ 
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
  *
  * RMNET Data ingress/egress handler
  */
@@ -57,8 +57,8 @@  __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
 	struct rmnet_endpoint *ep;
+	u8 mux_id, next_hdr;
 	u16 len, pad;
-	u8 mux_id;
 
 	if (RMNET_MAP_GET_CD_BIT(skb)) {
 		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
@@ -70,6 +70,7 @@  __rmnet_map_ingress_handler(struct sk_buff *skb,
 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
 	pad = RMNET_MAP_GET_PAD(skb);
 	len = RMNET_MAP_GET_LENGTH(skb) - pad;
+	next_hdr = RMNET_MAP_GET_NH_BIT(skb);
 
 	if (mux_id >= RMNET_MAX_LOGICAL_EP)
 		goto free_skb;
@@ -80,15 +81,19 @@  __rmnet_map_ingress_handler(struct sk_buff *skb,
 
 	skb->dev = ep->egress_dev;
 
-	/* Subtract MAP header */
-	skb_pull(skb, sizeof(struct rmnet_map_header));
-	rmnet_set_skb_proto(skb);
-
-	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+	if (next_hdr &&
+	    (port->data_format & (RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
+		if (rmnet_map_process_next_hdr_packet(skb, len))
+			goto free_skb;
+	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
 		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
+	/* Subtract MAP header */
+	skb_pull(skb, sizeof(struct rmnet_map_header));
+	rmnet_set_skb_proto(skb);
+
 	skb_trim(skb, len);
 	rmnet_deliver_skb(skb);
 	return;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 576501d..2ee1ce2 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -1,5 +1,5 @@ 
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _RMNET_MAP_H_
@@ -23,6 +23,12 @@  struct rmnet_map_control_command {
 	};
 }  __aligned(1);
 
+enum rmnet_map_v5_header_type {
+	RMNET_MAP_HEADER_TYPE_UNKNOWN,
+	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
+	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
+};
+
 enum rmnet_map_commands {
 	RMNET_MAP_COMMAND_NONE,
 	RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -44,6 +50,9 @@  enum rmnet_map_commands {
 #define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
 					(Y)->data)->pkt_len))
 
+#define RMNET_MAP_GET_NH_BIT(Y)  (((struct rmnet_map_header *) \
+				    (Y)->data)->next_hdr)
+
 #define RMNET_MAP_COMMAND_REQUEST     0
 #define RMNET_MAP_COMMAND_ACK         1
 #define RMNET_MAP_COMMAND_UNSUPPORTED 2
@@ -60,5 +69,8 @@  void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				      struct net_device *orig_dev);
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
+u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb);
+bool rmnet_map_get_csum_valid(struct sk_buff *skb);
 
 #endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 21d3816..a3dc220 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,5 +1,5 @@ 
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
  *
  * RMNET Data MAP protocol
  */
@@ -311,6 +311,7 @@  struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 				      struct rmnet_port *port)
 {
+	unsigned char *data = skb->data, *next_hdr = NULL;
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
 	u32 packet_len;
@@ -323,6 +324,12 @@  struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+		if (!maph->cd_bit) {
+			packet_len += sizeof(struct rmnet_map_v5_csum_header);
+			next_hdr = data + sizeof(*maph);
+		}
+	}
 
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
@@ -331,6 +338,11 @@  struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 	if (ntohs(maph->pkt_len) == 0)
 		return NULL;
 
+	if (next_hdr &&
+	    ((struct rmnet_map_v5_csum_header *)next_hdr)->header_type !=
+	     RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+		return NULL;
+
 	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
 	if (!skbn)
 		return NULL;
@@ -428,3 +440,49 @@  void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 
 	priv->stats.csum_sw++;
 }
+
+u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
+{
+	unsigned char *data = skb->data;
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_csum_header *)data)->header_type;
+}
+
+bool rmnet_map_get_csum_valid(struct sk_buff *skb)
+{
+	unsigned char *data = skb->data;
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
+}
+
+/* Process a MAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+				      u16 len)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	int rc = 0;
+
+	switch (rmnet_map_get_next_hdr_type(skb)) {
+	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+		if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+			priv->stats.csum_sw++;
+		} else if (rmnet_map_get_csum_valid(skb)) {
+			priv->stats.csum_ok++;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			priv->stats.csum_valid_unset++;
+		}
+
+		/* Pull csum v5 header */
+		skb_pull(skb, sizeof(struct rmnet_map_v5_csum_header));
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 9661416..004773f 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -1,5 +1,5 @@ 
 /* SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
  */
 
 #ifndef _LINUX_IF_RMNET_H_
@@ -8,11 +8,11 @@ 
 struct rmnet_map_header {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	u8  pad_len:6;
-	u8  reserved_bit:1;
+	u8  next_hdr:1;
 	u8  cd_bit:1;
 #elif defined (__BIG_ENDIAN_BITFIELD)
 	u8  cd_bit:1;
-	u8  reserved_bit:1;
+	u8  next_hdr:1;
 	u8  pad_len:6;
 #else
 #error	"Please fix <asm/byteorder.h>"
@@ -52,4 +52,22 @@  struct rmnet_map_ul_csum_header {
 #endif
 } __aligned(1);
 
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8  next_hdr:1;
+	u8  header_type:7;
+	u8  hw_reserved:7;
+	u8  csum_valid_required:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	u8  header_type:7;
+	u8  next_hdr:1;
+	u8  csum_valid_required:1;
+	u8  hw_reserved:7;
+#else
+#error	"Please fix <asm/byteorder.h>"
+#endif
+	__be16 reserved;
+} __aligned(1);
+
 #endif /* !(_LINUX_IF_RMNET_H_) */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 82708c6..838bd29 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -1233,6 +1233,7 @@  enum {
 #define RMNET_FLAGS_INGRESS_MAP_COMMANDS          (1U << 1)
 #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4           (1U << 2)
 #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4            (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5           (1U << 4)
 
 enum {
 	IFLA_RMNET_UNSPEC,