Message ID | 1613079324-20166-4-git-send-email-sharathv@codeaurora.org (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | Netdev Maintainers |
Headers | show |
Series | net:qualcomm:rmnet:Enable Mapv5. | expand |
Context | Check | Description |
---|---|---|
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | success | Link |
netdev/tree_selection | success | Guessed tree name to be net-next |
netdev/subject_prefix | success | Link |
netdev/cc_maintainers | warning | 1 maintainers not CCed: stranche@codeaurora.org |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | fail | Errors and warnings before: 6864 this patch: 7123 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | warning | CHECK: Unbalanced braces around else statement WARNING: line length of 83 exceeds 80 columns |
netdev/build_allmodconfig_warn | fail | Errors and warnings before: 6191 this patch: 7520 |
netdev/header_inline | success | Link |
netdev/stable | success | Stable not CCed |
Hi Sharath, Thank you for the patch! Perhaps something to improve: [auto build test WARNING on ipvs/master] [also build test WARNING on linus/master sparc-next/master v5.11-rc7 next-20210211] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Sharath-Chandra-Vurukala/docs-networking-Add-documentation-for-MAP-v5/20210212-063547 base: https://git.kernel.org/pub/scm/linux/kernel/git/horms/ipvs.git master config: x86_64-randconfig-a012-20210209 (attached as .config) compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project c9439ca36342fb6013187d0a69aef92736951476) reproduce (this is a W=1 build): wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # install x86_64 cross compiling tool for clang build # apt-get install binutils-x86-64-linux-gnu # https://github.com/0day-ci/linux/commit/7f0a1e35c1d1c17de5873aded88d5dadfedce2fb git remote add linux-review https://github.com/0day-ci/linux git fetch --no-tags linux-review Sharath-Chandra-Vurukala/docs-networking-Add-documentation-for-MAP-v5/20210212-063547 git checkout 7f0a1e35c1d1c17de5873aded88d5dadfedce2fb # save the attached .config to linux build tree COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64 If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot <lkp@intel.com> All warnings (new ones prefixed by >>): >> drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c:266:6: warning: no previous prototype for function 'rmnet_map_v5_checksum_uplink_packet' [-Wmissing-prototypes] void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, ^ drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c:266:1: note: declare 'static' if the function is not intended to be used outside of this translation unit void 
rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, ^ static >> drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c:459:6: warning: no previous prototype for function 'rmnet_map_v4_checksum_uplink_packet' [-Wmissing-prototypes] void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, ^ drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c:459:1: note: declare 'static' if the function is not intended to be used outside of this translation unit void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, ^ static 2 warnings generated. vim +/rmnet_map_v5_checksum_uplink_packet +266 drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c 265 > 266 void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, 267 struct rmnet_port *port, 268 struct net_device *orig_dev) 269 { 270 struct rmnet_priv *priv = netdev_priv(orig_dev); 271 struct rmnet_map_v5_csum_header *ul_header; 272 273 if (!(port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)) 274 return; 275 276 ul_header = (struct rmnet_map_v5_csum_header *) 277 skb_push(skb, sizeof(*ul_header)); 278 memset(ul_header, 0, sizeof(*ul_header)); 279 ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; 280 281 if (skb->ip_summed == CHECKSUM_PARTIAL) { 282 void *iph = (char *)ul_header + sizeof(*ul_header); 283 __sum16 *check; 284 void *trans; 285 u8 proto; 286 287 if (skb->protocol == htons(ETH_P_IP)) { 288 u16 ip_len = ((struct iphdr *)iph)->ihl * 4; 289 290 proto = ((struct iphdr *)iph)->protocol; 291 trans = iph + ip_len; 292 } 293 #if IS_ENABLED(CONFIG_IPV6) 294 else if (skb->protocol == htons(ETH_P_IPV6)) { 295 u16 ip_len = sizeof(struct ipv6hdr); 296 297 proto = ((struct ipv6hdr *)iph)->nexthdr; 298 trans = iph + ip_len; 299 } 300 #endif /* CONFIG_IPV6 */ 301 else { 302 priv->stats.csum_err_invalid_ip_version++; 303 goto sw_csum; 304 } 305 306 check = rmnet_map_get_csum_field(proto, trans); 307 if (check) { 308 skb->ip_summed = CHECKSUM_NONE; 309 /* Ask for checksum offloading */ 310 
ul_header->csum_valid_required = 1; 311 priv->stats.csum_hw++; 312 return; 313 } 314 } 315 316 sw_csum: 317 priv->stats.csum_sw++; 318 } 319 320 /* Adds MAP header to front of skb->data 321 * Padding is calculated and set appropriately in MAP header. Mux ID is 322 * initialized to 0. 323 */ 324 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, 325 int hdrlen, 326 struct rmnet_port *port, 327 int pad) 328 { 329 struct rmnet_map_header *map_header; 330 u32 padding, map_datalen; 331 u8 *padbytes; 332 333 map_datalen = skb->len - hdrlen; 334 map_header = (struct rmnet_map_header *) 335 skb_push(skb, sizeof(struct rmnet_map_header)); 336 memset(map_header, 0, sizeof(struct rmnet_map_header)); 337 338 /* Set next_hdr bit for csum offload packets */ 339 if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) { 340 map_header->next_hdr = 1; 341 } 342 343 if (pad == RMNET_MAP_NO_PAD_BYTES) { 344 map_header->pkt_len = htons(map_datalen); 345 return map_header; 346 } 347 348 padding = ALIGN(map_datalen, 4) - map_datalen; 349 350 if (padding == 0) 351 goto done; 352 353 if (skb_tailroom(skb) < padding) 354 return NULL; 355 356 padbytes = (u8 *)skb_put(skb, padding); 357 memset(padbytes, 0, padding); 358 359 done: 360 map_header->pkt_len = htons(map_datalen + padding); 361 map_header->pad_len = padding & 0x3F; 362 363 return map_header; 364 } 365 366 /* Deaggregates a single packet 367 * A whole new buffer is allocated for each portion of an aggregated frame. 368 * Caller should keep calling deaggregate() on the source skb until 0 is 369 * returned, indicating that there are no more packets to deaggregate. Caller 370 * is responsible for freeing the original skb. 
371 */ 372 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, 373 struct rmnet_port *port) 374 { 375 unsigned char *data = skb->data, *next_hdr = NULL; 376 struct rmnet_map_header *maph; 377 struct sk_buff *skbn; 378 u32 packet_len; 379 380 if (skb->len == 0) 381 return NULL; 382 383 maph = (struct rmnet_map_header *)skb->data; 384 packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); 385 386 if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) 387 packet_len += sizeof(struct rmnet_map_dl_csum_trailer); 388 else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) { 389 if (!maph->cd_bit) { 390 packet_len += sizeof(struct rmnet_map_v5_csum_header); 391 next_hdr = data + sizeof(*maph); 392 } 393 } 394 395 if (((int)skb->len - (int)packet_len) < 0) 396 return NULL; 397 398 /* Some hardware can send us empty frames. Catch them */ 399 if (ntohs(maph->pkt_len) == 0) 400 return NULL; 401 402 if (next_hdr && 403 ((struct rmnet_map_v5_csum_header *)next_hdr)->header_type != 404 RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD) 405 return NULL; 406 407 skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC); 408 if (!skbn) 409 return NULL; 410 411 skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); 412 skb_put(skbn, packet_len); 413 memcpy(skbn->data, skb->data, packet_len); 414 skb_pull(skb, packet_len); 415 416 return skbn; 417 } 418 419 /* Validates packet checksums. Function takes a pointer to 420 * the beginning of a buffer which contains the IP payload + 421 * padding + checksum trailer. 422 * Only IPv4 and IPv6 are supported along with TCP & UDP. 423 * Fragmented or tunneled packets are not supported. 
424 */ 425 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) 426 { 427 struct rmnet_priv *priv = netdev_priv(skb->dev); 428 struct rmnet_map_dl_csum_trailer *csum_trailer; 429 430 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { 431 priv->stats.csum_sw++; 432 return -EOPNOTSUPP; 433 } 434 435 csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); 436 437 if (!csum_trailer->valid) { 438 priv->stats.csum_valid_unset++; 439 return -EINVAL; 440 } 441 442 if (skb->protocol == htons(ETH_P_IP)) { 443 return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv); 444 } else if (skb->protocol == htons(ETH_P_IPV6)) { 445 #if IS_ENABLED(CONFIG_IPV6) 446 return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv); 447 #else 448 priv->stats.csum_err_invalid_ip_version++; 449 return -EPROTONOSUPPORT; 450 #endif 451 } else { 452 priv->stats.csum_err_invalid_ip_version++; 453 return -EPROTONOSUPPORT; 454 } 455 456 return 0; 457 } 458 > 459 void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, 460 struct net_device *orig_dev) 461 { 462 struct rmnet_priv *priv = netdev_priv(orig_dev); 463 struct rmnet_map_ul_csum_header *ul_header; 464 void *iphdr; 465 466 ul_header = (struct rmnet_map_ul_csum_header *) 467 skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); 468 469 if (unlikely(!(orig_dev->features & 470 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) 471 goto sw_csum; 472 473 if (skb->ip_summed == CHECKSUM_PARTIAL) { 474 iphdr = (char *)ul_header + 475 sizeof(struct rmnet_map_ul_csum_header); 476 477 if (skb->protocol == htons(ETH_P_IP)) { 478 rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); 479 priv->stats.csum_hw++; 480 return; 481 } else if (skb->protocol == htons(ETH_P_IPV6)) { 482 #if IS_ENABLED(CONFIG_IPV6) 483 rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); 484 priv->stats.csum_hw++; 485 return; 486 #else 487 priv->stats.csum_err_invalid_ip_version++; 488 goto sw_csum; 489 #endif 490 } else { 491 
priv->stats.csum_err_invalid_ip_version++; 492 } 493 } 494 495 sw_csum: 496 ul_header->csum_start_offset = 0; 497 ul_header->csum_insert_offset = 0; 498 ul_header->csum_enabled = 0; 499 ul_header->udp_ind = 0; 500 501 priv->stats.csum_sw++; 502 } 503 --- 0-DAY CI Kernel Test Service, Intel Corporation https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index d4d61471..8e64ca9 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. +/* Copyright (c) 2013-2014, 2016-2018, 2021 The Linux Foundation. * All rights reserved. * * RMNET Data configuration engine @@ -57,6 +57,7 @@ struct rmnet_priv_stats { u64 csum_fragmented_pkt; u64 csum_skipped; u64 csum_sw; + u64 csum_hw; }; struct rmnet_priv { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 70ad6a7..870be09 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -131,8 +131,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, struct rmnet_port *port, u8 mux_id, struct net_device *orig_dev) { - int required_headroom, additional_header_len; + int required_headroom, additional_header_len, csum_type; struct rmnet_map_header *map_header; + csum_type = 0; additional_header_len = 0; required_headroom = sizeof(struct rmnet_map_header); @@ -140,17 +141,25 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { additional_header_len = sizeof(struct rmnet_map_ul_csum_header); required_headroom += additional_header_len; + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4; + } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) { + additional_header_len = sizeof(struct rmnet_map_v5_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5; } + required_headroom += additional_header_len; + if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) return -ENOMEM; } - if (port->data_format & 
RMNET_FLAGS_EGRESS_MAP_CKSUMV4) - rmnet_map_checksum_uplink_packet(skb, orig_dev); + if (csum_type) { + rmnet_map_checksum_uplink_packet(skb, port, orig_dev, + csum_type); + } - map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); + map_header = rmnet_map_add_map_header(skb, additional_header_len, port, 0); if (!map_header) return -ENOMEM; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 55d293c..9b2aef0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -70,7 +70,9 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len); void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, - struct net_device *orig_dev); + struct rmnet_port *port, + struct net_device *orig_dev, + int csum_type); int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len); static u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 3d7e03f..600b9a2 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -263,12 +263,68 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, } #endif +void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, + struct rmnet_port *port, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_v5_csum_header *ul_header; + + if (!(port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)) + return; + + ul_header = (struct rmnet_map_v5_csum_header *) + skb_push(skb, sizeof(*ul_header)); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + if (skb->ip_summed == 
CHECKSUM_PARTIAL) { + void *iph = (char *)ul_header + sizeof(*ul_header); + __sum16 *check; + void *trans; + u8 proto; + + if (skb->protocol == htons(ETH_P_IP)) { + u16 ip_len = ((struct iphdr *)iph)->ihl * 4; + + proto = ((struct iphdr *)iph)->protocol; + trans = iph + ip_len; + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) { + u16 ip_len = sizeof(struct ipv6hdr); + + proto = ((struct ipv6hdr *)iph)->nexthdr; + trans = iph + ip_len; + } +#endif /* CONFIG_IPV6 */ + else { + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; + } + + check = rmnet_map_get_csum_field(proto, trans); + if (check) { + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; + priv->stats.csum_hw++; + return; + } + } + +sw_csum: + priv->stats.csum_sw++; +} + /* Adds MAP header to front of skb->data * Padding is calculated and set appropriately in MAP header. Mux ID is * initialized to 0. */ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, - int hdrlen, int pad) + int hdrlen, + struct rmnet_port *port, + int pad) { struct rmnet_map_header *map_header; u32 padding, map_datalen; @@ -279,6 +335,11 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, skb_push(skb, sizeof(struct rmnet_map_header)); memset(map_header, 0, sizeof(struct rmnet_map_header)); + /* Set next_hdr bit for csum offload packets */ + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) { + map_header->next_hdr = 1; + } + if (pad == RMNET_MAP_NO_PAD_BYTES) { map_header->pkt_len = htons(map_datalen); return map_header; @@ -395,11 +456,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) return 0; } -/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP - * packets that are supported for UL checksum offload. 
- */ -void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, - struct net_device *orig_dev) +void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) { struct rmnet_priv *priv = netdev_priv(orig_dev); struct rmnet_map_ul_csum_header *ul_header; @@ -418,10 +476,12 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, if (skb->protocol == htons(ETH_P_IP)) { rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; return; } else if (skb->protocol == htons(ETH_P_IPV6)) { #if IS_ENABLED(CONFIG_IPV6) rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; return; #else priv->stats.csum_err_invalid_ip_version++; @@ -441,6 +501,26 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, priv->stats.csum_sw++; } +/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP + * packets that are supported for UL checksum offload. + */ +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct rmnet_port *port, + struct net_device *orig_dev, + int csum_type) +{ + switch (csum_type) { + case RMNET_FLAGS_EGRESS_MAP_CKSUMV4: + rmnet_map_v4_checksum_uplink_packet(skb, orig_dev); + break; + case RMNET_FLAGS_EGRESS_MAP_CKSUMV5: + rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev); + break; + default: + break; + } +} + /* Process a MAPv5 packet header */ int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 838bd29..319865f 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1234,6 +1234,7 @@ enum { #define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) #define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) #define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5) enum { IFLA_RMNET_UNSPEC,
Add support for MAPv5 uplink packets. Based on the configuration, request HW checksum offload by setting the csum_valid_required bit in the MAPv5 packet header. Signed-off-by: Sharath Chandra Vurukala <sharathv@codeaurora.org> --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 3 +- .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 17 +++- drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 4 +- .../net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 92 ++++++++++++++++++++-- include/uapi/linux/if_link.h | 1 + 5 files changed, 105 insertions(+), 12 deletions(-)