diff mbox series

[net] octeon_ep: Add SKB allocation failures handling in __octep_oq_process_rx()

Message ID 20240906063907.9591-1-amishin@t-argos.ru (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series [net] octeon_ep: Add SKB allocation failures handling in __octep_oq_process_rx() | expand

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 7 this patch: 7
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 8 of 8 maintainers
netdev/build_clang success Errors and warnings before: 16 this patch: 16
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 16 this patch: 16
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 72 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 3 this patch: 3
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-09-07--06-00 (tests: 722)

Commit Message

Aleksandr Mishin Sept. 6, 2024, 6:39 a.m. UTC
build_skb() returns NULL in case of a memory allocation failure so handle
it inside __octep_oq_process_rx() to avoid NULL pointer dereference.

__octep_oq_process_rx() is called during NAPI polling by the driver. If
skb allocation fails, keep on pulling packets out of the Rx DMA queue: we
shouldn't break the polling immediately and thus falsely indicate to the
octep_napi_poll() that the Rx pressure is going down. As there is no
associated skb in this case, don't process the packets and don't push them
up the network stack - they are skipped.

The common code with skb and index manipulations is extracted to make the
fix more readable and to avoid code duplication. Also, the 'alloc_failures'
counter is incremented to record the skb allocation error in driver statistics.

The suggested approach for handling buffer allocation failures in the NAPI
polling functions is also implemented in the Cavium Liquidio driver. It
has the same structure, namely in octeon_droq_fast_process_packets().

A similar situation is present in the __octep_vf_oq_process_rx() of the
Octeon VF driver. First we want to try the fix on __octep_oq_process_rx().

Compile tested only. Marvell folks, could you review and test this, please?

Found by Linux Verification Center (linuxtesting.org) with SVACE.

Fixes: 37d79d059606 ("octeon_ep: add Tx/Rx processing and interrupt support")
Signed-off-by: Aleksandr Mishin <amishin@t-argos.ru>
---
 .../net/ethernet/marvell/octeon_ep/octep_rx.c | 44 ++++++++++---------
 1 file changed, 24 insertions(+), 20 deletions(-)

Comments

Paolo Abeni Sept. 10, 2024, 11:39 a.m. UTC | #1
On 9/6/24 08:39, Aleksandr Mishin wrote:
> diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
> index 4746a6b258f0..e92afd1a372a 100644
> --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
> +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
> @@ -394,32 +394,32 @@ static int __octep_oq_process_rx(struct octep_device *oct,
>   			data_offset = OCTEP_OQ_RESP_HW_SIZE;
>   			rx_ol_flags = 0;
>   		}
> +
> +		skb = build_skb((void *)resp_hw, PAGE_SIZE);
> +		if (skb)
> +			skb_reserve(skb, data_offset);
> +		else
> +			oq->stats.alloc_failures++;
>   		rx_bytes += buff_info->len;

The packet is dropped, we should not increase 'rx_bytes'

> +		read_idx++;
> +		desc_used++;
> +		if (read_idx == oq->max_count)
> +			read_idx = 0;
>   
>   		if (buff_info->len <= oq->max_single_buffer_size) {
> -			skb = build_skb((void *)resp_hw, PAGE_SIZE);
> -			skb_reserve(skb, data_offset);
> -			skb_put(skb, buff_info->len);
> -			read_idx++;
> -			desc_used++;
> -			if (read_idx == oq->max_count)
> -				read_idx = 0;
> +			if (skb)
> +				skb_put(skb, buff_info->len);
>   		} else {
>   			struct skb_shared_info *shinfo;
>   			u16 data_len;
>   
> -			skb = build_skb((void *)resp_hw, PAGE_SIZE);
> -			skb_reserve(skb, data_offset);
>   			/* Head fragment includes response header(s);
>   			 * subsequent fragments contains only data.
>   			 */
> -			skb_put(skb, oq->max_single_buffer_size);
> -			read_idx++;
> -			desc_used++;
> -			if (read_idx == oq->max_count)
> -				read_idx = 0;
> -
> -			shinfo = skb_shinfo(skb);
> +			if (skb) {
> +				skb_put(skb, oq->max_single_buffer_size);
> +				shinfo = skb_shinfo(skb);
> +			}
>   			data_len = buff_info->len - oq->max_single_buffer_size;
>   			while (data_len) {
>   				dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
> @@ -434,10 +434,11 @@ static int __octep_oq_process_rx(struct octep_device *oct,
>   					data_len -= oq->buffer_size;
>   				}
>   
> -				skb_add_rx_frag(skb, shinfo->nr_frags,
> -						buff_info->page, 0,
> -						buff_info->len,
> -						buff_info->len);
> +				if (skb)
> +					skb_add_rx_frag(skb, shinfo->nr_frags,
> +							buff_info->page, 0,
> +							buff_info->len,
> +							buff_info->len);
>   				buff_info->page = NULL;
>   				read_idx++;
>   				desc_used++;
> @@ -446,6 +447,9 @@ static int __octep_oq_process_rx(struct octep_device *oct,
>   			}
>   		}
>   
> +		if (!skb)
> +			continue;

Instead of adding multiple checks for !skb, I think it would be better 
to implement a helper to unmap/flush all the fragment buffers used by 
the dropped packet, call such helper at skb allocation failure and 
continue looping earlier/at that point.

Thanks,

Paolo
diff mbox series

Patch

diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index 4746a6b258f0..e92afd1a372a 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -394,32 +394,32 @@  static int __octep_oq_process_rx(struct octep_device *oct,
 			data_offset = OCTEP_OQ_RESP_HW_SIZE;
 			rx_ol_flags = 0;
 		}
+
+		skb = build_skb((void *)resp_hw, PAGE_SIZE);
+		if (skb)
+			skb_reserve(skb, data_offset);
+		else
+			oq->stats.alloc_failures++;
 		rx_bytes += buff_info->len;
+		read_idx++;
+		desc_used++;
+		if (read_idx == oq->max_count)
+			read_idx = 0;
 
 		if (buff_info->len <= oq->max_single_buffer_size) {
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
-			skb_put(skb, buff_info->len);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
+			if (skb)
+				skb_put(skb, buff_info->len);
 		} else {
 			struct skb_shared_info *shinfo;
 			u16 data_len;
 
-			skb = build_skb((void *)resp_hw, PAGE_SIZE);
-			skb_reserve(skb, data_offset);
 			/* Head fragment includes response header(s);
 			 * subsequent fragments contains only data.
 			 */
-			skb_put(skb, oq->max_single_buffer_size);
-			read_idx++;
-			desc_used++;
-			if (read_idx == oq->max_count)
-				read_idx = 0;
-
-			shinfo = skb_shinfo(skb);
+			if (skb) {
+				skb_put(skb, oq->max_single_buffer_size);
+				shinfo = skb_shinfo(skb);
+			}
 			data_len = buff_info->len - oq->max_single_buffer_size;
 			while (data_len) {
 				dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
@@ -434,10 +434,11 @@  static int __octep_oq_process_rx(struct octep_device *oct,
 					data_len -= oq->buffer_size;
 				}
 
-				skb_add_rx_frag(skb, shinfo->nr_frags,
-						buff_info->page, 0,
-						buff_info->len,
-						buff_info->len);
+				if (skb)
+					skb_add_rx_frag(skb, shinfo->nr_frags,
+							buff_info->page, 0,
+							buff_info->len,
+							buff_info->len);
 				buff_info->page = NULL;
 				read_idx++;
 				desc_used++;
@@ -446,6 +447,9 @@  static int __octep_oq_process_rx(struct octep_device *oct,
 			}
 		}
 
+		if (!skb)
+			continue;
+
 		skb->dev = oq->netdev;
 		skb->protocol =  eth_type_trans(skb, skb->dev);
 		if (feat & NETIF_F_RXCSUM &&