[v3] net: stmmac: allocate separate page for buffer

Message ID 20241015121009.3903121-1-quic_jsuraj@quicinc.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series [v3] net: stmmac: allocate separate page for buffer

Checks

Context Check Description
netdev/series_format warning Single patches do not need cover letters; Target tree name not specified in the subject
netdev/tree_selection success Guessed tree name to be net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 9 of 9 maintainers
netdev/build_clang success Errors and warnings before: 3 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc fail Errors and warnings before: 38 this patch: 39
netdev/source_inline success Was 0 now: 0

Commit Message

Suraj Jaiswal Oct. 15, 2024, 12:10 p.m. UTC
Currently, for TSO, the page is mapped with dma_map_single() and the
resulting DMA address is then referenced (and offset) by multiple
descriptors until the whole region has been programmed into the
descriptors.
This makes it possible for stmmac_tx_clean() to dma_unmap() the first
of the already-processed descriptors while the rest are still being
processed by the DMA engine. This leads to an IOMMU fault, because the
DMA engine is using memory that has already been unmapped, as seen below:

arm-smmu 15000000.iommu: Unhandled context fault: fsr=0x402,
iova=0xfc401000, fsynr=0x60003, cbfrsynra=0x121, cb=38

Descriptor content:
     TDES0       TDES1   TDES2   TDES3
317: 0xfc400800  0x0     0x36    0xa02c0b68
318: 0xfc400836  0x0     0xb68   0x90000000

As seen above, descriptor 317 holds the page address and descriptor 318
holds the buffer address obtained by adding an offset to that page
address. If descriptor 317 is now cleaned (unmapped) as part of
tx_clean(), an SMMU fault is raised as soon as descriptor 318 is
accessed.

To fix this, let's map each descriptor's memory reference individually.
This way there's no risk of unmapping a region that's still being
referenced by the DMA engine in a later descriptor.
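
As an illustration, here is a minimal sketch of the per-descriptor
mapping loop (simplified from the stmmac_tso_allocator() change in the
patch below; variable names follow that function, and descriptor
programming and unwinding of earlier mappings are omitted):

	/* Illustrative sketch, not the exact driver code. */
	while (tmp_len > 0) {
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;
		offset = total_len - tmp_len;

		/* One mapping per descriptor, instead of one mapping
		 * (plus offsets) shared by the whole payload.
		 */
		curr_addr = dma_map_single(priv->device, data + offset,
					   buff_size, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, curr_addr))
			return -ENOMEM;

		/* Record the chunk so stmmac_tx_clean() unmaps exactly
		 * this descriptor's buffer and nothing more.
		 */
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = curr_addr;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = buff_size;

		tmp_len -= buff_size;
	}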

Signed-off-by: Suraj Jaiswal <quic_jsuraj@quicinc.com>
Signed-off-by: Sarosh Hasan <quic_sarohasa@quicinc.com>
---

Changes since v3:
- Updated stmmac_tso_allocator based on the DMA mask.
- Updated the return statement in the documentation.
- Removed duplicate code.
- Fixed reverse xmas tree order issue.

Changes since v2:
- Updated commit text with more details.
- Fixed reverse xmas tree order issue.

Changes since v1:
- Fixed function description.
- Fixed handling of return value.
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 68 +++++++++++++------
 1 file changed, 47 insertions(+), 21 deletions(-)

Comments

Furong Xu Oct. 17, 2024, 11:42 a.m. UTC | #1
Hi Suraj,

Thanks for this fix.

I tested your patch on XGMAC 3.20a; all goes well except for a performance
drop of ~10%.
Like Jakub Kicinski said in v2, this involves more dma_map() calls and does
add overhead :-/

I might have a better fix for this; I will send it for review and CC you.

Thanks.
Andrew Lunn Oct. 18, 2024, 2:32 a.m. UTC | #2
On Thu, Oct 17, 2024 at 07:42:58PM +0800, Furong Xu wrote:
> Hi Suraj,
> 
> Thanks for this fix.
> 
> I tested your patch on XGMAC 3.20a; all goes well except for a performance
> drop of ~10%.
> Like Jakub Kicinski said in v2, this involves more dma_map() calls and does
> add overhead :-/
> 
> I might have a better fix for this; I will send it for review and CC you.

Let's mark this as changes-requested, due to the performance drop.

It also introduces one more kdoc warning.


    Andrew

---
pw-bot: cr

Patch

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 83b654b7a9fd..e81461ac3424 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4136,18 +4136,23 @@  static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
 /**
  *  stmmac_tso_allocator - close entry point of the driver
  *  @priv: driver private structure
- *  @des: buffer start address
+ *  @addr: Contains either skb frag address or skb->data address
  *  @total_len: total length to fill in descriptors
  *  @last_segment: condition for the last descriptor
  *  @queue: TX queue index
+ * @is_skb_frag: condition to check whether skb data is part of fragment or not
  *  Description:
  *  This function fills descriptor and request new descriptors according to
  *  buffer length to fill
+ *  Return value:
+ *  0 on success else -ERRNO on fail
  */
-static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
-				 int total_len, bool last_segment, u32 queue)
+static int stmmac_tso_allocator(struct stmmac_priv *priv, void *addr,
+				int total_len, bool last_segment, u32 queue, bool is_skb_frag)
 {
 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+	unsigned char *data = addr;
+	unsigned int offset = 0;
 	struct dma_desc *desc;
 	u32 buff_size;
 	int tmp_len;
@@ -4161,20 +4166,42 @@  static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 						priv->dma_conf.dma_tx_size);
 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
 
+		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+					TSO_MAX_BUFF_SIZE : tmp_len;
+
 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
 		else
 			desc = &tx_q->dma_tx[tx_q->cur_tx];
 
-		curr_addr = des + (total_len - tmp_len);
+		offset = total_len - tmp_len;
+		if (!is_skb_frag) {
+			curr_addr = dma_map_single(priv->device, data + offset, buff_size,
+						   DMA_TO_DEVICE);
+
+			if (dma_mapping_error(priv->device, curr_addr))
+				return -ENOMEM;
+
+			tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
+		} else {
+			curr_addr = skb_frag_dma_map(priv->device, addr, offset,
+						     buff_size,
+						     DMA_TO_DEVICE);
+
+			if (dma_mapping_error(priv->device, curr_addr))
+				return -ENOMEM;
+
+			tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+		}
+		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = curr_addr;
+		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = buff_size;
+		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+
 		if (priv->dma_cap.addr64 <= 32)
 			desc->des0 = cpu_to_le32(curr_addr);
 		else
 			stmmac_set_desc_addr(priv, desc, curr_addr);
 
-		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
-			    TSO_MAX_BUFF_SIZE : tmp_len;
-
 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
 				0, 1,
 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
@@ -4182,6 +4209,7 @@  static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
 
 		tmp_len -= TSO_MAX_BUFF_SIZE;
 	}
+	return 0;
 }
 
 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
@@ -4351,25 +4379,23 @@  static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 		pay_len = 0;
 	}
 
-	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+	if (priv->dma_cap.addr64 <= 32) {
+		if (stmmac_tso_allocator(priv, skb->data,
+					 tmp_pay_len, nfrags == 0, queue, false))
+			goto dma_map_err;
+	} else {
+		if (stmmac_tso_allocator(priv, (skb->data + proto_hdr_len),
+					 tmp_pay_len, nfrags == 0, queue, false))
+			goto dma_map_err;
+	}
 
 	/* Prepare fragments */
 	for (i = 0; i < nfrags; i++) {
-		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		des = skb_frag_dma_map(priv->device, frag, 0,
-				       skb_frag_size(frag),
-				       DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, des))
+		if (stmmac_tso_allocator(priv, frag, skb_frag_size(frag),
+					 (i == nfrags - 1), queue, true))
 			goto dma_map_err;
-
-		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
-				     (i == nfrags - 1), queue);
-
-		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
-		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
-		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
-		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
 	}
 
 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;