diff mbox series

[net,v3] net: ethernet: mtk_eth_soc: handle dma buffer size soc specific

Message ID 20240603192505.217881-1-linux@fw-web.de (mailing list archive)
State Accepted
Commit c57e558194430d10d5e5f4acd8a8655b68dade13
Delegated to: Netdev Maintainers
Headers show
Series [net,v3] net: ethernet: mtk_eth_soc: handle dma buffer size soc specific | expand

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 911 this patch: 911
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 13 of 14 maintainers
netdev/build_clang success Errors and warnings before: 905 this patch: 905
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 915 this patch: 915
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 98 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-06-04--18-00 (tests: 1045)

Commit Message

Frank Wunderlich June 3, 2024, 7:25 p.m. UTC
From: Frank Wunderlich <frank-w@public-files.de>

The mainline MTK ethernet driver has long suffered from rare but
annoying tx queue timeouts. We think that these are caused by fixed
dma sizes hardcoded for all SoCs.

We suspect this problem arises from a low number of free TX DMADs
when the TX ring is almost full.

The transmit timeout is caused by the Tx queue not waking up. The
Tx queue stops when the free counter is less than ring->thres, and
it will wake up once the free counter is greater than ring->thres.
If the CPU is too late to wake up the Tx queues, it may cause a
transmit timeout.
Therefore, we increased the TX and RX DMADs to improve this error
situation.

Use the dma-size implementation from the SDK in a per-SoC manner. In
contrast to the SDK we have no RSS feature yet, so all RX/TX ring
sizes should be raised from 512 to 2048 entries, except fqdma on
mt7988 (raised to 4096), to avoid the tx timeout issue.

Fixes: 656e705243fd ("net-next: mediatek: add support for MT7623 ethernet")
Suggested-by: Daniel Golle <daniel@makrotopia.org>
Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
---
based on SDK:

https://git01.mediatek.com/plugins/gitiles/openwrt/feeds/mtk-openwrt-feeds/+/fac194d6253d339e15c651c052b532a449a04d6e

v3:
- rephrase commit message with some information from mtk
- change the TX/RX DMA size from 512 to 2048 for all platforms.
  When RSS gets upstream, the TX size should be increased to 4096 and
  the RX size decreased to 1024 for MT798[1/6/8].
- drop fq_dma_size from rt5350/mt7628 as this does not have QDMA
v2:
- fix unused variable 'addr' in 32bit build
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 104 +++++++++++++-------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |   9 +-
 2 files changed, 77 insertions(+), 36 deletions(-)

Comments

Jacob Keller June 4, 2024, 10:25 p.m. UTC | #1
On 6/3/2024 12:25 PM, Frank Wunderlich wrote:
> @@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
>  						       cnt * soc->tx.desc_size,
>  						       &eth->phy_scratch_ring,
>  						       GFP_KERNEL);
> +
>  	if (unlikely(!eth->scratch_ring))
>  		return -ENOMEM;
>  
> -	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
> -	if (unlikely(!eth->scratch_head))
> -		return -ENOMEM;
> +	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
>  
> -	dma_addr = dma_map_single(eth->dma_dev,
> -				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
> -				  DMA_FROM_DEVICE);
> -	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
> -		return -ENOMEM;
> +	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
> +		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
> +		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
>  
> -	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
> +		if (unlikely(!eth->scratch_head[j]))
> +			return -ENOMEM;
>  
> -	for (i = 0; i < cnt; i++) {
> -		dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
> -		struct mtk_tx_dma_v2 *txd;
> +		dma_addr = dma_map_single(eth->dma_dev,
> +					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
> +					  DMA_FROM_DEVICE);
>  
> -		txd = eth->scratch_ring + i * soc->tx.desc_size;
> -		txd->txd1 = addr;
> -		if (i < cnt - 1)
> -			txd->txd2 = eth->phy_scratch_ring +
> -				    (i + 1) * soc->tx.desc_size;
> +		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
> +			return -ENOMEM;
>  
> -		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
> -		if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
> -			txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
> -		txd->txd4 = 0;
> -		if (mtk_is_netsys_v2_or_greater(eth)) {
> -			txd->txd5 = 0;
> -			txd->txd6 = 0;
> -			txd->txd7 = 0;
> -			txd->txd8 = 0;
> +		for (i = 0; i < cnt; i++) {
> +			struct mtk_tx_dma_v2 *txd;
> +
> +			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
> +			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
> +			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
> +				txd->txd2 = eth->phy_scratch_ring +
> +					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
> +
> +			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
> +			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
> +				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
> +
> +			txd->txd4 = 0;
> +			if (mtk_is_netsys_v2_or_greater(eth)) {
> +				txd->txd5 = 0;
> +				txd->txd6 = 0;
> +				txd->txd7 = 0;
> +				txd->txd8 = 0;
> +			}

This block of changes was a bit hard to follow, but I think the
result is that you end up allocating a different set of scratch_head
buffers per chunk, vs. the original only having one scratch_head per
device?

Perhaps you can explain, but we're now allocating a bunch of different
scratch_head pointers.. However, in the patch, the only places that we
modify scratch_head appear to be the allocation path and the free path..
but I can't seem to understand how that would impact the users of
scratch head? I guess it changes the dma_addr which then changes the txd
values we program?

Ok.

I sort of understand what's going on here, but it was a fair bit to fully
grok this flow.

Overall, I'm no expert on the part or DMA here, but:

Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
patchwork-bot+netdevbpf@kernel.org June 5, 2024, 1:10 p.m. UTC | #2
Hello:

This patch was applied to netdev/net.git (main)
by David S. Miller <davem@davemloft.net>:

On Mon,  3 Jun 2024 21:25:05 +0200 you wrote:
> From: Frank Wunderlich <frank-w@public-files.de>
> 
> The mainline MTK ethernet driver has long suffered from rare but
> annoying tx queue timeouts. We think that these are caused by fixed
> dma sizes hardcoded for all SoCs.
> 
> We suspect this problem arises from a low number of free TX DMADs
> when the TX ring is almost full.
> 
> [...]

Here is the summary with links:
  - [net,v3] net: ethernet: mtk_eth_soc: handle dma buffer size soc specific
    https://git.kernel.org/netdev/net/c/c57e55819443

You are awesome, thank you!
Bc-bocun Chen (陳柏村) June 6, 2024, 2:43 a.m. UTC | #3
On Tue, 2024-06-04 at 15:25 -0700, Jacob Keller wrote:
>  	 
> External email : Please do not click links or open attachments until
> you have verified the sender or the content.
>  
> 
> On 6/3/2024 12:25 PM, Frank Wunderlich wrote:
> > @@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth
> *eth)
> >         cnt * soc->tx.desc_size,
> >         &eth->phy_scratch_ring,
> >         GFP_KERNEL);
> > +
> >  if (unlikely(!eth->scratch_ring))
> >  return -ENOMEM;
> >  
> > -eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
> > -if (unlikely(!eth->scratch_head))
> > -return -ENOMEM;
> > +phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt -
> 1);
> >  
> > -dma_addr = dma_map_single(eth->dma_dev,
> > -  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
> > -  DMA_FROM_DEVICE);
> > -if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
> > -return -ENOMEM;
> > +for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size,
> MTK_FQ_DMA_LENGTH); j++) {
> > +len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
> > +eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE,
> GFP_KERNEL);
> >  
> > -phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt -
> 1);
> > +if (unlikely(!eth->scratch_head[j]))
> > +return -ENOMEM;
> >  
> > -for (i = 0; i < cnt; i++) {
> > -dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
> > -struct mtk_tx_dma_v2 *txd;
> > +dma_addr = dma_map_single(eth->dma_dev,
> > +  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
> > +  DMA_FROM_DEVICE);
> >  
> > -txd = eth->scratch_ring + i * soc->tx.desc_size;
> > -txd->txd1 = addr;
> > -if (i < cnt - 1)
> > -txd->txd2 = eth->phy_scratch_ring +
> > -    (i + 1) * soc->tx.desc_size;
> > +if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
> > +return -ENOMEM;
> >  
> > -txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
> > -if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
> > -txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
> > -txd->txd4 = 0;
> > -if (mtk_is_netsys_v2_or_greater(eth)) {
> > -txd->txd5 = 0;
> > -txd->txd6 = 0;
> > -txd->txd7 = 0;
> > -txd->txd8 = 0;
> > +for (i = 0; i < cnt; i++) {
> > +struct mtk_tx_dma_v2 *txd;
> > +
> > +txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc-
> >tx.desc_size;
> > +txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
> > +if (j * MTK_FQ_DMA_LENGTH + i < cnt)
> > +txd->txd2 = eth->phy_scratch_ring +
> > +    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
> > +
> > +txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
> > +if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
> > +txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i *
> MTK_QDMA_PAGE_SIZE);
> > +
> > +txd->txd4 = 0;
> > +if (mtk_is_netsys_v2_or_greater(eth)) {
> > +txd->txd5 = 0;
> > +txd->txd6 = 0;
> > +txd->txd7 = 0;
> > +txd->txd8 = 0;
> > +}
> 
> This block of change was a bit hard to understand what was going on,
> but
> I think I get the result is that you end up allocating different set
> of
> scratch_head per size vs the original only having one scratch_head
> per
> device?
> 
> Perhaps you can explain, but we're now allocating a bunch of
> different
> scratch_head pointers.. However, in the patch, the only places that
> we
> modify scratch_head appear to be the allocation path and the free
> path..
> but I can't seem to understand how that would impact the users of
> scratch head? I guess it changes the dma_addr which then changes the
> txd
> values we program?

In our hardware design, we need to allocate a large number of fq_dma
buffers for buffering in the hardware-accelerated path. Each fq_dma
buffer requires 2048 bytes of memory from the kernel. However, the
driver can only request up to 4 MB of contiguous memory at a time if we
do not want to request a large contiguous memory from the CMA
allocator. Therefore, in the previous driver code, we could only
allocate 2048 fq_dma buffers (2048 * 2048 bytes = 4 MB).
 
With the MT7988, the Ethernet bandwidth has increased to 2*10 Gbps,
which means we need to allocate more fq_dma buffers (increased to 4096)
to handle the buffering. Consequently, we need to modify the driver
code to allocate multiple contiguous memory and assign them into the
fq_dma ring.

> Ok.
> 
> I sort of understand whats going on here, but it was a fair bit to
> fully
> grok this flow.
> 
> Overall, I'm no expert on the part or DMA here, but:
> 
> Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
diff mbox series

Patch

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index cae46290a7ae..c84ce54a84a0 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1131,9 +1131,9 @@  static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
 	const struct mtk_soc_data *soc = eth->soc;
 	dma_addr_t phy_ring_tail;
-	int cnt = MTK_QDMA_RING_SIZE;
+	int cnt = soc->tx.fq_dma_size;
 	dma_addr_t dma_addr;
-	int i;
+	int i, j, len;
 
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
 		eth->scratch_ring = eth->sram_base;
@@ -1142,40 +1142,46 @@  static int mtk_init_fq_dma(struct mtk_eth *eth)
 						       cnt * soc->tx.desc_size,
 						       &eth->phy_scratch_ring,
 						       GFP_KERNEL);
+
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
-	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
-	if (unlikely(!eth->scratch_head))
-		return -ENOMEM;
+	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
 
-	dma_addr = dma_map_single(eth->dma_dev,
-				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
-				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-		return -ENOMEM;
+	for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+		len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+		eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
 
-	phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+		if (unlikely(!eth->scratch_head[j]))
+			return -ENOMEM;
 
-	for (i = 0; i < cnt; i++) {
-		dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-		struct mtk_tx_dma_v2 *txd;
+		dma_addr = dma_map_single(eth->dma_dev,
+					  eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+					  DMA_FROM_DEVICE);
 
-		txd = eth->scratch_ring + i * soc->tx.desc_size;
-		txd->txd1 = addr;
-		if (i < cnt - 1)
-			txd->txd2 = eth->phy_scratch_ring +
-				    (i + 1) * soc->tx.desc_size;
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+			return -ENOMEM;
 
-		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-		if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
-			txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
-		txd->txd4 = 0;
-		if (mtk_is_netsys_v2_or_greater(eth)) {
-			txd->txd5 = 0;
-			txd->txd6 = 0;
-			txd->txd7 = 0;
-			txd->txd8 = 0;
+		for (i = 0; i < cnt; i++) {
+			struct mtk_tx_dma_v2 *txd;
+
+			txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+			txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+			if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+				txd->txd2 = eth->phy_scratch_ring +
+					    (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+			txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+			if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+				txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+			txd->txd4 = 0;
+			if (mtk_is_netsys_v2_or_greater(eth)) {
+				txd->txd5 = 0;
+				txd->txd6 = 0;
+				txd->txd7 = 0;
+				txd->txd8 = 0;
+			}
 		}
 	}
 
@@ -2457,7 +2463,7 @@  static int mtk_tx_alloc(struct mtk_eth *eth)
 	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
 		ring_size = MTK_QDMA_RING_SIZE;
 	else
-		ring_size = MTK_DMA_SIZE;
+		ring_size = soc->tx.dma_size;
 
 	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
 			       GFP_KERNEL);
@@ -2465,8 +2471,8 @@  static int mtk_tx_alloc(struct mtk_eth *eth)
 		goto no_tx_mem;
 
 	if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
-		ring->dma = eth->sram_base + ring_size * sz;
-		ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+		ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+		ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
 	} else {
 		ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
 					       &ring->phys, GFP_KERNEL);
@@ -2588,6 +2594,7 @@  static void mtk_tx_clean(struct mtk_eth *eth)
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
 	const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+	const struct mtk_soc_data *soc = eth->soc;
 	struct mtk_rx_ring *ring;
 	int rx_data_len, rx_dma_size, tx_ring_size;
 	int i;
@@ -2595,7 +2602,7 @@  static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
 		tx_ring_size = MTK_QDMA_RING_SIZE;
 	else
-		tx_ring_size = MTK_DMA_SIZE;
+		tx_ring_size = soc->tx.dma_size;
 
 	if (rx_flag == MTK_RX_FLAGS_QDMA) {
 		if (ring_no)
@@ -2610,7 +2617,7 @@  static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
 	} else {
 		rx_data_len = ETH_DATA_LEN;
-		rx_dma_size = MTK_DMA_SIZE;
+		rx_dma_size = soc->rx.dma_size;
 	}
 
 	ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -3139,7 +3146,10 @@  static void mtk_dma_free(struct mtk_eth *eth)
 			mtk_rx_clean(eth, &eth->rx_ring[i], false);
 	}
 
-	kfree(eth->scratch_head);
+	for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+		kfree(eth->scratch_head[i]);
+		eth->scratch_head[i] = NULL;
+	}
 }
 
 static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -5052,11 +5062,14 @@  static const struct mtk_soc_data mt2701_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5076,11 +5089,14 @@  static const struct mtk_soc_data mt7621_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5102,11 +5118,14 @@  static const struct mtk_soc_data mt7622_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5127,11 +5146,14 @@  static const struct mtk_soc_data mt7623_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5150,11 +5172,14 @@  static const struct mtk_soc_data mt7629_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
 		.irq_done_mask = MTK_RX_DONE_INT,
 		.dma_l4_valid = RX_DMA_L4_VALID,
+		.dma_size = MTK_DMA_SIZE(2K),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
 	},
@@ -5176,6 +5201,8 @@  static const struct mtk_soc_data mt7981_data = {
 		.desc_size = sizeof(struct mtk_tx_dma_v2),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
@@ -5183,6 +5210,7 @@  static const struct mtk_soc_data mt7981_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };
 
@@ -5202,6 +5230,8 @@  static const struct mtk_soc_data mt7986_data = {
 		.desc_size = sizeof(struct mtk_tx_dma_v2),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
@@ -5209,6 +5239,7 @@  static const struct mtk_soc_data mt7986_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };
 
@@ -5228,6 +5259,8 @@  static const struct mtk_soc_data mt7988_data = {
 		.desc_size = sizeof(struct mtk_tx_dma_v2),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
+		.fq_dma_size = MTK_DMA_SIZE(4K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma_v2),
@@ -5235,6 +5268,7 @@  static const struct mtk_soc_data mt7988_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_V2,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
 		.dma_len_offset = 8,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };
 
@@ -5249,6 +5283,7 @@  static const struct mtk_soc_data rt5350_data = {
 		.desc_size = sizeof(struct mtk_tx_dma),
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 	.rx = {
 		.desc_size = sizeof(struct mtk_rx_dma),
@@ -5256,6 +5291,7 @@  static const struct mtk_soc_data rt5350_data = {
 		.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
 		.dma_max_len = MTK_TX_DMA_BUF_LEN,
 		.dma_len_offset = 16,
+		.dma_size = MTK_DMA_SIZE(2K),
 	},
 };
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 4eab30b44070..f5174f6cb1bb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -32,7 +32,9 @@ 
 #define MTK_TX_DMA_BUF_LEN	0x3fff
 #define MTK_TX_DMA_BUF_LEN_V2	0xffff
 #define MTK_QDMA_RING_SIZE	2048
-#define MTK_DMA_SIZE		512
+#define MTK_DMA_SIZE(x)		(SZ_##x)
+#define MTK_FQ_DMA_HEAD		32
+#define MTK_FQ_DMA_LENGTH	2048
 #define MTK_RX_ETH_HLEN		(ETH_HLEN + ETH_FCS_LEN)
 #define MTK_RX_HLEN		(NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
 #define MTK_DMA_DUMMY_DESC	0xffffffff
@@ -1176,6 +1178,8 @@  struct mtk_soc_data {
 		u32	desc_size;
 		u32	dma_max_len;
 		u32	dma_len_offset;
+		u32	dma_size;
+		u32	fq_dma_size;
 	} tx;
 	struct {
 		u32	desc_size;
@@ -1183,6 +1187,7 @@  struct mtk_soc_data {
 		u32	dma_l4_valid;
 		u32	dma_max_len;
 		u32	dma_len_offset;
+		u32	dma_size;
 	} rx;
 };
 
@@ -1264,7 +1269,7 @@  struct mtk_eth {
 	struct napi_struct		rx_napi;
 	void				*scratch_ring;
 	dma_addr_t			phy_scratch_ring;
-	void				*scratch_head;
+	void				*scratch_head[MTK_FQ_DMA_HEAD];
 	struct clk			*clks[MTK_CLK_MAX];
 
 	struct mii_bus			*mii_bus;