@@ -84,6 +84,7 @@ struct k3_udma_glue_rx_channel {
struct k3_udma_glue_rx_flow *flows;
u32 flow_num;
u32 flows_ready;
+ bool single_fdq; /* one FDQ for all flows */
};
static void k3_udma_chan_dev_release(struct device *dev)
@@ -970,10 +971,13 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
ep_cfg = rx_chn->common.ep_config;
- if (xudma_is_pktdma(rx_chn->common.udmax))
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
- else
+ rx_chn->single_fdq = false;
+ } else {
rx_chn->udma_rchan_id = -1;
+ rx_chn->single_fdq = true;
+ }
/* request and cfg UDMAP RX channel */
rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
@@ -1103,6 +1107,9 @@ k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn
rx_chn->common.chan_dev.dma_coherent = true;
dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
DMA_BIT_MASK(48));
+ rx_chn->single_fdq = false;
+ } else {
+ rx_chn->single_fdq = true;
}
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
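Aside, not part of the patch: the two hunks above make the glue layer record the FDQ topology at channel-setup time instead of leaving it to callers. Condensed into a single illustrative line, and assuming only the request paths shown above, the new field amounts to:

	/* Illustrative sketch only: PKTDMA provides a free-descriptor queue
	 * (FDQ) per RX flow, while plain UDMAP shares one FDQ across all
	 * flows of the channel, so single_fdq is the inverse of the
	 * PKTDMA check.
	 */
	rx_chn->single_fdq = !xudma_is_pktdma(rx_chn->common.udmax);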
@@ -1453,7 +1460,7 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+ void (*cleanup)(void *data, dma_addr_t desc_dma))
{
struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
struct device *dev = rx_chn->common.dev;
@@ -1465,7 +1472,7 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
- if (skip_fdq)
+ if (rx_chn->single_fdq && flow_num)
goto do_reset;
/*
@@ -859,7 +859,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
fail_rx:
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ am65_cpsw_nuss_rx_cleanup);
am65_cpsw_destroy_xdp_rxqs(common);
@@ -915,7 +915,7 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
napi_disable(&rx_chn->flows[i].napi_rx);
hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ am65_cpsw_nuss_rx_cleanup);
}
k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
@@ -3364,7 +3364,7 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
for (i = 0; i < common->rx_ch_num_flows; i++)
k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i,
rx_chan,
- am65_cpsw_nuss_rx_cleanup, !!i);
+ am65_cpsw_nuss_rx_cleanup);
k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
@@ -955,7 +955,7 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
for (i = 0; i < num_flows; i++)
k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
+ prueth_rx_cleanup);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
@@ -138,8 +138,7 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma),
- bool skip_fdq);
+ void (*cleanup)(void *data, dma_addr_t desc_dma));
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
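
For reference, a minimal caller-side sketch of the updated interface. This is hypothetical driver code, not part of the patch; my_drv, my_drv_rx_cleanup and priv->rx_chn are made-up names. With the skip_fdq argument gone, a caller resets every flow the same way and the glue layer decides internally, based on rx_chn->single_fdq, whether the shared FDQ is drained only for flow 0 or per flow.

	/* Hypothetical caller: reset all RX flows using the new prototype. */
	static void my_drv_reset_rx(struct my_drv *priv, u32 num_flows)
	{
		u32 i;

		for (i = 0; i < num_flows; i++)
			k3_udma_glue_reset_rx_chn(priv->rx_chn, i, priv,
						  my_drv_rx_cleanup);
	}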