| Message ID | 1610292623-15564-12-git-send-email-stefanc@marvell.com (mailing list archive) |
|---|---|
| State | RFC |
| Delegated to: | Netdev Maintainers |
| Series | net: mvpp2: Add TX Flow Control support |
| Context | Check | Description |
|---|---|---|
| netdev/cover_letter | success | Link |
| netdev/fixes_present | success | Link |
| netdev/patch_count | fail | Series longer than 15 patches |
| netdev/tree_selection | success | Clearly marked for net-next |
| netdev/subject_prefix | success | Link |
| netdev/cc_maintainers | success | CCed 5 of 5 maintainers |
| netdev/source_inline | success | Was 0 now: 0 |
| netdev/verify_signedoff | success | Link |
| netdev/module_param | success | Was 0 now: 0 |
| netdev/build_32bit | fail | Errors and warnings before: 7 this patch: 13 |
| netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
| netdev/verify_fixes | success | Link |
| netdev/checkpatch | warning | WARNING: 'conditon' may be misspelled - perhaps 'condition'? |
| netdev/build_allmodconfig_warn | fail | Errors and warnings before: 7 this patch: 13 |
| netdev/header_inline | success | Link |
| netdev/stable | success | Stable not CCed |
On Sun, Jan 10, 2021 at 05:30:15PM +0200, stefanc@marvell.com wrote:
> From: Stefan Chulski <stefanc@marvell.com>
>
> This patch did not change any functionality.
> Added flow control RXQ and BM pool config callbacks that would be
> used to configure RXQ and BM pool thresholds.
> APIs also will disable/enable RXQ and pool Flow Control polling.
>
> In this stage BM pool and RXQ has same stop/start thresholds
> defined in code.
> Also there are common thresholds for all RXQs.
>
> Signed-off-by: Stefan Chulski <stefanc@marvell.com>
> ---
>  drivers/net/ethernet/marvell/mvpp2/mvpp2.h      |  51 +++++-
>  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 169 ++++++++++++++++++++
>  2 files changed, 216 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
> index 4d58af6..0ba0598 100644
> --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
> +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
> @@ -763,10 +763,53 @@
>  	((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
>
>  /* MSS Flow control */
> -#define MSS_SRAM_SIZE		0x800
> -#define FC_QUANTA		0xFFFF
> -#define FC_CLK_DIVIDER		0x140
> -#define MSS_THRESHOLD_STOP	768
> +#define MSS_SRAM_SIZE				0x800
> +#define MSS_FC_COM_REG				0
> +#define FLOW_CONTROL_ENABLE_BIT			BIT(0)
> +#define FLOW_CONTROL_UPDATE_COMMAND_BIT		BIT(31)
> +#define FC_QUANTA				0xFFFF
> +#define FC_CLK_DIVIDER				0x140
> +
> +#define MSS_BUF_POOL_BASE			0x40
> +#define MSS_BUF_POOL_OFFS			4
> +#define MSS_BUF_POOL_REG(id)			(MSS_BUF_POOL_BASE \
> +						+ (id) * MSS_BUF_POOL_OFFS)
> +
> +#define MSS_BUF_POOL_STOP_MASK			0xFFF
> +#define MSS_BUF_POOL_START_MASK			(0xFFF << MSS_BUF_POOL_START_OFFS)
> +#define MSS_BUF_POOL_START_OFFS			12
> +#define MSS_BUF_POOL_PORTS_MASK			(0xF << MSS_BUF_POOL_PORTS_OFFS)
> +#define MSS_BUF_POOL_PORTS_OFFS			24
> +#define MSS_BUF_POOL_PORT_OFFS(id)		(0x1 << \
> +						((id) + MSS_BUF_POOL_PORTS_OFFS))
> +
> +#define MSS_RXQ_TRESH_BASE			0x200
> +#define MSS_RXQ_TRESH_OFFS			4
> +#define MSS_RXQ_TRESH_REG(q, fq)		(MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
> +						* MSS_RXQ_TRESH_OFFS))
> +
> +#define MSS_RXQ_TRESH_START_MASK		0xFFFF
> +#define MSS_RXQ_TRESH_STOP_MASK			(0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
> +#define MSS_RXQ_TRESH_STOP_OFFS			16
> +
> +#define MSS_RXQ_ASS_BASE			0x80
> +#define MSS_RXQ_ASS_OFFS			4
> +#define MSS_RXQ_ASS_PER_REG			4
> +#define MSS_RXQ_ASS_PER_OFFS			8
> +#define MSS_RXQ_ASS_PORTID_OFFS			0
> +#define MSS_RXQ_ASS_PORTID_MASK			0x3
> +#define MSS_RXQ_ASS_HOSTID_OFFS			2
> +#define MSS_RXQ_ASS_HOSTID_MASK			0x3F
> +
> +#define MSS_RXQ_ASS_Q_BASE(q, fq)		((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
> +						* MSS_RXQ_ASS_PER_OFFS)
> +#define MSS_RXQ_ASS_PQ_BASE(q, fq)		((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
> +						* MSS_RXQ_ASS_OFFS)
> +#define MSS_RXQ_ASS_REG(q, fq)			(MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
> +
> +#define MSS_THRESHOLD_STOP			768
> +#define MSS_THRESHOLD_START			1024
> +
>
>  /* RX buffer constants */
>  #define MVPP2_SKB_SHINFO_SIZE \
> diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
> index bc4b8069..19648c4 100644
> --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
> +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
> @@ -744,6 +744,175 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
>  	return data;
>  }
>
> +/* Routine calculate single queue shares address space */
> +static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
> +{
> +	/* If number of CPU's greater than number of threads, return last
> +	 * address space
> +	 */
> +	if (num_active_cpus() >= MVPP2_MAX_THREADS)
> +		return MVPP2_MAX_THREADS - 1;
> +
> +	return num_active_cpus();

Firstly - this can be written as:

	return min(num_active_cpus(), MVPP2_MAX_THREADS - 1);

Secondly - what if the number of active CPUs changes, for example due to
hotplug activity? What if we boot with maxcpus=1 and then bring the other
CPUs online after networking has been started? The number of active CPUs
is dynamically managed via the scheduler as CPUs are brought online or
offline.

> +/* Routine enable flow control for RXQs conditon */
> +void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
...
> +/* Routine disable flow control for RXQs conditon */
> +void mvpp2_rxq_disable_fc(struct mvpp2_port *port)

Nothing seems to call these in this patch, so on its own, it's not
obvious how these are being called, and therefore what remedy to suggest
for num_active_cpus().
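For reference, the suggested one-liner as it might sit in the helper — a
sketch, not the committed fix. `min_t()` is used here rather than plain
`min()` because `num_active_cpus()` returns `unsigned int` while
`MVPP2_MAX_THREADS - 1` is a signed constant, and the kernel's `min()`
macro refuses mismatched types at build time:

```c
/* Sketch of the review suggestion; min_t() avoids the signed/unsigned
 * type mismatch that plain min() would reject.
 */
static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
{
	return min_t(int, num_active_cpus(), MVPP2_MAX_THREADS - 1);
}
```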
> > +/* Routine calculate single queue shares address space */
> > +static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
> > +{
> > +	/* If number of CPU's greater than number of threads, return last
> > +	 * address space
> > +	 */
> > +	if (num_active_cpus() >= MVPP2_MAX_THREADS)
> > +		return MVPP2_MAX_THREADS - 1;
> > +
> > +	return num_active_cpus();
>
> Firstly - this can be written as:
>
> 	return min(num_active_cpus(), MVPP2_MAX_THREADS - 1);

OK.

> Secondly - what if the number of active CPUs changes, for example due to
> hotplug activity? What if we boot with maxcpus=1 and then bring the
> other CPUs online after networking has been started? The number of
> active CPUs is dynamically managed via the scheduler as CPUs are
> brought online or offline.
>
> > +/* Routine enable flow control for RXQs conditon */
> > +void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
> ...
> > +/* Routine disable flow control for RXQs conditon */
> > +void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
>
> Nothing seems to call these in this patch, so on its own, it's not
> obvious how these are being called, and therefore what remedy to suggest
> for num_active_cpus().

I don't think the current driver supports CPU hotplug; anyway, I can
remove num_active_cpus() and just use the shared RX IRQ ID.

Thanks.
On Sun, Jan 10, 2021 at 06:24:30PM +0000, Stefan Chulski wrote:
> > > +/* Routine calculate single queue shares address space */
> > > +static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
> > > +{
> > > +	/* If number of CPU's greater than number of threads, return last
> > > +	 * address space
> > > +	 */
> > > +	if (num_active_cpus() >= MVPP2_MAX_THREADS)
> > > +		return MVPP2_MAX_THREADS - 1;
> > > +
> > > +	return num_active_cpus();
> >
> > Firstly - this can be written as:
> >
> > 	return min(num_active_cpus(), MVPP2_MAX_THREADS - 1);
>
> OK.
>
> > Secondly - what if the number of active CPUs changes, for example due
> > to hotplug activity? What if we boot with maxcpus=1 and then bring
> > the other CPUs online after networking has been started? The number
> > of active CPUs is dynamically managed via the scheduler as CPUs are
> > brought online or offline.
> >
> > > +/* Routine enable flow control for RXQs conditon */
> > > +void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
> > ...
> > > +/* Routine disable flow control for RXQs conditon */
> > > +void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
> >
> > Nothing seems to call these in this patch, so on its own, it's not
> > obvious how these are being called, and therefore what remedy to
> > suggest for num_active_cpus().
>
> I don't think the current driver supports CPU hotplug; anyway, I can
> remove num_active_cpus() and just use the shared RX IRQ ID.

Sorry, but that is not really a decision the driver can make. It is part
of a kernel that _does_ support CPU hotplug, and the online CPUs can be
changed today.

It is likely that every distro out there builds the kernel with CPU
hotplug enabled.

If changing the online CPUs causes the driver to misbehave, that is
a(nother) bug with the driver.
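To give a sense of what genuinely tracking hotplug would entail — purely
a hypothetical illustration, not something this series proposes — a
driver would register callbacks with the CPU hotplug state machine. The
callback and helper names below are invented for the sketch:

```c
#include <linux/cpuhotplug.h>

/* Hypothetical: invoked for each CPU as it comes online (and once per
 * already-online CPU at registration time).
 */
static int mvpp2_cpu_online(unsigned int cpu)
{
	/* Recompute anything derived from the set of online CPUs here */
	return 0;
}

static int mvpp2_register_cpuhp_example(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a state slot dynamically and
	 * returns its number (> 0) on success.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/mvpp2:online",
				mvpp2_cpu_online, NULL);
	return ret < 0 ? ret : 0;
}
```

As the thread concludes, the simpler remedy is to stop depending on the
active-CPU count in the first place.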
> Sorry, but that is not really a decision the driver can make. It is
> part of a kernel that _does_ support CPU hotplug, and the online CPUs
> can be changed today.
>
> It is likely that every distro out there builds the kernel with CPU
> hotplug enabled.
>
> If changing the online CPUs causes the driver to misbehave, that is
> a(nother) bug with the driver.

This function doesn't really need to know num_active_cpus(), only the
host ID used by the shared RX interrupt in single queue mode. The host
ID is just the register address space used to access the PPv2 register
space. So I can remove this use of num_active_cpus().

Regards,
Stefan
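A minimal sketch of the remedy Stefan describes, reusing the queue-vector
bookkeeping the driver already keeps per port (the walk over
`port->qvecs[]` and its `type`/`sw_thread_id` fields is an assumption
drawn from the existing driver, not code from this series). The host ID
is fixed at configuration time, so CPU hotplug can no longer change it;
the 6-bit `MSS_RXQ_ASS_HOSTID_MASK` field easily holds any such index:

```c
/* Sketch: derive the flow-control host ID from the address space bound
 * to the shared RX interrupt instead of from num_active_cpus().
 */
static int mvpp22_shared_rxq_host_id(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		if (port->qvecs[i].type == MVPP2_QUEUE_VECTOR_SHARED)
			return port->qvecs[i].sw_thread_id;

	return 0;	/* single resource mode: address space 0 */
}
```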
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 4d58af6..0ba0598 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -763,10 +763,53 @@
 	((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
 
 /* MSS Flow control */
-#define MSS_SRAM_SIZE		0x800
-#define FC_QUANTA		0xFFFF
-#define FC_CLK_DIVIDER		0x140
-#define MSS_THRESHOLD_STOP	768
+#define MSS_SRAM_SIZE				0x800
+#define MSS_FC_COM_REG				0
+#define FLOW_CONTROL_ENABLE_BIT			BIT(0)
+#define FLOW_CONTROL_UPDATE_COMMAND_BIT		BIT(31)
+#define FC_QUANTA				0xFFFF
+#define FC_CLK_DIVIDER				0x140
+
+#define MSS_BUF_POOL_BASE			0x40
+#define MSS_BUF_POOL_OFFS			4
+#define MSS_BUF_POOL_REG(id)			(MSS_BUF_POOL_BASE \
+						+ (id) * MSS_BUF_POOL_OFFS)
+
+#define MSS_BUF_POOL_STOP_MASK			0xFFF
+#define MSS_BUF_POOL_START_MASK			(0xFFF << MSS_BUF_POOL_START_OFFS)
+#define MSS_BUF_POOL_START_OFFS			12
+#define MSS_BUF_POOL_PORTS_MASK			(0xF << MSS_BUF_POOL_PORTS_OFFS)
+#define MSS_BUF_POOL_PORTS_OFFS			24
+#define MSS_BUF_POOL_PORT_OFFS(id)		(0x1 << \
+						((id) + MSS_BUF_POOL_PORTS_OFFS))
+
+#define MSS_RXQ_TRESH_BASE			0x200
+#define MSS_RXQ_TRESH_OFFS			4
+#define MSS_RXQ_TRESH_REG(q, fq)		(MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
+						* MSS_RXQ_TRESH_OFFS))
+
+#define MSS_RXQ_TRESH_START_MASK		0xFFFF
+#define MSS_RXQ_TRESH_STOP_MASK			(0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
+#define MSS_RXQ_TRESH_STOP_OFFS			16
+
+#define MSS_RXQ_ASS_BASE			0x80
+#define MSS_RXQ_ASS_OFFS			4
+#define MSS_RXQ_ASS_PER_REG			4
+#define MSS_RXQ_ASS_PER_OFFS			8
+#define MSS_RXQ_ASS_PORTID_OFFS			0
+#define MSS_RXQ_ASS_PORTID_MASK			0x3
+#define MSS_RXQ_ASS_HOSTID_OFFS			2
+#define MSS_RXQ_ASS_HOSTID_MASK			0x3F
+
+#define MSS_RXQ_ASS_Q_BASE(q, fq)		((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
+						* MSS_RXQ_ASS_PER_OFFS)
+#define MSS_RXQ_ASS_PQ_BASE(q, fq)		((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
+						* MSS_RXQ_ASS_OFFS)
+#define MSS_RXQ_ASS_REG(q, fq)			(MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
+
+#define MSS_THRESHOLD_STOP			768
+#define MSS_THRESHOLD_START			1024
+
 
 /* RX buffer constants */
 #define MVPP2_SKB_SHINFO_SIZE \
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index bc4b8069..19648c4 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -744,6 +744,175 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
 	return data;
 }
 
+/* Routine calculate single queue shares address space */
+static int mvpp22_calc_shared_addr_space(struct mvpp2_port *port)
+{
+	/* If number of CPU's greater than number of threads, return last
+	 * address space
+	 */
+	if (num_active_cpus() >= MVPP2_MAX_THREADS)
+		return MVPP2_MAX_THREADS - 1;
+
+	return num_active_cpus();
+}
+
+/* Routine enable flow control for RXQs conditon */
+void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+	int val, cm3_state, host_id, q;
+	int fq = port->first_rxq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Remove Flow control enable bit to prevent race between FW and Kernel
+	 * If Flow control were enabled, it would be re-enabled.
+	 */
+	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+	val &= ~FLOW_CONTROL_ENABLE_BIT;
+	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	/* Set same Flow control for all RXQs */
+	for (q = 0; q < port->nrxqs; q++) {
+		/* Set stop and start Flow control RXQ thresholds */
+		val = MSS_THRESHOLD_START;
+		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+		/* Set RXQ port ID */
+		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) +
+			 MSS_RXQ_ASS_HOSTID_OFFS));
+
+		/* Calculate RXQ host ID:
+		 * In Single queue mode: Host ID equal to Host ID used for
+		 * shared RX interrupt
+		 * In Multi queue mode: Host ID equal to number of
+		 * RXQ ID / number of CoS queues
+		 * In Single resource mode: Host ID always equal to 0
+		 */
+		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+			host_id = mvpp22_calc_shared_addr_space(port);
+		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+			host_id = q;
+		else
+			host_id = 0;
+
+		/* Set RXQ host ID */
+		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq) +
+			MSS_RXQ_ASS_HOSTID_OFFS));
+
+		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+	}
+
+	/* Notify Firmware that Flow control config space ready for update */
+	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+	val |= cm3_state;
+	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Routine disable flow control for RXQs conditon */
+void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+	int val, cm3_state, q;
+	unsigned long flags;
+	int fq = port->first_rxq;
+
+	spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Remove Flow control enable bit to prevent race between FW and Kernel
+	 * If Flow control were enabled, it would be re-enabled.
+	 */
+	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+	val &= ~FLOW_CONTROL_ENABLE_BIT;
+	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	/* Disable Flow control for all RXQs */
+	for (q = 0; q < port->nrxqs; q++) {
+		/* Set threshold 0 to disable Flow control */
+		val = 0;
+		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq) +
+			 MSS_RXQ_ASS_HOSTID_OFFS));
+
+		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+	}
+
+	/* Notify Firmware that Flow control config space ready for update */
+	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+	val |= cm3_state;
+	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Routine disable/enable flow control for BM pool conditon */
+void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+			     struct mvpp2_bm_pool *pool,
+			     bool en)
+{
+	int val, cm3_state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+	/* Remove Flow control enable bit to prevent race between FW and Kernel
+	 * If Flow control were enabled, it would be re-enabled.
+	 */
+	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+	val &= ~FLOW_CONTROL_ENABLE_BIT;
+	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	/* Check if BM pool should be enabled/disable */
+	if (en) {
+		/* Set BM pool start and stop thresholds per port */
+		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+		val &= ~MSS_BUF_POOL_START_MASK;
+		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+		val &= ~MSS_BUF_POOL_STOP_MASK;
+		val |= MSS_THRESHOLD_STOP;
+		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+	} else {
+		/* Remove BM pool from the port */
+		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+		/* Zero BM pool start and stop thresholds to disable pool
+		 * flow control if pool empty (not used by any port)
+		 */
+		if (!pool->buf_num) {
+			val &= ~MSS_BUF_POOL_START_MASK;
+			val &= ~MSS_BUF_POOL_STOP_MASK;
+		}
+
+		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+	}
+
+	/* Notify Firmware that Flow control config space ready for update */
+	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+	val |= cm3_state;
+	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
 /* Release buffer to BM */
 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
 				     dma_addr_t buf_dma_addr,
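As noted in review, nothing in this patch calls the new helpers; later
patches in the series are expected to wire them up. Purely as a
hypothetical illustration of the intended shape (the function name and
`tx_pause` parameter follow phylink's mac_link_up() convention and are
assumptions, not code from this series):

```c
/* Hypothetical call site, not part of this patch: toggle RXQ flow
 * control when the link comes up, based on whether pause frames were
 * negotiated with the link partner.
 */
static void example_fc_on_link_up(struct mvpp2_port *port, bool tx_pause)
{
	if (tx_pause)
		mvpp2_rxq_enable_fc(port);
	else
		mvpp2_rxq_disable_fc(port);
}
```

For orientation in the register macros above: each MSS_RXQ_ASS_REG()
packs four queues at 8 bits apiece, so global RXQ 37 (fq = 32, q = 5)
lands at offset 0x80 + (37 / 4) * 4 = 0xa4, bit (37 % 4) * 8 = 8, with
the 2-bit port ID in bits 8..9 and the 6-bit host ID in bits 10..15.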