[net-next,3/3] net: mvneta: Add naive RSS support

Message ID 1448463243-21057-4-git-send-email-gregory.clement@free-electrons.com (mailing list archive)
State New, archived

Commit Message

Gregory CLEMENT Nov. 25, 2015, 2:54 p.m. UTC
This patch adds support for the RSS-related ethtool functions.
Currently it only uses one entry in the indirection table, which
allows associating an mvneta interface with a given CPU.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 125 ++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
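
For context, the new hooks are exercised from userspace through
ethtool's -X/-x options. A hypothetical usage sketch (the interface
name and queue number are examples, not taken from the patch) that
steers the single indirection-table entry to RX queue 2, and therefore
to whichever CPU that queue is mapped to:

    # hypothetical: place the one-entry table on RX queue 2, then read it back
    ethtool -X eth0 weight 0 0 1
    ethtool -x eth0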

Comments

Thomas Petazzoni Nov. 25, 2015, 4:55 p.m. UTC | #1
Gregory,

On Wed, 25 Nov 2015 15:54:03 +0100, Gregory CLEMENT wrote:

>  	pp->rxq_def = rxq_def;
>  
> +	pp->indir[0] = rxq_def;

So now we have the RX queue in both pp->rxq_def and pp->indir[0] ? Is
this really useful ?

Also, if the RX queue becomes a per-port definition, why do you keep
the global rxq_def variable ?

Thomas
Gregory CLEMENT Nov. 25, 2015, 5:08 p.m. UTC | #2
Hi Thomas,
 
 On Wed, Nov 25 2015, Thomas Petazzoni <thomas.petazzoni@free-electrons.com> wrote:

> Gregory,
>
> On Wed, 25 Nov 2015 15:54:03 +0100, Gregory CLEMENT wrote:
>
>>  	pp->rxq_def = rxq_def;
>>  
>> +	pp->indir[0] = rxq_def;
>
> So now we have the RX queue in both pp->rxq_def and pp->indir[0] ? Is
> this really useful ?

Currently pp->rxq_def and pp->indir[0] are the same, but only because
there is a single entry in the indirection table. Once all 256 entries
are used, they won't be the same anymore. But we will still need
rxq_def, as the mvneta IP needs a reference to a default RX queue, for
example for MVNETA_PORT_CONFIG_DEFL_VALUE.
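
To make the difference concrete, here is a rough sketch (not from the
patch; the hash variable is hypothetical) of what the lookup would
become once the full table is populated:

	/* hypothetical: with all 256 entries in use, the RX queue for
	 * a flow would come from the hash-indexed table...
	 */
	rxq = pp->indir[hash % MVNETA_RSS_LU_TABLE_SIZE];

	/* ...while rxq_def keeps seeding the default-queue register */
	mvreg_write(pp, MVNETA_PORT_CONFIG,
		    MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def));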

>
> Also, if the RX queue becomes a per-port definition, why do you keep
> the global rxq_def variable ?

The global rxq_def variable is used for the module parameter. Even if
we can change it later, it is still useful to be able to set the
default value. However, the parameter is set at module level, not at
port level; that's why we still need this variable.
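
For reference, the declaration behind that module parameter in
mvneta.c looks roughly like this (quoted from memory, so treat it as a
sketch rather than the exact source):

	/* Default RX queue, set once for the whole module at load time */
	static int rxq_def;
	module_param(rxq_def, int, S_IRUGO);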

Thanks,

Gregory
Marcin Wojtas Nov. 28, 2015, 4:43 p.m. UTC | #3
Hi Gregory,

> +
> +       /* update unicast mapping */
> +       mvneta_set_rx_mode(pp->dev);

I know it may be an ultimate level of nitpicking, but can you start a
comment with capital letter?:)

Best regards,
Marcin
Gregory CLEMENT Nov. 30, 2015, 4:40 p.m. UTC | #4
Hi Marcin,
 
 On Sat, Nov 28 2015, Marcin Wojtas <mw@semihalf.com> wrote:

> Hi Gregory,
>
>> +
>> +       /* update unicast mapping */
>> +       mvneta_set_rx_mode(pp->dev);
>
> I know it may be an ultimate level of nitpicking, but can you start a
> comment with capital letter?:)

If I get other reviews, then I can fix it in the next version. But if
you have a look at the other comments, not all of them start with a
capital letter.

Thanks,

Gregory

>
> Best regards,
> Marcin

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 8974ab084839..b4ed96a639ff 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -259,6 +259,11 @@ 
 
 #define MVNETA_TX_MTU_MAX		0x3ffff
 
+/* The RSS lookup table actually has 256 entries but we do not use
+ * all of them yet
+ */
+#define MVNETA_RSS_LU_TABLE_SIZE	1
+
 /* TSO header size */
 #define TSO_HEADER_SIZE 128
 
@@ -380,6 +385,8 @@  struct mvneta_port {
 	int use_inband_status:1;
 
 	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+
+	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -2483,6 +2490,18 @@  static void mvneta_percpu_unmask_interrupt(void *arg)
 		    MVNETA_MISCINTR_INTR_MASK);
 }
 
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	/* All the queues are masked, but actually only the ones
+	 * mapped to this CPU will be masked
+	 */
+	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	unsigned int cpu;
@@ -3173,6 +3192,106 @@  static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
 	return -EOPNOTSUPP;
 }
 
+static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+	return MVNETA_RSS_LU_TABLE_SIZE;
+}
+
+static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
+				    struct ethtool_rxnfc *info,
+				    u32 *rules __always_unused)
+{
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = rxq_number;
+		return 0;
+	case ETHTOOL_GRXFH:
+		return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int mvneta_config_rss(struct mvneta_port *pp)
+{
+	int cpu;
+	u32 val;
+
+	netif_tx_stop_all_queues(pp->dev);
+
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
+					 pp, true);
+
+	/* We have to synchronise on the napi of each CPU */
+	for_each_online_cpu(cpu) {
+		struct mvneta_pcpu_port *pcpu_port =
+			per_cpu_ptr(pp->ports, cpu);
+
+		napi_synchronize(&pcpu_port->napi);
+		napi_disable(&pcpu_port->napi);
+	}
+
+	pp->rxq_def = pp->indir[0];
+
+	/* update unicast mapping */
+	mvneta_set_rx_mode(pp->dev);
+
+	/* Update val of portCfg register according to all RxQueue types */
+	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
+	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+	/* Update the elected CPU matching the new rxq_def */
+	mvneta_percpu_elect(pp);
+
+	/* We have to synchronise on the napi of each CPU */
+	for_each_online_cpu(cpu) {
+		struct mvneta_pcpu_port *pcpu_port =
+			per_cpu_ptr(pp->ports, cpu);
+
+		napi_enable(&pcpu_port->napi);
+	}
+
+	netif_tx_start_all_queues(pp->dev);
+
+	return 0;
+}
+
+static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+				   const u8 *key, const u8 hfunc)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	/* We require at least one supported parameter to be changed
+	 * and no change in any of the unsupported parameters
+	 */
+	if (key ||
+	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+		return -EOPNOTSUPP;
+
+	if (!indir)
+		return 0;
+
+	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE * sizeof(u32));
+
+	return mvneta_config_rss(pp);
+}
+
+static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+				   u8 *hfunc)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (!indir)
+		return 0;
+
+	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE * sizeof(u32));
+
+	return 0;
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_open            = mvneta_open,
 	.ndo_stop            = mvneta_stop,
@@ -3197,6 +3316,10 @@  const struct ethtool_ops mvneta_eth_tool_ops = {
 	.get_strings	= mvneta_ethtool_get_strings,
 	.get_ethtool_stats = mvneta_ethtool_get_stats,
 	.get_sset_count	= mvneta_ethtool_get_sset_count,
+	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
+	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
+	.get_rxfh	= mvneta_ethtool_get_rxfh,
+	.set_rxfh	= mvneta_ethtool_set_rxfh,
 };
 
 /* Initialize hw */
@@ -3389,6 +3512,8 @@  static int mvneta_probe(struct platform_device *pdev)
 
 	pp->rxq_def = rxq_def;
 
+	pp->indir[0] = rxq_def;
+
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
 		err = PTR_ERR(pp->clk);