
[net-next,12/13] qed: populate supported link modes maps on module init

Message ID 20200716115446.994-13-alobakin@marvell.com (mailing list archive)
State Superseded
Series qed/qede: add support for new operating modes

Commit Message

Alexander Lobakin July 16, 2020, 11:54 a.m. UTC
Simplify and lighten qed_set_link() by declaring static link mode maps
and populating them on module init. This way we save a significant
amount of text size at the small expense of __ro_after_init and
__initconst data (the latter is purged once module init is done).

Misc: sanitize exit callback.

Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
---
 drivers/net/ethernet/qlogic/qed/qed_main.c | 194 +++++++++++++--------
 1 file changed, 123 insertions(+), 71 deletions(-)
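
The core idea of the patch -- feed throwaway __initconst arrays into an
__ro_after_init lookup table once, at module init, instead of rebuilding
the same capability bitmaps on every qed_set_link() call -- is a generic
pattern. Before the patch itself, here is a minimal, standalone sketch of
that pattern. It is not part of the patch; all demo_* identifiers are
hypothetical and only illustrate where the data ends up:

#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical capability bits, for illustration only. */
enum { DEMO_CAP_A, DEMO_CAP_B, DEMO_CAP_C, DEMO_CAP_NUM };

/* Source list lives in .init.rodata and is freed after module init. */
static const u32 demo_init_caps[] __initconst = {
	DEMO_CAP_A,
	DEMO_CAP_C,
};

/* Destination bitmap is write-protected once init has finished. */
static DECLARE_BITMAP(demo_caps, DEMO_CAP_NUM) __ro_after_init;

static int __init demo_init(void)
{
	u32 i;

	/* Populate the runtime bitmap from the init-only array. */
	for (i = 0; i < ARRAY_SIZE(demo_init_caps); i++)
		__set_bit(demo_init_caps[i], demo_caps);

	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	/* Empty exit so the module is not marked "permanent". */
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

After demo_init() returns, the module loader frees the __initconst array
and the __ro_after_init bitmap becomes read-only, which is where the
text/data trade-off described in the commit message comes from. The empty
__exit handler mirrors what the patch does for qed: without module_exit()
the module could not be unloaded.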

Patch

diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index ff8e41694f65..28f13cd7bd9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -64,20 +64,122 @@  MODULE_VERSION(DRV_MODULE_VERSION);
 
 MODULE_FIRMWARE(QED_FW_FILE_NAME);
 
+/* MFW speed capabilities maps */
+
+struct qed_mfw_speed_map {
+	u32		mfw_val;
+
+	QED_LM_DECLARE(caps);
+
+	const u32	*cap_arr;
+	u32		arr_size;
+};
+
+#define QED_MFW_SPEED_MAP(type, arr)		\
+{						\
+	.mfw_val	= (type),		\
+	.cap_arr	= (arr),		\
+	.arr_size	= ARRAY_SIZE(arr),	\
+}
+
+static const u32 qed_mfw_legacy_1g[] __initconst = {
+	QED_LM_1000baseT_Full,
+	QED_LM_1000baseKX_Full,
+	QED_LM_1000baseX_Full,
+};
+
+static const u32 qed_mfw_legacy_10g[] __initconst = {
+	QED_LM_10000baseT_Full,
+	QED_LM_10000baseKR_Full,
+	QED_LM_10000baseKX4_Full,
+	QED_LM_10000baseR_FEC,
+	QED_LM_10000baseCR_Full,
+	QED_LM_10000baseSR_Full,
+	QED_LM_10000baseLR_Full,
+	QED_LM_10000baseLRM_Full,
+};
+
+static const u32 qed_mfw_legacy_20g[] __initconst = {
+	QED_LM_20000baseKR2_Full,
+};
+
+static const u32 qed_mfw_legacy_25g[] __initconst = {
+	QED_LM_25000baseKR_Full,
+	QED_LM_25000baseCR_Full,
+	QED_LM_25000baseSR_Full,
+};
+
+static const u32 qed_mfw_legacy_40g[] __initconst = {
+	QED_LM_40000baseLR4_Full,
+	QED_LM_40000baseKR4_Full,
+	QED_LM_40000baseCR4_Full,
+	QED_LM_40000baseSR4_Full,
+};
+
+static const u32 qed_mfw_legacy_50g[] __initconst = {
+	QED_LM_50000baseKR2_Full,
+	QED_LM_50000baseCR2_Full,
+	QED_LM_50000baseSR2_Full,
+};
+
+static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
+	QED_LM_100000baseKR4_Full,
+	QED_LM_100000baseSR4_Full,
+	QED_LM_100000baseCR4_Full,
+	QED_LM_100000baseLR4_ER4_Full,
+};
+
+static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
+			  qed_mfw_legacy_1g),
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
+			  qed_mfw_legacy_10g),
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
+			  qed_mfw_legacy_20g),
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
+			  qed_mfw_legacy_25g),
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
+			  qed_mfw_legacy_40g),
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
+			  qed_mfw_legacy_50g),
+	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
+			  qed_mfw_legacy_bb_100g),
+};
+
+static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
+{
+	u32 i;
+
+	for (i = 0; i < map->arr_size; i++)
+		__set_bit(map->cap_arr[i], map->caps);
+
+	map->cap_arr = NULL;
+	map->arr_size = 0;
+}
+
+static void __init qed_mfw_speed_maps_init(void)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
+		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
+}
+
 static int __init qed_init(void)
 {
 	pr_info("%s", version);
 
+	qed_mfw_speed_maps_init();
+
 	return 0;
 }
+module_init(qed_init);
 
-static void __exit qed_cleanup(void)
+static void __exit qed_exit(void)
 {
-	pr_notice("qed_cleanup called\n");
+	/* To prevent marking this module as "permanent" */
 }
-
-module_init(qed_init);
-module_exit(qed_cleanup);
+module_exit(qed_exit);
 
 /* Check if the DMA controller on the machine can properly handle the DMA
  * addressing required by the device.
@@ -1457,11 +1559,12 @@  static bool qed_can_link_change(struct qed_dev *cdev)
 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
 {
 	struct qed_mcp_link_params *link_params;
-	QED_LM_DECLARE(sup_caps);
+	struct qed_mcp_link_speed_params *speed;
+	const struct qed_mfw_speed_map *map;
 	struct qed_hwfn *hwfn;
 	struct qed_ptt *ptt;
-	u32 as;
 	int rc;
+	u32 i;
 
 	if (!cdev)
 		return -ENODEV;
@@ -1486,78 +1589,26 @@  static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
 	if (!link_params)
 		return -ENODATA;
 
+	speed = &link_params->speed;
+
 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
-		link_params->speed.autoneg = params->autoneg;
+		speed->autoneg = !!params->autoneg;
 
 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
-		as = 0;
-
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_1000baseT_Full, sup_caps);
-		__set_bit(QED_LM_1000baseKX_Full, sup_caps);
-		__set_bit(QED_LM_1000baseX_Full, sup_caps);
-
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
-
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_10000baseT_Full, sup_caps);
-		__set_bit(QED_LM_10000baseKR_Full, sup_caps);
-		__set_bit(QED_LM_10000baseKX4_Full, sup_caps);
-		__set_bit(QED_LM_10000baseR_FEC, sup_caps);
-		__set_bit(QED_LM_10000baseCR_Full, sup_caps);
-		__set_bit(QED_LM_10000baseSR_Full, sup_caps);
-		__set_bit(QED_LM_10000baseLR_Full, sup_caps);
-		__set_bit(QED_LM_10000baseLRM_Full, sup_caps);
+		speed->advertised_speeds = 0;
 
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
+			map = qed_mfw_legacy_maps + i;
 
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_20000baseKR2_Full, sup_caps);
-
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
-
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_25000baseKR_Full, sup_caps);
-		__set_bit(QED_LM_25000baseCR_Full, sup_caps);
-		__set_bit(QED_LM_25000baseSR_Full, sup_caps);
-
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
-
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_40000baseLR4_Full, sup_caps);
-		__set_bit(QED_LM_40000baseKR4_Full, sup_caps);
-		__set_bit(QED_LM_40000baseCR4_Full, sup_caps);
-		__set_bit(QED_LM_40000baseSR4_Full, sup_caps);
-
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
-
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_50000baseKR2_Full, sup_caps);
-		__set_bit(QED_LM_50000baseCR2_Full, sup_caps);
-		__set_bit(QED_LM_50000baseSR2_Full, sup_caps);
-
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
-
-		qed_link_mode_zero(sup_caps);
-		__set_bit(QED_LM_100000baseKR4_Full, sup_caps);
-		__set_bit(QED_LM_100000baseSR4_Full, sup_caps);
-		__set_bit(QED_LM_100000baseCR4_Full, sup_caps);
-		__set_bit(QED_LM_100000baseLR4_ER4_Full, sup_caps);
-
-		if (qed_link_mode_intersects(params->adv_speeds, sup_caps))
-			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
-
-		link_params->speed.advertised_speeds = as;
+			if (qed_link_mode_intersects(params->adv_speeds,
+						     map->caps))
+				speed->advertised_speeds |= map->mfw_val;
+		}
 	}
 
 	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
-		link_params->speed.forced_speed = params->forced_speed;
+		speed->forced_speed = params->forced_speed;
+
 	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
 		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
 			link_params->pause.autoneg = true;
@@ -1572,6 +1623,7 @@  static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
 		else
 			link_params->pause.forced_tx = false;
 	}
+
 	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
 		switch (params->loopback_mode) {
 		case QED_LINK_LOOPBACK_INT_PHY: