
[net-next,2/2] amd-xgbe: add support for rx-adaptation

Message ID 20230125072529.2222420-3-Raju.Rangoju@amd.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series amd-xgbe: add support for 2.5GbE and rx-adaptation

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 100 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 89 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 98 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline fail Was 0 now: 1

Commit Message

Raju Rangoju Jan. 25, 2023, 7:25 a.m. UTC
The existing implementation for the non-autonegotiation 10G speed modes
does not enable RX adaptation in the driver and firmware. The RX
equalization settings (AFE settings alone) are configured manually, and
the existing link-up sequence in the driver does not perform the RX
adaptation process described in the Synopsys databook. There is a
customer request for 10G backplane mode without auto-negotiation, and
for longer DAC cables, which also operate without auto-negotiation.
These modes require the PHY to perform RX adaptation.

The proposed logic adds the necessary changes to Yellow Carp devices to
ensure seamless RX adaptation for 10G-SFI (long DAC) and 10G-KR without
AN (CL72 not present). The core RX adaptation algorithm is executed by
the firmware; however, to achieve that, the driver must send a new
mailbox sub-command.
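
Concretely, the new sub-command rides on the existing ratechange
mailbox call; a minimal sketch, simplified from the patch below, with
the surrounding retry handling omitted:

	/* ask the firmware to run RX adaptation during the rate change */
	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
				    XGBE_MB_SUBCMD_RX_ADAP);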

Co-developed-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Signed-off-by: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
---
 drivers/net/ethernet/amd/xgbe/xgbe-common.h |  37 +++++
 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 160 +++++++++++++++++++-
 drivers/net/ethernet/amd/xgbe/xgbe.h        |   5 +
 3 files changed, 198 insertions(+), 4 deletions(-)

Comments

Jakub Kicinski Jan. 26, 2023, 7:04 a.m. UTC | #1
On Wed, 25 Jan 2023 12:55:29 +0530 Raju Rangoju wrote:
> --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
> +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
> @@ -387,7 +387,13 @@ struct xgbe_phy_data {
>  /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
>  static DEFINE_MUTEX(xgbe_phy_comm_lock);
>  
> +static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
> +					unsigned int cmd, unsigned int sub_cmd);
>  static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
> +static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
> +static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode);
> +static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata);
> +static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata);

Why the forward declarations? It's against the kernel coding style.

>  static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
>  			     struct xgbe_i2c_op *i2c_op)
> @@ -2038,6 +2044,87 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
>  	xgbe_phy_put_comm_ownership(pdata);
>  }
>  
> +#define MAX_RX_ADAPT_RETRIES	1
> +#define XGBE_PMA_RX_VAL_SIG_MASK	(XGBE_PMA_RX_SIG_DET_0_MASK | XGBE_PMA_RX_VALID_0_MASK)
> +
> +static inline void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)

Don't pointlessly use inline, please. The compiler will know when 
to inline, and this is not the datapath.
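
A minimal sketch of the requested change, with the body taken from the
patch below; dropping the qualifier leaves inlining decisions to the
compiler:

static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
				  enum xgbe_mode mode)
{
	/* give up once the retry budget is exhausted */
	if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
		pdata->rx_adapt_retries = 0;
		return;
	}

	/* re-issue the mode command with the RX adaptation sub-command */
	xgbe_phy_perform_ratechange(pdata,
				    mode == XGBE_MODE_KR ?
				    XGBE_MB_CMD_SET_10G_KR :
				    XGBE_MB_CMD_SET_10G_SFI,
				    XGBE_MB_SUBCMD_RX_ADAP);
}
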
Raju Rangoju Jan. 27, 2023, 7:43 a.m. UTC | #2
On 1/26/2023 12:34 PM, Jakub Kicinski wrote:
> On Wed, 25 Jan 2023 12:55:29 +0530 Raju Rangoju wrote:
>> --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
>> +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
>> @@ -387,7 +387,13 @@ struct xgbe_phy_data {
>>   /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
>>   static DEFINE_MUTEX(xgbe_phy_comm_lock);
>>   
>> +static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
>> +					unsigned int cmd, unsigned int sub_cmd);
>>   static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
>> +static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
>> +static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode);
>> +static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata);
>> +static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata);
> 
> Why the forward declarations? It's against the kernel coding style.

Hi Jakub,

Sure, I've re-ordered most of the functions to avoid forward
declarations. However, there is a circular dependency between a couple
of them. Let us know if it is fine to keep forward declarations for the
functions that have a circular dependency.

> 
>>   static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
>>   			     struct xgbe_i2c_op *i2c_op)
>> @@ -2038,6 +2044,87 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
>>   	xgbe_phy_put_comm_ownership(pdata);
>>   }
>>   
>> +#define MAX_RX_ADAPT_RETRIES	1
>> +#define XGBE_PMA_RX_VAL_SIG_MASK	(XGBE_PMA_RX_SIG_DET_0_MASK | XGBE_PMA_RX_VALID_0_MASK)
>> +
>> +static inline void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
> 
> Don't pointlessly use inline, please. The compiler will know when
> to inline, and this is not the datapath.

Sure, I'll take care of this in the next revision.
Raju Rangoju Jan. 31, 2023, 9:56 a.m. UTC | #3
On 1/27/2023 1:13 PM, Raju Rangoju wrote:
> 
> On 1/26/2023 12:34 PM, Jakub Kicinski wrote:
>> On Wed, 25 Jan 2023 12:55:29 +0530 Raju Rangoju wrote:
>>> --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
>>> +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
>>> @@ -387,7 +387,13 @@ struct xgbe_phy_data {
>>>   /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
>>>   static DEFINE_MUTEX(xgbe_phy_comm_lock);
>>> +static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
>>> +                    unsigned int cmd, unsigned int sub_cmd);
>>>   static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
>>> +static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
>>> +static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode);
>>> +static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata);
>>> +static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata);
>>
>> Why the forward declarations? It's against the kernel coding style.
> 
> Hi Jakub,
> 
> Sure, I've re-ordered most of the functions to avoid forward
> declarations. However, there is a circular dependency between a couple
> of them. Let us know if it is fine to keep forward declarations for
> the functions that have a circular dependency.

Hi Jakub,

Gentle reminder!

Let us know if it is okay to have forward declarations for the
functions that have a circular dependency.

Thanks,
Raju

> 
>>
>>>   static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
>>>                    struct xgbe_i2c_op *i2c_op)
>>> @@ -2038,6 +2044,87 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
>>>       xgbe_phy_put_comm_ownership(pdata);
>>>   }
>>> +#define MAX_RX_ADAPT_RETRIES    1
>>> +#define XGBE_PMA_RX_VAL_SIG_MASK    (XGBE_PMA_RX_SIG_DET_0_MASK | XGBE_PMA_RX_VALID_0_MASK)
>>> +
>>> +static inline void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
>>
>> Don't pointlessly use inline, please. The compiler will know when
>> to inline, and this is not the datapath.
> 
> Sure, I'll take care of this in the next revision.
Jakub Kicinski Jan. 31, 2023, 7:11 p.m. UTC | #4
On Tue, 31 Jan 2023 15:26:14 +0530 Raju Rangoju wrote:
> > Sure, I've re-ordered most of the functions to avoid forward
> > declarations. However, there is a circular dependency between a couple
> > of them. Let us know if it is fine to keep forward declarations for
> > the functions that have a circular dependency.
> 
> Gentle reminder!
> 
> Let us know if it is okay to have forward declarations for the
> functions that have a circular dependency.

Yes, in case of circular dependencies there's not much we can do.
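
For reference, the cycle in this patch looks roughly like this
(simplified sketch, bodies elided); reordering alone cannot break it,
so one forward declaration stays:

static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
					unsigned int cmd, unsigned int sub_cmd);

static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
				  enum xgbe_mode mode)
{
	/* retries adaptation: calls back into the ratechange path */
	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
				    XGBE_MB_SUBCMD_RX_ADAP);
}

static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
					unsigned int cmd, unsigned int sub_cmd)
{
	/* on success this eventually runs xgbe_phy_rx_adaptation(),
	 * which may call xgbe_set_rx_adap_mode() above to retry
	 */
}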

Patch

diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 3fd9728f817f..52964ce72950 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1285,6 +1285,22 @@ 
 #define MDIO_PMA_RX_CTRL1		0x8051
 #endif
 
+#ifndef MDIO_PMA_RX_LSTS
+#define MDIO_PMA_RX_LSTS		0x018020
+#endif
+
+#ifndef MDIO_PMA_RX_EQ_CTRL4
+#define MDIO_PMA_RX_EQ_CTRL4		0x0001805C
+#endif
+
+#ifndef MDIO_PMA_MP_MISC_STS
+#define MDIO_PMA_MP_MISC_STS		0x0078
+#endif
+
+#ifndef MDIO_PMA_PHY_RX_EQ_CEU
+#define MDIO_PMA_PHY_RX_EQ_CEU		0x1800E
+#endif
+
 #ifndef MDIO_PCS_DIG_CTRL
 #define MDIO_PCS_DIG_CTRL		0x8000
 #endif
@@ -1395,6 +1411,27 @@ 
 #define XGBE_PMA_RX_RST_0_RESET_ON	0x10
 #define XGBE_PMA_RX_RST_0_RESET_OFF	0x00
 
+#define XGBE_PMA_RX_SIG_DET_0_MASK	BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_ENABLE	BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_DISABLE	0x0000
+
+#define XGBE_PMA_RX_VALID_0_MASK	BIT(12)
+#define XGBE_PMA_RX_VALID_0_ENABLE	BIT(12)
+#define XGBE_PMA_RX_VALID_0_DISABLE	0x0000
+
+#define XGBE_PMA_RX_AD_REQ_MASK		BIT(12)
+#define XGBE_PMA_RX_AD_REQ_ENABLE	BIT(12)
+#define XGBE_PMA_RX_AD_REQ_DISABLE	0x0000
+
+#define XGBE_PMA_RX_ADPT_ACK_MASK	BIT(12)
+#define XGBE_PMA_RX_ADPT_ACK		BIT(12)
+
+#define XGBE_PMA_CFF_UPDTM1_VLD		BIT(8)
+#define XGBE_PMA_CFF_UPDT0_VLD		BIT(9)
+#define XGBE_PMA_CFF_UPDT1_VLD		BIT(10)
+#define XGBE_PMA_CFF_UPDT_MASK		(XGBE_PMA_CFF_UPDTM1_VLD | XGBE_PMA_CFF_UPDT0_VLD |\
+					 XGBE_PMA_CFF_UPDT1_VLD)
+
 #define XGBE_PMA_PLL_CTRL_MASK		BIT(15)
 #define XGBE_PMA_PLL_CTRL_ENABLE	BIT(15)
 #define XGBE_PMA_PLL_CTRL_DISABLE	0x0000
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index eea06e07b4a0..e48c1986926a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -387,7 +387,13 @@  struct xgbe_phy_data {
 /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
 static DEFINE_MUTEX(xgbe_phy_comm_lock);
 
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+					unsigned int cmd, unsigned int sub_cmd);
 static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
+static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode);
+static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata);
 
 static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
 			     struct xgbe_i2c_op *i2c_op)
@@ -2038,6 +2044,87 @@  static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
 	xgbe_phy_put_comm_ownership(pdata);
 }
 
+#define MAX_RX_ADAPT_RETRIES	1
+#define XGBE_PMA_RX_VAL_SIG_MASK	(XGBE_PMA_RX_SIG_DET_0_MASK | XGBE_PMA_RX_VALID_0_MASK)
+
+static inline void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+	if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+		pdata->rx_adapt_retries = 0;
+		return;
+	}
+
+	xgbe_phy_perform_ratechange(pdata,
+				    mode == XGBE_MODE_KR ?
+				    XGBE_MB_CMD_SET_10G_KR :
+				    XGBE_MB_CMD_SET_10G_SFI,
+				    XGBE_MB_SUBCMD_RX_ADAP);
+}
+
+static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	unsigned int reg;
+
+	/* step 2: force PCS to send RX_ADAPT Req to PHY */
+	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+			 XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
+
+	/* Step 3: Wait for RX_ADAPT ACK from the PHY */
+	msleep(200);
+
+	/* Software polls for coefficient update command (given by local PHY) */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
+
+	/* Clear the RX_AD_REQ bit */
+	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+			 XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);
+
+	/* Check if coefficient update command is set */
+	if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
+		goto set_mode;
+
+	/* Step 4: Check for Block lock */
+
+	/* Link status is latched low, so read once to clear
+	 * and then read again to get current state
+	 */
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	if (reg & MDIO_STAT1_LSTATUS) {
+		/* If the block lock is found, update the helpers and declare the link up */
+		netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
+		pdata->rx_adapt_done = true;
+		pdata->mode_set = false;
+		return;
+	}
+
+set_mode:
+	xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
+}
+
+static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+	unsigned int reg;
+
+rx_adapt_reinit:
+	reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS, XGBE_PMA_RX_VAL_SIG_MASK);
+
+	/* step 1: Check for RX_VALID && LF_SIGDET */
+	if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
+		netif_dbg(pdata, link, pdata->netdev, "RX_VALID or LF_SIGDET is unset, issue rrc");
+		xgbe_phy_rrc(pdata);
+		if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+			pdata->rx_adapt_retries = 0;
+			return;
+		}
+		goto rx_adapt_reinit;
+	}
+
+	/* perform rx adaptation */
+	xgbe_rx_adaptation(pdata);
+}
+
 static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
 {
 	int reg;
@@ -2103,7 +2190,7 @@  static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
 	wait = XGBE_RATECHANGE_COUNT;
 	while (wait--) {
 		if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
-			goto reenable_pll;
+			goto do_rx_adaptation;
 
 		usleep_range(1000, 2000);
 	}
@@ -2113,6 +2200,17 @@  static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
 
 	/* Reset on error */
 	xgbe_phy_rx_reset(pdata);
+	goto reenable_pll;
+
+do_rx_adaptation:
+	if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
+	    (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
+		netif_dbg(pdata, link, pdata->netdev, "Enabling RX adaptation\n");
+		pdata->mode_set = true;
+		xgbe_phy_rx_adaptation(pdata);
+		/* return from here to avoid enabling PLL ctrl during adaptation phase */
+		return;
+	}
 
 reenable_pll:
 	/* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
@@ -2141,6 +2239,31 @@  static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
 	netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
 }
 
+static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+	struct xgbe_phy_data *phy_data = pdata->phy_data;
+	unsigned int ver;
+
+	/* Rx-Adaptation is not supported on older platforms(< 0x30H) */
+	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+	if (ver < 0x30)
+		return false;
+
+	/* Re-driver models 4223 && 4227 do not support Rx-Adaptation */
+	if (phy_data->redrv &&
+	    (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
+	     phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
+		return false;
+
+	/* 10G KR mode with AN does not support Rx-Adaptation */
+	if (mode == XGBE_MODE_KR &&
+	    phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
+		return false;
+
+	pdata->en_rx_adap = 1;
+	return true;
+}
+
 static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2149,7 +2272,11 @@  static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
 
 	/* 10G/SFI */
 	if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+		pdata->en_rx_adap = 0;
 		xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
+	} else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+		   (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
+		xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_RX_ADAP);
 	} else {
 		if (phy_data->sfp_cable_len <= 1)
 			xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
@@ -2230,7 +2357,10 @@  static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
 	xgbe_phy_set_redrv_mode(pdata);
 
 	/* 10G/KR */
-	xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
+	if (enable_rx_adap(pdata, XGBE_MODE_KR))
+		xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_RX_ADAP);
+	else
+		xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
 
 	phy_data->cur_mode = XGBE_MODE_KR;
 
@@ -2743,8 +2873,11 @@  static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 			return 0;
 		}
 
-		if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+		if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+			if (pdata->en_rx_adap)
+				pdata->rx_adapt_done = false;
 			return 0;
+		}
 	}
 
 	if (phy_data->phydev) {
@@ -2766,7 +2899,26 @@  static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 	 */
 	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
 	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
-	if (reg & MDIO_STAT1_LSTATUS)
+
+	if (pdata->en_rx_adap) {
+		/* if the link is available and adaptation is done, declare link up */
+		if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+			return 1;
+		/* If either link is not available or adaptation not done, retrigger the
+		 * adaptation logic. (if the mode is not set, then issue mailbox command first)
+		 */
+		if (pdata->mode_set) {
+			xgbe_phy_rx_adaptation(pdata);
+		} else {
+			pdata->rx_adapt_done = false;
+			xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+		}
+
+		/* check again for the link and adaptation status */
+		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+		if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+			return 1;
+	} else if (reg & MDIO_STAT1_LSTATUS)
 		return 1;
 
 	if (pdata->phy.autoneg == AUTONEG_ENABLE &&
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 16e73df3e9b9..ad136ed493ed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -625,6 +625,7 @@  enum xgbe_mb_cmd {
 
 enum xgbe_mb_subcmd {
 	XGBE_MB_SUBCMD_NONE = 0,
+	XGBE_MB_SUBCMD_RX_ADAP,
 
 	/* 10GbE SFP subcommands */
 	XGBE_MB_SUBCMD_ACTIVE = 0,
@@ -1316,6 +1317,10 @@  struct xgbe_prv_data {
 
 	bool debugfs_an_cdr_workaround;
 	bool debugfs_an_cdr_track_early;
+	bool en_rx_adap;
+	int rx_adapt_retries;
+	bool rx_adapt_done;
+	bool mode_set;
 };
 
 /* Function prototypes*/