
[RFC,v1,2/2] PCI/AER: report fatal errors of RCiEP and EP if link recovered

Message ID 20241106090339.24920-3-xueshuai@linux.alibaba.com (mailing list archive)
State New
Series PCI/AER: report fatal errors of RCiEP and EP if link recovered

Commit Message

Shuai Xue Nov. 6, 2024, 9:03 a.m. UTC
The AER driver has historically avoided reading the configuration space of an
endpoint or RCiEP that reported a fatal error, considering the link to that
device unreliable. Consequently, when a fatal error occurs, the AER and DPC
drivers do not report specific error types, resulting in logs like:

[  245.281980] pcieport 0000:30:03.0: EDR: EDR event received
[  245.287466] pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
[  245.295372] pcieport 0000:30:03.0: DPC: ERR_FATAL detected
[  245.300849] pcieport 0000:30:03.0: AER: broadcast error_detected message
[  245.307540] nvme nvme0: frozen state error detected, reset controller
[  245.722582] nvme 0000:34:00.0: ready 0ms after DPC
[  245.727365] pcieport 0000:30:03.0: AER: broadcast slot_reset message

But if the link recovered after hot reset, we can safely access the AER status
of the error device. In that case, report the fatal error, which helps to
figure out the root cause of the error.

After this patch, the logs look like:

[  414.356755] pcieport 0000:30:03.0: EDR: EDR event received
[  414.362240] pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
[  414.370148] pcieport 0000:30:03.0: DPC: ERR_FATAL detected
[  414.375642] pcieport 0000:30:03.0: AER: broadcast error_detected message
[  414.382335] nvme nvme0: frozen state error detected, reset controller
[  414.645413] pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
[  414.788016] nvme 0000:34:00.0: ready 0ms after DPC
[  414.796975] nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
[  414.807312] nvme 0000:34:00.0:   device [144d:a804] error status/mask=00000010/00504000
[  414.815305] nvme 0000:34:00.0:    [ 4] DLP                    (First)
[  414.821768] pcieport 0000:30:03.0: AER: broadcast slot_reset message

Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
---
 drivers/pci/pci.h      |  1 +
 drivers/pci/pcie/aer.c | 50 ++++++++++++++++++++++++++++++++++++++++++
 drivers/pci/pcie/err.c |  6 +++++
 3 files changed, 57 insertions(+)

Comments

Bjorn Helgaas Nov. 6, 2024, 4:02 p.m. UTC | #1
On Wed, Nov 06, 2024 at 05:03:39PM +0800, Shuai Xue wrote:
> The AER driver has historically avoided reading the configuration space of an
> endpoint or RCiEP that reported a fatal error, considering the link to that
> device unreliable. Consequently, when a fatal error occurs, the AER and DPC
> drivers do not report specific error types, resulting in logs like:
> 
> [  245.281980] pcieport 0000:30:03.0: EDR: EDR event received
> [  245.287466] pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> [  245.295372] pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> [  245.300849] pcieport 0000:30:03.0: AER: broadcast error_detected message
> [  245.307540] nvme nvme0: frozen state error detected, reset controller
> [  245.722582] nvme 0000:34:00.0: ready 0ms after DPC
> [  245.727365] pcieport 0000:30:03.0: AER: broadcast slot_reset message
> 
> But if the link recovered after hot reset, we can safely access the AER status
> of the error device. In that case, report the fatal error, which helps to
> figure out the root cause of the error.

Explain why we can access these registers after reset.  I think it's
important that these registers are sticky ("RW1CS" per spec).

> After this patch, the logs look like:
> 
> [  414.356755] pcieport 0000:30:03.0: EDR: EDR event received
> [  414.362240] pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
> [  414.370148] pcieport 0000:30:03.0: DPC: ERR_FATAL detected
> [  414.375642] pcieport 0000:30:03.0: AER: broadcast error_detected message
> [  414.382335] nvme nvme0: frozen state error detected, reset controller
> [  414.645413] pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
> [  414.788016] nvme 0000:34:00.0: ready 0ms after DPC
> [  414.796975] nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
> [  414.807312] nvme 0000:34:00.0:   device [144d:a804] error status/mask=00000010/00504000
> [  414.815305] nvme 0000:34:00.0:    [ 4] DLP                    (First)
> [  414.821768] pcieport 0000:30:03.0: AER: broadcast slot_reset message

Capitalize subject lines to match history (use "git log --oneline
drivers/pci/pcie/aer.c" to see it).

Remove timestamps since they don't help understand the problem.

Indent the quoted material two spaces.

Wrap commit log to fit in 75 columns (except the quoted material;
don't insert line breaks there).

> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
> ---
>  drivers/pci/pci.h      |  1 +
>  drivers/pci/pcie/aer.c | 50 ++++++++++++++++++++++++++++++++++++++++++
>  drivers/pci/pcie/err.c |  6 +++++
>  3 files changed, 57 insertions(+)
> 
> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
> index 0866f79aec54..143f960a813d 100644
> --- a/drivers/pci/pci.h
> +++ b/drivers/pci/pci.h
> @@ -505,6 +505,7 @@ struct aer_err_info {
>  };
>  
>  int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
> +int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info);
>  void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>  #endif	/* CONFIG_PCIEAER */
>  
> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
> index 13b8586924ea..0c1e382ce117 100644
> --- a/drivers/pci/pcie/aer.c
> +++ b/drivers/pci/pcie/aer.c
> @@ -1252,6 +1252,56 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>  	return 1;
>  }
>  
> +/**
> + * aer_get_device_fatal_error_info - read fatal error status from EP or RCiEP
> + * and store it to info
> + * @dev: pointer to the device expected to have a error record
> + * @info: pointer to structure to store the error record
> + *
> + * Return 1 on success, 0 on error.

Backwards from the usual return value convention.

> + * Note that @info is reused among all error devices. Clear fields properly.
> + */
> +int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info)
> +{
> +	int type = pci_pcie_type(dev);
> +	int aer = dev->aer_cap;
> +	u32 aercc;
> +
> +	pci_info(dev, "type :%d\n", type);

I don't see this line in the sample output in the commit log.  Is this
debug that you intended to remove?

> +	/* Must reset in this function */
> +	info->status = 0;
> +	info->tlp_header_valid = 0;
> +	info->severity = AER_FATAL;
> +
> +	/* The device might not support AER */

Unnecessary comment.

> +	if (!aer)
> +		return 0;
> +
> +
> +	if (type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_RC_END) {
> +		/* Link is healthy for IO reads now */
> +		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
> +			&info->status);
> +		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
> +			&info->mask);
> +		if (!(info->status & ~info->mask))
> +			return 0;
> +
> +		/* Get First Error Pointer */
> +		pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
> +		info->first_error = PCI_ERR_CAP_FEP(aercc);
> +
> +		if (info->status & AER_LOG_TLP_MASKS) {
> +			info->tlp_header_valid = 1;
> +			pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
> +		}
> +	}
> +
> +	return 1;
> +}
> +
>  static inline void aer_process_err_devices(struct aer_err_info *e_info)
>  {
>  	int i;
> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
> index 31090770fffc..a74ae6a55064 100644
> --- a/drivers/pci/pcie/err.c
> +++ b/drivers/pci/pcie/err.c
> @@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>  	struct pci_dev *bridge;
>  	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
>  	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
> +	struct aer_err_info info;
>  
>  	/*
>  	 * If the error was detected by a Root Port, Downstream Port, RCEC,
> @@ -223,6 +224,10 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>  			pci_warn(bridge, "subordinate device reset failed\n");
>  			goto failed;
>  		}
> +
> +		/* Link recovered, report fatal errors on RCiEP or EP */
> +		if (aer_get_device_fatal_error_info(dev, &info))
> +			aer_print_error(dev, &info);
>  	} else {
>  		pci_walk_bridge(bridge, report_normal_detected, &status);
>  	}
> @@ -259,6 +264,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>  	if (host->native_aer || pcie_ports_native) {
>  		pcie_clear_device_status(dev);
>  		pci_aer_clear_nonfatal_status(dev);
> +		pci_aer_clear_fatal_status(dev);
>  	}
>  
>  	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
> -- 
> 2.39.3
>
Keith Busch Nov. 6, 2024, 4:39 p.m. UTC | #2
On Wed, Nov 06, 2024 at 05:03:39PM +0800, Shuai Xue wrote:
> +int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info)
> +{
> +	int type = pci_pcie_type(dev);
> +	int aer = dev->aer_cap;
> +	u32 aercc;
> +
> +	pci_info(dev, "type :%d\n", type);
> +
> +	/* Must reset in this function */
> +	info->status = 0;
> +	info->tlp_header_valid = 0;
> +	info->severity = AER_FATAL;
> +
> +	/* The device might not support AER */
> +	if (!aer)
> +		return 0;
> +
> +
> +	if (type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_RC_END) {
> +		/* Link is healthy for IO reads now */
> +		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
> +			&info->status);
> +		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
> +			&info->mask);
> +		if (!(info->status & ~info->mask))
> +			return 0;
> +
> +		/* Get First Error Pointer */
> +		pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
> +		info->first_error = PCI_ERR_CAP_FEP(aercc);
> +
> +		if (info->status & AER_LOG_TLP_MASKS) {
> +			info->tlp_header_valid = 1;
> +			pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
> +		}

This matches the uncorrectable handling in aer_get_device_error_info, so
perhaps a helper to reduce duplication.

> +	}
> +
> +	return 1;
> +}

Returning '1' even if type is root or downstream port?

>  static inline void aer_process_err_devices(struct aer_err_info *e_info)
>  {
>  	int i;
> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
> index 31090770fffc..a74ae6a55064 100644
> --- a/drivers/pci/pcie/err.c
> +++ b/drivers/pci/pcie/err.c
> @@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>  	struct pci_dev *bridge;
>  	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
>  	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
> +	struct aer_err_info info;
>  
>  	/*
>  	 * If the error was detected by a Root Port, Downstream Port, RCEC,
> @@ -223,6 +224,10 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>  			pci_warn(bridge, "subordinate device reset failed\n");
>  			goto failed;
>  		}
> +
> +		/* Link recovered, report fatal errors on RCiEP or EP */
> +		if (aer_get_device_fatal_error_info(dev, &info))
> +			aer_print_error(dev, &info);

This will always print the error info even for root and downstream
ports, but you initialize "info" status and mask only if it's an EP or
RCiEP.
Shuai Xue Nov. 7, 2024, 1:24 a.m. UTC | #3
On 2024/11/7 00:02, Bjorn Helgaas wrote:
> On Wed, Nov 06, 2024 at 05:03:39PM +0800, Shuai Xue wrote:
>> The AER driver has historically avoided reading the configuration space of an
>> endpoint or RCiEP that reported a fatal error, considering the link to that
>> device unreliable. Consequently, when a fatal error occurs, the AER and DPC
>> drivers do not report specific error types, resulting in logs like:
>>
>> [  245.281980] pcieport 0000:30:03.0: EDR: EDR event received
>> [  245.287466] pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> [  245.295372] pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> [  245.300849] pcieport 0000:30:03.0: AER: broadcast error_detected message
>> [  245.307540] nvme nvme0: frozen state error detected, reset controller
>> [  245.722582] nvme 0000:34:00.0: ready 0ms after DPC
>> [  245.727365] pcieport 0000:30:03.0: AER: broadcast slot_reset message
>>
>> But if the link recovered after hot reset, we can safely access the AER status
>> of the error device. In that case, report the fatal error, which helps to
>> figure out the root cause of the error.
> 
> Explain why we can access these registers after reset.  I think it's
> important that these registers are sticky ("RW1CS" per spec).

Yes, AER error status registers are sticky and write-1-to-clear. If we
do not read them after reset_subordinates, the registers will be
cleared in the pci_error_handlers, e.g. nvme_err_handler:

   slot_reset() => nvme_slot_reset()
     pci_restore_state()
       pci_aer_clear_status()

Will add the reason in commit log.
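
To illustrate the RW1CS behaviour, a minimal sketch (not part of this
patch; it only shows why the status survives the hot reset but is gone
once something writes the bits back):

	int aer = dev->aer_cap;
	u32 status;

	/* RW1CS: the Uncorrectable Error Status register keeps its value
	 * across the reset, and reading it is non-destructive.
	 */
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);

	/* Writing 1s back clears the bits - roughly what happens later
	 * via pci_restore_state() -> pci_aer_clear_status() in the
	 * slot_reset() chain above, so we must log the status first.
	 */
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);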

> 
>> After this patch, the logs look like:
>>
>> [  414.356755] pcieport 0000:30:03.0: EDR: EDR event received
>> [  414.362240] pcieport 0000:30:03.0: DPC: containment event, status:0x0005 source:0x3400
>> [  414.370148] pcieport 0000:30:03.0: DPC: ERR_FATAL detected
>> [  414.375642] pcieport 0000:30:03.0: AER: broadcast error_detected message
>> [  414.382335] nvme nvme0: frozen state error detected, reset controller
>> [  414.645413] pcieport 0000:30:03.0: waiting 100 ms for downstream link, after activation
>> [  414.788016] nvme 0000:34:00.0: ready 0ms after DPC
>> [  414.796975] nvme 0000:34:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Data Link Layer, (Receiver ID)
>> [  414.807312] nvme 0000:34:00.0:   device [144d:a804] error status/mask=00000010/00504000
>> [  414.815305] nvme 0000:34:00.0:    [ 4] DLP                    (First)
>> [  414.821768] pcieport 0000:30:03.0: AER: broadcast slot_reset message
> 
> Capitalize subject lines to match history (use "git log --oneline
> drivers/pci/pcie/aer.c" to see it).
> 
> Remove timestamps since they don't help understand the problem.
> 
> Indent the quoted material two spaces.
> 
> Wrap commit log to fit in 75 columns (except the quoted material;
> don't insert line breaks there).

Will do.

> 
>> Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
>> ---
>>   drivers/pci/pci.h      |  1 +
>>   drivers/pci/pcie/aer.c | 50 ++++++++++++++++++++++++++++++++++++++++++
>>   drivers/pci/pcie/err.c |  6 +++++
>>   3 files changed, 57 insertions(+)
>>
>> diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
>> index 0866f79aec54..143f960a813d 100644
>> --- a/drivers/pci/pci.h
>> +++ b/drivers/pci/pci.h
>> @@ -505,6 +505,7 @@ struct aer_err_info {
>>   };
>>   
>>   int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
>> +int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info);
>>   void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
>>   #endif	/* CONFIG_PCIEAER */
>>   
>> diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
>> index 13b8586924ea..0c1e382ce117 100644
>> --- a/drivers/pci/pcie/aer.c
>> +++ b/drivers/pci/pcie/aer.c
>> @@ -1252,6 +1252,56 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
>>   	return 1;
>>   }
>>   
>> +/**
>> + * aer_get_device_fatal_error_info - read fatal error status from EP or RCiEP
>> + * and store it to info
>> + * @dev: pointer to the device expected to have a error record
>> + * @info: pointer to structure to store the error record
>> + *
>> + * Return 1 on success, 0 on error.
> 
> Backwards from the usual return value convention.

Yes. As @Keith pointed out, aer_get_device_fatal_error_info() is copied
from aer_get_device_error_info(); I will try to add a helper to reduce
duplication.
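
A rough sketch of what the shared part might look like (helper name and
placement are illustrative, not from this series; the body is the
uncorrectable status/mask/FEP/TLP handling already in the patch):

	static int aer_get_uncor_error_info(struct pci_dev *dev,
					    struct aer_err_info *info)
	{
		int aer = dev->aer_cap;
		u32 aercc;

		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
				      &info->status);
		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
				      &info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
		info->first_error = PCI_ERR_CAP_FEP(aercc);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
					  &info->tlp);
		}

		return 1;
	}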

> 
>> + * Note that @info is reused among all error devices. Clear fields properly.
>> + */
>> +int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info)
>> +{
>> +	int type = pci_pcie_type(dev);
>> +	int aer = dev->aer_cap;
>> +	u32 aercc;
>> +
>> +	pci_info(dev, "type :%d\n", type);
> 
> I don't see this line in the sample output in the commit log.  Is this
> debug that you intended to remove?


Sorry, I missed this line, will remove it.

> 
>> +	/* Must reset in this function */
>> +	info->status = 0;
>> +	info->tlp_header_valid = 0;
>> +	info->severity = AER_FATAL;
>> +
>> +	/* The device might not support AER */
> 
> Unnecessary comment.

Will remove it.

Thank you for the valuable comments.

Best Regards,
Shuai
Shuai Xue Nov. 7, 2024, 1:27 a.m. UTC | #4
On 2024/11/7 00:39, Keith Busch wrote:
> On Wed, Nov 06, 2024 at 05:03:39PM +0800, Shuai Xue wrote:
>> +int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info)
>> +{
>> +	int type = pci_pcie_type(dev);
>> +	int aer = dev->aer_cap;
>> +	u32 aercc;
>> +
>> +	pci_info(dev, "type :%d\n", type);
>> +
>> +	/* Must reset in this function */
>> +	info->status = 0;
>> +	info->tlp_header_valid = 0;
>> +	info->severity = AER_FATAL;
>> +
>> +	/* The device might not support AER */
>> +	if (!aer)
>> +		return 0;
>> +
>> +
>> +	if (type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_RC_END) {
>> +		/* Link is healthy for IO reads now */
>> +		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
>> +			&info->status);
>> +		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
>> +			&info->mask);
>> +		if (!(info->status & ~info->mask))
>> +			return 0;
>> +
>> +		/* Get First Error Pointer */
>> +		pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
>> +		info->first_error = PCI_ERR_CAP_FEP(aercc);
>> +
>> +		if (info->status & AER_LOG_TLP_MASKS) {
>> +			info->tlp_header_valid = 1;
>> +			pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
>> +		}
> 
> This matches the uncorrectable handling in aer_get_device_error_info, so
> perhaps a helper to reduce duplication.

Yes, will do.

> 
>> +	}
>> +
>> +	return 1;
>> +}
> 
> Returning '1' even if type is root or downstream port?
> 
>>   static inline void aer_process_err_devices(struct aer_err_info *e_info)
>>   {
>>   	int i;
>> diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
>> index 31090770fffc..a74ae6a55064 100644
>> --- a/drivers/pci/pcie/err.c
>> +++ b/drivers/pci/pcie/err.c
>> @@ -196,6 +196,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>>   	struct pci_dev *bridge;
>>   	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
>>   	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
>> +	struct aer_err_info info;
>>   
>>   	/*
>>   	 * If the error was detected by a Root Port, Downstream Port, RCEC,
>> @@ -223,6 +224,10 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
>>   			pci_warn(bridge, "subordinate device reset failed\n");
>>   			goto failed;
>>   		}
>> +
>> +		/* Link recovered, report fatal errors on RCiEP or EP */
>> +		if (aer_get_device_fatal_error_info(dev, &info))
>> +			aer_print_error(dev, &info);
> 
> This will always print the error info even for root and downstream
> ports, but you initialize "info" status and mask only if it's an EP or
> RCiEP.

Got it. Will fix it.
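
One possible shape for the fix (sketch only): bail out early in
aer_get_device_fatal_error_info() for anything that is not an EP or
RCiEP, so the caller only prints info that was actually populated:

	/* Only EPs and RCiEPs fill in @info here; report nothing else. */
	if (type != PCI_EXP_TYPE_ENDPOINT && type != PCI_EXP_TYPE_RC_END)
		return 0;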

Thank you for the valuable comments.

Best Regards,
Shuai

Patch

diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 0866f79aec54..143f960a813d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -505,6 +505,7 @@  struct aer_err_info {
 };
 
 int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
+int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info);
 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
 #endif	/* CONFIG_PCIEAER */
 
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 13b8586924ea..0c1e382ce117 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1252,6 +1252,56 @@  int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
 	return 1;
 }
 
+/**
+ * aer_get_device_fatal_error_info - read fatal error status from EP or RCiEP
+ * and store it to info
+ * @dev: pointer to the device expected to have a error record
+ * @info: pointer to structure to store the error record
+ *
+ * Return 1 on success, 0 on error.
+ *
+ * Note that @info is reused among all error devices. Clear fields properly.
+ */
+int aer_get_device_fatal_error_info(struct pci_dev *dev, struct aer_err_info *info)
+{
+	int type = pci_pcie_type(dev);
+	int aer = dev->aer_cap;
+	u32 aercc;
+
+	pci_info(dev, "type :%d\n", type);
+
+	/* Must reset in this function */
+	info->status = 0;
+	info->tlp_header_valid = 0;
+	info->severity = AER_FATAL;
+
+	/* The device might not support AER */
+	if (!aer)
+		return 0;
+
+
+	if (type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_RC_END) {
+		/* Link is healthy for IO reads now */
+		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
+			&info->status);
+		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
+			&info->mask);
+		if (!(info->status & ~info->mask))
+			return 0;
+
+		/* Get First Error Pointer */
+		pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
+		info->first_error = PCI_ERR_CAP_FEP(aercc);
+
+		if (info->status & AER_LOG_TLP_MASKS) {
+			info->tlp_header_valid = 1;
+			pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
+		}
+	}
+
+	return 1;
+}
+
 static inline void aer_process_err_devices(struct aer_err_info *e_info)
 {
 	int i;
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 31090770fffc..a74ae6a55064 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -196,6 +196,7 @@  pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
 	struct pci_dev *bridge;
 	pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
 	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+	struct aer_err_info info;
 
 	/*
 	 * If the error was detected by a Root Port, Downstream Port, RCEC,
@@ -223,6 +224,10 @@  pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
 			pci_warn(bridge, "subordinate device reset failed\n");
 			goto failed;
 		}
+
+		/* Link recovered, report fatal errors on RCiEP or EP */
+		if (aer_get_device_fatal_error_info(dev, &info))
+			aer_print_error(dev, &info);
 	} else {
 		pci_walk_bridge(bridge, report_normal_detected, &status);
 	}
@@ -259,6 +264,7 @@  pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
 	if (host->native_aer || pcie_ports_native) {
 		pcie_clear_device_status(dev);
 		pci_aer_clear_nonfatal_status(dev);
+		pci_aer_clear_fatal_status(dev);
 	}
 
 	pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);