diff mbox series

[1/1] PCI: qcom: Add support for system suspend and resume

Message ID 20230103074907.12784-2-manivannan.sadhasivam@linaro.org (mailing list archive)
State Superseded
Headers show
Series PCI: qcom: Add support for system suspend and resume | expand

Commit Message

Manivannan Sadhasivam Jan. 3, 2023, 7:49 a.m. UTC
During the system suspend, vote for minimal interconnect bandwidth and
also turn OFF the resources like clock and PHY if there are no active
devices connected to the controller. For the controllers with active
devices, the resources are kept ON as removing the resources will
trigger access violation during the late end of suspend cycle as kernel
tries to access the config space of PCIe devices to mask the MSIs.

Also, it is not desirable to put the link into L2/L3 state as that
implies VDD supply will be removed and the devices may go into powerdown
state. This will affect the lifetime of storage devices like NVMe.

And finally, during resume, turn ON the resources if the controller was
truly suspended (resources OFF) and update the interconnect bandwidth
based on PCIe Gen speed.

Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

Comments

Dhruva Gole Jan. 3, 2023, 11:16 a.m. UTC | #1
On 03/01/23 13:19, Manivannan Sadhasivam wrote:
> During the system suspend, vote for minimal interconnect bandwidth and
> also turn OFF the resources like clock and PHY if there are no active
> devices connected to the controller. For the controllers with active
> devices, the resources are kept ON as removing the resources will
> trigger access violation during the late end of suspend cycle as kernel
> tries to access the config space of PCIe devices to mask the MSIs.
> 
> Also, it is not desirable to put the link into L2/L3 state as that
> implies VDD supply will be removed and the devices may go into powerdown
> state. This will affect the lifetime of storage devices like NVMe.
> 
> And finally, during resume, turn ON the resources if the controller was
> truly suspended (resources OFF) and update the interconnect bandwidth
> based on PCIe Gen speed.
> 
> Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---

Nice to have another driver added to the list of system suspend
support!

Acked-by: Dhruva Gole <d-gole@ti.com>

>   drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
>   1 file changed, 52 insertions(+)
> 
> diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
> index 5696e327795b..48810f1f2dba 100644
> --- a/drivers/pci/controller/dwc/pcie-qcom.c
> +++ b/drivers/pci/controller/dwc/pcie-qcom.c
> @@ -227,6 +227,7 @@ struct qcom_pcie {
>   	struct gpio_desc *reset;
>   	struct icc_path *icc_mem;
>   	const struct qcom_pcie_cfg *cfg;
> +	bool suspended;
>   };
>   
>   #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
> @@ -1835,6 +1836,52 @@ static int qcom_pcie_remove(struct platform_device *pdev)
>   	return 0;
>   }
>   
> +static int qcom_pcie_suspend_noirq(struct device *dev)
> +{
> +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> +	int ret;
> +
> +	ret = icc_set_bw(pcie->icc_mem, 0, 0);
> +	if (ret) {
> +		dev_err(pcie->pci->dev, "Failed to set interconnect bandwidth: %d\n", ret);
> +		return ret;
> +	}
> +
> +	/*
> +	 * Turn OFF the resources only for controllers without active PCIe devices. For controllers
> +	 * with active devices, the resources are kept ON and the link is expected to be in L0/L1
> +	 * (sub)states.
> +	 *
> +	 * Turning OFF the resources for controllers with active PCIe devices will trigger access
> +	 * violation during the end of the suspend cycle, as kernel tries to access the PCIe devices
> +	 * config space for masking MSIs.
> +	 *
> +	 * Also, it is not desirable to put the link into L2/L3 state as that implies VDD supply
> +	 * will be removed and the devices may go into powerdown state. This will affect the
> +	 * lifetime of the storage devices like NVMe.
> +	 */
> +	if (!dw_pcie_link_up(pcie->pci)) {
> +		qcom_pcie_host_deinit(&pcie->pci->pp);
> +		pcie->suspended = true;
> +	}
> +
> +	return 0;
> +}
> +
> +static int qcom_pcie_resume_noirq(struct device *dev)
> +{
> +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> +
> +	if (pcie->suspended) {
> +		qcom_pcie_host_init(&pcie->pci->pp);
> +		pcie->suspended = false;
> +	}
> +
> +	qcom_pcie_icc_update(pcie);
> +
> +	return 0;
> +}
> +
>   static const struct of_device_id qcom_pcie_match[] = {
>   	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
>   	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
> @@ -1870,12 +1917,17 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
>   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
>   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
>   
> +static const struct dev_pm_ops qcom_pcie_pm_ops = {
> +	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
> +};
> +
>   static struct platform_driver qcom_pcie_driver = {
>   	.probe = qcom_pcie_probe,
>   	.remove = qcom_pcie_remove,
>   	.driver = {
>   		.name = "qcom-pcie",
>   		.of_match_table = qcom_pcie_match,
> +		.pm = &qcom_pcie_pm_ops,
>   	},
>   };
>   module_platform_driver(qcom_pcie_driver);

Out of curiosity, were you able to measure how much power you were able
to save after adding suspend support for PCIe? I don't know if clock
gating really saves much amount of power, but yeah its true that we
can't really cut off the power domain entirely in this case.
Johan Hovold Jan. 3, 2023, 1:16 p.m. UTC | #2
On Tue, Jan 03, 2023 at 01:19:07PM +0530, Manivannan Sadhasivam wrote:
> During the system suspend, vote for minimal interconnect bandwidth and
> also turn OFF the resources like clock and PHY if there are no active
> devices connected to the controller. For the controllers with active
> devices, the resources are kept ON as removing the resources will
> trigger access violation during the late end of suspend cycle as kernel
> tries to access the config space of PCIe devices to mask the MSIs.
> 
> Also, it is not desirable to put the link into L2/L3 state as that
> implies VDD supply will be removed and the devices may go into powerdown
> state. This will affect the lifetime of storage devices like NVMe.
> 
> And finally, during resume, turn ON the resources if the controller was
> truly suspended (resources OFF) and update the interconnect bandwidth
> based on PCIe Gen speed.
> 
> Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---
>  drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
>  1 file changed, 52 insertions(+)

I just gave this a quick spin on the sc8280xp-crd, and unfortunately
this change appears to break suspend (e.g. hangs during suspend or
resume). Setting a non-zero (250 MBps) peak bandwidth during suspend
makes things work again.

Presumably something is relying on these interconnect clocks to remain
enabled. And isn't that expected as we need to set a non-zero icc bw to
enable the interconnect clocks during probe?

I'm afraid I won't have time to look into this for a while myself, but
have you tried this on the CRD, Mani? 

One obvious difference is the modem on the CRD which I believe neither
of our X13s have, but this seems like more of a general problem.

Johan
Manivannan Sadhasivam Jan. 5, 2023, 1:33 p.m. UTC | #3
On Tue, Jan 03, 2023 at 02:16:47PM +0100, Johan Hovold wrote:
> On Tue, Jan 03, 2023 at 01:19:07PM +0530, Manivannan Sadhasivam wrote:
> > During the system suspend, vote for minimal interconnect bandwidth and
> > also turn OFF the resources like clock and PHY if there are no active
> > devices connected to the controller. For the controllers with active
> > devices, the resources are kept ON as removing the resources will
> > trigger access violation during the late end of suspend cycle as kernel
> > tries to access the config space of PCIe devices to mask the MSIs.
> > 
> > Also, it is not desirable to put the link into L2/L3 state as that
> > implies VDD supply will be removed and the devices may go into powerdown
> > state. This will affect the lifetime of storage devices like NVMe.
> > 
> > And finally, during resume, turn ON the resources if the controller was
> > truly suspended (resources OFF) and update the interconnect bandwidth
> > based on PCIe Gen speed.
> > 
> > Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > ---
> >  drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
> >  1 file changed, 52 insertions(+)
> 
> I just gave this a quick spin on the sc8280xp-crd, and unfortunately
> this change appears to break suspend (e.g. hangs during suspend or
> resume). Setting a non-zero (250 MBps) peak bandwidth during suspend
> makes things work again.
> 
> Presumably something is relying on these interconnect clocks to remain
> enabled. And isn't that expected as we need to set a non-zero icc bw to
> enable the interconnect clocks during probe?
> 

After suspend, I assumed that there won't be any access to the controller
specific registers, so thought it should be fine. And it works on X13s too.
Maybe, the access to device config space is triggering issues on CRD? I will
check with Qcom.

> I'm afraid I won't have time to look into this for a while myself, but
> have you tried this on the CRD, Mani? 
> 

Thanks for testing, Johan!

I did not test this on CRD. Since both X13s and CRD are sharing the same
SoC, I thought it would work on CRD too. But since you have tested and
reported the issue, I will look into it.

> One obvious difference is the modem on the CRD which I believe neither
> of our X13s have, but this seems like more of a general problem.
> 

Yeah, this seems to be a platform issue. I will check on this behaviour and
report back.

Thanks,
Mani

> Johan
Manivannan Sadhasivam Jan. 5, 2023, 1:36 p.m. UTC | #4
On Tue, Jan 03, 2023 at 04:46:11PM +0530, Dhruva Gole wrote:
> 
> 
> On 03/01/23 13:19, Manivannan Sadhasivam wrote:
> > During the system suspend, vote for minimal interconnect bandwidth and
> > also turn OFF the resources like clock and PHY if there are no active
> > devices connected to the controller. For the controllers with active
> > devices, the resources are kept ON as removing the resources will
> > trigger access violation during the late end of suspend cycle as kernel
> > tries to access the config space of PCIe devices to mask the MSIs.
> > 
> > Also, it is not desirable to put the link into L2/L3 state as that
> > implies VDD supply will be removed and the devices may go into powerdown
> > state. This will affect the lifetime of storage devices like NVMe.
> > 
> > And finally, during resume, turn ON the resources if the controller was
> > truly suspended (resources OFF) and update the interconnect bandwidth
> > based on PCIe Gen speed.
> > 
> > Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > ---
> 
> Nice to have another driver added to the list of system suspend
> support!
> 
> Acked-by: Dhruva Gole <d-gole@ti.com>
> 
> >   drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
> >   1 file changed, 52 insertions(+)
> > 
> > diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
> > index 5696e327795b..48810f1f2dba 100644
> > --- a/drivers/pci/controller/dwc/pcie-qcom.c
> > +++ b/drivers/pci/controller/dwc/pcie-qcom.c
> > @@ -227,6 +227,7 @@ struct qcom_pcie {
> >   	struct gpio_desc *reset;
> >   	struct icc_path *icc_mem;
> > >   	const struct qcom_pcie_cfg *cfg;
> > +	bool suspended;
> >   };
> >   #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
> > @@ -1835,6 +1836,52 @@ static int qcom_pcie_remove(struct platform_device *pdev)
> >   	return 0;
> >   }
> > +static int qcom_pcie_suspend_noirq(struct device *dev)
> > +{
> > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > +	int ret;
> > +
> > +	ret = icc_set_bw(pcie->icc_mem, 0, 0);
> > +	if (ret) {
> > +		dev_err(pcie->pci->dev, "Failed to set interconnect bandwidth: %d\n", ret);
> > +		return ret;
> > +	}
> > +
> > +	/*
> > +	 * Turn OFF the resources only for controllers without active PCIe devices. For controllers
> > +	 * with active devices, the resources are kept ON and the link is expected to be in L0/L1
> > +	 * (sub)states.
> > +	 *
> > +	 * Turning OFF the resources for controllers with active PCIe devices will trigger access
> > +	 * violation during the end of the suspend cycle, as kernel tries to access the PCIe devices
> > +	 * config space for masking MSIs.
> > +	 *
> > +	 * Also, it is not desirable to put the link into L2/L3 state as that implies VDD supply
> > +	 * will be removed and the devices may go into powerdown state. This will affect the
> > +	 * lifetime of the storage devices like NVMe.
> > +	 */
> > +	if (!dw_pcie_link_up(pcie->pci)) {
> > +		qcom_pcie_host_deinit(&pcie->pci->pp);
> > +		pcie->suspended = true;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int qcom_pcie_resume_noirq(struct device *dev)
> > +{
> > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > +
> > +	if (pcie->suspended) {
> > +		qcom_pcie_host_init(&pcie->pci->pp);
> > +		pcie->suspended = false;
> > +	}
> > +
> > +	qcom_pcie_icc_update(pcie);
> > +
> > +	return 0;
> > +}
> > +
> >   static const struct of_device_id qcom_pcie_match[] = {
> >   	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
> >   	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
> > @@ -1870,12 +1917,17 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
> >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
> >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
> > +static const struct dev_pm_ops qcom_pcie_pm_ops = {
> > +	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
> > +};
> > +
> >   static struct platform_driver qcom_pcie_driver = {
> >   	.probe = qcom_pcie_probe,
> >   	.remove = qcom_pcie_remove,
> >   	.driver = {
> >   		.name = "qcom-pcie",
> >   		.of_match_table = qcom_pcie_match,
> > +		.pm = &qcom_pcie_pm_ops,
> >   	},
> >   };
> >   module_platform_driver(qcom_pcie_driver);
> 
> Out of curiosity, were you able to measure how much power you were able
> to save after adding suspend support for PCIe? I don't know if clock
> gating really saves much amount of power, but yeah its true that we
> can't really cut off the power domain entirely in this case.
> 

I did not measure the power consumption and I agree that we won't save much
power with setting icc bandwidth to 0. But it is better to have something
than nothing. And in the coming days, I have plans to look into other power
saving measures also.

Thanks,
Mani

> -- 
> Thanks and Regards,
> Dhruva Gole
Matthias Kaehlcke Jan. 6, 2023, 6:17 p.m. UTC | #5
On Thu, Jan 05, 2023 at 07:06:39PM +0530, Manivannan Sadhasivam wrote:
> On Tue, Jan 03, 2023 at 04:46:11PM +0530, Dhruva Gole wrote:
> > 
> > 
> > On 03/01/23 13:19, Manivannan Sadhasivam wrote:
> > > During the system suspend, vote for minimal interconnect bandwidth and
> > > also turn OFF the resources like clock and PHY if there are no active
> > > devices connected to the controller. For the controllers with active
> > > devices, the resources are kept ON as removing the resources will
> > > trigger access violation during the late end of suspend cycle as kernel
> > > tries to access the config space of PCIe devices to mask the MSIs.
> > > 
> > > Also, it is not desirable to put the link into L2/L3 state as that
> > > implies VDD supply will be removed and the devices may go into powerdown
> > > state. This will affect the lifetime of storage devices like NVMe.
> > > 
> > > And finally, during resume, turn ON the resources if the controller was
> > > truly suspended (resources OFF) and update the interconnect bandwidth
> > > based on PCIe Gen speed.
> > > 
> > > Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> > > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > > ---
> > 
> > Nice to have another driver added to the list of system suspend
> > support!
> > 
> > Acked-by: Dhruva Gole <d-gole@ti.com>
> > 
> > >   drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
> > >   1 file changed, 52 insertions(+)
> > > 
> > > diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
> > > index 5696e327795b..48810f1f2dba 100644
> > > --- a/drivers/pci/controller/dwc/pcie-qcom.c
> > > +++ b/drivers/pci/controller/dwc/pcie-qcom.c
> > > @@ -227,6 +227,7 @@ struct qcom_pcie {
> > >   	struct gpio_desc *reset;
> > >   	struct icc_path *icc_mem;
> > >   	const struct qcom_pcie_cfg *cfg;
> > > +	bool suspended;
> > >   };
> > >   #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
> > > @@ -1835,6 +1836,52 @@ static int qcom_pcie_remove(struct platform_device *pdev)
> > >   	return 0;
> > >   }
> > > +static int qcom_pcie_suspend_noirq(struct device *dev)
> > > +{
> > > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > > +	int ret;
> > > +
> > > +	ret = icc_set_bw(pcie->icc_mem, 0, 0);
> > > +	if (ret) {
> > > +		dev_err(pcie->pci->dev, "Failed to set interconnect bandwidth: %d\n", ret);
> > > +		return ret;
> > > +	}
> > > +
> > > +	/*
> > > +	 * Turn OFF the resources only for controllers without active PCIe devices. For controllers
> > > +	 * with active devices, the resources are kept ON and the link is expected to be in L0/L1
> > > +	 * (sub)states.
> > > +	 *
> > > +	 * Turning OFF the resources for controllers with active PCIe devices will trigger access
> > > +	 * violation during the end of the suspend cycle, as kernel tries to access the PCIe devices
> > > +	 * config space for masking MSIs.
> > > +	 *
> > > +	 * Also, it is not desirable to put the link into L2/L3 state as that implies VDD supply
> > > +	 * will be removed and the devices may go into powerdown state. This will affect the
> > > +	 * lifetime of the storage devices like NVMe.
> > > +	 */
> > > +	if (!dw_pcie_link_up(pcie->pci)) {
> > > +		qcom_pcie_host_deinit(&pcie->pci->pp);
> > > +		pcie->suspended = true;
> > > +	}
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +static int qcom_pcie_resume_noirq(struct device *dev)
> > > +{
> > > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > > +
> > > +	if (pcie->suspended) {
> > > +		qcom_pcie_host_init(&pcie->pci->pp);
> > > +		pcie->suspended = false;
> > > +	}
> > > +
> > > +	qcom_pcie_icc_update(pcie);
> > > +
> > > +	return 0;
> > > +}
> > > +
> > >   static const struct of_device_id qcom_pcie_match[] = {
> > >   	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
> > >   	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
> > > @@ -1870,12 +1917,17 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
> > >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
> > >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
> > > +static const struct dev_pm_ops qcom_pcie_pm_ops = {
> > > +	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
> > > +};
> > > +
> > >   static struct platform_driver qcom_pcie_driver = {
> > >   	.probe = qcom_pcie_probe,
> > >   	.remove = qcom_pcie_remove,
> > >   	.driver = {
> > >   		.name = "qcom-pcie",
> > >   		.of_match_table = qcom_pcie_match,
> > > +		.pm = &qcom_pcie_pm_ops,
> > >   	},
> > >   };
> > >   module_platform_driver(qcom_pcie_driver);
> > 
> > Out of curiosity, were you able to measure how much power you were able
> > to save after adding suspend support for PCIe? I don't know if clock
> > gating really saves much amount of power, but yeah its true that we
> > can't really cut off the power domain entirely in this case.
> > 
> 
> I did not measure the power consumption and I agree that we won't save much
> power with setting icc bandwidth to 0. But it is better to have something
> than nothing. And in the coming days, I have plans to look into other power
> saving measures also.

On a sc7280 system I see a reduction of ~30mW with this patch when no PCI
card is plugged in. The reduction seems to come from powering the PHY down.

Interestingly on that system power consumption during suspend (without this
patch) is ~30mW higher *without* a PCI card vs. with a card. Maybe the PHY
doesn't enter a low power mode when no card is plugged in?
Manivannan Sadhasivam Jan. 6, 2023, 7:02 p.m. UTC | #6
On Fri, Jan 06, 2023 at 06:17:19PM +0000, Matthias Kaehlcke wrote:
> On Thu, Jan 05, 2023 at 07:06:39PM +0530, Manivannan Sadhasivam wrote:
> > On Tue, Jan 03, 2023 at 04:46:11PM +0530, Dhruva Gole wrote:
> > > 
> > > 
> > > On 03/01/23 13:19, Manivannan Sadhasivam wrote:
> > > > During the system suspend, vote for minimal interconnect bandwidth and
> > > > also turn OFF the resources like clock and PHY if there are no active
> > > > devices connected to the controller. For the controllers with active
> > > > devices, the resources are kept ON as removing the resources will
> > > > trigger access violation during the late end of suspend cycle as kernel
> > > > tries to access the config space of PCIe devices to mask the MSIs.
> > > > 
> > > > Also, it is not desirable to put the link into L2/L3 state as that
> > > > implies VDD supply will be removed and the devices may go into powerdown
> > > > state. This will affect the lifetime of storage devices like NVMe.
> > > > 
> > > > And finally, during resume, turn ON the resources if the controller was
> > > > truly suspended (resources OFF) and update the interconnect bandwidth
> > > > based on PCIe Gen speed.
> > > > 
> > > > Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> > > > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > > > ---
> > > 
> > > Nice to have another driver added to the list of system suspend
> > > support!
> > > 
> > > Acked-by: Dhruva Gole <d-gole@ti.com>
> > > 
> > > >   drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
> > > >   1 file changed, 52 insertions(+)
> > > > 
> > > > diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
> > > > index 5696e327795b..48810f1f2dba 100644
> > > > --- a/drivers/pci/controller/dwc/pcie-qcom.c
> > > > +++ b/drivers/pci/controller/dwc/pcie-qcom.c
> > > > @@ -227,6 +227,7 @@ struct qcom_pcie {
> > > >   	struct gpio_desc *reset;
> > > >   	struct icc_path *icc_mem;
> > > >   	const struct qcom_pcie_cfg *cfg;
> > > > +	bool suspended;
> > > >   };
> > > >   #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
> > > > @@ -1835,6 +1836,52 @@ static int qcom_pcie_remove(struct platform_device *pdev)
> > > >   	return 0;
> > > >   }
> > > > +static int qcom_pcie_suspend_noirq(struct device *dev)
> > > > +{
> > > > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > > > +	int ret;
> > > > +
> > > > +	ret = icc_set_bw(pcie->icc_mem, 0, 0);
> > > > +	if (ret) {
> > > > +		dev_err(pcie->pci->dev, "Failed to set interconnect bandwidth: %d\n", ret);
> > > > +		return ret;
> > > > +	}
> > > > +
> > > > +	/*
> > > > +	 * Turn OFF the resources only for controllers without active PCIe devices. For controllers
> > > > +	 * with active devices, the resources are kept ON and the link is expected to be in L0/L1
> > > > +	 * (sub)states.
> > > > +	 *
> > > > +	 * Turning OFF the resources for controllers with active PCIe devices will trigger access
> > > > +	 * violation during the end of the suspend cycle, as kernel tries to access the PCIe devices
> > > > +	 * config space for masking MSIs.
> > > > +	 *
> > > > +	 * Also, it is not desirable to put the link into L2/L3 state as that implies VDD supply
> > > > +	 * will be removed and the devices may go into powerdown state. This will affect the
> > > > +	 * lifetime of the storage devices like NVMe.
> > > > +	 */
> > > > +	if (!dw_pcie_link_up(pcie->pci)) {
> > > > +		qcom_pcie_host_deinit(&pcie->pci->pp);
> > > > +		pcie->suspended = true;
> > > > +	}
> > > > +
> > > > +	return 0;
> > > > +}
> > > > +
> > > > +static int qcom_pcie_resume_noirq(struct device *dev)
> > > > +{
> > > > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > > > +
> > > > +	if (pcie->suspended) {
> > > > +		qcom_pcie_host_init(&pcie->pci->pp);
> > > > +		pcie->suspended = false;
> > > > +	}
> > > > +
> > > > +	qcom_pcie_icc_update(pcie);
> > > > +
> > > > +	return 0;
> > > > +}
> > > > +
> > > >   static const struct of_device_id qcom_pcie_match[] = {
> > > >   	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
> > > >   	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
> > > > @@ -1870,12 +1917,17 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
> > > >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
> > > >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
> > > > +static const struct dev_pm_ops qcom_pcie_pm_ops = {
> > > > +	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
> > > > +};
> > > > +
> > > >   static struct platform_driver qcom_pcie_driver = {
> > > >   	.probe = qcom_pcie_probe,
> > > >   	.remove = qcom_pcie_remove,
> > > >   	.driver = {
> > > >   		.name = "qcom-pcie",
> > > >   		.of_match_table = qcom_pcie_match,
> > > > +		.pm = &qcom_pcie_pm_ops,
> > > >   	},
> > > >   };
> > > >   module_platform_driver(qcom_pcie_driver);
> > > 
> > > Out of curiosity, were you able to measure how much power you were able
> > > to save after adding suspend support for PCIe? I don't know if clock
> > > gating really saves much amount of power, but yeah its true that we
> > > can't really cut off the power domain entirely in this case.
> > > 
> > 
> > I did not measure the power consumption and I agree that we won't save much
> > power with setting icc bandwidth to 0. But it is better to have something
> > than nothing. And in the coming days, I have plans to look into other power
> > saving measures also.
> 
> On a sc7280 system I see a reduction of ~30mW with this patch when no PCI
> card is plugged in. The reduction seems to come from powering the PHY down.
> 

Thanks a lot for testing!

> Interestingly on that system power consumption during suspend (without this
> patch) is ~30mW higher *without* a PCI card vs. with a card. Maybe the PHY
> doesn't enter a low power mode when no card is plugged in?

Yeah, both PHY and controllers are never put into low power mode even if there
are no devices connected. I don't know if the low power mode is possible at
all with PHY.

Thanks,
Mani
Matthias Kaehlcke Jan. 9, 2023, 10:41 p.m. UTC | #7
On Sat, Jan 07, 2023 at 12:32:52AM +0530, Manivannan Sadhasivam wrote:
> On Fri, Jan 06, 2023 at 06:17:19PM +0000, Matthias Kaehlcke wrote:
> > On Thu, Jan 05, 2023 at 07:06:39PM +0530, Manivannan Sadhasivam wrote:
> > > On Tue, Jan 03, 2023 at 04:46:11PM +0530, Dhruva Gole wrote:
> > > > 
> > > > 
> > > > On 03/01/23 13:19, Manivannan Sadhasivam wrote:
> > > > > During the system suspend, vote for minimal interconnect bandwidth and
> > > > > also turn OFF the resources like clock and PHY if there are no active
> > > > > devices connected to the controller. For the controllers with active
> > > > > devices, the resources are kept ON as removing the resources will
> > > > > trigger access violation during the late end of suspend cycle as kernel
> > > > > tries to access the config space of PCIe devices to mask the MSIs.
> > > > > 
> > > > > Also, it is not desirable to put the link into L2/L3 state as that
> > > > > implies VDD supply will be removed and the devices may go into powerdown
> > > > > state. This will affect the lifetime of storage devices like NVMe.
> > > > > 
> > > > > And finally, during resume, turn ON the resources if the controller was
> > > > > truly suspended (resources OFF) and update the interconnect bandwidth
> > > > > based on PCIe Gen speed.
> > > > > 
> > > > > Suggested-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
> > > > > Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> > > > > ---
> > > > 
> > > > Nice to have another driver added to the list of system suspend
> > > > support!
> > > > 
> > > > Acked-by: Dhruva Gole <d-gole@ti.com>
> > > > 
> > > > >   drivers/pci/controller/dwc/pcie-qcom.c | 52 ++++++++++++++++++++++++++
> > > > >   1 file changed, 52 insertions(+)
> > > > > 
> > > > > diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
> > > > > index 5696e327795b..48810f1f2dba 100644
> > > > > --- a/drivers/pci/controller/dwc/pcie-qcom.c
> > > > > +++ b/drivers/pci/controller/dwc/pcie-qcom.c
> > > > > @@ -227,6 +227,7 @@ struct qcom_pcie {
> > > > >   	struct gpio_desc *reset;
> > > > >   	struct icc_path *icc_mem;
> > > > >   	const struct qcom_pcie_cfg *cfg;
> > > > > +	bool suspended;
> > > > >   };
> > > > >   #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
> > > > > @@ -1835,6 +1836,52 @@ static int qcom_pcie_remove(struct platform_device *pdev)
> > > > >   	return 0;
> > > > >   }
> > > > > +static int qcom_pcie_suspend_noirq(struct device *dev)
> > > > > +{
> > > > > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > > > > +	int ret;
> > > > > +
> > > > > +	ret = icc_set_bw(pcie->icc_mem, 0, 0);
> > > > > +	if (ret) {
> > > > > +		dev_err(pcie->pci->dev, "Failed to set interconnect bandwidth: %d\n", ret);
> > > > > +		return ret;
> > > > > +	}
> > > > > +
> > > > > +	/*
> > > > > +	 * Turn OFF the resources only for controllers without active PCIe devices. For controllers
> > > > > +	 * with active devices, the resources are kept ON and the link is expected to be in L0/L1
> > > > > +	 * (sub)states.
> > > > > +	 *
> > > > > +	 * Turning OFF the resources for controllers with active PCIe devices will trigger access
> > > > > +	 * violation during the end of the suspend cycle, as kernel tries to access the PCIe devices
> > > > > +	 * config space for masking MSIs.
> > > > > +	 *
> > > > > +	 * Also, it is not desirable to put the link into L2/L3 state as that implies VDD supply
> > > > > +	 * will be removed and the devices may go into powerdown state. This will affect the
> > > > > +	 * lifetime of the storage devices like NVMe.
> > > > > +	 */
> > > > > +	if (!dw_pcie_link_up(pcie->pci)) {
> > > > > +		qcom_pcie_host_deinit(&pcie->pci->pp);
> > > > > +		pcie->suspended = true;
> > > > > +	}
> > > > > +
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > > +static int qcom_pcie_resume_noirq(struct device *dev)
> > > > > +{
> > > > > +	struct qcom_pcie *pcie = dev_get_drvdata(dev);
> > > > > +
> > > > > +	if (pcie->suspended) {
> > > > > +		qcom_pcie_host_init(&pcie->pci->pp);
> > > > > +		pcie->suspended = false;
> > > > > +	}
> > > > > +
> > > > > +	qcom_pcie_icc_update(pcie);
> > > > > +
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > >   static const struct of_device_id qcom_pcie_match[] = {
> > > > >   	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
> > > > >   	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
> > > > > @@ -1870,12 +1917,17 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
> > > > >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
> > > > >   DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
> > > > > +static const struct dev_pm_ops qcom_pcie_pm_ops = {
> > > > > +	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
> > > > > +};
> > > > > +
> > > > >   static struct platform_driver qcom_pcie_driver = {
> > > > >   	.probe = qcom_pcie_probe,
> > > > >   	.remove = qcom_pcie_remove,
> > > > >   	.driver = {
> > > > >   		.name = "qcom-pcie",
> > > > >   		.of_match_table = qcom_pcie_match,
> > > > > +		.pm = &qcom_pcie_pm_ops,
> > > > >   	},
> > > > >   };
> > > > >   module_platform_driver(qcom_pcie_driver);
> > > > 
> > > > Out of curiosity, were you able to measure how much power you were able
> > > > to save after adding suspend support for PCIe? I don't know if clock
> > > > gating really saves much amount of power, but yeah its true that we
> > > > can't really cut off the power domain entirely in this case.
> > > > 
> > > 
> > > I did not measure the power consumption and I agree that we won't save much
> > > power with setting icc bandwidth to 0. But it is better to have something
> > > than nothing. And in the coming days, I have plans to look into other power
> > > saving measures also.
> > 
> > On a sc7280 system I see a reduction of ~30mW with this patch when no PCI
> > card is plugged in. The reduction seems to come from powering the PHY down.
> > 
> 
> Thanks a lot for testing!
> 
> > Interestingly on that system power consumption during suspend (without this
> > patch) is ~30mW higher *without* a PCI card vs. with a card. Maybe the PHY
> > doesn't enter a low power mode when no card is plugged in?
> 
> Yeah, both PHY and controllers are never put into low power mode even if there
> are no devices connected. I don't know if the low power mode is possible at
> all with PHY.

It's still interesting that the PHY apparently at least enters a *lower* power
mode when a card is plugged in, the extra 30mW are only seen without a card.
diff mbox series

Patch

diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 5696e327795b..48810f1f2dba 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -227,6 +227,7 @@  struct qcom_pcie {
 	struct gpio_desc *reset;
 	struct icc_path *icc_mem;
 	const struct qcom_pcie_cfg *cfg;
+	bool suspended;	/* resources were torn down in suspend_noirq; host re-init needed on resume */
 };
 
 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
@@ -1835,6 +1836,61 @@  static int qcom_pcie_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int qcom_pcie_suspend_noirq(struct device *dev)
+{
+	struct qcom_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	/*
+	 * Set the minimum bandwidth required to keep the data path functional
+	 * during suspend. Voting 0 would remove this path's vote entirely,
+	 * allowing the interconnect framework to disable the path altogether.
+	 */
+	ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+	if (ret) {
+		dev_err(pcie->pci->dev, "Failed to set interconnect bandwidth: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Turn OFF the resources only for controllers without active PCIe devices. For controllers
+	 * with active devices, the resources are kept ON and the link is expected to be in L0/L1
+	 * (sub)states.
+	 *
+	 * Turning OFF the resources for controllers with active PCIe devices will trigger access
+	 * violation during the end of the suspend cycle, as kernel tries to access the PCIe devices
+	 * config space for masking MSIs.
+	 *
+	 * Also, it is not desirable to put the link into L2/L3 state as that implies VDD supply
+	 * will be removed and the devices may go into powerdown state. This will affect the
+	 * lifetime of the storage devices like NVMe.
+	 */
+	if (!dw_pcie_link_up(pcie->pci)) {
+		qcom_pcie_host_deinit(&pcie->pci->pp);
+		pcie->suspended = true;
+	}
+
+	return 0;
+}
+
+static int qcom_pcie_resume_noirq(struct device *dev)
+{
+	struct qcom_pcie *pcie = dev_get_drvdata(dev);
+	int ret;
+
+	if (pcie->suspended) {
+		/* Re-init can fail; propagate instead of pretending the controller is up */
+		ret = qcom_pcie_host_init(&pcie->pci->pp);
+		if (ret)
+			return ret;
+
+		pcie->suspended = false;
+	}
+
+	/* Restore the interconnect bandwidth vote based on the current link speed */
+	qcom_pcie_icc_update(pcie);
+
+	return 0;
+}
+
 static const struct of_device_id qcom_pcie_match[] = {
 	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
 	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
@@ -1870,12 +1917,17 @@  DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
 
+static const struct dev_pm_ops qcom_pcie_pm_ops = {	/* system sleep callbacks, noirq phase only */
+	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
+};
+
 static struct platform_driver qcom_pcie_driver = {
 	.probe = qcom_pcie_probe,
 	.remove = qcom_pcie_remove,
 	.driver = {
 		.name = "qcom-pcie",
 		.of_match_table = qcom_pcie_match,
+		.pm = &qcom_pcie_pm_ops,
 	},
 };
 module_platform_driver(qcom_pcie_driver);