
[RFC,2/2] drm/msm/dp: enable pm_runtime support for dp driver

Message ID 20230223135635.30659-3-quic_sbillaka@quicinc.com (mailing list archive)
State Not Applicable
Series drm/msm/dp: refactor the msm dp driver resources

Commit Message

Sankeerth Billakanti (QUIC) Feb. 23, 2023, 1:56 p.m. UTC
The current DP driver enables and disables its controller resources
directly, based on code flow. This can disable a resource that is still
required by a different usecase, and it can also lead to excessive voting
for a resource, increasing power consumption.

The pm_runtime framework solves this in the DP driver by reference-counting
the users of a resource, so it is enabled and disabled only when actually
needed. This change adds support for the pm_runtime suspend and resume
operations for the DP driver.

Signed-off-by: Sankeerth Billakanti <quic_sbillaka@quicinc.com>
---
 drivers/gpu/drm/msm/dp/dp_aux.c     |   6 ++
 drivers/gpu/drm/msm/dp/dp_display.c | 121 ++++++++++++++++------------
 drivers/gpu/drm/msm/dp/dp_power.c   |   7 --
 3 files changed, 76 insertions(+), 58 deletions(-)
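
(Not part of the patch: a minimal, self-contained sketch of the runtime-PM
pattern this series moves the DP driver to, using made-up example_* names
rather than real driver code. Resources come up in the runtime-resume
callback, go down in runtime-suspend, and every hardware access path is
bracketed with get/put so the framework reference-counts the users and
autosuspends after the last reference is dropped.)

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* turn off clocks/regulators/PHY here */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* turn on clocks/regulators/PHY here */
	return 0;
}

/* called from any path that needs to touch the hardware */
static int example_access_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resumes the device if needed */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... program the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* drop reference, suspend after delay */
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
	pm_runtime_use_autosuspend(&pdev->dev);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "pm-runtime-example",
		.pm = &example_pm_ops,
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");

With autosuspend configured as above, the device stays powered for the
configured delay (1000 ms, matching the value the patch picks) after the
last put, which avoids bouncing power across back-to-back accesses such as
consecutive AUX transactions.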

Comments

Bjorn Andersson Feb. 27, 2023, 11:36 p.m. UTC | #1
On Thu, Feb 23, 2023 at 07:26:35PM +0530, Sankeerth Billakanti wrote:
> diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
[..]
> +static int dp_runtime_resume(struct device *dev)
> +{
> +	struct platform_device *pdev = to_platform_device(dev);
> +	struct msm_dp *dp_display = platform_get_drvdata(pdev);
> +	struct dp_display_private *dp;
> +
> +	dp = container_of(dp_display, struct dp_display_private, dp_display);
> +	dp_display_host_init(dp);
> +
> +	if (dp->dp_display.is_edp) {
> +		dp_display_host_phy_init(dp);
> +	} else {
> +		dp_catalog_hpd_config_intr(dp->catalog,
> +				DP_DP_HPD_PLUG_INT_MASK |
> +				DP_DP_HPD_UNPLUG_INT_MASK,
> +				true);

I believe this is backwards.

Only in the event that there's no "downstream" HPD handler should we use
the internal HPD. This is signalled by the DRM framework by a call to
dp_bridge_hpd_enable(). So we should use that to enable/disable the
internal HPD handler.

When this happens, we have a reason for keeping power on; i.e. call
pm_runtime_get(). Once we have power/clocking, we'd call
dp_catalog_hpd_config_intr(), from dp_bridge_hpd_enable().


In the case that the internal HPD handling is not used,
dp_bridge_hpd_enable() will not be called; instead, once the downstream
HPD handler switches state, dp_bridge_hpd_notify() will be invoked.

In this case, we need the DP controller to be powered/clocked between
connector_status_connected and connector_status_disconnected.


I believe this should allow the DP controller(s) to stay powered down in
the case where we have external HPD handling (e.g. USB Type-C or
gpio-based dp-connector).

Regards,
Bjorn
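
(Not from the thread: a rough sketch of the split described above, assuming
the driver implements the drm_bridge hpd_enable/hpd_disable/hpd_notify
hooks. bridge_to_dp() is a made-up conversion helper here, and the get/put
balancing in hpd_notify is only indicative.)

#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <linux/pm_runtime.h>
/* plus the driver's own headers for dp_display_private, dp_catalog_*() */

static void dp_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct dp_display_private *dp = bridge_to_dp(bridge);
	struct device *dev = &dp->pdev->dev;

	/* Internal HPD is in use: keep the controller powered so it can
	 * latch plug/unplug interrupts.
	 */
	pm_runtime_get_sync(dev);

	dp_catalog_hpd_config_intr(dp->catalog,
				   DP_DP_HPD_PLUG_INT_MASK |
				   DP_DP_HPD_UNPLUG_INT_MASK,
				   true);
}

static void dp_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct dp_display_private *dp = bridge_to_dp(bridge);
	struct device *dev = &dp->pdev->dev;

	dp_catalog_hpd_config_intr(dp->catalog,
				   DP_DP_HPD_PLUG_INT_MASK |
				   DP_DP_HPD_UNPLUG_INT_MASK,
				   false);

	pm_runtime_put_sync(dev);
}

static void dp_bridge_hpd_notify(struct drm_bridge *bridge,
				 enum drm_connector_status status)
{
	struct dp_display_private *dp = bridge_to_dp(bridge);
	struct device *dev = &dp->pdev->dev;

	/* External HPD (e.g. USB Type-C or a gpio-based dp-connector): hold
	 * a runtime-PM reference only between the connected and disconnected
	 * notifications. A real implementation must keep these get/put pairs
	 * balanced across repeated notifications.
	 */
	if (status == connector_status_connected)
		pm_runtime_get_sync(dev);
	else if (status == connector_status_disconnected)
		pm_runtime_put_sync(dev);
}

This way the internal HPD interrupt keeps the controller powered only while
DRM has requested HPD via hpd_enable(), and with external HPD the controller
is powered only between the connected and disconnected notifications, so it
can stay powered down otherwise.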
Sankeerth Billakanti (QUIC) March 1, 2023, 8:19 a.m. UTC | #2
>> diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c
>> b/drivers/gpu/drm/msm/dp/dp_aux.c
>[..]
>> +static int dp_runtime_resume(struct device *dev) {
>> +	struct platform_device *pdev = to_platform_device(dev);
>> +	struct msm_dp *dp_display = platform_get_drvdata(pdev);
>> +	struct dp_display_private *dp;
>> +
>> +	dp = container_of(dp_display, struct dp_display_private, dp_display);
>> +	dp_display_host_init(dp);
>> +
>> +	if (dp->dp_display.is_edp) {
>> +		dp_display_host_phy_init(dp);
>> +	} else {
>> +		dp_catalog_hpd_config_intr(dp->catalog,
>> +				DP_DP_HPD_PLUG_INT_MASK |
>> +				DP_DP_HPD_UNPLUG_INT_MASK,
>> +				true);
>
>I believe this is backwards.
>
>Only in the event that there's no "downstream" HPD handler should we use
>the internal HPD. This is signalled by the DRM framework by a call to
>dp_bridge_hpd_enable(). So we should use that to enable/disable the
>internal HPD handler.
>
>When this happens, we have a reason for keeping power on; i.e. call
>pm_runtime_get(). Once we have power/clocking, we'd call
>dp_catalog_hpd_config_intr(), from dp_bridge_hpd_enable().
>
>
>In the case that the internal HPD handling is not used,
>dp_bridge_hpd_enable() will not be called; instead, once the downstream HPD
>handler switches state, dp_bridge_hpd_notify() will be invoked.
>
>In this case, we need the DP controller to be powered/clocked between
>connector_status_connected and connector_status_disconnected.
>
>
>I believe this should allow the DP controller(s) to stay powered down in the
>case where we have external HPD handling (e.g. USB Type-C or gpio-based
>dp-connector).
>
>Regards,
>Bjorn

I agree with the approach. I am moving my development to msm-next. I will make the changes according to the suggested HPD handling and repost.

Thank you,
Sankeerth

Patch

diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 5da95dfdeede..45026827bf7a 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -338,6 +338,7 @@  static ssize_t dp_aux_transfer_init(struct drm_dp_aux *dp_aux,
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(dp_aux->dev);
 	mutex_lock(&aux->mutex);
 	if (!aux->initted) {
 		ret = -EIO;
@@ -418,6 +419,8 @@  static ssize_t dp_aux_transfer_init(struct drm_dp_aux *dp_aux,
 
 exit:
 	mutex_unlock(&aux->mutex);
+	pm_runtime_mark_last_busy(dp_aux->dev);
+	pm_runtime_put_autosuspend(dp_aux->dev);
 
 	return ret;
 }
@@ -454,6 +457,7 @@  static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(dp_aux->dev);
 	mutex_lock(&aux->mutex);
 	if (!aux->initted) {
 		ret = -EIO;
@@ -527,6 +531,8 @@  static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
 
 exit:
 	mutex_unlock(&aux->mutex);
+	pm_runtime_mark_last_busy(dp_aux->dev);
+	pm_runtime_put_autosuspend(dp_aux->dev);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a5dcef040b74..a23e79e43100 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -307,8 +307,10 @@  static void dp_display_unbind(struct device *dev, struct device *master,
 	struct msm_drm_private *priv = dev_get_drvdata(master);
 
 	/* disable all HPD interrupts */
-	if (dp->core_initialized)
+	if (dp->core_initialized) {
 		dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+		pm_runtime_put_sync(dev);
+	}
 
 	kthread_stop(dp->ev_tsk);
 
@@ -1083,26 +1085,6 @@  void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
 	mutex_unlock(&dp_display->event_mutex);
 }
 
-static void dp_display_config_hpd(struct dp_display_private *dp)
-{
-
-	dp_display_host_init(dp);
-	dp_catalog_ctrl_hpd_config(dp->catalog);
-
-	/* Enable plug and unplug interrupts only if requested */
-	if (dp->dp_display.internal_hpd)
-		dp_catalog_hpd_config_intr(dp->catalog,
-				DP_DP_HPD_PLUG_INT_MASK |
-				DP_DP_HPD_UNPLUG_INT_MASK,
-				true);
-
-	/* Enable interrupt first time
-	 * we are leaving dp clocks on during disconnect
-	 * and never disable interrupt
-	 */
-	enable_irq(dp->irq);
-}
-
 static int hpd_event_thread(void *data)
 {
 	struct dp_display_private *dp_priv;
@@ -1163,9 +1145,6 @@  static int hpd_event_thread(void *data)
 		spin_unlock_irqrestore(&dp_priv->event_lock, flag);
 
 		switch (todo->event_id) {
-		case EV_HPD_INIT_SETUP:
-			dp_display_config_hpd(dp_priv);
-			break;
 		case EV_HPD_PLUG_INT:
 			dp_hpd_plug_handle(dp_priv, todo->data);
 			break;
@@ -1337,16 +1316,12 @@  static int dp_display_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, &dp->dp_display);
 
-	if (dp->dp_display.is_edp) {
-		dp_display_host_init(dp);
-		dp_display_host_phy_init(dp);
-		dp_catalog_ctrl_hpd_config(dp->catalog);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+	pm_runtime_use_autosuspend(&pdev->dev);
 
+	if (dp->dp_display.is_edp) {
 		rc = devm_of_dp_aux_populate_bus(dp->aux, NULL);
-
-		dp_display_host_phy_exit(dp);
-		dp_display_host_deinit(dp);
-
 		if (rc) {
 			DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
 			goto error;
@@ -1367,6 +1342,8 @@  static int dp_display_remove(struct platform_device *pdev)
 {
 	struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
 
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 	dp_display_deinit_sub_modules(dp);
 
 	component_del(&pdev->dev, &dp_display_comp_ops);
@@ -1375,6 +1352,42 @@  static int dp_display_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int dp_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dp *dp_display = platform_get_drvdata(pdev);
+	struct dp_display_private *dp;
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+	dp_display_host_phy_exit(dp);
+	dp_display_host_deinit(dp);
+
+	return 0;
+}
+
+static int dp_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_dp *dp_display = platform_get_drvdata(pdev);
+	struct dp_display_private *dp;
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+	dp_display_host_init(dp);
+
+	if (dp->dp_display.is_edp) {
+		dp_display_host_phy_init(dp);
+	} else {
+		dp_catalog_hpd_config_intr(dp->catalog,
+				DP_DP_HPD_PLUG_INT_MASK |
+				DP_DP_HPD_UNPLUG_INT_MASK,
+				true);
+	}
+
+	dp_catalog_ctrl_hpd_config(dp->catalog);
+
+	return 0;
+}
+
 static int dp_pm_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -1384,6 +1397,9 @@  static int dp_pm_resume(struct device *dev)
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
+	if (pm_runtime_suspended(dev))
+		return 0;
+
 	mutex_lock(&dp->event_mutex);
 
 	drm_dbg_dp(dp->drm_dev,
@@ -1394,16 +1410,7 @@  static int dp_pm_resume(struct device *dev)
 	/* start from disconnected state */
 	dp->hpd_state = ST_DISCONNECTED;
 
-	/* turn on dp ctrl/phy */
-	dp_display_host_init(dp);
-
-	dp_catalog_ctrl_hpd_config(dp->catalog);
-
-	if (dp->dp_display.internal_hpd)
-		dp_catalog_hpd_config_intr(dp->catalog,
-				DP_DP_HPD_PLUG_INT_MASK |
-				DP_DP_HPD_UNPLUG_INT_MASK,
-				true);
+	dp_runtime_resume(dev);
 
 	if (dp_catalog_link_is_connected(dp->catalog)) {
 		/*
@@ -1452,27 +1459,29 @@  static int dp_pm_suspend(struct device *dev)
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
+	if (pm_runtime_suspended(dev))
+		return 0;
+
 	mutex_lock(&dp->event_mutex);
 
 	drm_dbg_dp(dp->drm_dev,
-		"Before, type=%d core_inited=%d  phy_inited=%d power_on=%d\n",
-		dp->dp_display.connector_type, dp->core_initialized,
+		"Before, type=%d sink=%d conn=%d core_inited=%d  phy_inited=%d power_on=%d\n",
+		dp->dp_display.connector_type, dp->link->sink_count,
+		dp->dp_display.is_connected, dp->core_initialized,
 		dp->phy_initialized, dp_display->power_on);
 
 	/* mainlink enabled */
 	if (dp_power_clk_status(dp->power, DP_CTRL_PM))
 		dp_ctrl_off_link_stream(dp->ctrl);
 
-	dp_display_host_phy_exit(dp);
-
-	/* host_init will be called at pm_resume */
-	dp_display_host_deinit(dp);
+	dp_runtime_suspend(dev);
 
 	dp->hpd_state = ST_SUSPENDED;
 
 	drm_dbg_dp(dp->drm_dev,
-		"After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
-		dp->dp_display.connector_type, dp->core_initialized,
+		"After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
+		dp->dp_display.connector_type, dp->link->sink_count,
+		dp->dp_display.is_connected, dp->core_initialized,
 		dp->phy_initialized, dp_display->power_on);
 
 	mutex_unlock(&dp->event_mutex);
@@ -1481,6 +1490,11 @@  static int dp_pm_suspend(struct device *dev)
 }
 
 static const struct dev_pm_ops dp_pm_ops = {
+
+	SET_RUNTIME_PM_OPS(dp_runtime_suspend, dp_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+
 	.suspend = dp_pm_suspend,
 	.resume =  dp_pm_resume,
 };
@@ -1521,8 +1535,11 @@  void msm_dp_irq_postinstall(struct msm_dp *dp_display)
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
+	/* enable host_init for HPD intr for DP */
 	if (!dp_display->is_edp)
-		dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0);
+		pm_runtime_get_sync(&dp->pdev->dev);
+
+	enable_irq(dp->irq);
 }
 
 bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
@@ -1645,6 +1662,7 @@  void dp_bridge_enable(struct drm_bridge *drm_bridge)
 		return;
 	}
 
+	pm_runtime_get_sync(&dp_display->pdev->dev);
 	if (dp->is_edp)
 		dp_hpd_plug_handle(dp_display, 0);
 
@@ -1728,6 +1746,7 @@  void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
 
 	drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
 	mutex_unlock(&dp_display->event_mutex);
+	pm_runtime_put_sync(&dp_display->pdev->dev);
 }
 
 void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index c0aaabb03389..a736d1b0f02f 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -172,8 +172,6 @@  int dp_power_client_init(struct dp_power *dp_power)
 
 	power = container_of(dp_power, struct dp_power_private, dp_power);
 
-	pm_runtime_enable(&power->pdev->dev);
-
 	rc = dp_power_clk_init(power);
 	if (rc)
 		DRM_ERROR("failed to init clocks %d\n", rc);
@@ -192,7 +190,6 @@  void dp_power_client_deinit(struct dp_power *dp_power)
 
 	power = container_of(dp_power, struct dp_power_private, dp_power);
 
-	pm_runtime_disable(&power->pdev->dev);
 }
 
 int dp_power_init(struct dp_power *dp_power, bool flip)
@@ -207,8 +204,6 @@  int dp_power_init(struct dp_power *dp_power, bool flip)
 
 	power = container_of(dp_power, struct dp_power_private, dp_power);
 
-	pm_runtime_get_sync(&power->pdev->dev);
-
 	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
 	if (rc) {
 		DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
@@ -218,7 +213,6 @@  int dp_power_init(struct dp_power *dp_power, bool flip)
 	return 0;
 
 exit:
-	pm_runtime_put_sync(&power->pdev->dev);
 	return rc;
 }
 
@@ -229,7 +223,6 @@  int dp_power_deinit(struct dp_power *dp_power)
 	power = container_of(dp_power, struct dp_power_private, dp_power);
 
 	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
-	pm_runtime_put_sync(&power->pdev->dev);
 	return 0;
 }