
drm/i915: Allow control of PSR at runtime through debugfs.

Message ID 20180314155832.12957-1-maarten.lankhorst@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Maarten Lankhorst March 14, 2018, 3:58 p.m. UTC
Currently tests modify i915.enable_psr and then do a modeset cycle
to change PSR. We can instead write a value to i915_edp_psr_status to
force a specific PSR mode without a modeset.

To retain compatibility with older userspace, we also still allow
the override through the module parameter, and add some tracking
to check whether a debugfs mode is specified.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 193 +++++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_drv.h     |   7 ++
 drivers/gpu/drm/i915/intel_drv.h    |   7 ++
 drivers/gpu/drm/i915/intel_psr.c    | 155 ++++++++++++++++++-----------
 4 files changed, 301 insertions(+), 61 deletions(-)

Comments

Chris Wilson March 14, 2018, 4:07 p.m. UTC | #1
Quoting Maarten Lankhorst (2018-03-14 15:58:32)
> Currently tests modify i915.enable_psr and then do a modeset cycle
> to change PSR. We can instead write a value to i915_edp_psr_status to
> force a specific PSR mode without a modeset.
> 
> To retain compatibility with older userspace, we also still allow
> the override through the module parameter, and add some tracking
> to check whether a debugfs mode is specified.

Is it possible for you to mandate that the process holds the file open
for as long as it wants its value to hold? Then we can automatically
clean up if the process dies, without requiring userspace to dig itself
out of a hole.
-Chris
Maarten Lankhorst March 14, 2018, 4:11 p.m. UTC | #2
On 14-03-18 at 17:07, Chris Wilson wrote:
> Quoting Maarten Lankhorst (2018-03-14 15:58:32)
>> Currently tests modify i915.enable_psr and then do a modeset cycle
>> to change PSR. We can instead write a value to i915_edp_psr_status to
>> force a specific PSR mode without a modeset.
>>
>> To retain compatibility with older userspace, we also still allow
>> the override through the module parameter, and add some tracking
>> to check whether a debugfs mode is specified.
> Is it possible for you to mandate that the process holds the file open
> for as long as it wants its value to hold? Then we can automatically
> clean up if the process dies, without requiring userspace to dig itself
> out of a hole.
> -Chris

For testing PSR someone might directly echo values to the debugfs file, so I didn't want to use the open fd as a reference count.
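
As a reference for such tests, here is a minimal userspace sketch (not part of the patch) of driving the new interface. The debugfs path assumes the usual per-device location under /sys/kernel/debug/dri/0, and the value written is assumed to follow the i915.enable_psr convention (-1 default, 0 off, 1 on), with 2/3 additionally forcing link standby on/off per the write handler below.

/*
 * Illustrative only: force a PSR mode from a test by writing to the
 * debugfs file added in this patch. Adjust the DRM minor as needed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_edp_psr_status";
	const char *val = "1";	/* assumed to mirror i915.enable_psr values */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (write(fd, val, strlen(val)) < 0)
		perror("write");

	/* The forced mode persists after close; the fd is not a refcount. */
	close(fd);
	return 0;
}

Writing -1 is meant to fall back to the module-parameter default, so a test can undo its override without reloading the module or forcing a modeset.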

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 574fcf234007..4abf8034d5c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2546,16 +2546,13 @@  static const char *psr2_live_status(u32 val)
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *dev_priv = m->private;
 	u32 psrperf = 0;
 	u32 stat[3];
 	enum pipe pipe;
 	bool enabled = false;
 	bool sink_support;
 
-	if (!HAS_PSR(dev_priv))
-		return -ENODEV;
-
 	sink_support = dev_priv->psr.sink_support;
 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
 	if (!sink_support)
@@ -2631,6 +2628,192 @@  static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static bool psr_global_enabled(enum i915_psr_debugfs_mode mode)
+{
+	switch (mode) {
+	case PSR_DEBUGFS_MODE_DEFAULT: return i915_modparams.enable_psr;
+	case PSR_DEBUGFS_MODE_DISABLED: return false;
+	case PSR_DEBUGFS_MODE_ENABLED: return true;
+	}
+
+	/* GCC is stupid. */
+	return false;
+}
+
+static bool psr_needs_disable(struct drm_i915_private *dev_priv,
+			      bool enable, bool link_standby)
+{
+	if (!dev_priv->psr.hw_configured)
+		return false;
+
+	if (!enable)
+		return true;
+
+	if (dev_priv->psr.link_standby != link_standby)
+		return true;
+
+	return false;
+}
+
+static bool psr_needs_enable(struct drm_i915_private *dev_priv,
+			     bool enable)
+{
+	return enable && !dev_priv->psr.hw_configured;
+}
+
+static int __i915_edp_psr_write(struct drm_i915_private *dev_priv,
+				struct drm_modeset_acquire_ctx *ctx,
+				enum i915_psr_debugfs_mode mode,
+				bool link_standby)
+{
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_connector_list_iter conn_iter;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	int ret;
+	bool needs_enable, found, enable;
+
+	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+	if (ret)
+		return ret;
+
+	mutex_lock(&dev_priv->psr.lock);
+retry:
+	if (!dev_priv->psr.enabled) {
+		dev_priv->psr.debugfs_mode = mode;
+		dev_priv->psr.link_standby = link_standby;
+		goto end;
+	}
+	encoder = &dp_to_dig_port(dev_priv->psr.enabled)->base.base;
+	mutex_unlock(&dev_priv->psr.lock);
+
+	found = false;
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter)
+		if (connector->state->best_encoder == encoder) {
+			found = true;
+			break;
+		}
+	drm_connector_list_iter_end(&conn_iter);
+
+	if (WARN_ON(!found))
+		return -EINVAL;
+
+	crtc = connector->state->crtc;
+	ret = drm_modeset_lock(&crtc->mutex, ctx);
+	if (ret)
+		return ret;
+
+	mutex_lock(&dev_priv->psr.lock);
+	enable = psr_global_enabled(mode);
+
+	if (dev_priv->psr.enabled != enc_to_intel_dp(encoder))
+		goto retry;
+
+	if ((connector->state->commit && !try_wait_for_completion(&connector->state->commit->hw_done)) ||
+	    (crtc->state->commit && !try_wait_for_completion(&crtc->state->commit->hw_done))) {
+		ret = -EBUSY;
+		goto end;
+	}
+
+	if (psr_needs_disable(dev_priv, enable, link_standby))
+		__intel_psr_disable(dev_priv, dev_priv->psr.enabled, to_intel_crtc_state(crtc->state));
+
+	needs_enable = psr_needs_enable(dev_priv, enable);
+	dev_priv->psr.debugfs_mode = mode;
+	dev_priv->psr.link_standby = link_standby;
+
+	if (needs_enable)
+		__intel_psr_enable(dev_priv, dev_priv->psr.enabled, to_intel_crtc_state(crtc->state));
+
+end:
+	mutex_unlock(&dev_priv->psr.lock);
+	return ret;
+}
+
+static ssize_t i915_edp_psr_write(struct file *file, const char __user *ubuf,
+				  size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret, val;
+	bool link_standby;
+	enum i915_psr_debugfs_mode mode;
+
+	if (!dev_priv->psr.sink_support)
+		return -ENODEV;
+
+	ret = kstrtoint_from_user(ubuf, len, 10, &val);
+	if (ret < 0) {
+		bool enable;
+		ret = kstrtobool_from_user(ubuf, len, &enable);
+
+		if (ret < 0)
+			return ret;
+
+		val = enable;
+	}
+
+	switch (val) {
+	case -1:
+	case 0:
+	case 1:
+		link_standby = intel_psr_default_link_standby(dev_priv);
+		mode = val;
+		break;
+	case 2:
+		mode = PSR_DEBUGFS_MODE_ENABLED;
+		link_standby = true;
+		break;
+	case 3:
+		mode = PSR_DEBUGFS_MODE_ENABLED;
+		link_standby = false;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	intel_runtime_pm_get(dev_priv);
+
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+retry:
+	ret = __i915_edp_psr_write(dev_priv, &ctx, mode, link_standby);
+	if (ret == -EBUSY) {
+		ret = drm_modeset_backoff(&ctx);
+		if (!ret)
+			goto retry;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return ret ?: len;
+}
+
+static int i915_edp_psr_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (!HAS_PSR(dev_priv))
+		return -ENODEV;
+
+	return single_open(file, i915_edp_psr_status, dev_priv);
+}
+
+static const struct file_operations i915_edp_psr_ops = {
+	.owner = THIS_MODULE,
+	.open = i915_edp_psr_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_edp_psr_write
+};
+
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4734,7 +4917,6 @@  static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
 	{"i915_llc", i915_llc, 0},
-	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
@@ -4761,6 +4943,7 @@  static const struct i915_debugfs_files {
 	{"i915_wedged", &i915_wedged_fops},
 	{"i915_max_freq", &i915_max_freq_fops},
 	{"i915_min_freq", &i915_min_freq_fops},
+	{"i915_edp_psr_status", &i915_edp_psr_ops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b39c5f68efb2..6bf1c6d43f7a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -607,6 +607,13 @@  struct i915_psr {
 	bool colorimetry_support;
 	bool alpm;
 	bool has_hw_tracking;
+	bool hw_configured;
+
+	enum i915_psr_debugfs_mode {
+		PSR_DEBUGFS_MODE_DEFAULT,
+		PSR_DEBUGFS_MODE_DISABLED,
+		PSR_DEBUGFS_MODE_ENABLED
+	} debugfs_mode;
 
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1f0e8f1e4594..c486090f4f52 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1871,11 +1871,18 @@  bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
 
 /* intel_psr.c */
 #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+bool intel_psr_default_link_standby(struct drm_i915_private *dev_priv);
 void intel_psr_init_dpcd(struct intel_dp *intel_dp);
 void intel_psr_enable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state);
+void __intel_psr_enable(struct drm_i915_private *dev_priv,
+			struct intel_dp *intel_dp,
+			const struct intel_crtc_state *crtc_state);
 void intel_psr_disable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *old_crtc_state);
+void __intel_psr_disable(struct drm_i915_private *dev_priv,
+			 struct intel_dp *intel_dp,
+			 const struct intel_crtc_state *old_crtc_state);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits,
 			  enum fb_op_origin origin);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 317cb4a12693..cfce5861b9ad 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,6 +56,48 @@ 
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+bool intel_psr_default_link_standby(struct drm_i915_private *dev_priv)
+{
+	/* Override link_standby x link_off defaults */
+	if (i915_modparams.enable_psr == 2) {
+		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
+		return true;
+	}
+
+	if (i915_modparams.enable_psr == 3) {
+		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
+		return false;
+	}
+
+	/* Set link_standby x link_off defaults */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		/* HSW and BDW require workarounds that we don't implement. */
+		return false;
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		/* On VLV and CHV only standby mode is supported. */
+		return true;
+	else
+		/* For new platforms let's respect VBT back again */
+		return dev_priv->vbt.psr.full_link;
+}
+
+static bool intel_psr_global_enabled(struct drm_i915_private *dev_priv)
+{
+	switch (dev_priv->psr.debugfs_mode) {
+	case PSR_DEBUGFS_MODE_DEFAULT: return i915_modparams.enable_psr;
+	case PSR_DEBUGFS_MODE_DISABLED: return false;
+	case PSR_DEBUGFS_MODE_ENABLED: return true;
+	}
+
+	/* GCC is stupid. */
+	return false;
+}
+
+static bool intel_psr_enabled(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->psr.enabled && dev_priv->psr.hw_configured;
+}
+
 static inline enum intel_display_power_domain
 psr_aux_domain(struct intel_dp *intel_dp)
 {
@@ -502,11 +544,6 @@  void intel_psr_compute_config(struct intel_dp *intel_dp,
 	if (!CAN_PSR(dev_priv))
 		return;
 
-	if (!i915_modparams.enable_psr) {
-		DRM_DEBUG_KMS("PSR disable by flag\n");
-		return;
-	}
-
 	/*
 	 * HSW spec explicitly says PSR is tied to port A.
 	 * BDW+ platforms with DDI implementation of PSR have different
@@ -559,7 +596,11 @@  void intel_psr_compute_config(struct intel_dp *intel_dp,
 
 	crtc_state->has_psr = true;
 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
-	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
+
+	if (intel_psr_global_enabled(dev_priv))
+		DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
+	else
+		DRM_DEBUG_KMS("PSR disable by flag\n");
 }
 
 static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -617,6 +658,34 @@  static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 	}
 }
 
+void __intel_psr_enable(struct drm_i915_private *dev_priv,
+			struct intel_dp *intel_dp,
+			const struct intel_crtc_state *crtc_state)
+{
+	dev_priv->psr.hw_configured = true;
+
+	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
+	dev_priv->psr.enable_sink(intel_dp);
+	dev_priv->psr.enable_source(intel_dp, crtc_state);
+
+	if (INTEL_GEN(dev_priv) >= 9) {
+		intel_psr_activate(intel_dp);
+	} else {
+		/*
+		 * FIXME: Activation should happen immediately since this
+		 * function is just called after pipe is fully trained and
+		 * enabled.
+		 * However on some platforms we face issues when first
+		 * activation follows a modeset so quickly.
+		 *     - On VLV/CHV we get bank screen on first activation
+		 *     - On HSW/BDW we get a recoverable frozen screen until
+		 *       next exit-activate sequence.
+		 */
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+	}
+}
+
 /**
  * intel_psr_enable - Enable PSR
  * @intel_dp: Intel DP
@@ -647,27 +716,10 @@  void intel_psr_enable(struct intel_dp *intel_dp,
 	dev_priv->psr.psr2_support = crtc_state->has_psr2;
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
-	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
-	dev_priv->psr.enable_sink(intel_dp);
-	dev_priv->psr.enable_source(intel_dp, crtc_state);
 	dev_priv->psr.enabled = intel_dp;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_psr_activate(intel_dp);
-	} else {
-		/*
-		 * FIXME: Activation should happen immediately since this
-		 * function is just called after pipe is fully trained and
-		 * enabled.
-		 * However on some platforms we face issues when first
-		 * activation follows a modeset so quickly.
-		 *     - On VLV/CHV we get bank screen on first activation
-		 *     - On HSW/BDW we get a recoverable frozen screen until
-		 *       next exit-activate sequence.
-		 */
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-	}
+	if (intel_psr_global_enabled(dev_priv))
+		__intel_psr_enable(dev_priv, intel_dp, crtc_state);
 
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -752,6 +804,17 @@  static void hsw_psr_disable(struct intel_dp *intel_dp,
 	psr_aux_io_power_put(intel_dp);
 }
 
+void __intel_psr_disable(struct drm_i915_private *dev_priv,
+			 struct intel_dp *intel_dp,
+			 const struct intel_crtc_state *old_crtc_state)
+{
+	dev_priv->psr.disable_source(intel_dp, old_crtc_state);
+
+	/* Disable PSR on Sink */
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+	dev_priv->psr.hw_configured = false;
+}
+
 /**
  * intel_psr_disable - Disable PSR
  * @intel_dp: Intel DP
@@ -773,15 +836,13 @@  void intel_psr_disable(struct intel_dp *intel_dp,
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
+	if (intel_dp != dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
 		return;
 	}
 
-	dev_priv->psr.disable_source(intel_dp, old_crtc_state);
-
-	/* Disable PSR on Sink */
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+	if (intel_psr_enabled(dev_priv))
+		__intel_psr_disable(dev_priv, intel_dp, old_crtc_state);
 
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
@@ -833,11 +894,11 @@  static void intel_psr_work(struct work_struct *work)
 		}
 	}
 	mutex_lock(&dev_priv->psr.lock);
-	intel_dp = dev_priv->psr.enabled;
-
-	if (!intel_dp)
+	if (!intel_psr_enabled(dev_priv))
 		goto unlock;
 
+	intel_dp = dev_priv->psr.enabled;
+
 	/*
 	 * The delayed work can race with an invalidate hence we need to
 	 * recheck. Since psr_flush first clears this and then reschedules we
@@ -933,7 +994,7 @@  void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
+	if (!intel_psr_enabled(dev_priv)) {
 		mutex_unlock(&dev_priv->psr.lock);
 		return;
 	}
@@ -979,7 +1040,7 @@  void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
+	if (!intel_psr_enabled(dev_priv)) {
 		mutex_unlock(&dev_priv->psr.lock);
 		return;
 	}
@@ -1022,7 +1083,7 @@  void intel_psr_flush(struct drm_i915_private *dev_priv,
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
+	if (!intel_psr_enabled(dev_priv)) {
 		mutex_unlock(&dev_priv->psr.lock);
 		return;
 	}
@@ -1081,26 +1142,8 @@  void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (i915_modparams.enable_psr == -1)
 		i915_modparams.enable_psr = 0;
 
-	/* Set link_standby x link_off defaults */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		/* HSW and BDW require workarounds that we don't implement. */
-		dev_priv->psr.link_standby = false;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		/* On VLV and CHV only standby mode is supported. */
-		dev_priv->psr.link_standby = true;
-	else
-		/* For new platforms let's respect VBT back again */
-		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
-
-	/* Override link_standby x link_off defaults */
-	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
-		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
-		dev_priv->psr.link_standby = true;
-	}
-	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
-		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
-		dev_priv->psr.link_standby = false;
-	}
+	dev_priv->psr.link_standby = intel_psr_default_link_standby(dev_priv);
+	dev_priv->psr.debugfs_mode = PSR_DEBUGFS_MODE_DEFAULT;
 
 	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);