
[3/4] drm/i915: Find fallback link rate/lane count

Message ID 1480984058-552-4-git-send-email-manasi.d.navare@intel.com (mailing list archive)
State New, archived

Commit Message

Navare, Manasi Dec. 6, 2016, 12:27 a.m. UTC
If link training fails, then we need to fall back to a lower
link rate first, and if link training fails at RBR, then
fall back to a lower lane count.
This function finds the next lower link rate/lane count
value after a link training failure and limits the max
link_rate and lane_count values to these fallback values.

v6:
* Cap the max link rate and lane count to the max
values obtained during fallback link training (Daniel Vetter)
v5:
* Start the fallback at the lane count value passed not
the max lane count (Jani Nikula)
v4:
* Remove the redundant variable link_train_failed
v3:
* Remove fallback_link_rate_index variable, just obtain
that using the helper intel_dp_link_rate_index (Jani Nikula)
v2:
Squash the patch that returns the link rate index (Jani Nikula)

Acked-by: Tony Cheng <tony.cheng@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
---
 drivers/gpu/drm/i915/intel_dp.c  | 40 ++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_drv.h |  2 ++
 2 files changed, 42 insertions(+)

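To make the intended flow concrete, here is a rough sketch of how a link
training failure path might consume the new helper. The wrapper function and
the retry mechanism are illustrative assumptions for this write-up, not code
from this series; only intel_dp_get_link_train_fallback_values() comes from
the patch below.

static void intel_dp_handle_train_failure(struct intel_dp *intel_dp,
					  int link_rate, uint8_t lane_count)
{
	/*
	 * Cap max_sink_link_bw/max_sink_lane_count to the next fallback
	 * step; the helper returns -1 once RBR at one lane has also failed.
	 */
	if (intel_dp_get_link_train_fallback_values(intel_dp, link_rate,
						    lane_count) < 0) {
		DRM_ERROR("No fallback values left, giving up\n");
		return;
	}

	/*
	 * A subsequent retry (e.g. a new modeset) then recomputes the link
	 * configuration against the lowered maxima.
	 */
}
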
Comments

Navare, Manasi Dec. 8, 2016, 6:19 p.m. UTC | #1
Hi Jani,

This patch uses the simplified fallback logic: if link training
fails, we first step the max sink link rate down until it reaches
RBR, keeping the max lane count unchanged; we then lower the lane
count, set the max lane count to this lowered value, and reset the
link rate to the max sink link rate based on the DPCD values.
Because this directly updates the max link rate and max lane count
values on fallback, we do not need any special case for fallback
during intel_dp_compute_config.

Manasi
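
As an illustration of why no special casing is needed in
intel_dp_compute_config(): once the fallback caps live in max_sink_link_bw
and max_sink_lane_count, the existing max-value lookups can simply derive
from those fields. The helper names in this sketch are assumptions for
illustration, not code from the series; drm_dp_bw_code_to_link_rate() is the
standard DRM helper for turning a DPCD bandwidth code back into a link rate.

/*
 * Sketch only: i915 could derive its maxima from the new fields like this.
 * drm_dp_bw_code_to_link_rate() converts a DPCD bandwidth code (e.g.
 * DP_LINK_BW_2_7) back into a link rate in kHz.
 */
static int intel_dp_max_fallback_link_rate(struct intel_dp *intel_dp)
{
	return drm_dp_bw_code_to_link_rate(intel_dp->max_sink_link_bw);
}

static uint8_t intel_dp_max_fallback_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_sink_lane_count;
}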

On Mon, Dec 05, 2016 at 04:27:37PM -0800, Manasi Navare wrote:
> If link training fails, then we need to fallback to lower
> link rate first and if link training fails at RBR, then
> fallback to lower lane count.
> This function finds the next lower link rate/lane count
> value after link training failure and limits the max
> link_rate and lane_count values to these fallback values.
> 
> v6:
> * Cap the max link rate and lane count to the max
> values obtained during fallback link training (Daniel Vetter)
> v5:
> * Start the fallback at the lane count value passed not
> the max lane count (Jani Nikula)
> v4:
> * Remove the redundant variable link_train_failed
> v3:
> * Remove fallback_link_rate_index variable, just obtain
> that using the helper intel_dp_link_rate_index (Jani Nikula)
> v2:
> Squash the patch that returns the link rate index (Jani Nikula)
> 
> Acked-by: Tony Cheng <tony.cheng@amd.com>
> Acked-by: Harry Wentland <harry.wentland@amd.com>
> Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
> Cc: Jani Nikula <jani.nikula@linux.intel.com>
> Cc: Daniel Vetter <daniel.vetter@intel.com>
> Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_dp.c  | 40 ++++++++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_drv.h |  2 ++
>  2 files changed, 42 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 434dc7d..b5c7526f 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -278,6 +278,46 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
>  			       common_rates);
>  }
>  
> +static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
> +				    int *common_rates, int link_rate)
> +{
> +	int common_len;
> +	int index;
> +
> +	common_len = intel_dp_common_rates(intel_dp, common_rates);
> +	for (index = 0; index < common_len; index++) {
> +		if (link_rate == common_rates[common_len - index - 1])
> +			return common_len - index - 1;
> +	}
> +
> +	return -1;
> +}
> +
> +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> +					    int link_rate, uint8_t lane_count)
> +{
> +	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
> +	int common_len;
> +	int link_rate_index = -1;
> +
> +	common_len = intel_dp_common_rates(intel_dp, common_rates);
> +	link_rate_index = intel_dp_link_rate_index(intel_dp,
> +						   common_rates,
> +						   link_rate);
> +	if (link_rate_index > 0) {
> +		intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
> +		intel_dp->max_sink_lane_count = lane_count;
> +	} else if (lane_count > 1) {
> +		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
> +		intel_dp->max_sink_lane_count = lane_count >> 1;
> +	} else {
> +		DRM_ERROR("Link Training Unsuccessful\n");
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
>  static enum drm_mode_status
>  intel_dp_mode_valid(struct drm_connector *connector,
>  		    struct drm_display_mode *mode)
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index b6526ad..47e3671 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -1400,6 +1400,8 @@ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
>  void intel_dp_set_link_params(struct intel_dp *intel_dp,
>  			      int link_rate, uint8_t lane_count,
>  			      bool link_mst);
> +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> +					    int link_rate, uint8_t lane_count);
>  void intel_dp_start_link_train(struct intel_dp *intel_dp);
>  void intel_dp_stop_link_train(struct intel_dp *intel_dp);
>  void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
> -- 
> 1.9.1
>
Jani Nikula Dec. 8, 2016, 9:46 p.m. UTC | #2
On Tue, 06 Dec 2016, Manasi Navare <manasi.d.navare@intel.com> wrote:
> If link training fails, then we need to fallback to lower
> link rate first and if link training fails at RBR, then
> fallback to lower lane count.
> This function finds the next lower link rate/lane count
> value after link training failure and limits the max
> link_rate and lane_count values to these fallback values.
>
> v6:
> * Cap the max link rate and lane count to the max
> values obtained during fallback link training (Daniel Vetter)
> v5:
> * Start the fallback at the lane count value passed not
> the max lane count (Jani Nikula)
> v4:
> * Remove the redundant variable link_train_failed
> v3:
> * Remove fallback_link_rate_index variable, just obtain
> that using the helper intel_dp_link_rate_index (Jani Nikula)
> v2:
> Squash the patch that returns the link rate index (Jani Nikula)
>
> Acked-by: Tony Cheng <tony.cheng@amd.com>
> Acked-by: Harry Wentland <harry.wentland@amd.com>
> Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
> Cc: Jani Nikula <jani.nikula@linux.intel.com>
> Cc: Daniel Vetter <daniel.vetter@intel.com>
> Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_dp.c  | 40 ++++++++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/intel_drv.h |  2 ++
>  2 files changed, 42 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 434dc7d..b5c7526f 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -278,6 +278,46 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
>  			       common_rates);
>  }
>  
> +static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
> +				    int *common_rates, int link_rate)
> +{
> +	int common_len;
> +	int index;
> +
> +	common_len = intel_dp_common_rates(intel_dp, common_rates);
> +	for (index = 0; index < common_len; index++) {
> +		if (link_rate == common_rates[common_len - index - 1])
> +			return common_len - index - 1;

Probably somewhere in the history of the patch series there was a time
when it was necessary to search for the rates in reverse order. What
possible benefit could that offer at this point?

> +	}
> +
> +	return -1;
> +}
> +
> +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> +					    int link_rate, uint8_t lane_count)
> +{
> +	int common_rates[DP_MAX_SUPPORTED_RATES] = {};

No need to initialize because you initialize it a couple of lines later.

> +	int common_len;
> +	int link_rate_index = -1;

No need to initialize because you initialize it a couple of lines later.

> +
> +	common_len = intel_dp_common_rates(intel_dp, common_rates);
> +	link_rate_index = intel_dp_link_rate_index(intel_dp,
> +						   common_rates,
> +						   link_rate);

Please stop and think, and don't rush each new iteration of the patches.

What's wrong with the above lines? Please think about it. Answer at the
end of the mail (*).

> +	if (link_rate_index > 0) {
> +		intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
> +		intel_dp->max_sink_lane_count = lane_count;
> +	} else if (lane_count > 1) {
> +		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
> +		intel_dp->max_sink_lane_count = lane_count >> 1;
> +	} else {
> +		DRM_ERROR("Link Training Unsuccessful\n");
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
>  static enum drm_mode_status
>  intel_dp_mode_valid(struct drm_connector *connector,
>  		    struct drm_display_mode *mode)
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index b6526ad..47e3671 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -1400,6 +1400,8 @@ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
>  void intel_dp_set_link_params(struct intel_dp *intel_dp,
>  			      int link_rate, uint8_t lane_count,
>  			      bool link_mst);
> +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> +					    int link_rate, uint8_t lane_count);
>  void intel_dp_start_link_train(struct intel_dp *intel_dp);
>  void intel_dp_stop_link_train(struct intel_dp *intel_dp);
>  void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);


(*) You do intel_dp_common_rates(intel_dp, common_rates) twice, for no
reason at all.
Navare, Manasi Dec. 8, 2016, 10:05 p.m. UTC | #3
On Thu, Dec 08, 2016 at 11:46:02PM +0200, Jani Nikula wrote:
> On Tue, 06 Dec 2016, Manasi Navare <manasi.d.navare@intel.com> wrote:
> > If link training fails, then we need to fallback to lower
> > link rate first and if link training fails at RBR, then
> > fallback to lower lane count.
> > This function finds the next lower link rate/lane count
> > value after link training failure and limits the max
> > link_rate and lane_count values to these fallback values.
> >
> > v6:
> > * Cap the max link rate and lane count to the max
> > values obtained during fallback link training (Daniel Vetter)
> > v5:
> > * Start the fallback at the lane count value passed not
> > the max lane count (Jani Nikula)
> > v4:
> > * Remove the redundant variable link_train_failed
> > v3:
> > * Remove fallback_link_rate_index variable, just obtain
> > that using the helper intel_dp_link_rate_index (Jani Nikula)
> > v2:
> > Squash the patch that returns the link rate index (Jani Nikula)
> >
> > Acked-by: Tony Cheng <tony.cheng@amd.com>
> > Acked-by: Harry Wentland <harry.wentland@amd.com>
> > Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
> > Cc: Jani Nikula <jani.nikula@linux.intel.com>
> > Cc: Daniel Vetter <daniel.vetter@intel.com>
> > Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_dp.c  | 40 ++++++++++++++++++++++++++++++++++++++++
> >  drivers/gpu/drm/i915/intel_drv.h |  2 ++
> >  2 files changed, 42 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> > index 434dc7d..b5c7526f 100644
> > --- a/drivers/gpu/drm/i915/intel_dp.c
> > +++ b/drivers/gpu/drm/i915/intel_dp.c
> > @@ -278,6 +278,46 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
> >  			       common_rates);
> >  }
> >  
> > +static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
> > +				    int *common_rates, int link_rate)
> > +{
> > +	int common_len;
> > +	int index;
> > +
> > +	common_len = intel_dp_common_rates(intel_dp, common_rates);
> > +	for (index = 0; index < common_len; index++) {
> > +		if (link_rate == common_rates[common_len - index - 1])
> > +			return common_len - index - 1;
> 
> Probably somewhere in the history of the patch series there was a time
> when it was necessary to search for the rates in reverse order. What
> possible benefit could that offer at this point?
>

The advantage here is that the link rate is likely to match more
quickly if we search in reverse order.
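
For comparison, the forward-order search being hinted at would look like the
sketch below; since each rate appears only once in common_rates[], it returns
the same index as the reverse walk. This is an illustrative rewrite, not code
from the series.

static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
				    int *common_rates, int link_rate)
{
	int common_len = intel_dp_common_rates(intel_dp, common_rates);
	int index;

	/* common_rates[] is sorted and has no duplicates, so a forward
	 * scan finds the same match as the reverse scan above. */
	for (index = 0; index < common_len; index++) {
		if (link_rate == common_rates[index])
			return index;
	}

	return -1;
}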

 
> > +	}
> > +
> > +	return -1;
> > +}
> > +
> > +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> > +					    int link_rate, uint8_t lane_count)
> > +{
> > +	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
> 
> No need to initialize because you initialize it a couple of lines later.
>

Agreed
 
> > +	int common_len;
> > +	int link_rate_index = -1;
> 
> No need to initialize because you initialize it a couple of lines later.
> 
> > +
> > +	common_len = intel_dp_common_rates(intel_dp, common_rates);
> > +	link_rate_index = intel_dp_link_rate_index(intel_dp,
> > +						   common_rates,
> > +						   link_rate);
> 
> Please stop and think, and don't rush each new iteration of the patches.
> 
> What's wrong with the above lines? Please think about it. Answer at the
> end of the mail (*).
>

> > +	if (link_rate_index > 0) {
> > +		intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
> > +		intel_dp->max_sink_lane_count = lane_count;
> > +	} else if (lane_count > 1) {
> > +		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
> > +		intel_dp->max_sink_lane_count = lane_count >> 1;
> > +	} else {
> > +		DRM_ERROR("Link Training Unsuccessful\n");
> > +		return -1;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> >  static enum drm_mode_status
> >  intel_dp_mode_valid(struct drm_connector *connector,
> >  		    struct drm_display_mode *mode)
> > diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> > index b6526ad..47e3671 100644
> > --- a/drivers/gpu/drm/i915/intel_drv.h
> > +++ b/drivers/gpu/drm/i915/intel_drv.h
> > @@ -1400,6 +1400,8 @@ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
> >  void intel_dp_set_link_params(struct intel_dp *intel_dp,
> >  			      int link_rate, uint8_t lane_count,
> >  			      bool link_mst);
> > +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
> > +					    int link_rate, uint8_t lane_count);
> >  void intel_dp_start_link_train(struct intel_dp *intel_dp);
> >  void intel_dp_stop_link_train(struct intel_dp *intel_dp);
> >  void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
> 
> 
> (*) You do intel_dp_common_rates(intel_dp, common_rates) twice, for no
> reason at all.
>

Actually the first call was to obtain common_len, which was needed earlier,
but we no longer need it because the simplified fallback logic modifies the
max sink link rate directly.
So yes, I will remove the first call to intel_dp_common_rates().
Good catch! Thanks, Jani.

Regards
Manasi 
> -- 
> Jani Nikula, Intel Open Source Technology Center
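
For reference, a sketch of the fallback helper with the duplicate
intel_dp_common_rates() call dropped, as agreed above.
intel_dp_link_rate_index() already fills common_rates[] itself, so common_len
is no longer needed either. This is an illustration of the agreed change, not
the actual follow-up revision.

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, uint8_t lane_count)
{
	int common_rates[DP_MAX_SUPPORTED_RATES];
	int link_rate_index;

	/* intel_dp_link_rate_index() populates common_rates[] and returns
	 * the index of link_rate within it, or -1 if it is not found. */
	link_rate_index = intel_dp_link_rate_index(intel_dp,
						   common_rates,
						   link_rate);
	if (link_rate_index > 0) {
		/* Step down to the next lower common rate, keep the lanes. */
		intel_dp->max_sink_link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
		intel_dp->max_sink_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: halve the lane count and start
		 * again from the max sink link rate. */
		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
		intel_dp->max_sink_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}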

Patch

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 434dc7d..b5c7526f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -278,6 +278,46 @@  static int intel_dp_common_rates(struct intel_dp *intel_dp,
 			       common_rates);
 }
 
+static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
+				    int *common_rates, int link_rate)
+{
+	int common_len;
+	int index;
+
+	common_len = intel_dp_common_rates(intel_dp, common_rates);
+	for (index = 0; index < common_len; index++) {
+		if (link_rate == common_rates[common_len - index - 1])
+			return common_len - index - 1;
+	}
+
+	return -1;
+}
+
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+					    int link_rate, uint8_t lane_count)
+{
+	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+	int common_len;
+	int link_rate_index = -1;
+
+	common_len = intel_dp_common_rates(intel_dp, common_rates);
+	link_rate_index = intel_dp_link_rate_index(intel_dp,
+						   common_rates,
+						   link_rate);
+	if (link_rate_index > 0) {
+		intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
+		intel_dp->max_sink_lane_count = lane_count;
+	} else if (lane_count > 1) {
+		intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+		intel_dp->max_sink_lane_count = lane_count >> 1;
+	} else {
+		DRM_ERROR("Link Training Unsuccessful\n");
+		return -1;
+	}
+
+	return 0;
+}
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b6526ad..47e3671 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1400,6 +1400,8 @@  bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
 			      int link_rate, uint8_t lane_count,
 			      bool link_mst);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+					    int link_rate, uint8_t lane_count);
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);