
[RFC,v3,5/6] drm/i915/dp: Change a link bandwidth computation for DP YCbCr 4:2:0 output

Message ID 20190305160927.16482-6-gwan-gyeong.mun@intel.com (mailing list archive)
State New, archived
Series drm/i915/dp: Preliminary support for DP YCbCr4:2:0 outputs

Commit Message

Gwan-gyeong Mun March 5, 2019, 4:09 p.m. UTC
All of the link bandwidth and Data M/N calculations assume that bpp is
for RGB output. When the DP output format is YCbCr 4:2:0, the bpp used
in these calculations has to be adjusted accordingly. pipe_bpp is
computed for RGB, i.e. bpc multiplied by 3, whereas YCbCr 4:2:0 needs a
multiplier of 1.5, so the link calculations must use pipe_bpp divided
by 2 when the DP output uses the YCbCr 4:2:0 format:
 - RGB format bpp = bpc x 3
 - YCbCr 4:2:0 format bpp = bpc x 1.5
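For illustration, a minimal sketch of the bpp that ends up being used for
the link calculations (the helper name is hypothetical and not part of this
patch; the actual change halves limits.min_bpp/max_bpp/bpp_step and the
pipe_bpp fed to the M/N computation inline):

/*
 * Illustrative only: the bpp used for link bandwidth and Data M/N
 * calculations, derived from pipe_bpp. pipe_bpp always holds the RGB
 * value (bpc * 3); for YCbCr 4:2:0 the link only carries half of that
 * (bpc * 1.5), since the chroma planes are subsampled 2x2.
 */
static int dp_output_bpp(int pipe_bpp, bool ycbcr420)
{
	return ycbcr420 ? pipe_bpp / 2 : pipe_bpp;
}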

It also adds the missing bpc values for the programming of the VSC
header. This only affects DP and eDP ports that use the YCbCr 4:2:0
output format. For now, it does not consider the DSC + YCbCr 4:2:0 use
case.

v2:
  Addressed review comments from Ville.
  Remove the change of pipe_bpp in intel_ddi_set_pipe_settings():
  because the pipe is running at the full bpp, keep pipe_bpp as the RGB
  value even when the YCbCr 4:2:0 output format is used.
  Add a link bandwidth computation for the YCbCr 4:2:0 output format
  (a simplified sketch of that check follows below).
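For reference, a simplified sketch of the bandwidth check that the halved
bpp feeds into (paraphrased from intel_dp_link_required(),
intel_dp_max_data_rate() and intel_dp_compute_link_config_wide(); the
names and bodies below are condensed for illustration, not copied
verbatim):

#include <linux/kernel.h>

/* Data rate required by the mode, in kBytes/sec (pixel clock in kHz). */
static int link_required(int pixel_clock_khz, int link_bpp)
{
	return DIV_ROUND_UP(pixel_clock_khz * link_bpp, 8);
}

/*
 * Data rate the link can carry, in kBytes/sec. i915 expresses the link
 * clock as the post-8b/10b byte rate in kHz (e.g. 270000 for HBR), so
 * multiplying by the lane count gives the usable payload rate directly.
 */
static int link_available(int link_clock_khz, int lane_count)
{
	return link_clock_khz * lane_count;
}

/*
 * A mode fits when link_required(crtc_clock, bpp) <= link_available(...).
 * With YCbCr 4:2:0 the bpp passed in is pipe_bpp / 2, so a mode that does
 * not fit as RGB may still fit on the same link configuration.
 */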

Signed-off-by: Gwan-gyeong Mun <gwan-gyeong.mun@intel.com>
---
 drivers/gpu/drm/i915/intel_dp.c | 64 +++++++++++++++++++++++++++++----
 1 file changed, 57 insertions(+), 7 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 55c0d70c1ca6..2f3a6620d1ff 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1723,7 +1723,8 @@  void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
 struct link_config_limits {
 	int min_clock, max_clock;
 	int min_lane_count, max_lane_count;
-	int min_bpp, max_bpp;
+	int min_bpp, max_bpp, bpp_step;
+	bool is_ycbcr420;
 };
 
 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
@@ -1833,7 +1834,7 @@  intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 	int bpp, clock, lane_count;
 	int mode_rate, link_clock, link_avail;
 
-	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= limits->bpp_step) {
 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
 						   bpp);
 
@@ -1847,7 +1848,10 @@  intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
 
 				if (mode_rate <= link_avail) {
 					pipe_config->lane_count = lane_count;
-					pipe_config->pipe_bpp = bpp;
+					if (limits->is_ycbcr420)
+						pipe_config->pipe_bpp = bpp * 2;
+					else
+						pipe_config->pipe_bpp = bpp;
 					pipe_config->port_clock = link_clock;
 
 					return 0;
@@ -1869,7 +1873,7 @@  intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
 	int bpp, clock, lane_count;
 	int mode_rate, link_clock, link_avail;
 
-	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= limits->bpp_step) {
 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
 						   bpp);
 
@@ -1883,7 +1887,10 @@  intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
 
 				if (mode_rate <= link_avail) {
 					pipe_config->lane_count = lane_count;
-					pipe_config->pipe_bpp = bpp;
+					if (limits->is_ycbcr420)
+						pipe_config->pipe_bpp = bpp * 2;
+					else
+						pipe_config->pipe_bpp = bpp;
 					pipe_config->port_clock = link_clock;
 
 					return 0;
@@ -2015,6 +2022,8 @@  intel_dp_compute_link_config(struct intel_encoder *encoder,
 	struct link_config_limits limits;
 	int common_len;
 	int ret;
+	bool is_ycbcr420 =
+		pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420;
 
 	common_len = intel_dp_common_len_rate_limit(intel_dp,
 						    intel_dp->max_link_rate);
@@ -2030,6 +2039,20 @@  intel_dp_compute_link_config(struct intel_encoder *encoder,
 
 	limits.min_bpp = 6 * 3;
 	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
+	limits.bpp_step = 2 * 3;
+
+	limits.is_ycbcr420 = is_ycbcr420;
+
+	if (is_ycbcr420) {
+		/*
+		 * The bpp limits above were computed for RGB, i.e. with a
+		 * multiplier of 3. YCbCr 4:2:0 needs a multiplier of 1.5,
+		 * so halve the bpp limits and the step.
+		 */
+		limits.min_bpp /= 2;
+		limits.max_bpp /= 2;
+		limits.bpp_step /= 2;
+	}
 
 	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
 		/*
@@ -2224,13 +2247,23 @@  intel_dp_compute_config(struct intel_encoder *encoder,
 			intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
 	}
 
-	if (!pipe_config->dsc_params.compression_enable)
-		intel_link_compute_m_n(pipe_config->pipe_bpp,
+	if (!pipe_config->dsc_params.compression_enable) {
+		/*
+		 * pipe_bpp holds the RGB value (bpc * 3). For the Data M/N
+		 * calculation with YCbCr 4:2:0 output, the number of bytes
+		 * per pixel on the link is half that of RGB, so use
+		 * pipe_bpp / 2 here.
+		 */
+		int pipe_bpp = pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ?
+			pipe_config->pipe_bpp / 2 : pipe_config->pipe_bpp;
+
+		intel_link_compute_m_n(pipe_bpp,
 				       pipe_config->lane_count,
 				       adjusted_mode->crtc_clock,
 				       pipe_config->port_clock,
 				       &pipe_config->dp_m_n,
 				       constant_n);
+	}
 	else
 		intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
 				       pipe_config->lane_count,
@@ -4458,6 +4491,23 @@  intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
 	 * 100b = 16bpc.
 	 */
 	vsc_sdp.DB17 = 0x1;
+	switch (crtc_state->pipe_bpp / 3) {
+	case 8: /* 8bpc */
+		vsc_sdp.DB17 = 0x1;
+		break;
+	case 10: /* 10bpc */
+		vsc_sdp.DB17 = 0x2;
+		break;
+	case 12: /* 12bpc */
+		vsc_sdp.DB17 = 0x3;
+		break;
+	case 16: /* 16bpc */
+		vsc_sdp.DB17 = 0x4;
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid bpp value '%d'\n", crtc_state->pipe_bpp);
+		break;
+	}
 
 	/*
 	 * Content Type (Bits 2:0)