Message ID | 20191030192431.5798-12-mikita.lipski@amd.com
---|---
State | New, archived
Series | DSC MST support for AMDGPU
So: one comment down below:

On Wed, 2019-10-30 at 15:24 -0400, mikita.lipski@amd.com wrote:
> From: David Francis <David.Francis@amd.com>
>
> If there is limited link bandwidth on an MST network,
> it must be divided fairly between the streams on that network.
>
> Implement an algorithm to determine the correct DSC config
> for each stream.
>
> The algorithm:
> This
>     [            ]        (        )
> represents the range of bandwidths possible for a given stream.
> The [] area represents the range of DSC configs, and the ()
> represents no DSC. The bandwidth used increases from left to right.
>
> First, try disabling DSC on all streams:
>     [            ]        (|       )
>     [        ]        (|       )
> Check this against the bandwidth limits of the link and each branch
> (including each endpoint). If it passes, the job is done.
>
> Second, try maximum DSC compression on all streams
> that support DSC:
>     [|           ]        (        )
>     [|       ]        (        )
> If this does not pass, then enabling this combination of streams
> is impossible.
>
> Otherwise, divide the remaining bandwidth evenly amongst the streams:
>     [     |      ]        (        )
>     [     |  ]        (        )
>
> If one or more of the streams reach minimum compression, evenly
> divide the remaining bandwidth amongst the remaining streams:
>     [           |]        (        )
>     [        |]        (        )
>     [      |     ]        (        )
>     [      |  ]        (        )
>
> If all streams can reach minimum compression, disable compression
> greedily:
>     [           |]        (        )
>     [        |]        (        )
>     [            ]        (|       )
>
> Perform this algorithm on each full update, on each MST link
> with at least one DSC stream on it.
>
> After the configs are computed, call
> dcn20_add_dsc_to_stream_resource on each stream with DSC enabled.
> It is only after all streams are created that we can know which
> of them will need DSC.
>
> Do all of this at the end of amdgpu atomic check. If it fails,
> fail check; this combination of timings cannot be supported.
>
> Reviewed-by: Wenjing Liu <Wenjing.Liu@amd.com>
> Signed-off-by: David Francis <David.Francis@amd.com>
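[To make the bracket diagrams concrete, here is a minimal standalone model
of the search the commit message describes. It is illustrative only: every
name (stream_cfg, fair_share(), the single aggregate link_kbps limit) is a
stand-in; the actual patch works in PBN units and checks per-branch limits,
visits streams smallest-slack-first in step 3, and orders step 4 by the
bandwidth saved rather than by index.]

	#include <stdbool.h>

	struct stream_cfg {
		int min_kbps;    /* bandwidth at maximum DSC compression */
		int max_kbps;    /* bandwidth at minimum DSC compression */
		int stream_kbps; /* uncompressed bandwidth */
		int alloc_kbps;  /* allocation chosen by the algorithm */
		bool dsc;
	};

	static bool fits(const struct stream_cfg *s, int count, int link_kbps)
	{
		int i, used = 0;

		for (i = 0; i < count; i++)
			used += s[i].alloc_kbps;
		return used <= link_kbps;
	}

	static bool fair_share(struct stream_cfg *s, int count, int link_kbps)
	{
		int i;

		/* Step 1: try no compression on any stream. */
		for (i = 0; i < count; i++) {
			s[i].alloc_kbps = s[i].stream_kbps;
			s[i].dsc = false;
		}
		if (fits(s, count, link_kbps))
			return true;

		/* Step 2: maximum compression everywhere; if even this
		 * fails, the combination of streams is impossible. */
		for (i = 0; i < count; i++) {
			s[i].alloc_kbps = s[i].min_kbps;
			s[i].dsc = true;
		}
		if (!fits(s, count, link_kbps))
			return false;

		/* Step 3: split the leftover bandwidth evenly; a stream
		 * that reaches minimum compression (max_kbps) drops out
		 * and its surplus is re-split among the rest. */
		for (;;) {
			int used = 0, growable = 0, share;

			for (i = 0; i < count; i++) {
				used += s[i].alloc_kbps;
				if (s[i].dsc && s[i].alloc_kbps < s[i].max_kbps)
					growable++;
			}
			share = growable ? (link_kbps - used) / growable : 0;
			if (share <= 0)
				break;
			for (i = 0; i < count; i++) {
				if (!s[i].dsc || s[i].alloc_kbps >= s[i].max_kbps)
					continue;
				s[i].alloc_kbps += share;
				if (s[i].alloc_kbps > s[i].max_kbps)
					s[i].alloc_kbps = s[i].max_kbps;
			}
		}

		/* Step 4: greedily disable DSC where the uncompressed
		 * stream still fits in the leftover bandwidth. */
		for (i = 0; i < count; i++) {
			int saved = s[i].alloc_kbps;

			if (!s[i].dsc || s[i].alloc_kbps < s[i].max_kbps)
				continue;
			s[i].alloc_kbps = s[i].stream_kbps;
			if (fits(s, count, link_kbps))
				s[i].dsc = false;
			else
				s[i].alloc_kbps = saved;
		}
		return true;
	}

[In the patch, the per-stream [min_kbps, max_kbps] range comes from
dc_dsc_compute_bandwidth_range() and the check is the branch-aware
check_bandwidth_limits() below, not a single link total.]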
> ---
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c |   4 +
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.c   | 386 ++++++++++++++++++
>  .../display/amdgpu_dm/amdgpu_dm_mst_types.h   |   4 +
>  .../drm/amd/display/dc/dcn20/dcn20_resource.c |   7 +-
>  .../drm/amd/display/dc/dcn20/dcn20_resource.h |   1 +
>  5 files changed, 400 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 1309e9cfdea3..1bbdf29f3c7d 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -7780,6 +7780,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
>  		if (ret)
>  			goto fail;
>
> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> +	if (!compute_mst_dsc_configs_for_state(dm_state->context))
> +		goto fail;
> +#endif
>  	if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
>  		ret = -EINVAL;
>  		goto fail;
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> index 804a00082bee..c58cf41f3086 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
> @@ -38,6 +38,8 @@
>
>  #include "i2caux_interface.h"
>
> +#include "dc/dcn20/dcn20_resource.h"
> +
>  /* #define TRACE_DPCD */
>
>  #ifdef TRACE_DPCD
> @@ -491,3 +493,387 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
>  			aconnector->connector_id);
>  }
>
> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> +struct dsc_mst_fairness_params {
> +	struct dc_crtc_timing *timing;
> +	struct dc_sink *sink;
> +	struct dc_dsc_bw_range bw_range;
> +	bool compression_possible;
> +	struct drm_dp_mst_port *port;
> +};
> +
> +struct dsc_mst_fairness_vars {
> +	int pbn;
> +	bool dsc_enabled;
> +	int bpp_x16;
> +};
> +
> +static bool port_downstream_of_branch(struct drm_dp_mst_port *port,
> +		struct drm_dp_mst_branch *branch)
> +{
> +	while (port->parent) {
> +		if (port->parent == branch)
> +			return true;
> +
> +		if (port->parent->port_parent)
> +			port = port->parent->port_parent;
> +		else
> +			break;
> +	}
> +	return false;
> +}
> +
> +static bool check_pbn_limit_on_branch(struct drm_dp_mst_branch *branch,
> +		struct dsc_mst_fairness_params *params,
> +		struct dsc_mst_fairness_vars *vars, int count)
> +{
> +	struct drm_dp_mst_port *port;
> +	int i;
> +	int pbn_limit = 0;
> +	int pbn_used = 0;
> +
> +	list_for_each_entry(port, &branch->ports, next) {
> +		if (port->mstb)
> +			if (!check_pbn_limit_on_branch(port->mstb, params, vars, count))
> +				return false;
> +
> +		if (port->available_pbn > 0)
> +			pbn_limit = port->available_pbn;
> +	}
> +
> +	for (i = 0; i < count; i++) {
> +		if (port_downstream_of_branch(params[i].port, branch))
> +			pbn_used += vars[i].pbn;
> +	}
> +
> +	if (pbn_used > pbn_limit)
> +		return false;
> +
> +	return true;
> +}
> +

The rest of this looks fine, but this function ^ should definitely be moved
into the MST helpers as well, especially since verifying PBN values is
something we need to add to the MST helpers anyway. Then the algorithm for
computing "fair share" as you call it would just look like this from a high
level:

	compute_config_without_compression();

	ret = drm_dp_mst_atomic_check();
	if (!ret)
		return ret;

	compute_config_with_max_compression();

	ret = drm_dp_mst_atomic_check();
	if (ret)
		return ret;

	optimize_config();

How does that sound?
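[As a sketch of how that proposal might be plumbed through — illustrative
only: compute_config_without_compression(), compute_config_with_max_compression()
and optimize_config() are the hypothetical helpers named above, while
drm_dp_mst_atomic_check() is the existing DRM helper that returns 0 when
every PBN allocation recorded in the atomic state fits its link:]

	static int compute_mst_dsc_configs(struct drm_atomic_state *state)
	{
		int ret;

		/* Best case: everything fits with DSC off. */
		compute_config_without_compression(state);
		ret = drm_dp_mst_atomic_check(state);
		if (!ret)
			return 0;

		/* If even maximum compression does not fit, this
		 * combination of streams cannot be enabled. */
		compute_config_with_max_compression(state);
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			return ret;

		/* Otherwise hand bandwidth back out fairly. */
		optimize_config(state);
		return 0;
	}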
> +static bool check_bandwidth_limits(struct dc_link *dc_link,
> +		struct dsc_mst_fairness_params *params,
> +		struct dsc_mst_fairness_vars *vars,
> +		int count)
> +{
> +	int link_timeslot_limit = 63;
> +	int link_timeslots_used = 0;
> +	int pbn_per_timeslot;
> +	int i;
> +	struct drm_dp_mst_topology_mgr *mst_mgr;
> +
> +	/* kbits to pbn, dividing by 64 */
> +	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
> +			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
> +
> +	/* Check link bandwidth limit */
> +	for (i = 0; i < count; i++)
> +		link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
> +
> +	if (link_timeslots_used > link_timeslot_limit)
> +		return false;
> +
> +	/* Check branch bandwidth limit for each port on each branch */
> +	mst_mgr = params[0].port->mgr;
> +	if (!check_pbn_limit_on_branch(mst_mgr->mst_primary, params, vars, count))
> +		return false;
> +
> +	return true;
> +}
> +
> +static int kbps_to_peak_pbn(int kbps)
> +{
> +	u64 peak_kbps = kbps;
> +
> +	peak_kbps *= 1006;
> +	peak_kbps /= 1000;
> +	return (int) DIV_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
> +}
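[For reference, kbps_to_peak_pbn() above is the standard DP MST conversion:
one PBN unit is 54/64 MBps, and the 1006/1000 factor is the 0.6%
peak-bandwidth margin from the DP spec. A quick worked example, with an
illustrative mode that is not taken from the patch:]

	/* 1080p60 RGB at 24 bpp: 148,500 kHz * 24 bit = 3,564,000 kbps */
	peak_kbps = 3564000 * 1006 / 1000;                 /* = 3,585,384 */
	pbn = DIV_ROUND_UP(peak_kbps * 64, 54 * 8 * 1000); /* = 532 PBN */

[This matches drm_dp_calc_pbn_mode(), which uses the same constants.]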
> +
> +static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
> +		struct dsc_mst_fairness_vars *vars,
> +		int count)
> +{
> +	int i;
> +
> +	for (i = 0; i < count; i++) {
> +		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
> +		if (vars[i].dsc_enabled && dc_dsc_compute_config(
> +					params[i].sink->ctx->dc->res_pool->dscs[0],
> +					&params[i].sink->sink_dsc_caps.dsc_dec_caps,
> +					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
> +					0,
> +					params[i].timing,
> +					&params[i].timing->dsc_cfg)) {
> +			params[i].timing->flags.DSC = 1;
> +			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
> +		} else {
> +			params[i].timing->flags.DSC = 0;
> +		}
> +	}
> +}
> +
> +static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
> +{
> +	struct dc_dsc_config dsc_config;
> +	u64 kbps;
> +
> +	kbps = (u64)pbn * 994 * 8 * 54 / 64;
> +	dc_dsc_compute_config(
> +			param.sink->ctx->dc->res_pool->dscs[0],
> +			&param.sink->sink_dsc_caps.dsc_dec_caps,
> +			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
> +			(int) kbps, param.timing, &dsc_config);
> +
> +	return dsc_config.bits_per_pixel;
> +}
> +
> +static void increase_dsc_bpp(struct dc_link *dc_link,
> +		struct dsc_mst_fairness_params *params,
> +		struct dsc_mst_fairness_vars *vars,
> +		int count)
> +{
> +	int i;
> +	bool bpp_increased[MAX_PIPES];
> +	int initial_slack[MAX_PIPES];
> +	int min_initial_slack;
> +	int next_index;
> +	int remaining_to_increase = 0;
> +	int pbn_per_timeslot;
> +	int link_timeslots_used;
> +	int fair_pbn_alloc;
> +
> +	for (i = 0; i < count; i++) {
> +		if (vars[i].dsc_enabled) {
> +			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
> +			bpp_increased[i] = false;
> +			remaining_to_increase += 1;
> +		} else {
> +			initial_slack[i] = 0;
> +			bpp_increased[i] = true;
> +		}
> +	}
> +
> +	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
> +			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);
> +
> +	while (remaining_to_increase) {
> +		next_index = -1;
> +		min_initial_slack = -1;
> +		for (i = 0; i < count; i++) {
> +			if (!bpp_increased[i]) {
> +				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
> +					min_initial_slack = initial_slack[i];
> +					next_index = i;
> +				}
> +			}
> +		}
> +
> +		if (next_index == -1)
> +			break;
> +
> +		link_timeslots_used = 0;
> +
> +		for (i = 0; i < count; i++)
> +			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);
> +
> +		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
> +
> +		if (initial_slack[next_index] > fair_pbn_alloc) {
> +			vars[next_index].pbn += fair_pbn_alloc;
> +			if (check_bandwidth_limits(dc_link, params, vars, count))
> +				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
> +			else
> +				vars[next_index].pbn -= fair_pbn_alloc;
> +		} else {
> +			vars[next_index].pbn += initial_slack[next_index];
> +			if (check_bandwidth_limits(dc_link, params, vars, count))
> +				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
> +			else
> +				vars[next_index].pbn -= initial_slack[next_index];
> +		}
> +
> +		bpp_increased[next_index] = true;
> +		remaining_to_increase--;
> +	}
> +}
> +
> +static void try_disable_dsc(struct dc_link *dc_link,
> +		struct dsc_mst_fairness_params *params,
> +		struct dsc_mst_fairness_vars *vars,
> +		int count)
> +{
> +	int i;
> +	bool tried[MAX_PIPES];
> +	int kbps_increase[MAX_PIPES];
> +	int max_kbps_increase;
> +	int next_index;
> +	int remaining_to_try = 0;
> +
> +	for (i = 0; i < count; i++) {
> +		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
> +			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
> +			tried[i] = false;
> +			remaining_to_try += 1;
> +		} else {
> +			kbps_increase[i] = 0;
> +			tried[i] = true;
> +		}
> +	}
> +
> +	while (remaining_to_try) {
> +		next_index = -1;
> +		max_kbps_increase = -1;
> +		for (i = 0; i < count; i++) {
> +			if (!tried[i]) {
> +				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
> +					max_kbps_increase = kbps_increase[i];
> +					next_index = i;
> +				}
> +			}
> +		}
> +
> +		if (next_index == -1)
> +			break;
> +
> +		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
> +
> +		if (check_bandwidth_limits(dc_link, params, vars, count)) {
> +			vars[next_index].dsc_enabled = false;
> +			vars[next_index].bpp_x16 = 0;
> +		} else {
> +			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
> +		}
> +
> +		tried[next_index] = true;
> +		remaining_to_try--;
> +	}
> +}
> +
> +static bool compute_mst_dsc_configs_for_link(struct dc_state *dc_state, struct dc_link *dc_link)
> +{
> +	int i;
> +	struct dc_stream_state *stream;
> +	struct dsc_mst_fairness_params params[MAX_PIPES];
> +	struct dsc_mst_fairness_vars vars[MAX_PIPES];
> +	struct amdgpu_dm_connector *aconnector;
> +	int count = 0;
> +
> +	memset(params, 0, sizeof(params));
> +
> +	/* Set up params */
> +	for (i = 0; i < dc_state->stream_count; i++) {
> +		stream = dc_state->streams[i];
> +
> +		if (stream->link != dc_link)
> +			continue;
> +
> +		stream->timing.flags.DSC = 0;
> +
> +		params[count].timing = &stream->timing;
> +		params[count].sink = stream->sink;
> +		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
> +		params[count].port = aconnector->port;
> +		params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
> +		if (!dc_dsc_compute_bandwidth_range(
> +				stream->sink->ctx->dc->res_pool->dscs[0],
> +				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
> +				8, 16,
> +				&stream->sink->sink_dsc_caps.dsc_dec_caps,
> +				&stream->timing, &params[count].bw_range))
> +			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
> +
> +		count++;
> +	}
> +
> +	/* Try no compression */
> +	for (i = 0; i < count; i++) {
> +		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
> +		vars[i].dsc_enabled = false;
> +		vars[i].bpp_x16 = 0;
> +	}
> +
> +	if (check_bandwidth_limits(dc_link, params, vars, count)) {
> +		set_dsc_configs_from_fairness_vars(params, vars, count);
> +		return true;
> +	}
> +
> +	/* Try max compression */
> +	for (i = 0; i < count; i++) {
> +		if (params[i].compression_possible) {
> +			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
> +			vars[i].dsc_enabled = true;
> +			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
> +		} else {
> +			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
> +			vars[i].dsc_enabled = false;
> +			vars[i].bpp_x16 = 0;
> +		}
> +	}
> +
> +	if (!check_bandwidth_limits(dc_link, params, vars, count))
> +		return false;
> +
> +	/* Optimize degree of compression */
> +	increase_dsc_bpp(dc_link, params, vars, count);
> +
> +	try_disable_dsc(dc_link, params, vars, count);
> +
> +	set_dsc_configs_from_fairness_vars(params, vars, count);
> +
> +	return true;
> +}
> +
> +bool compute_mst_dsc_configs_for_state(struct dc_state *dc_state)
> +{
> +	int i, j;
> +	struct dc_stream_state *stream;
> +	bool computed_streams[MAX_PIPES];
> +	struct amdgpu_dm_connector *aconnector;
> +
> +	for (i = 0; i < dc_state->stream_count; i++)
> +		computed_streams[i] = false;
> +
> +	for (i = 0; i < dc_state->stream_count; i++) {
> +		stream = dc_state->streams[i];
> +
> +		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
> +			continue;
> +
> +		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
> +
> +		if (!aconnector || !aconnector->dc_sink)
> +			continue;
> +
> +		if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
> +			continue;
> +
> +		if (computed_streams[i])
> +			continue;
> +
> +		mutex_lock(&aconnector->mst_mgr.lock);
> +		if (!compute_mst_dsc_configs_for_link(dc_state, stream->link)) {
> +			mutex_unlock(&aconnector->mst_mgr.lock);
> +			return false;
> +		}
> +		mutex_unlock(&aconnector->mst_mgr.lock);
> +
> +		for (j = 0; j < dc_state->stream_count; j++) {
> +			if (dc_state->streams[j]->link == stream->link)
> +				computed_streams[j] = true;
> +		}
> +	}
> +
> +	for (i = 0; i < dc_state->stream_count; i++) {
> +		stream = dc_state->streams[i];
> +
> +		if (stream->timing.flags.DSC == 1)
> +			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
> +	}
> +
> +	return true;
> +}
> +#endif
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
> index 2da851b40042..da957611214a 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
> @@ -32,4 +32,8 @@ struct amdgpu_dm_connector;
>  void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
>  				       struct amdgpu_dm_connector *aconnector);
>
> +
> +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> +bool compute_mst_dsc_configs_for_state(struct dc_state *dc_state);
> +#endif
>  #endif
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> index 924c2e303588..55c356109cb9 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
> @@ -1495,7 +1495,7 @@ static void release_dsc(struct resource_context *res_ctx,
>
>
>  #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
> -static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
> +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
>  		struct dc_state *dc_ctx,
>  		struct dc_stream_state *dc_stream)
>  {
> @@ -1510,6 +1510,9 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
>  		if (pipe_ctx->stream != dc_stream)
>  			continue;
>
> +		if (pipe_ctx->stream_res.dsc)
> +			continue;
> +
>  		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
>
>  		/* The number of DSCs can be less than the number of pipes */
> @@ -1561,7 +1564,7 @@ enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx,
>  #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
>  	/* Get a DSC if required and available */
>  	if (result == DC_OK && dc_stream->timing.flags.DSC)
> -		result = add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
> +		result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
>  #endif
>
>  	if (result == DC_OK)
> diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
> index fef473d68a4a..865f684a500a 100644
> --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
> +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
> @@ -159,6 +159,7 @@ void dcn20_calculate_dlg_params(
>
>  enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream);
>  enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
> +enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
>  enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
>  enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);