@@ -202,8 +202,6 @@ struct dpu_crtc {
* @new_perf: new performance state being requested
* @num_mixers : Number of mixers in use
* @mixers : List of active mixers
- * @num_ctls : Number of ctl paths in use
- * @hw_ctls : List of active ctl paths
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
@@ -217,11 +215,8 @@ struct dpu_crtc_state {
struct dpu_core_perf_params new_perf;
/* HW Resources reserved for the crtc */
- u32 num_mixers;
+ int num_mixers;
struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
-
- u32 num_ctls;
- struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
};
#define to_dpu_crtc_state(x) \
@@ -559,7 +559,6 @@ static int dpu_encoder_virt_atomic_check(
struct dpu_kms *dpu_kms;
const struct drm_display_mode *mode;
struct drm_display_mode *adj_mode;
- struct msm_display_topology topology;
int i = 0;
int ret = 0;
@@ -605,20 +604,24 @@ static int dpu_encoder_virt_atomic_check(
}
}
- topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+	/*
+	 * Reserve dynamic resources now, indicating the atomic-test phase.
+	 *
+	 * Avoid reserving resources while a mode set is still pending:
+	 * topology information may not yet be available to complete the
+	 * reservation.
+ if (!ret && drm_atomic_crtc_needs_modeset(crtc_state)
+ && dpu_enc->mode_set_complete) {
+ struct msm_display_topology topology;
+ struct dpu_private_state *dpu_priv_state;
- /* Reserve dynamic resources now. Indicating AtomicTest phase */
- if (!ret) {
- /*
- * Avoid reserving resources when mode set is pending. Topology
- * info may not be available to complete reservation.
- */
- if (drm_atomic_crtc_needs_modeset(crtc_state)
- && dpu_enc->mode_set_complete) {
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- topology, true);
- dpu_enc->mode_set_complete = false;
- }
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+ dpu_priv_state = dpu_get_private_state(crtc_state->state,
+ to_dpu_crtc(crtc_state->crtc));
+
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc,
+ &dpu_priv_state->base, topology, true);
+ dpu_enc->mode_set_complete = false;
}
if (!ret)
@@ -962,12 +965,10 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
struct drm_crtc *drm_crtc;
- struct dpu_crtc_state *cstate;
- struct dpu_rm_hw_iter hw_iter;
+ struct dpu_crtc_state *dpu_cstate;
struct msm_display_topology topology;
- struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
- int num_lm = 0, num_ctl = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_private_state *dpu_private_state;
int i, j, ret;
if (!drm_enc) {
@@ -1001,100 +1002,59 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
break;
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+ dpu_crtc = to_dpu_crtc(drm_crtc);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, dpu_crtc->priv_obj.state,
topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
- "failed to reserve hw resources, %d\n", ret);
+ "failed to reserve hw resources, %d\n", ret);
return;
}
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
- num_ctl++;
- }
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
- for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
- hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
- num_lm++;
- }
+ dpu_cstate = to_dpu_crtc_state(drm_crtc->state);
+ dpu_private_state = container_of(dpu_crtc->priv_obj.state,
+ struct dpu_private_state, base);
- cstate = to_dpu_crtc_state(drm_crtc->state);
+ for (i = 0; i < dpu_private_state->num_mixers; i++) {
+ int ctl_idx;
- for (i = 0; i < num_lm; i++) {
- int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+ dpu_cstate->mixers[i].hw_lm = dpu_private_state->hw_lms[i];
- cstate->mixers[i].hw_lm = hw_lm[i];
- cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+		/*
+		 * The number of hw_ctls may be less than or equal to the
+		 * number of hw_lms; when it is less, a single CTL drives
+		 * multiple LMs.
+		 */
+		ctl_idx = min(i, dpu_private_state->num_intfs - 1);
+ dpu_cstate->mixers[i].lm_ctl =
+ dpu_private_state->hw_ctls[ctl_idx];
}
-
- cstate->num_mixers = num_lm;
+ dpu_cstate->num_mixers = dpu_private_state->num_mixers;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- if (phys) {
- if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
- "at idx: %d\n", i);
- goto error;
- }
-
- if (!hw_ctl[i]) {
- DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
- "at idx: %d\n", i);
- goto error;
- }
-
- phys->hw_pp = dpu_enc->hw_pp[i];
- phys->hw_ctl = hw_ctl[i];
-
- dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
- DPU_HW_BLK_INTF);
- for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
- struct dpu_hw_intf *hw_intf;
+ phys->hw_pp = dpu_private_state->hw_pps[i];
+ dpu_enc->hw_pp[i] = dpu_private_state->hw_pps[i];
- if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
- break;
+ phys->hw_ctl = dpu_cstate->mixers[i].lm_ctl;
- hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
- if (hw_intf->idx == phys->intf_idx)
- phys->hw_intf = hw_intf;
+ for (j = 0; j < dpu_private_state->num_intfs; j++) {
+ struct dpu_hw_intf *hw_intf =
+ dpu_private_state->hw_intfs[j];
+ if (hw_intf->idx == phys->intf_idx) {
+ phys->hw_intf = hw_intf;
+ break;
}
-
- if (!phys->hw_intf) {
- DPU_ERROR_ENC(dpu_enc,
- "no intf block assigned at idx: %d\n",
- i);
- goto error;
- }
-
- phys->connector = conn->state->connector;
- if (phys->ops.mode_set)
- phys->ops.mode_set(phys, mode, adj_mode);
}
+
+ phys->connector = conn->state->connector;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
}
dpu_enc->mode_set_complete = true;
-
-error:
- dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1196,6 +1156,9 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
struct drm_display_mode *mode;
+ struct drm_crtc *drm_crtc;
+ struct dpu_crtc *dpu_crtc;
+ unsigned long lock_flags;
int i = 0;
if (!drm_enc) {
@@ -1212,10 +1175,20 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
+	/*
+	 * The client may have cleared the CRTC's encoder_mask before
+	 * encoder->disable runs. Rely on dpu_enc->crtc instead, which is
+	 * only reset in crtc->disable.
+	 */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ drm_crtc = dpu_enc->crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ dpu_crtc = to_dpu_crtc(drm_crtc);
+
mutex_lock(&dpu_enc->enc_lock);
dpu_enc->enabled = false;
- mode = &drm_enc->crtc->state->adjusted_mode;
+ mode = &drm_crtc->state->adjusted_mode;
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1249,7 +1222,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- dpu_rm_release(&dpu_kms->rm, drm_enc);
+ dpu_rm_release(&dpu_kms->rm, dpu_crtc->priv_obj.state);
mutex_unlock(&dpu_enc->enc_lock);
}
@@ -490,10 +490,19 @@ struct dpu_private_state *dpu_get_private_state(struct drm_atomic_state *state,
static void dpu_private_obj_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
+ struct msm_drm_private *priv = state->state->dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct dpu_private_state *dpu_priv_state = container_of(state,
struct dpu_private_state,
base);
+	/*
+	 * Release any HW resources still held by this state back to the RM
+	 * pool. This covers TEST_ONLY commits, where resources reserved
+	 * during atomic_check must be returned when the state is destroyed.
+	 */
+ dpu_rm_release(&dpu_kms->rm, state);
+
kfree(dpu_priv_state);
}
@@ -26,6 +26,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
+#include "dpu_hw_intf.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
#include "dpu_io_util.h"
@@ -140,6 +141,22 @@ struct dpu_kms {
struct dpu_private_state {
struct drm_private_state base;
+
+ /*
+ * layer mixers and ping pong blocks
+ * are hard chained
+ */
+ int num_mixers;
+ struct dpu_hw_mixer *hw_lms[CRTC_DUAL_MIXERS];
+ struct dpu_hw_pingpong *hw_pps[CRTC_DUAL_MIXERS];
+
+	/*
+	 * Up to and including SDM845, each interface is
+	 * controlled by its own hw_ctl.
+	 */
+ int num_intfs;
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+ struct dpu_hw_intf *hw_intfs[CRTC_DUAL_MIXERS];
};
struct vsync_info {
@@ -368,6 +368,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
}
static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+ struct dpu_private_state *dpu_priv_state,
struct dpu_rm_requirements *reqs)
{
@@ -429,8 +430,13 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
lm[i]->enc_id = enc_id;
pp[i]->enc_id = enc_id;
+ dpu_priv_state->hw_lms[i] = to_dpu_hw_mixer(lm[i]->hw);
+ dpu_priv_state->hw_pps[i] = container_of(pp[i]->hw,
+ struct dpu_hw_pingpong,
+ base);
trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
}
+ dpu_priv_state->num_mixers = lm_count;
return rc;
}
@@ -438,6 +444,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
uint32_t enc_id,
+ struct dpu_private_state *dpu_priv_state,
const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
@@ -480,20 +487,20 @@ static int _dpu_rm_reserve_ctls(
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->enc_id = enc_id;
+ dpu_priv_state->hw_ctls[i] = to_dpu_hw_ctl(ctls[i]->hw);
trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
}
return 0;
}
-static int _dpu_rm_reserve_intf(
+static struct dpu_rm_hw_blk *_dpu_rm_reserve_intf(
struct dpu_rm *rm,
uint32_t enc_id,
uint32_t id,
enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
- int ret = 0;
/* Find the block entry in the rm, and note the reservation */
dpu_rm_init_hw_iter(&iter, 0, type);
@@ -503,7 +510,7 @@ static int _dpu_rm_reserve_intf(
if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
DPU_ERROR("type %d id %d already reserved\n", type, id);
- return -ENAVAIL;
+ return NULL;
}
iter.blk->enc_id = enc_id;
@@ -512,56 +519,63 @@ static int _dpu_rm_reserve_intf(
}
/* Shouldn't happen since intfs are fixed at probe */
- if (!iter.hw) {
+ if (!iter.blk) {
DPU_ERROR("couldn't find type %d id %d\n", type, id);
- return -EINVAL;
+ return NULL;
}
- return ret;
+ return iter.blk;
}
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
uint32_t enc_id,
+ struct dpu_private_state *dpu_priv_state,
struct dpu_encoder_hw_resources *hw_res)
{
- int i, ret = 0;
- u32 id;
+ struct dpu_rm_hw_blk *blk;
+ int i, num_intfs = 0;
for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
- id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, enc_id, id,
+
+ blk = _dpu_rm_reserve_intf(rm, enc_id, i + INTF_0,
DPU_HW_BLK_INTF);
- if (ret)
- return ret;
+ if (!blk)
+ return -ENAVAIL;
+
+ dpu_priv_state->hw_intfs[num_intfs++] =
+ container_of(blk->hw, struct dpu_hw_intf, base);
}
+ dpu_priv_state->num_intfs = num_intfs;
- return ret;
+ return 0;
}
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct dpu_private_state *dpu_priv_state,
struct dpu_rm_requirements *reqs)
{
int ret;
- ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
+ ret = _dpu_rm_reserve_lms(rm, enc->base.id, dpu_priv_state, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, enc->base.id, dpu_priv_state,
+ &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, dpu_priv_state,
+ &reqs->hw_res);
if (ret)
return ret;
@@ -571,7 +585,6 @@ static int _dpu_rm_make_reservation(
static int _dpu_rm_populate_requirements(
struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
@@ -586,27 +599,64 @@ static int _dpu_rm_populate_requirements(
return 0;
}
-static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
+static int _dpu_rm_release_hw(struct dpu_rm *rm, enum dpu_hw_blk_type type,
+ int id)
{
struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->enc_id == enc_id) {
- blk->enc_id = 0;
- DPU_DEBUG("rel enc %d %d %d\n", enc_id,
- type, blk->id);
- }
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->hw->id == id) {
+ blk->enc_id = 0;
+ return 0;
}
}
+
+ DRM_DEBUG_KMS("failed to find hw id(%d) of type(%d) for releasing\n",
+ id, type);
+
+ return -EINVAL;
+}
+
+static void _dpu_rm_release_reservation(struct dpu_rm *rm,
+ struct drm_private_state *priv_state)
+{
+ struct dpu_private_state *dpu_priv_state =
+ container_of(priv_state, struct dpu_private_state, base);
+ int i;
+
+ for (i = 0; i < dpu_priv_state->num_mixers; i++) {
+ if (!dpu_priv_state->hw_lms[i])
+ continue;
+
+ if (!_dpu_rm_release_hw(rm, DPU_HW_BLK_LM,
+ dpu_priv_state->hw_lms[i]->base.id))
+ dpu_priv_state->hw_lms[i] = NULL;
+
+ if (!_dpu_rm_release_hw(rm, DPU_HW_BLK_PINGPONG,
+ dpu_priv_state->hw_pps[i]->base.id))
+ dpu_priv_state->hw_pps[i] = NULL;
+ }
+ dpu_priv_state->num_mixers = 0;
+
+ for (i = 0; i < dpu_priv_state->num_intfs; i++) {
+ if (!dpu_priv_state->hw_ctls[i])
+ continue;
+
+ if (!_dpu_rm_release_hw(rm, DPU_HW_BLK_CTL,
+ dpu_priv_state->hw_ctls[i]->base.id))
+ dpu_priv_state->hw_ctls[i] = NULL;
+
+ if (!_dpu_rm_release_hw(rm, DPU_HW_BLK_INTF,
+ dpu_priv_state->hw_intfs[i]->base.id))
+ dpu_priv_state->hw_intfs[i] = NULL;
+ }
+ dpu_priv_state->num_intfs = 0;
}
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+void dpu_rm_release(struct dpu_rm *rm, struct drm_private_state *priv_state)
{
mutex_lock(&rm->rm_lock);
- _dpu_rm_release_reservation(rm, enc->base.id);
+ _dpu_rm_release_reservation(rm, priv_state);
mutex_unlock(&rm->rm_lock);
}
@@ -614,38 +664,35 @@ void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_private_state *priv_state,
struct msm_display_topology topology,
bool test_only)
{
struct dpu_rm_requirements reqs;
+ struct dpu_private_state *dpu_priv_state =
+ container_of(priv_state, struct dpu_private_state, base);
int ret;
- /* Check if this is just a page-flip */
- if (!drm_atomic_crtc_needs_modeset(crtc_state))
- return 0;
-
- DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
- enc->base.id, crtc_state->crtc->base.id, test_only);
+ DRM_DEBUG_KMS("reserving hw for enc %d test_only %d\n",
+ enc->base.id, test_only);
mutex_lock(&rm->rm_lock);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
- topology);
+ ret = _dpu_rm_populate_requirements(rm, enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
goto end;
}
- ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
+ ret = _dpu_rm_make_reservation(rm, enc, dpu_priv_state, &reqs);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_reservation(rm, enc->base.id);
+ _dpu_rm_release_reservation(rm, priv_state);
} else if (test_only) {
/* test_only: test the reservation and then undo */
DPU_DEBUG("test_only: discard test [enc: %d]\n",
enc->base.id);
- _dpu_rm_release_reservation(rm, enc->base.id);
+ _dpu_rm_release_reservation(rm, priv_state);
}
end:
@@ -81,14 +81,14 @@ int dpu_rm_init(struct dpu_rm *rm,
* HW Reservations should be released via dpu_rm_release_hw.
* @rm: DPU Resource Manager handle
* @drm_enc: DRM Encoder handle
- * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @priv_state: Pointer to drm private obj state
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_reserve(struct dpu_rm *rm,
struct drm_encoder *drm_enc,
- struct drm_crtc_state *crtc_state,
+ struct drm_private_state *priv_state,
struct msm_display_topology topology,
bool test_only);
@@ -96,10 +96,10 @@ int dpu_rm_reserve(struct dpu_rm *rm,
* dpu_rm_reserve - Given the encoder for the display chain, release any
* HW blocks previously reserved for that use case.
* @rm: DPU Resource Manager handle
- * @enc: DRM Encoder handle
+ * @priv_state: Pointer to drm private obj state
* @Return: 0 on Success otherwise -ERROR
*/
-void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+void dpu_rm_release(struct dpu_rm *rm, struct drm_private_state *priv_state);
/**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
DPU maintained reservation lists to cache assigned HW blocks for the display and a retrieval mechanism for the individual DRM components to query their respective HW blocks. This patch uses the sub-classed private object state to store and track HW blocks assigned for different components of the display pipeline. It helps the driver: - to get rid of unwanted store and retrieval RM API's - to preserve HW resources assigned in atomic_check through atomic swap/duplicate. Resources are reserved only when drm_atomic_crtc_needs_modeset is set to TRUE and are released in atomic disable path. With TEST_ONLY atomic commit path, reserved resources are released back to RM pool in private_obj_destroy_state call. changes in v2 (comments from Sean Paul): - Track resources in private object state instead of crtc state. - Avoid duplicate tracking of hw_ctls in crtc - No explicit count for hw_ctl as they match with hw_intf count Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org> --- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 7 +- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 157 ++++++++++++---------------- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 9 ++ drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 17 +++ drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 131 +++++++++++++++-------- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 8 +- 6 files changed, 185 insertions(+), 144 deletions(-)