@@ -24,6 +24,7 @@ static const struct mdp_platform_config mt8195_plat_cfg = {
.mdp_version_6885 = true,
.gce_event_offset = 0,
.support_multi_larb = true,
+ .support_dual_pipe = true,
};
enum mt8195_mdp_comp_id {
@@ -29,6 +29,9 @@ struct mdp_path {
((ctx)->comp->ops && (ctx)->comp->ops->op)
#define call_op(ctx, op, ...) \
(has_op(ctx, op) ? (ctx)->comp->ops->op(ctx, ##__VA_ARGS__) : 0)
+#define get_pipe_num(scenario) \
+ ((scenario) == MDP_STREAM_TYPE_DUAL_BITBLT ? 2 : 1)
+
struct mdp_path_subfrm {
s32 mutex_id;
u32 mutex_mod;
@@ -707,25 +710,31 @@ static void mdp_auto_release_work(struct work_struct *work)
struct mdp_cmdq_cb_param *cb_param;
struct mdp_dev *mdp;
int i;
+ bool finalize;
cb_param = container_of(work, struct mdp_cmdq_cb_param,
auto_release_work);
mdp = cb_param->mdp;
+ finalize = cb_param->finalize;
- i = mdp_get_mutex_idx(mdp->mdp_data, MDP_PIPE_RDMA0);
- mtk_mutex_unprepare(mdp->mdp_mutex[mdp->mdp_data->pipe_info[i].mutex_id]);
+ if (finalize) {
+ i = mdp_get_mutex_idx(mdp->mdp_data, MDP_PIPE_RDMA0);
+ mtk_mutex_unprepare(mdp->mdp_mutex[mdp->mdp_data->pipe_info[i].mutex_id]);
- i = mdp_get_mutex_idx(mdp->mdp_data, MDP_PIPE_RDMA1);
- if (i >= 0)
- mtk_mutex_unprepare(mdp->mdp_mutex2[mdp->mdp_data->pipe_info[i].mutex_id]);
+ i = mdp_get_mutex_idx(mdp->mdp_data, MDP_PIPE_RDMA1);
+ if (i >= 0)
+ mtk_mutex_unprepare(mdp->mdp_mutex2[mdp->mdp_data->pipe_info[i].mutex_id]);
+ }
mdp_comp_clocks_off(&mdp->pdev->dev, cb_param->comps,
cb_param->num_comps);
kfree(cb_param->comps);
kfree(cb_param);
- atomic_dec(&mdp->job_count);
- wake_up(&mdp->callback_wq);
+ if (finalize) {
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+ }
}
static void mdp_handle_cmdq_callback(struct cmdq_cb_data data)
@@ -744,7 +753,13 @@ static void mdp_handle_cmdq_callback(struct cmdq_cb_data data)
mdp = cb_param->mdp;
dev = &mdp->pdev->dev;
- if (cb_param->mdp_ctx)
+ if (cb_param->dualpipe)
+ cb_param->finalize =
+ (atomic_dec_and_test(&mdp->cmdq_count[cb_param->cmdq_user]));
+ else
+ cb_param->finalize = true;
+
+ if (cb_param->finalize && cb_param->mdp_ctx)
mdp_m2m_job_finish(cb_param->mdp_ctx);
if (cb_param->user_cmdq_cb) {
@@ -779,52 +794,62 @@ static void mdp_handle_cmdq_callback(struct cmdq_cb_data data)
int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
{
- struct mmsys_cmdq_cmd cmd;
- struct mdp_path *path = NULL;
- struct mdp_cmdq_cb_param *cb_param = NULL;
- struct mdp_comp *comps = NULL;
+ struct mmsys_cmdq_cmd cmd[MDP_DUAL_PIPE];
+ struct mdp_path *paths[MDP_DUAL_PIPE] = {NULL};
+ struct mdp_cmdq_cb_param *cb_param[MDP_DUAL_PIPE] = {NULL};
+ struct mdp_comp *comps[MDP_DUAL_PIPE] = {NULL};
struct device *dev = &mdp->pdev->dev;
- int i, ret;
+
+ enum mdp_stream_type scenario = param->param->type;
+ int i, j, ret;
if (atomic_read(&mdp->suspended))
return -ECANCELED;
atomic_inc(&mdp->job_count);
- cmd.pkt = cmdq_pkt_create(mdp->cmdq_clt, SZ_16K);
- if (IS_ERR(cmd.pkt)) {
- atomic_dec(&mdp->job_count);
- wake_up(&mdp->callback_wq);
- return PTR_ERR(cmd.pkt);
+ /* Prepare cmdq pkt */
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ cmd[i].pkt = cmdq_pkt_create(mdp->cmdq_clt[i], SZ_16K);
+ if (IS_ERR(cmd[i].pkt)) {
+ ret = PTR_ERR(cmd[i].pkt);
+ dev_err(dev, "%s path %d cmdq_pkt_create error\n", __func__, i);
+ goto err_destroy_pkt;
+ }
+ cmd[i].event = &mdp->event[0];
}
- cmd.event = &mdp->event[0];
- path = kzalloc(sizeof(*path), GFP_KERNEL);
- if (!path) {
- ret = -ENOMEM;
- goto err_destroy_pkt;
- }
+ /* Prepare path info */
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ paths[i] = kzalloc(sizeof(*paths[i]), GFP_KERNEL);
+ if (!paths[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "%s alloc paths error\n", __func__);
+ goto err_destroy_paths;
+ }
- path->mdp_dev = mdp;
- path->config = param->config;
- path->param = param->param;
- for (i = 0; i < param->param->num_outputs; i++) {
- path->bounds[i].left = 0;
- path->bounds[i].top = 0;
- path->bounds[i].width =
- param->param->outputs[i].buffer.format.width;
- path->bounds[i].height =
- param->param->outputs[i].buffer.format.height;
- path->composes[i] = param->composes[i] ?
- param->composes[i] : &path->bounds[i];
- }
+ paths[i]->mdp_dev = mdp;
+ paths[i]->config = &param->config[i];
+ paths[i]->param = param->param;
+ for (j = 0; j < param->param->num_outputs; j++) {
+ paths[i]->bounds[j].left = 0;
+ paths[i]->bounds[j].top = 0;
+ paths[i]->bounds[j].width =
+ param->param->outputs[j].buffer.format.width;
+ paths[i]->bounds[j].height =
+ param->param->outputs[j].buffer.format.height;
+ paths[i]->composes[j] = param->composes[j] ?
+ param->composes[j] : &paths[i]->bounds[j];
+ }
- ret = mdp_path_ctx_init(mdp, path);
- if (ret) {
- dev_err(dev, "mdp_path_ctx_init error\n");
- goto err_destroy_pkt;
+ ret = mdp_path_ctx_init(mdp, paths[i]);
+ if (ret) {
+ dev_err(dev, "%s mdp_path_ctx_init error at path %d\n", __func__, i);
+ goto err_destroy_paths;
+ }
}
+ /* Setup clock and cmdq buffer */
i = mdp_get_mutex_idx(mdp->mdp_data, MDP_PIPE_RDMA0);
mtk_mutex_prepare(mdp->mdp_mutex[mdp->mdp_data->pipe_info[i].mutex_id]);
@@ -832,64 +857,94 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
if (i >= 0)
mtk_mutex_prepare(mdp->mdp_mutex2[mdp->mdp_data->pipe_info[i].mutex_id]);
- for (i = 0; i < param->config->num_components; i++) {
- if (is_dummy_engine(mdp, path->config->components[i].type))
- continue;
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ for (j = 0; j < param->config[i].num_components; j++) {
+ if (is_dummy_engine(mdp, paths[i]->config->components[j].type))
+ continue;
- mdp_comp_clock_on(&mdp->pdev->dev, path->comps[i].comp);
+ mdp_comp_clock_on(&mdp->pdev->dev, paths[i]->comps[j].comp);
+ }
}
if (mdp->mdp_data->mdp_cfg->mdp_version_8195) {
- ret = mdp_hyfbc_config(mdp, &cmd, path, param);
- if (ret)
- goto err_destroy_pkt;
+ ret = mdp_hyfbc_config(mdp, &cmd[0], paths[0], param);
+ if (ret) {
+ dev_err(dev, "%s mdp_hyfbc_config error\n", __func__);
+ goto err_clock_off;
+ }
}
- ret = mdp_path_config(mdp, &cmd, path);
- if (ret) {
- dev_err(dev, "mdp_path_config error\n");
- goto err_destroy_pkt;
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ ret = mdp_path_config(mdp, &cmd[i], paths[i]);
+ if (ret) {
+ dev_err(dev, "path %d mdp_path_config error\n", i);
+ goto err_clock_off;
+ }
}
- cb_param = kzalloc(sizeof(*cb_param), GFP_KERNEL);
- if (!cb_param) {
- ret = -ENOMEM;
- goto err_destroy_pkt;
- }
+ /* Prepare cmdq callback info */
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ cb_param[i] = kzalloc(sizeof(*cb_param[i]), GFP_KERNEL);
+ if (!cb_param[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "%s path %d alloc cb_param error\n", __func__, i);
+ goto err_destroy_cb_param;
+ }
- comps = kcalloc(param->config->num_components, sizeof(*comps),
- GFP_KERNEL);
- if (!comps) {
- ret = -ENOMEM;
- goto err_destroy_pkt;
- }
+ comps[i] = kcalloc(param->config[i].num_components,
+ sizeof(struct mdp_comp), GFP_KERNEL);
+ if (!comps[i]) {
+ ret = -ENOMEM;
+ dev_err(dev, "%s path %d alloc comps error\n", __func__, i);
+ goto err_destroy_cb_param;
+ }
- for (i = 0; i < param->config->num_components; i++) {
- if (is_dummy_engine(mdp, path->config->components[i].type))
- continue;
+ for (j = 0; j < param->config[i].num_components; j++) {
+ if (is_dummy_engine(mdp, paths[i]->config->components[j].type))
+ continue;
- memcpy(&comps[i], path->comps[i].comp,
- sizeof(struct mdp_comp));
- }
- cb_param->mdp = mdp;
- cb_param->user_cmdq_cb = param->cmdq_cb;
- cb_param->user_cb_data = param->cb_data;
- cb_param->pkt = cmd.pkt;
- cb_param->comps = comps;
- cb_param->num_comps = param->config->num_components;
- cb_param->mdp_ctx = param->mdp_ctx;
-
- cmdq_pkt_finalize(cmd.pkt);
- ret = cmdq_pkt_flush_async(cmd.pkt,
- mdp_handle_cmdq_callback,
- (void *)cb_param);
- if (ret) {
- dev_err(dev, "cmdq_pkt_flush_async fail!\n");
- goto err_clock_off;
+ memcpy(&comps[i][j], paths[i]->comps[j].comp,
+ sizeof(struct mdp_comp));
+ }
+ cb_param[i]->mdp = mdp;
+ cb_param[i]->user_cmdq_cb = param->cmdq_cb;
+ cb_param[i]->user_cb_data = param->cb_data;
+ cb_param[i]->pkt = cmd[i].pkt;
+ cb_param[i]->comps = comps[i];
+ cb_param[i]->num_comps = param->config[i].num_components;
+ cb_param[i]->mdp_ctx = param->mdp_ctx;
+ cb_param[i]->cmdq_user = param->cmdq_user;
+ cb_param[i]->dualpipe = (get_pipe_num(scenario) > 1 ? true : false);
+ }
+
+ /* Flush cmdq */
+ if (atomic_read(&mdp->cmdq_count[param->cmdq_user]))
+ dev_dbg(dev, "%s: Warning: cmdq_count:%d !\n", __func__,
+ atomic_read(&mdp->cmdq_count[param->cmdq_user]));
+
+ atomic_set(&mdp->cmdq_count[param->cmdq_user], get_pipe_num(scenario));
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ cmdq_pkt_finalize(cmd[i].pkt);
+
+ ret = cmdq_pkt_flush_async(cmd[i].pkt,
+ mdp_handle_cmdq_callback,
+ (void *)cb_param[i]);
+ if (ret) {
+ dev_err(dev, "pkt %d cmdq_pkt_flush_async fail!\n", i);
+ goto err_destroy_cmdq_request;
+ }
+ kfree(paths[i]);
}
- kfree(path);
+
return 0;
+err_destroy_cmdq_request:
+ atomic_set(&mdp->cmdq_count[param->cmdq_user], 0);
+err_destroy_cb_param:
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ kfree(comps[i]);
+ kfree(cb_param[i]);
+ }
err_clock_off:
i = mdp_get_mutex_idx(mdp->mdp_data, MDP_PIPE_RDMA0);
mtk_mutex_unprepare(mdp->mdp_mutex[mdp->mdp_data->pipe_info[i].mutex_id]);
@@ -898,15 +953,22 @@ int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
if (i >= 0)
mtk_mutex_unprepare(mdp->mdp_mutex2[mdp->mdp_data->pipe_info[i].mutex_id]);
- mdp_comp_clocks_off(&mdp->pdev->dev, cb_param->comps,
- cb_param->num_comps);
+ for (i = 0; i < get_pipe_num(scenario); i++) {
+ for (j = 0; j < param->config[i].num_components; j++) {
+ if (is_dummy_engine(mdp, paths[i]->config->components[j].type) == false)
+ mdp_comp_clock_off(&mdp->pdev->dev, paths[i]->comps[j].comp);
+ }
+ }
+err_destroy_paths:
+ for (i = 0; i < get_pipe_num(scenario); i++)
+ kfree(paths[i]);
err_destroy_pkt:
- cmdq_pkt_destroy(cmd.pkt);
+ for (i = 0; i < get_pipe_num(scenario); i++)
+ if (!IS_ERR(cmd[i].pkt))
+ cmdq_pkt_destroy(cmd[i].pkt);
+
atomic_dec(&mdp->job_count);
wake_up(&mdp->callback_wq);
- kfree(comps);
- kfree(cb_param);
- kfree(path);
return ret;
}
@@ -923,6 +985,7 @@ int mdp_cmdq_sendtask(struct platform_device *pdev, struct img_config *config,
.composes[0] = compose,
.cmdq_cb = cmdq_cb,
.cb_data = cb_data,
+ .cmdq_user = MDP_CMDQ_DL,
};
return mdp_cmdq_send(mdp, &task);
@@ -26,6 +26,7 @@ struct mdp_cmdq_param {
void (*cmdq_cb)(struct cmdq_cb_data data);
void *cb_data;
void *mdp_ctx;
+ u32 cmdq_user;
};
struct mdp_cmdq_cb_param {
@@ -37,6 +38,9 @@ struct mdp_cmdq_cb_param {
struct mdp_comp *comps;
u8 num_comps;
void *mdp_ctx;
+ u32 cmdq_user;
+ bool finalize;
+ bool dualpipe;
};
struct mdp_dev;
@@ -292,12 +292,20 @@ static int mdp_probe(struct platform_device *pdev)
mutex_init(&mdp->vpu_lock);
mutex_init(&mdp->m2m_lock);
- mdp->cmdq_clt = cmdq_mbox_create(dev, 0);
- if (IS_ERR(mdp->cmdq_clt)) {
- ret = PTR_ERR(mdp->cmdq_clt);
+ mdp->cmdq_clt[0] = cmdq_mbox_create(dev, 0);
+ if (IS_ERR(mdp->cmdq_clt[0])) {
+ ret = PTR_ERR(mdp->cmdq_clt[0]);
goto err_put_scp;
}
+ if (mdp->mdp_data->mdp_cfg->support_dual_pipe) {
+ mdp->cmdq_clt[1] = cmdq_mbox_create(dev, 1);
+ if (IS_ERR(mdp->cmdq_clt[1])) {
+ ret = PTR_ERR(mdp->cmdq_clt[1]);
+ goto err_mbox_destroy;
+ }
+ }
+
init_waitqueue_head(&mdp->callback_wq);
ida_init(&mdp->mdp_ida);
platform_set_drvdata(pdev, mdp);
@@ -308,7 +316,7 @@ static int mdp_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "Failed to register v4l2 device\n");
ret = -EINVAL;
- goto err_mbox_destroy;
+ goto err_dual_mbox_destroy;
}
ret = mdp_m2m_device_register(mdp);
@@ -323,8 +331,11 @@ static int mdp_probe(struct platform_device *pdev)
err_unregister_device:
v4l2_device_unregister(&mdp->v4l2_dev);
+err_dual_mbox_destroy:
+ if (mdp->mdp_data->mdp_cfg->support_dual_pipe)
+ cmdq_mbox_destroy(mdp->cmdq_clt[1]);
err_mbox_destroy:
- cmdq_mbox_destroy(mdp->cmdq_clt);
+ cmdq_mbox_destroy(mdp->cmdq_clt[0]);
err_put_scp:
scp_put(mdp->scp);
err_destroy_clock_wq:
@@ -15,6 +15,7 @@
#include "mtk-mdp3-vpu.h"
#define MDP_MODULE_NAME "mtk-mdp3"
+#define MDP_DUAL_PIPE 2
enum mdp_buffer_usage {
MDP_BUFFER_USAGE_HW_READ,
@@ -24,6 +25,12 @@ enum mdp_buffer_usage {
MDP_BUFFER_USAGE_WPE,
};
+enum mdp_cmdq_usage {
+ MDP_CMDQ_V4L2,
+ MDP_CMDQ_DL,
+ MDP_CMDQ_NUM
+};
+
struct mdp_platform_config {
bool rdma_support_10bit;
bool rdma_rsz1_sram_sharing;
@@ -43,6 +50,7 @@ struct mdp_platform_config {
u8 tdshp_dyn_contrast_version;
u32 gce_event_offset;
bool support_multi_larb;
+ bool support_dual_pipe;
};
struct mtk_mdp_driver_data {
@@ -77,7 +85,7 @@ struct mdp_dev {
s32 vpu_count;
u32 id_count;
struct ida mdp_ida;
- struct cmdq_client *cmdq_clt;
+ struct cmdq_client *cmdq_clt[MDP_DUAL_PIPE];
wait_queue_head_t callback_wq;
struct v4l2_device v4l2_dev;
@@ -87,6 +95,7 @@ struct mdp_dev {
struct mutex m2m_lock;
atomic_t suspended;
atomic_t job_count;
+ atomic_t cmdq_count[MDP_CMDQ_NUM];
};
struct mdp_pipe_info {
@@ -95,6 +95,10 @@ static void mdp_m2m_worker(struct work_struct *work)
frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
mdp_set_src_config(&param.inputs[0], frame, &src_vb->vb2_buf);
+ mdp_set_scenario(ctx->mdp_dev, &param, frame);
+ if (param.frame_change)
+ dev_info(&ctx->mdp_dev->pdev->dev,
+ "MDP Scenario: %d\n", param.type);
frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
@@ -115,6 +119,7 @@ static void mdp_m2m_worker(struct work_struct *work)
task.cmdq_cb = NULL;
task.cb_data = NULL;
task.mdp_ctx = ctx;
+ task.cmdq_user = MDP_CMDQ_V4L2;
ret = mdp_cmdq_send(ctx->mdp_dev, &task);
if (ret) {
@@ -11,6 +11,8 @@
#include "mtk-mdp3-regs.h"
#include "mtk-mdp3-m2m.h"
+#define FHD (1920 * 1080)
+
static const struct mdp_format *mdp_formats;
static u32 format_len;
@@ -427,6 +429,22 @@ static u32 mdp_fmt_get_plane_size(const struct mdp_format *fmt,
return 0;
}
+void mdp_set_scenario(struct mdp_dev *mdp,
+ struct img_ipi_frameparam *param,
+ struct mdp_frame *frame)
+{
+ u32 width = frame->format.fmt.pix_mp.width;
+ u32 height = frame->format.fmt.pix_mp.height;
+
+ if (!mdp)
+ return;
+
+ if (mdp->mdp_data->mdp_cfg->support_dual_pipe) {
+ if ((width * height) >= FHD)
+ param->type = MDP_STREAM_TYPE_DUAL_BITBLT;
+ }
+}
+
static void mdp_prepare_buffer(struct img_image_buffer *b,
struct mdp_frame *frame, struct vb2_buffer *vb)
{
@@ -10,6 +10,7 @@
#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include "mtk-img-ipi.h"
+#include "mtk-mdp3-cmdq.h"
/*
* MDP native color code
@@ -407,6 +408,9 @@ int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
const struct v4l2_rect *compose, s32 rotation,
const struct mdp_limit *limit);
+void mdp_set_scenario(struct mdp_dev *mdp,
+ struct img_ipi_frameparam *param,
+ struct mdp_frame *frame);
void mdp_set_src_config(struct img_input *in,
struct mdp_frame *frame, struct vb2_buffer *vb);
void mdp_set_dst_config(struct img_output *out,
@@ -10,7 +10,7 @@
#include "mtk-mdp3-core.h"
#define MDP_VPU_MESSAGE_TIMEOUT 500U
-#define vpu_alloc_size 0x600000
+#define vpu_alloc_size 0x7F8000
static inline struct mdp_dev *vpu_to_mdp(struct mdp_vpu_dev *vpu)
{
@@ -39,6 +39,7 @@ struct mdp_ipi_deinit_msg {
enum mdp_config_id {
MDP_DEV_M2M = 0,
+ MDP_DEV_M2M_2ND = 1,
MDP_CONFIG_POOL_SIZE /* ALWAYS keep at the end */
};