1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2022 MediaTek Inc. 4 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com> 5 */ 6 7 #include <linux/mailbox_controller.h> 8 #include <linux/platform_device.h> 9 #include "mtk-mdp3-cfg.h" 10 #include "mtk-mdp3-cmdq.h" 11 #include "mtk-mdp3-comp.h" 12 #include "mtk-mdp3-core.h" 13 #include "mtk-mdp3-m2m.h" 14 #include "mtk-img-ipi.h" 15 16 #define MDP_PATH_MAX_COMPS IMG_MAX_COMPONENTS 17 18 struct mdp_path { 19 struct mdp_dev *mdp_dev; 20 struct mdp_comp_ctx comps[MDP_PATH_MAX_COMPS]; 21 u32 num_comps; 22 const struct img_config *config; 23 const struct img_ipi_frameparam *param; 24 const struct v4l2_rect *composes[IMG_MAX_HW_OUTPUTS]; 25 struct v4l2_rect bounds[IMG_MAX_HW_OUTPUTS]; 26 }; 27 28 #define has_op(ctx, op) \ 29 ((ctx)->comp->ops && (ctx)->comp->ops->op) 30 #define call_op(ctx, op, ...) \ 31 (has_op(ctx, op) ? (ctx)->comp->ops->op(ctx, ##__VA_ARGS__) : 0) 32 33 static bool is_output_disabled(int p_id, const struct img_compparam *param, u32 count) 34 { 35 u32 num = 0; 36 bool dis_output = false; 37 bool dis_tile = false; 38 39 if (CFG_CHECK(MT8183, p_id)) { 40 num = CFG_COMP(MT8183, param, num_subfrms); 41 dis_output = CFG_COMP(MT8183, param, frame.output_disable); 42 dis_tile = CFG_COMP(MT8183, param, frame.output_disable); 43 } else if (CFG_CHECK(MT8195, p_id)) { 44 num = CFG_COMP(MT8195, param, num_subfrms); 45 dis_output = CFG_COMP(MT8195, param, frame.output_disable); 46 dis_tile = CFG_COMP(MT8195, param, frame.output_disable); 47 } 48 49 return (count < num) ? (dis_output || dis_tile) : true; 50 } 51 52 static struct mtk_mutex *__get_mutex(const struct mdp_dev *mdp_dev, 53 const struct mdp_pipe_info *p) 54 { 55 return mdp_dev->mm_subsys[p->sub_id].mdp_mutex[p->mutex_id]; 56 } 57 58 static u8 __get_pp_num(enum mdp_stream_type type) 59 { 60 switch (type) { 61 case MDP_STREAM_TYPE_DUAL_BITBLT: 62 return MDP_PP_USED_2; 63 default: 64 return MDP_PP_USED_1; 65 } 66 } 67 68 static enum mdp_pipe_id __get_pipe(const struct mdp_dev *mdp_dev, 69 enum mtk_mdp_comp_id id) 70 { 71 enum mdp_pipe_id pipe_id; 72 73 switch (id) { 74 case MDP_COMP_RDMA0: 75 pipe_id = MDP_PIPE_RDMA0; 76 break; 77 case MDP_COMP_ISP_IMGI: 78 pipe_id = MDP_PIPE_IMGI; 79 break; 80 case MDP_COMP_WPEI: 81 pipe_id = MDP_PIPE_WPEI; 82 break; 83 case MDP_COMP_WPEI2: 84 pipe_id = MDP_PIPE_WPEI2; 85 break; 86 case MDP_COMP_RDMA1: 87 pipe_id = MDP_PIPE_RDMA1; 88 break; 89 case MDP_COMP_RDMA2: 90 pipe_id = MDP_PIPE_RDMA2; 91 break; 92 case MDP_COMP_RDMA3: 93 pipe_id = MDP_PIPE_RDMA3; 94 break; 95 default: 96 /* Avoid exceptions when operating MUTEX */ 97 pipe_id = MDP_PIPE_RDMA0; 98 dev_err(&mdp_dev->pdev->dev, "Unknown pipeline id %d", id); 99 break; 100 } 101 102 return pipe_id; 103 } 104 105 static struct img_config *__get_config_offset(struct mdp_dev *mdp, 106 struct mdp_cmdq_param *param, 107 u8 pp_idx) 108 { 109 const int p_id = mdp->mdp_data->mdp_plat_id; 110 struct device *dev = &mdp->pdev->dev; 111 void *cfg_c, *cfg_n; 112 long bound = mdp->vpu.config_size; 113 114 if (pp_idx >= mdp->mdp_data->pp_used) 115 goto err_param; 116 117 if (CFG_CHECK(MT8183, p_id)) { 118 cfg_c = CFG_OFST(MT8183, param->config, pp_idx); 119 cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1); 120 } else if (CFG_CHECK(MT8195, p_id)) { 121 cfg_c = CFG_OFST(MT8195, param->config, pp_idx); 122 cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1); 123 } else { 124 goto err_param; 125 } 126 127 if ((long)cfg_n - (long)mdp->vpu.config > bound) { 128 dev_err(dev, "config offset %ld OOB %ld\n", (long)cfg_n, bound); 129 cfg_c = ERR_PTR(-EFAULT); 130 } 131 132 return (struct img_config *)cfg_c; 133 134 err_param: 135 cfg_c = ERR_PTR(-EINVAL); 136 return (struct img_config *)cfg_c; 137 } 138 139 static int mdp_path_subfrm_require(const struct mdp_path *path, 140 struct mdp_cmdq_cmd *cmd, 141 struct mdp_pipe_info *p, u32 count) 142 { 143 const int p_id = path->mdp_dev->mdp_data->mdp_plat_id; 144 const struct mdp_comp_ctx *ctx; 145 const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data; 146 struct mtk_mutex *mutex; 147 int id, index; 148 u32 num_comp = 0; 149 150 if (CFG_CHECK(MT8183, p_id)) 151 num_comp = CFG_GET(MT8183, path->config, num_components); 152 else if (CFG_CHECK(MT8195, p_id)) 153 num_comp = CFG_GET(MT8195, path->config, num_components); 154 155 /* Decide which mutex to use based on the current pipeline */ 156 index = __get_pipe(path->mdp_dev, path->comps[0].comp->public_id); 157 memcpy(p, &data->pipe_info[index], sizeof(struct mdp_pipe_info)); 158 mutex = __get_mutex(path->mdp_dev, p); 159 160 /* Set mutex mod */ 161 for (index = 0; index < num_comp; index++) { 162 s32 inner_id = MDP_COMP_NONE; 163 const u32 *mutex_idx; 164 const struct mdp_comp_blend *b; 165 166 if (CFG_CHECK(MT8183, p_id)) 167 inner_id = CFG_GET(MT8183, path->config, components[index].type); 168 else if (CFG_CHECK(MT8195, p_id)) 169 inner_id = CFG_GET(MT8195, path->config, components[index].type); 170 171 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 172 continue; 173 174 ctx = &path->comps[index]; 175 if (is_output_disabled(p_id, ctx->param, count)) 176 continue; 177 178 mutex_idx = data->mdp_mutex_table_idx; 179 id = ctx->comp->public_id; 180 mtk_mutex_write_mod(mutex, mutex_idx[id], false); 181 182 b = &data->comp_data[id].blend; 183 if (b && b->aid_mod) 184 mtk_mutex_write_mod(mutex, mutex_idx[b->b_id], false); 185 } 186 187 mtk_mutex_write_sof(mutex, MUTEX_SOF_IDX_SINGLE_MODE); 188 189 return 0; 190 } 191 192 static int mdp_path_subfrm_run(const struct mdp_path *path, 193 struct mdp_cmdq_cmd *cmd, 194 struct mdp_pipe_info *p, u32 count) 195 { 196 const int p_id = path->mdp_dev->mdp_data->mdp_plat_id; 197 const struct mdp_comp_ctx *ctx; 198 struct device *dev = &path->mdp_dev->pdev->dev; 199 struct mtk_mutex *mutex; 200 int index; 201 u32 num_comp = 0; 202 s32 event; 203 s32 inner_id = MDP_COMP_NONE; 204 205 if (-1 == p->mutex_id) { 206 dev_err(dev, "Incorrect mutex id"); 207 return -EINVAL; 208 } 209 210 if (CFG_CHECK(MT8183, p_id)) 211 num_comp = CFG_GET(MT8183, path->config, num_components); 212 else if (CFG_CHECK(MT8195, p_id)) 213 num_comp = CFG_GET(MT8195, path->config, num_components); 214 215 /* Wait WROT SRAM shared to DISP RDMA */ 216 /* Clear SOF event for each engine */ 217 for (index = 0; index < num_comp; index++) { 218 if (CFG_CHECK(MT8183, p_id)) 219 inner_id = CFG_GET(MT8183, path->config, components[index].type); 220 else if (CFG_CHECK(MT8195, p_id)) 221 inner_id = CFG_GET(MT8195, path->config, components[index].type); 222 223 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 224 continue; 225 ctx = &path->comps[index]; 226 if (is_output_disabled(p_id, ctx->param, count)) 227 continue; 228 event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF]; 229 if (event != MDP_GCE_NO_EVENT) 230 MM_REG_CLEAR(cmd, event); 231 } 232 233 /* Enable the mutex */ 234 mutex = __get_mutex(path->mdp_dev, p); 235 mtk_mutex_enable_by_cmdq(mutex, (void *)&cmd->pkt); 236 237 /* Wait SOF events and clear mutex modules (optional) */ 238 for (index = 0; index < num_comp; index++) { 239 if (CFG_CHECK(MT8183, p_id)) 240 inner_id = CFG_GET(MT8183, path->config, components[index].type); 241 else if (CFG_CHECK(MT8195, p_id)) 242 inner_id = CFG_GET(MT8195, path->config, components[index].type); 243 244 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 245 continue; 246 ctx = &path->comps[index]; 247 if (is_output_disabled(p_id, ctx->param, count)) 248 continue; 249 event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF]; 250 if (event != MDP_GCE_NO_EVENT) 251 MM_REG_WAIT(cmd, event); 252 } 253 254 return 0; 255 } 256 257 static int mdp_path_ctx_init(struct mdp_dev *mdp, struct mdp_path *path) 258 { 259 const int p_id = mdp->mdp_data->mdp_plat_id; 260 void *param = NULL; 261 int index, ret; 262 u32 num_comp = 0; 263 264 if (CFG_CHECK(MT8183, p_id)) 265 num_comp = CFG_GET(MT8183, path->config, num_components); 266 else if (CFG_CHECK(MT8195, p_id)) 267 num_comp = CFG_GET(MT8195, path->config, num_components); 268 269 if (num_comp < 1) 270 return -EINVAL; 271 272 for (index = 0; index < num_comp; index++) { 273 s32 inner_id = MDP_COMP_NONE; 274 275 if (CFG_CHECK(MT8183, p_id)) 276 inner_id = CFG_GET(MT8183, path->config, components[index].type); 277 else if (CFG_CHECK(MT8195, p_id)) 278 inner_id = CFG_GET(MT8195, path->config, components[index].type); 279 280 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 281 continue; 282 if (CFG_CHECK(MT8183, p_id)) 283 param = (void *)CFG_ADDR(MT8183, path->config, components[index]); 284 else if (CFG_CHECK(MT8195, p_id)) 285 param = (void *)CFG_ADDR(MT8195, path->config, components[index]); 286 ret = mdp_comp_ctx_config(mdp, &path->comps[index], 287 param, path->param); 288 if (ret) 289 return ret; 290 } 291 292 return 0; 293 } 294 295 static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd, 296 struct mdp_path *path, u32 count) 297 { 298 const int p_id = path->mdp_dev->mdp_data->mdp_plat_id; 299 const struct img_mmsys_ctrl *ctrl = NULL; 300 const struct img_mux *set; 301 struct mdp_comp_ctx *ctx; 302 struct mdp_pipe_info pipe; 303 int index, ret; 304 u32 num_comp = 0; 305 s32 inner_id = MDP_COMP_NONE; 306 307 if (CFG_CHECK(MT8183, p_id)) 308 num_comp = CFG_GET(MT8183, path->config, num_components); 309 else if (CFG_CHECK(MT8195, p_id)) 310 num_comp = CFG_GET(MT8195, path->config, num_components); 311 312 if (CFG_CHECK(MT8183, p_id)) 313 ctrl = CFG_ADDR(MT8183, path->config, ctrls[count]); 314 else if (CFG_CHECK(MT8195, p_id)) 315 ctrl = CFG_ADDR(MT8195, path->config, ctrls[count]); 316 317 /* Acquire components */ 318 ret = mdp_path_subfrm_require(path, cmd, &pipe, count); 319 if (ret) 320 return ret; 321 /* Enable mux settings */ 322 for (index = 0; index < ctrl->num_sets; index++) { 323 set = &ctrl->sets[index]; 324 cmdq_pkt_write(&cmd->pkt, set->subsys_id, set->reg, set->value); 325 } 326 /* Config sub-frame information */ 327 for (index = (num_comp - 1); index >= 0; index--) { 328 if (CFG_CHECK(MT8183, p_id)) 329 inner_id = CFG_GET(MT8183, path->config, components[index].type); 330 else if (CFG_CHECK(MT8195, p_id)) 331 inner_id = CFG_GET(MT8195, path->config, components[index].type); 332 333 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 334 continue; 335 ctx = &path->comps[index]; 336 if (is_output_disabled(p_id, ctx->param, count)) 337 continue; 338 ret = call_op(ctx, config_subfrm, cmd, count); 339 if (ret) 340 return ret; 341 } 342 /* Run components */ 343 ret = mdp_path_subfrm_run(path, cmd, &pipe, count); 344 if (ret) 345 return ret; 346 /* Wait components done */ 347 for (index = 0; index < num_comp; index++) { 348 if (CFG_CHECK(MT8183, p_id)) 349 inner_id = CFG_GET(MT8183, path->config, components[index].type); 350 else if (CFG_CHECK(MT8195, p_id)) 351 inner_id = CFG_GET(MT8195, path->config, components[index].type); 352 353 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 354 continue; 355 ctx = &path->comps[index]; 356 if (is_output_disabled(p_id, ctx->param, count)) 357 continue; 358 ret = call_op(ctx, wait_comp_event, cmd); 359 if (ret) 360 return ret; 361 } 362 /* Advance to the next sub-frame */ 363 for (index = 0; index < num_comp; index++) { 364 if (CFG_CHECK(MT8183, p_id)) 365 inner_id = CFG_GET(MT8183, path->config, components[index].type); 366 else if (CFG_CHECK(MT8195, p_id)) 367 inner_id = CFG_GET(MT8195, path->config, components[index].type); 368 369 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 370 continue; 371 ctx = &path->comps[index]; 372 ret = call_op(ctx, advance_subfrm, cmd, count); 373 if (ret) 374 return ret; 375 } 376 /* Disable mux settings */ 377 for (index = 0; index < ctrl->num_sets; index++) { 378 set = &ctrl->sets[index]; 379 cmdq_pkt_write(&cmd->pkt, set->subsys_id, set->reg, 0); 380 } 381 382 return 0; 383 } 384 385 static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd, 386 struct mdp_path *path) 387 { 388 const int p_id = mdp->mdp_data->mdp_plat_id; 389 struct mdp_comp_ctx *ctx; 390 int index, count, ret; 391 u32 num_comp = 0; 392 u32 num_sub = 0; 393 s32 inner_id = MDP_COMP_NONE; 394 395 if (CFG_CHECK(MT8183, p_id)) 396 num_comp = CFG_GET(MT8183, path->config, num_components); 397 else if (CFG_CHECK(MT8195, p_id)) 398 num_comp = CFG_GET(MT8195, path->config, num_components); 399 400 if (CFG_CHECK(MT8183, p_id)) 401 num_sub = CFG_GET(MT8183, path->config, num_subfrms); 402 else if (CFG_CHECK(MT8195, p_id)) 403 num_sub = CFG_GET(MT8195, path->config, num_subfrms); 404 405 /* Config path frame */ 406 /* Reset components */ 407 for (index = 0; index < num_comp; index++) { 408 if (CFG_CHECK(MT8183, p_id)) 409 inner_id = CFG_GET(MT8183, path->config, components[index].type); 410 else if (CFG_CHECK(MT8195, p_id)) 411 inner_id = CFG_GET(MT8195, path->config, components[index].type); 412 413 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 414 continue; 415 ctx = &path->comps[index]; 416 ret = call_op(ctx, init_comp, cmd); 417 if (ret) 418 return ret; 419 } 420 /* Config frame mode */ 421 for (index = 0; index < num_comp; index++) { 422 const struct v4l2_rect *compose; 423 u32 out = 0; 424 425 ctx = &path->comps[index]; 426 if (CFG_CHECK(MT8183, p_id)) 427 inner_id = CFG_GET(MT8183, path->config, components[index].type); 428 else if (CFG_CHECK(MT8195, p_id)) 429 inner_id = CFG_GET(MT8195, path->config, components[index].type); 430 431 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 432 continue; 433 434 if (CFG_CHECK(MT8183, p_id)) 435 out = CFG_COMP(MT8183, ctx->param, outputs[0]); 436 else if (CFG_CHECK(MT8195, p_id)) 437 out = CFG_COMP(MT8195, ctx->param, outputs[0]); 438 439 compose = path->composes[out]; 440 ret = call_op(ctx, config_frame, cmd, compose); 441 if (ret) 442 return ret; 443 } 444 445 /* Config path sub-frames */ 446 for (count = 0; count < num_sub; count++) { 447 ret = mdp_path_config_subfrm(cmd, path, count); 448 if (ret) 449 return ret; 450 } 451 /* Post processing information */ 452 for (index = 0; index < num_comp; index++) { 453 if (CFG_CHECK(MT8183, p_id)) 454 inner_id = CFG_GET(MT8183, path->config, components[index].type); 455 else if (CFG_CHECK(MT8195, p_id)) 456 inner_id = CFG_GET(MT8195, path->config, components[index].type); 457 458 if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id)) 459 continue; 460 ctx = &path->comps[index]; 461 ret = call_op(ctx, post_process, cmd); 462 if (ret) 463 return ret; 464 } 465 return 0; 466 } 467 468 static void mdp_auto_release_work(struct work_struct *work) 469 { 470 struct mdp_cmdq_cmd *cmd; 471 struct mdp_dev *mdp; 472 struct mtk_mutex *mutex; 473 enum mdp_pipe_id pipe_id; 474 475 cmd = container_of(work, struct mdp_cmdq_cmd, auto_release_work); 476 mdp = cmd->mdp; 477 478 pipe_id = __get_pipe(mdp, cmd->comps[0].public_id); 479 mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]); 480 mtk_mutex_unprepare(mutex); 481 mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps, 482 cmd->num_comps); 483 484 if (refcount_dec_and_test(&mdp->job_count)) { 485 if (cmd->mdp_ctx) 486 mdp_m2m_job_finish(cmd->mdp_ctx); 487 488 if (cmd->user_cmdq_cb) { 489 struct cmdq_cb_data user_cb_data; 490 491 user_cb_data.sta = cmd->data->sta; 492 user_cb_data.pkt = cmd->data->pkt; 493 cmd->user_cmdq_cb(user_cb_data); 494 } 495 wake_up(&mdp->callback_wq); 496 } 497 498 cmdq_pkt_destroy(mdp->cmdq_clt[cmd->pp_idx], &cmd->pkt); 499 kfree(cmd->comps); 500 cmd->comps = NULL; 501 kfree(cmd); 502 cmd = NULL; 503 } 504 505 static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg) 506 { 507 struct mdp_cmdq_cmd *cmd; 508 struct cmdq_cb_data *data; 509 struct mdp_dev *mdp; 510 struct device *dev; 511 enum mdp_pipe_id pipe_id; 512 513 if (!mssg) { 514 pr_info("%s:no callback data\n", __func__); 515 return; 516 } 517 518 data = (struct cmdq_cb_data *)mssg; 519 cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt); 520 cmd->data = data; 521 mdp = cmd->mdp; 522 dev = &mdp->pdev->dev; 523 524 INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work); 525 if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) { 526 struct mtk_mutex *mutex; 527 528 dev_err(dev, "%s:queue_work fail!\n", __func__); 529 pipe_id = __get_pipe(mdp, cmd->comps[0].public_id); 530 mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]); 531 mtk_mutex_unprepare(mutex); 532 mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps, 533 cmd->num_comps); 534 535 if (refcount_dec_and_test(&mdp->job_count)) 536 wake_up(&mdp->callback_wq); 537 538 cmdq_pkt_destroy(mdp->cmdq_clt[cmd->pp_idx], &cmd->pkt); 539 kfree(cmd->comps); 540 cmd->comps = NULL; 541 kfree(cmd); 542 cmd = NULL; 543 } 544 } 545 546 static struct mdp_cmdq_cmd *mdp_cmdq_prepare(struct mdp_dev *mdp, 547 struct mdp_cmdq_param *param, 548 u8 pp_idx) 549 { 550 struct mdp_path *path = NULL; 551 struct mdp_cmdq_cmd *cmd = NULL; 552 struct mdp_comp *comps = NULL; 553 struct device *dev = &mdp->pdev->dev; 554 const int p_id = mdp->mdp_data->mdp_plat_id; 555 struct img_config *config; 556 struct mtk_mutex *mutex = NULL; 557 enum mdp_pipe_id pipe_id; 558 int i, ret = -ECANCELED; 559 u32 num_comp; 560 561 config = __get_config_offset(mdp, param, pp_idx); 562 if (IS_ERR(config)) { 563 ret = PTR_ERR(config); 564 goto err_uninit; 565 } 566 567 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 568 if (!cmd) { 569 ret = -ENOMEM; 570 goto err_uninit; 571 } 572 573 ret = cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K); 574 if (ret) 575 goto err_free_cmd; 576 577 if (CFG_CHECK(MT8183, p_id)) { 578 num_comp = CFG_GET(MT8183, param->config, num_components); 579 } else if (CFG_CHECK(MT8195, p_id)) { 580 num_comp = CFG_GET(MT8195, param->config, num_components); 581 } else { 582 ret = -EINVAL; 583 goto err_destroy_pkt; 584 } 585 586 comps = kcalloc(num_comp, sizeof(*comps), GFP_KERNEL); 587 if (!comps) { 588 ret = -ENOMEM; 589 goto err_destroy_pkt; 590 } 591 592 path = kzalloc(sizeof(*path), GFP_KERNEL); 593 if (!path) { 594 ret = -ENOMEM; 595 goto err_free_comps; 596 } 597 598 path->mdp_dev = mdp; 599 path->config = config; 600 path->param = param->param; 601 for (i = 0; i < param->param->num_outputs; i++) { 602 path->bounds[i].left = 0; 603 path->bounds[i].top = 0; 604 path->bounds[i].width = 605 param->param->outputs[i].buffer.format.width; 606 path->bounds[i].height = 607 param->param->outputs[i].buffer.format.height; 608 path->composes[i] = param->composes[i] ? 609 param->composes[i] : &path->bounds[i]; 610 } 611 ret = mdp_path_ctx_init(mdp, path); 612 if (ret) { 613 dev_err(dev, "mdp_path_ctx_init error %d\n", pp_idx); 614 goto err_free_path; 615 } 616 617 pipe_id = __get_pipe(mdp, path->comps[0].comp->public_id); 618 mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]); 619 ret = mtk_mutex_prepare(mutex); 620 if (ret) { 621 dev_err(dev, "Fail to enable mutex %d clk\n", pp_idx); 622 goto err_free_path; 623 } 624 625 ret = mdp_path_config(mdp, cmd, path); 626 if (ret) { 627 dev_err(dev, "mdp_path_config error %d\n", pp_idx); 628 goto err_free_path; 629 } 630 cmdq_pkt_eoc(&cmd->pkt); 631 cmdq_pkt_jump_rel(&cmd->pkt, CMDQ_INST_SIZE, mdp->cmdq_shift_pa[pp_idx]); 632 633 for (i = 0; i < num_comp; i++) { 634 s32 inner_id = MDP_COMP_NONE; 635 636 if (CFG_CHECK(MT8183, p_id)) 637 inner_id = CFG_GET(MT8183, path->config, components[i].type); 638 else if (CFG_CHECK(MT8195, p_id)) 639 inner_id = CFG_GET(MT8195, path->config, components[i].type); 640 641 if (mdp_cfg_comp_is_dummy(mdp, inner_id)) 642 continue; 643 memcpy(&comps[i], path->comps[i].comp, 644 sizeof(struct mdp_comp)); 645 } 646 647 mdp->cmdq_clt[pp_idx]->client.rx_callback = mdp_handle_cmdq_callback; 648 cmd->mdp = mdp; 649 cmd->user_cmdq_cb = param->cmdq_cb; 650 cmd->user_cb_data = param->cb_data; 651 cmd->comps = comps; 652 cmd->num_comps = num_comp; 653 cmd->mdp_ctx = param->mdp_ctx; 654 cmd->pp_idx = pp_idx; 655 656 kfree(path); 657 return cmd; 658 659 err_free_path: 660 if (mutex) 661 mtk_mutex_unprepare(mutex); 662 kfree(path); 663 err_free_comps: 664 kfree(comps); 665 err_destroy_pkt: 666 cmdq_pkt_destroy(mdp->cmdq_clt[pp_idx], &cmd->pkt); 667 err_free_cmd: 668 kfree(cmd); 669 err_uninit: 670 return ERR_PTR(ret); 671 } 672 673 int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param) 674 { 675 struct mdp_cmdq_cmd *cmd[MDP_PP_MAX] = {NULL}; 676 struct device *dev = &mdp->pdev->dev; 677 int i, ret; 678 u8 pp_used = __get_pp_num(param->param->type); 679 680 refcount_set(&mdp->job_count, pp_used); 681 if (atomic_read(&mdp->suspended)) { 682 refcount_set(&mdp->job_count, 0); 683 return -ECANCELED; 684 } 685 686 for (i = 0; i < pp_used; i++) { 687 cmd[i] = mdp_cmdq_prepare(mdp, param, i); 688 if (IS_ERR_OR_NULL(cmd[i])) { 689 ret = PTR_ERR(cmd[i]); 690 goto err_cancel_job; 691 } 692 } 693 694 for (i = 0; i < pp_used; i++) { 695 ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd[i]->comps, cmd[i]->num_comps); 696 if (ret) 697 goto err_clock_off; 698 } 699 700 for (i = 0; i < pp_used; i++) { 701 dma_sync_single_for_device(mdp->cmdq_clt[i]->chan->mbox->dev, 702 cmd[i]->pkt.pa_base, cmd[i]->pkt.cmd_buf_size, 703 DMA_TO_DEVICE); 704 705 ret = mbox_send_message(mdp->cmdq_clt[i]->chan, &cmd[i]->pkt); 706 if (ret < 0) { 707 dev_err(dev, "mbox send message fail %d!\n", ret); 708 i = pp_used; 709 goto err_clock_off; 710 } 711 mbox_client_txdone(mdp->cmdq_clt[i]->chan, 0); 712 } 713 return 0; 714 715 err_clock_off: 716 while (--i >= 0) 717 mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps, 718 cmd[i]->num_comps); 719 err_cancel_job: 720 refcount_set(&mdp->job_count, 0); 721 722 return ret; 723 } 724 EXPORT_SYMBOL_GPL(mdp_cmdq_send); 725