// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

/*
 * Return true if slot @idx of the id map @res_map is held by a CRTC other
 * than @crtc_id.  A zero entry means "free"; an entry equal to @crtc_id
 * means the block is already ours and may be reused.
 */
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t crtc_id)
{
	return res_map[idx] && res_map[idx] != crtc_id;
}

/**
 * dpu_rm_init - Read hardware catalog and create reservation tracking objects
 *	for all HW blocks.
 * @dev: Corresponding device for devres management
 * @rm: DPU Resource Manager handle
 * @cat: Pointer to hardware catalog
 * @mdss_data: Pointer to MDSS / UBWC configuration
 * @mmio: mapped register io address of MDP
 * @return: 0 on Success otherwise -ERROR
 */
int dpu_rm_init(struct drm_device *dev,
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		const struct qcom_ubwc_cfg_data *mdss_data,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* DPU < 5.0 uses per-interface CTLs (see _dpu_rm_reserve_ctls()) */
	rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5);

	/*
	 * Interrogate HW catalog and create tracking items for hw blocks.
	 * Every rm->*_blks[] array is indexed by (catalog id - first id of
	 * that block type), so array position mirrors the HW block id.
	 */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(dev, lm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	/* merge_3d blocks must exist first: pingpongs link back to them */
	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->cwb_count; i++) {
		struct dpu_hw_cwb *hw;
		const struct dpu_cwb_cfg *cwb = &cat->cwb[i];

		hw = dpu_hw_cwb_init(dev, cwb, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cwb object creation: err %d\n", rc);
			goto fail;
		}
		rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mdss_ver, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dev, dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		/* DPU >= 7.0 uses the DSC 1.2 encoder block programming */
		if (cat->mdss_ver->core_major_ver >= 7)
			hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dev, dsc, mmio, cat->mdss_ver);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	/* at most one CDM block is described by the catalog */
	if (cat->cdm) {
		struct dpu_hw_cdm *hw;

		hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cdm object creation: err %d\n", rc);
			goto fail;
		}
		rm->cdm_blk = &hw->base;
	}

	return 0;

fail:
	/* block objects are devres-managed; nothing to unwind here */
	return rc ? rc : -EFAULT;
}

/* Legacy (DPU < 5.0) split display: more than one interface in the topology */
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 *
 * Returns: lm peer mixer id on success or %-EINVAL on error
 */
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;

	if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
		return prim_lm_cfg->lm_pair - LM_0;
	return -EINVAL;
}

/*
 * Reserve one dedicated CWB mux plus its CWB PINGPONG per layer mixer in the
 * topology.  Selection is done in two passes (collect indices, then commit)
 * so a partial match leaves global_state untouched.
 */
static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
						 struct dpu_global_state *global_state,
						 uint32_t crtc_id,
						 struct msm_display_topology *topology)
{
	int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
	int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
	int cwb_pp_idx[MAX_BLOCKS];
	int cwb_mux_idx[MAX_BLOCKS];

	/*
	 * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
	 * mixer
	 *
	 * TODO: add support reserving resources for platforms with no
	 * PINGPONG_CWB
	 */
	for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
	     cwb_mux_count < num_cwb_mux; i++) {
		for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
			/*
			 * Odd LMs must be assigned to odd CWB muxes and even
			 * LMs with even CWB muxes.
			 *
			 * Since the RM HW block array index is based on the HW
			 * block ids, we can also use the array index to enforce
			 * the odd/even rule.  See dpu_rm_init() for more
			 * information
			 */
			if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
			    i % 2 != j % 2)
				continue;

			cwb_mux_idx[cwb_mux_count] = j;
			/* CWB mux j pairs with the dedicated PP at the same offset */
			cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
			cwb_mux_count++;
			break;
		}
	}

	if (cwb_mux_count != num_cwb_mux) {
		DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
		return -ENAVAIL;
	}

	/* all-or-nothing commit of the collected indices */
	for (int i = 0; i < cwb_mux_count; i++) {
		global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
		global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
	}

	return 0;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc_id: crtc id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if lm, and all other hardwired blocks connected to the lm (pp) is
 *	available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @topology: selected topology for the display
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct msm_display_topology *topology)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
		DPU_DEBUG("LM_%d already reserved\n", lm_idx);
		return false;
	}

	/* the PP is hardwired to the LM by the catalog; it must exist and be free */
	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks) || !rm->pingpong_blks[idx]) {
		DPU_ERROR("LM_%d, invalid PP_%d\n", lm_idx, idx);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
		DPU_DEBUG("LM_%d PP_%d already reserved\n", lm_idx, idx);
		return false;
	}
	*pp_idx = idx;

	/* DSPP only matters when the topology asks for color processing */
	if (!topology->num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks) || !rm->dspp_blks[idx]) {
		DPU_ERROR("LM_%d, invalid DSPP_%d\n", lm_idx, idx);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
		DPU_DEBUG("LM_%d DSPP_%d already reserved\n", lm_idx, idx);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

/*
 * Reserve topology->num_lm layer mixers (plus their hardwired PP/DSPP) for
 * @crtc_id.  Mixers beyond the first are picked from the catalog's lm_pair
 * peer links so that paired LMs are physically compatible.  Mappings are
 * committed only after the full set has been found.
 */
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       struct msm_display_topology *topology)

{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, lm_count = 0;

	if (!topology->num_lm) {
		DPU_ERROR("zero LMs in topology\n");
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
	     lm_count < topology->num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		/*
		 * Reset lm_count to an even index. This will drop the previous
		 * primary mixer if failed to find its peer.
		 */
		lm_count &= ~1;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				crtc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], topology)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		if (lm_count < topology->num_lm) {
			int j = _dpu_rm_get_lm_peer(rm, i);

			/* ignore the peer if there is an error or if the peer was already processed */
			if (j < 0 || j < i)
				continue;

			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, crtc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					topology)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != topology->num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
		global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
		/* dspp_idx[i] stays 0 when num_dspp == 0, so slot 0 is written with 0 (no-op) */
		global_state->dspp_to_crtc_id[dspp_idx[i]] =
			topology->num_dspp ? crtc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

/*
 * Reserve the CTL path blocks for @crtc_id.  Legacy hardware (DPU < 5.0)
 * needs one CTL per interface and matching split-display capability; newer
 * hardware always uses a single active CTL.
 */
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	if (rm->has_legacy_ctls) {
		/*
		 * TODO: check if there is a need for special handling if
		 * DPU < 5.0 get CWB support.
		 */
		num_ctls = top->num_intf;

		needs_split_display = _dpu_rm_needs_split_display(top);
	} else {
		/* use single CTL */
		num_ctls = 1;
		needs_split_display = false;
	}

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("CTL_%d caps 0x%lX\n", j, features);

		/* split-capable CTLs are matched only to split use cases, and vice versa */
		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("CTL_%d match\n", j);

		if (++i == num_ctls)
			break;

	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
		/*
		 * NOTE(review): this traces 'i + CTL_0' (the loop index), not
		 * 'ctl_idx[i] + CTL_0' (the CTL actually reserved) -- confirm
		 * whether the trace is meant to report the allocated block id.
		 */
		trace_dpu_rm_reserve_ctls(i + CTL_0, crtc_id);
	}

	return 0;
}

/*
 * Find the next PINGPONG index >= @start that is already mapped to @crtc_id,
 * i.e. one reserved earlier by _dpu_rm_reserve_lms().  Returns -ENAVAIL when
 * no further PP belongs to this CRTC.
 */
static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
				       int start,
				       uint32_t crtc_id)
{
	int i;

	for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
		if (global_state->pingpong_to_crtc_id[i] == crtc_id)
			return i;
	}

	return -ENAVAIL;
}

/* Enforce the hardware's even-to-even / odd-to-odd DSC-PINGPONG wiring rule */
static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
{
	/*
	 * DSC with even index must be used with the PINGPONG with even index
	 * DSC with odd index must be used with the PINGPONG with odd index
	 */
	if ((dsc_idx & 0x01) != (pp_idx & 0x01))
		return -ENAVAIL;

	return 0;
}

/*
 * Allocate top->num_dsc individual DSC blocks (non-merge case), walking the
 * CRTC's already-reserved PINGPONGs in order and enforcing the parity rule.
 */
static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
			     struct dpu_global_state *global_state,
			     uint32_t crtc_id,
			     const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int pp_idx = 0;
	int dsc_idx;
	int ret;

	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx++) {
		if (!rm->dsc_blks[dsc_idx])
			continue;

		if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret)
			return -ENAVAIL;

		global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
		num_dsc++;
		pp_idx++;
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

/*
 * Allocate DSC blocks in hardware pairs (DSC merge case): pairs always start
 * at an even DSC index, and both members must pass the PINGPONG parity check
 * against two consecutive PPs reserved for this CRTC.  On a parity mismatch
 * the PP search restarts from 0 for the next candidate pair.
 */
static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
				  struct dpu_global_state *global_state,
				  uint32_t crtc_id,
				  const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int dsc_idx, pp_idx = 0;
	int ret;

	/* only start from even dsc index */
	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx += 2) {
		if (!rm->dsc_blks[dsc_idx] ||
		    !rm->dsc_blks[dsc_idx + 1])
			continue;

		/* consecutive dsc index to be paired */
		if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
		    reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
		global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
		num_dsc += 2;
		pp_idx++;	/* start for next pair */
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

/*
 * Reserve DSC blocks for @crtc_id if the topology uses DSC: paired
 * allocation when DSC merge is in use (more DSCs than interfaces),
 * individual allocation otherwise.
 */
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       const struct msm_display_topology *top)
{
	if (!top->num_dsc || !top->num_intf)
		return 0;

	/*
	 * Facts:
	 * 1) no pingpong split (two layer mixers shared one pingpong)
	 * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
	 * 3) even PINGPONG connects to even DSC
	 * 4) odd PINGPONG connects to odd DSC
	 * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
	 *                  +--> pp_idx_1 --> dsc_idx_1
	 */

	/* num_dsc should be either 1, 2 or 4 */
	if (top->num_dsc > top->num_intf) /* merge mode */
		return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
	else
		return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);

	/* unreachable: both branches above return */
	return 0;
}

/*
 * Reserve the single CDM block for @crtc_id.  Only one INTF per commit may
 * use CDM, and the block cannot be shared between CRTCs.
 */
static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       int num_cdm)
{
	/* try allocating only one CDM block */
	if (!rm->cdm_blk) {
		DPU_ERROR("CDM block does not exist\n");
		return -EIO;
	}

	if (num_cdm > 1) {
		DPU_ERROR("More than 1 INTF requesting CDM\n");
		return -EINVAL;
	}

	if (global_state->cdm_to_crtc_id) {
		DPU_ERROR("CDM_0 is already allocated\n");
		return -EIO;
	}

	global_state->cdm_to_crtc_id = crtc_id;

	return 0;
}

/*
 * Run all per-block reservation steps for @crtc_id against @topology.
 * Order matters: LMs (and their PPs) first, since the CWB and DSC steps
 * consume the PINGPONG mappings established here.  On failure the partial
 * mappings are left in global_state; the caller's atomic-check failure path
 * discards the candidate state.
 */
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id,
		struct msm_display_topology *topology)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	if (topology->cwb_enabled) {
		ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
							    crtc_id, topology);
		if (ret)
			return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id,
				   topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
	if (ret)
		return ret;

	if (topology->num_cdm > 0) {
		ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
		if (ret) {
			DPU_ERROR("unable to find CDM blk\n");
			return ret;
		}
	}

	return ret;
}

/* Release every slot in @res_mapping currently owned by @crtc_id */
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t crtc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == crtc_id)
			res_mapping[i] = 0;
	}
}

/**
 * dpu_rm_release - Given the CRTC of the display chain, release any
 *	HW blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 */
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_crtc *crtc)
{
	uint32_t crtc_id = crtc->base.id;

	_dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
		ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
		ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
		ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
		ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
		ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
	/* cdm_to_crtc_id is a single scalar, treated as a one-entry map */
	_dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
	_dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
		ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}

/**
 * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
 *	the use connections and user requirements, specified through related
 *	topology control properties, and reserve hardware blocks to that
 *	display chain.
 *	HW blocks can then be accessed through dpu_rm_get_* functions.
 *	HW Reservations should be released via dpu_rm_release().
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 * @topology: Pointer to topology info for the display
 * @return: 0 on Success otherwise -ERROR
 */
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_crtc *crtc,
		struct msm_display_topology *topology)
{
	int ret;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
		      topology->num_lm, topology->num_dsc,
		      topology->num_intf);

	ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

/*
 * Scan rm->hw_sspp for the first free SSPP of @type that satisfies @reqs
 * (scaling, CSC/YUV, inline rotation).  On success the plane pipe is mapped
 * to @crtc in global_state and returned; NULL when nothing suitable is free.
 */
static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
					   struct dpu_global_state *global_state,
					   struct drm_crtc *crtc,
					   struct dpu_rm_sspp_requirements *reqs,
					   unsigned int type)
{
	uint32_t crtc_id = crtc->base.id;
	struct dpu_hw_sspp *hw_sspp;
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
		if (!rm->hw_sspp[i])
			continue;

		if (global_state->sspp_to_crtc_id[i])
			continue;

		hw_sspp = rm->hw_sspp[i];

		if (hw_sspp->cap->type != type)
			continue;

		if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
			continue;

		// TODO: QSEED2 and RGB scalers are not yet supported
		if (reqs->scale && !hw_sspp->ops.setup_scaler)
			continue;

		if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
			continue;

		if (reqs->rot90 && !(hw_sspp->cap->features & DPU_SSPP_INLINE_ROTATION))
			continue;

		global_state->sspp_to_crtc_id[i] = crtc_id;

		return rm->hw_sspp[i];
	}

	return NULL;
}

/**
 * dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
 * @rm: DPU Resource Manager handle
 * @global_state: private global state
 * @crtc: DRM CRTC handle
 * @reqs: SSPP required features
 */
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
					struct dpu_global_state *global_state,
					struct drm_crtc *crtc,
					struct dpu_rm_sspp_requirements *reqs)
{
	struct dpu_hw_sspp *hw_sspp = NULL;

	/*
	 * Prefer the least capable pipe that can do the job, keeping the more
	 * capable ones (RGB, then VIG) free: DMA -> RGB -> VIG.
	 */
	if (!reqs->scale && !reqs->yuv)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
	if (!hw_sspp && !reqs->yuv)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
	if (!hw_sspp)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);

	return hw_sspp;
}

/**
 * dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
 *	blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 */
void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
			     struct drm_crtc *crtc)
{
	uint32_t crtc_id = crtc->base.id;

	_dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
		ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}

/* Human-readable block-type names for error messages, indexed by enum dpu_hw_blk_type */
static char *dpu_hw_blk_type_name[] = {
	[DPU_HW_BLK_TOP] = "TOP",
	[DPU_HW_BLK_SSPP] = "SSPP",
	[DPU_HW_BLK_LM] = "LM",
	[DPU_HW_BLK_CTL] = "CTL",
	[DPU_HW_BLK_PINGPONG] = "pingpong",
	[DPU_HW_BLK_INTF] = "INTF",
	[DPU_HW_BLK_WB] = "WB",
	[DPU_HW_BLK_DSPP] = "DSPP",
	[DPU_HW_BLK_MERGE_3D] = "merge_3d",
	[DPU_HW_BLK_DSC] = "DSC",
	[DPU_HW_BLK_CDM] = "CDM",
	[DPU_HW_BLK_MAX] = "unknown",
};

/**
 * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
 *	assigned to this encoder
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 * @type: resource type to return data for
 * @blks: pointer to the array to be filled by HW resources
 * @blks_size: size of the @blks array
 */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, struct drm_crtc *crtc,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	uint32_t crtc_id = crtc->base.id;
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_crtc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
	case DPU_HW_BLK_DCWB_PINGPONG:
		/* both share one pool; split below on pp->idx vs PINGPONG_CWB_0 */
		hw_blks = rm->pingpong_blks;
		hw_to_crtc_id = global_state->pingpong_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_crtc_id = global_state->mixer_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_crtc_id = global_state->ctl_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_crtc_id = global_state->dspp_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_crtc_id = global_state->dsc_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	case DPU_HW_BLK_CDM:
		hw_blks = &rm->cdm_blk;
		hw_to_crtc_id = &global_state->cdm_to_crtc_id;
		max_blks = 1;
		break;
	case DPU_HW_BLK_CWB:
		hw_blks = rm->cwb_blks;
		hw_to_crtc_id = global_state->cwb_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->cwb_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_crtc_id[i] != crtc_id)
			continue;

		/* plain PINGPONG query: skip the dedicated CWB PPs */
		if (type == DPU_HW_BLK_PINGPONG) {
			struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

			if (pp->idx >= PINGPONG_CWB_0)
				continue;
		}

		/* DCWB PINGPONG query: only the dedicated CWB PPs */
		if (type == DPU_HW_BLK_DCWB_PINGPONG) {
			struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

			if (pp->idx < PINGPONG_CWB_0)
				continue;
		}

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d %s assigned to crtc %d\n",
				  blks_size, dpu_hw_blk_type_name[type], crtc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("%s unavailable to assign to crtc %d\n",
				  dpu_hw_blk_type_name[type], crtc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}

/*
 * Print one map entry: "-" when the block doesn't exist on this hardware,
 * "#" when it exists but is unassigned, else the owning CRTC id.
 */
static void dpu_rm_print_state_helper(struct drm_printer *p,
				      struct dpu_hw_blk *blk,
				      uint32_t mapping)
{
	if (!blk)
		drm_puts(p, "- ");
	else if (!mapping)
		drm_puts(p, "# ");
	else
		drm_printf(p, "%d ", mapping);
}


/**
 * dpu_rm_print_state - output the RM private state
 * @p: DRM printer
 * @global_state: global state
 */
void dpu_rm_print_state(struct drm_printer *p,
			const struct dpu_global_state *global_state)
{
	const struct dpu_rm *rm = global_state->rm;
	int i;

	drm_puts(p, "resource mapping:\n");
	drm_puts(p, "\tpingpong=");
	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
					  global_state->pingpong_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tmixer=");
	for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->mixer_blks[i],
					  global_state->mixer_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tctl=");
	for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->ctl_blks[i],
					  global_state->ctl_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdspp=");
	for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->dspp_blks[i],
					  global_state->dspp_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdsc=");
	for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->dsc_blks[i],
					  global_state->dsc_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcdm=");
	dpu_rm_print_state_helper(p, rm->cdm_blk,
				  global_state->cdm_to_crtc_id);
	drm_puts(p, "\n");

	drm_puts(p, "\tsspp=");
	/* skip SSPP_NONE and start from the next index */
	for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
					  global_state->sspp_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcwb=");
	for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->cwb_blks[i],
					  global_state->cwb_to_crtc_id[i]);
	drm_puts(p, "\n");
}