// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

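/*
 * Reservation state is tracked as one map per block class: each entry
 * holds the id of the owning CRTC, or 0 when the block is free. A block
 * is "reserved by other" when its entry is non-zero and belongs to a
 * different CRTC than the one requesting it.
 */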
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t crtc_id)
{
	return res_map[idx] && res_map[idx] != crtc_id;
}

/**
 * dpu_rm_init - Read hardware catalog and create reservation tracking objects
 *	for all HW blocks.
 * @dev: Corresponding device for devres management
 * @rm: DPU Resource Manager handle
 * @cat: Pointer to hardware catalog
 * @mdss_data: Pointer to MDSS / UBWC configuration
 * @mmio: mapped register io address of MDP
 * @return: 0 on Success otherwise -ERROR
 */
int dpu_rm_init(struct drm_device *dev,
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		const struct qcom_ubwc_cfg_data *mdss_data,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	rm->has_legacy_ctls = (cat->mdss_ver->core_major_ver < 5);

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(dev, lm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				  rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				  rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->cwb_count; i++) {
		struct dpu_hw_cwb *hw;
		const struct dpu_cwb_cfg *cwb = &cat->cwb[i];

		hw = dpu_hw_cwb_init(dev, cwb, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cwb object creation: err %d\n", rc);
			goto fail;
		}
		rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mdss_ver, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dev, dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		if (cat->mdss_ver->core_major_ver >= 7)
			hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dev, dsc, mmio, cat->mdss_ver);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	if (cat->cdm) {
		struct dpu_hw_cdm *hw;

		hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cdm object creation: err %d\n", rc);
			goto fail;
		}
		rm->cdm_blk = &hw->base;
	}

	return 0;

fail:
	return rc ? rc : -EFAULT;
}

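/*
 * Topologies driving more than one interface (e.g. dual-DSI panels)
 * need the legacy split-display CTL pairing handled below.
 */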
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 *
 * Returns: lm peer mixer id on success or %-EINVAL on error
 */
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;

	if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
		return prim_lm_cfg->lm_pair - LM_0;
	return -EINVAL;
}

static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
						 struct dpu_global_state *global_state,
						 uint32_t crtc_id,
						 struct msm_display_topology *topology)
{
	int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
	int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
	int cwb_pp_idx[MAX_BLOCKS];
	int cwb_mux_idx[MAX_BLOCKS];

	/*
	 * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
	 * mixer
	 *
	 * TODO: add support for reserving resources on platforms with no
	 * PINGPONG_CWB
	 */
	for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
	     cwb_mux_count < num_cwb_mux; i++) {
		for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
			/*
			 * Odd LMs must be assigned to odd CWB muxes and even
			 * LMs to even CWB muxes.
			 *
			 * Since the RM HW block array index is based on the HW
			 * block ids, we can also use the array index to enforce
			 * the odd/even rule. See dpu_rm_init() for more
			 * information.
			 */
			if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
			    i % 2 != j % 2)
				continue;

			cwb_mux_idx[cwb_mux_count] = j;
			cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
			cwb_mux_count++;
			break;
		}
	}

	if (cwb_mux_count != num_cwb_mux) {
		DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
		return -ENAVAIL;
	}

	for (int i = 0; i < cwb_mux_count; i++) {
		global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
		global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
	}

	return 0;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc_id: crtc id requesting allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if the lm, and all other hardwired blocks connected to the lm (pp), are
 *	available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @topology: selected topology for the display
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
						    struct dpu_global_state *global_state,
						    uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
						    struct msm_display_topology *topology)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
			  lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!topology->num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
			  lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

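/*
 * LM allocation: walk the mixer pool looking for a free primary mixer;
 * when the topology needs a second LM, only the hard-wired peer of the
 * primary (see _dpu_rm_get_lm_peer()) is considered, and the search
 * restarts from the next candidate if that peer is unavailable.
 */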
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       struct msm_display_topology *topology)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, lm_count = 0;

	if (!topology->num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", topology->num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
	     lm_count < topology->num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				crtc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], topology)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		if (lm_count < topology->num_lm) {
			int j = _dpu_rm_get_lm_peer(rm, i);

			/* ignore the peer if there is an error or if the peer was already processed */
			if (j < 0 || j < i)
				continue;

			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, crtc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					topology)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != topology->num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
		global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
		global_state->dspp_to_crtc_id[dspp_idx[i]] =
			topology->num_dspp ? crtc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

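/*
 * CTL selection: DPU < 5.0 needs one CTL per interface, each with the
 * SPLIT_DISPLAY capability matching the topology; DPU >= 5.0 drives the
 * whole display chain from a single CTL.
 */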
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	if (rm->has_legacy_ctls) {
		/*
		 * TODO: check if there is a need for special handling if
		 * DPU < 5.0 gets CWB support.
		 */
		num_ctls = top->num_intf;

		needs_split_display = _dpu_rm_needs_split_display(top);
	} else {
		/* use single CTL */
		num_ctls = 1;
		needs_split_display = false;
	}

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, crtc_id);
	}

	return 0;
}

static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
				       int start,
				       uint32_t crtc_id)
{
	int i;

	for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
		if (global_state->pingpong_to_crtc_id[i] == crtc_id)
			return i;
	}

	return -ENAVAIL;
}

static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
{
	/*
	 * DSC with even index must be used with the PINGPONG with even index
	 * DSC with odd index must be used with the PINGPONG with odd index
	 */
	if ((dsc_idx & 0x01) != (pp_idx & 0x01))
		return -ENAVAIL;

	return 0;
}

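/*
 * Allocate DSC blocks one at a time: for each free DSC, find the next
 * PINGPONG already reserved for this CRTC and enforce the even/odd index
 * pairing checked by _dpu_rm_pingpong_dsc_check().
 */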
static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
			     struct dpu_global_state *global_state,
			     uint32_t crtc_id,
			     const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int pp_idx = 0;
	int dsc_idx;
	int ret;

	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx++) {
		if (!rm->dsc_blks[dsc_idx])
			continue;

		if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret)
			return -ENAVAIL;

		global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
		num_dsc++;
		pp_idx++;
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
				  struct dpu_global_state *global_state,
				  uint32_t crtc_id,
				  const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int dsc_idx, pp_idx = 0;
	int ret;

	/* only start from an even dsc index */
	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx += 2) {
		if (!rm->dsc_blks[dsc_idx] ||
		    !rm->dsc_blks[dsc_idx + 1])
			continue;

		/* consecutive dsc indices to be paired */
		if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
		    reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
		global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
		num_dsc += 2;
		pp_idx++;	/* start for next pair */
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       const struct msm_display_topology *top)
{
	if (!top->num_dsc || !top->num_intf)
		return 0;

	/*
	 * Facts:
	 * 1) no pingpong split (two layer mixers shared one pingpong)
	 * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
	 * 3) even PINGPONG connects to even DSC
	 * 4) odd PINGPONG connects to odd DSC
	 * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
	 *                  +--> pp_idx_1 --> dsc_idx_1
	 */

	/* num_dsc should be either 1, 2 or 4 */
	if (top->num_dsc > top->num_intf) /* merge mode */
		return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
	else
		return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);
}

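/*
 * Only a single CDM block exists per DPU (rm->cdm_blk), so at most one
 * CRTC can hold the CDM reservation at a time.
 */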
static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       int num_cdm)
{
	/* try allocating only one CDM block */
	if (!rm->cdm_blk) {
		DPU_ERROR("CDM block does not exist\n");
		return -EIO;
	}

	if (num_cdm > 1) {
		DPU_ERROR("More than 1 INTF requesting CDM\n");
		return -EINVAL;
	}

	if (global_state->cdm_to_crtc_id) {
		DPU_ERROR("CDM_0 is already allocated\n");
		return -EIO;
	}

	global_state->cdm_to_crtc_id = crtc_id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id,
		struct msm_display_topology *topology)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	if (topology->cwb_enabled) {
		ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
							    crtc_id, topology);
		if (ret)
			return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id, topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
	if (ret)
		return ret;

	if (topology->num_cdm > 0) {
		ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
		if (ret) {
			DPU_ERROR("unable to find CDM blk\n");
			return ret;
		}
	}

	return ret;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t crtc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == crtc_id)
			res_mapping[i] = 0;
	}
}

/**
 * dpu_rm_release - Given the CRTC of the display chain, release any
 *	HW blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 */
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_crtc *crtc)
{
	uint32_t crtc_id = crtc->base.id;

	_dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
			      ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
			      ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
			      ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
			      ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
			      ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
	_dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
			      ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}

/**
 * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
 *	the connections and user requirements, specified through related
 *	topology control properties, and reserve hardware blocks for that
 *	display chain.
 *	HW blocks can then be accessed through dpu_rm_get_* functions.
 *	HW Reservations should be released via dpu_rm_release.
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 * @topology: Pointer to topology info for the display
 * @return: 0 on Success otherwise -ERROR
 */
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_crtc *crtc,
		struct msm_display_topology *topology)
{
	int ret;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
		      topology->num_lm, topology->num_dsc,
		      topology->num_intf);

	ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

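/*
 * Scan the SSPP pool for a free pipe of @type that satisfies @reqs:
 * scaling needs a scaler sub-block and a setup_scaler op, YUV formats
 * need a CSC sub-block, and rot90 needs inline rotation support.
 */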
static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
					   struct dpu_global_state *global_state,
					   struct drm_crtc *crtc,
					   struct dpu_rm_sspp_requirements *reqs,
					   unsigned int type)
{
	uint32_t crtc_id = crtc->base.id;
	struct dpu_hw_sspp *hw_sspp;
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
		if (!rm->hw_sspp[i])
			continue;

		if (global_state->sspp_to_crtc_id[i])
			continue;

		hw_sspp = rm->hw_sspp[i];

		if (hw_sspp->cap->type != type)
			continue;

		if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
			continue;

		/* TODO: QSEED2 and RGB scalers are not yet supported */
		if (reqs->scale && !hw_sspp->ops.setup_scaler)
			continue;

		if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
			continue;

		if (reqs->rot90 && !(hw_sspp->cap->features & DPU_SSPP_INLINE_ROTATION))
			continue;

		global_state->sspp_to_crtc_id[i] = crtc_id;

		return rm->hw_sspp[i];
	}

	return NULL;
}

/**
 * dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
 * @rm: DPU Resource Manager handle
 * @global_state: private global state
 * @crtc: DRM CRTC handle
 * @reqs: SSPP required features
 * @return: reserved SSPP on success, NULL if no suitable SSPP is available
 */
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
					struct dpu_global_state *global_state,
					struct drm_crtc *crtc,
					struct dpu_rm_sspp_requirements *reqs)
{
	struct dpu_hw_sspp *hw_sspp = NULL;

	if (!reqs->scale && !reqs->yuv)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
	if (!hw_sspp && reqs->scale)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
	if (!hw_sspp)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);

	return hw_sspp;
}

/**
 * dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
 *	blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 */
void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
			     struct drm_crtc *crtc)
{
	uint32_t crtc_id = crtc->base.id;

	_dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
			      ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}

/**
 * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
 *	assigned to this CRTC
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 * @type: resource type to return data for
 * @blks: pointer to the array to be filled by HW resources
 * @blks_size: size of the @blks array
 * @return: number of blocks written to @blks
 */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
				  struct dpu_global_state *global_state, struct drm_crtc *crtc,
				  enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	uint32_t crtc_id = crtc->base.id;
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_crtc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
	case DPU_HW_BLK_DCWB_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_crtc_id = global_state->pingpong_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_crtc_id = global_state->mixer_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_crtc_id = global_state->ctl_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_crtc_id = global_state->dspp_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_crtc_id = global_state->dsc_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	case DPU_HW_BLK_CDM:
		hw_blks = &rm->cdm_blk;
		hw_to_crtc_id = &global_state->cdm_to_crtc_id;
		max_blks = 1;
		break;
	case DPU_HW_BLK_CWB:
		hw_blks = rm->cwb_blks;
		hw_to_crtc_id = global_state->cwb_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->cwb_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_crtc_id[i] != crtc_id)
			continue;

		if (type == DPU_HW_BLK_PINGPONG) {
			struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

			if (pp->idx >= PINGPONG_CWB_0)
				continue;
		}

		if (type == DPU_HW_BLK_DCWB_PINGPONG) {
			struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

			if (pp->idx < PINGPONG_CWB_0)
				continue;
		}

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to crtc %d\n",
				  blks_size, crtc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
				  type, crtc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}

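/*
 * Print a single reservation entry: "-" when the block does not exist on
 * this hardware, "#" when it exists but is unallocated, otherwise the id
 * of the owning CRTC.
 */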
static void dpu_rm_print_state_helper(struct drm_printer *p,
				      struct dpu_hw_blk *blk,
				      uint32_t mapping)
{
	if (!blk)
		drm_puts(p, "- ");
	else if (!mapping)
		drm_puts(p, "# ");
	else
		drm_printf(p, "%d ", mapping);
}

/**
 * dpu_rm_print_state - output the RM private state
 * @p: DRM printer
 * @global_state: global state
 */
void dpu_rm_print_state(struct drm_printer *p,
			const struct dpu_global_state *global_state)
{
	const struct dpu_rm *rm = global_state->rm;
	int i;

	drm_puts(p, "resource mapping:\n");
	drm_puts(p, "\tpingpong=");
	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
					  global_state->pingpong_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tmixer=");
	for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->mixer_blks[i],
					  global_state->mixer_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tctl=");
	for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->ctl_blks[i],
					  global_state->ctl_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdspp=");
	for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->dspp_blks[i],
					  global_state->dspp_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdsc=");
	for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->dsc_blks[i],
					  global_state->dsc_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcdm=");
	dpu_rm_print_state_helper(p, rm->cdm_blk,
				  global_state->cdm_to_crtc_id);
	drm_puts(p, "\n");

	drm_puts(p, "\tsspp=");
	/* skip SSPP_NONE and start from the next index */
	for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
					  global_state->sspp_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcwb=");
	for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->cwb_blks[i],
					  global_state->cwb_to_crtc_id[i]);
	drm_puts(p, "\n");
}