// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};
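
/**
 * dpu_rm_init - initialize the resource manager by constructing tracking
 *	objects for every hardware block listed in the catalog
 * @dev: associated drm device
 * @rm: DPU resource manager handle to be initialized
 * @cat: pointer to the hardware catalog to interrogate
 * @mdss_data: pointer to the MDSS configuration data, used for SSPP setup
 * @mmio: mapped register I/O base address
 *
 * Return: 0 on success, or a negative errno if any block fails to
 * initialize.
 */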
int dpu_rm_init(struct drm_device *dev,
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		const struct msm_mdss_data *mdss_data,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(dev, lm, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dev, dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
			hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dev, dsc, mmio);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	if (cat->cdm) {
		struct dpu_hw_cdm *hw;

		hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cdm object creation: err %d\n", rc);
			goto fail;
		}
		rm->cdm_blk = &hw->base;
	}

	return 0;

fail:
	return rc ? rc : -EFAULT;
}

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 *
 * Returns: lm peer mixer id on success or %-EINVAL on error
 */
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;

	if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
		return prim_lm_cfg->lm_pair - LM_0;
	return -EINVAL;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if lm, and all other hardwired blocks connected to the lm (pp) are
 *	available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *	datapath.
 *
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}
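
/**
 * _dpu_rm_reserve_lms - try to reserve the requested number of layer mixers,
 *	together with their hardwired pingpong (and optional dspp) blocks,
 *	for the given encoder
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @reqs: reservation requirements, incl. the selected topology
 *
 * Additional mixers beyond the primary are only taken from the primary
 * mixer's designated peer, so the resulting set can drive one display.
 * Mappings are written to @global_state only once a complete set is found.
 *
 * Return: 0 on success, -EINVAL for an invalid topology, or -ENAVAIL if no
 * suitable set of mixers is available.
 */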
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		if (lm_count < reqs->topology.num_lm) {
			int j = _dpu_rm_get_lm_peer(rm, i);

			/* ignore the peer if there is an error or if the peer was already processed */
			if (j < 0 || j < i)
				continue;

			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}
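
/**
 * _dpu_rm_reserve_ctls - reserve one CTL block per requested interface
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @top: selected display topology
 *
 * Only CTL blocks whose split-display capability matches the topology's
 * need for split display are considered.
 *
 * Return: 0 on success, or -ENAVAIL if not enough matching CTLs are free.
 */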
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id);
	}

	return 0;
}

static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
				       int start,
				       uint32_t enc_id)
{
	int i;

	for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
		if (global_state->pingpong_to_enc_id[i] == enc_id)
			return i;
	}

	return -ENAVAIL;
}

static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
{
	/*
	 * DSC with even index must be used with the PINGPONG with even index
	 * DSC with odd index must be used with the PINGPONG with odd index
	 */
	if ((dsc_idx & 0x01) != (pp_idx & 0x01))
		return -ENAVAIL;

	return 0;
}

static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
			     struct dpu_global_state *global_state,
			     uint32_t enc_id,
			     const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int pp_idx = 0;
	int dsc_idx;
	int ret;

	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx++) {
		if (!rm->dsc_blks[dsc_idx])
			continue;

		if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret)
			return -ENAVAIL;

		global_state->dsc_to_enc_id[dsc_idx] = enc_id;
		num_dsc++;
		pp_idx++;
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			   num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}
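
/**
 * _dpu_rm_dsc_alloc_pair - allocate DSC blocks in consecutive even/odd pairs
 *	for DSC merge topologies
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @top: selected display topology
 *
 * A pair always starts at an even DSC index, and each half of the pair must
 * land on a pingpong of matching parity (see _dpu_rm_pingpong_dsc_check()).
 *
 * Return: 0 on success, or -ENAVAIL if no suitable pair can be found.
 */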
static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
				  struct dpu_global_state *global_state,
				  uint32_t enc_id,
				  const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int dsc_idx, pp_idx = 0;
	int ret;

	/* only start from even dsc index */
	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx += 2) {
		if (!rm->dsc_blks[dsc_idx] ||
		    !rm->dsc_blks[dsc_idx + 1])
			continue;

		/* consecutive dsc index to be paired */
		if (reserved_by_other(global_state->dsc_to_enc_id, dsc_idx, enc_id) ||
		    reserved_by_other(global_state->dsc_to_enc_id, dsc_idx + 1, enc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, enc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, enc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		global_state->dsc_to_enc_id[dsc_idx] = enc_id;
		global_state->dsc_to_enc_id[dsc_idx + 1] = enc_id;
		num_dsc += 2;
		pp_idx++;	/* start for next pair */
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			   num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	uint32_t enc_id = enc->base.id;

	if (!top->num_dsc || !top->num_intf)
		return 0;

	/*
	 * Facts:
	 * 1) no pingpong split (two layer mixers shared one pingpong)
	 * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
	 * 3) even PINGPONG connects to even DSC
	 * 4) odd PINGPONG connects to odd DSC
	 * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
	 *                  +--> pp_idx_1 --> dsc_idx_1
	 */

	/* num_dsc should be either 1, 2 or 4 */
	if (top->num_dsc > top->num_intf) /* merge mode */
		return _dpu_rm_dsc_alloc_pair(rm, global_state, enc_id, top);
	else
		return _dpu_rm_dsc_alloc(rm, global_state, enc_id, top);
}

static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc)
{
	/* try allocating only one CDM block */
	if (!rm->cdm_blk) {
		DPU_ERROR("CDM block does not exist\n");
		return -EIO;
	}

	if (global_state->cdm_to_enc_id) {
		DPU_ERROR("CDM_0 is already allocated\n");
		return -EIO;
	}

	global_state->cdm_to_enc_id = enc->base.id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
	if (ret)
		return ret;

	if (reqs->topology.needs_cdm) {
		ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
		if (ret) {
			DPU_ERROR("unable to find CDM blk\n");
			return ret;
		}
	}

	return ret;
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_dsc,
		      reqs->topology.num_intf, reqs->topology.needs_cdm);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}
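
/**
 * dpu_rm_release - release the hardware blocks reserved for the given encoder
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM encoder whose reservations should be cleared
 *
 * Clears every hw block to encoder id mapping that belongs to @enc so the
 * blocks can be reserved again.
 */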
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}
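
/**
 * dpu_rm_reserve - reserve the hardware blocks needed by the encoder for the
 *	requested topology
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM encoder requesting the reservation
 * @crtc_state: atomic state of the CRTC driven by @enc
 * @topology: requested display topology
 *
 * The reservation is skipped when the commit is a plain page-flip, i.e. it
 * does not require a modeset.
 *
 * Return: 0 on success, or a negative errno on failure.
 */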
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	case DPU_HW_BLK_CDM:
		hw_blks = &rm->cdm_blk;
		hw_to_enc_id = &global_state->cdm_to_enc_id;
		max_blks = 1;
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}

static void dpu_rm_print_state_helper(struct drm_printer *p,
				      struct dpu_hw_blk *blk,
				      uint32_t mapping)
{
	if (!blk)
		drm_puts(p, "- ");
	else if (!mapping)
		drm_puts(p, "# ");
	else
		drm_printf(p, "%d ", mapping);
}

void dpu_rm_print_state(struct drm_printer *p,
			const struct dpu_global_state *global_state)
{
	const struct dpu_rm *rm = global_state->rm;
	int i;

	drm_puts(p, "resource mapping:\n");
	drm_puts(p, "\tpingpong=");
	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
					  global_state->pingpong_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tmixer=");
	for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->mixer_blks[i],
					  global_state->mixer_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tctl=");
	for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->ctl_blks[i],
					  global_state->ctl_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdspp=");
	for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->dspp_blks[i],
					  global_state->dspp_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdsc=");
	for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_enc_id); i++)
		dpu_rm_print_state_helper(p, rm->dsc_blks[i],
					  global_state->dsc_to_enc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcdm=");
	dpu_rm_print_state_helper(p, rm->cdm_blk,
				  global_state->cdm_to_enc_id);
	drm_puts(p, "\n");
}