1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 4 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) "[drm:%s] " fmt, __func__ 8 #include "dpu_kms.h" 9 #include "dpu_hw_lm.h" 10 #include "dpu_hw_ctl.h" 11 #include "dpu_hw_pingpong.h" 12 #include "dpu_hw_sspp.h" 13 #include "dpu_hw_intf.h" 14 #include "dpu_hw_wb.h" 15 #include "dpu_hw_dspp.h" 16 #include "dpu_hw_merge3d.h" 17 #include "dpu_hw_dsc.h" 18 #include "dpu_encoder.h" 19 #include "dpu_trace.h" 20 21 22 static inline bool reserved_by_other(uint32_t *res_map, int idx, 23 uint32_t enc_id) 24 { 25 return res_map[idx] && res_map[idx] != enc_id; 26 } 27 28 /** 29 * struct dpu_rm_requirements - Reservation requirements parameter bundle 30 * @topology: selected topology for the display 31 * @hw_res: Hardware resources required as reported by the encoders 32 */ 33 struct dpu_rm_requirements { 34 struct msm_display_topology topology; 35 }; 36 37 int dpu_rm_destroy(struct dpu_rm *rm) 38 { 39 int i; 40 41 for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { 42 struct dpu_hw_dspp *hw; 43 44 if (rm->dspp_blks[i]) { 45 hw = to_dpu_hw_dspp(rm->dspp_blks[i]); 46 dpu_hw_dspp_destroy(hw); 47 } 48 } 49 for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { 50 struct dpu_hw_pingpong *hw; 51 52 if (rm->pingpong_blks[i]) { 53 hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]); 54 dpu_hw_pingpong_destroy(hw); 55 } 56 } 57 for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) { 58 struct dpu_hw_merge_3d *hw; 59 60 if (rm->merge_3d_blks[i]) { 61 hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]); 62 dpu_hw_merge_3d_destroy(hw); 63 } 64 } 65 for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) { 66 struct dpu_hw_mixer *hw; 67 68 if (rm->mixer_blks[i]) { 69 hw = to_dpu_hw_mixer(rm->mixer_blks[i]); 70 dpu_hw_lm_destroy(hw); 71 } 72 } 73 for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) { 74 struct dpu_hw_ctl *hw; 75 76 if (rm->ctl_blks[i]) { 77 hw = to_dpu_hw_ctl(rm->ctl_blks[i]); 78 dpu_hw_ctl_destroy(hw); 79 } 80 } 81 for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++) 82 dpu_hw_intf_destroy(rm->hw_intf[i]); 83 84 for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) { 85 struct dpu_hw_dsc *hw; 86 87 if (rm->dsc_blks[i]) { 88 hw = to_dpu_hw_dsc(rm->dsc_blks[i]); 89 dpu_hw_dsc_destroy(hw); 90 } 91 } 92 93 for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++) 94 dpu_hw_wb_destroy(rm->hw_wb[i]); 95 96 for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) 97 dpu_hw_sspp_destroy(rm->hw_sspp[i]); 98 99 return 0; 100 } 101 102 int dpu_rm_init(struct dpu_rm *rm, 103 const struct dpu_mdss_cfg *cat, 104 const struct msm_mdss_data *mdss_data, 105 void __iomem *mmio) 106 { 107 int rc, i; 108 109 if (!rm || !cat || !mmio) { 110 DPU_ERROR("invalid kms\n"); 111 return -EINVAL; 112 } 113 114 /* Clear, setup lists */ 115 memset(rm, 0, sizeof(*rm)); 116 117 /* Interrogate HW catalog and create tracking items for hw blocks */ 118 for (i = 0; i < cat->mixer_count; i++) { 119 struct dpu_hw_mixer *hw; 120 const struct dpu_lm_cfg *lm = &cat->mixer[i]; 121 122 hw = dpu_hw_lm_init(lm, mmio); 123 if (IS_ERR(hw)) { 124 rc = PTR_ERR(hw); 125 DPU_ERROR("failed lm object creation: err %d\n", rc); 126 goto fail; 127 } 128 rm->mixer_blks[lm->id - LM_0] = &hw->base; 129 } 130 131 for (i = 0; i < cat->merge_3d_count; i++) { 132 struct dpu_hw_merge_3d *hw; 133 const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i]; 134 135 hw = dpu_hw_merge_3d_init(merge_3d, mmio); 136 if (IS_ERR(hw)) { 137 rc = PTR_ERR(hw); 138 DPU_ERROR("failed merge_3d object creation: err %d\n", 139 rc); 140 goto fail; 141 } 142 rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base; 143 } 144 145 for (i = 0; i < cat->pingpong_count; i++) { 146 struct dpu_hw_pingpong *hw; 147 const struct dpu_pingpong_cfg *pp = &cat->pingpong[i]; 148 149 hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver); 150 if (IS_ERR(hw)) { 151 rc = PTR_ERR(hw); 152 DPU_ERROR("failed pingpong object creation: err %d\n", 153 rc); 154 goto fail; 155 } 156 if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX) 157 hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]); 158 rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base; 159 } 160 161 for (i = 0; i < cat->intf_count; i++) { 162 struct dpu_hw_intf *hw; 163 const struct dpu_intf_cfg *intf = &cat->intf[i]; 164 165 hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver); 166 if (IS_ERR(hw)) { 167 rc = PTR_ERR(hw); 168 DPU_ERROR("failed intf object creation: err %d\n", rc); 169 goto fail; 170 } 171 rm->hw_intf[intf->id - INTF_0] = hw; 172 } 173 174 for (i = 0; i < cat->wb_count; i++) { 175 struct dpu_hw_wb *hw; 176 const struct dpu_wb_cfg *wb = &cat->wb[i]; 177 178 hw = dpu_hw_wb_init(wb, mmio, cat->mdss_ver); 179 if (IS_ERR(hw)) { 180 rc = PTR_ERR(hw); 181 DPU_ERROR("failed wb object creation: err %d\n", rc); 182 goto fail; 183 } 184 rm->hw_wb[wb->id - WB_0] = hw; 185 } 186 187 for (i = 0; i < cat->ctl_count; i++) { 188 struct dpu_hw_ctl *hw; 189 const struct dpu_ctl_cfg *ctl = &cat->ctl[i]; 190 191 hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer); 192 if (IS_ERR(hw)) { 193 rc = PTR_ERR(hw); 194 DPU_ERROR("failed ctl object creation: err %d\n", rc); 195 goto fail; 196 } 197 rm->ctl_blks[ctl->id - CTL_0] = &hw->base; 198 } 199 200 for (i = 0; i < cat->dspp_count; i++) { 201 struct dpu_hw_dspp *hw; 202 const struct dpu_dspp_cfg *dspp = &cat->dspp[i]; 203 204 hw = dpu_hw_dspp_init(dspp, mmio); 205 if (IS_ERR(hw)) { 206 rc = PTR_ERR(hw); 207 DPU_ERROR("failed dspp object creation: err %d\n", rc); 208 goto fail; 209 } 210 rm->dspp_blks[dspp->id - DSPP_0] = &hw->base; 211 } 212 213 for (i = 0; i < cat->dsc_count; i++) { 214 struct dpu_hw_dsc *hw; 215 const struct dpu_dsc_cfg *dsc = &cat->dsc[i]; 216 217 if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features)) 218 hw = dpu_hw_dsc_init_1_2(dsc, mmio); 219 else 220 hw = dpu_hw_dsc_init(dsc, mmio); 221 222 if (IS_ERR(hw)) { 223 rc = PTR_ERR(hw); 224 DPU_ERROR("failed dsc object creation: err %d\n", rc); 225 goto fail; 226 } 227 rm->dsc_blks[dsc->id - DSC_0] = &hw->base; 228 } 229 230 for (i = 0; i < cat->sspp_count; i++) { 231 struct dpu_hw_sspp *hw; 232 const struct dpu_sspp_cfg *sspp = &cat->sspp[i]; 233 234 hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver); 235 if (IS_ERR(hw)) { 236 rc = PTR_ERR(hw); 237 DPU_ERROR("failed sspp object creation: err %d\n", rc); 238 goto fail; 239 } 240 rm->hw_sspp[sspp->id - SSPP_NONE] = hw; 241 } 242 243 return 0; 244 245 fail: 246 dpu_rm_destroy(rm); 247 248 return rc ? rc : -EFAULT; 249 } 250 251 static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top) 252 { 253 return top->num_intf > 1; 254 } 255 256 /** 257 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary 258 * @rm: dpu resource manager handle 259 * @primary_idx: index of primary mixer in rm->mixer_blks[] 260 */ 261 static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx) 262 { 263 const struct dpu_lm_cfg *prim_lm_cfg; 264 265 prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap; 266 267 if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX) 268 return prim_lm_cfg->lm_pair - LM_0; 269 return -EINVAL; 270 } 271 272 /** 273 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets 274 * proposed use case requirements, incl. hardwired dependent blocks like 275 * pingpong 276 * @rm: dpu resource manager handle 277 * @global_state: resources shared across multiple kms objects 278 * @enc_id: encoder id requesting for allocation 279 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks 280 * if lm, and all other hardwired blocks connected to the lm (pp) is 281 * available and appropriate 282 * @pp_idx: output parameter, index of pingpong block attached to the layer 283 * mixer in rm->pingpong_blks[]. 284 * @dspp_idx: output parameter, index of dspp block attached to the layer 285 * mixer in rm->dspp_blks[]. 286 * @reqs: input parameter, rm requirements for HW blocks needed in the 287 * datapath. 288 * Return: true if lm matches all requirements, false otherwise 289 */ 290 static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, 291 struct dpu_global_state *global_state, 292 uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx, 293 struct dpu_rm_requirements *reqs) 294 { 295 const struct dpu_lm_cfg *lm_cfg; 296 int idx; 297 298 /* Already reserved? */ 299 if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) { 300 DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0); 301 return false; 302 } 303 304 lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap; 305 idx = lm_cfg->pingpong - PINGPONG_0; 306 if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) { 307 DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong); 308 return false; 309 } 310 311 if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) { 312 DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id, 313 lm_cfg->pingpong); 314 return false; 315 } 316 *pp_idx = idx; 317 318 if (!reqs->topology.num_dspp) 319 return true; 320 321 idx = lm_cfg->dspp - DSPP_0; 322 if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) { 323 DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp); 324 return false; 325 } 326 327 if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) { 328 DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id, 329 lm_cfg->dspp); 330 return false; 331 } 332 *dspp_idx = idx; 333 334 return true; 335 } 336 337 static int _dpu_rm_reserve_lms(struct dpu_rm *rm, 338 struct dpu_global_state *global_state, 339 uint32_t enc_id, 340 struct dpu_rm_requirements *reqs) 341 342 { 343 int lm_idx[MAX_BLOCKS]; 344 int pp_idx[MAX_BLOCKS]; 345 int dspp_idx[MAX_BLOCKS] = {0}; 346 int i, lm_count = 0; 347 348 if (!reqs->topology.num_lm) { 349 DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm); 350 return -EINVAL; 351 } 352 353 /* Find a primary mixer */ 354 for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) && 355 lm_count < reqs->topology.num_lm; i++) { 356 if (!rm->mixer_blks[i]) 357 continue; 358 359 lm_count = 0; 360 lm_idx[lm_count] = i; 361 362 if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, 363 enc_id, i, &pp_idx[lm_count], 364 &dspp_idx[lm_count], reqs)) { 365 continue; 366 } 367 368 ++lm_count; 369 370 /* Valid primary mixer found, find matching peers */ 371 if (lm_count < reqs->topology.num_lm) { 372 int j = _dpu_rm_get_lm_peer(rm, i); 373 374 /* ignore the peer if there is an error or if the peer was already processed */ 375 if (j < 0 || j < i) 376 continue; 377 378 if (!rm->mixer_blks[j]) 379 continue; 380 381 if (!_dpu_rm_check_lm_and_get_connected_blks(rm, 382 global_state, enc_id, j, 383 &pp_idx[lm_count], &dspp_idx[lm_count], 384 reqs)) { 385 continue; 386 } 387 388 lm_idx[lm_count] = j; 389 ++lm_count; 390 } 391 } 392 393 if (lm_count != reqs->topology.num_lm) { 394 DPU_DEBUG("unable to find appropriate mixers\n"); 395 return -ENAVAIL; 396 } 397 398 for (i = 0; i < lm_count; i++) { 399 global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; 400 global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; 401 global_state->dspp_to_enc_id[dspp_idx[i]] = 402 reqs->topology.num_dspp ? enc_id : 0; 403 404 trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, 405 pp_idx[i] + PINGPONG_0); 406 } 407 408 return 0; 409 } 410 411 static int _dpu_rm_reserve_ctls( 412 struct dpu_rm *rm, 413 struct dpu_global_state *global_state, 414 uint32_t enc_id, 415 const struct msm_display_topology *top) 416 { 417 int ctl_idx[MAX_BLOCKS]; 418 int i = 0, j, num_ctls; 419 bool needs_split_display; 420 421 /* each hw_intf needs its own hw_ctrl to program its control path */ 422 num_ctls = top->num_intf; 423 424 needs_split_display = _dpu_rm_needs_split_display(top); 425 426 for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) { 427 const struct dpu_hw_ctl *ctl; 428 unsigned long features; 429 bool has_split_display; 430 431 if (!rm->ctl_blks[j]) 432 continue; 433 if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id)) 434 continue; 435 436 ctl = to_dpu_hw_ctl(rm->ctl_blks[j]); 437 features = ctl->caps->features; 438 has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; 439 440 DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features); 441 442 if (needs_split_display != has_split_display) 443 continue; 444 445 ctl_idx[i] = j; 446 DPU_DEBUG("ctl %d match\n", j + CTL_0); 447 448 if (++i == num_ctls) 449 break; 450 451 } 452 453 if (i != num_ctls) 454 return -ENAVAIL; 455 456 for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) { 457 global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id; 458 trace_dpu_rm_reserve_ctls(i + CTL_0, enc_id); 459 } 460 461 return 0; 462 } 463 464 static int _dpu_rm_reserve_dsc(struct dpu_rm *rm, 465 struct dpu_global_state *global_state, 466 struct drm_encoder *enc, 467 const struct msm_display_topology *top) 468 { 469 int num_dsc = top->num_dsc; 470 int i; 471 472 /* check if DSC required are allocated or not */ 473 for (i = 0; i < num_dsc; i++) { 474 if (!rm->dsc_blks[i]) { 475 DPU_ERROR("DSC %d does not exist\n", i); 476 return -EIO; 477 } 478 479 if (global_state->dsc_to_enc_id[i]) { 480 DPU_ERROR("DSC %d is already allocated\n", i); 481 return -EIO; 482 } 483 } 484 485 for (i = 0; i < num_dsc; i++) 486 global_state->dsc_to_enc_id[i] = enc->base.id; 487 488 return 0; 489 } 490 491 static int _dpu_rm_make_reservation( 492 struct dpu_rm *rm, 493 struct dpu_global_state *global_state, 494 struct drm_encoder *enc, 495 struct dpu_rm_requirements *reqs) 496 { 497 int ret; 498 499 ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs); 500 if (ret) { 501 DPU_ERROR("unable to find appropriate mixers\n"); 502 return ret; 503 } 504 505 ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id, 506 &reqs->topology); 507 if (ret) { 508 DPU_ERROR("unable to find appropriate CTL\n"); 509 return ret; 510 } 511 512 ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology); 513 if (ret) 514 return ret; 515 516 return ret; 517 } 518 519 static int _dpu_rm_populate_requirements( 520 struct drm_encoder *enc, 521 struct dpu_rm_requirements *reqs, 522 struct msm_display_topology req_topology) 523 { 524 reqs->topology = req_topology; 525 526 DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n", 527 reqs->topology.num_lm, reqs->topology.num_dsc, 528 reqs->topology.num_intf); 529 530 return 0; 531 } 532 533 static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt, 534 uint32_t enc_id) 535 { 536 int i; 537 538 for (i = 0; i < cnt; i++) { 539 if (res_mapping[i] == enc_id) 540 res_mapping[i] = 0; 541 } 542 } 543 544 void dpu_rm_release(struct dpu_global_state *global_state, 545 struct drm_encoder *enc) 546 { 547 _dpu_rm_clear_mapping(global_state->pingpong_to_enc_id, 548 ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id); 549 _dpu_rm_clear_mapping(global_state->mixer_to_enc_id, 550 ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id); 551 _dpu_rm_clear_mapping(global_state->ctl_to_enc_id, 552 ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); 553 _dpu_rm_clear_mapping(global_state->dsc_to_enc_id, 554 ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); 555 _dpu_rm_clear_mapping(global_state->dspp_to_enc_id, 556 ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id); 557 } 558 559 int dpu_rm_reserve( 560 struct dpu_rm *rm, 561 struct dpu_global_state *global_state, 562 struct drm_encoder *enc, 563 struct drm_crtc_state *crtc_state, 564 struct msm_display_topology topology) 565 { 566 struct dpu_rm_requirements reqs; 567 int ret; 568 569 /* Check if this is just a page-flip */ 570 if (!drm_atomic_crtc_needs_modeset(crtc_state)) 571 return 0; 572 573 if (IS_ERR(global_state)) { 574 DPU_ERROR("failed to global state\n"); 575 return PTR_ERR(global_state); 576 } 577 578 DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n", 579 enc->base.id, crtc_state->crtc->base.id); 580 581 ret = _dpu_rm_populate_requirements(enc, &reqs, topology); 582 if (ret) { 583 DPU_ERROR("failed to populate hw requirements\n"); 584 return ret; 585 } 586 587 ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs); 588 if (ret) 589 DPU_ERROR("failed to reserve hw resources: %d\n", ret); 590 591 592 593 return ret; 594 } 595 596 int dpu_rm_get_assigned_resources(struct dpu_rm *rm, 597 struct dpu_global_state *global_state, uint32_t enc_id, 598 enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size) 599 { 600 struct dpu_hw_blk **hw_blks; 601 uint32_t *hw_to_enc_id; 602 int i, num_blks, max_blks; 603 604 switch (type) { 605 case DPU_HW_BLK_PINGPONG: 606 hw_blks = rm->pingpong_blks; 607 hw_to_enc_id = global_state->pingpong_to_enc_id; 608 max_blks = ARRAY_SIZE(rm->pingpong_blks); 609 break; 610 case DPU_HW_BLK_LM: 611 hw_blks = rm->mixer_blks; 612 hw_to_enc_id = global_state->mixer_to_enc_id; 613 max_blks = ARRAY_SIZE(rm->mixer_blks); 614 break; 615 case DPU_HW_BLK_CTL: 616 hw_blks = rm->ctl_blks; 617 hw_to_enc_id = global_state->ctl_to_enc_id; 618 max_blks = ARRAY_SIZE(rm->ctl_blks); 619 break; 620 case DPU_HW_BLK_DSPP: 621 hw_blks = rm->dspp_blks; 622 hw_to_enc_id = global_state->dspp_to_enc_id; 623 max_blks = ARRAY_SIZE(rm->dspp_blks); 624 break; 625 case DPU_HW_BLK_DSC: 626 hw_blks = rm->dsc_blks; 627 hw_to_enc_id = global_state->dsc_to_enc_id; 628 max_blks = ARRAY_SIZE(rm->dsc_blks); 629 break; 630 default: 631 DPU_ERROR("blk type %d not managed by rm\n", type); 632 return 0; 633 } 634 635 num_blks = 0; 636 for (i = 0; i < max_blks; i++) { 637 if (hw_to_enc_id[i] != enc_id) 638 continue; 639 640 if (num_blks == blks_size) { 641 DPU_ERROR("More than %d resources assigned to enc %d\n", 642 blks_size, enc_id); 643 break; 644 } 645 if (!hw_blks[i]) { 646 DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n", 647 type, enc_id); 648 break; 649 } 650 blks[num_blks++] = hw_blks[i]; 651 } 652 653 return num_blks; 654 } 655