// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}
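
/*
 * Example (illustrative, not from the driver): res_map[idx] == 0 means the
 * slot is free and res_map[idx] == enc_id means it is already held by this
 * encoder; only a non-zero owner different from enc_id counts as "reserved
 * by other".
 */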

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:  selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

int dpu_rm_init(struct drm_device *dev,
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		const struct msm_mdss_data *mdss_data,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid input params\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(dev, lm, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dev, dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
			hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dev, dsc, mmio);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	if (cat->cdm) {
		struct dpu_hw_cdm *hw;

		hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cdm object creation: err %d\n", rc);
			goto fail;
		}
		rm->cdm_blk = &hw->base;
	}

	return 0;

fail:
	return rc ? rc : -EFAULT;
}
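
/*
 * Usage sketch (illustrative; the dpu_kms field names are assumptions based
 * on how the KMS side of the driver wires things together, not part of this
 * file):
 *
 *	rc = dpu_rm_init(ddev, &dpu_kms->rm, dpu_kms->catalog,
 *			 dpu_kms->mdss_data, dpu_kms->mmio);
 *	if (rc)
 *		return rc;
 */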

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_get_lm_peer - get the index of a mixer which is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 *
 * Return: index of the peer mixer in rm->mixer_blks[], or -EINVAL if the
 * primary mixer has no hardwired peer
 */
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;

	if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
		return prim_lm_cfg->lm_pair - LM_0;
	return -EINVAL;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if the proposed layer mixer
 *	meets the proposed use case requirements, incl. hardwired dependent
 *	blocks like pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *      if the lm, and all other hardwired blocks connected to it (pp), are
 *      available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *      mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *      mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *      datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		/* start over with this mixer as the primary candidate */
		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		if (lm_count < reqs->topology.num_lm) {
			int j = _dpu_rm_get_lm_peer(rm, i);

			/* ignore the peer if there is an error or if the peer was already processed */
			if (j < 0 || j < i)
				continue;

			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}
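
/*
 * Example (illustrative): for a num_lm == 2 topology the loop above tries
 * each free mixer as the primary and then asks _dpu_rm_get_lm_peer() for
 * its hardwired pair, so a dual-LM display ends up on a catalog-defined
 * pair such as LM_0/LM_1 rather than on two arbitrary mixers.
 */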

static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		/* report the reserved CTL's hw index, matching the LM trace */
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}
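
/*
 * Example (illustrative): a dual-DSI panel reports num_intf == 2, so every
 * CTL reserved for it must advertise DPU_CTL_SPLIT_DISPLAY, while a
 * single-interface display only matches CTLs without that capability.
 */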

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	int num_dsc = top->num_dsc;
	int i;

	/* check that the required DSC blocks exist and are unallocated */
	for (i = 0; i < num_dsc; i++) {
		if (!rm->dsc_blks[i]) {
			DPU_ERROR("DSC %d does not exist\n", i);
			return -EIO;
		}

		if (global_state->dsc_to_enc_id[i]) {
			DPU_ERROR("DSC %d is already allocated\n", i);
			return -EIO;
		}
	}

	/* DSC blocks are always taken contiguously from index 0 */
	for (i = 0; i < num_dsc; i++)
		global_state->dsc_to_enc_id[i] = enc->base.id;

	return 0;
}

static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc)
{
	/* try allocating only one CDM block */
	if (!rm->cdm_blk) {
		DPU_ERROR("CDM block does not exist\n");
		return -EIO;
	}

	if (global_state->cdm_to_enc_id) {
		DPU_ERROR("CDM_0 is already allocated\n");
		return -EIO;
	}

	global_state->cdm_to_enc_id = enc->base.id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
	if (ret)
		return ret;

	if (reqs->topology.needs_cdm) {
		ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
		if (ret) {
			DPU_ERROR("unable to find CDM blk\n");
			return ret;
		}
	}

	return ret;
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_dsc,
		      reqs->topology.num_intf, reqs->topology.needs_cdm);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(&global_state->cdm_to_enc_id, 1, enc->base.id);
}
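
/*
 * Usage sketch (illustrative; dpu_kms_get_existing_global_state() and the
 * surrounding names are assumptions based on the KMS side of the driver):
 *
 *	global_state = dpu_kms_get_existing_global_state(dpu_kms);
 *	dpu_rm_release(global_state, drm_enc);
 */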

int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}
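
/*
 * Usage sketch (illustrative; mirrors how the encoder's atomic_check path
 * is expected to call in, with the local names here being assumptions):
 *
 *	global_state = dpu_kms_get_global_state(crtc_state->state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 *
 *	ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
 *			     drm_enc, crtc_state, topology);
 */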

int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	case DPU_HW_BLK_CDM:
		hw_blks = &rm->cdm_blk;
		hw_to_enc_id = &global_state->cdm_to_enc_id;
		max_blks = 1;
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}
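
/*
 * Usage sketch (illustrative; MAX_CHANNELS_PER_ENC and the local names are
 * assumptions following the encoder side of the driver):
 *
 *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 *	int num_pp;
 *
 *	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
 *			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 */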
636