1 /*
2 * Copyright 2012-15 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include "dm_services.h"
27
28 #include "resource.h"
29 #include "include/irq_service_interface.h"
30 #include "link_encoder.h"
31 #include "stream_encoder.h"
32 #include "opp.h"
33 #include "timing_generator.h"
34 #include "transform.h"
35 #include "dccg.h"
36 #include "dchubbub.h"
37 #include "dpp.h"
38 #include "core_types.h"
39 #include "set_mode_types.h"
40 #include "virtual/virtual_stream_encoder.h"
41 #include "dpcd_defs.h"
42 #include "link_enc_cfg.h"
43 #include "link.h"
44 #include "clk_mgr.h"
45 #include "dc_state_priv.h"
46 #include "dc_stream_priv.h"
47
48 #include "virtual/virtual_link_hwss.h"
49 #include "link/hwss/link_hwss_dio.h"
50 #include "link/hwss/link_hwss_dpia.h"
51 #include "link/hwss/link_hwss_hpo_dp.h"
52 #include "link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h"
53 #include "link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h"
54
55 #if defined(CONFIG_DRM_AMD_DC_SI)
56 #include "dce60/dce60_resource.h"
57 #endif
58 #include "dce80/dce80_resource.h"
59 #include "dce100/dce100_resource.h"
60 #include "dce110/dce110_resource.h"
61 #include "dce112/dce112_resource.h"
62 #include "dce120/dce120_resource.h"
63 #include "dcn10/dcn10_resource.h"
64 #include "dcn20/dcn20_resource.h"
65 #include "dcn21/dcn21_resource.h"
66 #include "dcn201/dcn201_resource.h"
67 #include "dcn30/dcn30_resource.h"
68 #include "dcn301/dcn301_resource.h"
69 #include "dcn302/dcn302_resource.h"
70 #include "dcn303/dcn303_resource.h"
71 #include "dcn31/dcn31_resource.h"
72 #include "dcn314/dcn314_resource.h"
73 #include "dcn315/dcn315_resource.h"
74 #include "dcn316/dcn316_resource.h"
75 #include "dcn32/dcn32_resource.h"
76 #include "dcn321/dcn321_resource.h"
77 #include "dcn35/dcn35_resource.h"
78 #include "dcn351/dcn351_resource.h"
79 #include "dcn36/dcn36_resource.h"
80 #include "dcn401/dcn401_resource.h"
81 #if defined(CONFIG_DRM_AMD_DC_FP)
82 #include "dc_spl_translate.h"
83 #endif
84
85 #define VISUAL_CONFIRM_BASE_DEFAULT 3
86 #define VISUAL_CONFIRM_BASE_MIN 1
87 #define VISUAL_CONFIRM_BASE_MAX 10
88 /* we choose 240 because it is a common divisor of common v addressable values
89 * such as 2160, 1440, 1200 and 960, so we take 1/240 of the v addressable as
90 * the visual confirm dpp offset height. This keeps the visual confirm height
91 * relatively constant regardless of the timing used.
92 */
93 #define VISUAL_CONFIRM_DPP_OFFSET_DENO 240
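/* For example, with v_addressable = 2160 each DPP instance adds an extra
 * 2160 / 240 = 9 lines of visual confirm offset on top of the base offset
 * (see calculate_adjust_recout_for_visual_confirm() below).
 */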
94
95 #define DC_LOGGER \
96 dc->ctx->logger
97 #define DC_LOGGER_INIT(logger)
98
99 #include "dml2/dml2_wrapper.h"
100
101 #define UNABLE_TO_SPLIT -1
102
103 enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
104 {
105 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
106
107 switch (asic_id.chip_family) {
108
109 #if defined(CONFIG_DRM_AMD_DC_SI)
110 case FAMILY_SI:
111 if (ASIC_REV_IS_TAHITI_P(asic_id.hw_internal_rev) ||
112 ASIC_REV_IS_PITCAIRN_PM(asic_id.hw_internal_rev) ||
113 ASIC_REV_IS_CAPEVERDE_M(asic_id.hw_internal_rev))
114 dc_version = DCE_VERSION_6_0;
115 else if (ASIC_REV_IS_OLAND_M(asic_id.hw_internal_rev))
116 dc_version = DCE_VERSION_6_4;
117 else
118 dc_version = DCE_VERSION_6_1;
119 break;
120 #endif
121 case FAMILY_CI:
122 dc_version = DCE_VERSION_8_0;
123 break;
124 case FAMILY_KV:
125 if (ASIC_REV_IS_KALINDI(asic_id.hw_internal_rev) ||
126 ASIC_REV_IS_BHAVANI(asic_id.hw_internal_rev) ||
127 ASIC_REV_IS_GODAVARI(asic_id.hw_internal_rev))
128 dc_version = DCE_VERSION_8_3;
129 else
130 dc_version = DCE_VERSION_8_1;
131 break;
132 case FAMILY_CZ:
133 dc_version = DCE_VERSION_11_0;
134 break;
135
136 case FAMILY_VI:
137 if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
138 ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
139 dc_version = DCE_VERSION_10_0;
140 break;
141 }
142 if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
143 ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
144 ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
145 dc_version = DCE_VERSION_11_2;
146 }
147 if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
148 dc_version = DCE_VERSION_11_22;
149 break;
150 case FAMILY_AI:
151 if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
152 dc_version = DCE_VERSION_12_1;
153 else
154 dc_version = DCE_VERSION_12_0;
155 break;
156 case FAMILY_RV:
157 dc_version = DCN_VERSION_1_0;
158 if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
159 dc_version = DCN_VERSION_1_01;
160 if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
161 dc_version = DCN_VERSION_2_1;
162 if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev))
163 dc_version = DCN_VERSION_2_1;
164 break;
165
166 case FAMILY_NV:
167 dc_version = DCN_VERSION_2_0;
168 if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
169 dc_version = DCN_VERSION_2_01;
170 break;
171 }
172 if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev))
173 dc_version = DCN_VERSION_3_0;
174 if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev))
175 dc_version = DCN_VERSION_3_02;
176 if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev))
177 dc_version = DCN_VERSION_3_03;
178 break;
179
180 case FAMILY_VGH:
181 dc_version = DCN_VERSION_3_01;
182 break;
183
184 case FAMILY_YELLOW_CARP:
185 if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev))
186 dc_version = DCN_VERSION_3_1;
187 break;
188 case AMDGPU_FAMILY_GC_10_3_6:
189 if (ASICREV_IS_GC_10_3_6(asic_id.hw_internal_rev))
190 dc_version = DCN_VERSION_3_15;
191 break;
192 case AMDGPU_FAMILY_GC_10_3_7:
193 if (ASICREV_IS_GC_10_3_7(asic_id.hw_internal_rev))
194 dc_version = DCN_VERSION_3_16;
195 break;
196 case AMDGPU_FAMILY_GC_11_0_0:
197 dc_version = DCN_VERSION_3_2;
198 if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
199 dc_version = DCN_VERSION_3_21;
200 break;
201 case AMDGPU_FAMILY_GC_11_0_1:
202 dc_version = DCN_VERSION_3_14;
203 break;
204 case AMDGPU_FAMILY_GC_11_5_0:
205 dc_version = DCN_VERSION_3_5;
206 if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev))
207 dc_version = DCN_VERSION_3_51;
208 if (ASICREV_IS_DCN36(asic_id.hw_internal_rev))
209 dc_version = DCN_VERSION_3_6;
210 break;
211 case AMDGPU_FAMILY_GC_12_0_0:
212 if (ASICREV_IS_GC_12_0_1_A0(asic_id.hw_internal_rev) ||
213 ASICREV_IS_GC_12_0_0_A0(asic_id.hw_internal_rev))
214 dc_version = DCN_VERSION_4_01;
215 break;
216 default:
217 dc_version = DCE_VERSION_UNKNOWN;
218 break;
219 }
220 return dc_version;
221 }
222
223 struct resource_pool *dc_create_resource_pool(struct dc *dc,
224 const struct dc_init_data *init_data,
225 enum dce_version dc_version)
226 {
227 struct resource_pool *res_pool = NULL;
228
229 switch (dc_version) {
230 #if defined(CONFIG_DRM_AMD_DC_SI)
231 case DCE_VERSION_6_0:
232 res_pool = dce60_create_resource_pool(
233 init_data->num_virtual_links, dc);
234 break;
235 case DCE_VERSION_6_1:
236 res_pool = dce61_create_resource_pool(
237 init_data->num_virtual_links, dc);
238 break;
239 case DCE_VERSION_6_4:
240 res_pool = dce64_create_resource_pool(
241 init_data->num_virtual_links, dc);
242 break;
243 #endif
244 case DCE_VERSION_8_0:
245 res_pool = dce80_create_resource_pool(
246 init_data->num_virtual_links, dc);
247 break;
248 case DCE_VERSION_8_1:
249 res_pool = dce81_create_resource_pool(
250 init_data->num_virtual_links, dc);
251 break;
252 case DCE_VERSION_8_3:
253 res_pool = dce83_create_resource_pool(
254 init_data->num_virtual_links, dc);
255 break;
256 case DCE_VERSION_10_0:
257 res_pool = dce100_create_resource_pool(
258 init_data->num_virtual_links, dc);
259 break;
260 case DCE_VERSION_11_0:
261 res_pool = dce110_create_resource_pool(
262 init_data->num_virtual_links, dc,
263 init_data->asic_id);
264 break;
265 case DCE_VERSION_11_2:
266 case DCE_VERSION_11_22:
267 res_pool = dce112_create_resource_pool(
268 init_data->num_virtual_links, dc);
269 break;
270 case DCE_VERSION_12_0:
271 case DCE_VERSION_12_1:
272 res_pool = dce120_create_resource_pool(
273 init_data->num_virtual_links, dc);
274 break;
275
276 #if defined(CONFIG_DRM_AMD_DC_FP)
277 case DCN_VERSION_1_0:
278 case DCN_VERSION_1_01:
279 res_pool = dcn10_create_resource_pool(init_data, dc);
280 break;
281 case DCN_VERSION_2_0:
282 res_pool = dcn20_create_resource_pool(init_data, dc);
283 break;
284 case DCN_VERSION_2_1:
285 res_pool = dcn21_create_resource_pool(init_data, dc);
286 break;
287 case DCN_VERSION_2_01:
288 res_pool = dcn201_create_resource_pool(init_data, dc);
289 break;
290 case DCN_VERSION_3_0:
291 res_pool = dcn30_create_resource_pool(init_data, dc);
292 break;
293 case DCN_VERSION_3_01:
294 res_pool = dcn301_create_resource_pool(init_data, dc);
295 break;
296 case DCN_VERSION_3_02:
297 res_pool = dcn302_create_resource_pool(init_data, dc);
298 break;
299 case DCN_VERSION_3_03:
300 res_pool = dcn303_create_resource_pool(init_data, dc);
301 break;
302 case DCN_VERSION_3_1:
303 res_pool = dcn31_create_resource_pool(init_data, dc);
304 break;
305 case DCN_VERSION_3_14:
306 res_pool = dcn314_create_resource_pool(init_data, dc);
307 break;
308 case DCN_VERSION_3_15:
309 res_pool = dcn315_create_resource_pool(init_data, dc);
310 break;
311 case DCN_VERSION_3_16:
312 res_pool = dcn316_create_resource_pool(init_data, dc);
313 break;
314 case DCN_VERSION_3_2:
315 res_pool = dcn32_create_resource_pool(init_data, dc);
316 break;
317 case DCN_VERSION_3_21:
318 res_pool = dcn321_create_resource_pool(init_data, dc);
319 break;
320 case DCN_VERSION_3_5:
321 res_pool = dcn35_create_resource_pool(init_data, dc);
322 break;
323 case DCN_VERSION_3_51:
324 res_pool = dcn351_create_resource_pool(init_data, dc);
325 break;
326 case DCN_VERSION_3_6:
327 res_pool = dcn36_create_resource_pool(init_data, dc);
328 break;
329 case DCN_VERSION_4_01:
330 res_pool = dcn401_create_resource_pool(init_data, dc);
331 break;
332 #endif /* CONFIG_DRM_AMD_DC_FP */
333 default:
334 break;
335 }
336
337 if (res_pool != NULL) {
338 if (dc->ctx->dc_bios->fw_info_valid) {
339 res_pool->ref_clocks.xtalin_clock_inKhz =
340 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
341 /* initialize with firmware data first; not all
342 * ASICs have a DCCG SW component. FPGA or
343 * simulation needs dccg_ref_clock_inKhz and
344 * dchub_ref_clock_inKhz initialized
345 * with xtalin_clock_inKhz
346 */
347 res_pool->ref_clocks.dccg_ref_clock_inKhz =
348 res_pool->ref_clocks.xtalin_clock_inKhz;
349 res_pool->ref_clocks.dchub_ref_clock_inKhz =
350 res_pool->ref_clocks.xtalin_clock_inKhz;
351 } else
352 ASSERT_CRITICAL(false);
353 }
354
355 return res_pool;
356 }
357
358 void dc_destroy_resource_pool(struct dc *dc)
359 {
360 if (dc) {
361 if (dc->res_pool)
362 dc->res_pool->funcs->destroy(&dc->res_pool);
363
364 kfree(dc->hwseq);
365 }
366 }
367
368 static void update_num_audio(
369 const struct resource_straps *straps,
370 unsigned int *num_audio,
371 struct audio_support *aud_support)
372 {
373 aud_support->dp_audio = true;
374 aud_support->hdmi_audio_native = false;
375 aud_support->hdmi_audio_on_dongle = false;
376
377 if (straps->hdmi_disable == 0) {
378 if (straps->dc_pinstraps_audio & 0x2) {
379 aud_support->hdmi_audio_on_dongle = true;
380 aud_support->hdmi_audio_native = true;
381 }
382 }
383
384 switch (straps->audio_stream_number) {
385 case 0: /* multi streams supported */
386 break;
387 case 1: /* multi streams not supported */
388 *num_audio = 1;
389 break;
390 default:
391 DC_ERR("DC: unexpected audio fuse!\n");
392 }
393 }
394
395 bool resource_construct(
396 unsigned int num_virtual_links,
397 struct dc *dc,
398 struct resource_pool *pool,
399 const struct resource_create_funcs *create_funcs)
400 {
401 struct dc_context *ctx = dc->ctx;
402 const struct resource_caps *caps = pool->res_cap;
403 int i;
404 unsigned int num_audio = caps->num_audio;
405 struct resource_straps straps = {0};
406
407 if (create_funcs->read_dce_straps)
408 create_funcs->read_dce_straps(dc->ctx, &straps);
409
410 pool->audio_count = 0;
411 if (create_funcs->create_audio) {
412 /* find the total number of streams available via the
413 * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
414 * registers (one for each pin) starting from pin 1
415 * up to the max number of audio pins.
416 * We stop on the first pin where
417 * PORT_CONNECTIVITY == 1 (as instructed by HW team).
418 */
419 update_num_audio(&straps, &num_audio, &pool->audio_support);
420 for (i = 0; i < caps->num_audio; i++) {
421 struct audio *aud = create_funcs->create_audio(ctx, i);
422
423 if (aud == NULL) {
424 DC_ERR("DC: failed to create audio!\n");
425 return false;
426 }
427 if (!aud->funcs->endpoint_valid(aud)) {
428 aud->funcs->destroy(&aud);
429 break;
430 }
431 pool->audios[i] = aud;
432 pool->audio_count++;
433 }
434 }
435
436 pool->stream_enc_count = 0;
437 if (create_funcs->create_stream_encoder) {
438 for (i = 0; i < caps->num_stream_encoder; i++) {
439 pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
440 if (pool->stream_enc[i] == NULL)
441 DC_ERR("DC: failed to create stream_encoder!\n");
442 pool->stream_enc_count++;
443 }
444 }
445
446 pool->hpo_dp_stream_enc_count = 0;
447 if (create_funcs->create_hpo_dp_stream_encoder) {
448 for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) {
449 pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx);
450 if (pool->hpo_dp_stream_enc[i] == NULL)
451 DC_ERR("DC: failed to create HPO DP stream encoder!\n");
452 pool->hpo_dp_stream_enc_count++;
453
454 }
455 }
456
457 pool->hpo_dp_link_enc_count = 0;
458 if (create_funcs->create_hpo_dp_link_encoder) {
459 for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) {
460 pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx);
461 if (pool->hpo_dp_link_enc[i] == NULL)
462 DC_ERR("DC: failed to create HPO DP link encoder!\n");
463 pool->hpo_dp_link_enc_count++;
464 }
465 }
466
467 for (i = 0; i < caps->num_mpc_3dlut; i++) {
468 pool->mpc_lut[i] = dc_create_3dlut_func();
469 if (pool->mpc_lut[i] == NULL)
470 DC_ERR("DC: failed to create MPC 3dlut!\n");
471 pool->mpc_shaper[i] = dc_create_transfer_func();
472 if (pool->mpc_shaper[i] == NULL)
473 DC_ERR("DC: failed to create MPC shaper!\n");
474 }
475
476 dc->caps.dynamic_audio = false;
477 if (pool->audio_count < pool->stream_enc_count) {
478 dc->caps.dynamic_audio = true;
479 }
480 for (i = 0; i < num_virtual_links; i++) {
481 pool->stream_enc[pool->stream_enc_count] =
482 virtual_stream_encoder_create(
483 ctx, ctx->dc_bios);
484 if (pool->stream_enc[pool->stream_enc_count] == NULL) {
485 DC_ERR("DC: failed to create stream_encoder!\n");
486 return false;
487 }
488 pool->stream_enc_count++;
489 }
490
491 dc->hwseq = create_funcs->create_hwseq(ctx);
492
493 return true;
494 }
495 static int find_matching_clock_source(
496 const struct resource_pool *pool,
497 struct clock_source *clock_source)
498 {
499
500 int i;
501
502 for (i = 0; i < pool->clk_src_count; i++) {
503 if (pool->clock_sources[i] == clock_source)
504 return i;
505 }
506 return -1;
507 }
508
509 void resource_unreference_clock_source(
510 struct resource_context *res_ctx,
511 const struct resource_pool *pool,
512 struct clock_source *clock_source)
513 {
514 int i = find_matching_clock_source(pool, clock_source);
515
516 if (i > -1)
517 res_ctx->clock_source_ref_count[i]--;
518
519 if (pool->dp_clock_source == clock_source)
520 res_ctx->dp_clock_source_ref_count--;
521 }
522
523 void resource_reference_clock_source(
524 struct resource_context *res_ctx,
525 const struct resource_pool *pool,
526 struct clock_source *clock_source)
527 {
528 int i = find_matching_clock_source(pool, clock_source);
529
530 if (i > -1)
531 res_ctx->clock_source_ref_count[i]++;
532
533 if (pool->dp_clock_source == clock_source)
534 res_ctx->dp_clock_source_ref_count++;
535 }
536
537 int resource_get_clock_source_reference(
538 struct resource_context *res_ctx,
539 const struct resource_pool *pool,
540 struct clock_source *clock_source)
541 {
542 int i = find_matching_clock_source(pool, clock_source);
543
544 if (i > -1)
545 return res_ctx->clock_source_ref_count[i];
546
547 if (pool->dp_clock_source == clock_source)
548 return res_ctx->dp_clock_source_ref_count;
549
550 return -1;
551 }
552
553 bool resource_are_vblanks_synchronizable(
554 struct dc_stream_state *stream1,
555 struct dc_stream_state *stream2)
556 {
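/* the entries below make the frame_time_diff check accept stream1:stream2 frame
 * time ratios of 1:1 (entry 10), 1:2 (entry 20) and 2:1 (entry 5), within the
 * vblank_alignment_max_frame_time_diff tolerance
 */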
557 uint32_t base60_refresh_rates[] = {10, 20, 5};
558 uint8_t i;
559 uint8_t rr_count = ARRAY_SIZE(base60_refresh_rates);
560 uint64_t frame_time_diff;
561
562 if (stream1->ctx->dc->config.vblank_alignment_dto_params &&
563 stream1->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0 &&
564 dc_is_dp_signal(stream1->signal) &&
565 dc_is_dp_signal(stream2->signal) &&
566 false == stream1->has_non_synchronizable_pclk &&
567 false == stream2->has_non_synchronizable_pclk &&
568 stream1->timing.flags.VBLANK_SYNCHRONIZABLE &&
569 stream2->timing.flags.VBLANK_SYNCHRONIZABLE) {
570 /* disable refresh rates higher than 60Hz for now */
571 if (stream1->timing.pix_clk_100hz*100/stream1->timing.h_total/
572 stream1->timing.v_total > 60)
573 return false;
574 if (stream2->timing.pix_clk_100hz*100/stream2->timing.h_total/
575 stream2->timing.v_total > 60)
576 return false;
577 frame_time_diff = (uint64_t)10000 *
578 stream1->timing.h_total *
579 stream1->timing.v_total *
580 stream2->timing.pix_clk_100hz;
581 frame_time_diff = div_u64(frame_time_diff, stream1->timing.pix_clk_100hz);
582 frame_time_diff = div_u64(frame_time_diff, stream2->timing.h_total);
583 frame_time_diff = div_u64(frame_time_diff, stream2->timing.v_total);
584 for (i = 0; i < rr_count; i++) {
585 int64_t diff = (int64_t)div_u64(frame_time_diff * base60_refresh_rates[i], 10) - 10000;
586
587 if (diff < 0)
588 diff = -diff;
589 if (diff < stream1->ctx->dc->config.vblank_alignment_max_frame_time_diff)
590 return true;
591 }
592 }
593 return false;
594 }
595
596 bool resource_are_streams_timing_synchronizable(
597 struct dc_stream_state *stream1,
598 struct dc_stream_state *stream2)
599 {
600 if (stream1->timing.h_total != stream2->timing.h_total)
601 return false;
602
603 if (stream1->timing.v_total != stream2->timing.v_total)
604 return false;
605
606 if (stream1->timing.h_addressable
607 != stream2->timing.h_addressable)
608 return false;
609
610 if (stream1->timing.v_addressable
611 != stream2->timing.v_addressable)
612 return false;
613
614 if (stream1->timing.v_front_porch
615 != stream2->timing.v_front_porch)
616 return false;
617
618 if (stream1->timing.pix_clk_100hz
619 != stream2->timing.pix_clk_100hz)
620 return false;
621
622 if (stream1->clamping.c_depth != stream2->clamping.c_depth)
623 return false;
624
625 if (stream1->phy_pix_clk != stream2->phy_pix_clk
626 && (!dc_is_dp_signal(stream1->signal)
627 || !dc_is_dp_signal(stream2->signal)))
628 return false;
629
630 if (stream1->view_format != stream2->view_format)
631 return false;
632
633 if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
634 return false;
635
636 return true;
637 }
638 static bool is_dp_and_hdmi_sharable(
639 struct dc_stream_state *stream1,
640 struct dc_stream_state *stream2)
641 {
642 if (stream1->ctx->dc->caps.disable_dp_clk_share)
643 return false;
644
645 if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
646 stream2->clamping.c_depth != COLOR_DEPTH_888)
647 return false;
648
649 return true;
650
651 }
652
653 static bool is_sharable_clk_src(
654 const struct pipe_ctx *pipe_with_clk_src,
655 const struct pipe_ctx *pipe)
656 {
657 if (pipe_with_clk_src->clock_source == NULL)
658 return false;
659
660 if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
661 return false;
662
663 if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
664 (dc_is_dp_signal(pipe->stream->signal) &&
665 !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
666 pipe->stream)))
667 return false;
668
669 if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
670 && dc_is_dual_link_signal(pipe->stream->signal))
671 return false;
672
673 if (dc_is_hdmi_signal(pipe->stream->signal)
674 && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
675 return false;
676
677 if (!resource_are_streams_timing_synchronizable(
678 pipe_with_clk_src->stream, pipe->stream))
679 return false;
680
681 return true;
682 }
683
684 struct clock_source *resource_find_used_clk_src_for_sharing(
685 struct resource_context *res_ctx,
686 struct pipe_ctx *pipe_ctx)
687 {
688 int i;
689
690 for (i = 0; i < MAX_PIPES; i++) {
691 if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
692 return res_ctx->pipe_ctx[i].clock_source;
693 }
694
695 return NULL;
696 }
697
698 static enum pixel_format convert_pixel_format_to_dalsurface(
699 enum surface_pixel_format surface_pixel_format)
700 {
701 enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
702
703 switch (surface_pixel_format) {
704 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
705 dal_pixel_format = PIXEL_FORMAT_INDEX8;
706 break;
707 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
708 dal_pixel_format = PIXEL_FORMAT_RGB565;
709 break;
710 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
711 dal_pixel_format = PIXEL_FORMAT_RGB565;
712 break;
713 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
714 dal_pixel_format = PIXEL_FORMAT_ARGB8888;
715 break;
716 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
717 dal_pixel_format = PIXEL_FORMAT_ARGB8888;
718 break;
719 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
720 dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
721 break;
722 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
723 dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
724 break;
725 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
726 dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
727 break;
728 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
729 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
730 dal_pixel_format = PIXEL_FORMAT_FP16;
731 break;
732 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
733 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
734 dal_pixel_format = PIXEL_FORMAT_420BPP8;
735 break;
736 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
737 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
738 dal_pixel_format = PIXEL_FORMAT_420BPP10;
739 break;
740 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
741 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
742 default:
743 dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
744 break;
745 }
746 return dal_pixel_format;
747 }
748
749 static inline void get_vp_scan_direction(
750 enum dc_rotation_angle rotation,
751 bool horizontal_mirror,
752 bool *orthogonal_rotation,
753 bool *flip_vert_scan_dir,
754 bool *flip_horz_scan_dir)
755 {
756 *orthogonal_rotation = false;
757 *flip_vert_scan_dir = false;
758 *flip_horz_scan_dir = false;
759 if (rotation == ROTATION_ANGLE_180) {
760 *flip_vert_scan_dir = true;
761 *flip_horz_scan_dir = true;
762 } else if (rotation == ROTATION_ANGLE_90) {
763 *orthogonal_rotation = true;
764 *flip_horz_scan_dir = true;
765 } else if (rotation == ROTATION_ANGLE_270) {
766 *orthogonal_rotation = true;
767 *flip_vert_scan_dir = true;
768 }
769
770 if (horizontal_mirror)
771 *flip_horz_scan_dir = !*flip_horz_scan_dir;
772 }
773
774 static struct rect intersect_rec(const struct rect *r0, const struct rect *r1)
775 {
776 struct rect rec;
777 int r0_x_end = r0->x + r0->width;
778 int r1_x_end = r1->x + r1->width;
779 int r0_y_end = r0->y + r0->height;
780 int r1_y_end = r1->y + r1->height;
781
782 rec.x = r0->x > r1->x ? r0->x : r1->x;
783 rec.width = r0_x_end > r1_x_end ? r1_x_end - rec.x : r0_x_end - rec.x;
784 rec.y = r0->y > r1->y ? r0->y : r1->y;
785 rec.height = r0_y_end > r1_y_end ? r1_y_end - rec.y : r0_y_end - rec.y;
786
787 /* in case there is no intersection */
788 if (rec.width < 0 || rec.height < 0)
789 memset(&rec, 0, sizeof(rec));
790
791 return rec;
792 }
793
794 static struct rect shift_rec(const struct rect *rec_in, int x, int y)
795 {
796 struct rect rec_out = *rec_in;
797
798 rec_out.x += x;
799 rec_out.y += y;
800
801 return rec_out;
802 }
803
804 static struct rect calculate_plane_rec_in_timing_active(
805 struct pipe_ctx *pipe_ctx,
806 const struct rect *rec_in)
807 {
808 /*
809 * The following diagram shows an example where we map a 1920x1200
810 * desktop to a 2560x1440 timing with a plane rect in the middle
811 * of the screen. To map a plane rect from Stream Source to Timing
812 * Active space, we first apply the stream scaling ratios (i.e. 2304/1920
813 * horizontal and 1440/1200 vertical) to the plane's x and y, then
814 * we add the stream destination offsets (i.e. 128 horizontal, 0 vertical).
815 * This gives us the plane rect's position in Timing Active. However,
816 * we have to remove the fractional part. The rule is that we find the left/right
817 * and top/bottom positions and round each value to the nearest integer.
818 *
819 * Stream Source Space
820 * ------------
821 * __________________________________________________
822 * |Stream Source (1920 x 1200) ^ |
823 * | y |
824 * | <------- w --------|> |
825 * | __________________V |
826 * |<-- x -->|Plane//////////////| ^ |
827 * | |(pre scale)////////| | |
828 * | |///////////////////| | |
829 * | |///////////////////| h |
830 * | |///////////////////| | |
831 * | |///////////////////| | |
832 * | |///////////////////| V |
833 * | |
834 * | |
835 * |__________________________________________________|
836 *
837 *
838 * Timing Active Space
839 * ---------------------------------
840 *
841 * Timing Active (2560 x 1440)
842 * __________________________________________________
843 * |*****| Stream Destination (2304 x 1440) |*****|
844 * |*****| |*****|
845 * |<128>| |*****|
846 * |*****| __________________ |*****|
847 * |*****| |Plane/////////////| |*****|
848 * |*****| |(post scale)//////| |*****|
849 * |*****| |//////////////////| |*****|
850 * |*****| |//////////////////| |*****|
851 * |*****| |//////////////////| |*****|
852 * |*****| |//////////////////| |*****|
853 * |*****| |*****|
854 * |*****| |*****|
855 * |*****| |*****|
856 * |*****|______________________________________|*****|
857 *
858 * So the resulting formulas are shown below:
859 *
860 * recout_x = 128 + round(plane_x * 2304 / 1920)
861 * recout_w = 128 + round((plane_x + plane_w) * 2304 / 1920) - recout_x
862 * recout_y = 0 + round(plane_y * 1440 / 1200)
863 * recout_h = 0 + round((plane_y + plane_h) * 1440 / 1200) - recout_y
864 *
865 * NOTE: fixed point division is not error free. To reduce errors
866 * introduced by fixed point division, we divide only after
867 * multiplication is complete.
868 */
869 const struct dc_stream_state *stream = pipe_ctx->stream;
870 struct rect rec_out = {0};
871 struct fixed31_32 temp;
872
873 temp = dc_fixpt_from_fraction(rec_in->x * (long long)stream->dst.width,
874 stream->src.width);
875 rec_out.x = stream->dst.x + dc_fixpt_round(temp);
876
877 temp = dc_fixpt_from_fraction(
878 (rec_in->x + rec_in->width) * (long long)stream->dst.width,
879 stream->src.width);
880 rec_out.width = stream->dst.x + dc_fixpt_round(temp) - rec_out.x;
881
882 temp = dc_fixpt_from_fraction(rec_in->y * (long long)stream->dst.height,
883 stream->src.height);
884 rec_out.y = stream->dst.y + dc_fixpt_round(temp);
885
886 temp = dc_fixpt_from_fraction(
887 (rec_in->y + rec_in->height) * (long long)stream->dst.height,
888 stream->src.height);
889 rec_out.height = stream->dst.y + dc_fixpt_round(temp) - rec_out.y;
890
891 return rec_out;
892 }
893
894 static struct rect calculate_mpc_slice_in_timing_active(
895 struct pipe_ctx *pipe_ctx,
896 struct rect *plane_clip_rec)
897 {
898 const struct dc_stream_state *stream = pipe_ctx->stream;
899 int mpc_slice_count = resource_get_mpc_slice_count(pipe_ctx);
900 int mpc_slice_idx = resource_get_mpc_slice_index(pipe_ctx);
901 int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1;
902 struct rect mpc_rec;
903
904 mpc_rec.width = plane_clip_rec->width / mpc_slice_count;
905 mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
906 mpc_rec.height = plane_clip_rec->height;
907 mpc_rec.y = plane_clip_rec->y;
908 ASSERT(mpc_slice_count == 1 ||
909 stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE ||
910 mpc_rec.width % 2 == 0);
911
912 if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
913 mpc_rec.x -= (mpc_rec.width * mpc_slice_idx);
914
915 /* extra pixels in the division remainder need to go to the pipes after
916 * the extra pixel index minus one (epimo), which is defined above.
917 */
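/* Worked example: a plane clip 1923 pixels wide split across 2 MPC slices gives
 * mpc_rec.width = 961 and epimo = 2 - (1923 % 2) - 1 = 0, so slice index 1 is
 * widened by one pixel to 962 and the two slices together cover all 1923 pixels.
 */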
918 if (mpc_slice_idx > epimo) {
919 mpc_rec.x += mpc_slice_idx - epimo - 1;
920 mpc_rec.width += 1;
921 }
922
923 if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
924 ASSERT(mpc_rec.height % 2 == 0);
925 mpc_rec.height /= 2;
926 }
927 return mpc_rec;
928 }
929
930 static void calculate_adjust_recout_for_visual_confirm(struct pipe_ctx *pipe_ctx,
931 int *base_offset, int *dpp_offset)
932 {
933 struct dc *dc = pipe_ctx->stream->ctx->dc;
934 *base_offset = 0;
935 *dpp_offset = 0;
936
937 if (dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE || !pipe_ctx->plane_res.dpp)
938 return;
939
940 *dpp_offset = pipe_ctx->stream->timing.v_addressable / VISUAL_CONFIRM_DPP_OFFSET_DENO;
941 *dpp_offset *= pipe_ctx->plane_res.dpp->inst;
942
943 if ((dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_BASE_MIN) &&
944 dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_BASE_MAX)
945 *base_offset = dc->debug.visual_confirm_rect_height;
946 else
947 *base_offset = VISUAL_CONFIRM_BASE_DEFAULT;
948 }
949
950 static void reverse_adjust_recout_for_visual_confirm(struct rect *recout,
951 struct pipe_ctx *pipe_ctx)
952 {
953 int dpp_offset, base_offset;
954
955 calculate_adjust_recout_for_visual_confirm(pipe_ctx, &base_offset,
956 &dpp_offset);
957 recout->height += base_offset;
958 recout->height += dpp_offset;
959 }
960
961 static void adjust_recout_for_visual_confirm(struct rect *recout,
962 struct pipe_ctx *pipe_ctx)
963 {
964 int dpp_offset, base_offset;
965
966 calculate_adjust_recout_for_visual_confirm(pipe_ctx, &base_offset,
967 &dpp_offset);
968 recout->height -= base_offset;
969 recout->height -= dpp_offset;
970 }
971
972 /*
973 * The function maps a plane clip from Stream Source Space to ODM Slice Space
974 * and calculates the rect of the overlapping area between the MPC slice of the
975 * plane clip, the ODM slice associated with the pipe context and the stream destination rect.
976 */
977 static void calculate_recout(struct pipe_ctx *pipe_ctx)
978 {
979 /*
980 * A plane clip represents the desired plane size and position in Stream
981 * Source Space. Stream Source is the destination where all planes are
982 * blended (i.e. positioned, scaled and overlaid). It is a canvas where
983 * all planes associated with the current stream are drawn together.
984 * After Stream Source is completed, we will further scale and
985 * reposition the entire canvas of the stream source to Stream
986 * Destination in Timing Active Space. This could be due to a display
987 * overscan adjustment, where we need to rescale and reposition all
988 * the planes so they fit into a TV with overscan, or to downscale/
989 * upscale features such as GPU scaling or VSR.
990 *
991 * This two-step blending is a virtual procedure in software. In
992 * hardware there is no such thing as Stream Source; all planes are
993 * blended once in Timing Active Space. Software virtualizes a Stream
994 * Source space to decouple the math complexity so scaling param
995 * calculation focuses on one step at a time.
996 *
997 * In the following two diagrams, user applied 10% overscan adjustment
998 * so the Stream Source needs to be scaled down a little before mapping
999 * to Timing Active Space. As a result the Plane Clip is also scaled
1000 * down by the same ratio, Plane Clip position (i.e. x and y) with
1001 * respect to Stream Source is also scaled down. To map it in Timing
1002 * Active Space additional x and y offsets from Stream Destination are
1003 * added to Plane Clip as well.
1004 *
1005 * Stream Source Space
1006 * ------------
1007 * __________________________________________________
1008 * |Stream Source (3840 x 2160) ^ |
1009 * | y |
1010 * | | |
1011 * | __________________V |
1012 * |<-- x -->|Plane Clip/////////| |
1013 * | |(pre scale)////////| |
1014 * | |///////////////////| |
1015 * | |///////////////////| |
1016 * | |///////////////////| |
1017 * | |///////////////////| |
1018 * | |///////////////////| |
1019 * | |
1020 * | |
1021 * |__________________________________________________|
1022 *
1023 *
1024 * Timing Active Space (3840 x 2160)
1025 * ---------------------------------
1026 *
1027 * Timing Active
1028 * __________________________________________________
1029 * | y_____________________________________________ |
1030 * |x |Stream Destination (3456 x 1944) | |
1031 * | | | |
1032 * | | __________________ | |
1033 * | | |Plane Clip////////| | |
1034 * | | |(post scale)//////| | |
1035 * | | |//////////////////| | |
1036 * | | |//////////////////| | |
1037 * | | |//////////////////| | |
1038 * | | |//////////////////| | |
1039 * | | | |
1040 * | | | |
1041 * | |____________________________________________| |
1042 * |__________________________________________________|
1043 *
1044 *
1045 * In Timing Active Space a plane clip could be further sliced into
1046 * pieces called MPC slices. Each Pipe Context is responsible for
1047 * processing only one MPC slice so the plane processing workload can be
1048 * distributed to multiple DPP Pipes. MPC slices could be blended
1049 * together to a single ODM slice. Each ODM slice is responsible for
1050 * processing a portion of Timing Active divided horizontally so the
1051 * output pixel processing workload can be distributed to multiple OPP
1052 * pipes. All ODM slices are mapped together in the ODM block so MPC
1053 * slices belonging to different ODM slices can be pieced together to
1054 * form a single image in Timing Active. An MPC slice must belong to a
1055 * single ODM slice. If an MPC slice goes across an ODM slice boundary, it
1056 * needs to be divided into two MPC slices, one for each ODM slice.
1057 *
1058 * In the following diagram the output pixel processing workload is
1059 * divided horizontally into two ODM slices one for each OPP blend tree.
1060 * OPP0 blend tree is responsible for processing left half of Timing
1061 * Active, while OPP2 blend tree is responsible for processing right
1062 * half.
1063 *
1064 * The plane has two MPC slices. However, since the right MPC slice goes
1065 * across the ODM boundary, two DPP pipes are needed, one for each OPP blend
1066 * tree (i.e. DPP1 for the OPP0 blend tree and DPP2 for the OPP2 blend tree).
1067 *
1068 * Assuming that we have a Pipe Context associated with OPP0 and DPP1
1069 * working on processing the plane in the diagram. We want to know the
1070 * width and height of the shaded rectangle and its relative position
1071 * with respect to the ODM slice0. This is called the recout of the pipe
1072 * context.
1073 *
1074 * Planes can be at arbitrary size and position and there could be an
1075 * arbitrary number of MPC and ODM slices. The algorithm needs to take
1076 * all scenarios into account.
1077 *
1078 * Timing Active Space (3840 x 2160)
1079 * ---------------------------------
1080 *
1081 * Timing Active
1082 * __________________________________________________
1083 * |OPP0(ODM slice0)^ |OPP2(ODM slice1) |
1084 * | y | |
1085 * | | <- w -> |
1086 * | _____V________|____ |
1087 * | |DPP0 ^ |DPP1 |DPP2| |
1088 * |<------ x |-----|->|/////| | |
1089 * | | | |/////| | |
1090 * | | h |/////| | |
1091 * | | | |/////| | |
1092 * | |_____V__|/////|____| |
1093 * | | |
1094 * | | |
1095 * | | |
1096 * |_________________________|________________________|
1097 *
1098 *
1099 */
1100 struct rect plane_clip;
1101 struct rect mpc_slice_of_plane_clip;
1102 struct rect odm_slice_src;
1103 struct rect overlapping_area;
1104
1105 plane_clip = calculate_plane_rec_in_timing_active(pipe_ctx,
1106 &pipe_ctx->plane_state->clip_rect);
1107 /* guard plane clip from drawing beyond stream dst here */
1108 plane_clip = intersect_rec(&plane_clip,
1109 &pipe_ctx->stream->dst);
1110 mpc_slice_of_plane_clip = calculate_mpc_slice_in_timing_active(
1111 pipe_ctx, &plane_clip);
1112 odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
1113 overlapping_area = intersect_rec(&mpc_slice_of_plane_clip, &odm_slice_src);
1114 if (overlapping_area.height > 0 &&
1115 overlapping_area.width > 0) {
1116 /* shift the overlapping area so it is with respect to current
1117 * ODM slice source's position
1118 */
1119 pipe_ctx->plane_res.scl_data.recout = shift_rec(
1120 &overlapping_area,
1121 -odm_slice_src.x, -odm_slice_src.y);
1122 adjust_recout_for_visual_confirm(
1123 &pipe_ctx->plane_res.scl_data.recout,
1124 pipe_ctx);
1125 } else {
1126 /* if there is no overlap, zero recout */
1127 memset(&pipe_ctx->plane_res.scl_data.recout, 0,
1128 sizeof(struct rect));
1129 }
1130
1131 }
1132
1133 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
1134 {
1135 const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1136 const struct dc_stream_state *stream = pipe_ctx->stream;
1137 struct rect surf_src = plane_state->src_rect;
1138 const int in_w = stream->src.width;
1139 const int in_h = stream->src.height;
1140 const int out_w = stream->dst.width;
1141 const int out_h = stream->dst.height;
1142
1143 /* Swap surf_src height and width since scaling ratios are in recout rotation */
1144 if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
1145 pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
1146 swap(surf_src.height, surf_src.width);
1147
1148 pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
1149 surf_src.width,
1150 plane_state->dst_rect.width);
1151 pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_from_fraction(
1152 surf_src.height,
1153 plane_state->dst_rect.height);
1154
1155 if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
1156 pipe_ctx->plane_res.scl_data.ratios.horz.value *= 2;
1157 else if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
1158 pipe_ctx->plane_res.scl_data.ratios.vert.value *= 2;
1159
1160 pipe_ctx->plane_res.scl_data.ratios.vert.value = div64_s64(
1161 pipe_ctx->plane_res.scl_data.ratios.vert.value * in_h, out_h);
1162 pipe_ctx->plane_res.scl_data.ratios.horz.value = div64_s64(
1163 pipe_ctx->plane_res.scl_data.ratios.horz.value * in_w, out_w);
1164
1165 pipe_ctx->plane_res.scl_data.ratios.horz_c = pipe_ctx->plane_res.scl_data.ratios.horz;
1166 pipe_ctx->plane_res.scl_data.ratios.vert_c = pipe_ctx->plane_res.scl_data.ratios.vert;
1167
1168 if (pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP8
1169 || pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP10) {
1170 pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
1171 pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
1172 }
1173 pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_truncate(
1174 pipe_ctx->plane_res.scl_data.ratios.horz, 19);
1175 pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_truncate(
1176 pipe_ctx->plane_res.scl_data.ratios.vert, 19);
1177 pipe_ctx->plane_res.scl_data.ratios.horz_c = dc_fixpt_truncate(
1178 pipe_ctx->plane_res.scl_data.ratios.horz_c, 19);
1179 pipe_ctx->plane_res.scl_data.ratios.vert_c = dc_fixpt_truncate(
1180 pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
1181 }
1182
1183
1184 /*
1185 * We calculate the vp offset, size and inits here based entirely on the scaling
1186 * ratios and recout, for pixel-perfect pipe combine.
1187 */
1188 static void calculate_init_and_vp(
1189 bool flip_scan_dir,
1190 int recout_offset_within_recout_full,
1191 int recout_size,
1192 int src_size,
1193 int taps,
1194 struct fixed31_32 ratio,
1195 struct fixed31_32 *init,
1196 int *vp_offset,
1197 int *vp_size)
1198 {
1199 struct fixed31_32 temp;
1200 int int_part;
1201
1202 /*
1203 * The first tap starts sampling pixel number <init_int_part>, corresponding to recout
1204 * pixel 1. The next recout pixel samples the int part of <init + scaling ratio> and so on.
1205 * All following calculations are based on this logic.
1206 *
1207 * Init is calculated according to the formula:
1208 * init = (scaling_ratio + number_of_taps + 1) / 2
1209 * init_bot = init + scaling_ratio
1210 * to get a pixel-perfect combine, add the fraction left over from calculating the vp offset
1211 */
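/* For example, with ratio = 1.5 and taps = 4 the formula gives
 * init = (1.5 + 4 + 1) / 2 = 3.25, before the leftover fraction from the
 * vp offset calculation below is added in.
 */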
1212 temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
1213 *vp_offset = dc_fixpt_floor(temp);
1214 temp.value &= 0xffffffff;
1215 *init = dc_fixpt_truncate(dc_fixpt_add(dc_fixpt_div_int(
1216 dc_fixpt_add_int(ratio, taps + 1), 2), temp), 19);
1217 /*
1218 * If the viewport has a non-zero offset and there are more taps than covered by init, then
1219 * we should decrease the offset and increase init so we are never sampling
1220 * outside of the viewport.
1221 */
1222 int_part = dc_fixpt_floor(*init);
1223 if (int_part < taps) {
1224 int_part = taps - int_part;
1225 if (int_part > *vp_offset)
1226 int_part = *vp_offset;
1227 *vp_offset -= int_part;
1228 *init = dc_fixpt_add_int(*init, int_part);
1229 }
1230 /*
1231 * If taps are sampling outside of the viewport at the end of recout and there are more
1232 * pixels available in the surface, we should increase the viewport size; regardless, set
1233 * vp to only what is used.
1234 */
1235 temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
1236 *vp_size = dc_fixpt_floor(temp);
1237 if (*vp_size + *vp_offset > src_size)
1238 *vp_size = src_size - *vp_offset;
1239
1240 /* We did all the math assuming we are scanning in the same direction as the display does;
1241 * however, mirror/rotation changes how the vp scans vs how it is offset. If the scan direction
1242 * is flipped we simply need to calculate the offset from the other side of the plane.
1243 * Note that outside of the viewport all scaling hardware works in recout space.
1244 */
1245 if (flip_scan_dir)
1246 *vp_offset = src_size - *vp_offset - *vp_size;
1247 }
1248
1249 static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
1250 {
1251 const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1252 struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
1253 struct rect src = plane_state->src_rect;
1254 struct rect recout_dst_in_active_timing;
1255 struct rect recout_clip_in_active_timing;
1256 struct rect recout_clip_in_recout_dst;
1257 struct rect overlap_in_active_timing;
1258 struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
1259 int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
1260 || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
1261 bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
1262
1263 recout_clip_in_active_timing = shift_rec(
1264 &data->recout, odm_slice_src.x, odm_slice_src.y);
1265 recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
1266 pipe_ctx, &plane_state->dst_rect);
1267 overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
1268 &recout_dst_in_active_timing);
1269 if (overlap_in_active_timing.width > 0 &&
1270 overlap_in_active_timing.height > 0)
1271 recout_clip_in_recout_dst = shift_rec(&overlap_in_active_timing,
1272 -recout_dst_in_active_timing.x,
1273 -recout_dst_in_active_timing.y);
1274 else
1275 memset(&recout_clip_in_recout_dst, 0, sizeof(struct rect));
1276
1277 /*
1278 * Work in recout rotation since that requires less transformations
1279 */
1280 get_vp_scan_direction(
1281 plane_state->rotation,
1282 plane_state->horizontal_mirror,
1283 &orthogonal_rotation,
1284 &flip_vert_scan_dir,
1285 &flip_horz_scan_dir);
1286
1287 if (orthogonal_rotation) {
1288 swap(src.width, src.height);
1289 swap(flip_vert_scan_dir, flip_horz_scan_dir);
1290 }
1291
1292 calculate_init_and_vp(
1293 flip_horz_scan_dir,
1294 recout_clip_in_recout_dst.x,
1295 data->recout.width,
1296 src.width,
1297 data->taps.h_taps,
1298 data->ratios.horz,
1299 &data->inits.h,
1300 &data->viewport.x,
1301 &data->viewport.width);
1302 calculate_init_and_vp(
1303 flip_horz_scan_dir,
1304 recout_clip_in_recout_dst.x,
1305 data->recout.width,
1306 src.width / vpc_div,
1307 data->taps.h_taps_c,
1308 data->ratios.horz_c,
1309 &data->inits.h_c,
1310 &data->viewport_c.x,
1311 &data->viewport_c.width);
1312 calculate_init_and_vp(
1313 flip_vert_scan_dir,
1314 recout_clip_in_recout_dst.y,
1315 data->recout.height,
1316 src.height,
1317 data->taps.v_taps,
1318 data->ratios.vert,
1319 &data->inits.v,
1320 &data->viewport.y,
1321 &data->viewport.height);
1322 calculate_init_and_vp(
1323 flip_vert_scan_dir,
1324 recout_clip_in_recout_dst.y,
1325 data->recout.height,
1326 src.height / vpc_div,
1327 data->taps.v_taps_c,
1328 data->ratios.vert_c,
1329 &data->inits.v_c,
1330 &data->viewport_c.y,
1331 &data->viewport_c.height);
1332 if (orthogonal_rotation) {
1333 swap(data->viewport.x, data->viewport.y);
1334 swap(data->viewport.width, data->viewport.height);
1335 swap(data->viewport_c.x, data->viewport_c.y);
1336 swap(data->viewport_c.width, data->viewport_c.height);
1337 }
1338 data->viewport.x += src.x;
1339 data->viewport.y += src.y;
1340 ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
1341 data->viewport_c.x += src.x / vpc_div;
1342 data->viewport_c.y += src.y / vpc_div;
1343 }
1344
1345 static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream)
1346 {
1347 uint32_t refresh_rate;
1348 struct dc *dc = stream->ctx->dc;
1349
1350 refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 +
1351 stream->timing.v_total * stream->timing.h_total - (uint64_t)1);
1352 refresh_rate = div_u64(refresh_rate, stream->timing.v_total);
1353 refresh_rate = div_u64(refresh_rate, stream->timing.h_total);
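/* adding (v_total * h_total - 1) before the divisions rounds the refresh rate up to the next whole Hz */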
1354
1355 /* If there's any stream that fits the SubVP high refresh criteria,
1356 * we must return true. This is because cursor updates are asynchronous
1357 * with full updates, so we could transition into a SubVP config and
1358 * remain in HW cursor mode if there's no cursor update which will
1359 * then cause corruption.
1360 */
1361 if ((refresh_rate >= 120 && refresh_rate <= 175 &&
1362 stream->timing.v_addressable >= 1080 &&
1363 stream->timing.v_addressable <= 2160) &&
1364 (dc->current_state->stream_count > 1 ||
1365 (dc->current_state->stream_count == 1 && !stream->allow_freesync)))
1366 return true;
1367
1368 return false;
1369 }
1370
1371 static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern(
1372 enum dp_test_pattern test_pattern)
1373 {
1374 enum controller_dp_test_pattern controller_test_pattern;
1375
1376 switch (test_pattern) {
1377 case DP_TEST_PATTERN_COLOR_SQUARES:
1378 controller_test_pattern =
1379 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
1380 break;
1381 case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
1382 controller_test_pattern =
1383 CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
1384 break;
1385 case DP_TEST_PATTERN_VERTICAL_BARS:
1386 controller_test_pattern =
1387 CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
1388 break;
1389 case DP_TEST_PATTERN_HORIZONTAL_BARS:
1390 controller_test_pattern =
1391 CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
1392 break;
1393 case DP_TEST_PATTERN_COLOR_RAMP:
1394 controller_test_pattern =
1395 CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
1396 break;
1397 default:
1398 controller_test_pattern =
1399 CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
1400 break;
1401 }
1402
1403 return controller_test_pattern;
1404 }
1405
1406 static enum controller_dp_color_space convert_dp_to_controller_color_space(
1407 enum dp_test_pattern_color_space color_space)
1408 {
1409 enum controller_dp_color_space controller_color_space;
1410
1411 switch (color_space) {
1412 case DP_TEST_PATTERN_COLOR_SPACE_RGB:
1413 controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
1414 break;
1415 case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
1416 controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
1417 break;
1418 case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
1419 controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
1420 break;
1421 case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
1422 default:
1423 controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
1424 break;
1425 }
1426
1427 return controller_color_space;
1428 }
1429
1430 void resource_build_test_pattern_params(struct resource_context *res_ctx,
1431 struct pipe_ctx *otg_master)
1432 {
1433 struct pipe_ctx *opp_heads[MAX_PIPES];
1434 struct test_pattern_params *params;
1435 int odm_cnt;
1436 enum controller_dp_test_pattern controller_test_pattern;
1437 enum controller_dp_color_space controller_color_space;
1438 enum dc_color_depth color_depth = otg_master->stream->timing.display_color_depth;
1439 struct rect odm_slice_src;
1440 int i;
1441
1442 controller_test_pattern = convert_dp_to_controller_test_pattern(
1443 otg_master->stream->test_pattern.type);
1444 controller_color_space = convert_dp_to_controller_color_space(
1445 otg_master->stream->test_pattern.color_space);
1446
1447 if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE)
1448 return;
1449
1450 odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads);
1451
1452 for (i = 0; i < odm_cnt; i++) {
1453 odm_slice_src = resource_get_odm_slice_src_rect(opp_heads[i]);
1454 params = &opp_heads[i]->stream_res.test_pattern_params;
1455 params->test_pattern = controller_test_pattern;
1456 params->color_space = controller_color_space;
1457 params->color_depth = color_depth;
1458 params->height = odm_slice_src.height;
1459 params->offset = odm_slice_src.x;
1460 params->width = odm_slice_src.width;
1461 }
1462 }
1463
1464 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
1465 {
1466 const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1467 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
1468 const struct rect odm_slice_src = resource_get_odm_slice_src_rect(pipe_ctx);
1469 struct scaling_taps temp = {0};
1470 bool res = false;
1471
1472 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
1473
1474 /* Invalid input */
1475 if (!plane_state ||
1476 !plane_state->dst_rect.width ||
1477 !plane_state->dst_rect.height ||
1478 !plane_state->src_rect.width ||
1479 !plane_state->src_rect.height) {
1480 ASSERT(0);
1481 return false;
1482 }
1483
1484 /* Timing borders are part of vactive that we are also supposed to skip in addition
1485 * to any stream dst offset. Since dm logic assumes dst is in addressable
1486 * space we need to add the left and top borders to dst offsets temporarily.
1487 * TODO: fix in DM, stream dst is supposed to be in vactive
1488 */
1489 pipe_ctx->stream->dst.x += timing->h_border_left;
1490 pipe_ctx->stream->dst.y += timing->v_border_top;
1491
1492 /* Calculate H and V active size */
1493 pipe_ctx->plane_res.scl_data.h_active = odm_slice_src.width;
1494 pipe_ctx->plane_res.scl_data.v_active = odm_slice_src.height;
1495 pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
1496 pipe_ctx->plane_state->format);
1497
1498 #if defined(CONFIG_DRM_AMD_DC_FP)
1499 if ((pipe_ctx->stream->ctx->dc->config.use_spl) && (!pipe_ctx->stream->ctx->dc->debug.disable_spl)) {
1500 struct spl_in *spl_in = &pipe_ctx->plane_res.spl_in;
1501 struct spl_out *spl_out = &pipe_ctx->plane_res.spl_out;
1502
1503 if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
1504 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
1505 else
1506 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
1507
1508 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
1509
1510 // Convert pipe_ctx to respective input params for SPL
1511 translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in);
1512 /* Pass visual confirm debug information */
1513 calculate_adjust_recout_for_visual_confirm(pipe_ctx,
1514 &spl_in->debug.visual_confirm_base_offset,
1515 &spl_in->debug.visual_confirm_dpp_offset);
1516 // Set SPL output parameters to dscl_prog_data to be used for hw registers
1517 spl_out->dscl_prog_data = resource_get_dscl_prog_data(pipe_ctx);
1518 // Calculate scaler parameters from SPL
1519 res = spl_calculate_scaler_params(spl_in, spl_out);
1520 // Convert respective out params from SPL to scaler data
1521 translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
1522
1523 /* Ignore scaler failure if pipe context plane is phantom plane */
1524 if (!res && plane_state->is_phantom)
1525 res = true;
1526 } else {
1527 #endif
1528 /* depends on h_active */
1529 calculate_recout(pipe_ctx);
1530 /* depends on pixel format */
1531 calculate_scaling_ratios(pipe_ctx);
1532
1533 /*
1534 * LB calculations depend on vp size, h/v_active and scaling ratios
1535 * Setting line buffer pixel depth to 24bpp yields banding
1536 * on certain displays, such as the Sharp 4k. 36bpp is needed
1537 * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
1538 * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
1539 * precision on DCN display engines, but apparently not for DCE, as
1540 * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have
1541 * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth,
1542 * neither does DCE-8 at 4k resolution, nor DCE-11.2 (broken identity pixel
1543 * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.
1544 */
1545 if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
1546 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
1547 else
1548 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
1549
1550 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
1551
1552 // get TAP value with 100x100 dummy data for max scaling quality; override
1553 // if a different scaling quality is required
1554 pipe_ctx->plane_res.scl_data.viewport.width = 100;
1555 pipe_ctx->plane_res.scl_data.viewport.height = 100;
1556 pipe_ctx->plane_res.scl_data.viewport_c.width = 100;
1557 pipe_ctx->plane_res.scl_data.viewport_c.height = 100;
1558 if (pipe_ctx->plane_res.xfm != NULL)
1559 res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
1560 pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
1561
1562 if (pipe_ctx->plane_res.dpp != NULL)
1563 res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
1564 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
1565
1566 temp = pipe_ctx->plane_res.scl_data.taps;
1567
1568 calculate_inits_and_viewports(pipe_ctx);
1569
1570 if (pipe_ctx->plane_res.xfm != NULL)
1571 res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
1572 pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
1573
1574 if (pipe_ctx->plane_res.dpp != NULL)
1575 res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
1576 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
1577
1578
1579 if (!res) {
1580 /* Try 24 bpp linebuffer */
1581 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
1582
1583 if (pipe_ctx->plane_res.xfm != NULL)
1584 res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
1585 pipe_ctx->plane_res.xfm,
1586 &pipe_ctx->plane_res.scl_data,
1587 &plane_state->scaling_quality);
1588
1589 if (pipe_ctx->plane_res.dpp != NULL)
1590 res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
1591 pipe_ctx->plane_res.dpp,
1592 &pipe_ctx->plane_res.scl_data,
1593 &plane_state->scaling_quality);
1594 }
1595
1596 /* Ignore scaler failure if pipe context plane is phantom plane */
1597 if (!res && plane_state->is_phantom)
1598 res = true;
1599
1600 if (res && (pipe_ctx->plane_res.scl_data.taps.v_taps != temp.v_taps ||
1601 pipe_ctx->plane_res.scl_data.taps.h_taps != temp.h_taps ||
1602 pipe_ctx->plane_res.scl_data.taps.v_taps_c != temp.v_taps_c ||
1603 pipe_ctx->plane_res.scl_data.taps.h_taps_c != temp.h_taps_c))
1604 calculate_inits_and_viewports(pipe_ctx);
1605
1606 /*
1607 * Handle side-by-side and top-bottom 3D recout offsets after vp calculation,
1608 * since 3D is special and needs to calculate the vp as if there were no recout offset.
1609 * This may break with rotation; good thing we aren't mixing hw rotation and 3D.
1610 */
1611 if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == plane_state) {
1612 ASSERT(plane_state->rotation == ROTATION_ANGLE_0 ||
1613 (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_TOP_AND_BOTTOM &&
1614 pipe_ctx->stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE));
1615 if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
1616 pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
1617 else if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
1618 pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
1619 }
1620
1621 /* Clamp minimum viewport size */
1622 if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
1623 pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
1624 if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
1625 pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
1626 #ifdef CONFIG_DRM_AMD_DC_FP
1627 }
1628 #endif
1629 DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
1630 "src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
1631 __func__,
1632 pipe_ctx->pipe_idx,
1633 pipe_ctx->plane_res.scl_data.viewport.height,
1634 pipe_ctx->plane_res.scl_data.viewport.width,
1635 pipe_ctx->plane_res.scl_data.viewport.x,
1636 pipe_ctx->plane_res.scl_data.viewport.y,
1637 pipe_ctx->plane_res.scl_data.recout.height,
1638 pipe_ctx->plane_res.scl_data.recout.width,
1639 pipe_ctx->plane_res.scl_data.recout.x,
1640 pipe_ctx->plane_res.scl_data.recout.y,
1641 pipe_ctx->plane_res.scl_data.h_active,
1642 pipe_ctx->plane_res.scl_data.v_active,
1643 plane_state->src_rect.height,
1644 plane_state->src_rect.width,
1645 plane_state->src_rect.x,
1646 plane_state->src_rect.y,
1647 plane_state->dst_rect.height,
1648 plane_state->dst_rect.width,
1649 plane_state->dst_rect.x,
1650 plane_state->dst_rect.y,
1651 plane_state->clip_rect.height,
1652 plane_state->clip_rect.width,
1653 plane_state->clip_rect.x,
1654 plane_state->clip_rect.y);
1655
1656 pipe_ctx->stream->dst.x -= timing->h_border_left;
1657 pipe_ctx->stream->dst.y -= timing->v_border_top;
1658
1659 return res;
1660 }
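/*
 * Rough sequence of the legacy (non-SPL) path above, for reference:
 * recout -> scaling ratios -> line buffer depth -> optimal taps (probed
 * with a dummy 100x100 viewport) -> inits/viewports -> taps re-derived,
 * falling back to a 24bpp line buffer when no tap configuration
 * validates.
 */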
1661
1662 bool resource_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
1663 {
1664 struct pipe_ctx *test_pipe, *split_pipe;
1665 struct rect r1 = pipe_ctx->plane_res.scl_data.recout;
1666 int r1_right, r1_bottom;
1667 int cur_layer = pipe_ctx->plane_state->layer_index;
1668
1669 reverse_adjust_recout_for_visual_confirm(&r1, pipe_ctx);
1670 r1_right = r1.x + r1.width;
1671 r1_bottom = r1.y + r1.height;
1672
1673 /**
1674 * Disable the cursor if there's another pipe above this with a
1675 * plane that contains this pipe's viewport to prevent double cursor
1676 * and incorrect scaling artifacts.
1677 */
1678 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
1679 test_pipe = test_pipe->top_pipe) {
1680 struct rect r2;
1681 int r2_right, r2_bottom;
1682 // Skip invisible layer and pipe-split plane on same layer
1683 if (!test_pipe->plane_state ||
1684 !test_pipe->plane_state->visible ||
1685 test_pipe->plane_state->layer_index == cur_layer)
1686 continue;
1687
1688 r2 = test_pipe->plane_res.scl_data.recout;
1689 reverse_adjust_recout_for_visual_confirm(&r2, test_pipe);
1690 r2_right = r2.x + r2.width;
1691 r2_bottom = r2.y + r2.height;
1692
1693 /**
1694 * If there is another half plane on the same layer because of
1695 * pipe split, merge them together by matching height.
1696 */
1697 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
1698 split_pipe = split_pipe->top_pipe)
1699 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
1700 struct rect r2_half;
1701
1702 r2_half = split_pipe->plane_res.scl_data.recout;
1703 reverse_adjust_recout_for_visual_confirm(&r2_half, split_pipe);
1704 r2.x = min(r2_half.x, r2.x);
1705 r2.width = r2.width + r2_half.width;
1706 r2_right = r2.x + r2.width;
1707 r2_bottom = min(r2_bottom, r2_half.y + r2_half.height);
1708 break;
1709 }
1710
1711 if (r1.x >= r2.x && r1.y >= r2.y && r1_right <= r2_right && r1_bottom <= r2_bottom)
1712 return true;
1713 }
1714
1715 return false;
1716 }
1717
1718
1719 enum dc_status resource_build_scaling_params_for_context(
1720 const struct dc *dc,
1721 struct dc_state *context)
1722 {
1723 int i;
1724
1725 for (i = 0; i < MAX_PIPES; i++) {
1726 if (context->res_ctx.pipe_ctx[i].plane_state != NULL &&
1727 context->res_ctx.pipe_ctx[i].stream != NULL)
1728 if (!resource_build_scaling_params(&context->res_ctx.pipe_ctx[i]))
1729 return DC_FAIL_SCALING;
1730 }
1731
1732 return DC_OK;
1733 }
1734
1735 struct pipe_ctx *resource_find_free_secondary_pipe_legacy(
1736 struct resource_context *res_ctx,
1737 const struct resource_pool *pool,
1738 const struct pipe_ctx *primary_pipe)
1739 {
1740 int i;
1741 struct pipe_ctx *secondary_pipe = NULL;
1742
1743 /*
1744 * We add a preferred pipe mapping to avoid the chance that
1745 * MPCCs already in use will need to be reassigned to other trees.
1746 * For example, if we went with the strict, assign backwards logic:
1747 *
1748 * (State 1)
1749 * Display A on, no surface, top pipe = 0
1750 * Display B on, no surface, top pipe = 1
1751 *
1752 * (State 2)
1753 * Display A on, no surface, top pipe = 0
1754 * Display B on, surface enable, top pipe = 1, bottom pipe = 5
1755 *
1756 * (State 3)
1757 * Display A on, surface enable, top pipe = 0, bottom pipe = 5
1758 * Display B on, surface enable, top pipe = 1, bottom pipe = 4
1759 *
1760 * The state 2->3 transition requires remapping MPCC 5 from display B
1761 * to display A.
1762 *
1763 * However, with the preferred pipe logic, state 2 would look like:
1764 *
1765 * (State 2)
1766 * Display A on, no surface, top pipe = 0
1767 * Display B on, surface enable, top pipe = 1, bottom pipe = 4
1768 *
1769 * This would then cause 2->3 to not require remapping any MPCCs.
1770 */
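/*
 * Illustrative arithmetic (assuming a 6-pipe pool, matching the example
 * above): with pool->pipe_count == 6 and a primary pipe_idx of 1, the
 * preferred secondary index is (6 - 1) - 1 == 4, which is the
 * "bottom pipe = 4" assignment shown for state 2.
 */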
1771 if (primary_pipe) {
1772 int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
1773 if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
1774 secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
1775 secondary_pipe->pipe_idx = preferred_pipe_idx;
1776 }
1777 }
1778
1779 /*
1780 * search backwards for the second pipe to keep pipe
1781 * assignment more consistent
1782 */
1783 if (!secondary_pipe)
1784 for (i = pool->pipe_count - 1; i >= 0; i--) {
1785 if (res_ctx->pipe_ctx[i].stream == NULL) {
1786 secondary_pipe = &res_ctx->pipe_ctx[i];
1787 secondary_pipe->pipe_idx = i;
1788 break;
1789 }
1790 }
1791
1792 return secondary_pipe;
1793 }
1794
1795 int resource_find_free_pipe_used_as_sec_opp_head_by_cur_otg_master(
1796 const struct resource_context *cur_res_ctx,
1797 struct resource_context *new_res_ctx,
1798 const struct pipe_ctx *cur_otg_master)
1799 {
1800 const struct pipe_ctx *cur_sec_opp_head = cur_otg_master->next_odm_pipe;
1801 struct pipe_ctx *new_pipe;
1802 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1803
1804 while (cur_sec_opp_head) {
1805 new_pipe = &new_res_ctx->pipe_ctx[cur_sec_opp_head->pipe_idx];
1806 if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1807 free_pipe_idx = cur_sec_opp_head->pipe_idx;
1808 break;
1809 }
1810 cur_sec_opp_head = cur_sec_opp_head->next_odm_pipe;
1811 }
1812
1813 return free_pipe_idx;
1814 }
1815
1816 int resource_find_free_pipe_used_in_cur_mpc_blending_tree(
1817 const struct resource_context *cur_res_ctx,
1818 struct resource_context *new_res_ctx,
1819 const struct pipe_ctx *cur_opp_head)
1820 {
1821 const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe;
1822 struct pipe_ctx *new_pipe;
1823 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1824
1825 while (cur_sec_dpp) {
1826 /* find a free pipe used in the current OPP blend tree;
1827 * this avoids an MPO pipe switching to a different OPP
1828 * blending tree
1829 */
1830 new_pipe = &new_res_ctx->pipe_ctx[cur_sec_dpp->pipe_idx];
1831 if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1832 free_pipe_idx = cur_sec_dpp->pipe_idx;
1833 break;
1834 }
1835 cur_sec_dpp = cur_sec_dpp->bottom_pipe;
1836 }
1837
1838 return free_pipe_idx;
1839 }
1840
1841 int recource_find_free_pipe_not_used_in_cur_res_ctx(
1842 const struct resource_context *cur_res_ctx,
1843 struct resource_context *new_res_ctx,
1844 const struct resource_pool *pool)
1845 {
1846 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1847 const struct pipe_ctx *new_pipe, *cur_pipe;
1848 int i;
1849
1850 for (i = 0; i < pool->pipe_count; i++) {
1851 cur_pipe = &cur_res_ctx->pipe_ctx[i];
1852 new_pipe = &new_res_ctx->pipe_ctx[i];
1853
1854 if (resource_is_pipe_type(cur_pipe, FREE_PIPE) &&
1855 resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1856 free_pipe_idx = i;
1857 break;
1858 }
1859 }
1860
1861 return free_pipe_idx;
1862 }
1863
1864 int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
1865 const struct resource_context *cur_res_ctx,
1866 struct resource_context *new_res_ctx,
1867 const struct resource_pool *pool)
1868 {
1869 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1870 const struct pipe_ctx *new_pipe, *cur_pipe;
1871 int i;
1872
1873 for (i = 0; i < pool->pipe_count; i++) {
1874 cur_pipe = &cur_res_ctx->pipe_ctx[i];
1875 new_pipe = &new_res_ctx->pipe_ctx[i];
1876
1877 if (resource_is_pipe_type(cur_pipe, OTG_MASTER) &&
1878 resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1879 free_pipe_idx = i;
1880 break;
1881 }
1882 }
1883
1884 return free_pipe_idx;
1885 }
1886
1887 int resource_find_free_pipe_used_as_cur_sec_dpp(
1888 const struct resource_context *cur_res_ctx,
1889 struct resource_context *new_res_ctx,
1890 const struct resource_pool *pool)
1891 {
1892 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1893 const struct pipe_ctx *new_pipe, *cur_pipe;
1894 int i;
1895
1896 for (i = 0; i < pool->pipe_count; i++) {
1897 cur_pipe = &cur_res_ctx->pipe_ctx[i];
1898 new_pipe = &new_res_ctx->pipe_ctx[i];
1899
1900 if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
1901 !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
1902 resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1903 free_pipe_idx = i;
1904 break;
1905 }
1906 }
1907
1908 return free_pipe_idx;
1909 }
1910
1911 int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
1912 const struct resource_context *cur_res_ctx,
1913 struct resource_context *new_res_ctx,
1914 const struct resource_pool *pool)
1915 {
1916 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1917 const struct pipe_ctx *new_pipe, *cur_pipe;
1918 int i;
1919
1920 for (i = 0; i < pool->pipe_count; i++) {
1921 cur_pipe = &cur_res_ctx->pipe_ctx[i];
1922 new_pipe = &new_res_ctx->pipe_ctx[i];
1923
1924 if (resource_is_pipe_type(cur_pipe, DPP_PIPE) &&
1925 !resource_is_pipe_type(cur_pipe, OPP_HEAD) &&
1926 resource_get_mpc_slice_index(cur_pipe) > 0 &&
1927 resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1928 free_pipe_idx = i;
1929 break;
1930 }
1931 }
1932
1933 return free_pipe_idx;
1934 }
1935
1936 int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
1937 const struct resource_pool *pool)
1938 {
1939 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
1940 const struct pipe_ctx *new_pipe;
1941 int i;
1942
1943 for (i = 0; i < pool->pipe_count; i++) {
1944 new_pipe = &new_res_ctx->pipe_ctx[i];
1945
1946 if (resource_is_pipe_type(new_pipe, FREE_PIPE)) {
1947 free_pipe_idx = i;
1948 break;
1949 }
1950 }
1951
1952 return free_pipe_idx;
1953 }
1954
1955 bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
1956 {
1957 switch (type) {
1958 case OTG_MASTER:
1959 return !pipe_ctx->prev_odm_pipe &&
1960 !pipe_ctx->top_pipe &&
1961 pipe_ctx->stream;
1962 case OPP_HEAD:
1963 return !pipe_ctx->top_pipe && pipe_ctx->stream;
1964 case DPP_PIPE:
1965 return pipe_ctx->plane_state && pipe_ctx->stream;
1966 case FREE_PIPE:
1967 return !pipe_ctx->plane_state && !pipe_ctx->stream;
1968 default:
1969 return false;
1970 }
1971 }
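/*
 * Classification sketch (illustrative, derived from the checks above):
 * a pipe with a stream but no prev_odm_pipe and no top_pipe is both an
 * OTG_MASTER and an OPP_HEAD; if it also has a plane_state it is
 * additionally a DPP_PIPE; a pipe with neither stream nor plane_state
 * is a FREE_PIPE.
 */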
1972
1973 struct pipe_ctx *resource_get_otg_master_for_stream(
1974 struct resource_context *res_ctx,
1975 const struct dc_stream_state *stream)
1976 {
1977 int i;
1978
1979 for (i = 0; i < MAX_PIPES; i++) {
1980 if (res_ctx->pipe_ctx[i].stream == stream &&
1981 resource_is_pipe_type(&res_ctx->pipe_ctx[i], OTG_MASTER))
1982 return &res_ctx->pipe_ctx[i];
1983 }
1984 return NULL;
1985 }
1986
1987 int resource_get_opp_heads_for_otg_master(const struct pipe_ctx *otg_master,
1988 struct resource_context *res_ctx,
1989 struct pipe_ctx *opp_heads[MAX_PIPES])
1990 {
1991 struct pipe_ctx *opp_head = &res_ctx->pipe_ctx[otg_master->pipe_idx];
1992 struct dc *dc = otg_master->stream->ctx->dc;
1993 int i = 0;
1994
1995 DC_LOGGER_INIT(dc->ctx->logger);
1996
1997 if (!resource_is_pipe_type(otg_master, OTG_MASTER)) {
1998 DC_LOG_WARNING("%s called from a non-OTG-master pipe, something "
1999 "is wrong in the pipe configuration",
2000 __func__);
2001 ASSERT(0);
2002 return 0;
2003 }
2004 while (opp_head) {
2005 ASSERT(i < MAX_PIPES);
2006 opp_heads[i++] = opp_head;
2007 opp_head = opp_head->next_odm_pipe;
2008 }
2009 return i;
2010 }
2011
2012 int resource_get_dpp_pipes_for_opp_head(const struct pipe_ctx *opp_head,
2013 struct resource_context *res_ctx,
2014 struct pipe_ctx *dpp_pipes[MAX_PIPES])
2015 {
2016 struct pipe_ctx *pipe = &res_ctx->pipe_ctx[opp_head->pipe_idx];
2017 int i = 0;
2018
2019 if (!resource_is_pipe_type(opp_head, OPP_HEAD)) {
2020 ASSERT(0);
2021 return 0;
2022 }
2023 while (pipe && resource_is_pipe_type(pipe, DPP_PIPE)) {
2024 ASSERT(i < MAX_PIPES);
2025 dpp_pipes[i++] = pipe;
2026 pipe = pipe->bottom_pipe;
2027 }
2028 return i;
2029 }
2030
2031 int resource_get_dpp_pipes_for_plane(const struct dc_plane_state *plane,
2032 struct resource_context *res_ctx,
2033 struct pipe_ctx *dpp_pipes[MAX_PIPES])
2034 {
2035 int i = 0, j;
2036 struct pipe_ctx *pipe;
2037
2038 for (j = 0; j < MAX_PIPES; j++) {
2039 pipe = &res_ctx->pipe_ctx[j];
2040 if (pipe->plane_state == plane && pipe->prev_odm_pipe == NULL) {
2041 if (resource_is_pipe_type(pipe, OPP_HEAD) ||
2042 pipe->top_pipe->plane_state != plane)
2043 break;
2044 }
2045 }
2046
2047 if (j < MAX_PIPES) {
2048 if (pipe->next_odm_pipe)
2049 while (pipe) {
2050 dpp_pipes[i++] = pipe;
2051 pipe = pipe->next_odm_pipe;
2052 }
2053 else
2054 while (pipe && pipe->plane_state == plane) {
2055 dpp_pipes[i++] = pipe;
2056 pipe = pipe->bottom_pipe;
2057 }
2058 }
2059 return i;
2060 }
2061
2062 struct pipe_ctx *resource_get_otg_master(const struct pipe_ctx *pipe_ctx)
2063 {
2064 struct pipe_ctx *otg_master = resource_get_opp_head(pipe_ctx);
2065
2066 while (otg_master->prev_odm_pipe)
2067 otg_master = otg_master->prev_odm_pipe;
2068 return otg_master;
2069 }
2070
2071 struct pipe_ctx *resource_get_opp_head(const struct pipe_ctx *pipe_ctx)
2072 {
2073 struct pipe_ctx *opp_head = (struct pipe_ctx *) pipe_ctx;
2074
2075 ASSERT(!resource_is_pipe_type(opp_head, FREE_PIPE));
2076 while (opp_head->top_pipe)
2077 opp_head = opp_head->top_pipe;
2078 return opp_head;
2079 }
2080
2081 struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe)
2082 {
2083 struct pipe_ctx *pri_dpp_pipe = (struct pipe_ctx *) dpp_pipe;
2084
2085 ASSERT(resource_is_pipe_type(dpp_pipe, DPP_PIPE));
2086 while (pri_dpp_pipe->prev_odm_pipe)
2087 pri_dpp_pipe = pri_dpp_pipe->prev_odm_pipe;
2088 while (pri_dpp_pipe->top_pipe &&
2089 pri_dpp_pipe->top_pipe->plane_state == pri_dpp_pipe->plane_state)
2090 pri_dpp_pipe = pri_dpp_pipe->top_pipe;
2091 return pri_dpp_pipe;
2092 }
2093
2094
2095 int resource_get_mpc_slice_index(const struct pipe_ctx *pipe_ctx)
2096 {
2097 struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
2098 int index = 0;
2099
2100 while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
2101 index++;
2102 split_pipe = split_pipe->top_pipe;
2103 }
2104
2105 return index;
2106 }
2107
2108 int resource_get_mpc_slice_count(const struct pipe_ctx *pipe)
2109 {
2110 int mpc_split_count = 1;
2111 const struct pipe_ctx *other_pipe = pipe->bottom_pipe;
2112
2113 while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
2114 mpc_split_count++;
2115 other_pipe = other_pipe->bottom_pipe;
2116 }
2117 other_pipe = pipe->top_pipe;
2118 while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
2119 mpc_split_count++;
2120 other_pipe = other_pipe->top_pipe;
2121 }
2122
2123 return mpc_split_count;
2124 }
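/*
 * Example (hypothetical 2-way MPC combine of one plane): the top-most
 * DPP pipe reports resource_get_mpc_slice_index() == 0 and the bottom
 * pipe reports 1, while both report resource_get_mpc_slice_count() == 2.
 */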
2125
2126 int resource_get_odm_slice_count(const struct pipe_ctx *pipe)
2127 {
2128 int odm_split_count = 1;
2129
2130 pipe = resource_get_otg_master(pipe);
2131
2132 while (pipe->next_odm_pipe) {
2133 odm_split_count++;
2134 pipe = pipe->next_odm_pipe;
2135 }
2136 return odm_split_count;
2137 }
2138
2139 int resource_get_odm_slice_index(const struct pipe_ctx *pipe_ctx)
2140 {
2141 int index = 0;
2142
2143 pipe_ctx = resource_get_opp_head(pipe_ctx);
2144 if (!pipe_ctx)
2145 return 0;
2146
2147 while (pipe_ctx->prev_odm_pipe) {
2148 index++;
2149 pipe_ctx = pipe_ctx->prev_odm_pipe;
2150 }
2151
2152 return index;
2153 }
2154
2155 int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
2156 bool is_last_segment)
2157 {
2158 const struct dc_crtc_timing *timing;
2159 int count;
2160 int h_active;
2161 int width;
2162 bool two_pixel_alignment_required = false;
2163
2164 if (!otg_master || !otg_master->stream)
2165 return 0;
2166
2167 timing = &otg_master->stream->timing;
2168 count = resource_get_odm_slice_count(otg_master);
2169 h_active = timing->h_addressable +
2170 timing->h_border_left +
2171 timing->h_border_right +
2172 otg_master->hblank_borrow;
2173 width = h_active / count;
2174
2175 if (otg_master->stream_res.tg)
2176 two_pixel_alignment_required =
2177 otg_master->stream_res.tg->funcs->is_two_pixels_per_container(timing) ||
2178 /*
2179 * 422 is sub-sampled horizontally: one set of chromas
2180 * (Cb/Cr) is shared by two lumas (i.e. two Y values).
2181 * Therefore, even though 422 is still 1 pixel per container,
2182 * the ODM segment width still needs to be 2-pixel aligned.
2183 */
2184 timing->pixel_encoding == PIXEL_ENCODING_YCBCR422;
2185 if ((width % 2) && two_pixel_alignment_required)
2186 width++;
2187
2188 return is_last_segment ?
2189 h_active - width * (count - 1) :
2190 width;
2191 }
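/*
 * Worked example (illustrative numbers only): with h_active == 2562 and
 * 2 ODM slices, width == 2562 / 2 == 1281; if two-pixel alignment is
 * required, non-last slices are bumped to 1282 and the last slice gets
 * the remainder, 2562 - 1282 == 1280.
 */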
2192
2193 struct rect resource_get_odm_slice_dst_rect(struct pipe_ctx *pipe_ctx)
2194 {
2195 const struct dc_stream_state *stream = pipe_ctx->stream;
2196 bool is_last_odm_slice = pipe_ctx->next_odm_pipe == NULL;
2197 struct pipe_ctx *otg_master = resource_get_otg_master(pipe_ctx);
2198 int odm_slice_idx = resource_get_odm_slice_index(pipe_ctx);
2199 int odm_segment_offset = resource_get_odm_slice_dst_width(otg_master, false);
2200 struct rect odm_slice_dst;
2201
2202 odm_slice_dst.x = odm_segment_offset * odm_slice_idx;
2203 odm_slice_dst.width = resource_get_odm_slice_dst_width(otg_master, is_last_odm_slice);
2204 odm_slice_dst.y = 0;
2205 odm_slice_dst.height = stream->timing.v_addressable +
2206 stream->timing.v_border_bottom +
2207 stream->timing.v_border_top;
2208
2209 return odm_slice_dst;
2210 }
2211
2212 struct rect resource_get_odm_slice_src_rect(struct pipe_ctx *pipe_ctx)
2213 {
2214 struct rect odm_slice_dst;
2215 struct rect odm_slice_src;
2216 struct pipe_ctx *opp_head = resource_get_opp_head(pipe_ctx);
2217 struct output_pixel_processor *opp = opp_head->stream_res.opp;
2218 uint32_t left_edge_extra_pixel_count;
2219
2220 odm_slice_dst = resource_get_odm_slice_dst_rect(opp_head);
2221 odm_slice_src = odm_slice_dst;
2222
2223 if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count)
2224 left_edge_extra_pixel_count =
2225 opp->funcs->opp_get_left_edge_extra_pixel_count(
2226 opp, pipe_ctx->stream->timing.pixel_encoding,
2227 resource_is_pipe_type(opp_head, OTG_MASTER));
2228 else
2229 left_edge_extra_pixel_count = 0;
2230
2231 odm_slice_src.x -= left_edge_extra_pixel_count;
2232 odm_slice_src.width += left_edge_extra_pixel_count;
2233
2234 return odm_slice_src;
2235 }
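/*
 * Minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	int slice_idx = resource_get_odm_slice_index(pipe_ctx);
 *	int slice_cnt = resource_get_odm_slice_count(pipe_ctx);
 *	struct rect src = resource_get_odm_slice_src_rect(pipe_ctx);
 *	struct rect dst = resource_get_odm_slice_dst_rect(pipe_ctx);
 *
 * src may start left of dst when the OPP reports extra left-edge pixels
 * for this slice.
 */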
2236
2237 bool resource_is_pipe_topology_changed(const struct dc_state *state_a,
2238 const struct dc_state *state_b)
2239 {
2240 int i;
2241 const struct pipe_ctx *pipe_a, *pipe_b;
2242
2243 if (state_a->stream_count != state_b->stream_count)
2244 return true;
2245
2246 for (i = 0; i < MAX_PIPES; i++) {
2247 pipe_a = &state_a->res_ctx.pipe_ctx[i];
2248 pipe_b = &state_b->res_ctx.pipe_ctx[i];
2249
2250 if (pipe_a->stream && !pipe_b->stream)
2251 return true;
2252 else if (!pipe_a->stream && pipe_b->stream)
2253 return true;
2254
2255 if (pipe_a->plane_state && !pipe_b->plane_state)
2256 return true;
2257 else if (!pipe_a->plane_state && pipe_b->plane_state)
2258 return true;
2259
2260 if (pipe_a->bottom_pipe && pipe_b->bottom_pipe) {
2261 if (pipe_a->bottom_pipe->pipe_idx != pipe_b->bottom_pipe->pipe_idx)
2262 return true;
2263 if ((pipe_a->bottom_pipe->plane_state == pipe_a->plane_state) &&
2264 (pipe_b->bottom_pipe->plane_state != pipe_b->plane_state))
2265 return true;
2266 else if ((pipe_a->bottom_pipe->plane_state != pipe_a->plane_state) &&
2267 (pipe_b->bottom_pipe->plane_state == pipe_b->plane_state))
2268 return true;
2269 } else if (pipe_a->bottom_pipe || pipe_b->bottom_pipe) {
2270 return true;
2271 }
2272
2273 if (pipe_a->next_odm_pipe && pipe_b->next_odm_pipe) {
2274 if (pipe_a->next_odm_pipe->pipe_idx != pipe_b->next_odm_pipe->pipe_idx)
2275 return true;
2276 } else if (pipe_a->next_odm_pipe || pipe_b->next_odm_pipe) {
2277 return true;
2278 }
2279 }
2280 return false;
2281 }
2282
2283 bool resource_is_odm_topology_changed(const struct pipe_ctx *otg_master_a,
2284 const struct pipe_ctx *otg_master_b)
2285 {
2286 const struct pipe_ctx *opp_head_a = otg_master_a;
2287 const struct pipe_ctx *opp_head_b = otg_master_b;
2288
2289 if (!resource_is_pipe_type(otg_master_a, OTG_MASTER) ||
2290 !resource_is_pipe_type(otg_master_b, OTG_MASTER))
2291 return true;
2292
2293 while (opp_head_a && opp_head_b) {
2294 if (opp_head_a->stream_res.opp != opp_head_b->stream_res.opp)
2295 return true;
2296 if ((opp_head_a->next_odm_pipe && !opp_head_b->next_odm_pipe) ||
2297 (!opp_head_a->next_odm_pipe && opp_head_b->next_odm_pipe))
2298 return true;
2299 opp_head_a = opp_head_a->next_odm_pipe;
2300 opp_head_b = opp_head_b->next_odm_pipe;
2301 }
2302
2303 return false;
2304 }
2305
2306 /*
2307 * Sample log:
2308 * pipe topology update
2309 * ________________________
2310 * | plane0 slice0 stream0|
2311 * |DPP0----OPP0----OTG0----| <--- case 0 (OTG master pipe with plane)
2312 * | plane1 | | |
2313 * |DPP1----| | | <--- case 5 (DPP pipe not in last slice)
2314 * | plane0 slice1 | |
2315 * |DPP2----OPP2----| | <--- case 2 (OPP head pipe with plane)
2316 * | plane1 | |
2317 * |DPP3----| | <--- case 4 (DPP pipe in last slice)
2318 * | slice0 stream1|
2319 * |DPG4----OPP4----OTG4----| <--- case 1 (OTG master pipe without plane)
2320 * | slice1 | |
2321 * |DPG5----OPP5----| | <--- case 3 (OPP head pipe without plane)
2322 * |________________________|
2323 */
2324
2325 static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
2326 int stream_idx, int slice_idx, int plane_idx, int slice_count,
2327 bool is_primary)
2328 {
2329 DC_LOGGER_INIT(dc->ctx->logger);
2330
2331 if (slice_idx == 0 && plane_idx == 0 && is_primary) {
2332 /* case 0 (OTG master pipe with plane) */
2333 DC_LOG_DC(" | plane%d slice%d stream%d|",
2334 plane_idx, slice_idx, stream_idx);
2335 DC_LOG_DC(" |DPP%d----OPP%d----OTG%d----|",
2336 pipe->plane_res.dpp->inst,
2337 pipe->stream_res.opp->inst,
2338 pipe->stream_res.tg->inst);
2339 } else if (slice_idx == 0 && plane_idx == -1) {
2340 /* case 1 (OTG master pipe without plane) */
2341 DC_LOG_DC(" | slice%d stream%d|",
2342 slice_idx, stream_idx);
2343 DC_LOG_DC(" |DPG%d----OPP%d----OTG%d----|",
2344 pipe->stream_res.opp->inst,
2345 pipe->stream_res.opp->inst,
2346 pipe->stream_res.tg->inst);
2347 } else if (slice_idx != 0 && plane_idx == 0 && is_primary) {
2348 /* case 2 (OPP head pipe with plane) */
2349 DC_LOG_DC(" | plane%d slice%d | |",
2350 plane_idx, slice_idx);
2351 DC_LOG_DC(" |DPP%d----OPP%d----| |",
2352 pipe->plane_res.dpp->inst,
2353 pipe->stream_res.opp->inst);
2354 } else if (slice_idx != 0 && plane_idx == -1) {
2355 /* case 3 (OPP head pipe without plane) */
2356 DC_LOG_DC(" | slice%d | |", slice_idx);
2357 DC_LOG_DC(" |DPG%d----OPP%d----| |",
2358 pipe->plane_res.dpp->inst,
2359 pipe->stream_res.opp->inst);
2360 } else if (slice_idx == slice_count - 1) {
2361 /* case 4 (DPP pipe in last slice) */
2362 DC_LOG_DC(" | plane%d | |", plane_idx);
2363 DC_LOG_DC(" |DPP%d----| |",
2364 pipe->plane_res.dpp->inst);
2365 } else {
2366 /* case 5 (DPP pipe not in last slice) */
2367 DC_LOG_DC(" | plane%d | | |", plane_idx);
2368 DC_LOG_DC(" |DPP%d----| | |",
2369 pipe->plane_res.dpp->inst);
2370 }
2371 }
2372
2373 static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
2374 struct pipe_ctx *otg_master, int stream_idx)
2375 {
2376 struct pipe_ctx *opp_heads[MAX_PIPES];
2377 struct pipe_ctx *dpp_pipes[MAX_PIPES];
2378
2379 int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
2380 bool is_primary;
2381 DC_LOGGER_INIT(dc->ctx->logger);
2382
2383 slice_count = resource_get_opp_heads_for_otg_master(otg_master,
2384 &state->res_ctx, opp_heads);
2385 for (slice_idx = 0; slice_idx < slice_count; slice_idx++) {
2386 plane_idx = -1;
2387 if (opp_heads[slice_idx]->plane_state) {
2388 dpp_count = resource_get_dpp_pipes_for_opp_head(
2389 opp_heads[slice_idx],
2390 &state->res_ctx,
2391 dpp_pipes);
2392 for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
2393 is_primary = !dpp_pipes[dpp_idx]->top_pipe ||
2394 dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state;
2395 if (is_primary)
2396 plane_idx++;
2397 resource_log_pipe(dc, dpp_pipes[dpp_idx],
2398 stream_idx, slice_idx,
2399 plane_idx, slice_count,
2400 is_primary);
2401 }
2402 } else {
2403 resource_log_pipe(dc, opp_heads[slice_idx],
2404 stream_idx, slice_idx, plane_idx,
2405 slice_count, true);
2406 }
2407
2408 }
2409 }
2410
2411 static int resource_stream_to_stream_idx(struct dc_state *state,
2412 struct dc_stream_state *stream)
2413 {
2414 int i, stream_idx = -1;
2415
2416 for (i = 0; i < state->stream_count; i++)
2417 if (state->streams[i] == stream) {
2418 stream_idx = i;
2419 break;
2420 }
2421
2422 /* never return negative array index */
2423 if (stream_idx == -1) {
2424 ASSERT(0);
2425 return 0;
2426 }
2427
2428 return stream_idx;
2429 }
2430
2431 void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
2432 {
2433 struct pipe_ctx *otg_master;
2434 int stream_idx, phantom_stream_idx;
2435 DC_LOGGER_INIT(dc->ctx->logger);
2436
2437 DC_LOG_DC(" pipe topology update");
2438 DC_LOG_DC(" ________________________");
2439 for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
2440 if (state->streams[stream_idx]->is_phantom)
2441 continue;
2442
2443 otg_master = resource_get_otg_master_for_stream(
2444 &state->res_ctx, state->streams[stream_idx]);
2445
2446 if (!otg_master)
2447 continue;
2448
2449 resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
2450 }
2451 if (state->phantom_stream_count > 0) {
2452 DC_LOG_DC(" | (phantom pipes) |");
2453 for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
2454 if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
2455 continue;
2456
2457 phantom_stream_idx = resource_stream_to_stream_idx(state,
2458 state->stream_status[stream_idx].mall_stream_config.paired_stream);
2459 otg_master = resource_get_otg_master_for_stream(
2460 &state->res_ctx, state->streams[phantom_stream_idx]);
2461 if (!otg_master)
2462 continue;
2463
2464 resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
2465 }
2466 }
2467 DC_LOG_DC(" |________________________|\n");
2468 }
2469
2470 static struct pipe_ctx *get_tail_pipe(
2471 struct pipe_ctx *head_pipe)
2472 {
2473 struct pipe_ctx *tail_pipe = head_pipe->bottom_pipe;
2474
2475 while (tail_pipe) {
2476 head_pipe = tail_pipe;
2477 tail_pipe = tail_pipe->bottom_pipe;
2478 }
2479
2480 return head_pipe;
2481 }
2482
2483 static struct pipe_ctx *get_last_opp_head(
2484 struct pipe_ctx *opp_head)
2485 {
2486 ASSERT(resource_is_pipe_type(opp_head, OPP_HEAD));
2487 while (opp_head->next_odm_pipe)
2488 opp_head = opp_head->next_odm_pipe;
2489 return opp_head;
2490 }
2491
2492 static struct pipe_ctx *get_last_dpp_pipe_in_mpcc_combine(
2493 struct pipe_ctx *dpp_pipe)
2494 {
2495 ASSERT(resource_is_pipe_type(dpp_pipe, DPP_PIPE));
2496 while (dpp_pipe->bottom_pipe &&
2497 dpp_pipe->plane_state == dpp_pipe->bottom_pipe->plane_state)
2498 dpp_pipe = dpp_pipe->bottom_pipe;
2499 return dpp_pipe;
2500 }
2501
2502 static bool update_pipe_params_after_odm_slice_count_change(
2503 struct pipe_ctx *otg_master,
2504 struct dc_state *context,
2505 const struct resource_pool *pool)
2506 {
2507 int i;
2508 struct pipe_ctx *pipe;
2509 bool result = true;
2510
2511 for (i = 0; i < pool->pipe_count && result; i++) {
2512 pipe = &context->res_ctx.pipe_ctx[i];
2513 if (pipe->stream == otg_master->stream && pipe->plane_state)
2514 result = resource_build_scaling_params(pipe);
2515 }
2516
2517 if (pool->funcs->build_pipe_pix_clk_params)
2518 pool->funcs->build_pipe_pix_clk_params(otg_master);
2519
2520 resource_build_test_pattern_params(&context->res_ctx, otg_master);
2521
2522 return result;
2523 }
2524
2525 static bool update_pipe_params_after_mpc_slice_count_change(
2526 const struct dc_plane_state *plane,
2527 struct dc_state *context,
2528 const struct resource_pool *pool)
2529 {
2530 int i;
2531 struct pipe_ctx *pipe;
2532 bool result = true;
2533
2534 for (i = 0; i < pool->pipe_count && result; i++) {
2535 pipe = &context->res_ctx.pipe_ctx[i];
2536 if (pipe->plane_state == plane)
2537 result = resource_build_scaling_params(pipe);
2538 }
2539 return result;
2540 }
2541
2542 static int acquire_first_split_pipe(
2543 struct resource_context *res_ctx,
2544 const struct resource_pool *pool,
2545 struct dc_stream_state *stream)
2546 {
2547 int i;
2548
2549 for (i = 0; i < pool->pipe_count; i++) {
2550 struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i];
2551
2552 if (split_pipe->top_pipe &&
2553 split_pipe->top_pipe->plane_state == split_pipe->plane_state) {
2554 split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe;
2555 if (split_pipe->bottom_pipe)
2556 split_pipe->bottom_pipe->top_pipe = split_pipe->top_pipe;
2557
2558 if (split_pipe->top_pipe->plane_state)
2559 resource_build_scaling_params(split_pipe->top_pipe);
2560
2561 memset(split_pipe, 0, sizeof(*split_pipe));
2562 split_pipe->stream_res.tg = pool->timing_generators[i];
2563 split_pipe->plane_res.hubp = pool->hubps[i];
2564 split_pipe->plane_res.ipp = pool->ipps[i];
2565 split_pipe->plane_res.dpp = pool->dpps[i];
2566 split_pipe->stream_res.opp = pool->opps[i];
2567 split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
2568 split_pipe->pipe_idx = i;
2569
2570 split_pipe->stream = stream;
2571 return i;
2572 }
2573 }
2574 return FREE_PIPE_INDEX_NOT_FOUND;
2575 }
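/*
 * Note (summary of the logic above): the pipe is "acquired" by stealing
 * the bottom half of an existing MPC split, relinking its neighbours,
 * rebuilding the former top pipe's scaling parameters and then reusing
 * the freed pipe for the new stream.
 */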
2576
2577 static void update_stream_engine_usage(
2578 struct resource_context *res_ctx,
2579 const struct resource_pool *pool,
2580 struct stream_encoder *stream_enc,
2581 bool acquired)
2582 {
2583 int i;
2584
2585 for (i = 0; i < pool->stream_enc_count; i++) {
2586 if (pool->stream_enc[i] == stream_enc)
2587 res_ctx->is_stream_enc_acquired[i] = acquired;
2588 }
2589 }
2590
2591 static void update_hpo_dp_stream_engine_usage(
2592 struct resource_context *res_ctx,
2593 const struct resource_pool *pool,
2594 struct hpo_dp_stream_encoder *hpo_dp_stream_enc,
2595 bool acquired)
2596 {
2597 int i;
2598
2599 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
2600 if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc)
2601 res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
2602 }
2603 }
2604
2605 static inline int find_acquired_hpo_dp_link_enc_for_link(
2606 const struct resource_context *res_ctx,
2607 const struct dc_link *link)
2608 {
2609 int i;
2610
2611 for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++)
2612 if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 &&
2613 res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index)
2614 return i;
2615
2616 return -1;
2617 }
2618
2619 static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx,
2620 const struct resource_pool *pool)
2621 {
2622 int i;
2623
2624 for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++)
2625 if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0)
2626 break;
2627
2628 return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) &&
2629 i < pool->hpo_dp_link_enc_count) ? i : -1;
2630 }
2631
2632 static inline void acquire_hpo_dp_link_enc(
2633 struct resource_context *res_ctx,
2634 unsigned int link_index,
2635 int enc_index)
2636 {
2637 res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index;
2638 res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1;
2639 }
2640
2641 static inline void retain_hpo_dp_link_enc(
2642 struct resource_context *res_ctx,
2643 int enc_index)
2644 {
2645 res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++;
2646 }
2647
2648 static inline void release_hpo_dp_link_enc(
2649 struct resource_context *res_ctx,
2650 int enc_index)
2651 {
2652 ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0);
2653 res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--;
2654 }
2655
2656 static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx,
2657 const struct resource_pool *pool,
2658 struct pipe_ctx *pipe_ctx,
2659 struct dc_stream_state *stream)
2660 {
2661 int enc_index;
2662
2663 enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);
2664
2665 if (enc_index >= 0) {
2666 retain_hpo_dp_link_enc(res_ctx, enc_index);
2667 } else {
2668 enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
2669 if (enc_index >= 0)
2670 acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index);
2671 }
2672
2673 if (enc_index >= 0)
2674 pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];
2675
2676 return pipe_ctx->link_res.hpo_dp_link_enc != NULL;
2677 }
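/*
 * Refcount lifecycle sketch (illustrative): the first stream on a link
 * acquires an HPO DP link encoder (refcount 1), further streams on the
 * same link retain it (refcount++), and remove_hpo_dp_link_enc_from_ctx()
 * releases it (refcount--); the encoder becomes free again once the
 * count returns to 0.
 */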
2678
2679 static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
2680 struct pipe_ctx *pipe_ctx,
2681 struct dc_stream_state *stream)
2682 {
2683 int enc_index;
2684
2685 enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link);
2686
2687 if (enc_index >= 0) {
2688 release_hpo_dp_link_enc(res_ctx, enc_index);
2689 pipe_ctx->link_res.hpo_dp_link_enc = NULL;
2690 }
2691 }
2692
2693 static inline int find_acquired_dio_link_enc_for_link(
2694 const struct resource_context *res_ctx,
2695 const struct dc_link *link)
2696 {
2697 int i;
2698
2699 for (i = 0; i < ARRAY_SIZE(res_ctx->dio_link_enc_ref_cnts); i++)
2700 if (res_ctx->dio_link_enc_ref_cnts[i] > 0 &&
2701 res_ctx->dio_link_enc_to_link_idx[i] == link->link_index)
2702 return i;
2703
2704 return -1;
2705 }
2706
2707 static inline int find_fixed_dio_link_enc(const struct dc_link *link)
2708 {
2709 /* the 8b10b DP PHY can only use its fixed link encoder */
2710 return link->eng_id;
2711 }
2712
2713 static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
2714 const struct dc_link *link, const struct resource_pool *pool)
2715 {
2716 int i;
2717 int enc_count = pool->dig_link_enc_count;
2718
2719 /* for dpia, check the preferred encoder first and then the following ones in order */
2720 for (i = 0; i < enc_count; i++)
2721 if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
2722 break;
2723
2724 return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
2725 }
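/*
 * Example (hypothetical indices): with dig_link_enc_count == 4 and
 * dpia_preferred_eng_id == 2, the scan order is 2, 3, 0, 1; the first
 * encoder with a zero refcount is returned, or -1 if none is free.
 */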
2726
2727 static inline void acquire_dio_link_enc(
2728 struct resource_context *res_ctx,
2729 unsigned int link_index,
2730 int enc_index)
2731 {
2732 res_ctx->dio_link_enc_to_link_idx[enc_index] = link_index;
2733 res_ctx->dio_link_enc_ref_cnts[enc_index] = 1;
2734 }
2735
2736 static inline void retain_dio_link_enc(
2737 struct resource_context *res_ctx,
2738 int enc_index)
2739 {
2740 res_ctx->dio_link_enc_ref_cnts[enc_index]++;
2741 }
2742
2743 static inline void release_dio_link_enc(
2744 struct resource_context *res_ctx,
2745 int enc_index)
2746 {
2747 ASSERT(res_ctx->dio_link_enc_ref_cnts[enc_index] > 0);
2748 res_ctx->dio_link_enc_ref_cnts[enc_index]--;
2749 }
2750
2751 static bool is_dio_enc_acquired_by_other_link(const struct dc_link *link,
2752 int enc_index,
2753 int *link_index)
2754 {
2755 const struct dc *dc = link->dc;
2756 const struct resource_context *res_ctx = &dc->current_state->res_ctx;
2757
2758 /* report through *link_index which link acquired enc_index */
2759 if (res_ctx->dio_link_enc_ref_cnts[enc_index] > 0 &&
2760 res_ctx->dio_link_enc_to_link_idx[enc_index] != link->link_index) {
2761 *link_index = res_ctx->dio_link_enc_to_link_idx[enc_index];
2762 return true;
2763 }
2764
2765 return false;
2766 }
2767
2768 static void swap_dio_link_enc_to_muxable_ctx(struct dc_state *context,
2769 const struct resource_pool *pool,
2770 int new_encoder,
2771 int old_encoder)
2772 {
2773 struct resource_context *res_ctx = &context->res_ctx;
2774 int stream_count = context->stream_count;
2775 int i = 0;
2776
2777 res_ctx->dio_link_enc_ref_cnts[new_encoder] = res_ctx->dio_link_enc_ref_cnts[old_encoder];
2778 res_ctx->dio_link_enc_to_link_idx[new_encoder] = res_ctx->dio_link_enc_to_link_idx[old_encoder];
2779 res_ctx->dio_link_enc_ref_cnts[old_encoder] = 0;
2780
2781 for (i = 0; i < stream_count; i++) {
2782 struct dc_stream_state *stream = context->streams[i];
2783 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
2784
2785 if (pipe_ctx && pipe_ctx->link_res.dio_link_enc == pool->link_encoders[old_encoder])
2786 pipe_ctx->link_res.dio_link_enc = pool->link_encoders[new_encoder];
2787 }
2788 }
2789
2790 static bool add_dio_link_enc_to_ctx(const struct dc *dc,
2791 struct dc_state *context,
2792 const struct resource_pool *pool,
2793 struct pipe_ctx *pipe_ctx,
2794 struct dc_stream_state *stream)
2795 {
2796 struct resource_context *res_ctx = &context->res_ctx;
2797 int enc_index;
2798
2799 enc_index = find_acquired_dio_link_enc_for_link(res_ctx, stream->link);
2800
2801 if (enc_index >= 0) {
2802 retain_dio_link_enc(res_ctx, enc_index);
2803 } else {
2804 if (stream->link->is_dig_mapping_flexible)
2805 enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
2806 else {
2807 int link_index = 0;
2808
2809 enc_index = find_fixed_dio_link_enc(stream->link);
2810 /* A fixed-mapping link can only use its fixed link encoder.
2811 * If that encoder is already acquired by another link, get a new free encoder
2812 * and swap the new one into the acquiring link.
2813 */
2814 if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
2815 int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
2816
2817 if (new_enc_index >= 0)
2818 swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
2819 else
2820 return false;
2821 }
2822 }
2823
2824 if (enc_index >= 0)
2825 acquire_dio_link_enc(res_ctx, stream->link->link_index, enc_index);
2826 }
2827
2828 if (enc_index >= 0)
2829 pipe_ctx->link_res.dio_link_enc = pool->link_encoders[enc_index];
2830
2831 return pipe_ctx->link_res.dio_link_enc != NULL;
2832 }
2833
2834 static void remove_dio_link_enc_from_ctx(struct resource_context *res_ctx,
2835 struct pipe_ctx *pipe_ctx,
2836 struct dc_stream_state *stream)
2837 {
2838 int enc_index = -1;
2839
2840 if (stream->link)
2841 enc_index = find_acquired_dio_link_enc_for_link(res_ctx, stream->link);
2842
2843 if (enc_index >= 0) {
2844 release_dio_link_enc(res_ctx, enc_index);
2845 pipe_ctx->link_res.dio_link_enc = NULL;
2846 }
2847 }
2848
2849 static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context)
2850 {
2851 int i;
2852 int count = 0;
2853
2854 for (i = 0; i < pool->pipe_count; i++)
2855 if (resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], FREE_PIPE))
2856 count++;
2857 return count;
2858 }
2859
2860 enum dc_status resource_add_otg_master_for_stream_output(struct dc_state *new_ctx,
2861 const struct resource_pool *pool,
2862 struct dc_stream_state *stream)
2863 {
2864 struct dc *dc = stream->ctx->dc;
2865
2866 return dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
2867 }
2868
2869 void resource_remove_otg_master_for_stream_output(struct dc_state *context,
2870 const struct resource_pool *pool,
2871 struct dc_stream_state *stream)
2872 {
2873 struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
2874 &context->res_ctx, stream);
2875
2876 if (!otg_master)
2877 return;
2878
2879 ASSERT(resource_get_odm_slice_count(otg_master) == 1);
2880 ASSERT(otg_master->plane_state == NULL);
2881 ASSERT(otg_master->stream_res.stream_enc);
2882 update_stream_engine_usage(
2883 &context->res_ctx,
2884 pool,
2885 otg_master->stream_res.stream_enc,
2886 false);
2887
2888 if (stream->ctx->dc->link_srv->dp_is_128b_132b_signal(otg_master)) {
2889 update_hpo_dp_stream_engine_usage(
2890 &context->res_ctx, pool,
2891 otg_master->stream_res.hpo_dp_stream_enc,
2892 false);
2893 remove_hpo_dp_link_enc_from_ctx(
2894 &context->res_ctx, otg_master, stream);
2895 }
2896
2897 if (stream->ctx->dc->config.unify_link_enc_assignment)
2898 remove_dio_link_enc_from_ctx(&context->res_ctx, otg_master, stream);
2899
2900 if (otg_master->stream_res.audio)
2901 update_audio_usage(
2902 &context->res_ctx,
2903 pool,
2904 otg_master->stream_res.audio,
2905 false);
2906
2907 resource_unreference_clock_source(&context->res_ctx,
2908 pool,
2909 otg_master->clock_source);
2910
2911 if (pool->funcs->remove_stream_from_ctx)
2912 pool->funcs->remove_stream_from_ctx(
2913 stream->ctx->dc, context, stream);
2914
2915 memset(otg_master, 0, sizeof(*otg_master));
2916 }
2917
2918 /* For each OPP head of an OTG master, add top plane at plane index 0.
2919 *
2920 * In the following example, the stream has 2 ODM slices without a top plane.
2921 * By adding plane 0 to the OPP heads, we configure the hardware to render
2922 * plane 0 using each OPP head's DPP.
2923 *
2924 * Inter-pipe Relation (Before Adding Plane)
2925 * __________________________________________________
2926 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
2927 * | | | slice 0 | |
2928 * | 0 | |blank ----ODM----------- |
2929 * | | | slice 1 | | |
2930 * | 1 | |blank ---- | |
2931 * |________|_______________|___________|_____________|
2932 *
2933 * Inter-pipe Relation (After Adding Plane)
2934 * __________________________________________________
2935 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
2936 * | | plane 0 | slice 0 | |
2937 * | 0 | -------------------------ODM----------- |
2938 * | | plane 0 | slice 1 | | |
2939 * | 1 | ------------------------- | |
2940 * |________|_______________|___________|_____________|
2941 */
2942 static bool add_plane_to_opp_head_pipes(struct pipe_ctx *otg_master_pipe,
2943 struct dc_plane_state *plane_state,
2944 struct dc_state *context)
2945 {
2946 struct pipe_ctx *opp_head_pipe = otg_master_pipe;
2947
2948 while (opp_head_pipe) {
2949 if (opp_head_pipe->plane_state) {
2950 ASSERT(0);
2951 return false;
2952 }
2953 opp_head_pipe->plane_state = plane_state;
2954 opp_head_pipe = opp_head_pipe->next_odm_pipe;
2955 }
2956
2957 return true;
2958 }
2959
2960 /* For each OPP head of an OTG master, acquire a secondary DPP pipe and add
2961 * the plane. So the plane is added to all ODM slices associated with the OTG
2962 * master pipe in the bottom layer.
2963 *
2964 * In the following example, the stream has 2 ODM slices and a top plane 0.
2965 * By acquiring a secondary DPP pipe for each ODM slice and adding plane 1
2966 * to it, we configure the hardware to render plane 1 using the new pipes'
2967 * DPPs, blended in the Z axis below plane 0.
2968 *
2969 * Inter-pipe Relation (Before Adding Plane)
2970 * __________________________________________________
2971 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
2972 * | | plane 0 | slice 0 | |
2973 * | 0 | -------------------------ODM----------- |
2974 * | | plane 0 | slice 1 | | |
2975 * | 1 | ------------------------- | |
2976 * |________|_______________|___________|_____________|
2977 *
2978 * Inter-pipe Relation (After Acquiring and Adding Plane)
2979 * __________________________________________________
2980 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
2981 * | | plane 0 | slice 0 | |
2982 * | 0 | -------------MPC---------ODM----------- |
2983 * | | plane 1 | | | | |
2984 * | 2 | ------------- | | | |
2985 * | | plane 0 | slice 1 | | |
2986 * | 1 | -------------MPC--------- | |
2987 * | | plane 1 | | | |
2988 * | 3 | ------------- | | |
2989 * |________|_______________|___________|_____________|
2990 */
2991 static bool acquire_secondary_dpp_pipes_and_add_plane(
2992 struct pipe_ctx *otg_master_pipe,
2993 struct dc_plane_state *plane_state,
2994 struct dc_state *new_ctx,
2995 struct dc_state *cur_ctx,
2996 struct resource_pool *pool)
2997 {
2998 struct pipe_ctx *sec_pipe, *tail_pipe;
2999 struct pipe_ctx *opp_heads[MAX_PIPES];
3000 int opp_head_count;
3001 int i;
3002
3003 if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) {
3004 ASSERT(0);
3005 return false;
3006 }
3007
3008 opp_head_count = resource_get_opp_heads_for_otg_master(otg_master_pipe,
3009 &new_ctx->res_ctx, opp_heads);
3010 if (get_num_of_free_pipes(pool, new_ctx) < opp_head_count)
3011 /* not enough free pipes */
3012 return false;
3013
3014 for (i = 0; i < opp_head_count; i++) {
3015 sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
3016 cur_ctx,
3017 new_ctx,
3018 pool,
3019 opp_heads[i]);
3020 ASSERT(sec_pipe);
3021 sec_pipe->plane_state = plane_state;
3022
3023 /* establish pipe relationship */
3024 tail_pipe = get_tail_pipe(opp_heads[i]);
3025 tail_pipe->bottom_pipe = sec_pipe;
3026 sec_pipe->top_pipe = tail_pipe;
3027 sec_pipe->bottom_pipe = NULL;
3028 if (tail_pipe->prev_odm_pipe) {
3029 ASSERT(tail_pipe->prev_odm_pipe->bottom_pipe);
3030 sec_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
3031 tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = sec_pipe;
3032 } else {
3033 sec_pipe->prev_odm_pipe = NULL;
3034 }
3035 }
3036 return true;
3037 }
3038
3039 bool resource_append_dpp_pipes_for_plane_composition(
3040 struct dc_state *new_ctx,
3041 struct dc_state *cur_ctx,
3042 struct resource_pool *pool,
3043 struct pipe_ctx *otg_master_pipe,
3044 struct dc_plane_state *plane_state)
3045 {
3046 bool success;
3047
3048 if (otg_master_pipe->plane_state == NULL)
3049 success = add_plane_to_opp_head_pipes(otg_master_pipe,
3050 plane_state, new_ctx);
3051 else
3052 success = acquire_secondary_dpp_pipes_and_add_plane(
3053 otg_master_pipe, plane_state, new_ctx,
3054 cur_ctx, pool);
3055 if (success) {
3056 /* when appending a plane, the MPC slice count changes from 0 to 1 */
3057 success = update_pipe_params_after_mpc_slice_count_change(
3058 plane_state, new_ctx, pool);
3059 if (!success)
3060 resource_remove_dpp_pipes_for_plane_composition(new_ctx,
3061 pool, plane_state);
3062 }
3063
3064 return success;
3065 }
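
/*
 * Illustrative sketch (kept under #if 0, not compiled): one way a caller
 * could attach a plane to the OTG master pipe of a stream and release the
 * acquired DPP pipes if a later validation step fails. The wrapper name
 * example_attach_plane and the placement of the validation step are
 * hypothetical; only the resource_*() and dc_validate_global_state() calls
 * are defined in this file.
 */
#if 0
static bool example_attach_plane(struct dc *dc,
		struct dc_state *new_ctx,
		struct dc_stream_state *stream,
		struct dc_plane_state *plane)
{
	struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
			&new_ctx->res_ctx, stream);

	if (!otg_master)
		return false;

	/* acquires secondary DPP pipes as needed and updates MPC params */
	if (!resource_append_dpp_pipes_for_plane_composition(
			new_ctx, dc->current_state, dc->res_pool,
			otg_master, plane))
		return false;

	if (dc_validate_global_state(dc, new_ctx, false) != DC_OK) {
		/* undo the acquisition on validation failure */
		resource_remove_dpp_pipes_for_plane_composition(
				new_ctx, dc->res_pool, plane);
		return false;
	}
	return true;
}
#endif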
3066
3067 void resource_remove_dpp_pipes_for_plane_composition(
3068 struct dc_state *context,
3069 const struct resource_pool *pool,
3070 const struct dc_plane_state *plane_state)
3071 {
3072 int i;
3073
3074 for (i = pool->pipe_count - 1; i >= 0; i--) {
3075 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3076
3077 if (pipe_ctx->plane_state == plane_state) {
3078 if (pipe_ctx->top_pipe)
3079 pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
3080
3081 /* The second condition avoids setting the tail pipe's top
3082 * pipe to NULL, which would make it look like a head pipe
3083 * in subsequent deletes.
3084 */
3085 if (pipe_ctx->bottom_pipe && pipe_ctx->top_pipe)
3086 pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
3087
3088 /*
3089 * For a head pipe, detach the surface from the pipe; for a
3090 * tail pipe, just zero it out.
3091 */
3092 if (!pipe_ctx->top_pipe)
3093 pipe_ctx->plane_state = NULL;
3094 else
3095 memset(pipe_ctx, 0, sizeof(*pipe_ctx));
3096 }
3097 }
3098 }
3099
3100 /*
3101 * Increase ODM slice count by 1 by acquiring pipes and adding a new ODM slice
3102 * at the last index.
3103 * return - true if a new ODM slice is added and required pipes are acquired.
3104 * false if new_ctx is no longer a valid state after new ODM slice is added.
3105 *
3106 * This is achieved by duplicating MPC blending tree from previous ODM slice.
3107 * In the following example, we have a single MPC tree and 1 ODM slice 0. We
3108 * want to add a new odm slice by duplicating the MPC blending tree and add
3109 * ODM slice 1.
3110 *
3111 * Inter-pipe Relation (Before Acquiring and Adding ODM Slice)
3112 * __________________________________________________
3113 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3114 * | | plane 0 | slice 0 | |
3115 * | 0 | -------------MPC---------ODM----------- |
3116 * | | plane 1 | | | |
3117 * | 1 | ------------- | | |
3118 * |________|_______________|___________|_____________|
3119 *
3120 * Inter-pipe Relation (After Acquiring and Adding ODM Slice)
3121 * __________________________________________________
3122 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3123 * | | plane 0 | slice 0 | |
3124 * | 0 | -------------MPC---------ODM----------- |
3125 * | | plane 1 | | | | |
3126 * | 1 | ------------- | | | |
3127 * | | plane 0 | slice 1 | | |
3128 * | 2 | -------------MPC--------- | |
3129 * | | plane 1 | | | |
3130 * | 3 | ------------- | | |
3131 * |________|_______________|___________|_____________|
3132 */
3133 static bool acquire_pipes_and_add_odm_slice(
3134 struct pipe_ctx *otg_master_pipe,
3135 struct dc_state *new_ctx,
3136 const struct dc_state *cur_ctx,
3137 const struct resource_pool *pool)
3138 {
3139 struct pipe_ctx *last_opp_head = get_last_opp_head(otg_master_pipe);
3140 struct pipe_ctx *new_opp_head;
3141 struct pipe_ctx *last_top_dpp_pipe, *last_bottom_dpp_pipe,
3142 *new_top_dpp_pipe, *new_bottom_dpp_pipe;
3143
3144 if (!pool->funcs->acquire_free_pipe_as_secondary_opp_head) {
3145 ASSERT(0);
3146 return false;
3147 }
3148 new_opp_head = pool->funcs->acquire_free_pipe_as_secondary_opp_head(
3149 cur_ctx, new_ctx, pool,
3150 otg_master_pipe);
3151 if (!new_opp_head)
3152 return false;
3153
3154 last_opp_head->next_odm_pipe = new_opp_head;
3155 new_opp_head->prev_odm_pipe = last_opp_head;
3156 new_opp_head->next_odm_pipe = NULL;
3157 new_opp_head->plane_state = last_opp_head->plane_state;
3158 last_top_dpp_pipe = last_opp_head;
3159 new_top_dpp_pipe = new_opp_head;
3160
3161 while (last_top_dpp_pipe->bottom_pipe) {
3162 last_bottom_dpp_pipe = last_top_dpp_pipe->bottom_pipe;
3163 new_bottom_dpp_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
3164 cur_ctx, new_ctx, pool,
3165 new_opp_head);
3166 if (!new_bottom_dpp_pipe)
3167 return false;
3168
3169 new_bottom_dpp_pipe->plane_state = last_bottom_dpp_pipe->plane_state;
3170 new_top_dpp_pipe->bottom_pipe = new_bottom_dpp_pipe;
3171 new_bottom_dpp_pipe->top_pipe = new_top_dpp_pipe;
3172 last_bottom_dpp_pipe->next_odm_pipe = new_bottom_dpp_pipe;
3173 new_bottom_dpp_pipe->prev_odm_pipe = last_bottom_dpp_pipe;
3174 new_bottom_dpp_pipe->next_odm_pipe = NULL;
3175 last_top_dpp_pipe = last_bottom_dpp_pipe;
3176 }
3177
3178 return true;
3179 }
3180
3181 /*
3182 * Decrease ODM slice count by 1 by releasing pipes and removing the ODM slice
3183 * at the last index.
3184 * return - true if the last ODM slice is removed and related pipes are
3185 * released. false if there is no removable ODM slice.
3186 *
3187 * In the following example, we have 2 MPC trees and ODM slice 0 and slice 1.
3188 * We want to remove the last ODM slice, i.e. slice 1, by releasing secondary DPP
3189 * pipe 3 and OPP head pipe 2.
3190 *
3191 * Inter-pipe Relation (Before Releasing and Removing ODM Slice)
3192 * __________________________________________________
3193 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3194 * | | plane 0 | slice 0 | |
3195 * | 0 | -------------MPC---------ODM----------- |
3196 * | | plane 1 | | | | |
3197 * | 1 | ------------- | | | |
3198 * | | plane 0 | slice 1 | | |
3199 * | 2 | -------------MPC--------- | |
3200 * | | plane 1 | | | |
3201 * | 3 | ------------- | | |
3202 * |________|_______________|___________|_____________|
3203 *
3204 * Inter-pipe Relation (After Releasing and Removing ODM Slice)
3205 * __________________________________________________
3206 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3207 * | | plane 0 | slice 0 | |
3208 * | 0 | -------------MPC---------ODM----------- |
3209 * | | plane 1 | | | |
3210 * | 1 | ------------- | | |
3211 * |________|_______________|___________|_____________|
3212 */
3213 static bool release_pipes_and_remove_odm_slice(
3214 struct pipe_ctx *otg_master_pipe,
3215 struct dc_state *context,
3216 const struct resource_pool *pool)
3217 {
3218 struct pipe_ctx *last_opp_head = get_last_opp_head(otg_master_pipe);
3219 struct pipe_ctx *tail_pipe = get_tail_pipe(last_opp_head);
3220
3221 if (!pool->funcs->release_pipe) {
3222 ASSERT(0);
3223 return false;
3224 }
3225
3226 if (resource_is_pipe_type(last_opp_head, OTG_MASTER))
3227 return false;
3228
3229 while (tail_pipe->top_pipe) {
3230 tail_pipe->prev_odm_pipe->next_odm_pipe = NULL;
3231 tail_pipe = tail_pipe->top_pipe;
3232 pool->funcs->release_pipe(context, tail_pipe->bottom_pipe, pool);
3233 tail_pipe->bottom_pipe = NULL;
3234 }
3235 last_opp_head->prev_odm_pipe->next_odm_pipe = NULL;
3236 pool->funcs->release_pipe(context, last_opp_head, pool);
3237
3238 return true;
3239 }
3240
3241 /*
3242 * Increase MPC slice count by 1 by acquiring a new DPP pipe and add it as the
3243 * last MPC slice of the plane associated with dpp_pipe.
3244 *
3245 * return - true if a new MPC slice is added and required pipes are acquired.
3246 * false if new_ctx is no longer a valid state after new MPC slice is added.
3247 *
3248 * In the following example, we add a new MPC slice for plane 0 into the
3249 * new_ctx. To do so we pass pipe 0 as dpp_pipe. The function acquires a new DPP
3250 * pipe 2 for plane 0 as the bottommost pipe for plane 0.
3251 *
3252 * Inter-pipe Relation (Before Acquiring and Adding MPC Slice)
3253 * __________________________________________________
3254 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3255 * | | plane 0 | | |
3256 * | 0 | -------------MPC----------------------- |
3257 * | | plane 1 | | | |
3258 * | 1 | ------------- | | |
3259 * |________|_______________|___________|_____________|
3260 *
3261 * Inter-pipe Relation (After Acquiring and Adding MPC Slice)
3262 * __________________________________________________
3263 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3264 * | | plane 0 | | |
3265 * | 0 | -------------MPC----------------------- |
3266 * | | plane 0 | | | |
3267 * | 2 | ------------- | | |
3268 * | | plane 1 | | | |
3269 * | 1 | ------------- | | |
3270 * |________|_______________|___________|_____________|
3271 */
3272 static bool acquire_dpp_pipe_and_add_mpc_slice(
3273 struct pipe_ctx *dpp_pipe,
3274 struct dc_state *new_ctx,
3275 const struct dc_state *cur_ctx,
3276 const struct resource_pool *pool)
3277 {
3278 struct pipe_ctx *last_dpp_pipe =
3279 get_last_dpp_pipe_in_mpcc_combine(dpp_pipe);
3280 struct pipe_ctx *opp_head = resource_get_opp_head(dpp_pipe);
3281 struct pipe_ctx *new_dpp_pipe;
3282
3283 if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) {
3284 ASSERT(0);
3285 return false;
3286 }
3287 new_dpp_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
3288 cur_ctx, new_ctx, pool, opp_head);
3289 if (!new_dpp_pipe || resource_get_odm_slice_count(dpp_pipe) > 1)
3290 return false;
3291
3292 new_dpp_pipe->bottom_pipe = last_dpp_pipe->bottom_pipe;
3293 if (new_dpp_pipe->bottom_pipe)
3294 new_dpp_pipe->bottom_pipe->top_pipe = new_dpp_pipe;
3295 new_dpp_pipe->top_pipe = last_dpp_pipe;
3296 last_dpp_pipe->bottom_pipe = new_dpp_pipe;
3297 new_dpp_pipe->plane_state = last_dpp_pipe->plane_state;
3298
3299 return true;
3300 }
3301
3302 /*
3303 * Reduce MPC slice count by 1 by releasing the bottom DPP pipe in MPCC combine
3304 * with dpp_pipe and removing last MPC slice of the plane associated with
3305 * dpp_pipe.
3306 *
3307 * return - true if the last MPC slice of the plane associated with dpp_pipe is
3308 * removed and last DPP pipe in MPCC combine with dpp_pipe is released.
3309 * false if there is no removable MPC slice.
3310 *
3311 * In the following example, we remove an MPC slice for plane 0 from the
3312 * context. To do so we pass pipe 0 as dpp_pipe. The function releases pipe 1 as
3313 * it is the last pipe for plane 0.
3314 *
3315 * Inter-pipe Relation (Before Releasing and Removing MPC Slice)
3316 * __________________________________________________
3317 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3318 * | | plane 0 | | |
3319 * | 0 | -------------MPC----------------------- |
3320 * | | plane 0 | | | |
3321 * | 1 | ------------- | | |
3322 * | | plane 1 | | | |
3323 * | 2 | ------------- | | |
3324 * |________|_______________|___________|_____________|
3325 *
3326 * Inter-pipe Relation (After Releasing and Removing MPC Slice)
3327 * __________________________________________________
3328 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3329 * | | plane 0 | | |
3330 * | 0 | -------------MPC----------------------- |
3331 * | | plane 1 | | | |
3332 * | 2 | ------------- | | |
3333 * |________|_______________|___________|_____________|
3334 */
3335 static bool release_dpp_pipe_and_remove_mpc_slice(
3336 struct pipe_ctx *dpp_pipe,
3337 struct dc_state *context,
3338 const struct resource_pool *pool)
3339 {
3340 struct pipe_ctx *last_dpp_pipe =
3341 get_last_dpp_pipe_in_mpcc_combine(dpp_pipe);
3342
3343 if (!pool->funcs->release_pipe) {
3344 ASSERT(0);
3345 return false;
3346 }
3347
3348 if (resource_is_pipe_type(last_dpp_pipe, OPP_HEAD) ||
3349 resource_get_odm_slice_count(dpp_pipe) > 1)
3350 return false;
3351
3352 last_dpp_pipe->top_pipe->bottom_pipe = last_dpp_pipe->bottom_pipe;
3353 if (last_dpp_pipe->bottom_pipe)
3354 last_dpp_pipe->bottom_pipe->top_pipe = last_dpp_pipe->top_pipe;
3355 pool->funcs->release_pipe(context, last_dpp_pipe, pool);
3356
3357 return true;
3358 }
3359
3360 bool resource_update_pipes_for_stream_with_slice_count(
3361 struct dc_state *new_ctx,
3362 const struct dc_state *cur_ctx,
3363 const struct resource_pool *pool,
3364 const struct dc_stream_state *stream,
3365 int new_slice_count)
3366 {
3367 int i;
3368 struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
3369 &new_ctx->res_ctx, stream);
3370 int cur_slice_count;
3371 bool result = true;
3372
3373 if (!otg_master)
3374 return false;
3375
3376 cur_slice_count = resource_get_odm_slice_count(otg_master);
3377
3378 if (new_slice_count == cur_slice_count)
3379 return result;
3380
3381 if (new_slice_count > cur_slice_count)
3382 for (i = 0; i < new_slice_count - cur_slice_count && result; i++)
3383 result = acquire_pipes_and_add_odm_slice(
3384 otg_master, new_ctx, cur_ctx, pool);
3385 else
3386 for (i = 0; i < cur_slice_count - new_slice_count && result; i++)
3387 result = release_pipes_and_remove_odm_slice(
3388 otg_master, new_ctx, pool);
3389 if (result)
3390 result = update_pipe_params_after_odm_slice_count_change(
3391 otg_master, new_ctx, pool);
3392 return result;
3393 }
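
/*
 * Illustrative sketch (not compiled): requesting ODM combine 2:1 for a
 * stream on a candidate state. The surrounding variables (dc, new_ctx,
 * stream) are hypothetical; dc_state_release() is assumed to be available
 * from dc_state.h.
 */
#if 0
	/* try to split the stream's timing across two ODM slices */
	if (!resource_update_pipes_for_stream_with_slice_count(
			new_ctx, dc->current_state, dc->res_pool, stream, 2)) {
		/* candidate state is no longer valid; discard it */
		dc_state_release(new_ctx);
		new_ctx = NULL;
	}
#endif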
3394
3395 bool resource_update_pipes_for_plane_with_slice_count(
3396 struct dc_state *new_ctx,
3397 const struct dc_state *cur_ctx,
3398 const struct resource_pool *pool,
3399 const struct dc_plane_state *plane,
3400 int new_slice_count)
3401 {
3402 int i;
3403 int dpp_pipe_count;
3404 int cur_slice_count;
3405 struct pipe_ctx *dpp_pipes[MAX_PIPES] = {0};
3406 bool result = true;
3407
3408 dpp_pipe_count = resource_get_dpp_pipes_for_plane(plane,
3409 &new_ctx->res_ctx, dpp_pipes);
3410 ASSERT(dpp_pipe_count > 0);
3411 cur_slice_count = resource_get_mpc_slice_count(dpp_pipes[0]);
3412
3413 if (new_slice_count == cur_slice_count)
3414 return result;
3415
3416 if (new_slice_count > cur_slice_count)
3417 for (i = 0; i < new_slice_count - cur_slice_count && result; i++)
3418 result = acquire_dpp_pipe_and_add_mpc_slice(
3419 dpp_pipes[0], new_ctx, cur_ctx, pool);
3420 else
3421 for (i = 0; i < cur_slice_count - new_slice_count && result; i++)
3422 result = release_dpp_pipe_and_remove_mpc_slice(
3423 dpp_pipes[0], new_ctx, pool);
3424 if (result)
3425 result = update_pipe_params_after_mpc_slice_count_change(
3426 dpp_pipes[0]->plane_state, new_ctx, pool);
3427 return result;
3428 }
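
/*
 * Illustrative sketch (not compiled): the MPC counterpart of the ODM update
 * above, splitting a plane across two MPC slices (MPC combine). Variables
 * are hypothetical, mirroring the sketch after
 * resource_update_pipes_for_stream_with_slice_count().
 */
#if 0
	if (!resource_update_pipes_for_plane_with_slice_count(
			new_ctx, dc->current_state, dc->res_pool, plane, 2)) {
		/* candidate state is no longer valid; discard it */
		dc_state_release(new_ctx);
		new_ctx = NULL;
	}
#endif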
3429
3430 bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
3431 struct dc_stream_state *new_stream)
3432 {
3433 if (cur_stream == NULL)
3434 return true;
3435
3436 /* If output color space is changed, need to reprogram info frames */
3437 if (cur_stream->output_color_space != new_stream->output_color_space)
3438 return true;
3439
3440 return memcmp(
3441 &cur_stream->timing,
3442 &new_stream->timing,
3443 sizeof(struct dc_crtc_timing)) != 0;
3444 }
3445
3446 static bool are_stream_backends_same(
3447 struct dc_stream_state *stream_a, struct dc_stream_state *stream_b)
3448 {
3449 if (stream_a == stream_b)
3450 return true;
3451
3452 if (stream_a == NULL || stream_b == NULL)
3453 return false;
3454
3455 if (dc_is_timing_changed(stream_a, stream_b))
3456 return false;
3457
3458 if (stream_a->signal != stream_b->signal)
3459 return false;
3460
3461 if (stream_a->dpms_off != stream_b->dpms_off)
3462 return false;
3463
3464 return true;
3465 }
3466
3467 /*
3468 * dc_is_stream_unchanged() - Compare two stream states for equivalence.
3469 *
3470 * Checks whether there is a difference between the two states
3471 * that would require a mode change.
3472 *
3473 * Does not compare cursor position or attributes.
3474 */
3475 bool dc_is_stream_unchanged(
3476 struct dc_stream_state *old_stream, struct dc_stream_state *stream)
3477 {
3478 if (!old_stream || !stream)
3479 return false;
3480
3481 if (!are_stream_backends_same(old_stream, stream))
3482 return false;
3483
3484 if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
3485 return false;
3486
3487 /*compare audio info*/
3488 if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
3489 return false;
3490
3491 return true;
3492 }
3493
3494 /*
3495 * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
3496 */
3497 bool dc_is_stream_scaling_unchanged(struct dc_stream_state *old_stream,
3498 struct dc_stream_state *stream)
3499 {
3500 if (old_stream == stream)
3501 return true;
3502
3503 if (old_stream == NULL || stream == NULL)
3504 return false;
3505
3506 if (memcmp(&old_stream->src,
3507 &stream->src,
3508 sizeof(struct rect)) != 0)
3509 return false;
3510
3511 if (memcmp(&old_stream->dst,
3512 &stream->dst,
3513 sizeof(struct rect)) != 0)
3514 return false;
3515
3516 return true;
3517 }
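
/*
 * Illustrative sketch (not compiled): a display manager could use the
 * checks above to classify an update. perform_full_mode_set() and
 * update_scaling_only() are hypothetical placeholders for the two paths.
 */
#if 0
	if (!dc_is_stream_unchanged(old_stream, new_stream) ||
			dc_is_timing_changed(old_stream, new_stream))
		/* backend (OTG/encoder) reprogramming required */
		perform_full_mode_set();
	else if (!dc_is_stream_scaling_unchanged(old_stream, new_stream))
		/* only front-end scaling parameters changed */
		update_scaling_only();
#endif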
3518
3519 /* TODO: release audio object */
3520 void update_audio_usage(
3521 struct resource_context *res_ctx,
3522 const struct resource_pool *pool,
3523 struct audio *audio,
3524 bool acquired)
3525 {
3526 int i;
3527 for (i = 0; i < pool->audio_count; i++) {
3528 if (pool->audios[i] == audio)
3529 res_ctx->is_audio_acquired[i] = acquired;
3530 }
3531 }
3532
3533 static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link(
3534 struct resource_context *res_ctx,
3535 const struct resource_pool *pool,
3536 struct dc_stream_state *stream)
3537 {
3538 int i;
3539
3540 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
3541 if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] &&
3542 pool->hpo_dp_stream_enc[i]) {
3543
3544 return pool->hpo_dp_stream_enc[i];
3545 }
3546 }
3547
3548 return NULL;
3549 }
3550
3551 static struct audio *find_first_free_audio(
3552 struct resource_context *res_ctx,
3553 const struct resource_pool *pool,
3554 enum engine_id id,
3555 enum dce_version dc_version)
3556 {
3557 int i, available_audio_count;
3558
3559 if (id == ENGINE_ID_UNKNOWN)
3560 return NULL;
3561
3562 available_audio_count = pool->audio_count;
3563
3564 for (i = 0; i < available_audio_count; i++) {
3565 if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
3566 /* we have enough audio endpoints; find the matching instance */
3567 if (id != i)
3568 continue;
3569 return pool->audios[i];
3570 }
3571 }
3572
3573 /* use engine id to find free audio */
3574 if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
3575 return pool->audios[id];
3576 }
3577 /* no matching endpoint found; fall back to first come, first served */
3578 for (i = 0; i < available_audio_count; i++) {
3579 if (res_ctx->is_audio_acquired[i] == false) {
3580 return pool->audios[i];
3581 }
3582 }
3583 return NULL;
3584 }
3585
3586 static struct dc_stream_state *find_pll_sharable_stream(
3587 struct dc_stream_state *stream_needs_pll,
3588 struct dc_state *context)
3589 {
3590 int i;
3591
3592 for (i = 0; i < context->stream_count; i++) {
3593 struct dc_stream_state *stream_has_pll = context->streams[i];
3594
3595 /* We are looking for a non-DP, non-virtual stream */
3596 if (resource_are_streams_timing_synchronizable(
3597 stream_needs_pll, stream_has_pll)
3598 && !dc_is_dp_signal(stream_has_pll->signal)
3599 && stream_has_pll->link->connector_signal
3600 != SIGNAL_TYPE_VIRTUAL)
3601 return stream_has_pll;
3602
3603 }
3604
3605 return NULL;
3606 }
3607
3608 static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
3609 {
3610 uint32_t pix_clk = timing->pix_clk_100hz;
3611 uint32_t normalized_pix_clk = pix_clk;
3612
3613 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3614 pix_clk /= 2;
3615 if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
3616 switch (timing->display_color_depth) {
3617 case COLOR_DEPTH_666:
3618 case COLOR_DEPTH_888:
3619 normalized_pix_clk = pix_clk;
3620 break;
3621 case COLOR_DEPTH_101010:
3622 normalized_pix_clk = (pix_clk * 30) / 24;
3623 break;
3624 case COLOR_DEPTH_121212:
3625 normalized_pix_clk = (pix_clk * 36) / 24;
3626 break;
3627 case COLOR_DEPTH_141414:
3628 normalized_pix_clk = (pix_clk * 42) / 24;
3629 break;
3630 case COLOR_DEPTH_161616:
3631 normalized_pix_clk = (pix_clk * 48) / 24;
3632 break;
3633 default:
3634 ASSERT(0);
3635 break;
3636 }
3637 }
3638 return normalized_pix_clk;
3639 }
3640
3641 static void calculate_phy_pix_clks(struct dc_stream_state *stream)
3642 {
3643 /* update actual pixel clock on all streams */
3644 if (dc_is_hdmi_signal(stream->signal))
3645 stream->phy_pix_clk = get_norm_pix_clk(
3646 &stream->timing) / 10;
3647 else
3648 stream->phy_pix_clk =
3649 stream->timing.pix_clk_100hz / 10;
3650
3651 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3652 stream->phy_pix_clk *= 2;
3653 }
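
/*
 * Worked example with illustrative numbers: a 1920x1080@60 HDMI stream at
 * 10 bpc RGB has timing.pix_clk_100hz = 1485000 (148.50 MHz). RGB is
 * neither YCbCr420 nor YCbCr422, so get_norm_pix_clk() scales by 30/24:
 *   normalized = 1485000 * 30 / 24 = 1856250 (185.625 MHz)
 * calculate_phy_pix_clks() then stores phy_pix_clk in kHz:
 *   phy_pix_clk = 1856250 / 10 = 185625
 * and would double it if HW frame packing 3D were enabled.
 */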
3654
3655 static int acquire_resource_from_hw_enabled_state(
3656 struct resource_context *res_ctx,
3657 const struct resource_pool *pool,
3658 struct dc_stream_state *stream)
3659 {
3660 struct dc_link *link = stream->link;
3661 unsigned int i, inst, tg_inst = 0;
3662 uint32_t numPipes = 1;
3663 uint32_t id_src[4] = {0};
3664
3665 /* Check for enabled DIG to identify enabled display */
3666 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
3667 return -1;
3668
3669 inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
3670
3671 if (inst == ENGINE_ID_UNKNOWN)
3672 return -1;
3673
3674 for (i = 0; i < pool->stream_enc_count; i++) {
3675 if (pool->stream_enc[i]->id == inst) {
3676 tg_inst = pool->stream_enc[i]->funcs->dig_source_otg(
3677 pool->stream_enc[i]);
3678 break;
3679 }
3680 }
3681
3682 // tg_inst not found
3683 if (i == pool->stream_enc_count)
3684 return -1;
3685
3686 if (tg_inst >= pool->timing_generator_count)
3687 return -1;
3688
3689 if (!res_ctx->pipe_ctx[tg_inst].stream) {
3690 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
3691
3692 pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
3693 id_src[0] = tg_inst;
3694
3695 if (pipe_ctx->stream_res.tg->funcs->get_optc_source)
3696 pipe_ctx->stream_res.tg->funcs->get_optc_source(pipe_ctx->stream_res.tg,
3697 &numPipes, &id_src[0], &id_src[1]);
3698
3699 if (id_src[0] == 0xf && id_src[1] == 0xf) {
3700 id_src[0] = tg_inst;
3701 numPipes = 1;
3702 }
3703
3704 for (i = 0; i < numPipes; i++) {
3705 // Bail out if the source id is invalid
3706 if (id_src[i] == 0xf)
3707 return -1;
3708
3709 pipe_ctx = &res_ctx->pipe_ctx[id_src[i]];
3710
3711 pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
3712 pipe_ctx->plane_res.mi = pool->mis[id_src[i]];
3713 pipe_ctx->plane_res.hubp = pool->hubps[id_src[i]];
3714 pipe_ctx->plane_res.ipp = pool->ipps[id_src[i]];
3715 pipe_ctx->plane_res.xfm = pool->transforms[id_src[i]];
3716 pipe_ctx->plane_res.dpp = pool->dpps[id_src[i]];
3717 pipe_ctx->stream_res.opp = pool->opps[id_src[i]];
3718
3719 if (pool->dpps[id_src[i]]) {
3720 pipe_ctx->plane_res.mpcc_inst = pool->dpps[id_src[i]]->inst;
3721
3722 if (pool->mpc->funcs->read_mpcc_state) {
3723 struct mpcc_state s = {0};
3724
3725 pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
3726
3727 if (s.dpp_id < MAX_MPCC)
3728 pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id =
3729 s.dpp_id;
3730
3731 if (s.bot_mpcc_id < MAX_MPCC)
3732 pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
3733 &pool->mpc->mpcc_array[s.bot_mpcc_id];
3734
3735 if (s.opp_id < MAX_OPP)
3736 pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
3737 }
3738 }
3739 pipe_ctx->pipe_idx = id_src[i];
3740
3741 if (id_src[i] >= pool->timing_generator_count) {
3742 id_src[i] = pool->timing_generator_count - 1;
3743
3744 pipe_ctx->stream_res.tg = pool->timing_generators[id_src[i]];
3745 pipe_ctx->stream_res.opp = pool->opps[id_src[i]];
3746 }
3747
3748 pipe_ctx->stream = stream;
3749 }
3750
3751 if (numPipes == 2) {
3752 stream->apply_boot_odm_mode = dm_odm_combine_policy_2to1;
3753 res_ctx->pipe_ctx[id_src[0]].next_odm_pipe = &res_ctx->pipe_ctx[id_src[1]];
3754 res_ctx->pipe_ctx[id_src[0]].prev_odm_pipe = NULL;
3755 res_ctx->pipe_ctx[id_src[1]].next_odm_pipe = NULL;
3756 res_ctx->pipe_ctx[id_src[1]].prev_odm_pipe = &res_ctx->pipe_ctx[id_src[0]];
3757 } else
3758 stream->apply_boot_odm_mode = dm_odm_combine_mode_disabled;
3759
3760 return id_src[0];
3761 }
3762
3763 return -1;
3764 }
3765
3766 static void mark_seamless_boot_stream(const struct dc *dc,
3767 struct dc_stream_state *stream)
3768 {
3769 struct dc_bios *dcb = dc->ctx->dc_bios;
3770
3771 DC_LOGGER_INIT(dc->ctx->logger);
3772
3773 if (stream->apply_seamless_boot_optimization)
3774 return;
3775 if (!dc->config.allow_seamless_boot_optimization)
3776 return;
3777 if (dcb->funcs->is_accelerated_mode(dcb))
3778 return;
3779 if (dc_validate_boot_timing(dc, stream->sink, &stream->timing)) {
3780 stream->apply_seamless_boot_optimization = true;
3781 DC_LOG_DC("Marked stream for seamless boot optimization\n");
3782 }
3783 }
3784
3785 /*
3786 * Acquire a pipe as OTG master and assign to the stream in new dc context.
3787 * return - true if OTG master pipe is acquired and new dc context is updated.
3788 * false if it fails to acquire an OTG master pipe for this stream.
3789 *
3790 * In the example below, we acquired pipe 0 as OTG master pipe for the stream.
3791 * After the function its Inter-pipe Relation is represented by the diagram
3792 * below.
3793 *
3794 * Inter-pipe Relation
3795 * __________________________________________________
3796 * |PIPE IDX| DPP PIPES | OPP HEADS | OTG MASTER |
3797 * | | | | |
3798 * | 0 | |blank ------------------ |
3799 * |________|_______________|___________|_____________|
3800 */
3801 static bool acquire_otg_master_pipe_for_stream(
3802 const struct dc_state *cur_ctx,
3803 struct dc_state *new_ctx,
3804 const struct resource_pool *pool,
3805 struct dc_stream_state *stream)
3806 {
3807 /* TODO: Move this function to DCN specific resource file and acquire
3808 * DSC resource here. The reason is that the function should have the
3809 * same level of responsibility as when we acquire secondary OPP head.
3810 * We acquire DSC when we acquire secondary OPP head, so we should
3811 * acquire DSC when we acquire OTG master.
3812 */
3813 int pipe_idx;
3814 struct pipe_ctx *pipe_ctx = NULL;
3815
3816 /*
3817 * Upper level code is responsible for optimizing away unnecessary
3818 * addition and removal of unchanged streams, so an unchanged stream
3819 * keeps the same OTG master instance allocated. When the current
3820 * stream is removed and a new stream is added, we want to reuse the
3821 * OTG instance made available by the removed stream first. If none is
3822 * found, we try to avoid using free pipes already used in the current
3823 * context, as this could unnecessarily tear down an existing ODM/MPC/MPO configuration.
3824 */
3825
3826 /*
3827 * Try to acquire the same OTG master already in use. This is not
3828 * optimal because resetting an enabled OTG master pipe for a new stream
3829 * requires an extra frame of wait. However there are test automation
3830 * and eDP assumptions that rely on reusing the same OTG master pipe
3831 * during mode change. We have to keep this logic as is for now.
3832 */
3833 pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
3834 &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
3835 /*
3836 * Try to acquire a pipe not used in current resource context to avoid
3837 * pipe swapping.
3838 */
3839 if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
3840 pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
3841 &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
3842 /*
3843 * If pipe swapping is unavoidable, try to acquire pipe used as
3844 * secondary DPP pipe in current state as we prioritize to support more
3845 * streams over supporting MPO planes.
3846 */
3847 if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
3848 pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp(
3849 &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
3850 if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
3851 pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
3852 if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
3853 pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
3854 memset(pipe_ctx, 0, sizeof(*pipe_ctx));
3855 pipe_ctx->pipe_idx = pipe_idx;
3856 pipe_ctx->stream_res.tg = pool->timing_generators[pipe_idx];
3857 pipe_ctx->plane_res.mi = pool->mis[pipe_idx];
3858 pipe_ctx->plane_res.hubp = pool->hubps[pipe_idx];
3859 pipe_ctx->plane_res.ipp = pool->ipps[pipe_idx];
3860 pipe_ctx->plane_res.xfm = pool->transforms[pipe_idx];
3861 pipe_ctx->plane_res.dpp = pool->dpps[pipe_idx];
3862 pipe_ctx->stream_res.opp = pool->opps[pipe_idx];
3863 if (pool->dpps[pipe_idx])
3864 pipe_ctx->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
3865
3866 if (pipe_idx >= pool->timing_generator_count && pool->timing_generator_count != 0) {
3867 int tg_inst = pool->timing_generator_count - 1;
3868
3869 pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
3870 pipe_ctx->stream_res.opp = pool->opps[tg_inst];
3871 }
3872
3873 pipe_ctx->stream = stream;
3874 } else {
3875 pipe_idx = acquire_first_split_pipe(&new_ctx->res_ctx, pool, stream);
3876 }
3877
3878 return pipe_idx != FREE_PIPE_INDEX_NOT_FOUND;
3879 }
3880
3881 enum dc_status resource_map_pool_resources(
3882 const struct dc *dc,
3883 struct dc_state *context,
3884 struct dc_stream_state *stream)
3885 {
3886 const struct resource_pool *pool = dc->res_pool;
3887 int i;
3888 struct dc_context *dc_ctx = dc->ctx;
3889 struct pipe_ctx *pipe_ctx = NULL;
3890 int pipe_idx = -1;
3891 bool acquired = false;
3892 bool is_dio_encoder = true;
3893
3894 calculate_phy_pix_clks(stream);
3895
3896 mark_seamless_boot_stream(dc, stream);
3897
3898 if (stream->apply_seamless_boot_optimization) {
3899 pipe_idx = acquire_resource_from_hw_enabled_state(
3900 &context->res_ctx,
3901 pool,
3902 stream);
3903 if (pipe_idx < 0)
3904 /* hw resource was assigned to other stream */
3905 stream->apply_seamless_boot_optimization = false;
3906 else
3907 acquired = true;
3908 }
3909
3910 if (!acquired)
3911 /* acquire new resources */
3912 acquired = acquire_otg_master_pipe_for_stream(dc->current_state,
3913 context, pool, stream);
3914
3915 pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
3916
3917 if (!pipe_ctx || pipe_ctx->stream_res.tg == NULL)
3918 return DC_NO_CONTROLLER_RESOURCE;
3919
3920 pipe_ctx->stream_res.stream_enc =
3921 dc->res_pool->funcs->find_first_free_match_stream_enc_for_link(
3922 &context->res_ctx, pool, stream);
3923
3924 if (!pipe_ctx->stream_res.stream_enc)
3925 return DC_NO_STREAM_ENC_RESOURCE;
3926
3927 update_stream_engine_usage(
3928 &context->res_ctx, pool,
3929 pipe_ctx->stream_res.stream_enc,
3930 true);
3931
3932 /* Allocate DP HPO Stream Encoder based on signal, hw capabilities
3933 * and link settings
3934 */
3935 if (dc_is_dp_signal(stream->signal) ||
3936 dc_is_virtual_signal(stream->signal)) {
3937 if (!dc->link_srv->dp_decide_link_settings(stream,
3938 &pipe_ctx->link_config.dp_link_settings))
3939 return DC_FAIL_DP_LINK_BANDWIDTH;
3940 if (dc->link_srv->dp_get_encoding_format(
3941 &pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
3942 pipe_ctx->stream_res.hpo_dp_stream_enc =
3943 find_first_free_match_hpo_dp_stream_enc_for_link(
3944 &context->res_ctx, pool, stream);
3945
3946 if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
3947 return DC_NO_STREAM_ENC_RESOURCE;
3948
3949 update_hpo_dp_stream_engine_usage(
3950 &context->res_ctx, pool,
3951 pipe_ctx->stream_res.hpo_dp_stream_enc,
3952 true);
3953 if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream))
3954 return DC_NO_LINK_ENC_RESOURCE;
3955 }
3956 }
3957
3958 if (dc->config.unify_link_enc_assignment && is_dio_encoder)
3959 if (!add_dio_link_enc_to_ctx(dc, context, pool, pipe_ctx, stream))
3960 return DC_NO_LINK_ENC_RESOURCE;
3961
3962 /* TODO: Add checks for ASIC audio support and EDID audio */
3963 if (!stream->converter_disable_audio &&
3964 dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
3965 stream->audio_info.mode_count && stream->audio_info.flags.all) {
3966 pipe_ctx->stream_res.audio = find_first_free_audio(
3967 &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version);
3968
3969 /*
3970 * Audio is assigned on a first come, first served basis.
3971 * Some ASICs have fewer audio resources than pipes.
3973 */
3974 if (pipe_ctx->stream_res.audio)
3975 update_audio_usage(&context->res_ctx, pool,
3976 pipe_ctx->stream_res.audio, true);
3977 }
3978
3979 /* Add ABM to the resource if on EDP */
3980 if (pipe_ctx->stream && dc_is_embedded_signal(pipe_ctx->stream->signal)) {
3981 if (pool->abm)
3982 pipe_ctx->stream_res.abm = pool->abm;
3983 else
3984 pipe_ctx->stream_res.abm = pool->multiple_abms[pipe_ctx->stream_res.tg->inst];
3985 }
3986
3987 for (i = 0; i < context->stream_count; i++)
3988 if (context->streams[i] == stream) {
3989 context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
3990 context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
3991 context->stream_status[i].audio_inst =
3992 pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
3993
3994 return DC_OK;
3995 }
3996
3997 DC_ERROR("Stream %p not found in new ctx!\n", stream);
3998 return DC_ERROR_UNEXPECTED;
3999 }
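
/*
 * Illustrative sketch (not compiled): mapping pool resources for a stream
 * that was just added to a candidate state. The surrounding flow is
 * hypothetical and simplified; the dc, new_ctx and stream variables are
 * assumed to exist in the caller.
 */
#if 0
	enum dc_status status;

	status = dc_state_add_stream(dc, new_ctx, stream);
	if (status == DC_OK)
		/* assigns OTG master, stream encoder, audio, ABM, ... */
		status = resource_map_pool_resources(dc, new_ctx, stream);
#endif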
4000
4001 bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
4002 {
4003 if (dc->res_pool == NULL)
4004 return false;
4005
4006 return dc->res_pool->res_cap->num_dsc > 0;
4007 }
4008
4009 static bool planes_changed_for_existing_stream(struct dc_state *context,
4010 struct dc_stream_state *stream,
4011 const struct dc_validation_set set[],
4012 int set_count)
4013 {
4014 int i, j;
4015 struct dc_stream_status *stream_status = NULL;
4016
4017 for (i = 0; i < context->stream_count; i++) {
4018 if (context->streams[i] == stream) {
4019 stream_status = &context->stream_status[i];
4020 break;
4021 }
4022 }
4023
4024 if (!stream_status) {
4025 ASSERT(0);
4026 return false;
4027 }
4028
4029 for (i = 0; i < set_count; i++)
4030 if (set[i].stream == stream)
4031 break;
4032
4033 if (i == set_count)
4034 ASSERT(0);
4035
4036 if (set[i].plane_count != stream_status->plane_count)
4037 return true;
4038
4039 for (j = 0; j < set[i].plane_count; j++)
4040 if (set[i].plane_states[j] != stream_status->plane_states[j])
4041 return true;
4042
4043 return false;
4044 }
4045
4046 static bool add_all_planes_for_stream(
4047 const struct dc *dc,
4048 struct dc_stream_state *stream,
4049 const struct dc_validation_set set[],
4050 int set_count,
4051 struct dc_state *state)
4052 {
4053 int i, j;
4054
4055 for (i = 0; i < set_count; i++)
4056 if (set[i].stream == stream)
4057 break;
4058
4059 if (i == set_count) {
4060 dm_error("Stream %p not found in set!\n", stream);
4061 return false;
4062 }
4063
4064 for (j = 0; j < set[i].plane_count; j++)
4065 if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state))
4066 return false;
4067
4068 return true;
4069 }
4070
4071 /**
4072 * dc_validate_with_context - Validate and update the potential new stream in the context object
4073 *
4074 * @dc: Used to get the current state status
4075 * @set: An array of dc_validation_set with all the current streams reference
4076 * @set_count: Total of streams
4077 * @context: New context
4078 * @fast_validate: Enable or disable fast validation
4079 *
4080 * This function updates the potential new stream in the context object. It
4081 * creates multiple lists for the add, remove, and unchanged streams. In
4082 * particular, if the unchanged streams have a plane that changed, it is
4083 * necessary to remove all planes from the unchanged streams. In summary, this
4084 * function is responsible for validating the new context.
4085 *
4086 * Return:
4087 * In case of success, return DC_OK (1), otherwise, return a DC error.
4088 */
4089 enum dc_status dc_validate_with_context(struct dc *dc,
4090 const struct dc_validation_set set[],
4091 int set_count,
4092 struct dc_state *context,
4093 bool fast_validate)
4094 {
4095 struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 };
4096 struct dc_stream_state *del_streams[MAX_PIPES] = { 0 };
4097 struct dc_stream_state *add_streams[MAX_PIPES] = { 0 };
4098 int old_stream_count = context->stream_count;
4099 enum dc_status res = DC_ERROR_UNEXPECTED;
4100 int unchanged_streams_count = 0;
4101 int del_streams_count = 0;
4102 int add_streams_count = 0;
4103 bool found = false;
4104 int i, j, k;
4105
4106 DC_LOGGER_INIT(dc->ctx->logger);
4107
4108 /* First, build a list of streams to be removed from the current context */
4109 for (i = 0; i < old_stream_count; i++) {
4110 struct dc_stream_state *stream = context->streams[i];
4111
4112 for (j = 0; j < set_count; j++) {
4113 if (stream == set[j].stream) {
4114 found = true;
4115 break;
4116 }
4117 }
4118
4119 if (!found)
4120 del_streams[del_streams_count++] = stream;
4121
4122 found = false;
4123 }
4124
4125 /* Second, build a list of new streams */
4126 for (i = 0; i < set_count; i++) {
4127 struct dc_stream_state *stream = set[i].stream;
4128
4129 for (j = 0; j < old_stream_count; j++) {
4130 if (stream == context->streams[j]) {
4131 found = true;
4132 break;
4133 }
4134 }
4135
4136 if (!found)
4137 add_streams[add_streams_count++] = stream;
4138
4139 found = false;
4140 }
4141
4142 /* Build a list of unchanged streams, which is necessary for handling
4143 * plane changes such as additions, removals, and updates.
4144 */
4145 for (i = 0; i < set_count; i++) {
4146 /* Check if stream is part of the delete list */
4147 for (j = 0; j < del_streams_count; j++) {
4148 if (set[i].stream == del_streams[j]) {
4149 found = true;
4150 break;
4151 }
4152 }
4153
4154 if (!found) {
4155 /* Check if stream is part of the add list */
4156 for (j = 0; j < add_streams_count; j++) {
4157 if (set[i].stream == add_streams[j]) {
4158 found = true;
4159 break;
4160 }
4161 }
4162 }
4163
4164 if (!found)
4165 unchanged_streams[unchanged_streams_count++] = set[i].stream;
4166
4167 found = false;
4168 }
4169
4170 /* Remove all planes for unchanged streams if planes changed */
4171 for (i = 0; i < unchanged_streams_count; i++) {
4172 if (planes_changed_for_existing_stream(context,
4173 unchanged_streams[i],
4174 set,
4175 set_count)) {
4176
4177 if (!dc_state_rem_all_planes_for_stream(dc,
4178 unchanged_streams[i],
4179 context)) {
4180 res = DC_FAIL_DETACH_SURFACES;
4181 goto fail;
4182 }
4183 }
4184 }
4185
4186 /* Remove all planes for removed streams and then remove the streams */
4187 for (i = 0; i < del_streams_count; i++) {
4188 /* Need to copy the DWB data from the old stream in order for EFC to work */
4189 if (del_streams[i]->num_wb_info > 0) {
4190 for (j = 0; j < add_streams_count; j++) {
4191 if (del_streams[i]->sink == add_streams[j]->sink) {
4192 add_streams[j]->num_wb_info = del_streams[i]->num_wb_info;
4193 for (k = 0; k < del_streams[i]->num_wb_info; k++)
4194 add_streams[j]->writeback_info[k] = del_streams[i]->writeback_info[k];
4195 }
4196 }
4197 }
4198
4199 if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) {
4200 /* remove phantoms specifically */
4201 if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) {
4202 res = DC_FAIL_DETACH_SURFACES;
4203 goto fail;
4204 }
4205
4206 res = dc_state_remove_phantom_stream(dc, context, del_streams[i]);
4207 dc_state_release_phantom_stream(dc, context, del_streams[i]);
4208 } else {
4209 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
4210 res = DC_FAIL_DETACH_SURFACES;
4211 goto fail;
4212 }
4213
4214 res = dc_state_remove_stream(dc, context, del_streams[i]);
4215 }
4216
4217 if (res != DC_OK)
4218 goto fail;
4219 }
4220
4221 /* Swap seamless boot stream to pipe 0 (if needed) to ensure pipe_ctx
4222 * matches. This may change in the future if multiple seamless boot
4223 * streams are supported.
4224 */
4225 for (i = 0; i < add_streams_count; i++) {
4226 mark_seamless_boot_stream(dc, add_streams[i]);
4227 if (add_streams[i]->apply_seamless_boot_optimization && i != 0) {
4228 struct dc_stream_state *temp = add_streams[0];
4229
4230 add_streams[0] = add_streams[i];
4231 add_streams[i] = temp;
4232 break;
4233 }
4234 }
4235
4236 /* Add new streams and then add all planes for the new stream */
4237 for (i = 0; i < add_streams_count; i++) {
4238 calculate_phy_pix_clks(add_streams[i]);
4239 res = dc_state_add_stream(dc, context, add_streams[i]);
4240 if (res != DC_OK)
4241 goto fail;
4242
4243 if (!add_all_planes_for_stream(dc, add_streams[i], set, set_count, context)) {
4244 res = DC_FAIL_ATTACH_SURFACES;
4245 goto fail;
4246 }
4247 }
4248
4249 /* Add all planes for unchanged streams if planes changed */
4250 for (i = 0; i < unchanged_streams_count; i++) {
4251 if (planes_changed_for_existing_stream(context,
4252 unchanged_streams[i],
4253 set,
4254 set_count)) {
4255 if (!add_all_planes_for_stream(dc, unchanged_streams[i], set, set_count, context)) {
4256 res = DC_FAIL_ATTACH_SURFACES;
4257 goto fail;
4258 }
4259 }
4260 }
4261
4262 res = dc_validate_global_state(dc, context, fast_validate);
4263
4264 /* calculate pixel rate divider after deciding pixel clock & ODM combine */
4265 if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) {
4266 for (i = 0; i < add_streams_count; i++)
4267 dc->hwss.calculate_pix_rate_divider(dc, context, add_streams[i]);
4268 }
4269
4270 fail:
4271 if (res != DC_OK)
4272 DC_LOG_WARNING("%s:resource validation failed, dc_status:%d\n",
4273 __func__,
4274 res);
4275
4276 return res;
4277 }
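
/*
 * Illustrative sketch (not compiled): building a dc_validation_set for one
 * stream with one plane and validating a candidate context. The stream and
 * plane variables are hypothetical; dc_state_create_copy() and
 * dc_state_release() are assumed to be available from dc_state.h.
 */
#if 0
	struct dc_validation_set set = {
		.stream = stream,
		.plane_states = { plane },
		.plane_count = 1,
	};
	struct dc_state *new_ctx = dc_state_create_copy(dc->current_state);
	enum dc_status status;

	if (new_ctx) {
		status = dc_validate_with_context(dc, &set, 1, new_ctx, false);
		if (status != DC_OK)
			dc_state_release(new_ctx);
	}
#endif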
4278
4279 /**
4280 * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
4281 * @pipe_ctx: Pointer to the pipe context structure.
4282 *
4283 * This function calculates the horizontal blanking borrow value for a given pipe context based on the
4284 * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
4285 * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
4286 * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
4287 * value to 0.
4288 */
4289 static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
4290 {
4291 uint32_t hactive;
4292 uint32_t ceil_slice_width;
4293 struct dc_stream_state *stream = NULL;
4294
4295 if (!pipe_ctx)
4296 return;
4297
4298 stream = pipe_ctx->stream;
4299
4300 if (stream->timing.flags.DSC) {
4301 hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
4302
4303 /* Assume that if the determined slice count does not divide hactive evenly, hblank borrow is needed for padding */
4304 if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
4305 ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
4306 pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
4307
4308 if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
4309 pipe_ctx->hblank_borrow = 0;
4310 }
4311 }
4312 }
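
/*
 * Worked example with illustrative numbers: hactive = 3440 and
 * dsc_cfg.num_slices_h = 3 do not divide evenly, so
 *   ceil_slice_width = 3440 / 3 + 1 = 1147
 *   hblank_borrow    = 1147 * 3 - 3440 = 1
 * With h_total = 3520, h_total - hactive - hblank_borrow = 79 >= 32, so
 * the borrowed pixel is kept; had the remaining blank been under 32, the
 * borrow would be reset to 0.
 */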
4313
4314 /**
4315 * dc_validate_global_state() - Determine if hardware can support a given state
4316 *
4317 * @dc: dc struct for this driver
4318 * @new_ctx: state to be validated
4319 * @fast_validate: set to true if only yes/no to support matters
4320 *
4321 * Checks hardware resource availability and bandwidth requirement.
4322 *
4323 * Return:
4324 * DC_OK if the result can be programmed. Otherwise, an error code.
4325 */
4326 enum dc_status dc_validate_global_state(
4327 struct dc *dc,
4328 struct dc_state *new_ctx,
4329 bool fast_validate)
4330 {
4331 enum dc_status result = DC_ERROR_UNEXPECTED;
4332 int i, j;
4333
4334 if (!new_ctx)
4335 return DC_ERROR_UNEXPECTED;
4336
4337 if (dc->res_pool->funcs->validate_global) {
4338 result = dc->res_pool->funcs->validate_global(dc, new_ctx);
4339 if (result != DC_OK)
4340 return result;
4341 }
4342
4343 for (i = 0; i < new_ctx->stream_count; i++) {
4344 struct dc_stream_state *stream = new_ctx->streams[i];
4345
4346 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4347 struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[j];
4348
4349 if (pipe_ctx->stream != stream)
4350 continue;
4351
4352 /* Decide whether hblank borrow is needed and save it in pipe_ctx */
4353 if (dc->debug.enable_hblank_borrow)
4354 decide_hblank_borrow(pipe_ctx);
4355
4356 if (dc->res_pool->funcs->patch_unknown_plane_state &&
4357 pipe_ctx->plane_state &&
4358 pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
4359 result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
4360 if (result != DC_OK)
4361 return result;
4362 }
4363
4364 /* Switch to dp clock source only if there is
4365 * no non dp stream that shares the same timing
4366 * with the dp stream.
4367 */
4368 if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
4369 !find_pll_sharable_stream(stream, new_ctx)) {
4370
4371 resource_unreference_clock_source(
4372 &new_ctx->res_ctx,
4373 dc->res_pool,
4374 pipe_ctx->clock_source);
4375
4376 pipe_ctx->clock_source = dc->res_pool->dp_clock_source;
4377 resource_reference_clock_source(
4378 &new_ctx->res_ctx,
4379 dc->res_pool,
4380 pipe_ctx->clock_source);
4381 }
4382 }
4383 }
4384
4385 result = resource_build_scaling_params_for_context(dc, new_ctx);
4386
4387 if (result == DC_OK)
4388 if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
4389 result = DC_FAIL_BANDWIDTH_VALIDATE;
4390
4391 return result;
4392 }
4393
4394 static void patch_gamut_packet_checksum(
4395 struct dc_info_packet *gamut_packet)
4396 {
4397 /* For gamut we recalc checksum */
4398 if (gamut_packet->valid) {
4399 uint8_t chk_sum = 0;
4400 uint8_t *ptr;
4401 uint8_t i;
4402
4403 /*start of the Gamut data. */
4404 ptr = &gamut_packet->sb[3];
4405
4406 for (i = 0; i <= gamut_packet->sb[1]; i++)
4407 chk_sum += ptr[i];
4408
4409 gamut_packet->sb[2] = (uint8_t) (0x100 - chk_sum);
4410 }
4411 }
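
/*
 * Worked example with illustrative bytes: the checksum is the one-byte
 * complement of the payload sum. If the summed bytes come to 0xF3 (mod
 * 256), then
 *   sb[2] = 0x100 - 0xF3 = 0x0D
 * so the bytes sum to zero modulo 256, as infoframe checksums require.
 * set_avi_info_frame() below uses the same scheme for the AVI infoframe.
 */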
4412
4413 static void set_avi_info_frame(
4414 struct dc_info_packet *info_packet,
4415 struct pipe_ctx *pipe_ctx)
4416 {
4417 struct dc_stream_state *stream = pipe_ctx->stream;
4418 enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
4419 uint32_t pixel_encoding = 0;
4420 enum scanning_type scan_type = SCANNING_TYPE_NODATA;
4421 enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
4422 uint8_t *check_sum = NULL;
4423 uint8_t byte_index = 0;
4424 union hdmi_info_packet hdmi_info;
4425 unsigned int vic = pipe_ctx->stream->timing.vic;
4426 unsigned int rid = pipe_ctx->stream->timing.rid;
4427 unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
4428 enum dc_timing_3d_format format;
4429
4430 memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
4431
4432 color_space = pipe_ctx->stream->output_color_space;
4433 if (color_space == COLOR_SPACE_UNKNOWN)
4434 color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
4435 COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
4436
4437 /* Initialize header */
4438 hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
4439 /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
4440 * not be used in HDMI 2.0 (Section 10.1) */
4441 hdmi_info.bits.header.version = 2;
4442 hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
4443
4444 /*
4445 * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
4446 * according to HDMI 2.0 spec (Section 10.1)
4447 */
4448
4449 switch (stream->timing.pixel_encoding) {
4450 case PIXEL_ENCODING_YCBCR422:
4451 pixel_encoding = 1;
4452 break;
4453
4454 case PIXEL_ENCODING_YCBCR444:
4455 pixel_encoding = 2;
4456 break;
4457 case PIXEL_ENCODING_YCBCR420:
4458 pixel_encoding = 3;
4459 break;
4460
4461 case PIXEL_ENCODING_RGB:
4462 default:
4463 pixel_encoding = 0;
4464 }
4465
4466 /* Y0_Y1_Y2 : The pixel encoding */
4467 /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
4468 hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding;
4469
4470 /* A0 = 1 Active Format Information valid */
4471 hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID;
4472
4473 /* B0, B1 = 3; Bar info data is valid */
4474 hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID;
4475
4476 hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
4477
4478 /* S0, S1 : Underscan / Overscan */
4479 /* TODO: un-hardcode scan type */
4480 scan_type = SCANNING_TYPE_UNDERSCAN;
4481 hdmi_info.bits.S0_S1 = scan_type;
4482
4483 /* C0, C1 : Colorimetry */
4484 switch (color_space) {
4485 case COLOR_SPACE_YCBCR709:
4486 case COLOR_SPACE_YCBCR709_LIMITED:
4487 hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
4488 break;
4489 case COLOR_SPACE_YCBCR601:
4490 case COLOR_SPACE_YCBCR601_LIMITED:
4491 hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601;
4492 break;
4493 case COLOR_SPACE_2020_RGB_FULLRANGE:
4494 case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
4495 case COLOR_SPACE_2020_YCBCR_LIMITED:
4496 hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
4497 hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
4498 break;
4499 case COLOR_SPACE_ADOBERGB:
4500 hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
4501 hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
4502 break;
4503 case COLOR_SPACE_SRGB:
4504 default:
4505 hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA;
4506 break;
4507 }
4508
4509 if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR_LIMITED &&
4510 stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) {
4511 hdmi_info.bits.EC0_EC2 = 0;
4512 hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
4513 }
4514
4515 /* TODO: un-hardcode aspect ratio */
4516 aspect = stream->timing.aspect_ratio;
4517
4518 switch (aspect) {
4519 case ASPECT_RATIO_4_3:
4520 case ASPECT_RATIO_16_9:
4521 hdmi_info.bits.M0_M1 = aspect;
4522 break;
4523
4524 case ASPECT_RATIO_NO_DATA:
4525 case ASPECT_RATIO_64_27:
4526 case ASPECT_RATIO_256_135:
4527 default:
4528 hdmi_info.bits.M0_M1 = 0;
4529 }
4530
4531 /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
4532 hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
4533
4534 switch (stream->content_type) {
4535 case DISPLAY_CONTENT_TYPE_NO_DATA:
4536 hdmi_info.bits.CN0_CN1 = 0;
4537 hdmi_info.bits.ITC = 1;
4538 break;
4539 case DISPLAY_CONTENT_TYPE_GRAPHICS:
4540 hdmi_info.bits.CN0_CN1 = 0;
4541 hdmi_info.bits.ITC = 1;
4542 break;
4543 case DISPLAY_CONTENT_TYPE_PHOTO:
4544 hdmi_info.bits.CN0_CN1 = 1;
4545 hdmi_info.bits.ITC = 1;
4546 break;
4547 case DISPLAY_CONTENT_TYPE_CINEMA:
4548 hdmi_info.bits.CN0_CN1 = 2;
4549 hdmi_info.bits.ITC = 1;
4550 break;
4551 case DISPLAY_CONTENT_TYPE_GAME:
4552 hdmi_info.bits.CN0_CN1 = 3;
4553 hdmi_info.bits.ITC = 1;
4554 break;
4555 }
4556
4557 if (stream->qs_bit == 1) {
4558 if (color_space == COLOR_SPACE_SRGB ||
4559 color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
4560 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
4561 else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
4562 color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
4563 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
4564 else
4565 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
4566 } else
4567 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
4568
4569 /* TODO : We should handle YCC quantization */
4570 /* but we do not have matrix calculation */
4571 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
4572
4573 /* VIC */
4574 if (pipe_ctx->stream->timing.hdmi_vic != 0)
4575 vic = 0;
4576 format = stream->timing.timing_3d_format;
4577 /* TODO: add 3D stereo support */
4578 if (format != TIMING_3D_FORMAT_NONE) {
4579 // Based on the HDMI spec, an HDMI VIC needs to be converted to a CEA VIC when 3D is enabled
4580 switch (pipe_ctx->stream->timing.hdmi_vic) {
4581 case 1:
4582 vic = 95;
4583 break;
4584 case 2:
4585 vic = 94;
4586 break;
4587 case 3:
4588 vic = 93;
4589 break;
4590 case 4:
4591 vic = 98;
4592 break;
4593 default:
4594 break;
4595 }
4596 }
4597 /* If VIC >= 128, the Source shall use AVI InfoFrame Version 3*/
4598 hdmi_info.bits.VIC0_VIC7 = vic;
4599 if (vic >= 128)
4600 hdmi_info.bits.header.version = 3;
4601 /* If (C1, C0)=(1, 1) and (EC2, EC1, EC0)=(1, 1, 1),
4602 * the Source shall use AVI InfoFrame Version 4
4603 */
4604 if (hdmi_info.bits.C0_C1 == COLORIMETRY_EXTENDED &&
4605 hdmi_info.bits.EC0_EC2 == COLORIMETRYEX_RESERVED) {
4606 hdmi_info.bits.header.version = 4;
4607 hdmi_info.bits.header.length = 14;
4608 }
4609
4610 if (rid != 0 && fr_ind != 0) {
4611 hdmi_info.bits.header.version = 4;
4612 hdmi_info.bits.header.length = 15;
4613
4614 hdmi_info.bits.FR0_FR3 = fr_ind & 0xF;
4615 hdmi_info.bits.FR4 = (fr_ind >> 4) & 0x1;
4616 hdmi_info.bits.RID0_RID5 = rid;
4617 }
4618
4619 /* pixel repetition
4620 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
4621 * repetition start from 1 */
4622 hdmi_info.bits.PR0_PR3 = 0;
4623
4624 /* Bar Info
4625 * barTop: Line Number of End of Top Bar.
4626 * barBottom: Line Number of Start of Bottom Bar.
4627 * barLeft: Pixel Number of End of Left Bar.
4628 * barRight: Pixel Number of Start of Right Bar. */
4629 hdmi_info.bits.bar_top = stream->timing.v_border_top;
4630 hdmi_info.bits.bar_bottom = (stream->timing.v_total
4631 - stream->timing.v_border_bottom + 1);
4632 hdmi_info.bits.bar_left = stream->timing.h_border_left;
4633 hdmi_info.bits.bar_right = (stream->timing.h_total
4634 - stream->timing.h_border_right + 1);
4635
4636 /* Additional Colorimetry Extension
4637 * Used in conjunction with C0-C1 and EC0-EC2
4638 * 0 = DCI-P3 RGB (D65)
4639 * 1 = DCI-P3 RGB (theater)
4640 */
4641 hdmi_info.bits.ACE0_ACE3 = 0;
4642
4643 /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
4644 check_sum = &hdmi_info.packet_raw_data.sb[0];
4645
4646 *check_sum = HDMI_INFOFRAME_TYPE_AVI + hdmi_info.bits.header.length + hdmi_info.bits.header.version;
4647
4648 for (byte_index = 1; byte_index <= hdmi_info.bits.header.length; byte_index++)
4649 *check_sum += hdmi_info.packet_raw_data.sb[byte_index];
4650
4651 /* one byte complement */
4652 *check_sum = (uint8_t) (0x100 - *check_sum);
4653
4654 /* Store in hw_path_mode */
4655 info_packet->hb0 = hdmi_info.packet_raw_data.hb0;
4656 info_packet->hb1 = hdmi_info.packet_raw_data.hb1;
4657 info_packet->hb2 = hdmi_info.packet_raw_data.hb2;
4658
4659 for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++)
4660 info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index];
4661
4662 info_packet->valid = true;
4663 }
4664
4665 static void set_vendor_info_packet(
4666 struct dc_info_packet *info_packet,
4667 struct dc_stream_state *stream)
4668 {
4669 /* Vendor-specific info packet */
4670
4671 /* Check if Freesync is supported. Return if false. If true,
4672 * set the corresponding bit in the info packet
4673 */
4674 if (!stream->vsp_infopacket.valid)
4675 return;
4676
4677 *info_packet = stream->vsp_infopacket;
4678 }
4679
4680 static void set_spd_info_packet(
4681 struct dc_info_packet *info_packet,
4682 struct dc_stream_state *stream)
4683 {
4684 /* SPD info packet for FreeSync (VRR) */
4685 
4686 /* Return if the stream does not have a valid FreeSync (VRR) info packet;
4687 * otherwise copy it into the encoder info frame.
4688 */
4689 if (!stream->vrr_infopacket.valid)
4690 return;
4691
4692 *info_packet = stream->vrr_infopacket;
4693 }
4694
4695 static void set_hdr_static_info_packet(
4696 struct dc_info_packet *info_packet,
4697 struct dc_stream_state *stream)
4698 {
4699 /* HDR Static Metadata info packet for HDR10 */
4700
4701 if (!stream->hdr_static_metadata.valid ||
4702 stream->use_dynamic_meta)
4703 return;
4704
4705 *info_packet = stream->hdr_static_metadata;
4706 }
4707
4708 static void set_vsc_info_packet(
4709 struct dc_info_packet *info_packet,
4710 struct dc_stream_state *stream)
4711 {
4712 if (!stream->vsc_infopacket.valid)
4713 return;
4714
4715 *info_packet = stream->vsc_infopacket;
4716 }
4717 static void set_hfvs_info_packet(
4718 struct dc_info_packet *info_packet,
4719 struct dc_stream_state *stream)
4720 {
4721 if (!stream->hfvsif_infopacket.valid)
4722 return;
4723
4724 *info_packet = stream->hfvsif_infopacket;
4725 }
4726
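/* Derive the line number on which the adaptive sync SDP is sent from the
 * stream timing and the VSTARTUP position: when vstartup_start lies beyond
 * the OTG vertical blank end line (asic_blank_end), the SDP line resolves to
 * (vstartup_start - asic_blank_end - 1); otherwise the override is marked
 * invalid.
 */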
4727 static void adaptive_sync_override_dp_info_packets_sdp_line_num(
4728 const struct dc_crtc_timing *timing,
4729 struct enc_sdp_line_num *sdp_line_num,
4730 unsigned int vstartup_start)
4731 {
4732 uint32_t asic_blank_start = 0;
4733 uint32_t asic_blank_end = 0;
4734 uint32_t v_update = 0;
4735
4736 const struct dc_crtc_timing *tg = timing;
4737
4738 /* blank_start = frame end - front porch */
4739 asic_blank_start = tg->v_total - tg->v_front_porch;
4740
4741 /* blank_end = blank_start - active */
4742 asic_blank_end = (asic_blank_start - tg->v_border_bottom -
4743 tg->v_addressable - tg->v_border_top);
4744
4745 if (vstartup_start > asic_blank_end) {
4746 v_update = (tg->v_total - (vstartup_start - asic_blank_end));
4747 sdp_line_num->adaptive_sync_line_num_valid = true;
4748 sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
4749 } else {
4750 sdp_line_num->adaptive_sync_line_num_valid = false;
4751 sdp_line_num->adaptive_sync_line_num = 0;
4752 }
4753 }
4754
4755 static void set_adaptive_sync_info_packet(
4756 struct dc_info_packet *info_packet,
4757 const struct dc_stream_state *stream,
4758 struct encoder_info_frame *info_frame,
4759 unsigned int vstartup_start)
4760 {
4761 if (!stream->adaptive_sync_infopacket.valid)
4762 return;
4763
4764 adaptive_sync_override_dp_info_packets_sdp_line_num(
4765 &stream->timing,
4766 &info_frame->sdp_line_num,
4767 vstartup_start);
4768
4769 *info_packet = stream->adaptive_sync_infopacket;
4770 }
4771
4772 static void set_vtem_info_packet(
4773 struct dc_info_packet *info_packet,
4774 struct dc_stream_state *stream)
4775 {
4776 if (!stream->vtem_infopacket.valid)
4777 return;
4778
4779 *info_packet = stream->vtem_infopacket;
4780 }
4781
4782 struct clock_source *dc_resource_find_first_free_pll(
4783 struct resource_context *res_ctx,
4784 const struct resource_pool *pool)
4785 {
4786 int i;
4787
4788 for (i = 0; i < pool->clk_src_count; ++i) {
4789 if (res_ctx->clock_source_ref_count[i] == 0)
4790 return pool->clock_sources[i];
4791 }
4792
4793 return NULL;
4794 }
4795
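/* Build the full set of info packets (AVI, vendor, SPD, HDR static metadata,
 * VSC, HF-VSIF, VTEM, adaptive sync) for the pipe's stream encoder. All
 * packets default to invalid and only those applicable to the stream's
 * signal type (HDMI vs. DP) are populated; finally the gamut packet
 * checksum is patched.
 */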
4796 void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
4797 {
4798 enum signal_type signal = SIGNAL_TYPE_NONE;
4799 struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
4800 unsigned int vstartup_start = 0;
4801
4802 /* default all packets to invalid */
4803 info->avi.valid = false;
4804 info->gamut.valid = false;
4805 info->vendor.valid = false;
4806 info->spd.valid = false;
4807 info->hdrsmd.valid = false;
4808 info->vsc.valid = false;
4809 info->hfvsif.valid = false;
4810 info->vtem.valid = false;
4811 info->adaptive_sync.valid = false;
4812 signal = pipe_ctx->stream->signal;
4813
4814 if (pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe)
4815 vstartup_start = pipe_ctx->stream->ctx->dc->res_pool->funcs->get_vstartup_for_pipe(pipe_ctx);
4816
4817 /* HDMI and DP have different info packets */
4818 if (dc_is_hdmi_signal(signal)) {
4819 set_avi_info_frame(&info->avi, pipe_ctx);
4820
4821 set_vendor_info_packet(&info->vendor, pipe_ctx->stream);
4822 set_hfvs_info_packet(&info->hfvsif, pipe_ctx->stream);
4823 set_vtem_info_packet(&info->vtem, pipe_ctx->stream);
4824
4825 set_spd_info_packet(&info->spd, pipe_ctx->stream);
4826
4827 set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
4828
4829 } else if (dc_is_dp_signal(signal)) {
4830 set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
4831
4832 set_spd_info_packet(&info->spd, pipe_ctx->stream);
4833
4834 set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
4835 set_adaptive_sync_info_packet(&info->adaptive_sync,
4836 pipe_ctx->stream,
4837 info,
4838 vstartup_start);
4839 }
4840
4841 patch_gamut_packet_checksum(&info->gamut);
4842 }
4843
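/* Assign a clock source to the stream's OTG master pipe: DP and virtual
 * signals use the dedicated DP clock source, while other signals first try
 * to share an already-used PLL (unless PLL sharing is disabled) and then
 * fall back to the first free PLL in the pool.
 */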
4844 enum dc_status resource_map_clock_resources(
4845 const struct dc *dc,
4846 struct dc_state *context,
4847 struct dc_stream_state *stream)
4848 {
4849 /* acquire new resources */
4850 const struct resource_pool *pool = dc->res_pool;
4851 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(
4852 &context->res_ctx, stream);
4853
4854 if (!pipe_ctx)
4855 return DC_ERROR_UNEXPECTED;
4856
4857 if (dc_is_dp_signal(pipe_ctx->stream->signal)
4858 || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
4859 pipe_ctx->clock_source = pool->dp_clock_source;
4860 else {
4861 pipe_ctx->clock_source = NULL;
4862
4863 if (!dc->config.disable_disp_pll_sharing)
4864 pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing(
4865 &context->res_ctx,
4866 pipe_ctx);
4867
4868 if (pipe_ctx->clock_source == NULL)
4869 pipe_ctx->clock_source =
4870 dc_resource_find_first_free_pll(
4871 &context->res_ctx,
4872 pool);
4873 }
4874
4875 if (pipe_ctx->clock_source == NULL)
4876 return DC_NO_CLOCK_SOURCE_RESOURCE;
4877
4878 resource_reference_clock_source(
4879 &context->res_ctx, pool,
4880 pipe_ctx->clock_source);
4881
4882 return DC_OK;
4883 }
4884
4885 /*
4886 * Note: We need to disable the output if the clock source changes, since
4887 * the BIOS performs an optimization and does not reprogram the PHY when it
4888 * is not already disabled.
4889 */
4890 bool pipe_need_reprogram(
4891 struct pipe_ctx *pipe_ctx_old,
4892 struct pipe_ctx *pipe_ctx)
4893 {
4894 if (!pipe_ctx_old->stream)
4895 return false;
4896
4897 if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
4898 return true;
4899
4900 if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
4901 return true;
4902
4903 if (pipe_ctx_old->stream_res.audio != pipe_ctx->stream_res.audio)
4904 return true;
4905
4906 if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
4907 && pipe_ctx_old->stream != pipe_ctx->stream)
4908 return true;
4909
4910 if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
4911 return true;
4912
4913 if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
4914 return true;
4915
4916 if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
4917 return true;
4918
4919 if (!pipe_ctx_old->stream->link->link_state_valid &&
4920 !pipe_ctx_old->stream->dpms_off)
4921 return true;
4922
4923 if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
4924 return true;
4925
4926 if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
4927 return true;
4928 if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc)
4929 return true;
4930
4931 /* DIG link encoder resource assignment for stream changed. */
4932 if (pipe_ctx_old->stream->ctx->dc->config.unify_link_enc_assignment) {
4933 if (pipe_ctx_old->link_res.dio_link_enc != pipe_ctx->link_res.dio_link_enc)
4934 return true;
4935 } else if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
4936 bool need_reprogram = false;
4937 struct dc *dc = pipe_ctx_old->stream->ctx->dc;
4938 struct link_encoder *link_enc_prev =
4939 link_enc_cfg_get_link_enc_used_by_stream_current(dc, pipe_ctx_old->stream);
4940
4941 if (link_enc_prev != pipe_ctx->stream->link_enc)
4942 need_reprogram = true;
4943
4944 return need_reprogram;
4945 }
4946
4947 return false;
4948 }
4949
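/* Translate the stream's dither option into formatter bit depth reduction
 * settings: truncation depth/mode, spatial dither depth, frame random and
 * temporal (frame modulation) dither depth. DITHER_OPTION_DEFAULT is first
 * resolved based on the stream's display color depth.
 */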
4950 void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
4951 struct bit_depth_reduction_params *fmt_bit_depth)
4952 {
4953 enum dc_dither_option option = stream->dither_option;
4954 enum dc_pixel_encoding pixel_encoding =
4955 stream->timing.pixel_encoding;
4956
4957 memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
4958
4959 if (option == DITHER_OPTION_DEFAULT) {
4960 switch (stream->timing.display_color_depth) {
4961 case COLOR_DEPTH_666:
4962 option = DITHER_OPTION_SPATIAL6;
4963 break;
4964 case COLOR_DEPTH_888:
4965 option = DITHER_OPTION_SPATIAL8;
4966 break;
4967 case COLOR_DEPTH_101010:
4968 option = DITHER_OPTION_TRUN10;
4969 break;
4970 default:
4971 option = DITHER_OPTION_DISABLE;
4972 }
4973 }
4974
4975 if (option == DITHER_OPTION_DISABLE)
4976 return;
4977
4978 if (option == DITHER_OPTION_TRUN6) {
4979 fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
4980 fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
4981 } else if (option == DITHER_OPTION_TRUN8 ||
4982 option == DITHER_OPTION_TRUN8_SPATIAL6 ||
4983 option == DITHER_OPTION_TRUN8_FM6) {
4984 fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
4985 fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
4986 } else if (option == DITHER_OPTION_TRUN10 ||
4987 option == DITHER_OPTION_TRUN10_SPATIAL6 ||
4988 option == DITHER_OPTION_TRUN10_SPATIAL8 ||
4989 option == DITHER_OPTION_TRUN10_FM8 ||
4990 option == DITHER_OPTION_TRUN10_FM6 ||
4991 option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
4992 fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
4993 fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
4994 if (option == DITHER_OPTION_TRUN10)
4995 fmt_bit_depth->flags.TRUNCATE_MODE = 1;
4996 }
4997
4998 /* special case - Formatter can only reduce by 4 bits at most.
4999 * When reducing from 12 to 6 bits,
5000 * HW recommends we use trunc with round mode
5001 * (if we did nothing, trunc to 10 bits would be used)
5002 * note that any 12->10 bit reduction is ignored prior to DCE8,
5003 * as the input was 10 bits.
5004 */
5005 if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
5006 option == DITHER_OPTION_SPATIAL6 ||
5007 option == DITHER_OPTION_FM6) {
5008 fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
5009 fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
5010 fmt_bit_depth->flags.TRUNCATE_MODE = 1;
5011 }
5012
5013 /* spatial dither
5014 * note that spatial modes 1-3 are never used
5015 */
5016 if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
5017 option == DITHER_OPTION_SPATIAL6 ||
5018 option == DITHER_OPTION_TRUN10_SPATIAL6 ||
5019 option == DITHER_OPTION_TRUN8_SPATIAL6) {
5020 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
5021 fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
5022 fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
5023 fmt_bit_depth->flags.RGB_RANDOM =
5024 (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
5025 } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
5026 option == DITHER_OPTION_SPATIAL8 ||
5027 option == DITHER_OPTION_SPATIAL8_FM6 ||
5028 option == DITHER_OPTION_TRUN10_SPATIAL8 ||
5029 option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
5030 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
5031 fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
5032 fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
5033 fmt_bit_depth->flags.RGB_RANDOM =
5034 (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
5035 } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
5036 option == DITHER_OPTION_SPATIAL10 ||
5037 option == DITHER_OPTION_SPATIAL10_FM8 ||
5038 option == DITHER_OPTION_SPATIAL10_FM6) {
5039 fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
5040 fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
5041 fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
5042 fmt_bit_depth->flags.RGB_RANDOM =
5043 (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
5044 }
5045
5046 if (option == DITHER_OPTION_SPATIAL6 ||
5047 option == DITHER_OPTION_SPATIAL8 ||
5048 option == DITHER_OPTION_SPATIAL10) {
5049 fmt_bit_depth->flags.FRAME_RANDOM = 0;
5050 } else {
5051 fmt_bit_depth->flags.FRAME_RANDOM = 1;
5052 }
5053
5054 /*
5055 * temporal dither
5056 */
5057 if (option == DITHER_OPTION_FM6 ||
5058 option == DITHER_OPTION_SPATIAL8_FM6 ||
5059 option == DITHER_OPTION_SPATIAL10_FM6 ||
5060 option == DITHER_OPTION_TRUN10_FM6 ||
5061 option == DITHER_OPTION_TRUN8_FM6 ||
5062 option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
5063 fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
5064 fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
5065 } else if (option == DITHER_OPTION_FM8 ||
5066 option == DITHER_OPTION_SPATIAL10_FM8 ||
5067 option == DITHER_OPTION_TRUN10_FM8) {
5068 fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
5069 fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
5070 } else if (option == DITHER_OPTION_FM10) {
5071 fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
5072 fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
5073 }
5074
5075 fmt_bit_depth->pixel_encoding = pixel_encoding;
5076 }
5077
5078 enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
5079 {
5080 if (dc == NULL || stream == NULL)
5081 return DC_ERROR_UNEXPECTED;
5082
5083 struct dc_link *link = stream->link;
5084 struct timing_generator *tg = dc->res_pool->timing_generators[0];
5085 enum dc_status res = DC_OK;
5086
5087 calculate_phy_pix_clks(stream);
5088
5089 if (!tg->funcs->validate_timing(tg, &stream->timing))
5090 res = DC_FAIL_CONTROLLER_VALIDATE;
5091
5092 if (res == DC_OK) {
5093 if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
5094 !link->link_enc->funcs->validate_output_with_stream(
5095 link->link_enc, stream))
5096 res = DC_FAIL_ENC_VALIDATE;
5097 }
5098
5099 /* TODO: validate audio ASIC caps, encoder */
5100
5101 if (res == DC_OK)
5102 res = dc->link_srv->validate_mode_timing(stream,
5103 link,
5104 &stream->timing);
5105
5106 return res;
5107 }
5108
5109 enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state)
5110 {
5111 enum dc_status res = DC_OK;
5112
5113 /* check if surface has invalid dimensions */
5114 if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 ||
5115 plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0)
5116 return DC_FAIL_SURFACE_VALIDATE;
5117
5118 /* TODO For now validates pixel format only */
5119 if (dc->res_pool->funcs->validate_plane)
5120 return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
5121
5122 return res;
5123 }
5124
5125 unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
5126 {
5127 switch (format) {
5128 case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
5129 return 8;
5130 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
5131 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
5132 return 12;
5133 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
5134 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
5135 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
5136 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
5137 return 16;
5138 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
5139 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
5140 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
5141 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
5142 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
5143 case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
5144 case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
5145 return 32;
5146 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
5147 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
5148 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
5149 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
5150 return 64;
5151 default:
5152 ASSERT_CRITICAL(false);
5153 return -1;
5154 }
5155 }
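
/* Return the highest sample rate (in Hz) advertised by a single audio mode,
 * falling back to the original default when no mode information is provided.
 */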
5156 static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
5157 {
5158 if (modes) {
5159 if (modes->sample_rates.rate.RATE_192)
5160 return 192000;
5161 if (modes->sample_rates.rate.RATE_176_4)
5162 return 176400;
5163 if (modes->sample_rates.rate.RATE_96)
5164 return 96000;
5165 if (modes->sample_rates.rate.RATE_88_2)
5166 return 88200;
5167 if (modes->sample_rates.rate.RATE_48)
5168 return 48000;
5169 if (modes->sample_rates.rate.RATE_44_1)
5170 return 44100;
5171 if (modes->sample_rates.rate.RATE_32)
5172 return 32000;
5173 }
5174 /* original logic when no audio info */
5175 return 441000;
5176 }
5177
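/* Summarize audio capability for bandwidth checks: record the highest sample
 * rate across all advertised modes and select the audio packet type
 * accordingly (0x2, AP = 0.25, by default; 0x9, AP = 1, when the maximum
 * sample rate exceeds 192 kHz).
 */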
5178 void get_audio_check(struct audio_info *aud_modes,
5179 struct audio_check *audio_chk)
5180 {
5181 unsigned int i;
5182 unsigned int max_sample_rate = 0;
5183
5184 if (aud_modes) {
5185 audio_chk->audio_packet_type = 0x2; /* audio sample packet, AP = 0.25 for layout 0, 1 for layout 1 */
5186
5187 audio_chk->max_audiosample_rate = 0;
5188 for (i = 0; i < aud_modes->mode_count; i++) {
5189 max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]);
5190 if (audio_chk->max_audiosample_rate < max_sample_rate)
5191 audio_chk->max_audiosample_rate = max_sample_rate;
5192 /* DTS takes the same as type 2: AP = 0.25 */
5193 }
5194 /* check which one takes more bandwidth */
5195 if (audio_chk->max_audiosample_rate > 192000)
5196 audio_chk->audio_packet_type = 0x9; /* AP = 1 */
5197 audio_chk->acat = 0; /* not supported */
5198 }
5199 }
5200
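/* Pick a DIO link encoder for temporary use without updating encoder resource
 * tracking: prefer the encoder already acquired for a flexibly mapped link,
 * otherwise the link's fixed engine id, and finally any free encoder in the
 * pool.
 */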
5201 struct link_encoder *get_temp_dio_link_enc(
5202 const struct resource_context *res_ctx,
5203 const struct resource_pool *const pool,
5204 const struct dc_link *link)
5205 {
5206 struct link_encoder *link_enc = NULL;
5207 int enc_index;
5208
5209 if (link->is_dig_mapping_flexible)
5210 enc_index = find_acquired_dio_link_enc_for_link(res_ctx, link);
5211 else
5212 enc_index = link->eng_id;
5213
5214 if (enc_index < 0)
5215 enc_index = find_free_dio_link_enc(res_ctx, link, pool);
5216
5217 if (enc_index >= 0)
5218 link_enc = pool->link_encoders[enc_index];
5219
5220 return link_enc;
5221 }
5222
5223 static struct hpo_dp_link_encoder *get_temp_hpo_dp_link_enc(
5224 const struct resource_context *res_ctx,
5225 const struct resource_pool *const pool,
5226 const struct dc_link *link)
5227 {
5228 struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL;
5229 int enc_index;
5230
5231 enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link);
5232
5233 if (enc_index < 0)
5234 enc_index = find_free_hpo_dp_link_enc(res_ctx, pool);
5235
5236 if (enc_index >= 0)
5237 hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index];
5238
5239 return hpo_dp_link_enc;
5240 }
5241
5242 bool get_temp_dp_link_res(struct dc_link *link,
5243 struct link_resource *link_res,
5244 struct dc_link_settings *link_settings)
5245 {
5246 const struct dc *dc = link->dc;
5247 const struct resource_context *res_ctx = &dc->current_state->res_ctx;
5248
5249 memset(link_res, 0, sizeof(*link_res));
5250
5251 if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
5252 link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx, dc->res_pool, link);
5253 if (!link_res->hpo_dp_link_enc)
5254 return false;
5255 } else if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
5256 dc->config.unify_link_enc_assignment) {
5257 link_res->dio_link_enc = get_temp_dio_link_enc(res_ctx,
5258 dc->res_pool, link);
5259 if (!link_res->dio_link_enc)
5260 return false;
5261 }
5262
5263 return true;
5264 }
5265
5266 void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
5267 struct dc_state *context)
5268 {
5269 int i, j;
5270 struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd;
5271
5272 /* If pipe backend is reset, need to reset pipe syncd status */
5273 for (i = 0; i < dc->res_pool->pipe_count; i++) {
5274 pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
5275 pipe_ctx = &context->res_ctx.pipe_ctx[i];
5276
5277 if (!resource_is_pipe_type(pipe_ctx_old, OTG_MASTER))
5278 continue;
5279
5280 if (!pipe_ctx->stream ||
5281 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
5282
5283 /* Reset all the syncd pipes from the disabled pipe */
5284 for (j = 0; j < dc->res_pool->pipe_count; j++) {
5285 pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j];
5286 if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) ||
5287 !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd))
5288 SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j);
5289 }
5290 }
5291 }
5292 }
5293
5294 void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
5295 struct dc_state *context,
5296 uint8_t disabled_master_pipe_idx)
5297 {
5298 int i;
5299 struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
5300
5301 pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
5302 if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
5303 !IS_PIPE_SYNCD_VALID(pipe_ctx))
5304 SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx);
5305
5306 /* for the disabled pipe, check whether any slave pipe is still synced to it and report an error */
5307 for (i = 0; i < dc->res_pool->pipe_count; i++) {
5308 pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
5309
5310 if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
5311 IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) {
5312 struct pipe_ctx *first_pipe = pipe_ctx_check;
5313
5314 while (first_pipe->prev_odm_pipe)
5315 first_pipe = first_pipe->prev_odm_pipe;
5316 /* When ODM combine is enabled, this case is expected. If the disabled pipe
5317 * is part of the ODM tree, then we should not print an error.
5318 */
5319 if (first_pipe->pipe_idx == disabled_master_pipe_idx)
5320 continue;
5321
5322 DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
5323 i, disabled_master_pipe_idx);
5324 }
5325 }
5326 }
5327
5328 void reset_sync_context_for_pipe(const struct dc *dc,
5329 struct dc_state *context,
5330 uint8_t pipe_idx)
5331 {
5332 int i;
5333 struct pipe_ctx *pipe_ctx_reset;
5334
5335 /* reset the otg sync context for the pipe and its slave pipes if any */
5336 for (i = 0; i < dc->res_pool->pipe_count; i++) {
5337 pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
5338
5339 if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
5340 IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
5341 SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
5342 }
5343 }
5344
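/* Map a transmitter enum to a PHY index. By default the mapping is linear
 * from TRANSMITTER_UNIPHY_A; Yellow Carp B0 (DCN 3.1) uses a remapped table
 * (UNIPHY C/D/E -> PHY 5/6/4).
 */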
5345 uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
5346 {
5347 /* TODO - get transmitter to phy idx mapping from DMUB */
5348 uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A;
5349
5350 if (dc->ctx->dce_version == DCN_VERSION_3_1 &&
5351 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
5352 switch (transmitter) {
5353 case TRANSMITTER_UNIPHY_A:
5354 phy_idx = 0;
5355 break;
5356 case TRANSMITTER_UNIPHY_B:
5357 phy_idx = 1;
5358 break;
5359 case TRANSMITTER_UNIPHY_C:
5360 phy_idx = 5;
5361 break;
5362 case TRANSMITTER_UNIPHY_D:
5363 phy_idx = 6;
5364 break;
5365 case TRANSMITTER_UNIPHY_E:
5366 phy_idx = 4;
5367 break;
5368 default:
5369 phy_idx = 0;
5370 break;
5371 }
5372 }
5373
5374 return phy_idx;
5375 }
5376
5377 const struct link_hwss *get_link_hwss(const struct dc_link *link,
5378 const struct link_resource *link_res)
5379 {
5380 /* Link_hwss is only accessible through this getter function, rather than
5381 * via pointers in dc, with the intent to protect against breaking polymorphism.
5382 */
5383 if (can_use_hpo_dp_link_hwss(link, link_res))
5384 /* TODO: some assume that if the decided link settings use the 128b/132b
5385 * channel coding format, hpo_dp_link_enc should be used.
5386 * Others believe that if hpo_dp_link_enc is available in the link
5387 * resource then hpo_dp_link_enc must be used. This binding between
5388 * hpo_dp_link_enc != NULL and the decided link settings is loosely coupled
5389 * with the premise that both the hpo_dp_link_enc pointer and the decided
5390 * link settings are determined by a single policy function such as
5391 * "decide_link_settings" in an upper layer. This "convention"
5392 * cannot be maintained and enforced at the current level.
5393 * Therefore a refactor is due so we can enforce a strong binding
5394 * between those two parameters at this level.
5395 *
5396 * To put it simply, we want to make the enforcement at a low level so that
5397 * we will not return a link hwss if the caller plans to do 8b/10b
5398 * with an HPO encoder. Or we can return a dummy implementation whose
5399 * functions do nothing.
5400 */
5401 return (requires_fixed_vs_pe_retimer_hpo_link_hwss(link) ?
5402 get_hpo_fixed_vs_pe_retimer_dp_link_hwss() : get_hpo_dp_link_hwss());
5403 else if (can_use_dpia_link_hwss(link, link_res))
5404 return get_dpia_link_hwss();
5405 else if (can_use_dio_link_hwss(link, link_res))
5406 return (requires_fixed_vs_pe_retimer_dio_link_hwss(link)) ?
5407 get_dio_fixed_vs_pe_retimer_link_hwss() : get_dio_link_hwss();
5408 else
5409 return get_virtual_link_hwss();
5410 }
5411
5412 bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
5413 {
5414 bool divisible = false;
5415 uint16_t h_blank_start = 0;
5416 uint16_t h_blank_end = 0;
5417
5418 if (stream) {
5419 h_blank_start = stream->timing.h_total - stream->timing.h_front_porch;
5420 h_blank_end = h_blank_start - stream->timing.h_addressable;
5421
5422 /* HTOTAL, Hblank start/end, and Hsync start/end all must be
5423 * divisible by 2 in order for the horizontal timing params
5424 * to be considered divisible by 2. Hsync start is always 0.
5425 */
5426 divisible = (stream->timing.h_total % 2 == 0) &&
5427 (h_blank_start % 2 == 0) &&
5428 (h_blank_end % 2 == 0) &&
5429 (stream->timing.h_sync_width % 2 == 0);
5430 }
5431 return divisible;
5432 }
5433
5434 /* This interface is deprecated for new DCNs. It is replaced by the following
5435 * new interfaces. These two interfaces encapsulate pipe selection priority
5436 * with a DCN-specific minimum hardware transition optimization algorithm. With
5437 * the new interfaces the caller no longer needs to know the implementation
5438 * details of a pipe topology.
5439 *
5440 * resource_update_pipes_with_odm_slice_count
5441 * resource_update_pipes_with_mpc_slice_count
5442 *
5443 */
5444 bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
5445 const struct dc *dc,
5446 struct dc_state *state,
5447 struct pipe_ctx *pri_pipe,
5448 struct pipe_ctx *sec_pipe,
5449 bool odm)
5450 {
5451 int pipe_idx = sec_pipe->pipe_idx;
5452 struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
5453 const struct resource_pool *pool = dc->res_pool;
5454
5455 sec_top = sec_pipe->top_pipe;
5456 sec_bottom = sec_pipe->bottom_pipe;
5457 sec_next = sec_pipe->next_odm_pipe;
5458 sec_prev = sec_pipe->prev_odm_pipe;
5459
5460 if (pri_pipe == NULL)
5461 return false;
5462
5463 *sec_pipe = *pri_pipe;
5464
5465 sec_pipe->top_pipe = sec_top;
5466 sec_pipe->bottom_pipe = sec_bottom;
5467 sec_pipe->next_odm_pipe = sec_next;
5468 sec_pipe->prev_odm_pipe = sec_prev;
5469
5470 sec_pipe->pipe_idx = pipe_idx;
5471 sec_pipe->plane_res.mi = pool->mis[pipe_idx];
5472 sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
5473 sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
5474 sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
5475 sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
5476 sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
5477 sec_pipe->stream_res.dsc = NULL;
5478 if (odm) {
5479 if (!sec_pipe->top_pipe)
5480 sec_pipe->stream_res.opp = pool->opps[pipe_idx];
5481 else
5482 sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
5483 if (sec_pipe->stream->timing.flags.DSC == 1) {
5484 #if defined(CONFIG_DRM_AMD_DC_FP)
5485 dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, sec_pipe->stream_res.opp->inst);
5486 #endif
5487 ASSERT(sec_pipe->stream_res.dsc);
5488 if (sec_pipe->stream_res.dsc == NULL)
5489 return false;
5490 }
5491 #if defined(CONFIG_DRM_AMD_DC_FP)
5492 dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
5493 #endif
5494 }
5495
5496 return true;
5497 }
5498
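/* Test-harness helper that retargets DP stream/link encoder resources for the
 * pipe based on the requested link encoding: 128b/132b acquires HPO DP stream
 * and link encoders, while 8b/10b releases any HPO encoders previously held.
 * A DIO link encoder is also acquired when unified link encoder assignment is
 * enabled.
 */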
5499 enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
5500 struct dc_state *context,
5501 struct pipe_ctx *pipe_ctx)
5502 {
5503 if (dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
5504 if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
5505 pipe_ctx->stream_res.hpo_dp_stream_enc =
5506 find_first_free_match_hpo_dp_stream_enc_for_link(
5507 &context->res_ctx, dc->res_pool, pipe_ctx->stream);
5508
5509 if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
5510 return DC_NO_STREAM_ENC_RESOURCE;
5511
5512 update_hpo_dp_stream_engine_usage(
5513 &context->res_ctx, dc->res_pool,
5514 pipe_ctx->stream_res.hpo_dp_stream_enc,
5515 true);
5516 }
5517
5518 if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) {
5519 if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream))
5520 return DC_NO_LINK_ENC_RESOURCE;
5521 }
5522 } else {
5523 if (pipe_ctx->stream_res.hpo_dp_stream_enc) {
5524 update_hpo_dp_stream_engine_usage(
5525 &context->res_ctx, dc->res_pool,
5526 pipe_ctx->stream_res.hpo_dp_stream_enc,
5527 false);
5528 pipe_ctx->stream_res.hpo_dp_stream_enc = NULL;
5529 }
5530 if (pipe_ctx->link_res.hpo_dp_link_enc)
5531 remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream);
5532 }
5533
5534 if (pipe_ctx->link_res.dio_link_enc == NULL && dc->config.unify_link_enc_assignment)
5535 if (!add_dio_link_enc_to_ctx(dc, context, dc->res_pool, pipe_ctx, pipe_ctx->stream))
5536 return DC_NO_LINK_ENC_RESOURCE;
5537
5538 return DC_OK;
5539 }
5540
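/* Decide whether SubVP requires falling back to a SW cursor for this stream:
 * true when the stream is a SubVP high-refresh candidate (unless that check
 * is disabled via debug option), or for tall timings (>= 2880 lines
 * single-stream, >= 1080 lines multi-stream) refreshing below 120 Hz.
 */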
5541 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
5542 {
5543 if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
5544 return true;
5545 if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
5546 ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
5547 return true;
5548 else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
5549 ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
5550 return true;
5551
5552 return false;
5553 }
5554
5555 struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
5556 {
5557 return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
5558 }
5559
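/* Populate the DML2 configuration with the common DC callbacks it needs to
 * query and manipulate pipe topology (scaling params, ODM/MPC slice helpers,
 * OPP/OTG lookups) as well as the SubVP phantom stream/plane state helpers.
 */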
5560 void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
5561 {
5562 dml2_options->callbacks.dc = dc;
5563 dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params;
5564 dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params;
5565 dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy;
5566 dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count;
5567 dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count;
5568 dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index;
5569 dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count;
5570 dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index;
5571 dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count;
5572 dml2_options->callbacks.get_opp_head = &resource_get_opp_head;
5573 dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream;
5574 dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master;
5575 dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane;
5576 dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status;
5577 dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id;
5578 dml2_options->callbacks.get_max_flickerless_instant_vtotal_increase = &dc_stream_get_max_flickerless_instant_vtotal_increase;
5579
5580 dml2_options->svp_pstate.callbacks.dc = dc;
5581 dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
5582 dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
5583 dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
5584 dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
5585 dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
5586 dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
5587 dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
5588 dml2_options->svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
5589 dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
5590 dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
5591 dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
5592 dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
5593 dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes;
5594 dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes;
5595 }
5596
5597 /* Returns number of DET segments allocated for a given OTG_MASTER pipe */
5598 int resource_calculate_det_for_stream(struct dc_state *state, struct pipe_ctx *otg_master)
5599 {
5600 struct pipe_ctx *opp_heads[MAX_PIPES];
5601 struct pipe_ctx *dpp_pipes[MAX_PIPES];
5602
5603 int dpp_count = 0;
5604 int det_segments = 0;
5605
5606 if (!otg_master->stream)
5607 return 0;
5608
5609 int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
5610 &state->res_ctx, opp_heads);
5611
5612 for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
5613 if (opp_heads[slice_idx]->plane_state) {
5614 dpp_count = resource_get_dpp_pipes_for_opp_head(
5615 opp_heads[slice_idx],
5616 &state->res_ctx,
5617 dpp_pipes);
5618 for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++)
5619 det_segments += dpp_pipes[dpp_idx]->hubp_regs.det_size;
5620 }
5621 }
5622 return det_segments;
5623 }
5624
5625 bool resource_is_hpo_acquired(struct dc_state *context)
5626 {
5627 int i;
5628
5629 for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
5630 if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) {
5631 return true;
5632 }
5633 }
5634
5635 return false;
5636 }
5637