1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 * Copyright 2019 Raptor Engineering, LLC
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27 #include "dm_services.h"
28 #include "dc.h"
29
30 #include "dcn20/dcn20_init.h"
31
32 #include "resource.h"
33 #include "include/irq_service_interface.h"
34 #include "dcn20/dcn20_resource.h"
35
36 #include "dml/dcn20/dcn20_fpu.h"
37
38 #include "dcn10/dcn10_hubp.h"
39 #include "dcn10/dcn10_ipp.h"
40 #include "dcn20/dcn20_hubbub.h"
41 #include "dcn20/dcn20_mpc.h"
42 #include "dcn20/dcn20_hubp.h"
43 #include "irq/dcn20/irq_service_dcn20.h"
44 #include "dcn20/dcn20_dpp.h"
45 #include "dcn20/dcn20_optc.h"
46 #include "dcn20/dcn20_hwseq.h"
47 #include "dce110/dce110_hwseq.h"
48 #include "dcn10/dcn10_resource.h"
49 #include "dcn20/dcn20_opp.h"
50
51 #include "dcn20/dcn20_dsc.h"
52
53 #include "dcn20/dcn20_link_encoder.h"
54 #include "dcn20/dcn20_stream_encoder.h"
55 #include "dce/dce_clock_source.h"
56 #include "dce/dce_audio.h"
57 #include "dce/dce_hwseq.h"
58 #include "dio/virtual/virtual_stream_encoder.h"
59 #include "dce110/dce110_resource.h"
60 #include "dml/display_mode_vba.h"
61 #include "dcn20/dcn20_dccg.h"
62 #include "dcn20/dcn20_vmid.h"
63 #include "dce/dce_panel_cntl.h"
64
65 #include "dcn20/dcn20_dwb.h"
66 #include "dcn20/dcn20_mmhubbub.h"
67
68 #include "navi10_ip_offset.h"
69
70 #include "dcn/dcn_2_0_0_offset.h"
71 #include "dcn/dcn_2_0_0_sh_mask.h"
72 #include "dpcs/dpcs_2_0_0_offset.h"
73 #include "dpcs/dpcs_2_0_0_sh_mask.h"
74
75 #include "nbio/nbio_2_3_offset.h"
76
77 #include "mmhub/mmhub_2_0_0_offset.h"
78 #include "mmhub/mmhub_2_0_0_sh_mask.h"
79
80 #include "reg_helper.h"
81 #include "dce/dce_abm.h"
82 #include "dce/dce_dmcu.h"
83 #include "dce/dce_aux.h"
84 #include "dce/dce_i2c.h"
85 #include "dio/dcn10/dcn10_dio.h"
86 #include "vm_helper.h"
87
88 #include "link_enc_cfg.h"
89 #include "link_service.h"
90
91 #define DC_LOGGER_INIT(logger)
92
93 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
94 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
95 #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
96 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
97 #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
98 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
99 #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
100 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
101 #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
102 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
103 #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
104 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
105 #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
106 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
107 #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
108 #endif
109
110
/* Indices into the clock-source arrays below; one entry per PLL (A-F),
 * with DCN20_CLK_SRC_TOTAL giving the array length. */
enum dcn20_clk_src_array_id {
	DCN20_CLK_SRC_PLL0,
	DCN20_CLK_SRC_PLL1,
	DCN20_CLK_SRC_PLL2,
	DCN20_CLK_SRC_PLL3,
	DCN20_CLK_SRC_PLL4,
	DCN20_CLK_SRC_PLL5,
	DCN20_CLK_SRC_TOTAL
};
120
121 /* begin *********************
122 * macros to expend register list macro defined in HW object header file */
123
124 /* DCN */
125 #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
126
127 #define BASE(seg) BASE_INNER(seg)
128
129 #define SR(reg_name)\
130 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
131 mm ## reg_name
132
133 #define SRI(reg_name, block, id)\
134 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
135 mm ## block ## id ## _ ## reg_name
136
137 #define SRI2_DWB(reg_name, block, id)\
138 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
139 mm ## reg_name
140 #define SF_DWB(reg_name, field_name, post_fix)\
141 .field_name = reg_name ## __ ## field_name ## post_fix
142
143 #define SF_DWB2(reg_name, block, id, field_name, post_fix) \
144 .field_name = reg_name ## __ ## field_name ## post_fix
145
146 #define SRIR(var_name, reg_name, block, id)\
147 .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
148 mm ## block ## id ## _ ## reg_name
149
150 #define SRII(reg_name, block, id)\
151 .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
152 mm ## block ## id ## _ ## reg_name
153
154 #define DCCG_SRII(reg_name, block, id)\
155 .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
156 mm ## block ## id ## _ ## reg_name
157
158 #define VUPDATE_SRII(reg_name, block, id)\
159 .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
160 mm ## reg_name ## _ ## block ## id
161
162 /* NBIO */
163 #define NBIO_BASE_INNER(seg) \
164 NBIO_BASE__INST0_SEG ## seg
165
166 #define NBIO_BASE(seg) \
167 NBIO_BASE_INNER(seg)
168
169 #define NBIO_SR(reg_name)\
170 .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
171 mm ## reg_name
172
173 /* MMHUB */
174 #define MMHUB_BASE_INNER(seg) \
175 MMHUB_BASE__INST0_SEG ## seg
176
177 #define MMHUB_BASE(seg) \
178 MMHUB_BASE_INNER(seg)
179
180 #define MMHUB_SR(reg_name)\
181 .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
182 mmMM ## reg_name
183
184 static const struct bios_registers bios_regs = {
185 NBIO_SR(BIOS_SCRATCH_3),
186 NBIO_SR(BIOS_SCRATCH_6)
187 };
188
189 #define clk_src_regs(index, pllid)\
190 [index] = {\
191 CS_COMMON_REG_LIST_DCN2_0(index, pllid),\
192 }
193
194 static const struct dce110_clk_src_regs clk_src_regs[] = {
195 clk_src_regs(0, A),
196 clk_src_regs(1, B),
197 clk_src_regs(2, C),
198 clk_src_regs(3, D),
199 clk_src_regs(4, E),
200 clk_src_regs(5, F)
201 };
202
203 static const struct dce110_clk_src_shift cs_shift = {
204 CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
205 };
206
207 static const struct dce110_clk_src_mask cs_mask = {
208 CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
209 };
210
211 static const struct dce_dmcu_registers dmcu_regs = {
212 DMCU_DCN10_REG_LIST()
213 };
214
215 static const struct dce_dmcu_shift dmcu_shift = {
216 DMCU_MASK_SH_LIST_DCN10(__SHIFT)
217 };
218
219 static const struct dce_dmcu_mask dmcu_mask = {
220 DMCU_MASK_SH_LIST_DCN10(_MASK)
221 };
222
223 static const struct dce_abm_registers abm_regs = {
224 ABM_DCN20_REG_LIST()
225 };
226
227 static const struct dce_abm_shift abm_shift = {
228 ABM_MASK_SH_LIST_DCN20(__SHIFT)
229 };
230
231 static const struct dce_abm_mask abm_mask = {
232 ABM_MASK_SH_LIST_DCN20(_MASK)
233 };
234
235 #define audio_regs(id)\
236 [id] = {\
237 AUD_COMMON_REG_LIST(id)\
238 }
239
240 static const struct dce_audio_registers audio_regs[] = {
241 audio_regs(0),
242 audio_regs(1),
243 audio_regs(2),
244 audio_regs(3),
245 audio_regs(4),
246 audio_regs(5),
247 audio_regs(6),
248 };
249
250 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
251 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
252 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
253 AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
254
255 static const struct dce_audio_shift audio_shift = {
256 DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
257 };
258
259 static const struct dce_audio_mask audio_mask = {
260 DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
261 };
262
263 #define stream_enc_regs(id)\
264 [id] = {\
265 SE_DCN2_REG_LIST(id)\
266 }
267
268 static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
269 stream_enc_regs(0),
270 stream_enc_regs(1),
271 stream_enc_regs(2),
272 stream_enc_regs(3),
273 stream_enc_regs(4),
274 stream_enc_regs(5),
275 };
276
277 static const struct dcn10_stream_encoder_shift se_shift = {
278 SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
279 };
280
281 static const struct dcn10_stream_encoder_mask se_mask = {
282 SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
283 };
284
285
286 #define aux_regs(id)\
287 [id] = {\
288 DCN2_AUX_REG_LIST(id)\
289 }
290
291 static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
292 aux_regs(0),
293 aux_regs(1),
294 aux_regs(2),
295 aux_regs(3),
296 aux_regs(4),
297 aux_regs(5)
298 };
299
300 #define hpd_regs(id)\
301 [id] = {\
302 HPD_REG_LIST(id)\
303 }
304
305 static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
306 hpd_regs(0),
307 hpd_regs(1),
308 hpd_regs(2),
309 hpd_regs(3),
310 hpd_regs(4),
311 hpd_regs(5)
312 };
313
314 #define link_regs(id, phyid)\
315 [id] = {\
316 LE_DCN10_REG_LIST(id), \
317 UNIPHY_DCN2_REG_LIST(phyid), \
318 DPCS_DCN2_REG_LIST(id), \
319 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
320 }
321
322 static const struct dcn10_link_enc_registers link_enc_regs[] = {
323 link_regs(0, A),
324 link_regs(1, B),
325 link_regs(2, C),
326 link_regs(3, D),
327 link_regs(4, E),
328 link_regs(5, F)
329 };
330
331 static const struct dcn10_link_enc_shift le_shift = {
332 LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\
333 DPCS_DCN2_MASK_SH_LIST(__SHIFT)
334 };
335
336 static const struct dcn10_link_enc_mask le_mask = {
337 LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\
338 DPCS_DCN2_MASK_SH_LIST(_MASK)
339 };
340
341 static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
342 { DCN_PANEL_CNTL_REG_LIST() }
343 };
344
345 static const struct dce_panel_cntl_shift panel_cntl_shift = {
346 DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
347 };
348
349 static const struct dce_panel_cntl_mask panel_cntl_mask = {
350 DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
351 };
352
353 #define ipp_regs(id)\
354 [id] = {\
355 IPP_REG_LIST_DCN20(id),\
356 }
357
358 static const struct dcn10_ipp_registers ipp_regs[] = {
359 ipp_regs(0),
360 ipp_regs(1),
361 ipp_regs(2),
362 ipp_regs(3),
363 ipp_regs(4),
364 ipp_regs(5),
365 };
366
367 static const struct dcn10_ipp_shift ipp_shift = {
368 IPP_MASK_SH_LIST_DCN20(__SHIFT)
369 };
370
371 static const struct dcn10_ipp_mask ipp_mask = {
372 IPP_MASK_SH_LIST_DCN20(_MASK),
373 };
374
375 #define opp_regs(id)\
376 [id] = {\
377 OPP_REG_LIST_DCN20(id),\
378 }
379
380 static const struct dcn20_opp_registers opp_regs[] = {
381 opp_regs(0),
382 opp_regs(1),
383 opp_regs(2),
384 opp_regs(3),
385 opp_regs(4),
386 opp_regs(5),
387 };
388
389 static const struct dcn20_opp_shift opp_shift = {
390 OPP_MASK_SH_LIST_DCN20(__SHIFT)
391 };
392
393 static const struct dcn20_opp_mask opp_mask = {
394 OPP_MASK_SH_LIST_DCN20(_MASK)
395 };
396
397 #define aux_engine_regs(id)\
398 [id] = {\
399 AUX_COMMON_REG_LIST0(id), \
400 .AUXN_IMPCAL = 0, \
401 .AUXP_IMPCAL = 0, \
402 .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
403 }
404
405 static const struct dce110_aux_registers aux_engine_regs[] = {
406 aux_engine_regs(0),
407 aux_engine_regs(1),
408 aux_engine_regs(2),
409 aux_engine_regs(3),
410 aux_engine_regs(4),
411 aux_engine_regs(5)
412 };
413
414 #define tf_regs(id)\
415 [id] = {\
416 TF_REG_LIST_DCN20(id),\
417 TF_REG_LIST_DCN20_COMMON_APPEND(id),\
418 }
419
420 static const struct dcn2_dpp_registers tf_regs[] = {
421 tf_regs(0),
422 tf_regs(1),
423 tf_regs(2),
424 tf_regs(3),
425 tf_regs(4),
426 tf_regs(5),
427 };
428
429 static const struct dcn2_dpp_shift tf_shift = {
430 TF_REG_LIST_SH_MASK_DCN20(__SHIFT),
431 TF_DEBUG_REG_LIST_SH_DCN20
432 };
433
434 static const struct dcn2_dpp_mask tf_mask = {
435 TF_REG_LIST_SH_MASK_DCN20(_MASK),
436 TF_DEBUG_REG_LIST_MASK_DCN20
437 };
438
439 #define dwbc_regs_dcn2(id)\
440 [id] = {\
441 DWBC_COMMON_REG_LIST_DCN2_0(id),\
442 }
443
444 static const struct dcn20_dwbc_registers dwbc20_regs[] = {
445 dwbc_regs_dcn2(0),
446 };
447
448 static const struct dcn20_dwbc_shift dwbc20_shift = {
449 DWBC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
450 };
451
452 static const struct dcn20_dwbc_mask dwbc20_mask = {
453 DWBC_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
454 };
455
456 #define mcif_wb_regs_dcn2(id)\
457 [id] = {\
458 MCIF_WB_COMMON_REG_LIST_DCN2_0(id),\
459 }
460
461 static const struct dcn20_mmhubbub_registers mcif_wb20_regs[] = {
462 mcif_wb_regs_dcn2(0),
463 };
464
465 static const struct dcn20_mmhubbub_shift mcif_wb20_shift = {
466 MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
467 };
468
469 static const struct dcn20_mmhubbub_mask mcif_wb20_mask = {
470 MCIF_WB_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
471 };
472
473 static const struct dcn20_mpc_registers mpc_regs = {
474 MPC_REG_LIST_DCN2_0(0),
475 MPC_REG_LIST_DCN2_0(1),
476 MPC_REG_LIST_DCN2_0(2),
477 MPC_REG_LIST_DCN2_0(3),
478 MPC_REG_LIST_DCN2_0(4),
479 MPC_REG_LIST_DCN2_0(5),
480 MPC_OUT_MUX_REG_LIST_DCN2_0(0),
481 MPC_OUT_MUX_REG_LIST_DCN2_0(1),
482 MPC_OUT_MUX_REG_LIST_DCN2_0(2),
483 MPC_OUT_MUX_REG_LIST_DCN2_0(3),
484 MPC_OUT_MUX_REG_LIST_DCN2_0(4),
485 MPC_OUT_MUX_REG_LIST_DCN2_0(5),
486 MPC_DBG_REG_LIST_DCN2_0()
487 };
488
489 static const struct dcn20_mpc_shift mpc_shift = {
490 MPC_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT),
491 MPC_DEBUG_REG_LIST_SH_DCN20
492 };
493
494 static const struct dcn20_mpc_mask mpc_mask = {
495 MPC_COMMON_MASK_SH_LIST_DCN2_0(_MASK),
496 MPC_DEBUG_REG_LIST_MASK_DCN20
497 };
498
499 #define tg_regs(id)\
500 [id] = {TG_COMMON_REG_LIST_DCN2_0(id)}
501
502
503 static const struct dcn_optc_registers tg_regs[] = {
504 tg_regs(0),
505 tg_regs(1),
506 tg_regs(2),
507 tg_regs(3),
508 tg_regs(4),
509 tg_regs(5)
510 };
511
512 static const struct dcn_optc_shift tg_shift = {
513 TG_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
514 };
515
516 static const struct dcn_optc_mask tg_mask = {
517 TG_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
518 };
519
520 #define hubp_regs(id)\
521 [id] = {\
522 HUBP_REG_LIST_DCN20(id)\
523 }
524
525 static const struct dcn_hubp2_registers hubp_regs[] = {
526 hubp_regs(0),
527 hubp_regs(1),
528 hubp_regs(2),
529 hubp_regs(3),
530 hubp_regs(4),
531 hubp_regs(5)
532 };
533
534 static const struct dcn_hubp2_shift hubp_shift = {
535 HUBP_MASK_SH_LIST_DCN20(__SHIFT)
536 };
537
538 static const struct dcn_hubp2_mask hubp_mask = {
539 HUBP_MASK_SH_LIST_DCN20(_MASK)
540 };
541
542 static const struct dcn_hubbub_registers hubbub_reg = {
543 HUBBUB_REG_LIST_DCN20(0)
544 };
545
546 static const struct dcn_hubbub_shift hubbub_shift = {
547 HUBBUB_MASK_SH_LIST_DCN20(__SHIFT)
548 };
549
550 static const struct dcn_hubbub_mask hubbub_mask = {
551 HUBBUB_MASK_SH_LIST_DCN20(_MASK)
552 };
553
554 static const struct dcn_dio_registers dio_regs = {
555 DIO_REG_LIST_DCN10()
556 };
557
558 #define DIO_MASK_SH_LIST(mask_sh)\
559 HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh)
560
561 static const struct dcn_dio_shift dio_shift = {
562 DIO_MASK_SH_LIST(__SHIFT)
563 };
564
565 static const struct dcn_dio_mask dio_mask = {
566 DIO_MASK_SH_LIST(_MASK)
567 };
568
dcn20_dio_create(struct dc_context * ctx)569 static struct dio *dcn20_dio_create(struct dc_context *ctx)
570 {
571 struct dcn10_dio *dio10 = kzalloc_obj(struct dcn10_dio);
572
573 if (!dio10)
574 return NULL;
575
576 dcn10_dio_construct(dio10, ctx, &dio_regs, &dio_shift, &dio_mask);
577
578 return &dio10->base;
579 }
580
581 #define vmid_regs(id)\
582 [id] = {\
583 DCN20_VMID_REG_LIST(id)\
584 }
585
586 static const struct dcn_vmid_registers vmid_regs[] = {
587 vmid_regs(0),
588 vmid_regs(1),
589 vmid_regs(2),
590 vmid_regs(3),
591 vmid_regs(4),
592 vmid_regs(5),
593 vmid_regs(6),
594 vmid_regs(7),
595 vmid_regs(8),
596 vmid_regs(9),
597 vmid_regs(10),
598 vmid_regs(11),
599 vmid_regs(12),
600 vmid_regs(13),
601 vmid_regs(14),
602 vmid_regs(15)
603 };
604
605 static const struct dcn20_vmid_shift vmid_shifts = {
606 DCN20_VMID_MASK_SH_LIST(__SHIFT)
607 };
608
609 static const struct dcn20_vmid_mask vmid_masks = {
610 DCN20_VMID_MASK_SH_LIST(_MASK)
611 };
612
613 static const struct dce110_aux_registers_shift aux_shift = {
614 DCN_AUX_MASK_SH_LIST(__SHIFT)
615 };
616
617 static const struct dce110_aux_registers_mask aux_mask = {
618 DCN_AUX_MASK_SH_LIST(_MASK)
619 };
620
map_transmitter_id_to_phy_instance(enum transmitter transmitter)621 static int map_transmitter_id_to_phy_instance(
622 enum transmitter transmitter)
623 {
624 switch (transmitter) {
625 case TRANSMITTER_UNIPHY_A:
626 return 0;
627 break;
628 case TRANSMITTER_UNIPHY_B:
629 return 1;
630 break;
631 case TRANSMITTER_UNIPHY_C:
632 return 2;
633 break;
634 case TRANSMITTER_UNIPHY_D:
635 return 3;
636 break;
637 case TRANSMITTER_UNIPHY_E:
638 return 4;
639 break;
640 case TRANSMITTER_UNIPHY_F:
641 return 5;
642 break;
643 default:
644 ASSERT(0);
645 return 0;
646 }
647 }
648
649 #define dsc_regsDCN20(id)\
650 [id] = {\
651 DSC_REG_LIST_DCN20(id)\
652 }
653
654 static const struct dcn20_dsc_registers dsc_regs[] = {
655 dsc_regsDCN20(0),
656 dsc_regsDCN20(1),
657 dsc_regsDCN20(2),
658 dsc_regsDCN20(3),
659 dsc_regsDCN20(4),
660 dsc_regsDCN20(5)
661 };
662
663 static const struct dcn20_dsc_shift dsc_shift = {
664 DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
665 };
666
667 static const struct dcn20_dsc_mask dsc_mask = {
668 DSC_REG_LIST_SH_MASK_DCN20(_MASK)
669 };
670
671 static const struct dccg_registers dccg_regs = {
672 DCCG_REG_LIST_DCN2()
673 };
674
675 static const struct dccg_shift dccg_shift = {
676 DCCG_MASK_SH_LIST_DCN2(__SHIFT)
677 };
678
679 static const struct dccg_mask dccg_mask = {
680 DCCG_MASK_SH_LIST_DCN2(_MASK)
681 };
682
/* Hardware block counts for Navi10 (full DCN 2.0 configuration). */
static const struct resource_caps res_cap_nv10 = {
		.num_timing_generator = 6,
		.num_opp = 6,
		.num_video_plane = 6,
		.num_audio = 7,
		.num_stream_encoder = 6,
		.num_pll = 6,
		.num_dwb = 1,
		.num_ddc = 6,
		.num_vmid = 16,
		.num_dsc = 6,
};
695
/* Capabilities advertised for every DCN 2.0 universal plane. */
static const struct dc_plane_cap plane_cap = {
	.type = DC_PLANE_TYPE_DCN_UNIVERSAL,
	.per_pixel_alpha = true,

	.pixel_format_support = {
			.argb8888 = true,
			.nv12 = true,
			.fp16 = true,
			.p010 = true
	},

	/* Scale factors appear to be in 1/1000 units (16000 = 16x upscale,
	 * 250 = 1:4 downscale); fp16 = 1 i.e. no scaling for fp16 surfaces. */
	.max_upscale_factor = {
			.argb8888 = 16000,
			.nv12 = 16000,
			.fp16 = 1
	},

	.max_downscale_factor = {
			.argb8888 = 250,
			.nv12 = 250,
			.fp16 = 1
	},
	/* NOTE(review): positional initializers for the two struct fields that
	 * follow max_downscale_factor — confirm against struct dc_plane_cap. */
	16,
	16
};
/* Hardware block counts for Navi14 (cut-down part: 5 pipes/encoders/PLLs). */
static const struct resource_caps res_cap_nv14 = {
		.num_timing_generator = 5,
		.num_opp = 5,
		.num_video_plane = 5,
		.num_audio = 6,
		.num_stream_encoder = 5,
		.num_pll = 5,
		.num_dwb = 1,
		.num_ddc = 5,
		.num_vmid = 16,
		.num_dsc = 5,
};
733
/* Default debug/tuning options applied when the driver constructs the pool. */
static const struct dc_debug_options debug_defaults_drv = {
		.disable_dmcu = false,
		.force_abm_enable = false,
		.clock_trace = true,
		.disable_pplib_clock_request = true,
		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
		.force_single_disp_pipe_split = false,
		.disable_dcc = DCC_ENABLE,
		.vsr_support = true,
		.performance_trace = false,
		.max_downscale_src_width = 5120,/*upto 5K*/
		.disable_pplib_wm_range = false,
		.scl_reset_length10 = true,
		.sanity_checks = false,
		/* effectively disables the underflow assert delay */
		.underflow_assert_delay_us = 0xFFFFFFFF,
		.using_dml2 = false,
};
751
/* Default validation/check configuration for this ASIC family. */
static const struct dc_check_config config_defaults = {
		.enable_legacy_fast_update = true,
};
755
dcn20_dpp_destroy(struct dpp ** dpp)756 void dcn20_dpp_destroy(struct dpp **dpp)
757 {
758 kfree(TO_DCN20_DPP(*dpp));
759 *dpp = NULL;
760 }
761
/* Allocate and construct a DCN2.0 DPP for pipe `inst`; NULL on failure. */
struct dpp *dcn20_dpp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn20_dpp *dpp2 = kzalloc_obj(struct dcn20_dpp);

	if (dpp2 == NULL)
		return NULL;

	if (!dpp2_construct(dpp2, ctx, inst,
			&tf_regs[inst], &tf_shift, &tf_mask)) {
		BREAK_TO_DEBUGGER();
		kfree(dpp2);
		return NULL;
	}

	return &dpp2->base;
}
780
/* Allocate and construct an IPP (dcn10 struct, dcn20 constructor). */
struct input_pixel_processor *dcn20_ipp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn10_ipp *ipp10 = kzalloc_obj(struct dcn10_ipp);

	if (ipp10 == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn20_ipp_construct(ipp10, ctx, inst,
			&ipp_regs[inst], &ipp_shift, &ipp_mask);

	return &ipp10->base;
}
796
797
/* Allocate and construct an OPP for the given instance; NULL on OOM. */
struct output_pixel_processor *dcn20_opp_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_opp *opp20 = kzalloc_obj(struct dcn20_opp);

	if (opp20 == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dcn20_opp_construct(opp20, ctx, inst,
			&opp_regs[inst], &opp_shift, &opp_mask);

	return &opp20->base;
}
813
/* Allocate and construct an AUX engine (dce110 implementation). */
struct dce_aux *dcn20_aux_engine_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct aux_engine_dce110 *engine = kzalloc_obj(struct aux_engine_dce110);

	if (engine == NULL)
		return NULL;

	dce110_aux_engine_construct(engine, ctx, inst,
			SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
			&aux_engine_regs[inst],
			&aux_mask,
			&aux_shift,
			ctx->dc->caps.extended_aux_timeout_support);

	return &engine->base;
}
833 #define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
834
835 static const struct dce_i2c_registers i2c_hw_regs[] = {
836 i2c_inst_regs(1),
837 i2c_inst_regs(2),
838 i2c_inst_regs(3),
839 i2c_inst_regs(4),
840 i2c_inst_regs(5),
841 i2c_inst_regs(6),
842 };
843
844 static const struct dce_i2c_shift i2c_shifts = {
845 I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
846 };
847
848 static const struct dce_i2c_mask i2c_masks = {
849 I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
850 };
851
/* Allocate and construct a hardware I2C engine for DDC channel `inst`. */
struct dce_i2c_hw *dcn20_i2c_hw_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dce_i2c_hw *i2c_hw = kzalloc_obj(struct dce_i2c_hw);

	if (i2c_hw == NULL)
		return NULL;

	dcn2_i2c_hw_construct(i2c_hw, ctx, inst,
			&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);

	return i2c_hw;
}
dcn20_mpc_create(struct dc_context * ctx)867 struct mpc *dcn20_mpc_create(struct dc_context *ctx)
868 {
869 struct dcn20_mpc *mpc20 = kzalloc_obj(struct dcn20_mpc);
870
871 if (!mpc20)
872 return NULL;
873
874 dcn20_mpc_construct(mpc20, ctx,
875 &mpc_regs,
876 &mpc_shift,
877 &mpc_mask,
878 6);
879
880 return &mpc20->base;
881 }
882
dcn20_hubbub_create(struct dc_context * ctx)883 struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
884 {
885 int i;
886 struct dcn20_hubbub *hubbub = kzalloc_obj(struct dcn20_hubbub);
887
888 if (!hubbub)
889 return NULL;
890
891 hubbub2_construct(hubbub, ctx,
892 &hubbub_reg,
893 &hubbub_shift,
894 &hubbub_mask);
895
896 for (i = 0; i < res_cap_nv10.num_vmid; i++) {
897 struct dcn20_vmid *vmid = &hubbub->vmid[i];
898
899 vmid->ctx = ctx;
900
901 vmid->regs = &vmid_regs[i];
902 vmid->shifts = &vmid_shifts;
903 vmid->masks = &vmid_masks;
904 }
905
906 return &hubbub->base;
907 }
908
/* Allocate an OPTC wrapper, point it at instance registers, init dcn20 TG. */
struct timing_generator *dcn20_timing_generator_create(
	struct dc_context *ctx,
	uint32_t instance)
{
	struct optc *optc1 = kzalloc_obj(struct optc);

	if (optc1 == NULL)
		return NULL;

	optc1->base.inst = instance;
	optc1->base.ctx = ctx;
	optc1->tg_regs = &tg_regs[instance];
	optc1->tg_shift = &tg_shift;
	optc1->tg_mask = &tg_mask;

	dcn20_timing_generator_init(optc1);

	return &optc1->base;
}
930
/* Link-encoder feature set shared by all DCN 2.0 link encoders:
 * HDMI deep color up to 12bpc at 600 MHz, YCbCr 4:2:0 on both HDMI and DP,
 * FEC, and DP training patterns through TPS4 / link rates through HBR3. */
static const struct encoder_feature_support link_enc_feature = {
		.max_hdmi_deep_color = COLOR_DEPTH_121212,
		.max_hdmi_pixel_clock = 600000,
		.hdmi_ycbcr420_supported = true,
		.dp_ycbcr420_supported = true,
		.fec_supported = true,
		.flags.bits.IS_HBR2_CAPABLE = true,
		.flags.bits.IS_HBR3_CAPABLE = true,
		.flags.bits.IS_TPS3_CAPABLE = true,
		.flags.bits.IS_TPS4_CAPABLE = true
};
942
dcn20_link_encoder_create(struct dc_context * ctx,const struct encoder_init_data * enc_init_data)943 struct link_encoder *dcn20_link_encoder_create(
944 struct dc_context *ctx,
945 const struct encoder_init_data *enc_init_data)
946 {
947 (void)ctx;
948 struct dcn20_link_encoder *enc20 =
949 kzalloc_obj(struct dcn20_link_encoder);
950 int link_regs_id;
951
952 if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
953 return NULL;
954
955 link_regs_id =
956 map_transmitter_id_to_phy_instance(enc_init_data->transmitter);
957
958 dcn20_link_encoder_construct(enc20,
959 enc_init_data,
960 &link_enc_feature,
961 &link_enc_regs[link_regs_id],
962 &link_enc_aux_regs[enc_init_data->channel - 1],
963 &link_enc_hpd_regs[enc_init_data->hpd_source],
964 &le_shift,
965 &le_mask);
966
967 return &enc20->enc10.base;
968 }
969
dcn20_panel_cntl_create(const struct panel_cntl_init_data * init_data)970 static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
971 {
972 struct dce_panel_cntl *panel_cntl =
973 kzalloc_obj(struct dce_panel_cntl);
974
975 if (!panel_cntl)
976 return NULL;
977
978 dce_panel_cntl_construct(panel_cntl,
979 init_data,
980 &panel_cntl_regs[init_data->inst],
981 &panel_cntl_shift,
982 &panel_cntl_mask);
983
984 return &panel_cntl->base;
985 }
986
/* Allocate and construct a clock source; NULL on alloc/construct failure. */
static struct clock_source *dcn20_clock_source_create(
	struct dc_context *ctx,
	struct dc_bios *bios,
	enum clock_source_id id,
	const struct dce110_clk_src_regs *regs,
	bool dp_clk_src)
{
	struct dce110_clk_src *src = kzalloc_obj(struct dce110_clk_src);

	if (src == NULL)
		return NULL;

	if (!dcn20_clk_src_construct(src, ctx, bios, id,
			regs, &cs_shift, &cs_mask)) {
		kfree(src);
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	src->base.dp_clk_src = dp_clk_src;
	return &src->base;
}
1010
/* Read the DC pin straps to discover the audio strap configuration. */
static void read_dce_straps(
	struct dc_context *ctx,
	struct resource_straps *straps)
{
	generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
		FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
1018
/* Create an audio endpoint for instance `inst` using the shared DCE tables. */
static struct audio *dcn20_create_audio(
		struct dc_context *ctx, unsigned int inst)
{
	return dce_audio_create(ctx, inst,
			&audio_regs[inst], &audio_shift, &audio_mask);
}
1025
/* Allocate and construct a stream encoder for the given DIG engine. */
struct stream_encoder *dcn20_stream_encoder_create(
	enum engine_id eng_id,
	struct dc_context *ctx)
{
	struct dcn10_stream_encoder *enc1 =
		kzalloc_obj(struct dcn10_stream_encoder);

	if (!enc1)
		return NULL;

	/* NOTE(review): on Navi14_M revisions, engine ids at/above DIGD are
	 * shifted up by one before indexing stream_enc_regs[] — presumably
	 * DIGD's register aperture is absent/skipped on that part; confirm
	 * against the ASIC register map. */
	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
		if (eng_id >= ENGINE_ID_DIGD)
			eng_id++;
	}

	dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
					&stream_enc_regs[eng_id],
					&se_shift, &se_mask);

	return &enc1->base;
}
1047
1048 static const struct dce_hwseq_registers hwseq_reg = {
1049 HWSEQ_DCN2_REG_LIST()
1050 };
1051
1052 static const struct dce_hwseq_shift hwseq_shift = {
1053 HWSEQ_DCN2_MASK_SH_LIST(__SHIFT)
1054 };
1055
1056 static const struct dce_hwseq_mask hwseq_mask = {
1057 HWSEQ_DCN2_MASK_SH_LIST(_MASK)
1058 };
1059
dcn20_hwseq_create(struct dc_context * ctx)1060 struct dce_hwseq *dcn20_hwseq_create(
1061 struct dc_context *ctx)
1062 {
1063 struct dce_hwseq *hws = kzalloc_obj(struct dce_hwseq);
1064
1065 if (hws) {
1066 hws->ctx = ctx;
1067 hws->regs = &hwseq_reg;
1068 hws->shifts = &hwseq_shift;
1069 hws->masks = &hwseq_mask;
1070 }
1071 return hws;
1072 }
1073
1074 static const struct resource_create_funcs res_create_funcs = {
1075 .read_dce_straps = read_dce_straps,
1076 .create_audio = dcn20_create_audio,
1077 .create_stream_encoder = dcn20_stream_encoder_create,
1078 .create_hwseq = dcn20_hwseq_create,
1079 };
1080
1081 static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu);
1082
/* Free a clock source created by dcn20_clock_source_create and clear
 * the caller's pointer. */
void dcn20_clock_source_destroy(struct clock_source **clk_src)
{
	kfree(TO_DCE110_CLK_SRC(*clk_src));
	*clk_src = NULL;
}
1088
1089
/* Allocate and construct a display stream compressor for `inst`. */
struct display_stream_compressor *dcn20_dsc_create(
	struct dc_context *ctx, uint32_t inst)
{
	struct dcn20_dsc *dsc20 = kzalloc_obj(struct dcn20_dsc);

	if (dsc20 == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dsc2_construct(dsc20, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);

	return &dsc20->base;
}
1104
/* Free a DSC created by dcn20_dsc_create and clear the caller's pointer. */
void dcn20_dsc_destroy(struct display_stream_compressor **dsc)
{
	kfree(container_of(*dsc, struct dcn20_dsc, base));
	*dsc = NULL;
}
1110
1111
dcn20_resource_destruct(struct dcn20_resource_pool * pool)1112 static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
1113 {
1114 unsigned int i;
1115
1116 for (i = 0; i < pool->base.stream_enc_count; i++) {
1117 if (pool->base.stream_enc[i] != NULL) {
1118 kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
1119 pool->base.stream_enc[i] = NULL;
1120 }
1121 }
1122
1123 for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
1124 if (pool->base.dscs[i] != NULL)
1125 dcn20_dsc_destroy(&pool->base.dscs[i]);
1126 }
1127
1128 if (pool->base.mpc != NULL) {
1129 kfree(TO_DCN20_MPC(pool->base.mpc));
1130 pool->base.mpc = NULL;
1131 }
1132 if (pool->base.hubbub != NULL) {
1133 kfree(pool->base.hubbub);
1134 pool->base.hubbub = NULL;
1135 }
1136
1137 if (pool->base.dio != NULL) {
1138 kfree(TO_DCN10_DIO(pool->base.dio));
1139 pool->base.dio = NULL;
1140 }
1141
1142 for (i = 0; i < pool->base.pipe_count; i++) {
1143 if (pool->base.dpps[i] != NULL)
1144 dcn20_dpp_destroy(&pool->base.dpps[i]);
1145
1146 if (pool->base.ipps[i] != NULL)
1147 pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
1148
1149 if (pool->base.hubps[i] != NULL) {
1150 kfree(TO_DCN20_HUBP(pool->base.hubps[i]));
1151 pool->base.hubps[i] = NULL;
1152 }
1153
1154 if (pool->base.irqs != NULL) {
1155 dal_irq_service_destroy(&pool->base.irqs);
1156 }
1157 }
1158
1159 for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
1160 if (pool->base.engines[i] != NULL)
1161 dce110_engine_destroy(&pool->base.engines[i]);
1162 if (pool->base.hw_i2cs[i] != NULL) {
1163 kfree(pool->base.hw_i2cs[i]);
1164 pool->base.hw_i2cs[i] = NULL;
1165 }
1166 if (pool->base.sw_i2cs[i] != NULL) {
1167 kfree(pool->base.sw_i2cs[i]);
1168 pool->base.sw_i2cs[i] = NULL;
1169 }
1170 }
1171
1172 for (i = 0; i < pool->base.res_cap->num_opp; i++) {
1173 if (pool->base.opps[i] != NULL)
1174 pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
1175 }
1176
1177 for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
1178 if (pool->base.timing_generators[i] != NULL) {
1179 kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
1180 pool->base.timing_generators[i] = NULL;
1181 }
1182 }
1183
1184 for (i = 0; i < pool->base.res_cap->num_dwb; i++) {
1185 if (pool->base.dwbc[i] != NULL) {
1186 kfree(TO_DCN20_DWBC(pool->base.dwbc[i]));
1187 pool->base.dwbc[i] = NULL;
1188 }
1189 if (pool->base.mcif_wb[i] != NULL) {
1190 kfree(TO_DCN20_MMHUBBUB(pool->base.mcif_wb[i]));
1191 pool->base.mcif_wb[i] = NULL;
1192 }
1193 }
1194
1195 for (i = 0; i < pool->base.audio_count; i++) {
1196 if (pool->base.audios[i])
1197 dce_aud_destroy(&pool->base.audios[i]);
1198 }
1199
1200 for (i = 0; i < pool->base.clk_src_count; i++) {
1201 if (pool->base.clock_sources[i] != NULL) {
1202 dcn20_clock_source_destroy(&pool->base.clock_sources[i]);
1203 pool->base.clock_sources[i] = NULL;
1204 }
1205 }
1206
1207 if (pool->base.dp_clock_source != NULL) {
1208 dcn20_clock_source_destroy(&pool->base.dp_clock_source);
1209 pool->base.dp_clock_source = NULL;
1210 }
1211
1212
1213 if (pool->base.abm != NULL)
1214 dce_abm_destroy(&pool->base.abm);
1215
1216 if (pool->base.dmcu != NULL)
1217 dce_dmcu_destroy(&pool->base.dmcu);
1218
1219 if (pool->base.dccg != NULL)
1220 dcn_dccg_destroy(&pool->base.dccg);
1221
1222 if (pool->base.pp_smu != NULL)
1223 dcn20_pp_smu_destroy(&pool->base.pp_smu);
1224
1225 if (pool->base.oem_device != NULL) {
1226 struct dc *dc = pool->base.oem_device->ctx->dc;
1227
1228 dc->link_srv->destroy_ddc_service(&pool->base.oem_device);
1229 }
1230 }
1231
/*
 * Allocate and construct a DCN2.0 HUBP for the given instance.
 * Returns the base hubp on success, NULL on allocation or construction
 * failure (the allocation is freed in the latter case).
 */
struct hubp *dcn20_hubp_create(
	struct dc_context *ctx,
	uint32_t inst)
{
	struct dcn20_hubp *hubp2 = kzalloc_obj(struct dcn20_hubp);

	if (hubp2 == NULL)
		return NULL;

	if (!hubp2_construct(hubp2, ctx, inst,
			&hubp_regs[inst], &hubp_shift, &hubp_mask)) {
		/* Construction failed: flag for the debugger and clean up. */
		BREAK_TO_DEBUGGER();
		kfree(hubp2);
		return NULL;
	}

	return &hubp2->base;
}
1250
/*
 * Fill in pixel_clk_params for a pipe from its stream timing, accounting
 * for ODM combine, two-pixels-per-container modes and HW frame packing.
 */
static void get_pixel_clock_parameters(
	struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;
	struct dc_link *link = stream->link;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dce_hwseq *hws = dc->hwseq;

	/* Count the OPPs in the ODM chain headed by this pipe. */
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;

	/* Legacy path: look the link encoder up from the assignment table. */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);
	if (link_enc)
		pixel_clk_params->encoder_object_id = link_enc->id;

	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
	/* TODO: un-hardcode*/
	/* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
		LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;

	/* YCbCr 4:2:2 is forced to 8bpc for pixel-clock purposes. */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
		pixel_clk_params->color_depth = COLOR_DEPTH_888;

	/* Scale the requested clock down by the number of pixels handled per
	 * cycle: 4:1 ODM, 2:1 ODM / two-pixels-per-container timing, or the
	 * DP DIG pixel-rate divider policy (checked only when present).
	 */
	if (opp_cnt == 4)
		pixel_clk_params->requested_pix_clk_100hz /= 4;
	else if (pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&stream->timing) || opp_cnt == 2)
		pixel_clk_params->requested_pix_clk_100hz /= 2;
	else if (hws->funcs.is_dp_dig_pixel_rate_div_policy) {
		if (hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx))
			pixel_clk_params->requested_pix_clk_100hz /= 2;
	}

	/* HW frame packing doubles the effective timing, so double the clock. */
	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
		pixel_clk_params->requested_pix_clk_100hz *= 2;

	/* The DIO stream encoder pushes two pixels per cycle whenever any of
	 * the dividing conditions above applies.
	 */
	if ((pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container &&
			pipe_ctx->stream_res.tg->funcs->is_two_pixels_per_container(&pipe_ctx->stream->timing)) ||
			(hws->funcs.is_dp_dig_pixel_rate_div_policy &&
			hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx)) ||
			opp_cnt > 1) {
		pixel_clk_params->dio_se_pix_per_cycle = 2;
	} else {
		pixel_clk_params->dio_se_pix_per_cycle = 1;
	}
}
1310
build_clamping_params(struct dc_stream_state * stream)1311 static void build_clamping_params(struct dc_stream_state *stream)
1312 {
1313 stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
1314 stream->clamping.c_depth = stream->timing.display_color_depth;
1315 stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
1316 }
1317
dcn20_build_pipe_pix_clk_params(struct pipe_ctx * pipe_ctx)1318 void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
1319 {
1320 get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
1321 pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
1322 pipe_ctx->clock_source,
1323 &pipe_ctx->stream_res.pix_clk_params,
1324 &pipe_ctx->pll_settings);
1325 }
1326
build_pipe_hw_param(struct pipe_ctx * pipe_ctx)1327 static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
1328 {
1329 struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool;
1330
1331 if (pool->funcs->build_pipe_pix_clk_params) {
1332 pool->funcs->build_pipe_pix_clk_params(pipe_ctx);
1333 } else {
1334 dcn20_build_pipe_pix_clk_params(pipe_ctx);
1335 }
1336
1337 pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
1338
1339 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
1340 &pipe_ctx->stream->bit_depth_params);
1341 build_clamping_params(pipe_ctx->stream);
1342
1343 return DC_OK;
1344 }
1345
dcn20_build_mapped_resource(const struct dc * dc,struct dc_state * context,struct dc_stream_state * stream)1346 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
1347 {
1348 (void)dc;
1349 enum dc_status status = DC_OK;
1350 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
1351
1352 if (!pipe_ctx)
1353 return DC_ERROR_UNEXPECTED;
1354
1355
1356 status = build_pipe_hw_param(pipe_ctx);
1357
1358 return status;
1359 }
1360
1361
dcn20_acquire_dsc(const struct dc * dc,struct resource_context * res_ctx,struct display_stream_compressor ** dsc,int pipe_idx)1362 void dcn20_acquire_dsc(const struct dc *dc,
1363 struct resource_context *res_ctx,
1364 struct display_stream_compressor **dsc,
1365 int pipe_idx)
1366 {
1367 int i;
1368 const struct resource_pool *pool = dc->res_pool;
1369 struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
1370
1371 ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
1372 *dsc = NULL;
1373
1374 /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
1375 if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
1376 *dsc = pool->dscs[pipe_idx];
1377 res_ctx->is_dsc_acquired[pipe_idx] = true;
1378 return;
1379 }
1380
1381 /* Return old DSC to avoid the need for re-programming */
1382 if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
1383 *dsc = dsc_old;
1384 res_ctx->is_dsc_acquired[dsc_old->inst] = true;
1385 return ;
1386 }
1387
1388 /* Find first free DSC */
1389 for (i = 0; i < pool->res_cap->num_dsc; i++)
1390 if (!res_ctx->is_dsc_acquired[i]) {
1391 *dsc = pool->dscs[i];
1392 res_ctx->is_dsc_acquired[i] = true;
1393 break;
1394 }
1395 }
1396
dcn20_release_dsc(struct resource_context * res_ctx,const struct resource_pool * pool,struct display_stream_compressor ** dsc)1397 void dcn20_release_dsc(struct resource_context *res_ctx,
1398 const struct resource_pool *pool,
1399 struct display_stream_compressor **dsc)
1400 {
1401 int i;
1402
1403 for (i = 0; i < pool->res_cap->num_dsc; i++)
1404 if (pool->dscs[i] == *dsc) {
1405 res_ctx->is_dsc_acquired[i] = false;
1406 *dsc = NULL;
1407 break;
1408 }
1409 }
1410
1411
1412
dcn20_add_dsc_to_stream_resource(struct dc * dc,struct dc_state * dc_ctx,struct dc_stream_state * dc_stream)1413 enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
1414 struct dc_state *dc_ctx,
1415 struct dc_stream_state *dc_stream)
1416 {
1417 enum dc_status result = DC_OK;
1418 int i;
1419
1420 /* Get a DSC if required and available */
1421 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1422 struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
1423
1424 if (pipe_ctx->top_pipe)
1425 continue;
1426
1427 if (pipe_ctx->stream != dc_stream)
1428 continue;
1429
1430 if (pipe_ctx->stream_res.dsc)
1431 continue;
1432
1433 dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
1434
1435 /* The number of DSCs can be less than the number of pipes */
1436 if (!pipe_ctx->stream_res.dsc) {
1437 result = DC_NO_DSC_RESOURCE;
1438 }
1439
1440 break;
1441 }
1442
1443 return result;
1444 }
1445
1446
remove_dsc_from_stream_resource(struct dc * dc,struct dc_state * new_ctx,struct dc_stream_state * dc_stream)1447 static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
1448 struct dc_state *new_ctx,
1449 struct dc_stream_state *dc_stream)
1450 {
1451 struct pipe_ctx *pipe_ctx = NULL;
1452 int i;
1453
1454 for (i = 0; i < MAX_PIPES; i++) {
1455 if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
1456 pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
1457
1458 if (pipe_ctx->stream_res.dsc)
1459 dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
1460 }
1461 }
1462
1463 if (!pipe_ctx)
1464 return DC_ERROR_UNEXPECTED;
1465 else
1466 return DC_OK;
1467 }
1468
1469
dcn20_add_stream_to_ctx(struct dc * dc,struct dc_state * new_ctx,struct dc_stream_state * dc_stream)1470 enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1471 {
1472 enum dc_status result = DC_ERROR_UNEXPECTED;
1473
1474 result = resource_map_pool_resources(dc, new_ctx, dc_stream);
1475
1476 if (result == DC_OK)
1477 result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
1478
1479 /* Get a DSC if required and available */
1480 if (result == DC_OK && dc_stream->timing.flags.DSC)
1481 result = dcn20_add_dsc_to_stream_resource(dc, new_ctx, dc_stream);
1482
1483 if (result == DC_OK)
1484 result = dcn20_build_mapped_resource(dc, new_ctx, dc_stream);
1485
1486 return result;
1487 }
1488
1489
dcn20_remove_stream_from_ctx(struct dc * dc,struct dc_state * new_ctx,struct dc_stream_state * dc_stream)1490 enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream)
1491 {
1492 enum dc_status result = DC_OK;
1493
1494 result = remove_dsc_from_stream_resource(dc, new_ctx, dc_stream);
1495
1496 return result;
1497 }
1498
1499 /**
 * dcn20_split_stream_for_odm - Check if stream can be split for ODM
1501 *
1502 * @dc: DC object with resource pool info required for pipe split
1503 * @res_ctx: Persistent state of resources
1504 * @prev_odm_pipe: Reference to the previous ODM pipe
1505 * @next_odm_pipe: Reference to the next ODM pipe
1506 *
1507 * This function takes a logically active pipe and a logically free pipe and
1508 * halves all the scaling parameters that need to be halved while populating
1509 * the free pipe with the required resources and configuring the next/previous
1510 * ODM pipe pointers.
1511 *
1512 * Return:
1513 * Return true if split stream for ODM is possible, otherwise, return false.
1514 */
bool dcn20_split_stream_for_odm(
		const struct dc *dc,
		struct resource_context *res_ctx,
		struct pipe_ctx *prev_odm_pipe,
		struct pipe_ctx *next_odm_pipe)
{
	int pipe_idx = next_odm_pipe->pipe_idx;
	const struct resource_pool *pool = dc->res_pool;

	/* Start the new pipe as a copy of the existing one, then fix up the
	 * fields that must remain per-pipe (index, plane HW resources, DSC).
	 */
	*next_odm_pipe = *prev_odm_pipe;

	next_odm_pipe->pipe_idx = pipe_idx;
	next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx];
	next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst;
	next_odm_pipe->stream_res.dsc = NULL;
	/* Splice the new pipe into the middle of an existing ODM chain. */
	if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) {
		next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe;
		next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe;
	}
	/* Mirror the previous pipe's MPC (top/bottom) relationships onto the
	 * new pipe, using the ODM counterparts of those neighbours.
	 */
	if (prev_odm_pipe->top_pipe && prev_odm_pipe->top_pipe->next_odm_pipe) {
		prev_odm_pipe->top_pipe->next_odm_pipe->bottom_pipe = next_odm_pipe;
		next_odm_pipe->top_pipe = prev_odm_pipe->top_pipe->next_odm_pipe;
	}
	if (prev_odm_pipe->bottom_pipe && prev_odm_pipe->bottom_pipe->next_odm_pipe) {
		prev_odm_pipe->bottom_pipe->next_odm_pipe->top_pipe = next_odm_pipe;
		next_odm_pipe->bottom_pipe = prev_odm_pipe->bottom_pipe->next_odm_pipe;
	}
	prev_odm_pipe->next_odm_pipe = next_odm_pipe;
	next_odm_pipe->prev_odm_pipe = prev_odm_pipe;

	/* Recompute scaling on both halves now that the width is split. */
	if (prev_odm_pipe->plane_state) {
		if (!resource_build_scaling_params(prev_odm_pipe) ||
			!resource_build_scaling_params(next_odm_pipe)) {
			return false;
		}
	}

	/* Each ODM segment needs its own OPP; an MPC-secondary pipe shares
	 * its top pipe's OPP instead.
	 */
	if (!next_odm_pipe->top_pipe)
		next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
	else
		next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
	/* DSC-enabled streams need one DSC per ODM segment as well. */
	if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
		dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
		ASSERT(next_odm_pipe->stream_res.dsc);
		if (next_odm_pipe->stream_res.dsc == NULL)
			return false;
	}

	return true;
}
1569
/*
 * Populate a free pipe as the MPC (horizontal split) secondary of
 * @primary_pipe: copy the primary's state, give the secondary its own
 * per-pipe plane resources, and link it below the primary.
 */
void dcn20_split_stream_for_mpc(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		struct pipe_ctx *primary_pipe,
		struct pipe_ctx *secondary_pipe)
{
	(void)res_ctx;
	int pipe_idx = secondary_pipe->pipe_idx;
	struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe;

	/* Clone the primary, then restore the secondary's own index and
	 * bottom link, which the struct copy just clobbered.
	 */
	*secondary_pipe = *primary_pipe;
	secondary_pipe->bottom_pipe = sec_bot_pipe;

	secondary_pipe->pipe_idx = pipe_idx;
	secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
	secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst;
	secondary_pipe->stream_res.dsc = NULL;
	/* Insert the secondary between the primary and its old bottom pipe. */
	if (primary_pipe->bottom_pipe && primary_pipe->bottom_pipe != secondary_pipe) {
		ASSERT(!secondary_pipe->bottom_pipe);
		secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
		secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
	}
	primary_pipe->bottom_pipe = secondary_pipe;
	secondary_pipe->top_pipe = primary_pipe;

	/* MPC split only makes sense for a pipe that has a plane. */
	ASSERT(primary_pipe->plane_state);
}
1601
dcn20_calc_max_scaled_time(unsigned int time_per_pixel,enum mmhubbub_wbif_mode mode,unsigned int urgent_watermark)1602 unsigned int dcn20_calc_max_scaled_time(
1603 unsigned int time_per_pixel,
1604 enum mmhubbub_wbif_mode mode,
1605 unsigned int urgent_watermark)
1606 {
1607 unsigned int time_per_byte = 0;
1608 unsigned int total_y_free_entry = 0x200; /* two memory piece for luma */
1609 unsigned int total_c_free_entry = 0x140; /* two memory piece for chroma */
1610 unsigned int small_free_entry, max_free_entry;
1611 unsigned int buf_lh_capability;
1612 unsigned int max_scaled_time;
1613
1614 if (mode == PACKED_444) /* packed mode */
1615 time_per_byte = time_per_pixel/4;
1616 else if (mode == PLANAR_420_8BPC)
1617 time_per_byte = time_per_pixel;
1618 else if (mode == PLANAR_420_10BPC) /* p010 */
1619 time_per_byte = time_per_pixel * 819/1024;
1620
1621 if (time_per_byte == 0)
1622 time_per_byte = 1;
1623
1624 small_free_entry = total_c_free_entry;
1625 max_free_entry = (mode == PACKED_444) ? total_y_free_entry + total_c_free_entry : small_free_entry;
1626 buf_lh_capability = max_free_entry*time_per_byte*32/16; /* there is 4bit fraction */
1627 max_scaled_time = buf_lh_capability - urgent_watermark;
1628 return max_scaled_time;
1629 }
1630
/*
 * Program writeback (MCIF_WB) arbitration parameters for every enabled
 * writeback instance in @context, one mcif_wb_arb slot per DWB pipe.
 */
void dcn20_set_mcif_arb_params(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int pipe_cnt)
{
	enum mmhubbub_wbif_mode wbif_mode;
	struct mcif_arb_params *wb_arb_params;
	int i, j, dwb_pipe;

	/* Writeback MCIF_WB arbitration parameters */
	dwb_pipe = 0;
	for (i = 0; i < dc->res_pool->pipe_count; i++) {

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		for (j = 0; j < MAX_DWB_PIPES; j++) {
			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].wb_enabled == false)
				continue;

			//wb_arb_params = &context->res_ctx.pipe_ctx[i].stream->writeback_info[j].mcif_arb_params;
			wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[dwb_pipe];

			/* Derive the WBIF packing mode from the writeback output
			 * format and depth.
			 */
			if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.out_format == dwb_scaler_mode_yuv420) {
				if (context->res_ctx.pipe_ctx[i].stream->writeback_info[j].dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
					wbif_mode = PLANAR_420_8BPC;
				else
					wbif_mode = PLANAR_420_10BPC;
			} else
				wbif_mode = PACKED_444;

			/* time_per_pixel / watermarks come from DML (FPU) code,
			 * so the call must be wrapped in DC_FP_START/END.
			 */
			DC_FP_START();
			dcn20_fpu_set_wb_arb_params(wb_arb_params, context, pipes, pipe_cnt, i);
			DC_FP_END();

			wb_arb_params->slice_lines = 32;
			wb_arb_params->arbitration_slice = 2;
			wb_arb_params->max_scaled_time = dcn20_calc_max_scaled_time(wb_arb_params->time_per_pixel,
					wbif_mode,
					wb_arb_params->cli_watermark[0]); /* assume 4 watermark sets have the same value */

			dwb_pipe++;

			/* All arbitration slots consumed — nothing left to fill. */
			if (dwb_pipe >= MAX_DWB_PIPES)
				return;
		}
	}
}
1680
/*
 * Validate the DSC configuration of every DSC-enabled stream in @new_ctx
 * against the DSC block's limits. Returns false on the first rejected
 * configuration, true when all pass.
 */
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
	int i;

	/* Validate DSC config, dsc count validation is already done */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
		struct dc_stream_state *stream = pipe_ctx->stream;
		struct dsc_config dsc_cfg;
		struct pipe_ctx *odm_pipe;
		int opp_cnt = 1;

		/* Count the ODM segments so the width can be divided. */
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			opp_cnt++;

		/* Only need to validate top pipe */
		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
			continue;

		/* Each ODM segment compresses 1/opp_cnt of the full picture
		 * width (including padding and borders).
		 */
		dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding
				+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
				+ stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		/* Horizontal slices are divided evenly among ODM segments. */
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
		dsc_cfg.dsc_padding = 0;

		if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
			return false;
	}
	return true;
}
1716
/*
 * Pick a free pipe in @res_ctx to act as secondary (MPC bottom or ODM
 * next) for @primary_pipe, preferring whatever assignment the current dc
 * state used so pipes don't have to be reprogrammed across transitions.
 * Returns NULL when no free pipe is found.
 */
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
		struct resource_context *res_ctx,
		const struct resource_pool *pool,
		const struct pipe_ctx *primary_pipe)
{
	(void)pool;
	struct pipe_ctx *secondary_pipe = NULL;

	if (dc && primary_pipe) {
		int j;
		int preferred_pipe_idx = 0;

		/* first check the prev dc state:
		 * if this primary pipe has a bottom pipe in prev. state
		 * and if the bottom pipe is still available (which it should be),
		 * pick that pipe as secondary
		 * Same logic applies for ODM pipes
		 */
		if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
				secondary_pipe->pipe_idx = preferred_pipe_idx;
			}
		}
		if (secondary_pipe == NULL &&
				dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
			preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
			if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
				secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
				secondary_pipe->pipe_idx = preferred_pipe_idx;
			}
		}

		/*
		 * if this primary pipe does not have a bottom pipe in prev. state
		 * start backward and find a pipe that did not used to be a bottom pipe in
		 * prev. dc state. This way we make sure we keep the same assignment as
		 * last state and will not have to reprogram every pipe
		 */
		if (secondary_pipe == NULL) {
			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
				if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL
						&& dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) {
					preferred_pipe_idx = j;

					if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
						secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
						secondary_pipe->pipe_idx = preferred_pipe_idx;
						break;
					}
				}
			}
		}
		/*
		 * We should never hit this assert unless assignments are shuffled around
		 * if this happens we will prob. hit a vsync tdr
		 */
		ASSERT(secondary_pipe);
		/*
		 * search backwards for the second pipe to keep pipe
		 * assignment more consistent
		 * (last resort: any free pipe, even one that used to be a
		 * secondary in the previous state)
		 */
		if (secondary_pipe == NULL) {
			for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) {
				preferred_pipe_idx = j;

				if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
					secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
					secondary_pipe->pipe_idx = preferred_pipe_idx;
					break;
				}
			}
		}
	}

	return secondary_pipe;
}
1795
/*
 * Undo all existing ODM and MPC splits in @context so that mode-support
 * validation can decide the split configuration from scratch.
 */
void dcn20_merge_pipes_for_validate(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	/* merge previously split odm pipes since mode support needs to make the decision */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *odm_pipe = pipe->next_odm_pipe;

		/* Only walk each chain once, starting from its head. */
		if (pipe->prev_odm_pipe)
			continue;

		pipe->next_odm_pipe = NULL;
		/* Detach and clear every downstream ODM pipe in the chain. */
		while (odm_pipe) {
			struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe;

			odm_pipe->plane_state = NULL;
			odm_pipe->stream = NULL;
			odm_pipe->top_pipe = NULL;
			odm_pipe->bottom_pipe = NULL;
			odm_pipe->prev_odm_pipe = NULL;
			odm_pipe->next_odm_pipe = NULL;
			if (odm_pipe->stream_res.dsc)
				dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
			/* Clear plane_res and stream_res */
			memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
			memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
			odm_pipe = next_odm_pipe;
		}
		/* Scaling params were computed for the split width; rebuild. */
		if (pipe->plane_state)
			resource_build_scaling_params(pipe);
	}

	/* merge previously mpc split pipes since mode support needs to make the decision */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;

		/* Only merge a bottom pipe that renders the same plane, i.e. an
		 * MPC split rather than an independent plane below this one.
		 */
		if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state)
			continue;

		/* Unlink the split pipe and reconnect its bottom neighbour. */
		pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
		if (hsplit_pipe->bottom_pipe)
			hsplit_pipe->bottom_pipe->top_pipe = pipe;
		hsplit_pipe->plane_state = NULL;
		hsplit_pipe->stream = NULL;
		hsplit_pipe->top_pipe = NULL;
		hsplit_pipe->bottom_pipe = NULL;

		/* Clear plane_res and stream_res */
		memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res));
		memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res));
		if (pipe->plane_state)
			resource_build_scaling_params(pipe);
	}
}
1854
is_dual_plane(enum surface_pixel_format format)1855 static bool is_dual_plane(enum surface_pixel_format format)
1856 {
1857 return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
1858 }
1859
/*
 * Decide, per pipe, how the validated mode wants each pipe split or merged.
 *
 * Outputs: split[i] set to 0/2/4 (desired split factor for pipe i) and
 * merge[i] set true when an existing split must be undone. Returns the
 * (possibly adjusted) voltage level to use.
 */
int dcn20_validate_apply_pipe_split_flags(
		struct dc *dc,
		struct dc_state *context,
		int vlevel,
		int *split,
		bool *merge)
{
	int i, pipe_idx, vlevel_split;
	int plane_count = 0;
	bool force_split = false;
	bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
	struct vba_vars_st *v = &context->bw_ctx.dml.vba;
	int max_mpc_comb = v->maxMpcComb;

	if (context->stream_count > 1) {
		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
			avoid_split = true;
	} else if (dc->debug.force_single_disp_pipe_split)
		force_split = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/**
		 * Workaround for avoiding pipe-split in cases where we'd split
		 * planes that are too small, resulting in splits that aren't
		 * valid for the scaler.
		 */
		if (pipe->plane_state &&
				(pipe->plane_state->dst_rect.width <= 16 ||
				pipe->plane_state->dst_rect.height <= 16 ||
				pipe->plane_state->src_rect.width <= 16 ||
				pipe->plane_state->src_rect.height <= 16))
			avoid_split = true;

		/* TODO: fix dc bugs and remove this split threshold thing */
		if (pipe->stream && !pipe->prev_odm_pipe &&
				(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
			++plane_count;
	}
	/* Not enough free pipes to split every plane. */
	if (plane_count > dc->res_pool->pipe_count / 2)
		avoid_split = true;

	/* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct dc_crtc_timing timing;

		if (!pipe->stream)
			continue;
		else {
			timing = pipe->stream->timing;
			if (timing.h_border_left + timing.h_border_right
					+ timing.v_border_top + timing.v_border_bottom > 0) {
				avoid_split = true;
				break;
			}
		}
	}

	/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
	if (avoid_split) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;

			/* Find the first vlevel at which this pipe fits on a
			 * single DPP in a supported mode.
			 */
			for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
				if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
						v->ModeSupport[vlevel][0])
					break;
			/* Impossible to not split this pipe */
			if (vlevel > context->bw_ctx.dml.soc.num_states)
				vlevel = vlevel_split;
			else
				max_mpc_comb = 0;
			pipe_idx++;
		}
		v->maxMpcComb = max_mpc_comb;
	}

	/* Split loop sets which pipe should be split based on dml outputs and dc flags */
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		int pipe_plane = v->pipe_plane[pipe_idx];
		bool split4mpc = false;

		/* 4:1 MPC for a lone, small dual-plane (video) surface. */
		if (context->stream_count == 1 && plane_count == 1
				&& dc->config.allow_4to1MPC && dc->res_pool->pipe_count >= 4
				&& !dc->debug.disable_z9_mpc
				&& pipe->plane_state && is_dual_plane(pipe->plane_state->format)
				&& pipe->plane_state->src_rect.width <= 1920
				&& pipe->plane_state->src_rect.height <= 1080)
			split4mpc = true;

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
			split[i] = 4;
		else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
			split[i] = 2;

		/* Side-by-side / top-and-bottom 3D sources always split 2-way. */
		if ((pipe->stream->view_format ==
				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
				pipe->stream->view_format ==
				VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
				(pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
				pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_SIDE_BY_SIDE))
			split[i] = 2;
		/* Debug overrides force ODM combine per timing generator. */
		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
			split[i] = 2;
			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
		}
		if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
			split[i] = 4;
			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
		}
		/*420 format workaround*/
		if (pipe->stream->timing.h_addressable > 7680 &&
				pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
			split[i] = 4;
		}
		v->ODMCombineEnabled[pipe_plane] =
			v->ODMCombineEnablePerState[vlevel][pipe_plane];

		/* Translate the desired split factor into split/merge actions
		 * relative to how the pipe is split right now.
		 */
		if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
			if (resource_get_mpc_slice_count(pipe) == 2) {
				/*If need split for mpc but 2 way split already*/
				if (split[i] == 4)
					split[i] = 2; /* 2 -> 4 MPC */
				else if (split[i] == 2)
					split[i] = 0; /* 2 -> 2 MPC */
				else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
					merge[i] = true; /* 2 -> 1 MPC */
			} else if (resource_get_mpc_slice_count(pipe) == 4) {
				/*If need split for mpc but 4 way split already*/
				if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
						|| !pipe->bottom_pipe)) {
					merge[i] = true; /* 4 -> 2 MPC */
				} else if (split[i] == 0 && pipe->top_pipe &&
						pipe->top_pipe->plane_state == pipe->plane_state)
					merge[i] = true; /* 4 -> 1 MPC */
				split[i] = 0;
			} else if (resource_get_odm_slice_count(pipe) > 1) {
				/* ODM -> MPC transition */
				if (pipe->prev_odm_pipe) {
					split[i] = 0;
					merge[i] = true;
				}
			}
		} else {
			if (resource_get_odm_slice_count(pipe) == 2) {
				/*If need split for odm but 2 way split already*/
				if (split[i] == 4)
					split[i] = 2; /* 2 -> 4 ODM */
				else if (split[i] == 2)
					split[i] = 0; /* 2 -> 2 ODM */
				else if (pipe->prev_odm_pipe) {
					ASSERT(0); /* NOT expected yet */
					merge[i] = true; /* exit ODM */
				}
			} else if (resource_get_odm_slice_count(pipe) == 4) {
				/*If need split for odm but 4 way split already*/
				if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
						|| !pipe->next_odm_pipe)) {
					merge[i] = true; /* 4 -> 2 ODM */
				} else if (split[i] == 0 && pipe->prev_odm_pipe) {
					ASSERT(0); /* NOT expected yet */
					merge[i] = true; /* exit ODM */
				}
				split[i] = 0;
			} else if (resource_get_mpc_slice_count(pipe) > 1) {
				/* MPC -> ODM transition */
				ASSERT(0); /* NOT expected yet */
				if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
					split[i] = 0;
					merge[i] = true;
				}
			}
		}

		/* Adjust dppclk when split is forced, do not bother with dispclk */
		if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) {
			DC_FP_START();
			dcn20_fpu_adjust_dppclk(v, vlevel, max_mpc_comb, pipe_idx, false);
			DC_FP_END();
		}
		pipe_idx++;
	}

	return vlevel;
}
2054
/*
 * dcn20_fast_validate_bw() - run the fast DML validation path and apply the
 * resulting pipe split decisions to @context.
 *
 * @dc:              DC instance
 * @context:         state to validate; pipes may be split and scaling params
 *                   rebuilt in place
 * @pipes:           caller-allocated DML pipe parameter array to populate
 * @pipe_cnt_out:    set to the number of DML pipes populated
 * @pipe_split_from: per-HW-pipe array; entries for newly created split pipes
 *                   are set to the DML index they were split from, -1 otherwise
 * @vlevel_out:      set to the validated voltage level on success
 * @validate_mode:   forwarded to the res_pool populate_dml_pipes hook
 *
 * Return: true if validation (including DSC validation) succeeded.
 */
bool dcn20_fast_validate_bw(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *pipe_cnt_out,
		int *pipe_split_from,
		int *vlevel_out,
		enum dc_validate_mode validate_mode)
{
	bool out = false;
	int split[MAX_PIPES] = { 0 };
	bool merge[MAX_PIPES] = { false };
	int pipe_cnt, i, pipe_idx, vlevel;

	ASSERT(pipes);
	if (!pipes)
		return false;

	/* Validation starts from an unsplit topology; splits are re-derived. */
	dcn20_merge_pipes_for_validate(dc, context);

	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);

	*pipe_cnt_out = pipe_cnt;

	/* No active pipes: nothing to validate, report success. */
	if (!pipe_cnt) {
		out = true;
		goto validate_out;
	}

	vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);

	/* vlevel beyond the state table means no state could support this config. */
	if (vlevel > context->bw_ctx.dml.soc.num_states)
		goto validate_fail;

	vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);

	/*initialize pipe_just_split_from to invalid idx*/
	for (i = 0; i < MAX_PIPES; i++)
		pipe_split_from[i] = -1;

	/*
	 * pipe_idx tracks the DML pipe index and is only advanced for pipes
	 * that were fed to DML (stream-bearing, not second halves of splits).
	 */
	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;

		if (!pipe->stream || pipe_split_from[i] >= 0)
			continue;

		pipe_idx++;

		/* Blank (no plane) OTG master that DML wants ODM-combined. */
		if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
			hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
			ASSERT(hsplit_pipe);
			if (!dcn20_split_stream_for_odm(
					dc, &context->res_ctx,
					pipe, hsplit_pipe))
				goto validate_fail;
			pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
			dcn20_build_mapped_resource(dc, context, pipe->stream);
		}

		if (!pipe->plane_state)
			continue;
		/* Skip 2nd half of already split pipe */
		if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)
			continue;

		/* We do not support mpo + odm at the moment */
		if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state
				&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
			goto validate_fail;

		if (split[i] == 2) {
			if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
				/* pipe not split previously needs split */
				hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
				ASSERT(hsplit_pipe);
				if (!hsplit_pipe) {
					/* No free pipe: run unsplit on a raised dppclk instead. */
					DC_FP_START();
					dcn20_fpu_adjust_dppclk(&context->bw_ctx.dml.vba, vlevel, context->bw_ctx.dml.vba.maxMpcComb, pipe_idx, true);
					DC_FP_END();
					continue;
				}
				if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
					if (!dcn20_split_stream_for_odm(
							dc, &context->res_ctx,
							pipe, hsplit_pipe))
						goto validate_fail;
					dcn20_build_mapped_resource(dc, context, pipe->stream);
				} else {
					dcn20_split_stream_for_mpc(
							&context->res_ctx, dc->res_pool,
							pipe, hsplit_pipe);
					resource_build_scaling_params(pipe);
					resource_build_scaling_params(hsplit_pipe);
				}
				pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
			}
		} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
			/* merge should already have been done */
			ASSERT(0);
		}
	}
	/* Actual dsc count per stream dsc validation*/
	if (!dcn20_validate_dsc(dc, context)) {
		context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =
				DML_FAIL_DSC_VALIDATION_FAILURE;
		goto validate_fail;
	}

	*vlevel_out = vlevel;

	out = true;
	goto validate_out;

validate_fail:
	out = false;

validate_out:
	return out;
}
2176
dcn20_validate_bandwidth(struct dc * dc,struct dc_state * context,enum dc_validate_mode validate_mode)2177 enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
2178 enum dc_validate_mode validate_mode)
2179 {
2180 bool voltage_supported;
2181 display_e2e_pipe_params_st *pipes;
2182
2183 pipes = kzalloc_objs(display_e2e_pipe_params_st,
2184 dc->res_pool->pipe_count);
2185 if (!pipes)
2186 return DC_FAIL_BANDWIDTH_VALIDATE;
2187
2188 DC_FP_START();
2189 voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes);
2190 DC_FP_END();
2191
2192 kfree(pipes);
2193 return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
2194 }
2195
dcn20_acquire_free_pipe_for_layer(const struct dc_state * cur_ctx,struct dc_state * new_ctx,const struct resource_pool * pool,const struct pipe_ctx * opp_head)2196 struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
2197 const struct dc_state *cur_ctx,
2198 struct dc_state *new_ctx,
2199 const struct resource_pool *pool,
2200 const struct pipe_ctx *opp_head)
2201 {
2202 (void)cur_ctx;
2203 struct resource_context *res_ctx = &new_ctx->res_ctx;
2204 struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream);
2205 struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master);
2206
2207 ASSERT(otg_master);
2208
2209 if (!sec_dpp_pipe)
2210 return NULL;
2211
2212 sec_dpp_pipe->stream = opp_head->stream;
2213 sec_dpp_pipe->stream_res.tg = opp_head->stream_res.tg;
2214 sec_dpp_pipe->stream_res.opp = opp_head->stream_res.opp;
2215
2216 sec_dpp_pipe->plane_res.hubp = pool->hubps[sec_dpp_pipe->pipe_idx];
2217 sec_dpp_pipe->plane_res.ipp = pool->ipps[sec_dpp_pipe->pipe_idx];
2218 sec_dpp_pipe->plane_res.dpp = pool->dpps[sec_dpp_pipe->pipe_idx];
2219 sec_dpp_pipe->plane_res.mpcc_inst = pool->dpps[sec_dpp_pipe->pipe_idx]->inst;
2220
2221 return sec_dpp_pipe;
2222 }
2223
dcn20_get_dcc_compression_cap(const struct dc * dc,const struct dc_dcc_surface_param * input,struct dc_surface_dcc_cap * output)2224 bool dcn20_get_dcc_compression_cap(const struct dc *dc,
2225 const struct dc_dcc_surface_param *input,
2226 struct dc_surface_dcc_cap *output)
2227 {
2228 if (dc->res_pool->hubbub->funcs->get_dcc_compression_cap)
2229 return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
2230 dc->res_pool->hubbub, input, output);
2231
2232 return false;
2233 }
2234
dcn20_destroy_resource_pool(struct resource_pool ** pool)2235 static void dcn20_destroy_resource_pool(struct resource_pool **pool)
2236 {
2237 struct dcn20_resource_pool *dcn20_pool = TO_DCN20_RES_POOL(*pool);
2238
2239 dcn20_resource_destruct(dcn20_pool);
2240 kfree(dcn20_pool);
2241 *pool = NULL;
2242 }
2243
2244
/* Capability-query hooks installed into dc->cap_funcs at construct time. */
static struct dc_cap_funcs cap_funcs = {
	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
2248
2249
dcn20_patch_unknown_plane_state(struct dc_plane_state * plane_state)2250 enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
2251 {
2252 enum surface_pixel_format surf_pix_format = plane_state->format;
2253 unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
2254
2255 plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;
2256 if (bpp == 64)
2257 plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D;
2258
2259 return DC_OK;
2260 }
2261
dcn20_release_pipe(struct dc_state * context,struct pipe_ctx * pipe,const struct resource_pool * pool)2262 void dcn20_release_pipe(struct dc_state *context,
2263 struct pipe_ctx *pipe,
2264 const struct resource_pool *pool)
2265 {
2266 if (resource_is_pipe_type(pipe, OPP_HEAD) && pipe->stream_res.dsc)
2267 dcn20_release_dsc(&context->res_ctx, pool, &pipe->stream_res.dsc);
2268 memset(pipe, 0, sizeof(*pipe));
2269 }
2270
/*
 * DCN2.0 resource_funcs vtable: entry points dc core uses for pool
 * lifetime, pipe acquisition/release, stream/DSC bookkeeping and
 * bandwidth validation. Some hooks are shared with DCN1.0.
 */
static const struct resource_funcs dcn20_res_pool_funcs = {
	.destroy = dcn20_destroy_resource_pool,
	.link_enc_create = dcn20_link_encoder_create,
	.panel_cntl_create = dcn20_panel_cntl_create,
	.validate_bandwidth = dcn20_validate_bandwidth,
	.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
	.release_pipe = dcn20_release_pipe,
	.add_stream_to_ctx = dcn20_add_stream_to_ctx,
	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
	.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
	.set_mcif_arb_params = dcn20_set_mcif_arb_params,
	.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
	.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
	.get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe,
	.get_default_tiling_info = dcn10_get_default_tiling_info
};
2289
dcn20_dwbc_create(struct dc_context * ctx,struct resource_pool * pool)2290 bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)
2291 {
2292 int i;
2293 uint32_t pipe_count = pool->res_cap->num_dwb;
2294
2295 for (i = 0; i < pipe_count; i++) {
2296 struct dcn20_dwbc *dwbc20 = kzalloc_obj(struct dcn20_dwbc);
2297
2298 if (!dwbc20) {
2299 dm_error("DC: failed to create dwbc20!\n");
2300 return false;
2301 }
2302 dcn20_dwbc_construct(dwbc20, ctx,
2303 &dwbc20_regs[i],
2304 &dwbc20_shift,
2305 &dwbc20_mask,
2306 i);
2307 pool->dwbc[i] = &dwbc20->base;
2308 }
2309 return true;
2310 }
2311
dcn20_mmhubbub_create(struct dc_context * ctx,struct resource_pool * pool)2312 bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
2313 {
2314 int i;
2315 uint32_t pipe_count = pool->res_cap->num_dwb;
2316
2317 ASSERT(pipe_count > 0);
2318
2319 for (i = 0; i < pipe_count; i++) {
2320 struct dcn20_mmhubbub *mcif_wb20 = kzalloc_obj(struct dcn20_mmhubbub);
2321
2322 if (!mcif_wb20) {
2323 dm_error("DC: failed to create mcif_wb20!\n");
2324 return false;
2325 }
2326
2327 dcn20_mmhubbub_construct(mcif_wb20, ctx,
2328 &mcif_wb20_regs[i],
2329 &mcif_wb20_shift,
2330 &mcif_wb20_mask,
2331 i);
2332
2333 pool->mcif_wb[i] = &mcif_wb20->base;
2334 }
2335 return true;
2336 }
2337
dcn20_pp_smu_create(struct dc_context * ctx)2338 static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
2339 {
2340 struct pp_smu_funcs *pp_smu = kzalloc_obj(*pp_smu);
2341
2342 if (!pp_smu)
2343 return pp_smu;
2344
2345 dm_pp_get_funcs(ctx, pp_smu);
2346
2347 if (pp_smu->ctx.ver != PP_SMU_VER_NV)
2348 pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs));
2349
2350 return pp_smu;
2351 }
2352
dcn20_pp_smu_destroy(struct pp_smu_funcs ** pp_smu)2353 static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu)
2354 {
2355 if (pp_smu && *pp_smu) {
2356 kfree(*pp_smu);
2357 *pp_smu = NULL;
2358 }
2359 }
2360
get_asic_rev_soc_bb(uint32_t hw_internal_rev)2361 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
2362 uint32_t hw_internal_rev)
2363 {
2364 if (ASICREV_IS_NAVI14_M(hw_internal_rev))
2365 return &dcn2_0_nv14_soc;
2366
2367 if (ASICREV_IS_NAVI12_P(hw_internal_rev))
2368 return &dcn2_0_nv12_soc;
2369
2370 return &dcn2_0_soc;
2371 }
2372
get_asic_rev_ip_params(uint32_t hw_internal_rev)2373 static struct _vcs_dpi_ip_params_st *get_asic_rev_ip_params(
2374 uint32_t hw_internal_rev)
2375 {
2376 if (ASICREV_IS_NAVI14_M(hw_internal_rev))
2377 return &dcn2_0_nv14_ip;
2378
2379 /* NV12 and NV10 */
2380 return &dcn2_0_ip;
2381 }
2382
/* Every DCN2.0 ASIC revision maps to the same DML project version. */
static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
{
	(void)hw_internal_rev;
	return DML_PROJECT_NAVI10v2;
}
2388
init_soc_bounding_box(struct dc * dc,struct dcn20_resource_pool * pool)2389 static bool init_soc_bounding_box(struct dc *dc,
2390 struct dcn20_resource_pool *pool)
2391 {
2392 struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
2393 get_asic_rev_soc_bb(dc->ctx->asic_id.hw_internal_rev);
2394 struct _vcs_dpi_ip_params_st *loaded_ip =
2395 get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
2396
2397 if (pool->base.pp_smu) {
2398 struct pp_smu_nv_clock_table max_clocks = {0};
2399 unsigned int uclk_states[8] = {0};
2400 unsigned int num_states = 0;
2401 enum pp_smu_status status;
2402 bool clock_limits_available = false;
2403 bool uclk_states_available = false;
2404
2405 if (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states) {
2406 status = (pool->base.pp_smu->nv_funcs.get_uclk_dpm_states)
2407 (&pool->base.pp_smu->nv_funcs.pp_smu, uclk_states, &num_states);
2408
2409 uclk_states_available = (status == PP_SMU_RESULT_OK);
2410 }
2411
2412 if (pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks) {
2413 status = (*pool->base.pp_smu->nv_funcs.get_maximum_sustainable_clocks)
2414 (&pool->base.pp_smu->nv_funcs.pp_smu, &max_clocks);
2415 /* SMU cannot set DCF clock to anything equal to or higher than SOC clock
2416 */
2417 if (max_clocks.dcfClockInKhz >= max_clocks.socClockInKhz)
2418 max_clocks.dcfClockInKhz = max_clocks.socClockInKhz - 1000;
2419 clock_limits_available = (status == PP_SMU_RESULT_OK);
2420 }
2421
2422 if (clock_limits_available && uclk_states_available && num_states) {
2423 DC_FP_START();
2424 dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
2425 DC_FP_END();
2426 } else if (clock_limits_available) {
2427 DC_FP_START();
2428 dcn20_cap_soc_clocks(loaded_bb, max_clocks);
2429 DC_FP_END();
2430 }
2431 }
2432
2433 loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
2434 loaded_ip->max_num_dpp = pool->base.pipe_count;
2435 DC_FP_START();
2436 dcn20_patch_bounding_box(dc, loaded_bb);
2437 DC_FP_END();
2438 return true;
2439 }
2440
/*
 * dcn20_resource_construct() - build the full DCN2.0 resource pool.
 *
 * Hardcodes ASIC caps for NV10/NV12/NV14, then creates every HW block
 * (clock sources, DCCG, DMCU, ABM, PP/SMU interface, IRQ service, HUBPs,
 * IPPs, DPPs, AUX engines, I2C, OPPs, TGs, MPC, HUBBUB, DIO, DSCs, DWB,
 * MCIF_WB), initializes the DML bounding box and PP-lib watermark ranges,
 * and installs the hw sequencer.
 *
 * Return: true on success; on any failure, everything created so far is
 * torn down via dcn20_resource_destruct() and false is returned.
 */
static bool dcn20_resource_construct(
	uint8_t num_virtual_links,
	struct dc *dc,
	struct dcn20_resource_pool *pool)
{
	int i;
	struct dc_context *ctx = dc->ctx;
	struct irq_service_init_data init_data;
	struct ddc_service_init_data ddc_init_data = {0};
	struct _vcs_dpi_soc_bounding_box_st *loaded_bb =
			get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev);
	struct _vcs_dpi_ip_params_st *loaded_ip =
			get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev);
	enum dml_project dml_project_version =
			get_dml_project_version(ctx->asic_id.hw_internal_rev);

	ctx->dc_bios->regs = &bios_regs;
	pool->base.funcs = &dcn20_res_pool_funcs;

	/* NV14 is a 5-pipe part; NV10/NV12 have 6 pipes. */
	if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) {
		pool->base.res_cap = &res_cap_nv14;
		pool->base.pipe_count = 5;
		pool->base.mpcc_count = 5;
	} else {
		pool->base.res_cap = &res_cap_nv10;
		pool->base.pipe_count = 6;
		pool->base.mpcc_count = 6;
	}
	/*************************************************
	 *  Resource + asic cap harcoding                *
	 *************************************************/
	pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;

	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a not applied by default*/
	dc->caps.max_cursor_size = 256;
	dc->caps.min_horizontal_blanking_period = 80;
	dc->caps.dmdata_alloc_size = 2048;

	dc->caps.max_slave_planes = 1;
	dc->caps.max_slave_yuv_planes = 1;
	dc->caps.max_slave_rgb_planes = 1;
	dc->caps.post_blend_color_processing = true;
	dc->caps.force_dp_tps4_for_cp2520 = true;
	dc->caps.extended_aux_timeout_support = true;
	dc->caps.dmcub_support = true;

	/* Color pipeline capabilities */
	dc->caps.color.dpp.dcn_arch = 1;
	dc->caps.color.dpp.input_lut_shared = 0;
	dc->caps.color.dpp.icsc = 1;
	dc->caps.color.dpp.dgam_ram = 1;
	dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
	dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
	dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
	dc->caps.color.dpp.dgam_rom_caps.pq = 0;
	dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
	dc->caps.color.dpp.post_csc = 0;
	dc->caps.color.dpp.gamma_corr = 0;
	dc->caps.color.dpp.dgam_rom_for_yuv = 1;

	dc->caps.color.dpp.hw_3d_lut = 1;
	dc->caps.color.dpp.ogam_ram = 1;
	// no OGAM ROM on DCN2, only MPC ROM
	dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
	dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.dpp.ogam_rom_caps.pq = 0;
	dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
	dc->caps.color.dpp.ocsc = 0;

	dc->caps.color.mpc.gamut_remap = 0;
	dc->caps.color.mpc.num_3dluts = 0;
	dc->caps.color.mpc.shared_3d_lut = 0;
	dc->caps.color.mpc.ogam_ram = 1;
	dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
	dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
	dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
	dc->caps.color.mpc.ogam_rom_caps.pq = 0;
	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
	dc->caps.color.mpc.ocsc = 1;

	dc->caps.dp_hdmi21_pcon_support = true;
	dc->check_config = config_defaults;

	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
		dc->debug = debug_defaults_drv;

	//dcn2.0x
	dc->work_arounds.dedcn20_305_wa = true;

	// Init the vm_helper
	if (dc->vm_helper)
		vm_helper_init(dc->vm_helper, 16);

	/*************************************************
	 *  Create resources                             *
	 *************************************************/

	pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL0,
				&clk_src_regs[0], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL1,
				&clk_src_regs[1], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL2,
				&clk_src_regs[2], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL3,
				&clk_src_regs[3], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL4,
				&clk_src_regs[4], false);
	pool->base.clock_sources[DCN20_CLK_SRC_PLL5] =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_COMBO_PHY_PLL5,
				&clk_src_regs[5], false);
	pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL;
	/* todo: not reuse phy_pll registers */
	pool->base.dp_clock_source =
			dcn20_clock_source_create(ctx, ctx->dc_bios,
				CLOCK_SOURCE_ID_DP_DTO,
				&clk_src_regs[0], true);

	for (i = 0; i < pool->base.clk_src_count; i++) {
		if (pool->base.clock_sources[i] == NULL) {
			dm_error("DC: failed to create clock sources!\n");
			BREAK_TO_DEBUGGER();
			goto create_fail;
		}
	}

	pool->base.dccg = dccg2_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
	if (pool->base.dccg == NULL) {
		dm_error("DC: failed to create dccg!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.dmcu = dcn20_dmcu_create(ctx,
			&dmcu_regs,
			&dmcu_shift,
			&dmcu_mask);
	if (pool->base.dmcu == NULL) {
		dm_error("DC: failed to create dmcu!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	pool->base.abm = dce_abm_create(ctx,
			&abm_regs,
			&abm_shift,
			&abm_mask);
	if (pool->base.abm == NULL) {
		dm_error("DC: failed to create abm!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	/* NULL pp_smu is tolerated by init_soc_bounding_box below. */
	pool->base.pp_smu = dcn20_pp_smu_create(ctx);


	if (!init_soc_bounding_box(dc, pool)) {
		dm_error("DC: failed to initialize soc bounding box!\n");
		BREAK_TO_DEBUGGER();
		goto create_fail;
	}

	dml_init_instance(&dc->dml, loaded_bb, loaded_ip, dml_project_version);

	if (!dc->debug.disable_pplib_wm_range) {
		struct pp_smu_wm_range_sets ranges = {0};
		/* NOTE(review): this 'i' shadows the outer loop counter */
		int i = 0;

		ranges.num_reader_wm_sets = 0;

		if (loaded_bb->num_states == 1) {
			/* Single state: one unconstrained watermark set. */
			ranges.reader_wm_sets[0].wm_inst = i;
			ranges.reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

			ranges.num_reader_wm_sets = 1;
		} else if (loaded_bb->num_states > 1) {
			/* One watermark set per state, capped at 4 sets. */
			for (i = 0; i < 4 && i < loaded_bb->num_states; i++) {
				ranges.reader_wm_sets[i].wm_inst = i;
				ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
				ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
				DC_FP_START();
				dcn20_fpu_set_wm_ranges(i, &ranges, loaded_bb);
				DC_FP_END();

				ranges.num_reader_wm_sets = i + 1;
			}

			/* Leave lowest/highest fill clocks unconstrained. */
			ranges.reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
			ranges.reader_wm_sets[ranges.num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		}

		ranges.num_writer_wm_sets = 1;

		ranges.writer_wm_sets[0].wm_inst = 0;
		ranges.writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
		ranges.writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
		ranges.writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
		ranges.writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;

		/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
		if (pool->base.pp_smu && pool->base.pp_smu->nv_funcs.set_wm_ranges)
			pool->base.pp_smu->nv_funcs.set_wm_ranges(&pool->base.pp_smu->nv_funcs.pp_smu, &ranges);
	}

	init_data.ctx = dc->ctx;
	pool->base.irqs = dal_irq_service_dcn20_create(&init_data);
	if (!pool->base.irqs)
		goto create_fail;

	/* mem input -> ipp -> dpp -> opp -> TG */
	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.hubps[i] = dcn20_hubp_create(ctx, i);
		if (pool->base.hubps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create memory input!\n");
			goto create_fail;
		}

		pool->base.ipps[i] = dcn20_ipp_create(ctx, i);
		if (pool->base.ipps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create input pixel processor!\n");
			goto create_fail;
		}

		pool->base.dpps[i] = dcn20_dpp_create(ctx, i);
		if (pool->base.dpps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create dpps!\n");
			goto create_fail;
		}
	}
	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
		pool->base.engines[i] = dcn20_aux_engine_create(ctx, i);
		if (pool->base.engines[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create aux engine!!\n");
			goto create_fail;
		}
		pool->base.hw_i2cs[i] = dcn20_i2c_hw_create(ctx, i);
		if (pool->base.hw_i2cs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC:failed to create hw i2c!!\n");
			goto create_fail;
		}
		pool->base.sw_i2cs[i] = NULL;
	}

	for (i = 0; i < pool->base.res_cap->num_opp; i++) {
		pool->base.opps[i] = dcn20_opp_create(ctx, i);
		if (pool->base.opps[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error(
				"DC: failed to create output pixel processor!\n");
			goto create_fail;
		}
	}

	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
		pool->base.timing_generators[i] = dcn20_timing_generator_create(
				ctx, i);
		if (pool->base.timing_generators[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create tg!\n");
			goto create_fail;
		}
	}

	pool->base.timing_generator_count = i;

	pool->base.mpc = dcn20_mpc_create(ctx);
	if (pool->base.mpc == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mpc!\n");
		goto create_fail;
	}

	pool->base.hubbub = dcn20_hubbub_create(ctx);
	if (pool->base.hubbub == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create hubbub!\n");
		goto create_fail;
	}

	/* DIO */
	pool->base.dio = dcn20_dio_create(ctx);
	if (pool->base.dio == NULL) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dio!\n");
		goto create_fail;
	}

	for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
		pool->base.dscs[i] = dcn20_dsc_create(ctx, i);
		if (pool->base.dscs[i] == NULL) {
			BREAK_TO_DEBUGGER();
			dm_error("DC: failed to create display stream compressor %d!\n", i);
			goto create_fail;
		}
	}

	if (!dcn20_dwbc_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create dwbc!\n");
		goto create_fail;
	}
	if (!dcn20_mmhubbub_create(ctx, &pool->base)) {
		BREAK_TO_DEBUGGER();
		dm_error("DC: failed to create mcif_wb!\n");
		goto create_fail;
	}

	if (!resource_construct(num_virtual_links, dc, &pool->base,
			&res_create_funcs))
		goto create_fail;

	dcn20_hw_sequencer_construct(dc);

	// IF NV12, set PG function pointer to NULL. It's not that
	// PG isn't supported for NV12, it's that we don't want to
	// program the registers because that will cause more power
	// to be consumed. We could have created dcn20_init_hw to get
	// the same effect by checking ASIC rev, but there was a
	// request at some point to not check ASIC rev on hw sequencer.
	if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) {
		dc->hwseq->funcs.enable_power_gating_plane = NULL;
		dc->debug.disable_dpp_power_gate = true;
		dc->debug.disable_hubp_power_gate = true;
	}


	dc->caps.max_planes =  pool->base.pipe_count;

	for (i = 0; i < dc->caps.max_planes; ++i)
		dc->caps.planes[i] = plane_cap;

	dc->caps.max_odm_combine_factor = 2;

	dc->cap_funcs = cap_funcs;

	/* Use the BIOS-provided OEM I2C object if present. */
	if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
		ddc_init_data.ctx = dc->ctx;
		ddc_init_data.link = NULL;
		ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
		ddc_init_data.id.enum_id = 0;
		ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
		pool->base.oem_device = dc->link_srv->create_ddc_service(&ddc_init_data);
	} else {
		pool->base.oem_device = NULL;
	}

	return true;

create_fail:

	/* Destruct tolerates partially-constructed pools. */
	dcn20_resource_destruct(pool);

	return false;
}
2821
dcn20_create_resource_pool(const struct dc_init_data * init_data,struct dc * dc)2822 struct resource_pool *dcn20_create_resource_pool(
2823 const struct dc_init_data *init_data,
2824 struct dc *dc)
2825 {
2826 struct dcn20_resource_pool *pool =
2827 kzalloc_obj(struct dcn20_resource_pool);
2828
2829 if (!pool)
2830 return NULL;
2831
2832 if (dcn20_resource_construct(init_data->num_virtual_links, dc, pool))
2833 return &pool->base;
2834
2835 BREAK_TO_DEBUGGER();
2836 kfree(pool);
2837 return NULL;
2838 }
2839