/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dm_helpers.h"
#include "core_types.h"
#include "resource.h"
#include "dccg.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
#include "reg_helper.h"
#include "abm.h"
#include "hubp.h"
#include "dchubbub.h"
#include "timing_generator.h"
#include "opp.h"
#include "ipp.h"
#include "mpc.h"
#include "mcif_wb.h"
#include "dc_dmub_srv.h"
#include "dcn35_hwseq.h"
#include "dcn35/dcn35_dccg.h"
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
#include "dce/dce_i2c_hw.h"
#include "dsc.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"

#define DC_LOGGER_INIT(logger) \
	struct dal_logger *dc_logger = logger

#define CTX \
	hws->ctx
#define REG(reg)\
	hws->regs->reg
#define DC_LOGGER \
	dc_logger

#undef FN
#define FN(reg_name, field_name) \
	hws->shifts->field_name, hws->masks->field_name
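/*
 * For orientation (a description of the shared DC convention from
 * reg_helper.h, not code defined in this file): the REG_* helpers combine
 * REG(reg_name) with the FN(reg_name, field_name) shift/mask pair above, so
 * a call such as REG_UPDATE(DMU_CLK_CNTL, RBBMIF_FGCG_REP_DIS, 1) performs a
 * read-modify-write of DMU_CLK_CNTL, placing the field value using
 * hws->shifts->RBBMIF_FGCG_REP_DIS and hws->masks->RBBMIF_FGCG_REP_DIS.
 */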
#if 0
static void enable_memory_low_power(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	int i;

	if (dc->debug.enable_mem_low_power.bits.dmcu) {
		// Force ERAM to shutdown if DMCU is not enabled
		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
		}
	}
	/* dcn35 has MEM_PWR enabled by default, make sure to wake the memories up */
	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.mpc &&
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);

	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
		// Power down VPGs
		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_DP2_0)
		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
#endif
	}

}
#endif

static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log)
{
	if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) {
		if (dc->res_pool->pg_cntl->funcs->print_pg_status)
			dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
	}
}

void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE_3(DMU_CLK_CNTL,
		RBBMIF_FGCG_REP_DIS, !enable,
		IHC_FGCG_REP_DIS, !enable,
		LONO_FGCG_REP_DIS, !enable
	);
}

void dcn35_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}

void dcn35_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int i;

	print_pg_status(dc, __func__, ": start");

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	//dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		/* this calls into dmub fw to do the init */
		hws->funcs.bios_golden_init(dc);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	//enable_memory_low_power(dc);

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
		res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {

		// we want to turn off edp displays if odm is enabled and no seamless boot
		if (!dc->caps.seamless_odm) {
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];
				uint32_t num_opps, opp_id_src0, opp_id_src1;

				num_opps = 1;
				if (tg) {
					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
						tg->funcs->get_optc_source(tg, &num_opps,
								&opp_id_src0, &opp_id_src1);
					}
				}

				if (num_opps > 1) {
					dc->link_srv->blank_all_edp_displays(dc);
					break;
				}
			}
		}

		hws->funcs.init_pipes(dc, dc->current_state);
		print_pg_status(dc, __func__, ": after init_pipes");

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
			!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
	}
	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}
	if (dc->ctx->dmub_srv) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (abms[i] != NULL && abms[i]->funcs != NULL)
				abms[i]->funcs->abm_init(abms[i], backlight, user_level);
		}
	}

	/* power AFMT HDMI memory; TODO: may move this to output disable/enable to save power */
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	// Set i2c to light sleep until engine is setup
	if (dc->debug.enable_mem_low_power.bits.i2c)
		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);

	if (hws->funcs.setup_hpo_hw_control)
		hws->funcs.setup_hpo_hw_control(hws, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->debug.disable_mem_low_power) {
		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
	}
	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
	}

	if (dc->res_pool->pg_cntl) {
		if (dc->res_pool->pg_cntl->funcs->init_pg_status)
			dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
	}
	print_pg_status(dc, __func__, ": after init_pg_status");
}

static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 1;

	DC_LOGGER_INIT(stream->ctx->logger);

	ASSERT(dsc);
	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_cnt++;

	if (enable) {
		struct dsc_config dsc_cfg;
		struct dsc_optc_config dsc_optc_cfg = {0};
		enum optc_dsc_mode optc_dsc_mode;
		struct dcn_dsc_state dsc_state = {0};

		if (!dsc) {
			DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
			return;
		}

		if (dsc->funcs->dsc_read_state) {
			dsc->funcs->dsc_read_state(dsc, &dsc_state);
			if (!dsc_state.dsc_fw_en) {
				DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
				return;
			}
		}
		/* Enable DSC hw block */
		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
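		/*
		 * Distribute the horizontal slices evenly across the ODM
		 * segments: each DSC instance below is programmed with its
		 * per-segment slice count and width, and the full-stream
		 * values are restored afterwards for OPTC programming.
		 */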
		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;

			ASSERT(odm_dsc);
			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
		}
		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
		dsc_cfg.pic_width *= opp_cnt;

		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;

		/* Enable DSC in OPTC */
		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
							optc_dsc_mode,
							dsc_optc_cfg.bytes_per_pixel,
							dsc_optc_cfg.slice_width);
	} else {
		/* disable DSC in OPTC */
		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
				pipe_ctx->stream_res.tg,
				OPTC_DSC_DISABLED, 0, 0);

		/* disable DSC block */
		dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			ASSERT(odm_pipe->stream_res.dsc);
			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
		}
	}
}

// Given any pipe_ctx, return the total ODM combine factor, and optionally return
// the OPP ids which are used
static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
{
	unsigned int opp_count = 1;
	struct pipe_ctx *odm_pipe;

	// First get to the top pipe
	for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
		;

	// First pipe is always used
	if (opp_instances)
		opp_instances[0] = odm_pipe->stream_res.opp->inst;

	// Find and count odm pipes, if any
	for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		if (opp_instances)
			opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
		opp_count++;
	}

	return opp_count;
}

void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *odm_pipe;
	int opp_cnt = 0;
	int opp_inst[MAX_PIPES] = {0};
	int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
	struct mpc *mpc = dc->res_pool->mpc;
	int i;

	opp_cnt = get_odm_config(pipe_ctx, opp_inst);

	if (opp_cnt > 1)
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	else
		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

	if (mpc->funcs->set_out_rate_control) {
		for (i = 0; i < opp_cnt; ++i) {
			mpc->funcs->set_out_rate_control(
					mpc, opp_inst[i],
					false,
					0,
					NULL);
		}
	}

	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
				odm_pipe->stream_res.opp,
				true);
	}

	if (pipe_ctx->stream_res.dsc) {
		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];

		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);

		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
			/* disconnect DSC block from stream */
			dsc->funcs->dsc_disconnect(dsc);
		}
	}
}

void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control) {
		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
			hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
	}
}

void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
		hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
			hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
	}
}

void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_inst, bool clock_on)
{
	if (!hws->ctx->dc->debug.root_clock_optimization.bits.physymclk)
		return;

	if (hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating) {
		hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating(
			hws->ctx->dc->res_pool->dccg, phy_inst, clock_on);
	}
}

/* In headless boot cases, DIG may be turned
 * on which causes HW/SW discrepancies.
 * To avoid this, power down hardware on boot
 * if DIG is turned on
 */
void dcn35_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwseq->funcs.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwseq->funcs.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwseq->funcs.power_down) {
				dc->hwseq->funcs.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);

	if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
		dc_allow_idle_optimizations(dc, true);
}

bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	if (dc->debug.dmcub_emulation)
		return true;

	if (enable) {
		uint32_t num_active_edp = 0;
		int i;

		for (i = 0; i < dc->current_state->stream_count; ++i) {
			struct dc_stream_state *stream = dc->current_state->streams[i];
			struct dc_link *link = stream->link;
			bool is_psr = link && !link->panel_config.psr.disable_psr &&
				      (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
				       link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
			bool is_replay = link && link->replay_settings.replay_feature_enabled;

			/* Ignore streams that are disabled. */
			if (stream->dpms_off)
				continue;

			/* Active external displays block idle optimizations. */
			if (!dc_is_embedded_signal(stream->signal))
				return false;

			/* If not on PWRSEQ0, we can't enter idle optimizations */
			if (link && link->link_index != 0)
				return false;

			/* Check for panel power features required for idle optimizations. */
			if (!is_psr && !is_replay)
				return false;

			num_active_edp += 1;
		}

		/* If more than one active eDP then disallow. */
		if (num_active_edp > 1)
			return false;
	}

	// TODO: review other cases when idle optimization is allowed
	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);

	return true;
}

void dcn35_z10_restore(const struct dc *dc)
{
	if (dc->debug.disable_z10)
		return;

	dc_dmub_srv_apply_idle_power_optimizations(dc, false);

	dcn31_z10_restore(dc);
}

void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
	bool can_apply_seamless_boot = false;
	bool tg_enabled[MAX_PIPES] = {false};

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* We assume pipe_ctx does not map irregularly to a
		 * non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable it.
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
			if (hubbub->funcs->program_det_segments)
				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* We assume pipe_ctx does not map irregularly to a
		 * non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable it.
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			tg_enabled[i] = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		hubp->funcs->hubp_reset(hubp);
		dpp->funcs->dpp_reset(dpp);

		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, context, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Clean up MPC tree */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (tg_enabled[i]) {
			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;

					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
				}
			}
		}
	}

	if (pg_cntl != NULL) {
		if (pg_cntl->funcs->dsc_pg_control != NULL) {
			uint32_t num_opps = 0;
			uint32_t opp_id_src0 = OPP_ID_INVALID;
			uint32_t opp_id_src1 = OPP_ID_INVALID;
			uint32_t optc_dsc_state = 0;

			// Step 1: Find out which OPTC is running and has DSC enabled.
			// We can't use res_pool->res_cap->num_timing_generator to check,
			// because it records the default number of display pipes built into
			// the driver, not the display pipes of the current chip. Some ASICs
			// are fused to fewer display pipes than the default. The driver
			// obtains the real count in the dcnxx_resource_construct function.
			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
				struct timing_generator *tg = dc->res_pool->timing_generators[i];

				if (tg->funcs->is_tg_enabled(tg)) {
					if (tg->funcs->get_dsc_status)
						tg->funcs->get_dsc_status(tg, &optc_dsc_state);
					// Only one OPTC with DSC is ON, so once we have a result
					// we can exit this block. A non-zero value means DSC is enabled.
					if (optc_dsc_state != 0) {
						tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
						break;
					}
				}
			}

			// Step 2: Power down the DSCs, but skip the DSC of the running OPTC
			for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
				struct dcn_dsc_state s = {0};

				/* avoid reading DSC state when it is not in use as it may be power gated */
				if (optc_dsc_state) {
					dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

					if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
							s.dsc_clock_en && s.dsc_fw_en)
						continue;
				}

				pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
			}
		}
	}
}

void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
			       struct dc_state *context)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dccg *dccg = dc->res_pool->dccg;

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* initialize HUBP on power up */
	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
	/* make sure DPPCLK is on */
	dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
	dpp->funcs->dpp_dppclk_control(dpp, false, true);
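	/*
	 * Note on the two-step DPPCLK sequence above (assuming the common DCN
	 * dpp_dppclk_control(dpp, dppclk_div, enable) signature): the DCCG
	 * root gate is released first so the clock can actually reach the
	 * DPP, then the DPP-side control enables DPPCLK without the divider.
	 */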
	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);
	/* TODO: insert PG here */
	if (dc->vm_pa_config.valid) {
		struct vm_system_aperture_param apt;

		apt.sys_default.quad_part = 0;

		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;

		// Program system aperture settings
		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
	}
	//DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);

	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
}

/* disable HW used by plane.
 * note: cannot disable until disconnect is complete
 */
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dccg *dccg = dc->res_pool->dccg;

	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	/* In flip immediate with pipe splitting case GSL is used for
	 * synchronization so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
	/*
	if (hubp->funcs->hubp_update_mall_sel)
		hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
	*/
	dc->hwss.set_flip_control_gsl(pipe_ctx, false);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);
	dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);

	hubp->power_gated = true;

	hubp->funcs->hubp_reset(hubp);
	dpp->funcs->dpp_reset(dpp);

	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
	//DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
}

void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	if (hws->funcs.plane_atomic_disable)
		hws->funcs.plane_atomic_disable(dc, pipe_ctx);

	/* Turn back off the phantom OTG after the phantom plane is fully disabled
	 */
	if (is_phantom)
		if (tg && tg->funcs->disable_phantom_crtc)
			tg->funcs->disable_phantom_crtc(tg);

	DC_LOG_DC("Power down front end %d\n",
		  pipe_ctx->pipe_idx);
}

void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;
	int edp_num = 0;
	struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

	update_state->pg_res_update[PG_DWB] = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
			update_state->pg_pipe_res_update[j][i] = true;

		if (!pipe_ctx)
			continue;

		if (pipe_ctx->plane_res.hubp)
			update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;

		if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;

		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;

		if (pipe_ctx->stream_res.dsc) {
			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
			if (dc->caps.sequential_ono) {
				update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
				update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;

				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
				if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
					pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
						update_state->pg_pipe_res_update[PG_HUBP][j] = false;
						update_state->pg_pipe_res_update[PG_DPP][j] = false;
					}
				}
			}
		}

		if (pipe_ctx->stream_res.opp)
			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;

		if (pipe_ctx->stream_res.hpo_dp_stream_enc)
			update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
	}

	for (i = 0; i < dc->link_count; i++) {
		update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;
		if (dc->links[i]->type != dc_connection_none)
			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = false;
	}

	/* Domain24 controls all OTG, MPC, OPP; as long as one OTG is still up, avoid enabling OTG PG */
	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];

		if (tg && tg->funcs->is_tg_enabled(tg)) {
			update_state->pg_pipe_res_update[PG_OPTC][i] = false;
			break;
		}
	}

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num == 0 ||
		((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
			(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
		/* eDP does not exist in this config; keep Domain24 powered on. For S0i3 this is handled in DMUB FW */
		update_state->pg_pipe_res_update[PG_OPTC][0] = false;
	}

	if (dc->caps.sequential_ono) {
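		/*
		 * Sequential ONO power-down must stay contiguous from the top:
		 * walk down from the highest pipe index and, once the highest
		 * HUBP/DPP instance that must remain powered is found, keep
		 * every lower-indexed instance powered as well.
		 */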
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
				!update_state->pg_pipe_res_update[PG_DPP][i]) {
				for (j = i - 1; j >= 0; j--) {
					update_state->pg_pipe_res_update[PG_HUBP][j] = false;
					update_state->pg_pipe_res_update[PG_DPP][j] = false;
				}

				break;
			}
		}
	}
}

void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
	struct pg_block_update *update_state)
{
	bool hpo_frl_stream_enc_acquired = false;
	bool hpo_dp_stream_enc_acquired = false;
	int i = 0, j = 0;

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *cur_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

		if (cur_pipe == NULL || new_pipe == NULL)
			continue;

		if ((!cur_pipe->plane_state && new_pipe->plane_state) ||
			(!cur_pipe->stream && new_pipe->stream) ||
			(cur_pipe->stream != new_pipe->stream && new_pipe->stream)) {
			// New pipe addition
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP && new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_MPCC && new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.mpcc_inst] = true;

				if (j == PG_DSC && new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPP && new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_OPTC && new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;

				if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
			}
		} else if (cur_pipe->plane_state == new_pipe->plane_state ||
				cur_pipe == new_pipe) {
			// unchanged pipes
			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
				if (j == PG_HUBP &&
					cur_pipe->plane_res.hubp != new_pipe->plane_res.hubp &&
					new_pipe->plane_res.hubp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;

				if (j == PG_DPP &&
					cur_pipe->plane_res.dpp != new_pipe->plane_res.dpp &&
					new_pipe->plane_res.dpp)
					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;

				if (j == PG_OPP &&
					cur_pipe->stream_res.opp != new_pipe->stream_res.opp &&
					new_pipe->stream_res.opp)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;

				if (j == PG_DSC &&
					cur_pipe->stream_res.dsc != new_pipe->stream_res.dsc &&
					new_pipe->stream_res.dsc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;

				if (j == PG_OPTC &&
					cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
					new_pipe->stream_res.tg)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;

				if (j == PG_DPSTREAM &&
					cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
					new_pipe->stream_res.hpo_dp_stream_enc)
					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
			}
		}
	}

	for (i = 0; i < dc->link_count; i++)
		if (dc->links[i]->type != dc_connection_none)
			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;

	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
				dc->res_pool->hpo_dp_stream_enc[i]) {
			hpo_dp_stream_enc_acquired = true;
			break;
		}
	}

	if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
		update_state->pg_res_update[PG_HPO] = true;

	if (hpo_frl_stream_enc_acquired)
		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;

	if (dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];

			if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
				update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
				update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
				update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;

				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
				if (new_pipe->plane_res.hubp &&
					new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
						update_state->pg_pipe_res_update[PG_HUBP][j] = true;
						update_state->pg_pipe_res_update[PG_DPP][j] = true;
					}
				}
			}
		}

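		/*
		 * Mirror of the power-down rule above: sequential ONO power-up
		 * must also be contiguous, so once the highest HUBP/DPP
		 * instance being ungated is found, ungate every lower-indexed
		 * instance as well.
		 */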
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
				update_state->pg_pipe_res_update[PG_DPP][i]) {
				for (j = i - 1; j >= 0; j--) {
					update_state->pg_pipe_res_update[PG_HUBP][j] = true;
					update_state->pg_pipe_res_update[PG_DPP][j] = true;
				}

				break;
			}
		}
	}
}

/**
 * dcn35_hw_block_power_down() - power down sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power down:
 *
 * ONO Region 3, DCPG 25: hpo - SKIPPED
 * ONO Region 4, DCPG 0: dchubp0, dpp0
 * ONO Region 6, DCPG 1: dchubp1, dpp1
 * ONO Region 8, DCPG 2: dchubp2, dpp2
 * ONO Region 10, DCPG 3: dchubp3, dpp3
 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power it down at IPS2 entry
 * ONO Region 5, DCPG 16: dsc0
 * ONO Region 7, DCPG 17: dsc1
 * ONO Region 9, DCPG 18: dsc2
 * ONO Region 11, DCPG 19: dsc3
 * ONO Region 2, DCPG 24: mpc opp optc dwb
 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the LONO timer is armed
 *
 * If sequential ONO is specified the order is modified from ONO Region 11 -> ONO Region 0 descending.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_down(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;

	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
	}

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
				update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}

		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}
		}
	} else {
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
			}

			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
				update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (pg_cntl->funcs->hubp_dpp_pg_control)
					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
			}
		}
	}

	/* this will need all the clients to unregister OPTC interrupts; let DMUB FW handle this */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);

	// domain22, 23, 25 currently always on.

}

/**
 * dcn35_hw_block_power_up() - power up sequence
 *
 * The following sequence describes the ON-OFF (ONO) for power up:
 *
 * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
 * ONO Region 2, DCPG 24: mpc opp optc dwb
 * ONO Region 5, DCPG 16: dsc0
 * ONO Region 7, DCPG 17: dsc1
 * ONO Region 9, DCPG 18: dsc2
 * ONO Region 11, DCPG 19: dsc3
 * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power it up at IPS2 exit
 * ONO Region 4, DCPG 0: dchubp0, dpp0
 * ONO Region 6, DCPG 1: dchubp1, dpp1
 * ONO Region 8, DCPG 2: dchubp2, dpp2
 * ONO Region 10, DCPG 3: dchubp3, dpp3
 * ONO Region 3, DCPG 25: hpo - SKIPPED
 *
 * If sequential ONO is specified the order is modified from ONO Region 0 -> ONO Region 11 ascending.
 *
 * @dc: Current DC state
 * @update_state: update PG sequence states for HW block
 */
void dcn35_hw_block_power_up(struct dc *dc,
	struct pg_block_update *update_state)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	if (dc->debug.ignore_pg)
		return;
	// domain22, 23, 25 currently always on.
	/* this will need all the clients to unregister OPTC interrupts; let DMUB FW handle this */
	if (pg_cntl->funcs->plane_otg_pg_control)
		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);

	if (!dc->caps.sequential_ono) {
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
			update_state->pg_pipe_res_update[PG_DPP][i]) {
			if (pg_cntl->funcs->hubp_dpp_pg_control)
				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
		}

		if (dc->caps.sequential_ono) {
			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
				if (pg_cntl->funcs->dsc_pg_control)
					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
			}
		}
	}
	if (update_state->pg_res_update[PG_HPO]) {
		if (pg_cntl->funcs->hpo_pg_control)
			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
	}
}

void dcn35_root_clock_control(struct dc *dc,
	struct pg_block_update *update_state, bool power_on)
{
	int i = 0;
	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;

	if (!pg_cntl)
		return;
	/* enable root clocks first when powering up */
	if (power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
				update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);

	}
	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
			if (power_on) {
				if (dc->res_pool->dccg->funcs->enable_dsc)
					dc->res_pool->dccg->funcs->enable_dsc(dc->res_pool->dccg, i);
			} else {
				if (dc->res_pool->dccg->funcs->disable_dsc)
					dc->res_pool->dccg->funcs->disable_dsc(dc->res_pool->dccg, i);
			}
		}
	}
	/* disable root clocks first when powering down */
	if (!power_on) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
				update_state->pg_pipe_res_update[PG_DPP][i]) {
				if (dc->hwseq->funcs.dpp_root_clock_control)
					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
			}
			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
				if (dc->hwseq->funcs.dpstream_root_clock_control)
					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
		}

		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
				if (dc->hwseq->funcs.physymclk_root_clock_control)
					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);

	}
}

void dcn35_prepare_bandwidth(
	struct dc *dc,
	struct dc_state *context)
{
	struct pg_block_update pg_update_state;

	if (dc->hwss.calc_blocks_to_ungate) {
		dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);

		if (dc->hwss.root_clock_control)
			dc->hwss.root_clock_control(dc, &pg_update_state, true);
		/* power up required HW block */
		if (dc->hwss.hw_block_power_up)
			dc->hwss.hw_block_power_up(dc, &pg_update_state);
	}

	dcn20_prepare_bandwidth(dc, context);

	print_pg_status(dc, __func__, ": after rcg and power up");
}

void dcn35_optimize_bandwidth(
	struct dc *dc,
	struct dc_state *context)
{
	struct pg_block_update pg_update_state;

	print_pg_status(dc, __func__, ": before rcg and power down");

	dcn20_optimize_bandwidth(dc, context);

	if (dc->hwss.calc_blocks_to_gate) {
		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
		/* try to power down unused blocks */
		if (dc->hwss.hw_block_power_down)
			dc->hwss.hw_block_power_down(dc, &pg_update_state);

		if (dc->hwss.root_clock_control)
			dc->hwss.root_clock_control(dc, &pg_update_state, false);
	}

	print_pg_status(dc, __func__, ": after rcg and power down");
}

void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
		int num_pipes, struct dc_crtc_timing_adjust adjust)
{
	int i = 0;
	struct drr_params params = {0};
	// DRR set trigger event mapped to OTG_TRIG_A
	unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
	// Note DRR trigger events are generated regardless of whether num frames met.
	unsigned int num_frames = 2;

	params.vertical_total_max = adjust.v_total_max;
	params.vertical_total_min = adjust.v_total_min;
	params.vertical_total_mid = adjust.v_total_mid;
	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;

	for (i = 0; i < num_pipes; i++) {
		/* dc_state_destruct() might null the stream resources, so fetch tg
		 * here first to avoid a race condition. The lifetime of the pointee
		 * itself (the timing_generator object) is not a problem here.
		 */
		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;

		if ((tg != NULL) && tg->funcs) {
			if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) {
				struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
				struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
				unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);

				if (frame_rate >= 120 && dc->caps.ips_support &&
					dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
					/* IPS enable case */
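					/* Illustrative arithmetic only: a computed frame_rate of 144
					 * yields num_frames = 2 * (144 % 60) = 48, while exact
					 * multiples of 60 yield 0.
					 */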
					num_frames = 2 * (frame_rate % 60);
				}
			}
			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
				if (tg->funcs->set_static_screen_control)
					tg->funcs->set_static_screen_control(
						tg, event_triggers, num_frames);
		}
	}
}

void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
		int num_pipes, const struct dc_static_screen_params *params)
{
	unsigned int i;
	unsigned int triggers = 0;

	if (params->triggers.surface_update)
		triggers |= 0x200; /* bit 9: 10 0000 0000 */
	if (params->triggers.cursor_update)
		triggers |= 0x8; /* bit 3 */
	if (params->triggers.force_trigger)
		triggers |= 0x1;
	for (i = 0; i < num_pipes; i++)
		pipe_ctx[i]->stream_res.tg->funcs->
			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
					triggers, params->num_frames);
}

void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
		int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
{
	int i = 0;
	struct long_vtotal_params params = {0};

	params.vertical_total_max = v_total_max;
	params.vertical_total_min = v_total_min;

	for (i = 0; i < num_pipes; i++) {
		if (!pipe_ctx[i])
			continue;

		if (pipe_ctx[i]->stream) {
			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;

			if (timing)
				params.vertical_blank_start = timing->v_total - timing->v_front_porch;
			else
				params.vertical_blank_start = 0;

			if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
					pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
		}
	}
}

static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
{
	/* Calculate the average pixel count per TU and return true if it falls
	 * under ~2.00, so the caller can reject the DIG pixel rate divider and
	 * avoid generating empty TUs. This is only required for DPIA tunneling,
	 * as empty TUs are legal to generate for native DP links. Assume TU
	 * size 64, as there is currently no scenario where it's reprogrammed
	 * from the HW default. MTPs have no such limitation, so this does not
	 * affect MST use cases.
	 */
	unsigned int pix_clk_mhz;
	unsigned int symclk_mhz;
	unsigned int avg_pix_per_tu_x1000;
	unsigned int tu_size_bytes = 64;
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
	const struct dc *dc = pipe_ctx->stream->link->dc;

	if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
		return false;

	// Not necessary for MST configurations
	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return false;

	pix_clk_mhz = timing->pix_clk_100hz / 10000;

	// If this is true, can't block due to dynamic ODM
	if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
		return false;

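	/*
	 * The symbol clocks below follow from the 8b/10b DP link rates: the
	 * symbol clock in MHz is the per-lane bit rate in Gbps divided by the
	 * 10 bits per symbol (e.g. RBR 1.62 Gbps -> 162 MHz).
	 */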
	switch (link_settings->link_rate) {
	case LINK_RATE_LOW:
		symclk_mhz = 162;
		break;
	case LINK_RATE_HIGH:
		symclk_mhz = 270;
		break;
	case LINK_RATE_HIGH2:
		symclk_mhz = 540;
		break;
	case LINK_RATE_HIGH3:
		symclk_mhz = 810;
		break;
	default:
		// We shouldn't be tunneling any other rates, something is wrong
		ASSERT(0);
		return false;
	}

	avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
			/ (symclk_mhz * link_settings->lane_count);
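	/*
	 * Worked example (illustrative numbers, not from the original source):
	 * a 148 MHz pixel clock over 4-lane HBR (symclk 270 MHz) gives
	 * (1000 * 148 * 64) / (270 * 4) = 8770, i.e. roughly 8.77 pixels per
	 * TU, comfortably above the ~2.02 cutoff below.
	 */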

	// Add small empirically-decided margin to account for potential jitter
	return (avg_pix_per_tu_x1000 < 2020);
}

bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
	struct dc *dc = pipe_ctx->stream->ctx->dc;

	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
		return false;

	if (should_avoid_empty_tu(pipe_ctx))
		return false;

	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
		dc->debug.enable_dp_dig_pixel_rate_div_policy)
		return true;

	return false;
}

/*
 * Set powerup to true for every pipe to match pre-OS configuration.
 */
static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
{
	int i = 0, j = 0;

	memset(update_state, 0, sizeof(struct pg_block_update));

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
			update_state->pg_pipe_res_update[j][i] = true;

	update_state->pg_res_update[PG_HPO] = true;
	update_state->pg_res_update[PG_DWB] = true;
}

/*
 * Power up all gated blocks to restore the optimization state of the pre-OS
 * environment. Reuse the hwss functions and the existing PG & RCG flags to
 * decide the power-up sequence.
 */
void dcn35_hardware_release(struct dc *dc)
{
	struct pg_block_update pg_update_state;

	dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);

	if (dc->hwss.root_clock_control)
		dc->hwss.root_clock_control(dc, &pg_update_state, true);
	/* power up required HW blocks */
	if (dc->hwss.hw_block_power_up)
		dc->hwss.hw_block_power_up(dc, &pg_update_state);
}