xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright 2023 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 
27 #include "dm_services.h"
28 #include "dm_helpers.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "dccg.h"
32 #include "dce/dce_hwseq.h"
33 #include "clk_mgr.h"
34 #include "reg_helper.h"
35 #include "abm.h"
36 #include "hubp.h"
37 #include "dchubbub.h"
38 #include "timing_generator.h"
39 #include "opp.h"
40 #include "ipp.h"
41 #include "mpc.h"
42 #include "mcif_wb.h"
43 #include "dc_dmub_srv.h"
44 #include "dcn35_hwseq.h"
45 #include "dcn35/dcn35_dccg.h"
46 #include "link_hwss.h"
47 #include "dpcd_defs.h"
48 #include "dce/dmub_outbox.h"
49 #include "link_service.h"
50 #include "dcn10/dcn10_hwseq.h"
51 #include "inc/link_enc_cfg.h"
52 #include "dcn30/dcn30_vpg.h"
53 #include "dce/dce_i2c_hw.h"
54 #include "dsc.h"
55 #include "dcn20/dcn20_optc.h"
56 #include "dcn30/dcn30_cm_common.h"
57 #include "dcn31/dcn31_hwseq.h"
58 #include "dcn20/dcn20_hwseq.h"
59 #include "dc_state_priv.h"
60 
61 #define DC_LOGGER_INIT(logger) \
62 	struct dal_logger *dc_logger = logger
63 
64 #define CTX \
65 	hws->ctx
66 #define REG(reg)\
67 	hws->regs->reg
68 #define DC_LOGGER \
69 	dc_logger
70 
71 
72 #undef FN
73 #define FN(reg_name, field_name) \
74 	hws->shifts->field_name, hws->masks->field_name
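
/*
 * Editor's note (illustrative, not part of the driver): the REG_* helpers
 * from reg_helper.h pair the per-ASIC offset table (hws->regs, via REG())
 * with the shift/mask tables selected by FN() above. For example,
 *
 *	REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
 *
 * performs a read-modify-write of hws->regs->DCFCLK_CNTL that touches only
 * the field described by hws->shifts->DCFCLK_GATE_DIS and
 * hws->masks->DCFCLK_GATE_DIS.
 */
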
75 #if 0
76 static void enable_memory_low_power(struct dc *dc)
77 {
78 	struct dce_hwseq *hws = dc->hwseq;
79 	int i;
80 
81 	if (dc->debug.enable_mem_low_power.bits.dmcu) {
82 		// Force ERAM to shutdown if DMCU is not enabled
83 		if (dc->debug.disable_dmcu || dc->config.disable_dmcu) {
84 			REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3);
85 		}
86 	}
87 	/*dcn35 has MEM_PWR enabled by default, make sure to wake them up*/
88 	// Set default OPTC memory power states
89 	if (dc->debug.enable_mem_low_power.bits.optc) {
90 		// Shutdown when unassigned and light sleep in VBLANK
91 		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
92 	}
93 
94 	if (dc->debug.enable_mem_low_power.bits.vga) {
95 		// Power down VGA memory
96 		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
97 	}
98 
99 	if (dc->debug.enable_mem_low_power.bits.mpc &&
100 		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode)
101 		dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc);
102 
103 	if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
104 		// Power down VPGs
105 		for (i = 0; i < dc->res_pool->stream_enc_count; i++)
106 			dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
107 #if defined(CONFIG_DRM_AMD_DC_DP2_0)
108 		for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
109 			dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
110 #endif
111 	}
112 
113 }
114 #endif
115 
116 static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log)
117 {
118 	if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) {
119 		if (dc->res_pool->pg_cntl->funcs->print_pg_status)
120 			dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
121 	}
122 }
123 
124 void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
125 {
126 	REG_UPDATE_3(DMU_CLK_CNTL,
127 		RBBMIF_FGCG_REP_DIS, !enable,
128 		IHC_FGCG_REP_DIS, !enable,
129 		LONO_FGCG_REP_DIS, !enable
130 	);
131 }
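
/*
 * Note (editorial): DMU_CLK_CNTL carries *disable* bits for the fine-grain
 * clock-gating repeaters, hence the !enable above -- e.g. calling
 * dcn35_set_dmu_fgcg(hws, true) clears RBBMIF/IHC/LONO_FGCG_REP_DIS to 0,
 * which leaves fine-grain clock gating active.
 */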
132 
133 void dcn35_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
134 {
135 	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
136 }
137 
138 void dcn35_init_hw(struct dc *dc)
139 {
140 	struct abm **abms = dc->res_pool->multiple_abms;
141 	struct dce_hwseq *hws = dc->hwseq;
142 	struct dc_bios *dcb = dc->ctx->dc_bios;
143 	struct resource_pool *res_pool = dc->res_pool;
144 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
145 	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
146 	int i;
147 
148 	print_pg_status(dc, __func__, ": start");
149 
150 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
151 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
152 
153 	//dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);
154 
155 	if (!dcb->funcs->is_accelerated_mode(dcb)) {
156 		/*this calls into dmubfw to do the init*/
157 		hws->funcs.bios_golden_init(dc);
158 	}
159 
160 	// Initialize the dccg
161 	if (res_pool->dccg->funcs->dccg_init)
162 		res_pool->dccg->funcs->dccg_init(res_pool->dccg);
163 
164 	//enable_memory_low_power(dc);
165 
166 	if (dc->ctx->dc_bios->fw_info_valid) {
167 		res_pool->ref_clocks.xtalin_clock_inKhz =
168 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
169 
170 		if (res_pool->hubbub) {
171 
172 			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
173 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
174 				&res_pool->ref_clocks.dccg_ref_clock_inKhz);
175 
176 			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
177 				res_pool->ref_clocks.dccg_ref_clock_inKhz,
178 				&res_pool->ref_clocks.dchub_ref_clock_inKhz);
179 		} else {
180 			// Not all ASICs have DCCG sw component
181 			res_pool->ref_clocks.dccg_ref_clock_inKhz =
182 				res_pool->ref_clocks.xtalin_clock_inKhz;
183 			res_pool->ref_clocks.dchub_ref_clock_inKhz =
184 				res_pool->ref_clocks.xtalin_clock_inKhz;
185 		}
186 	} else
187 		ASSERT_CRITICAL(false);
188 
189 	for (i = 0; i < dc->link_count; i++) {
190 		/* Power up AND update implementation according to the
191 		 * required signal (which may be different from the
192 		 * default signal on connector).
193 		 */
194 		struct dc_link *link = dc->links[i];
195 
196 		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
197 			continue;
198 
199 		link->link_enc->funcs->hw_init(link->link_enc);
200 
201 		/* Check for enabled DIG to identify enabled display */
202 		if (link->link_enc->funcs->is_dig_enabled &&
203 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
204 			link->link_status.link_active = true;
205 			if (link->link_enc->funcs->fec_is_active &&
206 					link->link_enc->funcs->fec_is_active(link->link_enc))
207 				link->fec_state = dc_link_fec_enabled;
208 		}
209 	}
210 
211 	/* we want to turn off all dp displays before doing detection */
212 	dc->link_srv->blank_all_dp_displays(dc);
213 
214 	if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
215 		res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
216 	/* If taking control over from VBIOS, we may want to optimize our first
217 	 * mode set, so we need to skip powering down pipes until we know which
218 	 * pipes we want to use.
219 	 * Otherwise, if taking control is not possible, we need to power
220 	 * everything down.
221 	 */
222 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
223 
224 		// we want to turn off edp displays if odm is enabled and no seamless boot
225 		if (!dc->caps.seamless_odm) {
226 			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
227 				struct timing_generator *tg = dc->res_pool->timing_generators[i];
228 				uint32_t num_opps, opp_id_src0, opp_id_src1;
229 
230 				num_opps = 1;
231 				if (tg) {
232 					if (tg->funcs->is_tg_enabled(tg) && tg->funcs->get_optc_source) {
233 						tg->funcs->get_optc_source(tg, &num_opps,
234 								&opp_id_src0, &opp_id_src1);
235 					}
236 				}
237 
238 				if (num_opps > 1) {
239 					dc->link_srv->blank_all_edp_displays(dc);
240 					break;
241 				}
242 			}
243 		}
244 
245 		hws->funcs.init_pipes(dc, dc->current_state);
246 		print_pg_status(dc, __func__, ": after init_pipes");
247 
248 		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
249 			!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
250 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
251 					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
252 	}
253 	for (i = 0; i < res_pool->audio_count; i++) {
254 		struct audio *audio = res_pool->audios[i];
255 
256 		audio->funcs->hw_init(audio);
257 	}
258 
259 	for (i = 0; i < dc->link_count; i++) {
260 		struct dc_link *link = dc->links[i];
261 
262 		if (link->panel_cntl) {
263 			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
264 			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
265 		}
266 	}
267 	if (dc->ctx->dmub_srv) {
268 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
269 			if (abms[i] != NULL && abms[i]->funcs != NULL)
270 				abms[i]->funcs->abm_init(abms[i], backlight, user_level);
271 		}
272 	}
273 
274 	/* Power AFMT HDMI memory. TODO: may move to output disable/enable to save power */
275 	REG_WRITE(DIO_MEM_PWR_CTRL, 0);
276 
277 	// Set i2c to light sleep until engine is setup
278 	if (dc->debug.enable_mem_low_power.bits.i2c)
279 		REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 0);
280 
281 	if (hws->funcs.setup_hpo_hw_control)
282 		hws->funcs.setup_hpo_hw_control(hws, false);
283 
284 	if (!dc->debug.disable_clock_gate) {
285 		/* enable all DCN clock gating */
286 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
287 	}
288 
289 	if (dc->debug.disable_mem_low_power) {
290 		REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
291 	}
292 	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
293 		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
294 
295 	if (dc->clk_mgr && dc->clk_mgr->funcs->notify_wm_ranges)
296 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
297 
298 	if (dc->clk_mgr && dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled)
299 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
300 
301 
302 
303 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
304 		dc->res_pool->hubbub->funcs->force_pstate_change_control(
305 				dc->res_pool->hubbub, false, false);
306 
307 	if (dc->res_pool->hubbub->funcs->init_crb)
308 		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
309 
310 	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
311 		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
312 	// Get DMCUB capabilities
313 	if (dc->ctx->dmub_srv) {
314 		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
315 		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
316 		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
317 		dc->caps.dmub_caps.aux_backlight_support = dc->ctx->dmub_srv->dmub->feature_caps.abm_aux_backlight_support;
318 	}
319 
320 	if (dc->res_pool->pg_cntl) {
321 		if (dc->res_pool->pg_cntl->funcs->init_pg_status)
322 			dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
323 	}
324 	print_pg_status(dc, __func__, ": after init_pg_status");
325 }
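
/*
 * Example (editorial): if VBIOS reports a 100,000 kHz crystal and the pool
 * has no hubbub, the fallback path in dcn35_init_hw() leaves the xtalin,
 * dccg and dchub reference clocks all at 100,000 kHz; with a hubbub present,
 * the DCCG/HUBBUB query hooks may report adjusted (e.g. divided-down)
 * values instead.
 */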
326 
327 static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
328 {
329 	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
330 	struct dc_stream_state *stream = pipe_ctx->stream;
331 	struct pipe_ctx *odm_pipe;
332 	int opp_cnt = 1;
333 
334 	DC_LOGGER_INIT(stream->ctx->logger);
335 
336 	ASSERT(dsc);
337 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
338 		opp_cnt++;
339 
340 	if (enable) {
341 		struct dsc_config dsc_cfg;
342 		struct dsc_optc_config dsc_optc_cfg = {0};
343 		enum optc_dsc_mode optc_dsc_mode;
344 		struct dcn_dsc_state dsc_state = {0};
345 
346 		if (!dsc) {
347 			DC_LOG_DSC("DSC is NULL for tg instance %d:", pipe_ctx->stream_res.tg->inst);
348 			return;
349 		}
350 
351 		if (dsc->funcs->dsc_read_state) {
352 			dsc->funcs->dsc_read_state(dsc, &dsc_state);
353 			if (!dsc_state.dsc_fw_en) {
354 				DC_LOG_DSC("DSC has been disabled for tg instance %d:", pipe_ctx->stream_res.tg->inst);
355 				return;
356 			}
357 		}
358 		/* Enable DSC hw block */
359 		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
360 		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
361 		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
362 		dsc_cfg.color_depth = stream->timing.display_color_depth;
363 		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
364 		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
365 		ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
366 		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
367 		dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
368 
369 		dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
370 		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
371 		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
372 			struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
373 
374 			ASSERT(odm_dsc);
375 			odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
376 			odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
377 		}
378 		dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
379 		dsc_cfg.pic_width *= opp_cnt;
380 
381 		optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
382 
383 		/* Enable DSC in OPTC */
384 		DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
385 		pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
386 							optc_dsc_mode,
387 							dsc_optc_cfg.bytes_per_pixel,
388 							dsc_optc_cfg.slice_width);
389 	} else {
390 		/* disable DSC in OPTC */
391 		pipe_ctx->stream_res.tg->funcs->set_dsc_config(
392 				pipe_ctx->stream_res.tg,
393 				OPTC_DSC_DISABLED, 0, 0);
394 
395 		/* disable DSC block */
396 		dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
397 		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
398 			ASSERT(odm_pipe->stream_res.dsc);
399 			odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
400 		}
401 	}
402 }
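
/*
 * Worked example (editorial): with 2:1 ODM (opp_cnt == 2), a 3840-pixel-wide
 * timing with dsc_cfg.dc_dsc_cfg.num_slices_h == 4 is split by
 * update_dsc_on_stream() so that each DSC instance is programmed with
 * pic_width = 1920 and num_slices_h = 2; the per-stream totals are then
 * restored in the local dsc_cfg after the per-instance programming.
 */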
403 
404 // Given any pipe_ctx, return the total ODM combine factor, and optionally return
405 // the OPP ids which are used
406 static unsigned int get_odm_config(struct pipe_ctx *pipe_ctx, unsigned int *opp_instances)
407 {
408 	unsigned int opp_count = 1;
409 	struct pipe_ctx *odm_pipe;
410 
411 	// First get to the top pipe
412 	for (odm_pipe = pipe_ctx; odm_pipe->prev_odm_pipe; odm_pipe = odm_pipe->prev_odm_pipe)
413 		;
414 
415 	// First pipe is always used
416 	if (opp_instances)
417 		opp_instances[0] = odm_pipe->stream_res.opp->inst;
418 
419 	// Find and count odm pipes, if any
420 	for (odm_pipe = odm_pipe->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
421 		if (opp_instances)
422 			opp_instances[opp_count] = odm_pipe->stream_res.opp->inst;
423 		opp_count++;
424 	}
425 
426 	return opp_count;
427 }
428 
429 void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
430 {
431 	struct pipe_ctx *odm_pipe;
432 	int opp_cnt = 0;
433 	int opp_inst[MAX_PIPES] = {0};
434 	int odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
435 	int last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
436 	struct mpc *mpc = dc->res_pool->mpc;
437 	int i;
438 
439 	opp_cnt = get_odm_config(pipe_ctx, opp_inst);
440 
441 	if (opp_cnt > 1)
442 		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
443 				pipe_ctx->stream_res.tg,
444 				opp_inst, opp_cnt,
445 				odm_slice_width, last_odm_slice_width);
446 	else
447 		pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
448 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
449 
450 	if (mpc->funcs->set_out_rate_control) {
451 		for (i = 0; i < opp_cnt; ++i) {
452 			mpc->funcs->set_out_rate_control(
453 					mpc, opp_inst[i],
454 					false,
455 					0,
456 					NULL);
457 		}
458 	}
459 
460 	for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
461 		odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
462 				odm_pipe->stream_res.opp,
463 				true);
464 	}
465 
466 	if (pipe_ctx->stream_res.dsc) {
467 		struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
468 
469 		update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
470 
471 		/* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
472 		if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
473 				current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
474 			struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
475 			/* disconnect DSC block from stream */
476 			dsc->funcs->dsc_disconnect(dsc);
477 		}
478 	}
479 }
480 
481 void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
482 {
483 	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
484 		return;
485 
486 	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control) {
487 		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
488 			hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
489 	}
490 }
491 
492 void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on)
493 {
494 	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream)
495 		return;
496 
497 	if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) {
498 		hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating(
499 			hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on);
500 	}
501 }
502 
503 void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_inst, bool clock_on)
504 {
505 	if (!hws->ctx->dc->debug.root_clock_optimization.bits.physymclk)
506 		return;
507 
508 	if (hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating) {
509 		hws->ctx->dc->res_pool->dccg->funcs->set_physymclk_root_clock_gating(
510 			hws->ctx->dc->res_pool->dccg, phy_inst, clock_on);
511 	}
512 }
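
/*
 * Editorial sketch (hypothetical helper, not in the driver): the three
 * root-clock helpers above share one shape -- bail out unless the matching
 * debug.root_clock_optimization bit is set, then forward to the DCCG hook
 * when the ASIC provides one. Factored out, it could look like this:
 */
#if 0
static void dcn35_root_clock_control_one(struct dce_hwseq *hws,
		bool opt_bit_set,
		void (*dccg_hook)(struct dccg *dccg, unsigned int inst, bool clock_on),
		unsigned int inst, bool clock_on)
{
	if (!opt_bit_set)
		return;

	if (dccg_hook)
		dccg_hook(hws->ctx->dc->res_pool->dccg, inst, clock_on);
}
#endif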
513 
514 /* In headless boot cases, DIG may be turned
515  * on, which causes HW/SW discrepancies.
516  * To avoid this, power down hardware on boot
517  * if DIG is turned on
518  */
519 void dcn35_power_down_on_boot(struct dc *dc)
520 {
521 	struct dc_link *edp_links[MAX_NUM_EDP];
522 	struct dc_link *edp_link = NULL;
523 	int edp_num;
524 	int i = 0;
525 
526 	dc_get_edp_links(dc, edp_links, &edp_num);
527 	if (edp_num)
528 		edp_link = edp_links[0];
529 
530 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
531 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
532 			dc->hwseq->funcs.edp_backlight_control &&
533 			dc->hwseq->funcs.power_down &&
534 			dc->hwss.edp_power_control) {
535 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
536 		dc->hwseq->funcs.power_down(dc);
537 		dc->hwss.edp_power_control(edp_link, false);
538 	} else {
539 		for (i = 0; i < dc->link_count; i++) {
540 			struct dc_link *link = dc->links[i];
541 
542 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
543 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
544 					dc->hwseq->funcs.power_down) {
545 				dc->hwseq->funcs.power_down(dc);
546 				break;
547 			}
548 
549 		}
550 	}
551 
552 	/*
553 	 * Call update_clocks with empty context
554 	 * to send DISPLAY_OFF
555 	 * Otherwise DISPLAY_OFF may not be asserted
556 	 */
557 	if (dc->clk_mgr->funcs->set_low_power_state)
558 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
559 
560 	if (dc->clk_mgr->clks.pwr_state == DCN_PWR_STATE_LOW_POWER)
561 		dc_allow_idle_optimizations(dc, true);
562 }
563 
564 bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
565 {
566 	if (dc->debug.dmcub_emulation)
567 		return true;
568 
569 	if (enable) {
570 		uint32_t num_active_edp = 0;
571 		int i;
572 
573 		for (i = 0; i < dc->current_state->stream_count; ++i) {
574 			struct dc_stream_state *stream = dc->current_state->streams[i];
575 			struct dc_link *link = stream->link;
576 			bool is_psr = link && !link->panel_config.psr.disable_psr &&
577 				      (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
578 				       link->psr_settings.psr_version == DC_PSR_VERSION_SU_1);
579 			bool is_replay = link && link->replay_settings.replay_feature_enabled;
580 
581 			/* Ignore streams that are disabled. */
582 			if (stream->dpms_off)
583 				continue;
584 
585 			/* Active external displays block idle optimizations. */
586 			if (!dc_is_embedded_signal(stream->signal))
587 				return false;
588 
589 			/* If the panel is not on PWRSEQ0, we can't enter idle optimizations */
590 			if (link && link->link_index != 0)
591 				return false;
592 
593 			/* Check for panel power features required for idle optimizations. */
594 			if (!is_psr && !is_replay)
595 				return false;
596 
597 			num_active_edp += 1;
598 		}
599 
600 		/* If more than one active eDP then disallow. */
601 		if (num_active_edp > 1)
602 			return false;
603 	}
604 
605 	// TODO: review other cases when idle optimization is allowed
606 	dc_dmub_srv_apply_idle_power_optimizations(dc, enable);
607 
608 	return true;
609 }
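
/*
 * Example (editorial): a single internal panel on link_index 0 with PSR or
 * Replay enabled and dpms on passes every check above (num_active_edp == 1),
 * so the idle optimizations are applied; any active external display, a
 * second active eDP, or a panel with neither PSR nor Replay makes the
 * function return false.
 */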
610 
611 void dcn35_z10_restore(const struct dc *dc)
612 {
613 	if (dc->debug.disable_z10)
614 		return;
615 
616 	dc_dmub_srv_apply_idle_power_optimizations(dc, false);
617 
618 	dcn31_z10_restore(dc);
619 }
620 
621 void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
622 {
623 	int i;
624 	struct dce_hwseq *hws = dc->hwseq;
625 	struct hubbub *hubbub = dc->res_pool->hubbub;
626 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
627 	bool can_apply_seamless_boot = false;
628 	bool tg_enabled[MAX_PIPES] = {false};
629 
630 	for (i = 0; i < context->stream_count; i++) {
631 		if (context->streams[i]->apply_seamless_boot_optimization) {
632 			can_apply_seamless_boot = true;
633 			break;
634 		}
635 	}
636 
637 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
638 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
639 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
640 
641 		/* There is an assumption that pipe_ctx does not map irregularly
642 		 * to a non-preferred front end. If pipe_ctx->stream is not NULL,
643 		 * we will use the pipe, so don't disable it
644 		 */
645 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
646 			continue;
647 
648 		/* Blank controller using driver code instead of
649 		 * command table.
650 		 */
651 		if (tg->funcs->is_tg_enabled(tg)) {
652 			if (hws->funcs.init_blank != NULL) {
653 				hws->funcs.init_blank(dc, tg);
654 				tg->funcs->lock(tg);
655 			} else {
656 				tg->funcs->lock(tg);
657 				tg->funcs->set_blank(tg, true);
658 				hwss_wait_for_blank_complete(tg);
659 			}
660 		}
661 	}
662 
663 	/* Reset det size */
664 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
665 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
666 		struct hubp *hubp = dc->res_pool->hubps[i];
667 
668 		/* Do not need to reset for seamless boot */
669 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
670 			continue;
671 
672 		if (hubbub && hubp) {
673 			if (hubbub->funcs->program_det_size)
674 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
675 			if (hubbub->funcs->program_det_segments)
676 				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
677 		}
678 	}
679 
680 	/* num_opp will be equal to the number of MPCCs */
681 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
682 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
683 
684 		/* Cannot reset the MPC mux if seamless boot */
685 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
686 			continue;
687 
688 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
689 				dc->res_pool->mpc, i);
690 	}
691 
692 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
693 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
694 		struct hubp *hubp = dc->res_pool->hubps[i];
695 		struct dpp *dpp = dc->res_pool->dpps[i];
696 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
697 
698 		/* There is an assumption that pipe_ctx does not map irregularly
699 		 * to a non-preferred front end. If pipe_ctx->stream is not NULL,
700 		 * we will use the pipe, so don't disable it
701 		 */
702 		if (can_apply_seamless_boot &&
703 			pipe_ctx->stream != NULL &&
704 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
705 				pipe_ctx->stream_res.tg)) {
706 			// Enable double buffering for OTG_BLANK regardless of
707 			// whether seamless boot is enabled, to suppress global sync
708 			// signals when the OTG is blanked. This prevents the pipe
709 			// from requesting data while in PSR.
710 			tg->funcs->tg_init(tg);
711 			hubp->power_gated = true;
712 			tg_enabled[i] = true;
713 			continue;
714 		}
715 
716 		/* Disable on the current state so the new one isn't cleared. */
717 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
718 
719 		hubp->funcs->hubp_reset(hubp);
720 		dpp->funcs->dpp_reset(dpp);
721 
722 		pipe_ctx->stream_res.tg = tg;
723 		pipe_ctx->pipe_idx = i;
724 
725 		pipe_ctx->plane_res.hubp = hubp;
726 		pipe_ctx->plane_res.dpp = dpp;
727 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
728 		hubp->mpcc_id = dpp->inst;
729 		hubp->opp_id = OPP_ID_INVALID;
730 		hubp->power_gated = false;
731 
732 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
733 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
734 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
735 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
736 
737 		hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
738 
739 		if (tg->funcs->is_tg_enabled(tg))
740 			tg->funcs->unlock(tg);
741 
742 		dc->hwss.disable_plane(dc, context, pipe_ctx);
743 
744 		pipe_ctx->stream_res.tg = NULL;
745 		pipe_ctx->plane_res.hubp = NULL;
746 
747 		if (tg->funcs->is_tg_enabled(tg)) {
748 			if (tg->funcs->init_odm)
749 				tg->funcs->init_odm(tg);
750 		}
751 
752 		tg->funcs->tg_init(tg);
753 	}
754 
755 	/* Clean up MPC tree */
756 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
757 		if (tg_enabled[i]) {
758 			if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) {
759 				if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) {
760 					int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id;
761 
762 					if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id]))
763 						dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
764 				}
765 			}
766 		}
767 	}
768 
769 	if (pg_cntl != NULL) {
770 		if (pg_cntl->funcs->dsc_pg_control != NULL) {
771 			uint32_t num_opps = 0;
772 			uint32_t opp_id_src0 = OPP_ID_INVALID;
773 			uint32_t opp_id_src1 = OPP_ID_INVALID;
774 			uint32_t optc_dsc_state = 0;
775 
776 			// Step 1: Find out which OPTC is running and has OPTC DSC enabled.
777 			// We can't use res_pool->res_cap->num_timing_generator to check,
778 			// because it records the default display pipe settings built into
779 			// the driver, not the display pipes of the current chip.
780 			// Some ASICs are fused to fewer display pipes than the default.
781 			// The dcnxx_resource_construct function obtains the real count.
782 			for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
783 				struct timing_generator *tg = dc->res_pool->timing_generators[i];
784 
785 				if (tg->funcs->is_tg_enabled(tg)) {
786 					if (tg->funcs->get_dsc_status)
787 						tg->funcs->get_dsc_status(tg, &optc_dsc_state);
788 					// Only one OPTC with DSC can be ON, so once we find it we
789 					// exit this block. A non-zero value means DSC is enabled.
790 					if (optc_dsc_state != 0) {
791 						tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
792 						break;
793 					}
794 				}
795 			}
796 
797 			// Step 2: Power down each DSC, skipping the DSC of the running OPTC
798 			for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
799 				struct dcn_dsc_state s  = {0};
800 
801 				/* avoid reading DSC state when it is not in use as it may be power gated */
802 				if (optc_dsc_state) {
803 					dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
804 
805 					if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
806 						s.dsc_clock_en && s.dsc_fw_en)
807 						continue;
808 				}
809 
810 				pg_cntl->funcs->dsc_pg_control(pg_cntl, dc->res_pool->dscs[i]->inst, false);
811 			}
812 		}
813 	}
814 }
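
/*
 * Note (editorial): the "clean up MPC tree" pass above only considers OTGs
 * kept alive for seamless boot (tg_enabled[i]); if the bottom MPCC hanging
 * off such an OPP belongs to a pipe whose OTG was torn down, the stale
 * opp_list pointer is dropped so later MPC programming starts from a
 * consistent tree.
 */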
815 
816 void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
817 			       struct dc_state *context)
818 {
819 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
820 
821 	/* enable DCFCLK current DCHUB */
822 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
823 
824 	/* initialize HUBP on power up */
825 	pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
826 	/*make sure DPPCLK is on*/
827 	dpp->funcs->dpp_dppclk_control(dpp, false, true);
828 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
829 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
830 			pipe_ctx->stream_res.opp,
831 			true);
832 	/* TODO: insert PG here */
833 	if (dc->vm_pa_config.valid) {
834 		struct vm_system_aperture_param apt;
835 
836 		apt.sys_default.quad_part = 0;
837 
838 		apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
839 		apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
840 
841 		// Program system aperture settings
842 		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
843 	}
844 	//DC_LOG_DEBUG("%s: dpp_inst(%d) =\n", __func__, dpp->inst);
845 
846 	if (!pipe_ctx->top_pipe
847 		&& pipe_ctx->plane_state
848 		&& pipe_ctx->plane_state->flip_int_enabled
849 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
850 		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
851 }
852 
853 /* disable HW used by plane.
854  * note:  cannot disable until disconnect is complete
855  */
856 void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
857 {
858 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
859 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
860 
861 
862 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
863 
864 	/* In flip immediate with pipe splitting case GSL is used for
865 	 * synchronization so we must disable it when the plane is disabled.
866 	 */
867 	if (pipe_ctx->stream_res.gsl_group != 0)
868 		dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
869 /*
870 	if (hubp->funcs->hubp_update_mall_sel)
871 		hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
872 */
873 	dc->hwss.set_flip_control_gsl(pipe_ctx, false);
874 
875 	hubp->funcs->hubp_clk_cntl(hubp, false);
876 
877 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
878 
879 	hubp->power_gated = true;
880 
881 	hubp->funcs->hubp_reset(hubp);
882 	dpp->funcs->dpp_reset(dpp);
883 
884 	pipe_ctx->stream = NULL;
885 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
886 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
887 	pipe_ctx->top_pipe = NULL;
888 	pipe_ctx->bottom_pipe = NULL;
889 	pipe_ctx->plane_state = NULL;
890 	//DC_LOG_DEBUG("%s: dpp_inst(%d)=\n", __func__, dpp->inst);
891 
892 }
893 
894 void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
895 {
896 	struct dce_hwseq *hws = dc->hwseq;
897 	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
898 	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
899 
900 	DC_LOGGER_INIT(dc->ctx->logger);
901 
902 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
903 		return;
904 
905 	if (hws->funcs.plane_atomic_disable)
906 		hws->funcs.plane_atomic_disable(dc, pipe_ctx);
907 
908 	/* Turn back off the phantom OTG after the phantom plane is fully disabled
909 	 */
910 	if (is_phantom)
911 		if (tg && tg->funcs->disable_phantom_crtc)
912 			tg->funcs->disable_phantom_crtc(tg);
913 
914 	DC_LOG_DC("Power down front end %d\n",
915 					pipe_ctx->pipe_idx);
916 }
917 
918 void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
919 	struct pg_block_update *update_state)
920 {
921 	bool hpo_frl_stream_enc_acquired = false;
922 	bool hpo_dp_stream_enc_acquired = false;
923 	int i = 0, j = 0;
924 	int edp_num = 0;
925 	struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
926 
927 	memset(update_state, 0, sizeof(struct pg_block_update));
928 
929 	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
930 		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
931 				dc->res_pool->hpo_dp_stream_enc[i]) {
932 			hpo_dp_stream_enc_acquired = true;
933 			break;
934 		}
935 	}
936 
937 	if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired)
938 		update_state->pg_res_update[PG_HPO] = true;
939 
940 	update_state->pg_res_update[PG_DWB] = true;
941 
942 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
943 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
944 
945 		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
946 			update_state->pg_pipe_res_update[j][i] = true;
947 
948 		if (!pipe_ctx)
949 			continue;
950 
951 		if (pipe_ctx->plane_res.hubp)
952 			update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->plane_res.hubp->inst] = false;
953 
954 		if (pipe_ctx->plane_res.dpp && pipe_ctx->plane_res.hubp)
955 			update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
956 
957 		if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
958 			update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
959 
960 		if (pipe_ctx->stream_res.dsc) {
961 			update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false;
962 			if (dc->caps.sequential_ono) {
963 				update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
964 				update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
965 
966 				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
967 				if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
968 				    pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
969 					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
970 						update_state->pg_pipe_res_update[PG_HUBP][j] = false;
971 						update_state->pg_pipe_res_update[PG_DPP][j] = false;
972 					}
973 				}
974 			}
975 		}
976 
977 		if (pipe_ctx->stream_res.opp)
978 			update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
979 
980 		if (pipe_ctx->stream_res.hpo_dp_stream_enc)
981 			update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false;
982 	}
983 
984 	for (i = 0; i < dc->link_count; i++) {
985 		update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;
986 		if (dc->links[i]->type != dc_connection_none)
987 			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = false;
988 	}
989 
990 	/* Domain 24 controls all OTG, MPC and OPP blocks; as long as one OTG is still up, avoid enabling OTG PG */
991 	for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
992 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
993 		if (tg && tg->funcs->is_tg_enabled(tg)) {
994 			update_state->pg_pipe_res_update[PG_OPTC][i] = false;
995 			break;
996 		}
997 	}
998 
999 	dc_get_edp_links(dc, edp_links, &edp_num);
1000 	if (edp_num == 0 ||
1001 		((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
1002 			(!edp_links[1] || !edp_links[1]->edp_sink_present))) {
1003 		/* No eDP exists in this config; keep Domain 24 powered on. For S0i3 this is handled in dmubfw */
1004 		update_state->pg_pipe_res_update[PG_OPTC][0] = false;
1005 	}
1006 
1007 	if (dc->caps.sequential_ono) {
1008 		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1009 			if (!update_state->pg_pipe_res_update[PG_HUBP][i] &&
1010 			    !update_state->pg_pipe_res_update[PG_DPP][i]) {
1011 				for (j = i - 1; j >= 0; j--) {
1012 					update_state->pg_pipe_res_update[PG_HUBP][j] = false;
1013 					update_state->pg_pipe_res_update[PG_DPP][j] = false;
1014 				}
1015 
1016 				break;
1017 			}
1018 		}
1019 	}
1020 }
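
/*
 * Note (editorial): in the gating direction, pg_pipe_res_update[][] entries
 * start out true ("safe to gate") and are cleared to false for every
 * instance the new context still uses; dcn35_calc_blocks_to_ungate() below
 * uses the opposite polarity, where true means "must be powered back up".
 */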
1021 
1022 void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
1023 	struct pg_block_update *update_state)
1024 {
1025 	bool hpo_frl_stream_enc_acquired = false;
1026 	bool hpo_dp_stream_enc_acquired = false;
1027 	int i = 0, j = 0;
1028 
1029 	memset(update_state, 0, sizeof(struct pg_block_update));
1030 
1031 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1032 		struct pipe_ctx *cur_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1033 		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
1034 
1035 		if (cur_pipe == NULL || new_pipe == NULL)
1036 			continue;
1037 
1038 		if ((!cur_pipe->plane_state && new_pipe->plane_state) ||
1039 			(!cur_pipe->stream && new_pipe->stream) ||
1040 			(cur_pipe->stream != new_pipe->stream && new_pipe->stream)) {
1041 			// New pipe addition
1042 			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
1043 				if (j == PG_HUBP && new_pipe->plane_res.hubp)
1044 					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;
1045 
1046 				if (j == PG_DPP && new_pipe->plane_res.dpp)
1047 					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;
1048 
1049 				if (j == PG_MPCC && new_pipe->plane_res.dpp)
1050 					update_state->pg_pipe_res_update[j][new_pipe->plane_res.mpcc_inst] = true;
1051 
1052 				if (j == PG_DSC && new_pipe->stream_res.dsc)
1053 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;
1054 
1055 				if (j == PG_OPP && new_pipe->stream_res.opp)
1056 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;
1057 
1058 				if (j == PG_OPTC && new_pipe->stream_res.tg)
1059 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
1060 
1061 				if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc)
1062 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
1063 			}
1064 		} else if (cur_pipe->plane_state == new_pipe->plane_state ||
1065 				cur_pipe == new_pipe) {
1066 			//unchanged pipes
1067 			for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++) {
1068 				if (j == PG_HUBP &&
1069 					cur_pipe->plane_res.hubp != new_pipe->plane_res.hubp &&
1070 					new_pipe->plane_res.hubp)
1071 					update_state->pg_pipe_res_update[j][new_pipe->plane_res.hubp->inst] = true;
1072 
1073 				if (j == PG_DPP &&
1074 					cur_pipe->plane_res.dpp != new_pipe->plane_res.dpp &&
1075 					new_pipe->plane_res.dpp)
1076 					update_state->pg_pipe_res_update[j][new_pipe->plane_res.dpp->inst] = true;
1077 
1078 				if (j == PG_OPP &&
1079 					cur_pipe->stream_res.opp != new_pipe->stream_res.opp &&
1080 					new_pipe->stream_res.opp)
1081 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.opp->inst] = true;
1082 
1083 				if (j == PG_DSC &&
1084 					cur_pipe->stream_res.dsc != new_pipe->stream_res.dsc &&
1085 					new_pipe->stream_res.dsc)
1086 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.dsc->inst] = true;
1087 
1088 				if (j == PG_OPTC &&
1089 					cur_pipe->stream_res.tg != new_pipe->stream_res.tg &&
1090 					new_pipe->stream_res.tg)
1091 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true;
1092 
1093 				if (j == PG_DPSTREAM &&
1094 					cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc &&
1095 					new_pipe->stream_res.hpo_dp_stream_enc)
1096 					update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true;
1097 			}
1098 		}
1099 	}
1100 
1101 	for (i = 0; i < dc->link_count; i++)
1102 		if (dc->links[i]->type != dc_connection_none)
1103 			update_state->pg_pipe_res_update[PG_PHYSYMCLK][dc->links[i]->link_enc_hw_inst] = true;
1104 
1105 	for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) {
1106 		if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i] &&
1107 				dc->res_pool->hpo_dp_stream_enc[i]) {
1108 			hpo_dp_stream_enc_acquired = true;
1109 			break;
1110 		}
1111 	}
1112 
1113 	if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired)
1114 		update_state->pg_res_update[PG_HPO] = true;
1115 
1116 	if (hpo_frl_stream_enc_acquired)
1117 		update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
1118 
1119 	if (dc->caps.sequential_ono) {
1120 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1121 			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
1122 
1123 			if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
1124 			    update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
1125 				update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
1126 				update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
1127 
1128 				/* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
1129 				if (new_pipe->plane_res.hubp &&
1130 				    new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
1131 					for (j = 0; j < dc->res_pool->pipe_count; ++j) {
1132 						update_state->pg_pipe_res_update[PG_HUBP][j] = true;
1133 						update_state->pg_pipe_res_update[PG_DPP][j] = true;
1134 					}
1135 				}
1136 			}
1137 		}
1138 
1139 		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1140 			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1141 			    update_state->pg_pipe_res_update[PG_DPP][i]) {
1142 				for (j = i - 1; j >= 0; j--) {
1143 					update_state->pg_pipe_res_update[PG_HUBP][j] = true;
1144 					update_state->pg_pipe_res_update[PG_DPP][j] = true;
1145 				}
1146 
1147 				break;
1148 			}
1149 		}
1150 	}
1151 }
1152 
1153 /**
1154  * dcn35_hw_block_power_down() - power down sequence
1155  *
1156  * The following sequence describes the ON-OFF (ONO) for power down:
1157  *
1158  *	ONO Region 3, DCPG 25: hpo - SKIPPED
1159  *	ONO Region 4, DCPG 0: dchubp0, dpp0
1160  *	ONO Region 6, DCPG 1: dchubp1, dpp1
1161  *	ONO Region 8, DCPG 2: dchubp2, dpp2
1162  *	ONO Region 10, DCPG 3: dchubp3, dpp3
1163  *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power down at IPS2 entry
1164  *	ONO Region 5, DCPG 16: dsc0
1165  *	ONO Region 7, DCPG 17: dsc1
1166  *	ONO Region 9, DCPG 18: dsc2
1167  *	ONO Region 11, DCPG 19: dsc3
1168  *	ONO Region 2, DCPG 24: mpc opp optc dwb
1169  *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. Will be powered down after the LONO timer is armed
1170  *
1171  * If sequential ONO is specified the order is modified from ONO Region 11 -> ONO Region 0 descending.
1172  *
1173  * @dc: Current DC state
1174  * @update_state: update PG sequence states for HW block
1175  */
1176 void dcn35_hw_block_power_down(struct dc *dc,
1177 	struct pg_block_update *update_state)
1178 {
1179 	int i = 0;
1180 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1181 
1182 	if (!pg_cntl)
1183 		return;
1184 	if (dc->debug.ignore_pg)
1185 		return;
1186 
1187 	if (update_state->pg_res_update[PG_HPO]) {
1188 		if (pg_cntl->funcs->hpo_pg_control)
1189 			pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
1190 	}
1191 
1192 	if (!dc->caps.sequential_ono) {
1193 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1194 			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1195 			    update_state->pg_pipe_res_update[PG_DPP][i]) {
1196 				if (pg_cntl->funcs->hubp_dpp_pg_control)
1197 					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
1198 			}
1199 		}
1200 
1201 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1202 			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1203 				if (pg_cntl->funcs->dsc_pg_control)
1204 					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
1205 			}
1206 		}
1207 	} else {
1208 		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1209 			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1210 				if (pg_cntl->funcs->dsc_pg_control)
1211 					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
1212 			}
1213 
1214 			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1215 			    update_state->pg_pipe_res_update[PG_DPP][i]) {
1216 				if (pg_cntl->funcs->hubp_dpp_pg_control)
1217 					pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
1218 			}
1219 		}
1220 	}
1221 
1222 	/* This will need all the clients to unregister OPTC interrupts; let dmubfw handle this */
1223 	if (pg_cntl->funcs->plane_otg_pg_control)
1224 		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
1225 
1226 	//domain22, 23, 25 currently always on.
1227 
1228 }
1229 
1230 /**
1231  * dcn35_hw_block_power_up() - power up sequence
1232  *
1233  * The following sequence describes the ON-OFF (ONO) for power up:
1234  *
1235  *	ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
1236  *	ONO Region 2, DCPG 24: mpc opp optc dwb
1237  *	ONO Region 5, DCPG 16: dsc0
1238  *	ONO Region 7, DCPG 17: dsc1
1239  *	ONO Region 9, DCPG 18: dsc2
1240  *	ONO Region 11, DCPG 19: dsc3
1241  *	ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
1242  *	ONO Region 4, DCPG 0: dchubp0, dpp0
1243  *	ONO Region 6, DCPG 1: dchubp1, dpp1
1244  *	ONO Region 8, DCPG 2: dchubp2, dpp2
1245  *	ONO Region 10, DCPG 3: dchubp3, dpp3
1246  *	ONO Region 3, DCPG 25: hpo - SKIPPED
1247  *
1248  * If sequential ONO is specified the order is modified from ONO Region 0 -> ONO Region 11 ascending.
1249  *
1250  * @dc: Current DC state
1251  * @update_state: update PG sequence states for HW block
1252  */
1253 void dcn35_hw_block_power_up(struct dc *dc,
1254 	struct pg_block_update *update_state)
1255 {
1256 	int i = 0;
1257 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1258 
1259 	if (!pg_cntl)
1260 		return;
1261 	if (dc->debug.ignore_pg)
1262 		return;
1263 	//domain22, 23, 25 currently always on.
1264 	/* This will need all the clients to unregister OPTC interrupts; let dmubfw handle this */
1265 	if (pg_cntl->funcs->plane_otg_pg_control)
1266 		pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
1267 
1268 	if (!dc->caps.sequential_ono) {
1269 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
1270 			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1271 				if (pg_cntl->funcs->dsc_pg_control)
1272 					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
1273 			}
1274 	}
1275 
1276 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1277 		if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1278 			update_state->pg_pipe_res_update[PG_DPP][i]) {
1279 			if (pg_cntl->funcs->hubp_dpp_pg_control)
1280 				pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
1281 		}
1282 
1283 		if (dc->caps.sequential_ono) {
1284 			if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1285 				if (pg_cntl->funcs->dsc_pg_control)
1286 					pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
1287 			}
1288 		}
1289 	}
1290 	if (update_state->pg_res_update[PG_HPO]) {
1291 		if (pg_cntl->funcs->hpo_pg_control)
1292 			pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
1293 	}
1294 }
1295 void dcn35_root_clock_control(struct dc *dc,
1296 	struct pg_block_update *update_state, bool power_on)
1297 {
1298 	int i = 0;
1299 	struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
1300 
1301 	if (!pg_cntl)
1302 		return;
1303 	/*enable root clock first when power up*/
1304 	if (power_on) {
1305 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1306 			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1307 				update_state->pg_pipe_res_update[PG_DPP][i]) {
1308 				if (dc->hwseq->funcs.dpp_root_clock_control)
1309 					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
1310 			}
1311 			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
1312 				if (dc->hwseq->funcs.dpstream_root_clock_control)
1313 					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
1314 		}
1315 
1316 		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
1317 			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
1318 				if (dc->hwseq->funcs.physymclk_root_clock_control)
1319 					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
1320 
1321 	}
1322 	for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1323 		if (update_state->pg_pipe_res_update[PG_DSC][i]) {
1324 			if (power_on) {
1325 				if (dc->res_pool->dccg->funcs->enable_dsc)
1326 					dc->res_pool->dccg->funcs->enable_dsc(dc->res_pool->dccg, i);
1327 			} else {
1328 				if (dc->res_pool->dccg->funcs->disable_dsc)
1329 					dc->res_pool->dccg->funcs->disable_dsc(dc->res_pool->dccg, i);
1330 			}
1331 		}
1332 	}
1333 	/*disable root clock first when power down*/
1334 	if (!power_on) {
1335 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1336 			if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
1337 				update_state->pg_pipe_res_update[PG_DPP][i]) {
1338 				if (dc->hwseq->funcs.dpp_root_clock_control)
1339 					dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
1340 			}
1341 			if (update_state->pg_pipe_res_update[PG_DPSTREAM][i])
1342 				if (dc->hwseq->funcs.dpstream_root_clock_control)
1343 					dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on);
1344 		}
1345 
1346 		for (i = 0; i < dc->res_pool->dig_link_enc_count; i++)
1347 			if (update_state->pg_pipe_res_update[PG_PHYSYMCLK][i])
1348 				if (dc->hwseq->funcs.physymclk_root_clock_control)
1349 					dc->hwseq->funcs.physymclk_root_clock_control(dc->hwseq, i, power_on);
1350 
1351 	}
1352 }
1353 
1354 void dcn35_prepare_bandwidth(
1355 		struct dc *dc,
1356 		struct dc_state *context)
1357 {
1358 	struct pg_block_update pg_update_state;
1359 
1360 	if (dc->hwss.calc_blocks_to_ungate) {
1361 		dc->hwss.calc_blocks_to_ungate(dc, context, &pg_update_state);
1362 
1363 		if (dc->hwss.root_clock_control)
1364 			dc->hwss.root_clock_control(dc, &pg_update_state, true);
1365 		/*power up required HW block*/
1366 		if (dc->hwss.hw_block_power_up)
1367 			dc->hwss.hw_block_power_up(dc, &pg_update_state);
1368 	}
1369 
1370 	dcn20_prepare_bandwidth(dc, context);
1371 
1372 	print_pg_status(dc, __func__, ": after rcg and power up");
1373 }
1374 
1375 void dcn35_optimize_bandwidth(
1376 		struct dc *dc,
1377 		struct dc_state *context)
1378 {
1379 	struct pg_block_update pg_update_state;
1380 
1381 	print_pg_status(dc, __func__, ": before power down and rcg");
1382 
1383 	dcn20_optimize_bandwidth(dc, context);
1384 
1385 	if (dc->hwss.calc_blocks_to_gate) {
1386 		dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
1387 		/*try to power down unused block*/
1388 		if (dc->hwss.hw_block_power_down)
1389 			dc->hwss.hw_block_power_down(dc, &pg_update_state);
1390 
1391 		if (dc->hwss.root_clock_control)
1392 			dc->hwss.root_clock_control(dc, &pg_update_state, false);
1393 	}
1394 
1395 	print_pg_status(dc, __func__, ": after power down and rcg");
1396 }
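
/*
 * Note (editorial): prepare/optimize are deliberately mirrored --
 * dcn35_prepare_bandwidth() ungates root clocks and powers blocks up
 * *before* dcn20_prepare_bandwidth() programs them, while
 * dcn35_optimize_bandwidth() powers blocks down and re-gates root clocks
 * only *after* dcn20_optimize_bandwidth() has released them.
 */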
1397 
1398 void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
1399 		int num_pipes, struct dc_crtc_timing_adjust adjust)
1400 {
1401 	int i = 0;
1402 	struct drr_params params = {0};
1403 	// DRR set trigger event mapped to OTG_TRIG_A
1404 	unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
1405 	// Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
1406 	unsigned int num_frames = 2;
1407 
1408 	params.vertical_total_max = adjust.v_total_max;
1409 	params.vertical_total_min = adjust.v_total_min;
1410 	params.vertical_total_mid = adjust.v_total_mid;
1411 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
1412 
1413 	for (i = 0; i < num_pipes; i++) {
1414 		/* dc_state_destruct() might null the stream resources, so fetch tg
1415 		 * here first to avoid a race condition. The lifetime of the pointee
1416 		 * itself (the timing_generator object) is not a problem here.
1417 		 */
1418 		struct timing_generator *tg = pipe_ctx[i]->stream_res.tg;
1419 
1420 		if ((tg != NULL) && tg->funcs) {
1421 			if (pipe_ctx[i]->stream && pipe_ctx[i]->stream->ctx->dc->debug.static_screen_wait_frames) {
1422 				struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
1423 				struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
1424 				unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
1425 
1426 				if (frame_rate >= 120 && dc->caps.ips_support &&
1427 					dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
1428 					/*ips enable case*/
1429 					num_frames = 2 * (frame_rate % 60);
1430 				}
1431 			}
1432 			set_drr_and_clear_adjust_pending(pipe_ctx[i], pipe_ctx[i]->stream, &params);
1433 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
1434 				if (tg->funcs->set_static_screen_control)
1435 					tg->funcs->set_static_screen_control(
1436 						tg, event_triggers, num_frames);
1437 		}
1438 	}
1439 }
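
/*
 * Worked example (editorial): with IPS supported and not disabled, a 144 Hz
 * panel gives num_frames = 2 * (144 % 60) = 48 static frames before the
 * trigger fires, while rates below 120 Hz keep the default of 2. Note that
 * an exact multiple of 60 (e.g. 120 Hz) yields num_frames = 0.
 */
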
1440 void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
1441 		int num_pipes, const struct dc_static_screen_params *params)
1442 {
1443 	unsigned int i;
1444 	unsigned int triggers = 0;
1445 
1446 	if (params->triggers.surface_update)
1447 		triggers |= 0x200;/*bit 9  : 10 0000 0000*/
1448 	if (params->triggers.cursor_update)
1449 		triggers |= 0x8;/*bit3*/
1450 	if (params->triggers.force_trigger)
1451 		triggers |= 0x1;
1452 	for (i = 0; i < num_pipes; i++)
1453 		pipe_ctx[i]->stream_res.tg->funcs->
1454 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
1455 					triggers, params->num_frames);
1456 }
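
/*
 * Example (editorial): surface_update + force_trigger yields
 * triggers = 0x200 | 0x1 = 0x201, i.e. static-screen event bits 9 and 0
 * are programmed for every pipe passed in.
 */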
1457 
1458 void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx,
1459 		int num_pipes, uint32_t v_total_min, uint32_t v_total_max)
1460 {
1461 	int i = 0;
1462 	struct long_vtotal_params params = {0};
1463 
1464 	params.vertical_total_max = v_total_max;
1465 	params.vertical_total_min = v_total_min;
1466 
1467 	for (i = 0; i < num_pipes; i++) {
1468 		if (!pipe_ctx[i])
1469 			continue;
1470 
1471 		if (pipe_ctx[i]->stream) {
1472 			struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
1473 
1474 			if (timing)
1475 				params.vertical_blank_start = timing->v_total - timing->v_front_porch;
1476 			else
1477 				params.vertical_blank_start = 0;
1478 
1479 			if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs &&
1480 				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal)
1481 				pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, &params);
1482 		}
1483 	}
1484 }
1485 
1486 static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
1487 {
1488 	/* Calculate average pixel count per TU, return false if under ~2.00 to
1489 	 * avoid empty TUs. This is only required for DPIA tunneling as empty TUs
1490 	 * are legal to generate for native DP links. Assume TU size 64 as there
1491 	 * is currently no scenario where it's reprogrammed from HW default.
1492 	 * MTPs have no such limitation, so this does not affect MST use cases.
1493 	 */
1494 	unsigned int pix_clk_mhz;
1495 	unsigned int symclk_mhz;
1496 	unsigned int avg_pix_per_tu_x1000;
1497 	unsigned int tu_size_bytes = 64;
1498 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
1499 	struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
1500 	const struct dc *dc = pipe_ctx->stream->link->dc;
1501 
1502 	if (pipe_ctx->link_config.dp_tunnel_settings.should_enable_dp_tunneling == false)
1503 		return false;
1504 
1505 	// Not necessary for MST configurations
1506 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
1507 		return false;
1508 
1509 	pix_clk_mhz = timing->pix_clk_100hz / 10000;
1510 
	// If the pixel clock exceeds the lowest DISPCLK level, dynamic ODM may engage and the divider can't be blocked
1512 	if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
1513 		return false;
1514 
1515 	switch (link_settings->link_rate) {
1516 	case LINK_RATE_LOW:
1517 		symclk_mhz = 162;
1518 		break;
1519 	case LINK_RATE_HIGH:
1520 		symclk_mhz = 270;
1521 		break;
1522 	case LINK_RATE_HIGH2:
1523 		symclk_mhz = 540;
1524 		break;
1525 	case LINK_RATE_HIGH3:
1526 		symclk_mhz = 810;
1527 		break;
1528 	default:
1529 		// We shouldn't be tunneling any other rates, something is wrong
1530 		ASSERT(0);
1531 		return false;
1532 	}
1533 
1534 	avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
1535 		/ (symclk_mhz * link_settings->lane_count);
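
	/* Worked example (illustrative values): a 148 MHz pixel clock on HBR2
	 * (symclk 540 MHz) x4 lanes gives 1000 * 148 * 64 / (540 * 4) = 4385,
	 * i.e. ~4.39 pixels per TU, well above the ~2.02 threshold, so the
	 * divider policy is allowed; 25 MHz on HBR3 x4 gives only ~0.49, so
	 * the policy is blocked to avoid empty TUs.
	 */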
1536 
1537 	// Add small empirically-decided margin to account for potential jitter
1538 	return (avg_pix_per_tu_x1000 < 2020);
1539 }
1540 
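/*
 * Decide whether the DP DIG clock should be divided down (pixel rate
 * divider) for this pipe: only for 8b/10b DP signals with horizontal
 * timing divisible by 2, gated by the enable_dp_dig_pixel_rate_div_policy
 * debug flag, and skipped when it would generate empty TUs over a DPIA
 * tunnel (see should_avoid_empty_tu()).
 */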
1541 bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
1542 {
1543 	struct dc *dc = pipe_ctx->stream->ctx->dc;
1544 
1545 	if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
1546 		return false;
1547 
1548 	if (should_avoid_empty_tu(pipe_ctx))
1549 		return false;
1550 
1551 	if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
1552 		dc->debug.enable_dp_dig_pixel_rate_div_policy)
1553 		return true;
1554 
1555 	return false;
1556 }
1557 
1558 /*
1559  * Set powerup to true for every pipe to match pre-OS configuration.
1560  */
1561 static void dcn35_calc_blocks_to_ungate_for_hw_release(struct dc *dc, struct pg_block_update *update_state)
1562 {
1563 	int i = 0, j = 0;
1564 
1565 	memset(update_state, 0, sizeof(struct pg_block_update));
1566 
1567 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1568 		for (j = 0; j < PG_HW_PIPE_RESOURCES_NUM_ELEMENT; j++)
1569 			update_state->pg_pipe_res_update[j][i] = true;
1570 
1571 	update_state->pg_res_update[PG_HPO] = true;
1572 	update_state->pg_res_update[PG_DWB] = true;
1573 }
1574 
1575 /*
1576  * The purpose is to power up all gatings to restore optimization to pre-OS env.
1577  * Re-use hwss func and existing PG&RCG flags to decide powerup sequence.
1578  */
1579 void dcn35_hardware_release(struct dc *dc)
1580 {
1581 	struct pg_block_update pg_update_state;
1582 
1583 	dcn35_calc_blocks_to_ungate_for_hw_release(dc, &pg_update_state);
1584 
1585 	if (dc->hwss.root_clock_control)
1586 		dc->hwss.root_clock_control(dc, &pg_update_state, true);
1587 	/*power up required HW block*/
1588 	if (dc->hwss.hw_block_power_up)
1589 		dc->hwss.hw_block_power_up(dc, &pg_update_state);
1590 }
1591 
1592 void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
1593 {
1594 	if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
1595 		return;
1596 
1597 	/*
1598 	 * Insert a blank update to modify the write index and set pipe_mask to 0.
1599 	 *
1600 	 * While the DMU is interlocked with driver full pipe programming via
1601 	 * the DMU HW lock, if the cursor update begins to execute after a full
1602 	 * pipe programming occurs there are two possible issues:
1603 	 *
1604 	 * 1. Outdated cursor information is programmed, replacing the current update
1605 	 * 2. The cursor update in firmware holds the cursor lock, preventing
1606 	 *    the current update from being latched atomically in the same frame
1607 	 *    as the rest of the update.
1608 	 *
1609 	 * This blank update, treated as a no-op, will allow the firmware to skip
1610 	 * the programming.
1611 	 */
1612 
1613 	if (dc->hwss.begin_cursor_offload_update)
1614 		dc->hwss.begin_cursor_offload_update(dc, pipe);
1615 
1616 	if (dc->hwss.commit_cursor_offload_update)
1617 		dc->hwss.commit_cursor_offload_update(dc, pipe);
1618 }
1619 
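/*
 * Cursor offload updates are published to firmware through a per-stream
 * ring of payloads in shared memory: begin stamps the next payload with
 * write_idx_start, dcn35_update_cursor_offload_pipe() fills in the
 * per-pipe cursor register state, and commit publishes the payload by
 * writing write_idx_finish and advancing write_idx. The start/finish
 * stamps bracket each update so a partially written payload can be
 * detected.
 */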
1620 void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
1621 {
1622 	volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
1623 	const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
1624 	uint32_t stream_idx, write_idx, payload_idx;
1625 
1626 	if (!top_pipe)
1627 		return;
1628 
1629 	stream_idx = top_pipe->pipe_idx;
1630 	write_idx = cs->offload_streams[stream_idx].write_idx + 1; /*  new payload (+1) */
1631 	payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
1632 
1633 	cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx;
1634 
1635 	if (pipe->plane_res.hubp)
1636 		pipe->plane_res.hubp->cursor_offload = true;
1637 
1638 	if (pipe->plane_res.dpp)
1639 		pipe->plane_res.dpp->cursor_offload = true;
1640 }
1641 
1642 void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
1643 {
1644 	volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
1645 	volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream;
1646 	const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
1647 	uint32_t stream_idx, write_idx, payload_idx;
1648 
1649 	if (pipe->plane_res.hubp)
1650 		pipe->plane_res.hubp->cursor_offload = false;
1651 
1652 	if (pipe->plane_res.dpp)
1653 		pipe->plane_res.dpp->cursor_offload = false;
1654 
1655 	if (!top_pipe)
1656 		return;
1657 
1658 	stream_idx = top_pipe->pipe_idx;
1659 	write_idx = cs->offload_streams[stream_idx].write_idx + 1; /*  new payload (+1) */
1660 	payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
1661 
1662 	shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1]
1663 				 .data.cursor_offload_v1.offload_streams[stream_idx];
1664 
1665 	shared_stream->last_write_idx = write_idx;
1666 
1667 	cs->offload_streams[stream_idx].write_idx = write_idx;
1668 	cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx;
1669 }
1670 
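/*
 * Snapshot the pipe's current HUBP and DPP cursor register state into the
 * in-flight payload and mark the pipe in pipe_mask so firmware replays it.
 */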
1671 void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
1672 {
1673 	volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
1674 	const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
1675 	const struct hubp *hubp = pipe->plane_res.hubp;
1676 	const struct dpp *dpp = pipe->plane_res.dpp;
1677 	volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p;
1678 	uint32_t stream_idx, write_idx, payload_idx;
1679 
1680 	if (!top_pipe || !hubp || !dpp)
1681 		return;
1682 
1683 	stream_idx = top_pipe->pipe_idx;
1684 	write_idx = cs->offload_streams[stream_idx].write_idx + 1; /*  new payload (+1) */
1685 	payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
1686 
1687 	p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30;
1688 
1689 	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
1690 	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
1691 	p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
1692 	p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
1693 	p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
1694 	p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
1695 	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
1696 	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
1697 	p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
1698 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
1699 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
1700 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
1701 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
1702 	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
1703 
1704 	p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
1705 	p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
1706 	p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
1707 	p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
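	/* fixed monochrome cursor colors: COLOR0 black, COLOR1 white */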
1708 	p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
1709 	p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
1710 	p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias;
1711 	p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale;
1712 
1713 	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
1714 	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
1715 
1716 	cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
1717 }
1718 
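/* Notify DMUB of a DRR change on @stream by (re)enabling cursor offload. */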
1719 void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
1720 					    const struct dc_stream_state *stream)
1721 {
1722 	dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
1723 }
1724 
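/* Request immediate programming of the committed cursor state for @pipe. */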
1725 void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe)
1726 {
1727 	dc_dmub_srv_program_cursor_now(dc, pipe);
1728 }
1729