xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c (revision fa3c727e05996811a2a57c5114e88200c05b6161)
1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4 
5 
6 #include "os_types.h"
7 #include "dm_services.h"
8 #include "basics/dc_common.h"
9 #include "dm_helpers.h"
10 #include "core_types.h"
11 #include "resource.h"
12 #include "dccg.h"
13 #include "dce/dce_hwseq.h"
14 #include "reg_helper.h"
15 #include "abm.h"
16 #include "hubp.h"
17 #include "dchubbub.h"
18 #include "timing_generator.h"
19 #include "opp.h"
20 #include "ipp.h"
21 #include "mpc.h"
22 #include "mcif_wb.h"
23 #include "dc_dmub_srv.h"
24 #include "link_hwss.h"
25 #include "dpcd_defs.h"
26 #include "clk_mgr.h"
27 #include "dsc.h"
28 #include "link_service.h"
29 
30 #include "dce/dmub_hw_lock_mgr.h"
31 #include "dcn10/dcn10_cm_common.h"
32 #include "dcn20/dcn20_optc.h"
33 #include "dcn30/dcn30_cm_common.h"
34 #include "dcn32/dcn32_hwseq.h"
35 #include "dcn401_hwseq.h"
36 #include "dcn401/dcn401_resource.h"
37 #include "dc_state_priv.h"
38 #include "link_enc_cfg.h"
39 
40 #define DC_LOGGER_INIT(logger)
41 
42 #define CTX \
43 	hws->ctx
44 #define REG(reg)\
45 	hws->regs->reg
46 #define DC_LOGGER \
47 	dc->ctx->logger
48 
49 
50 #undef FN
51 #define FN(reg_name, field_name) \
52 	hws->shifts->field_name, hws->masks->field_name
53 
dcn401_initialize_min_clocks(struct dc * dc)54 void dcn401_initialize_min_clocks(struct dc *dc)
55 {
56 	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
57 
58 	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
59 	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
60 	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
61 	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
62 	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
63 	if (dc->debug.disable_boot_optimizations) {
64 		clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
65 	} else {
66 		/* Even though DPG_EN = 1 for the connected display, it still requires the
67 		 * correct timing so we cannot set DISPCLK to min freq or it could cause
68 		 * audio corruption. Read current DISPCLK from DENTIST and request the same
69 		 * freq to ensure that the timing is valid and unchanged.
70 		 */
71 		clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
72 	}
73 	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
74 	clocks->fclk_p_state_change_support = true;
75 	clocks->p_state_change_support = true;
76 
77 	dc->clk_mgr->funcs->update_clocks(
78 			dc->clk_mgr,
79 			dc->current_state,
80 			true);
81 }
82 
dcn401_program_gamut_remap(struct pipe_ctx * pipe_ctx)83 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
84 {
85 	unsigned int i = 0;
86 	struct mpc_grph_gamut_adjustment mpc_adjust;
87 	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
88 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
89 
90 	//For now assert if location is not pre-blend
91 	if (pipe_ctx->plane_state)
92 		ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
93 
94 	// program MPCC_MCM_FIRST_GAMUT_REMAP
95 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
96 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
97 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;
98 
99 	if (pipe_ctx->plane_state &&
100 		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
101 		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
102 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
103 			mpc_adjust.temperature_matrix[i] =
104 			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
105 	}
106 
107 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
108 
109 	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
110 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
111 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;
112 
113 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
114 
115 	// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
116 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
117 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
118 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;
119 
120 	if (pipe_ctx->top_pipe == NULL) {
121 		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
122 			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
123 			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
124 				mpc_adjust.temperature_matrix[i] =
125 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
126 		}
127 	}
128 
129 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
130 }
131 
/*
 * One-time hardware bring-up for DCN4.01, run at driver load / resume
 * before any mode programming.
 *
 * Ordering in this function matters: clock manager init, DCCG init and
 * memory power states come first; PHY/DIG state is then read back so
 * displays lit by VBIOS/GOP are discovered; pipes are only powered down
 * once we know whether a seamless boot is requested; finally audio,
 * backlight/ABM, clock gating, watermarks and DMCUB capabilities are
 * initialized.
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	/* dchub ref freq (MHz) before re-query, used to detect a change below */
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
				dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			/* snapshot before re-query so a change can be detected below */
			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: power down per-link with backlight off first */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* No eDP: a single power_down suffices if any DIG is live */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* Read back backlight state; last link with a panel_cntl wins */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->debug.fams2_config.bits.enable &=
				dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
			|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
									    dc->clk_mgr->bw_params);
		}
	}
}
365 
dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc * dc,struct pipe_ctx * pipe_ctx,enum MCM_LUT_XABLE * shaper_xable,enum MCM_LUT_XABLE * lut3d_xable,enum MCM_LUT_XABLE * lut1d_xable)366 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
367 		enum MCM_LUT_XABLE *shaper_xable,
368 		enum MCM_LUT_XABLE *lut3d_xable,
369 		enum MCM_LUT_XABLE *lut1d_xable)
370 {
371 	enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
372 	bool lut1d_enable = false;
373 	struct mpc *mpc = dc->res_pool->mpc;
374 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
375 
376 	if (!pipe_ctx->plane_state)
377 		return;
378 	shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
379 	lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
380 	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
381 	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
382 
383 	*lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
384 
385 	switch (shaper_3dlut_setting) {
386 	case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
387 		*lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
388 		break;
389 	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
390 		*lut3d_xable = MCM_LUT_DISABLE;
391 		*shaper_xable = MCM_LUT_ENABLE;
392 		break;
393 	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
394 		*lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
395 		break;
396 	}
397 }
398 
/*
 * Program the MPC MCM LUT chain (1D LUT, shaper, 3D LUT) for a pipe from
 * the supplied dc_cm2 LUT set.
 *
 * The 3D LUT can come from two sources:
 *  - SYSMEM: the LUT contents are written through the MPC (fast-load path
 *    on the HUBP is disabled first);
 *  - VIDMEM: the HUBP 3DLUT fast-load path DMAs the LUT from GPU memory;
 *    size, addressing mode, format, bias/scale and crossbar are programmed
 *    on HUBP/MPC before enabling the fast load.
 *
 * lut_bank_a selects which LUT RAM bank to write/use (double-buffering).
 */
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format = 0;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width = 0;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool rval;

	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	if (mcm_luts.lut1d_func) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* convert distributed points into the HW PWL format in-place
			 * in the DPP's regamma_params scratch area
			 */
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		/* NOTE(review): third argument is declared as enum MCM_LUT_XABLE but a
		 * bool expression is passed; relies on MCM_LUT_DISABLE == 0 and
		 * MCM_LUT_ENABLE == 1 — confirm against the enum definition.
		 */
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* distributed-points shaper is not expected here */
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->mcm.populate_lut)
				mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
		}
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		/* SYSMEM path does not use the HUBP fast-load; disable it */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);

		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
	case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
		/* map LUT size onto the HUBP fast-load width setting */
		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		default:
			//TODO: handle default case
			break;
		}

		//check for support
		if (mpc->funcs->mcm.is_config_supported &&
			!mpc->funcs->mcm.is_config_supported(width))
			break;

		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);

		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);

		if (mpc->funcs->mcm.program_bit_depth)
			mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);

		/* map GPU memory layout onto HUBP fast-load mode + addressing */
		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		/* NOTE(review): no default case — an unrecognized format leaves
		 * `format` at 0; confirm that is a safe HW value.
		 */
		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
				mpc->funcs->mcm.program_bias_scale) {
			mpc->funcs->mcm.program_bias_scale(mpc,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
				mpcc_id);
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
		}

		//navi 4x has a bug and r and blue are swapped and need to be worked around here in
		//TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_cr_r,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b);

		if (mpc->funcs->mcm.program_lut_read_write_control)
			mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);

		if (mpc->funcs->mcm.program_3dlut_size)
			mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);

		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		/* enable the fast-load; if the HUBP cannot fast-load, disable the
		 * whole MCM LUT chain instead of leaving it stale
		 */
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;

	}
}
595 
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)596 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
597 {
598 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
599 
600 	if (hubp->funcs->hubp_enable_3dlut_fl) {
601 		hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
602 	}
603 }
604 
/*
 * Program the per-plane MCM LUT chain (blend 1D LUT, input shaper,
 * 3D LUT) into the MPC.
 *
 * VIDMEM-sourced 3D LUTs take the fast-load path through
 * dcn401_populate_mcm_luts() and return early; otherwise each stage is
 * translated to HW PWL format as needed and programmed directly.
 *
 * Returns true when every programmed stage reported success.
 */
bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
	struct mpc *mpc = dc->res_pool->mpc;
	bool result;
	const struct pwl_params *lut_params = NULL;
	bool rval;

	if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
		dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
		return true;
	}

	/* pin the movable CM block pre-blend before programming the LUTs */
	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
	// 1D LUT
	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->blend_tf.pwl;
	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
		/* convert distributed points to HW PWL in the DPP scratch area */
		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
				&dpp_base->regamma_params, false);
		lut_params = rval ? &dpp_base->regamma_params : NULL;
	}
	/* NOTE(review): program_1dlut/program_shaper are called without NULL
	 * checks on the func pointers, unlike program_3dlut below — presumably
	 * all DCN4.01 MPC implementations provide them; confirm.
	 */
	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
	lut_params = NULL;

	// Shaper
	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
		lut_params = &plane_state->in_shaper_func.pwl;
	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
		// TODO: dpp_base replace
		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
				&dpp_base->shaper_params, true);
		lut_params = rval ? &dpp_base->shaper_params : NULL;
	}
	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);

	// 3D
	if (mpc->funcs->program_3dlut) {
		if (plane_state->lut3d_func.state.bits.initialized == 1)
			result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
		else
			result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
	}

	return result;
}
655 
dcn401_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)656 bool dcn401_set_output_transfer_func(struct dc *dc,
657 				struct pipe_ctx *pipe_ctx,
658 				const struct dc_stream_state *stream)
659 {
660 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
661 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
662 	const struct pwl_params *params = NULL;
663 	bool ret = false;
664 
665 	/* program OGAM or 3DLUT only for the top pipe*/
666 	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
667 		/*program shaper and 3dlut in MPC*/
668 		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
669 		if (ret == false && mpc->funcs->set_output_gamma) {
670 			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
671 				params = &stream->out_transfer_func.pwl;
672 			else if (pipe_ctx->stream->out_transfer_func.type ==
673 					TF_TYPE_DISTRIBUTED_POINTS &&
674 					cm3_helper_translate_curve_to_hw_format(
675 					&stream->out_transfer_func,
676 					&mpc->blender_params, false))
677 				params = &mpc->blender_params;
678 			/* there are no ROM LUTs in OUTGAM */
679 			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
680 				BREAK_TO_DEBUGGER();
681 		}
682 	}
683 
684 	if (mpc->funcs->set_output_gamma)
685 		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
686 
687 	return ret;
688 }
689 
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)690 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
691 				unsigned int *tmds_div)
692 {
693 	struct dc_stream_state *stream = pipe_ctx->stream;
694 
695 	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
696 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
697 			*tmds_div = PIXEL_RATE_DIV_BY_2;
698 		else
699 			*tmds_div = PIXEL_RATE_DIV_BY_4;
700 	} else {
701 		*tmds_div = PIXEL_RATE_DIV_BY_1;
702 	}
703 
704 	if (*tmds_div == PIXEL_RATE_DIV_NA)
705 		ASSERT(false);
706 
707 }
708 
/*
 * Gather everything dcn401_enable_stream_timing() needs before touching
 * hardware: the TMDS pixel-rate divider, the OPP instances feeding the
 * ODM combine, the link symclk state, DRR min/max/mid parameters and the
 * DRR event-trigger mask.
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int idx;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (idx = 0; idx < *opp_cnt; idx++)
		opp_inst[idx] = opp_heads[idx]->stream_res.opp->inst;

	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		stream->link->phy_state.symclk_state =
				(stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
						? SYMCLK_ON_TX_OFF
						: SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	/* DRR should set trigger event to monitor surface update event */
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
748 
/*
 * Program and enable the OTG/OPP timing path for a stream.
 *
 * Only acts on OTG master pipes (returns DC_OK immediately otherwise).
 * Derives DRR parameters, ODM layout, pixel-rate dividers and event
 * triggers via enable_stream_timing_calc(), then programs the pixel
 * clock, the OTG timing (with optional hblank-borrow padding), OPP
 * clocks/blanking, enables the CRTC, and finally arms DRR and the
 * static-screen event triggers.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if pixel-clock
 * programming or CRTC enable fails.
 */
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = {0};
	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
	struct dc_crtc_timing patched_crtc_timing = stream->timing;
	bool manual_mode = false;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	int odm_slice_width;
	int last_odm_slice_width;
	int i;

	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
		return DC_OK;

	/* Fill tmds_div, OPP list, DRR params and event triggers for this OTG. */
	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
			tmds_div, unused_div);
	}

	/* TODO check if timing_changed, disable stream if timing changed */

	/* More than one OPP head means ODM combine is active for this OTG. */
	if (opp_cnt > 1) {
		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	}

	/* set DTBCLK_P */
	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
		}
	}

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Workaround only applies to non-DP signals. */
	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	/* if we are padding, h_addressable needs to be adjusted */
	if (dc->debug.enable_hblank_borrow) {
		patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
		patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
		patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
		pipe_ctx->stream_res.tg,
		&patched_crtc_timing,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
		pipe_ctx->stream->signal,
		true);

	/* Enable OPP clocks and left-edge extra pixel for every OPP head. */
	for (i = 0; i < opp_cnt; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);

	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}
882 
get_phyd32clk_src(struct dc_link * link)883 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
884 {
885 	switch (link->link_enc->transmitter) {
886 	case TRANSMITTER_UNIPHY_A:
887 		return PHYD32CLKA;
888 	case TRANSMITTER_UNIPHY_B:
889 		return PHYD32CLKB;
890 	case TRANSMITTER_UNIPHY_C:
891 		return PHYD32CLKC;
892 	case TRANSMITTER_UNIPHY_D:
893 		return PHYD32CLKD;
894 	case TRANSMITTER_UNIPHY_E:
895 		return PHYD32CLKE;
896 	default:
897 		return PHYD32CLKA;
898 	}
899 }
900 
/* Compute the per-stream parameters needed by dcn401_enable_stream():
 * HPO stream encoder instance (128b/132b only), PHYD32CLK source, TMDS
 * pixel-rate divider, and the early-control value used to avoid
 * corruption on DP monitors.
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc *dc = stream->ctx->dc;
	const struct dc_crtc_timing *timing = &stream->timing;
	enum dc_lane_count lanes = stream->link->cur_link_settings.lane_count;
	uint32_t h_active_plus_borders;

	*phyd32clk = get_phyd32clk_src(stream->link);

	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	if (dc_is_tmds_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor:
	 * horizontal active width (borders included) modulo the lane count,
	 * falling back to the lane count itself when the remainder is zero.
	 */
	h_active_plus_borders = timing->h_addressable
			+ timing->h_border_left
			+ timing->h_border_right;

	if (lanes != 0)
		*early_control = h_active_plus_borders % lanes;

	if (*early_control == 0)
		*early_control = lanes;
}
938 
/*
 * Enable the output path for a stream: stream clocks (SYMCLK32/SYMCLK or
 * DP stream clock depending on encoding), stream attributes, pixel-rate
 * dividers, stream encoder setup, dynamic metadata engine, info frames
 * and OTG early control.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	/* Legacy assignment path: look the link encoder up from config. */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
				&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			/* 128b/132b (DP2.x) uses the HPO stream encoder clocks. */
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			/* 8b/10b path uses the DIO stream encoder SYMCLK. */
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	link_hwss->setup_stream_attribute(pipe_ctx);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	/* Dynamic metadata engine is only programmed for non-immediate flips. */
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}
998 
/* Enable or disable the HPO top-level IO via HPO_TOP_HW_CONTROL. */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
1003 
/* With 2x cursor magnification, halve the x hotspot and nudge it right so
 * the cursor renders correctly across ODM slice boundaries: +1 for cursors
 * up to 128 pixels wide, +2 for wider cursors.
 */
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	uint32_t nudge = (cursor_width <= 128) ? 1 : 2;

	pos_cpy->x_hotspot /= 2;
	pos_cpy->x_hotspot += nudge;
}
1014 
disable_link_output_symclk_on_tx_off(struct dc_link * link,enum dp_link_encoding link_encoding)1015 static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
1016 {
1017 	struct dc *dc = link->ctx->dc;
1018 	struct pipe_ctx *pipe_ctx = NULL;
1019 	uint8_t i;
1020 
1021 	for (i = 0; i < MAX_PIPES; i++) {
1022 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1023 		if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
1024 			pipe_ctx->clock_source->funcs->program_pix_clk(
1025 					pipe_ctx->clock_source,
1026 					&pipe_ctx->stream_res.pix_clk_params,
1027 					link_encoding,
1028 					&pipe_ctx->pll_settings);
1029 			break;
1030 		}
1031 	}
1032 }
1033 
/*
 * Disable the PHY output for a link.
 *
 * For eDP, backlight is turned off before and panel power after the PHY;
 * otherwise the DMCU PHY lock is held around the disable. TMDS links with
 * an active OTG keep SYMCLK on (TX off only) to satisfy the HDMI SYMCLK
 * requirement; all other links get a full output disable.
 */
void dcn401_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
		/* Keep SYMCLK on for TMDS while OTG still references it. */
		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
	} else {
		link_hwss->disable_link_output(link, link_res, signal);
		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
	}

	/* NOTE(review): this guard tests edp_backlight_control but calls
	 * edp_power_control — confirm both hooks are always populated
	 * together before changing the condition.
	 */
	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
1066 
/*
 * Program the HW cursor position for a pipe.
 *
 * Translates the cursor from stream->src space to per-pipe recout space
 * (DCN4 composes the cursor after the scaler), compensating for ODM and
 * MPC slice splits by shifting the position and hotspot, then disables
 * the cursor when it falls entirely outside this pipe's recout.
 */
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int  bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	/* MPC combine is in effect when the pipe shares a plane with
	 * top/bottom pipes and the viewport covers only part of the source.
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after Scaler, so in HW it is in
	 * recout space and for HW Cursor position programming need to
	 * translate to recout space.
	 *
	 * Cursor X and Y position programmed into HW can't be negative,
	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
	 * position that goes into HW X and Y coordinates while HW Hot spot
	 * X and Y coordinates are length relative to the cursor top left
	 * corner, hotspot must be smaller than the cursor size.
	 *
	 * DMs/DC interface for Cursor position is in stream->src space, and
	 * DMs supposed to transform Cursor coordinates to stream->src space,
	 * then here we need to translate Cursor coordinates to stream->dst
	 * space, as now in HW, Cursor coordinates are in per pipe recout
	 * space, and for the given pipe valid coordinates are only in range
	 * from 0,0 - recout width, recout height space.
	 * If certain pipe combining is in place, need to further adjust per
	 * pipe to make sure each pipe enabling cursor on its part of the
	 * screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		/* Subtract the combined width of all ODM slices left of this pipe. */
		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	/* Translate into this pipe's recout and disable the cursor when it
	 * lies entirely outside the recout on any edge.
	 */
	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge*/

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge*/

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge*/

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge*/

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
1218 
dcn401_check_no_memory_request_for_cab(struct dc * dc)1219 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1220 {
1221 	int i;
1222 
1223 	/* First, check no-memory-request case */
1224 	for (i = 0; i < dc->current_state->stream_count; i++) {
1225 		if ((dc->current_state->stream_status[i].plane_count) &&
1226 			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1227 			/* Fail eligibility on a visible stream */
1228 			return false;
1229 	}
1230 
1231 	return true;
1232 }
1233 
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1234 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1235 {
1236 	int i;
1237 	uint8_t num_ways = 0;
1238 	uint32_t mall_ss_size_bytes = 0;
1239 
1240 	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1241 	// TODO add additional logic for PSR active stream exclusion optimization
1242 	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1243 
1244 	// Include cursor size for CAB allocation
1245 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1246 		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1247 
1248 		if (!pipe->stream || !pipe->plane_state)
1249 			continue;
1250 
1251 		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1252 	}
1253 
1254 	// Convert number of cache lines required to number of ways
1255 	if (dc->debug.force_mall_ss_num_ways > 0)
1256 		num_ways = dc->debug.force_mall_ss_num_ways;
1257 	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1258 		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1259 	else
1260 		num_ways = 0;
1261 
1262 	return num_ways;
1263 }
1264 
/*
 * Enable or disable MALL/CAB idle power optimization via a DMUB command.
 *
 * On enable: sends NO_DCN_REQ when no visible stream needs memory,
 * otherwise computes the required cache ways and sends FIT_IN_CAB or
 * NOT_FIT_IN_CAB. MALL SS is refused entirely when any stream uses PSR,
 * and skipped when any plane uses stereo or a TMZ surface.
 *
 * Returns true when a command was sent, false when DMUB or the current
 * state is unavailable or PSR blocks the feature.
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
1338 
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1339 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1340 		const struct pipe_ctx *top_pipe)
1341 {
1342 	bool is_wait_needed = false;
1343 	const struct pipe_ctx *pipe_ctx = top_pipe;
1344 
1345 	/* check if any surfaces are updating address while using flip immediate and dcc */
1346 	while (pipe_ctx != NULL) {
1347 		if (pipe_ctx->plane_state &&
1348 				pipe_ctx->plane_state->dcc.enable &&
1349 				pipe_ctx->plane_state->flip_immediate &&
1350 				pipe_ctx->plane_state->update_flags.bits.addr_update) {
1351 			is_wait_needed = true;
1352 			break;
1353 		}
1354 
1355 		/* check next pipe */
1356 		pipe_ctx = pipe_ctx->bottom_pipe;
1357 	}
1358 
1359 	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1360 		udelay(dc->debug.dcc_meta_propagation_delay_us);
1361 	}
1362 }
1363 
/*
 * Raise clocks and program watermarks ahead of a state commit.
 *
 * The ordering is deliberate: MCLK switching is temporarily disabled for
 * any transition into P-State support (re-enabled at the end), clocks are
 * raised before watermarks/arbiter are reprogrammed, compbuf is shrunk
 * last, and FAMS2 config is refreshed under the global control lock.
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* Lift the DC-mode softmax cap if the new state needs more memclk. */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					false);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
	}

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize. */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}
1421 
/*
 * Lower clocks and relax watermarks after a state commit (mirror of
 * dcn401_prepare_bandwidth): FAMS2 is re-enabled first, watermarks and
 * arbiter are set to their optimized values, the DC-mode memclk softmax
 * is restored, compbuf is grown back, clocks are dropped, and extended
 * blank is programmed for Z-state-eligible fixed-rate DRR pipes.
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_fams2_global_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_fams2_global_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					true);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
	}

	/* Re-apply the DC-mode softmax cap once memclk fits under it again. */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			/* Extended blank applies only to fixed-rate DRR streams
			 * (v_total_min == v_total_max) stretched beyond nominal.
			 */
			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
						pipe_ctx->dlg_regs.min_dst_y_next_start);
		}
	}
}
1471 
/* Acquire or release the FAMS2 global HW lock through the DMUB inbox0
 * fast path. No-op when DMUB is unavailable or FAMS2 is disabled.
 */
void dcn401_fams2_global_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock)
{
	union dmub_inbox0_cmd_lock_hw cmd;

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	memset(&cmd, 0, sizeof(cmd));
	cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	cmd.bits.lock = lock;
	cmd.bits.should_release = !lock;
	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, cmd);
}
1488 
dcn401_fams2_global_control_lock_fast(union block_sequence_params * params)1489 void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
1490 {
1491 	struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
1492 	bool lock = params->fams2_global_control_lock_fast_params.lock;
1493 
1494 	if (params->fams2_global_control_lock_fast_params.is_required) {
1495 		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1496 
1497 		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1498 		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1499 		hw_lock_cmd.bits.lock = lock;
1500 		hw_lock_cmd.bits.should_release = !lock;
1501 		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1502 	}
1503 }
1504 
/* Push the FAMS2 configuration for this state down to DMUB. The enable
 * actually sent is the caller's request ANDed with whether the state's
 * global config needs FAMS2 at all.
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	bool fams2_needed_by_state;

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	fams2_needed_by_state =
		context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;

	dc_dmub_srv_fams2_update_config(dc, context, enable && fams2_needed_by_state);
}
1516 
/*
 * Reprogram DSC when the ODM configuration of an OTG master changes:
 * update DSC on the new-state stream, then disconnect any DSC instance
 * that was attached to an old OPP head but is absent from the new state.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
									   &dc->current_state->res_ctx,
									   old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		/* Disconnect DSC from OPP heads that lost it in the new state. */
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}
1555 
/*
 * Reprogram the ODM combine topology for an OTG master in the new state:
 * set combine (or bypass for a single OPP head), enable OPP clocks and
 * left-edge extra pixel on each head, fix up DSC, and reprogram the OPP
 * blank pattern when the master has no DPP.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
1600 
/*
 * Unblank the stream on @pipe_ctx using @link_settings.
 *
 * Builds the encoder unblank parameters (ODM slice count, timing, link rate,
 * pixels per cycle), then unblanks via the HPO stream encoder for 128b/132b
 * DP signals or the DIO stream encoder for other DP signals. Finally turns
 * the eDP backlight back on when the local sink is eDP.
 */
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* calculate parameters for unblank */
	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);

	params.timing = pipe_ctx->stream->timing;
	params.link_settings.link_rate = link_settings->link_rate;
	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;

	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		/* 128b/132b DP uses the HPO stream encoder path */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
		hws->funcs.edp_backlight_control(link, true);
}
1627 
/*
 * Release display hardware control (e.g. prior to driver unload / reset).
 *
 * Disables FAMS2 in DMUB firmware and, depending on the debug option,
 * either forces p-state allow on the way out or simply drops p-state
 * support before the final clock update.
 */
void dcn401_hardware_release(struct dc *dc)
{
	if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
		dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);

		/* If pstate unsupported, or still supported
		 * by firmware, force it supported by dcn
		 */
		if (dc->current_state) {
			if ((!dc->clk_mgr->clks.p_state_change_support ||
					dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
					dc->res_pool->hubbub->funcs->force_pstate_change_control)
				dc->res_pool->hubbub->funcs->force_pstate_change_control(
						dc->res_pool->hubbub, true, true);

			dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
		}
	} else {
		/* Debug path: drop p-state support instead of forcing allow. */
		if (dc->current_state) {
			dc->clk_mgr->clks.p_state_change_support = false;
			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
		}
		/* Disable FAMS2 after the clock update in this path. */
		dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
	}
}
1654 
dcn401_wait_for_det_buffer_update_under_otg_master(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1655 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1656 {
1657 	struct pipe_ctx *opp_heads[MAX_PIPES];
1658 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1659 	struct hubbub *hubbub = dc->res_pool->hubbub;
1660 	int dpp_count = 0;
1661 
1662 	if (!otg_master->stream)
1663 		return;
1664 
1665 	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1666 			&context->res_ctx, opp_heads);
1667 
1668 	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1669 		if (opp_heads[slice_idx]->plane_state) {
1670 			dpp_count = resource_get_dpp_pipes_for_opp_head(
1671 					opp_heads[slice_idx],
1672 					&context->res_ctx,
1673 					dpp_pipes);
1674 			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1675 				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1676 					if (dpp_pipe && hubbub &&
1677 						dpp_pipe->plane_res.hubp &&
1678 						hubbub->funcs->wait_for_det_update)
1679 						hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1680 			}
1681 		} else {
1682 			if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
1683 				hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
1684 		}
1685 	}
1686 }
1687 
/*
 * Lock or unlock all enabled, non-phantom OTG master pipes for an
 * interdependent update.
 *
 * On lock, every qualifying pipe is simply locked. On unlock, pipes flagged
 * in dc->scratch.pipes_to_unlock_first are unlocked first and their DET
 * buffer updates are waited on (freeing DET before other pipes can claim
 * it), then the remaining pipes are unlocked.
 */
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i = 0;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg = NULL;

	if (lock) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			/* Only OTG masters with an enabled, non-phantom TG are locked. */
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
				continue;
			dc->hwss.pipe_control_lock(dc, pipe, true);
		}
	} else {
		/* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			if (dc->scratch.pipes_to_unlock_first[i]) {
				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
				dc->hwss.pipe_control_lock(dc, pipe, false);
				/* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/
				dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
			}
		}

		/* Unlocking the rest of the pipes */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (dc->scratch.pipes_to_unlock_first[i])
				continue;

			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}
}
1743 
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{
	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
	 * HUBP will properly fetch 3DLUT contents after unlock.
	 *
	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
	 * of whether OTG lock is currently being held or not.
	 */
	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
	struct pipe_ctx *odm_pipe, *mpc_pipe;
	int i, wa_pipe_ct = 0;

	/* Collect all pipes in the ODM/MPC tree using VIDMEM-sourced 3DLUT
	 * with shaper+3DLUT enabled — these need the workaround.
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
			if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
						== DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
					&& mpc_pipe->plane_state->mcm_shaper_3dlut_setting
						== DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
				wa_pipes[wa_pipe_ct++] = mpc_pipe;
			}
		}
	}

	if (wa_pipe_ct > 0) {
		/* Hold VUPDATE keepout across the unlock so the 3DLUT enable
		 * isn't cancelled mid-sequence.
		 */
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);

		/* Assert 3DLUT FL enable before the unlock... */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);

		/* ...and re-assert it after the unlock completes, in case VREADY
		 * cancelled the pending enable (the HW issue described above).
		 */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
	} else {
		/* No affected pipes: plain unlock. */
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
	}
}
1791 
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1792 void dcn401_program_outstanding_updates(struct dc *dc,
1793 		struct dc_state *context)
1794 {
1795 	struct hubbub *hubbub = dc->res_pool->hubbub;
1796 
1797 	/* update compbuf if required */
1798 	if (hubbub->funcs->program_compbuf_segments)
1799 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1800 }
1801 
/*
 * Tear down the display back end (stream encoder, audio, OTG, PHY clocks)
 * for @pipe_ctx and clear its tree pointers.
 *
 * Only the top pipe of a blending tree disables the shared back end; other
 * pipes just drop their stream reference. Called from reset_hw_ctx_wrap for
 * pipes that need reprogramming or are being disabled.
 */
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	DC_LOGGER_INIT(dc->ctx->logger);
	/* Nothing to tear down if no stream encoder was ever assigned. */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* Clear any DRR (variable refresh) adjustment still pending. */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);

		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

	/*
	 * In case of a dangling plane, setting this to NULL unconditionally
	 * causes failures during reset hw ctx where, if stream is NULL,
	 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
	 */
	pipe_ctx->stream = NULL;
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1889 
/*
 * Reset back ends for pipes whose configuration changed between the current
 * state and the new state @context.
 *
 * Iterates pipes in reverse so child pipes are reset before their parent
 * (pipe 0 last), resetting only OTG master pipes (not secondary MPC/ODM
 * pipes) whose stream went away or needs reprogramming.
 */
void dcn401_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Skip secondary MPC/ODM pipes; the tree owner handles the back end. */
		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			if (hws->funcs.reset_back_end_for_pipe)
				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			/* Power down the old pixel clock source now that it's unused. */
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1922 
dcn401_calculate_vready_offset_for_group(struct pipe_ctx * pipe)1923 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
1924 {
1925 	struct pipe_ctx *other_pipe;
1926 	unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
1927 
1928 	/* Always use the largest vready_offset of all connected pipes */
1929 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
1930 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1931 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1932 	}
1933 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
1934 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1935 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1936 	}
1937 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
1938 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1939 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1940 	}
1941 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
1942 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
1943 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
1944 	}
1945 
1946 	return vready_offset;
1947 }
1948 
dcn401_program_tg(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context,struct dce_hwseq * hws)1949 static void dcn401_program_tg(
1950 	struct dc *dc,
1951 	struct pipe_ctx *pipe_ctx,
1952 	struct dc_state *context,
1953 	struct dce_hwseq *hws)
1954 {
1955 	pipe_ctx->stream_res.tg->funcs->program_global_sync(
1956 		pipe_ctx->stream_res.tg,
1957 		dcn401_calculate_vready_offset_for_group(pipe_ctx),
1958 		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
1959 		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
1960 		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
1961 		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
1962 
1963 	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
1964 		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
1965 
1966 	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
1967 		pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
1968 
1969 	if (hws->funcs.setup_vupdate_interrupt)
1970 		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
1971 }
1972 
/*
 * Program one pipe for the new state @context, driven by its update flags:
 * blanking, timing generator global sync, ODM, plane enable, DET size,
 * HUBP/DPP, HDR multiplier, input/output transfer functions, OPP format,
 * ABM and test pattern. Each step runs only when its flag indicates a
 * change, keeping partial updates cheap.
 */
void dcn401_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.odm ||
			pipe_ctx->stream->update_flags.bits.abm_level)
			hws->funcs.blank_pixel_data(dc, pipe_ctx,
				!pipe_ctx->plane_state ||
				!pipe_ctx->plane_state->visible);
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
		&& !pipe_ctx->prev_odm_pipe)
		dcn401_program_tg(dc, pipe_ctx, context, hws);

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		if (hws->funcs.enable_plane)
			hws->funcs.enable_plane(dc, pipe_ctx, context);
		else
			dc->hwss.enable_plane(dc, pipe_ctx, context);

		/* Re-propagate watermarks after the new plane comes up, if supported. */
		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	/* Reprogram DET allocation in KB and/or segments, per HUBBUB capability. */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size)
			dc->res_pool->hubbub->funcs->program_det_size(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		if (dc->res_pool->hubbub->funcs->program_det_segments)
			dc->res_pool->hubbub->funcs->program_det_segments(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
	}

	/* Any pipe/plane/stream change requires a HUBP+DPP update. */
	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
	    pipe_ctx->plane_state->update_flags.raw ||
	    pipe_ctx->stream->update_flags.raw))
		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->plane_state->update_flags.bits.hdr_mult))
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state &&
		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable))
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
	    pipe_ctx->update_flags.bits.plane_changed ||
	    pipe_ctx->stream->update_flags.bits.out_tf)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interation between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
		|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			dc->hwss.set_pipe(pipe_ctx);
			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
				pipe_ctx->stream->abm_level);
		}
	}

	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
		struct bit_depth_reduction_params params;

		/* Disable bit depth reduction before applying the test pattern. */
		memset(&params, 0, sizeof(params));
		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
		dc->hwss.set_disp_pattern_generator(dc,
			pipe_ctx,
			pipe_ctx->stream_res.test_pattern_params.test_pattern,
			pipe_ctx->stream_res.test_pattern_params.color_space,
			pipe_ctx->stream_res.test_pattern_params.color_depth,
			NULL,
			pipe_ctx->stream_res.test_pattern_params.width,
			pipe_ctx->stream_res.test_pattern_params.height,
			pipe_ctx->stream_res.test_pattern_params.offset);
	}
}
2088 
/*
 * Program the display front end (HUBP/DPP/MPC/OPP) for the new state
 * @context while pipes are locked.
 *
 * Phases, in order: disable triple buffering for full updates, detect
 * per-pipe changes, re-enable phantom OTGs that are being disabled (so
 * double-buffered updates land), blank OTGs being disabled, disconnect
 * MPCCs and zero DET for disabled/phantom-bound pipes, update ODM on
 * blanked OTG masters, then program all updated pipes top-down per
 * blending tree plus writeback and an MPO-transition workaround.
 */
void dcn401_program_front_end_for_ctx(
	struct dc *dc,
	struct dc_state *context)
{
	int i;
	unsigned int prev_hubp_count = 0;
	unsigned int hubp_count = 0;
	struct dce_hwseq *hws = dc->hwseq;
	struct pipe_ctx *pipe = NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (resource_is_pipe_topology_changed(dc->current_state, context))
		resource_log_pipe_topology_update(dc, context);

	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];

			if (pipe->plane_state) {
				/* Triple-buffered flips must not be pending during a full update. */
				if (pipe->plane_state->triplebuffer_flips)
					BREAK_TO_DEBUGGER();

				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe, pipe->plane_state->triplebuffer_flips);
			}
		}
	}

	/* Count planes in the old and new states to detect 0 -> N transitions. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			prev_hubp_count++;
		if (context->res_ctx.pipe_ctx[i].plane_state)
			hubp_count++;
	}

	if (prev_hubp_count == 0 && hubp_count > 0) {
		/* Force p-state allow while bringing up the first plane(s). */
		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, true, false);
		udelay(500);
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
			&context->res_ctx.pipe_ctx[i]);

	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
	 * buffer updates properly)
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;

		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
			dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;

			if (tg->funcs->enable_crtc) {
				if (dc->hwseq->funcs.blank_pixel_data)
					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);

				tg->funcs->enable_crtc(tg);
			}
		}
	}
	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			&& !context->res_ctx.pipe_ctx[i].top_pipe
			&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
			&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);


	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
			 * then we want to do the programming here (effectively it's being disabled). If we do
			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
			 * DET allocation.
			 */
			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
				(context->res_ctx.pipe_ctx[i].plane_state &&
				dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
				SUBVP_PHANTOM))) {
				if (hubbub->funcs->program_det_size)
					hubbub->funcs->program_det_size(hubbub,
						dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
				if (dc->res_pool->hubbub->funcs->program_det_segments)
					dc->res_pool->hubbub->funcs->program_det_segments(
						hubbub,	dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			}
			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
				&dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/* update ODM for blanked OTG master pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			!resource_is_pipe_type(pipe, DPP_PIPE) &&
			pipe->update_flags.bits.odm &&
			hws->funcs.update_odm)
			hws->funcs.update_odm(dc, context, pipe);
	}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			while (pipe) {
				if (hws->funcs.program_pipe)
					hws->funcs.program_pipe(dc, pipe, context);
				else {
					/* Don't program phantom pipes in the regular front end programming sequence.
					 * There is an MPO transition case where a pipe being used by a video plane is
					 * transitioned directly to be a phantom pipe when closing the MPO video.
					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
					 * right away) but the MPO still exists until the double buffered update of the
					 * main pipe so we will get a frame of underflow if the phantom pipe is
					 * programmed here.
					 */
					if (pipe->stream &&
						dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
						dcn401_program_pipe(dc, pipe, context);
				}

				pipe = pipe->bottom_pipe;
			}
		}

		/* Program secondary blending tree and writeback pipes */
		pipe = &context->res_ctx.pipe_ctx[i];
		if (!pipe->top_pipe && !pipe->prev_odm_pipe
			&& pipe->stream && pipe->stream->num_wb_info > 0
			&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
				|| pipe->stream->update_flags.raw)
			&& hws->funcs.program_all_writeback_pipes_in_tree)
			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);

		/* Avoid underflow by check of pipe line read when adding 2nd plane. */
		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
			!pipe->top_pipe &&
			pipe->stream &&
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
			dc->current_state->stream_status[0].plane_count == 1 &&
			context->stream_status[0].plane_count > 1) {
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
		}
	}
}
2254 
dcn401_post_unlock_program_front_end(struct dc * dc,struct dc_state * context)2255 void dcn401_post_unlock_program_front_end(
2256 	struct dc *dc,
2257 	struct dc_state *context)
2258 {
2259 	// Timeout for pipe enable
2260 	unsigned int timeout_us = 100000;
2261 	unsigned int polling_interval_us = 1;
2262 	struct dce_hwseq *hwseq = dc->hwseq;
2263 	int i;
2264 
2265 	DC_LOGGER_INIT(dc->ctx->logger);
2266 
2267 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2268 		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
2269 			!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
2270 			dc->hwss.post_unlock_reset_opp(dc,
2271 				&dc->current_state->res_ctx.pipe_ctx[i]);
2272 
2273 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2274 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2275 			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
2276 
2277 	/*
2278 	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
2279 	 * part of the enable operation otherwise, DM may request an immediate flip which
2280 	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
2281 	 * is unsupported on DCN.
2282 	 */
2283 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2284 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2285 		// Don't check flip pending on phantom pipes
2286 		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
2287 			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
2288 			struct hubp *hubp = pipe->plane_res.hubp;
2289 			int j = 0;
2290 
2291 			for (j = 0; j < timeout_us / polling_interval_us
2292 				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
2293 				udelay(polling_interval_us);
2294 		}
2295 	}
2296 
2297 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2298 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2299 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2300 
2301 		/* When going from a smaller ODM slice count to larger, we must ensure double
2302 		 * buffer update completes before we return to ensure we don't reduce DISPCLK
2303 		 * before we've transitioned to 2:1 or 4:1
2304 		 */
2305 		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
2306 			resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
2307 			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
2308 			int j = 0;
2309 			struct timing_generator *tg = pipe->stream_res.tg;
2310 
2311 			if (tg->funcs->get_optc_double_buffer_pending) {
2312 				for (j = 0; j < timeout_us / polling_interval_us
2313 					&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
2314 					udelay(polling_interval_us);
2315 			}
2316 		}
2317 	}
2318 
2319 	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
2320 		dc->res_pool->hubbub->funcs->force_pstate_change_control(
2321 			dc->res_pool->hubbub, false, false);
2322 
2323 
2324 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2325 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2326 
2327 		if (pipe->plane_state && !pipe->top_pipe) {
2328 			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
2329 			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
2330 			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
2331 			 * programming sequence).
2332 			 */
2333 			while (pipe) {
2334 				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
2335 					/* When turning on the phantom pipe we want to run through the
2336 					 * entire enable sequence, so apply all the "enable" flags.
2337 					 */
2338 					if (dc->hwss.apply_update_flags_for_phantom)
2339 						dc->hwss.apply_update_flags_for_phantom(pipe);
2340 					if (dc->hwss.update_phantom_vp_position)
2341 						dc->hwss.update_phantom_vp_position(dc, context, pipe);
2342 					dcn401_program_pipe(dc, pipe, context);
2343 				}
2344 				pipe = pipe->bottom_pipe;
2345 			}
2346 		}
2347 	}
2348 
2349 	if (!hwseq)
2350 		return;
2351 
2352 	/* P-State support transitions:
2353 	 * Natural -> FPO:      P-State disabled in prepare, force disallow anytime is safe
2354 	 * FPO -> Natural:      Unforce anytime after FW disable is safe (P-State will assert naturally)
2355 	 * Unsupported -> FPO:  P-State enabled in optimize, force disallow anytime is safe
2356 	 * FPO -> Unsupported:  P-State disabled in prepare, unforce disallow anytime is safe
2357 	 * FPO <-> SubVP:       Force disallow is maintained on the FPO / SubVP pipes
2358 	 */
2359 	if (hwseq->funcs.update_force_pstate)
2360 		dc->hwseq->funcs.update_force_pstate(dc, context);
2361 
2362 	/* Only program the MALL registers after all the main and phantom pipes
2363 	 * are done programming.
2364 	 */
2365 	if (hwseq->funcs.program_mall_pipe_config)
2366 		hwseq->funcs.program_mall_pipe_config(dc, context);
2367 
2368 	/* WA to apply WM setting*/
2369 	if (hwseq->wa.DEGVIDCN21)
2370 		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
2371 
2372 
2373 	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
2374 	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
2375 
2376 		if (dc->current_state->stream_status[0].plane_count == 1 &&
2377 			context->stream_status[0].plane_count > 1) {
2378 
2379 			struct timing_generator *tg = dc->res_pool->timing_generators[0];
2380 
2381 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
2382 
2383 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
2384 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
2385 				tg->funcs->get_frame_count(tg);
2386 		}
2387 	}
2388 }
2389 
dcn401_update_bandwidth(struct dc * dc,struct dc_state * context)2390 bool dcn401_update_bandwidth(
2391 	struct dc *dc,
2392 	struct dc_state *context)
2393 {
2394 	int i;
2395 	struct dce_hwseq *hws = dc->hwseq;
2396 
2397 	/* recalculate DML parameters */
2398 	if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
2399 		return false;
2400 
2401 	/* apply updated bandwidth parameters */
2402 	dc->hwss.prepare_bandwidth(dc, context);
2403 
2404 	/* update hubp configs for all pipes */
2405 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2406 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2407 
2408 		if (pipe_ctx->plane_state == NULL)
2409 			continue;
2410 
2411 		if (pipe_ctx->top_pipe == NULL) {
2412 			bool blank = !is_pipe_tree_visible(pipe_ctx);
2413 
2414 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
2415 				pipe_ctx->stream_res.tg,
2416 				dcn401_calculate_vready_offset_for_group(pipe_ctx),
2417 				(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2418 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2419 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2420 				(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2421 
2422 			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2423 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
2424 
2425 			if (pipe_ctx->prev_odm_pipe == NULL)
2426 				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2427 
2428 			if (hws->funcs.setup_vupdate_interrupt)
2429 				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2430 		}
2431 
2432 		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
2433 			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
2434 				pipe_ctx->plane_res.hubp,
2435 				&pipe_ctx->hubp_regs,
2436 				&pipe_ctx->global_sync,
2437 				&pipe_ctx->stream->timing);
2438 	}
2439 
2440 	return true;
2441 }
2442 
/*
 * Diff @old_pipe (from @old_state) against @new_pipe (from @new_state) and
 * record everything that needs reprogramming in new_pipe->update_flags.
 * The flags are cleared first, so on return they reflect exactly this
 * comparison. Early-outs: non-phantom -> phantom transitions are marked
 * disable-only, and fully unused pipes are skipped.
 */
void dcn401_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	/* Snapshot DCN4.x global-sync fields for the comparison below. */
	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;

	/* Start from a clean slate; the checks below only set bits. */
	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
		new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
		resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* Newly enabled pipe: request the full programming set. */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
			new_pipe->stream_res.test_pattern_params.width != 0 &&
			new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		/* ODM/global-sync programming is owned by the tree root only. */
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
		new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state)
		new_pipe->update_flags.bits.plane_changed = true;

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes */
		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
			|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
			|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
			|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
		|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
		|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
			&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* NOTE: old_*_regs are by-value local copies. After flagging the
		 * interdependent fields below, those fields are overwritten in
		 * the copies with the new values so that the final memcmp only
		 * fires on differences NOT already covered by
		 * hubp_interdependent. The pipe state itself is not modified.
		 */
		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 old_rq_regs = old_pipe->hubp_regs.rq_regs;
		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 *new_rq_regs = &new_pipe->hubp_regs.rq_regs;

		/* Detect pipe interdependent updates */
		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
			|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
			|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
			|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
			|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
			|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
			|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
			|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
				new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
			|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
			|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
			/* Neutralize the already-flagged fields in the local
			 * copies (see NOTE above).
			 */
			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
			memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
			memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
		&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}
2639 
dcn401_plane_atomic_power_down(struct dc * dc,struct dpp * dpp,struct hubp * hubp)2640 void dcn401_plane_atomic_power_down(struct dc *dc,
2641 		struct dpp *dpp,
2642 		struct hubp *hubp)
2643 {
2644 	struct dce_hwseq *hws = dc->hwseq;
2645 	uint32_t org_ip_request_cntl = 0;
2646 
2647 	DC_LOGGER_INIT(dc->ctx->logger);
2648 
2649 	if (REG(DC_IP_REQUEST_CNTL)) {
2650 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
2651 		if (org_ip_request_cntl == 0)
2652 			REG_SET(DC_IP_REQUEST_CNTL, 0,
2653 				IP_REQUEST_EN, 1);
2654 	}
2655 
2656 	if (hws->funcs.dpp_pg_control)
2657 		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
2658 
2659 	if (hws->funcs.hubp_pg_control)
2660 		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
2661 
2662 	hubp->funcs->hubp_reset(hubp);
2663 	dpp->funcs->dpp_reset(dpp);
2664 
2665 	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
2666 		REG_SET(DC_IP_REQUEST_CNTL, 0,
2667 			IP_REQUEST_EN, 0);
2668 
2669 	DC_LOG_DEBUG(
2670 			"Power gated front end %d\n", hubp->inst);
2671 
2672 	if (hws->funcs.dpp_root_clock_control)
2673 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
2674 }
2675