xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c (revision a5210135489ae7bc1ef1cb4a8157361dd7b468cd)
1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4 
5 
6 #include "os_types.h"
7 #include "dm_services.h"
8 #include "basics/dc_common.h"
9 #include "dm_helpers.h"
10 #include "core_types.h"
11 #include "resource.h"
12 #include "dccg.h"
13 #include "dce/dce_hwseq.h"
14 #include "reg_helper.h"
15 #include "abm.h"
16 #include "hubp.h"
17 #include "dchubbub.h"
18 #include "timing_generator.h"
19 #include "opp.h"
20 #include "ipp.h"
21 #include "mpc.h"
22 #include "mcif_wb.h"
23 #include "dc_dmub_srv.h"
24 #include "link_hwss.h"
25 #include "dpcd_defs.h"
26 #include "clk_mgr.h"
27 #include "dsc.h"
28 #include "link_service.h"
29 #include "custom_float.h"
30 
31 #include "dce/dmub_hw_lock_mgr.h"
32 #include "dcn10/dcn10_cm_common.h"
33 #include "dcn10/dcn10_hubbub.h"
34 #include "dcn20/dcn20_optc.h"
35 #include "dcn30/dcn30_cm_common.h"
36 #include "dcn32/dcn32_hwseq.h"
37 #include "dcn401_hwseq.h"
38 #include "dcn401/dcn401_resource.h"
39 #include "dc_state_priv.h"
40 #include "link_enc_cfg.h"
41 #include "../hw_sequencer.h"
42 #include "dio/dcn10/dcn10_dio.h"
43 
44 #define DC_LOGGER_INIT(logger)
45 
46 #define CTX \
47 	hws->ctx
48 #define REG(reg)\
49 	hws->regs->reg
50 #define DC_LOGGER \
51 	dc->ctx->logger
52 
53 
54 #undef FN
55 #define FN(reg_name, field_name) \
56 	hws->shifts->field_name, hws->masks->field_name
57 
/*
 * Request the minimum clock state at HW init time.
 *
 * All clocks except DISPCLK are taken from the lowest entry (entries[0]) of
 * the clock manager's clock table. DISPCLK is kept at its currently running
 * frequency (read back from DENTIST, or from the boot snapshot when no
 * readback hook exists) unless boot optimizations are disabled, because
 * dropping it while VBIOS still drives a display can corrupt audio.
 * The assembled state is then committed through clk_mgr update_clocks().
 */
void dcn401_initialize_min_clocks(struct dc *dc)
{
	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;

	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
	/* entries[0] is the lowest clock table state; table stores MHz, convert to kHz */
	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
	if (dc->debug.disable_boot_optimizations) {
		clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
	} else {
		/* Even though DPG_EN = 1 for the connected display, it still requires the
		 * correct timing so we cannot set DISPCLK to min freq or it could cause
		 * audio corruption. Read current DISPCLK from DENTIST and request the same
		 * freq to ensure that the timing is valid and unchanged.
		 */
		if (dc->clk_mgr->funcs->get_dispclk_from_dentist) {
			clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
		} else {
			/* no DENTIST readback hook: fall back to the DISPCLK captured at boot */
			clocks->dispclk_khz = dc->clk_mgr->boot_snapshot.dispclk * 1000;
		}
	}
	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
	clocks->fclk_p_state_change_support = true;
	clocks->p_state_change_support = true;

	/* commit the software clock state to the clock manager */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			dc->current_state,
			true);
}
90 
/*
 * Program the three gamut remap stages available on the MPCC for this pipe:
 *  - MPCC_MCM_FIRST_GAMUT_REMAP: from the plane's gamut_remap_matrix (if enabled),
 *  - MPCC_MCM_SECOND_GAMUT_REMAP: always bypass for now,
 *  - MPCC_OGAM_GAMUT_REMAP: from the stream's gamut_remap_matrix, top pipe
 *    only, matching existing DCN3x behavior.
 * A stage with no enabled matrix is explicitly programmed to BYPASS.
 */
void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
{
	unsigned int i = 0;
	struct mpc_grph_gamut_adjustment mpc_adjust;
	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;

	// program MPCC_MCM_FIRST_GAMUT_REMAP
	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;

	if (pipe_ctx->plane_state &&
		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
		/* plane supplies a software CSC matrix - program it instead of bypass */
		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
			mpc_adjust.temperature_matrix[i] =
			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
	}

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);

	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);

	// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;

	/* stream-level remap applies only to the top pipe of a blend tree */
	if (pipe_ctx->top_pipe == NULL) {
		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
				mpc_adjust.temperature_matrix[i] =
				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
		}
	}

	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
}
135 
/*
 * One-time hardware initialization sequence for DCN4.01.
 *
 * Order is significant: clocks and DCCG come up first, then link encoders
 * are initialized and active DIGs detected, then pipes are powered down
 * (unless seamless boot), audio/backlight/ABM are initialized, clock gating
 * and watermarks are configured, and finally DMCUB capabilities are queried.
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	unsigned int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	bool dchub_ref_freq_changed;
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
				dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			/* remember pre-init DCHUB ref freq so we can detect a change below */
			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			/* eDP present: power down each link with an active DIG,
			 * sequencing backlight off -> power down -> panel power off
			 */
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			/* no eDP: a single power_down suffices if any PHY DIG is on */
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->ep_type != DISPLAY_ENDPOINT_PHY)
					continue;
				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	/* capture backlight level from panel control; last panel wins if several */
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (dc->res_pool->dio && dc->res_pool->dio->funcs->mem_pwr_ctrl)
		dc->res_pool->dio->funcs->mem_pwr_ctrl(dc->res_pool->dio, false);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		if (dc->res_pool->dccg && dc->res_pool->dccg->funcs && dc->res_pool->dccg->funcs->allow_clock_gating)
			dc->res_pool->dccg->funcs->allow_clock_gating(dc->res_pool->dccg, true);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;

		/* sw and fw FAMS versions must match for support */
		dc->debug.fams2_config.bits.enable &=
			dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver;
		dchub_ref_freq_changed =
			res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq;
		if ((!dc->debug.fams2_config.bits.enable || dchub_ref_freq_changed) &&
		    dc->res_pool->funcs->update_bw_bounding_box &&
		    dc->clk_mgr && dc->clk_mgr->bw_params) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			dc->res_pool->funcs->update_bw_bounding_box(dc,
								    dc->clk_mgr->bw_params);
		}
	}
}
376 
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)377 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
378 {
379 	(void)dc;
380 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
381 
382 	if (hubp->funcs->hubp_enable_3dlut_fl) {
383 		hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
384 	}
385 }
386 
/*
 * Program the pre-blend MCM (multi color management) LUTs for a plane:
 * the 1D (blend) LUT, the shaper LUT, and the 3D LUT.
 *
 * A double-banked scheme is used: the bank currently in use is detected and
 * programming targets the other bank. The 3D LUT is loaded either via the
 * HUBP DMA fast-load path (when cm->flags.bits.lut3d_dma_enable is set) or
 * via the legacy host register write path.
 *
 * Returns false only when a DMA fast load was requested but the HUBP does
 * not implement the fast-load hook; true otherwise.
 */
bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
				const struct dc_plane_state *plane_state)
{
	struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	const struct dc_plane_cm *cm = &plane_state->cm;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	struct dc_3dlut_dma lut3d_dma;
	bool lut_enable;
	bool lut_bank_a;
	bool rval;
	bool result = true;

	/* decide LUT bank based on current in use */
	/* check 1DLUT, then shaper, then 3DLUT until one reports enabled */
	mpc->funcs->get_lut_mode(mpc, MCM_LUT_1DLUT, mpcc_id, &lut_enable, &lut_bank_a);
	if (!lut_enable) {
		mpc->funcs->get_lut_mode(mpc, MCM_LUT_SHAPER, mpcc_id, &lut_enable, &lut_bank_a);
	}
	if (!lut_enable) {
		mpc->funcs->get_lut_mode(mpc, MCM_LUT_3DLUT, mpcc_id, &lut_enable, &lut_bank_a);
	}

	/* switch to the next bank */
	if (lut_enable) {
		lut_bank_a = !lut_bank_a;
	}

	/* MCM location fixed to pre-blend */
	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);

	/* 1D LUT */
	lut_enable = cm->flags.bits.blend_enable;
	memset(&m_lut_params, 0, sizeof(m_lut_params));
	if (lut_enable) {
		if (cm->blend_func.type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &cm->blend_func.pwl;
		else if (cm->blend_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* convert distributed points into the HW PWL format */
			rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
					&cm->blend_func,
					&dpp_base->regamma_params,
					false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}

		/* no usable PWL produced - disable the LUT instead of programming garbage */
		if (!m_lut_params.pwl) {
			lut_enable = false;
		}
	} else {
		lut_enable = false;
	}

	if (mpc->funcs->program_lut_mode)
		mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut_enable, lut_bank_a, CM_LUT_SIZE_NONE, mpcc_id);
	if (lut_enable && mpc->funcs->populate_lut)
		mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, &m_lut_params, lut_bank_a, mpcc_id);

	/* Shaper */
	lut_enable = cm->flags.bits.shaper_enable;
	if (lut_enable) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (cm->shaper_func.type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &cm->shaper_func.pwl;
		else if (cm->shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
			/* distributed-points shaper is unexpected here; flag it but
			 * still attempt the conversion
			 */
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
					&cm->shaper_func,
					&dpp_base->shaper_params,
					true);
			m_lut_params.pwl = rval ? &dpp_base->shaper_params : NULL;
		}
		if (!m_lut_params.pwl) {
			lut_enable = false;
		}
	} else {
		lut_enable = false;
	}

	if (mpc->funcs->program_lut_mode)
		mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, lut_enable, lut_bank_a, CM_LUT_SIZE_NONE, mpcc_id);
	if (lut_enable && mpc->funcs->populate_lut)
		mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, &m_lut_params, lut_bank_a, mpcc_id);

	/* NOTE: Toggling from DMA->Host is not supported atomically as hardware
	 * blocks writes until 3DLUT FL mode is cleared from HUBP on VUpdate.
	 * Expectation is either option is used consistently.
	 */

	/* 3DLUT */
	lut_enable = cm->flags.bits.lut3d_enable;
	if (lut_enable && cm->flags.bits.lut3d_dma_enable) {
		/* Fast (DMA) Load Mode */
		/* MPC */
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut_enable, lut_bank_a, cm->lut3d_dma.size, mpcc_id);

		/* only supports 12 bit */
		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id);

		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		/* HUBP */
		if (hubp->funcs->hubp_program_3dlut_fl_config)
			hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);

		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);

		if (hubp->funcs->hubp_enable_3dlut_fl) {
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		} else {
			/* GPU memory only supports fast load path */
			BREAK_TO_DEBUGGER();
			lut_enable = false;
			result = false;
		}
	} else {
		/* Legacy (Host) Load Mode */
		memset(&m_lut_params, 0, sizeof(m_lut_params));

		if (cm->flags.bits.lut3d_enable && cm->lut3d_func.state.bits.initialized) {
			m_lut_params.lut3d = &cm->lut3d_func.lut_3d;
		} else {
			lut_enable = false;
		}

		/* MPC */
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc,
					MCM_LUT_3DLUT,
					lut_enable,
					lut_bank_a,
					cm->lut3d_func.lut_3d.use_tetrahedral_9 ? CM_LUT_SIZE_999 : CM_LUT_SIZE_171717,
					mpcc_id);

		if (lut_enable) {
			if (mpc->funcs->program_lut_read_write_control)
				mpc->funcs->program_lut_read_write_control(mpc,
						MCM_LUT_3DLUT,
						lut_bank_a,
						cm->lut3d_func.lut_3d.use_12bits ? 12 : 10,
						mpcc_id);

			if (mpc->funcs->update_3dlut_fast_load_select)
				mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xf);

			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, &m_lut_params, lut_bank_a, mpcc_id);
		}

		/* HUBP */
		/* clear any previous fast-load config and make sure fast load is off */
		memset(&lut3d_dma, 0, sizeof(lut3d_dma));
		if (hubp->funcs->hubp_program_3dlut_fl_config)
			hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma);

		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
	}

	return result;
}
555 
/*
 * Program the output transfer function for this pipe's MPCC.
 *
 * For OPP-head pipes, first try the shaper + 3DLUT path; only when that
 * fails is the OGAM block programmed from the stream's out_transfer_func
 * (HWPWL used directly, distributed points converted to PWL). Non-head
 * pipes fall through with params == NULL, which programs OGAM to bypass.
 *
 * Returns the result of the shaper/3DLUT programming attempt (false when
 * OGAM programming was used instead).
 */
bool dcn401_set_output_transfer_func(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				const struct dc_stream_state *stream)
{
	(void)dc;
	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
	const struct pwl_params *params = NULL;
	bool ret = false;

	/* program OGAM or 3DLUT only for the top pipe*/
	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
		/*program shaper and 3dlut in MPC*/
		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
		if (ret == false && mpc->funcs->set_output_gamma) {
			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
				params = &stream->out_transfer_func.pwl;
			else if (pipe_ctx->stream->out_transfer_func.type ==
					TF_TYPE_DISTRIBUTED_POINTS &&
					cm3_helper_translate_curve_to_hw_format(stream->ctx,
					&stream->out_transfer_func,
					&mpc->blender_params, false))
				params = &mpc->blender_params;
			/* there are no ROM LUTs in OUTGAM */
			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
				BREAK_TO_DEBUGGER();
		}
	}

	/* NULL params puts the output gamma block into bypass */
	if (mpc->funcs->set_output_gamma)
		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);

	return ret;
}
590 
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)591 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
592 				unsigned int *tmds_div)
593 {
594 	struct dc_stream_state *stream = pipe_ctx->stream;
595 
596 	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
597 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
598 			*tmds_div = PIXEL_RATE_DIV_BY_2;
599 		else
600 			*tmds_div = PIXEL_RATE_DIV_BY_4;
601 	} else {
602 		*tmds_div = PIXEL_RATE_DIV_BY_1;
603 	}
604 
605 	if (*tmds_div == PIXEL_RATE_DIV_NA)
606 		ASSERT(false);
607 
608 }
609 
/*
 * Pre-compute everything dcn401_enable_stream_timing() needs:
 * the TMDS pixel-rate divider, the OPP instances for every ODM slice,
 * the link symclk software state (for TMDS), the DRR vertical totals,
 * and the static-screen event trigger mask for DRR.
 *
 * Note: 'manual_mode' is currently never written here and keeps the
 * caller's initial value.
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	(void)dc;
	struct dc_stream_state *stream = pipe_ctx->stream;
	int i;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	/* collect all OPP heads behind this OTG master (ODM combine slices) */
	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (i = 0; i < *opp_cnt; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;

	if (dc_is_tmds_signal(stream->signal)) {
		/* track OTG symclk usage; preserve TX on/off state if already on */
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
650 
/*
 * Enable stream timing on the OTG master pipe: program the pixel-rate
 * divider, ODM combine, DTBCLK_P source, pixel clock PLL, CRTC timing,
 * OPP clocks, then start the CRTC and configure DRR / static-screen
 * triggers. The ordering follows the hardware programming guide and
 * must not be rearranged.
 *
 * Returns DC_OK on success (or for non-OTG-master pipes, which are a
 * no-op), DC_ERROR_UNEXPECTED if the pixel clock or CRTC fails to enable.
 */
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = {0};
	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
	struct dc_crtc_timing patched_crtc_timing = stream->timing;
	bool manual_mode = false;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	int odm_slice_width;
	int last_odm_slice_width;
	int i;

	/* timing is only programmed on the OTG master */
	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
		return DC_OK;

	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
			tmds_div, unused_div);
	}

	/* TODO check if timing_changed, disable stream if timing changed */

	/* more than one OPP head means ODM combine is in use */
	if (opp_cnt > 1) {
		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	}

	/* set DTBCLK_P */
	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
		}
	}

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* platform workaround applied for non-DP signals only */
	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	/* if we are padding, h_addressable needs to be adjusted */
	if (dc->debug.enable_hblank_borrow) {
		patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
		patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
		patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
		pipe_ctx->stream_res.tg,
		&patched_crtc_timing,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
		pipe_ctx->stream->signal,
		true);

	/* enable the OPP clocks for every ODM slice and set up left-edge
	 * extra pixel handling per slice
	 */
	for (i = 0; i < opp_cnt; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);

	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	/* phantom (SubVP) pipes need an extra post-enable step on the OTG */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}
784 
get_phyd32clk_src(struct dc_link * link)785 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
786 {
787 	switch (link->link_enc->transmitter) {
788 	case TRANSMITTER_UNIPHY_A:
789 		return PHYD32CLKA;
790 	case TRANSMITTER_UNIPHY_B:
791 		return PHYD32CLKB;
792 	case TRANSMITTER_UNIPHY_C:
793 		return PHYD32CLKC;
794 	case TRANSMITTER_UNIPHY_D:
795 		return PHYD32CLKD;
796 	case TRANSMITTER_UNIPHY_E:
797 		return PHYD32CLKE;
798 	default:
799 		return PHYD32CLKA;
800 	}
801 }
802 
/*
 * Derive the per-stream clocking parameters used by dcn401_enable_stream():
 * the HPO stream encoder instance and PHYD32CLK source (128b/132b DP only),
 * the TMDS pixel-rate divider, and the DP early-control value.
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc *dc = stream->ctx->dc;
	enum dc_lane_count lanes = stream->link->cur_link_settings.lane_count;
	uint32_t h_active_total;

	/* 128b/132b (HPO) DP streams also need the encoder instance and
	 * PHYD32CLK source for DCCG programming
	 */
	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
		*phyd32clk = get_phyd32clk_src(stream->link);
	}

	if (dc_is_tmds_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor */
	h_active_total = stream->timing.h_addressable
			+ stream->timing.h_border_left
			+ stream->timing.h_border_right;

	if (lanes != 0)
		*early_control = h_active_total % lanes;

	if (*early_control == 0)
		*early_control = lanes;
}
840 
/* dcn401_enable_stream - program clocks and encoders to bring up a stream.
 *
 * Computes per-stream clock parameters via dcn401_enable_stream_calc(),
 * enables the appropriate symclk path in DCCG (symclk32_se for 128b/132b
 * links, symclk_se otherwise), then programs stream attributes, the pixel
 * rate divider, the stream encoder, the dmdata engine, info frames, and
 * finally the OTG early-control value.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk = PHYD32CLKA;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	/* Without unified link encoder assignment, look the encoder up from
	 * the link encoder config instead of the link resource.
	 */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
				&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			/* LINK_RATE_UNKNOWN means no trained link yet: keep
			 * symclk32_se gated rather than sourcing it from PHY.
			 */
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			/* 8b/10b path: symclk_se keyed by transmitter index */
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	link_hwss->setup_stream_attribute(pipe_ctx);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	/* dmdata engine is only programmed for non-immediate-flip surfaces */
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}
900 
/* Gate/ungate HPO (hybrid path out) IO via the HPO_TOP_HW_CONTROL register. */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
905 
/* Compensate the cursor X hotspot for 2x magnification when the cursor
 * straddles pipe slices: halve the hotspot, then nudge it right by one
 * pixel for cursors up to 128 wide and two pixels for wider cursors.
 */
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	uint32_t nudge = (cursor_width <= 128) ? 1 : 2;

	pos_cpy->x_hotspot /= 2;
	pos_cpy->x_hotspot += nudge;
}
916 
disable_link_output_symclk_on_tx_off(struct dc_link * link,enum dp_link_encoding link_encoding)917 static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
918 {
919 	struct dc *dc = link->ctx->dc;
920 	struct pipe_ctx *pipe_ctx = NULL;
921 	uint8_t i;
922 
923 	for (i = 0; i < MAX_PIPES; i++) {
924 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
925 		if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
926 			pipe_ctx->clock_source->funcs->program_pix_clk(
927 					pipe_ctx->clock_source,
928 					&pipe_ctx->stream_res.pix_clk_params,
929 					link_encoding,
930 					&pipe_ctx->pll_settings);
931 			break;
932 		}
933 	}
934 }
935 
/* dcn401_disable_link_output - turn off the PHY output for a link.
 *
 * For eDP, backlight is turned off before the PHY and panel power after;
 * for other signals the PHY is locked/unlocked around the disable via DMCU.
 * TMDS links with an active OTG refcount keep symclk running (TX off only)
 * to avoid glitches; everything else fully disables the link output.
 */
void dcn401_disable_link_output(struct dc_link *link,
		const struct link_resource *link_res,
		enum signal_type signal)
{
	struct dc *dc = link->ctx->dc;
	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
	struct dmcu *dmcu = dc->res_pool->dmcu;

	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_backlight_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->lock_phy(dmcu);

	/* Keep symclk sourced (TX off) while any OTG still references it */
	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
	} else {
		link_hwss->disable_link_output(link, link_res, signal);
		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
	}

	/* NOTE(review): this guard tests edp_backlight_control but calls
	 * edp_power_control — looks intentional (mirrors the guard above),
	 * but confirm edp_power_control is always set when
	 * edp_backlight_control is.
	 */
	if (signal == SIGNAL_TYPE_EDP &&
			link->dc->hwss.edp_backlight_control &&
			!link->skip_implict_edp_power_control)
		link->dc->hwss.edp_power_control(link, false);
	else if (dmcu != NULL && dmcu->funcs->lock_phy)
		dmcu->funcs->unlock_phy(dmcu);

	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
968 
/* dcn401_set_cursor_position - translate the stream-space cursor position
 * into per-pipe recout space and program HUBP/DPP cursor registers.
 *
 * Handles stream src->dst scaling, optional source-rect translation, ODM
 * slice offsets, MPC combine adjustment, hotspot compensation when the
 * cursor crosses slice/plane edges, and visibility clamping against the
 * pipe's recout.
 */
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int  bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	/* MPC combine is inferred from the pipe being part of a top/bottom
	 * chain while the plane's src rect differs from this pipe's viewport.
	 */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after Scaler, so in HW it is in
	 * recout space and for HW Cursor position programming need to
	 * translate to recout space.
	 *
	 * Cursor X and Y position programmed into HW can't be negative,
	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
	 * position that goes into HW X and Y coordinates while HW Hot spot
	 * X and Y coordinates are length relative to the cursor top left
	 * corner, hotspot must be smaller than the cursor size.
	 *
	 * DMs/DC interface for Cursor position is in stream->src space, and
	 * DMs supposed to transform Cursor coordinates to stream->src space,
	 * then here we need to translate Cursor coordinates to stream->dst
	 * space, as now in HW, Cursor coordinates are in per pipe recout
	 * space, and for the given pipe valid coordinates are only in range
	 * from 0,0 - recout width, recout height space.
	 * If certain pipe combining is in place, need to further adjust per
	 * pipe to make sure each pipe enabling cursor on its part of the
	 * screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		/* Accumulate the width of all ODM slices left of this pipe */
		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	/* Move into this pipe's recout space */
	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	/**
	 * If the cursor position is negative after recout adjustment, we need
	 * to shift the hotspot to compensate and clamp position to 0. This
	 * handles the case where cursor straddles the left/top edge of an
	 * overlay plane - the cursor is partially visible and needs correct
	 * hotspot adjustment to render the visible portion.
	 */
	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* Disable the cursor when no part of it intersects the recout */
	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge*/

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge*/

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge*/

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge*/

	pos_cpy.x = x_pos;
	pos_cpy.y = y_pos;

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
1142 
dcn401_check_no_memory_request_for_cab(struct dc * dc)1143 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1144 {
1145 	int i;
1146 
1147 	/* First, check no-memory-request case */
1148 	for (i = 0; i < dc->current_state->stream_count; i++) {
1149 		if ((dc->current_state->stream_status[i].plane_count) &&
1150 			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1151 			/* Fail eligibility on a visible stream */
1152 			return false;
1153 	}
1154 
1155 	return true;
1156 }
1157 
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1158 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1159 {
1160 	int i;
1161 	uint8_t num_ways = 0;
1162 	uint32_t mall_ss_size_bytes = 0;
1163 
1164 	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1165 	// TODO add additional logic for PSR active stream exclusion optimization
1166 	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1167 
1168 	// Include cursor size for CAB allocation
1169 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1170 		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1171 
1172 		if (!pipe->stream || !pipe->plane_state)
1173 			continue;
1174 
1175 		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1176 	}
1177 
1178 	// Convert number of cache lines required to number of ways
1179 	if (dc->debug.force_mall_ss_num_ways > 0)
1180 		num_ways = dc->debug.force_mall_ss_num_ways;
1181 	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1182 		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1183 	else
1184 		num_ways = 0;
1185 
1186 	return num_ways;
1187 }
1188 
/* dcn401_apply_idle_power_optimizations - enable/disable MALL SS (CAB) via DMUB.
 *
 * When enabling: sends NO_DCN_REQ if no stream makes memory requests,
 * otherwise computes the required cache ways and sends FIT_IN_CAB or
 * NOT_FIT_IN_CAB. When disabling: sends NO_IDLE_OPTIMIZATION.
 * Returns false (no command sent) if DMUB/state is unavailable or any
 * stream has PSR enabled; returns true once the command is executed.
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
1262 
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1263 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1264 		const struct pipe_ctx *top_pipe)
1265 {
1266 	bool is_wait_needed = false;
1267 	const struct pipe_ctx *pipe_ctx = top_pipe;
1268 
1269 	/* check if any surfaces are updating address while using flip immediate and dcc */
1270 	while (pipe_ctx != NULL) {
1271 		if (pipe_ctx->plane_state &&
1272 				pipe_ctx->plane_state->dcc.enable &&
1273 				pipe_ctx->plane_state->flip_immediate &&
1274 				pipe_ctx->plane_state->update_flags.bits.addr_update) {
1275 			is_wait_needed = true;
1276 			break;
1277 		}
1278 
1279 		/* check next pipe */
1280 		pipe_ctx = pipe_ctx->bottom_pipe;
1281 	}
1282 
1283 	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1284 		udelay(dc->debug.dcc_meta_propagation_delay_us);
1285 	}
1286 }
1287 
/* dcn401_prepare_bandwidth - raise clocks/watermarks before a state commit.
 *
 * Runs the "increase" half of bandwidth programming: clocks are raised,
 * watermarks/arbiter programmed, and compbuf shrunk, with P-State support
 * temporarily forced off during the transition to avoid hangs. The final
 * values are applied later by dcn401_optimize_bandwidth().
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* Lift the DC-mode softmax memclk cap if the new state needs more */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					false);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
	}

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	/* FAMS2 config update must be wrapped in the DMUB HW lock */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_dmub_hw_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_dmub_hw_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize. */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}
1345 
/* dcn401_optimize_bandwidth - lower clocks/watermarks after a state commit.
 *
 * Runs the "decrease" half of bandwidth programming: FAMS2 re-enabled,
 * final watermarks/arbiter applied, softmax memclk cap restored, compbuf
 * grown, clocks dropped, and extended blank programmed where applicable.
 * Counterpart to dcn401_prepare_bandwidth().
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_dmub_hw_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_dmub_hw_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					true);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
	}

	/* Re-apply the DC-mode softmax memclk cap once demand drops below it */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW) {
		/* Program extended blank on pipes with a fixed V-total above
		 * the nominal timing (DRR clamped).
		 */
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
						pipe_ctx->hubp_regs.dlg_regs.min_dst_y_next_start);
		}
	}
}
1395 
/* Acquire or release the DMUB HW lock on behalf of the driver via an
 * inbox0 command. No-op when DMUB is unavailable, or when neither FAMS2
 * nor cursor offload is active (nothing contends for the lock).
 */
void dcn401_dmub_hw_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock)
{
	union dmub_inbox0_cmd_lock_hw cmd = { 0 };

	(void)context;
	/* use always for now */

	if (!dc->ctx || !dc->ctx->dmub_srv)
		return;

	if (!dc->debug.fams2_config.bits.enable &&
			!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;

	cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	cmd.bits.lock = lock;
	cmd.bits.should_release = !lock;
	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, cmd);
}
1416 
dcn401_dmub_hw_control_lock_fast(union block_sequence_params * params)1417 void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
1418 {
1419 	struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
1420 	bool lock = params->dmub_hw_control_lock_fast_params.lock;
1421 
1422 	if (params->dmub_hw_control_lock_fast_params.is_required) {
1423 		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1424 
1425 		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1426 		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1427 		hw_lock_cmd.bits.lock = lock;
1428 		hw_lock_cmd.bits.should_release = !lock;
1429 		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1430 	}
1431 }
1432 
/* Push the FAMS2 stream config to DMUB for this state. The config is only
 * sent as "enabled" when the caller requests enable AND the state actually
 * uses FAMS2 (or the legacy non-FAMS2 method, which still needs the info).
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	bool stream_info_needed;

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	stream_info_needed =
			context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable ||
			context->bw_ctx.bw.dcn.fams2_global_config.features.bits.legacy_method_no_fams2;

	dc_dmub_srv_fams2_update_config(dc, context, enable && stream_info_needed);
}
1445 
/* update_dsc_for_odm_change - reprogram DSC when the ODM slice count changes.
 *
 * Programs DSC on the new state's OTG master (per its DSC timing flag) and
 * disconnects DSC blocks that were attached to old OPP heads but are no
 * longer used in the new state.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
									   &dc->current_state->res_ctx,
									   old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	/* Disconnect DSC from any old OPP head whose new pipe lost its DSC */
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}
1484 
/* dcn401_update_odm - apply an ODM combine configuration change.
 *
 * Programs OTG ODM combine (or bypass when only one OPP head remains),
 * enables OPP pipe clocks and left-edge extra pixel on each slice, updates
 * DSC for the new slice count, and re-blanks when no plane is attached.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
1529 
/* dcn401_add_dsc_sequence_for_odm_change - queue DSC reprogramming steps
 * into a block sequence for an ODM change (sequence-builder counterpart of
 * update_dsc_for_odm_change()).
 *
 * When DSC is enabled for the new state: sets DTO DSCCLK (for high pixel
 * clocks), calculates/sets DSC config and enables the DSC block for the
 * OTG master and each ODM pipe, then configures DSC in the timing
 * generator using the last calculated optc config. When DSC is being
 * turned off: disables DSC in OPTC and disconnects the block. Finally,
 * disconnects DSC from old OPP heads whose new pipes no longer use it.
 */
static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
{
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;
	int i;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
			&dc->current_state->res_ctx,
			old_opp_heads);
	} else {
		/* Current and new state may not share the same OTG pipe; skip
		 * old-pipe DSC handling in that case.
		 */
		old_otg_master = NULL;
	}

	/* Process new DSC configuration if DSC is enabled */
	if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
		struct dc_stream_state *stream = otg_master->stream;
		struct pipe_ctx *odm_pipe;
		int opp_cnt = 1;
		int last_dsc_calc = 0;
		bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
				stream->timing.pix_clk_100hz > 480000;

		/* Count ODM pipes */
		for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			opp_cnt++;

		/* Horizontal slices handled per DSC instance */
		int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;

		/* Step 1: Set DTO DSCCLK for main DSC if needed */
		if (should_use_dto_dscclk) {
			hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
					otg_master->stream_res.dsc->inst, num_slices_h);
		}

		/* Step 2: Calculate and set DSC config for main DSC */
		last_dsc_calc = *seq_state->num_steps;
		hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);

		/* Step 3: Enable main DSC block */
		hwss_add_dsc_enable_with_opp(seq_state, otg_master);

		/* Step 4: Configure and enable ODM DSC blocks */
		for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			if (!odm_pipe->stream_res.dsc)
				continue;

			/* Set DTO DSCCLK for ODM DSC if needed */
			if (should_use_dto_dscclk) {
				hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
						odm_pipe->stream_res.dsc->inst, num_slices_h);
			}

			/* Calculate and set DSC config for ODM DSC */
			last_dsc_calc = *seq_state->num_steps;
			hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);

			/* Enable ODM DSC block */
			hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
		}

		/* Step 5: Configure DSC in timing generator */
		hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
			&seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
	} else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
		/* Disable DSC in OPTC */
		hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);

		hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
	}

	/* Disable DSC for old pipes that no longer need it */
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];

			/* If old pipe had DSC but new pipe doesn't, disable the old DSC */
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
				/* Then disconnect DSC block */
				hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
			}
		}
	}
}
1621 
dcn401_update_odm_sequence(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master,struct block_sequence_state * seq_state)1622 void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
1623 		struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
1624 {
1625 	struct pipe_ctx *opp_heads[MAX_PIPES];
1626 	int opp_inst[MAX_PIPES] = {0};
1627 	int opp_head_count;
1628 	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
1629 	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
1630 	int i;
1631 
1632 	opp_head_count = resource_get_opp_heads_for_otg_master(
1633 			otg_master, &context->res_ctx, opp_heads);
1634 
1635 	for (i = 0; i < opp_head_count; i++)
1636 		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
1637 
1638 	/* Add ODM combine/bypass operation to sequence */
1639 	if (opp_head_count > 1) {
1640 		hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
1641 			opp_head_count, odm_slice_width, last_odm_slice_width);
1642 	} else {
1643 		hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
1644 	}
1645 
1646 	/* Add OPP operations to sequence */
1647 	for (i = 0; i < opp_head_count; i++) {
1648 		/* Add OPP pipe clock control operation */
1649 		hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
1650 
1651 		/* Add OPP program left edge extra pixel operation */
1652 		hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
1653 			opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
1654 	}
1655 
1656 	/* Add DSC update operations to sequence */
1657 	dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
1658 
1659 	/* Add blank pixel data operation if needed */
1660 	if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
1661 		if (dc->hwseq->funcs.blank_pixel_data_sequence)
1662 			dc->hwseq->funcs.blank_pixel_data_sequence(
1663 				dc, otg_master, true, seq_state);
1664 	}
1665 }
1666 
dcn401_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)1667 void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
1668 		struct dc_link_settings *link_settings)
1669 {
1670 	struct encoder_unblank_param params = {0};
1671 	struct dc_stream_state *stream = pipe_ctx->stream;
1672 	struct dc_link *link = stream->link;
1673 	struct dce_hwseq *hws = link->dc->hwseq;
1674 
1675 	/* calculate parameters for unblank */
1676 	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);
1677 
1678 	params.timing = pipe_ctx->stream->timing;
1679 	params.link_settings.link_rate = link_settings->link_rate;
1680 	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;
1681 
1682 	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
1683 		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
1684 				pipe_ctx->stream_res.hpo_dp_stream_enc,
1685 				pipe_ctx->stream_res.tg->inst);
1686 	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
1687 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
1688 	}
1689 
1690 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
1691 		hws->funcs.edp_backlight_control(link, true);
1692 }
1693 
dcn401_hardware_release(struct dc * dc)1694 void dcn401_hardware_release(struct dc *dc)
1695 {
1696 	if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
1697 		if (dc->ctx->dmub_srv && dc->debug.fams2_config.bits.enable)
1698 			dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1699 
1700 		/* If pstate unsupported, or still supported
1701 		* by firmware, force it supported by dcn
1702 		*/
1703 		if (dc->current_state) {
1704 			if ((!dc->clk_mgr->clks.p_state_change_support ||
1705 					dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
1706 					dc->res_pool->hubbub->funcs->force_pstate_change_control)
1707 				dc->res_pool->hubbub->funcs->force_pstate_change_control(
1708 						dc->res_pool->hubbub, true, true);
1709 
1710 			dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1711 			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1712 		}
1713 	} else {
1714 		if (dc->current_state) {
1715 			dc->clk_mgr->clks.p_state_change_support = false;
1716 			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
1717 		}
1718 
1719 		if (dc->ctx->dmub_srv && dc->debug.fams2_config.bits.enable)
1720 			dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
1721 	}
1722 }
1723 
dcn401_wait_for_det_buffer_update_under_otg_master(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1724 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1725 {
1726 	struct pipe_ctx *opp_heads[MAX_PIPES];
1727 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1728 	struct hubbub *hubbub = dc->res_pool->hubbub;
1729 	int dpp_count = 0;
1730 
1731 	if (!otg_master->stream)
1732 		return;
1733 
1734 	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1735 			&context->res_ctx, opp_heads);
1736 
1737 	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1738 		if (opp_heads[slice_idx]->plane_state) {
1739 			dpp_count = resource_get_dpp_pipes_for_opp_head(
1740 					opp_heads[slice_idx],
1741 					&context->res_ctx,
1742 					dpp_pipes);
1743 			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1744 				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1745 					if (dpp_pipe && hubbub &&
1746 						dpp_pipe->plane_res.hubp &&
1747 						hubbub->funcs->wait_for_det_update)
1748 						hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1749 			}
1750 		} else {
1751 			if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
1752 				hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
1753 		}
1754 	}
1755 }
1756 
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	if (lock) {
		/* Lock every enabled, non-phantom OTG master */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					tg->funcs->is_tg_enabled(tg) &&
					dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
				dc->hwss.pipe_control_lock(dc, pipe, true);
		}
		return;
	}

	/* Unlock: pipes that must free DET go first, and we wait for their
	 * DET update to land before releasing the remaining pipes.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		tg = pipe->stream_res.tg;

		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!tg->funcs->is_tg_enabled(tg) ||
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
			continue;

		if (dc->scratch.pipes_to_unlock_first[i]) {
			/* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe */
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			dc->hwss.pipe_control_lock(dc, pipe, false);
			dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
		}
	}

	/* Unlock the rest of the pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->scratch.pipes_to_unlock_first[i])
			continue;

		pipe = &context->res_ctx.pipe_ctx[i];
		tg = pipe->stream_res.tg;

		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!tg->funcs->is_tg_enabled(tg) ||
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
			continue;

		dc->hwss.pipe_control_lock(dc, pipe, false);
	}
}
1812 
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{
	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
	 * HUBP will properly fetch 3DLUT contents after unlock.
	 *
	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
	 * of whether OTG lock is currently being held or not.
	 */
	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
	struct pipe_ctx *odm_pipe, *mpc_pipe;
	int i, wa_pipe_ct = 0;

	/* Collect every pipe under this OTG (each ODM slice and each MPC pipe
	 * beneath it) that has 3DLUT enabled with DMA-based delivery.
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
			if (mpc_pipe->plane_state &&
					mpc_pipe->plane_state->cm.flags.bits.lut3d_enable &&
					mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) {
				wa_pipes[wa_pipe_ct++] = mpc_pipe;
			}
		}
	}

	if (wa_pipe_ct > 0) {
		/* Hold off VUPDATE while the workaround sequence runs */
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);

		/* Assert 3DLUT enable on the affected HUBPs before unlocking... */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);

		/* ...and assert it again once the unlock has completed, in case
		 * VREADY cancelled the pending enable (the HW issue above).
		 */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		/* Release the VUPDATE keepout now that the WA sequence is done */
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
	} else {
		/* No affected pipes: plain unlock */
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
	}
}
1859 
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1860 void dcn401_program_outstanding_updates(struct dc *dc,
1861 		struct dc_state *context)
1862 {
1863 	struct hubbub *hubbub = dc->res_pool->hubbub;
1864 
1865 	/* update compbuf if required */
1866 	if (hubbub->funcs->program_compbuf_segments)
1867 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1868 }
1869 
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	/* context is unused here; parameter kept to match the hwseq callback signature */
	(void)context;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	/* Nothing to tear down if no stream encoder was ever assigned */
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		/* Shut down the OTG and gate its clock before touching the PHY */
		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* Clear any pending DRR (variable refresh) adjustment */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);

		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

/*
 * In case of a dangling plane, setting this to NULL unconditionally
 * causes failures during reset hw ctx where, if stream is NULL,
 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
 */
	pipe_ctx->stream = NULL;
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
dc_hwss_disable_otg_pwa(struct dc * dc)1957 static void dc_hwss_disable_otg_pwa(struct dc *dc)
1958 {
1959 	if (dc->debug.enable_otg_frame_sync_pwa) {
1960 		int i;
1961 
1962 		/*reset all the otg*/
1963 		for (i = dc->res_pool->timing_generator_count - 1; i >= 0 ; i--) {
1964 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1965 
1966 			if (tg->funcs->disable_otg_pwa) {
1967 				tg->funcs->disable_otg_pwa(tg);
1968 				DC_LOG_DC("otg frame sync pwa disabled on otg%d\n", tg->inst);
1969 			}
1970 		}
1971 	}
1972 }
1973 
dcn401_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)1974 void dcn401_reset_hw_ctx_wrap(
1975 		struct dc *dc,
1976 		struct dc_state *context)
1977 {
1978 	int i;
1979 	struct dce_hwseq *hws = dc->hwseq;
1980 
1981 	dc_hwss_disable_otg_pwa(dc);
1982 	/* Reset Back End*/
1983 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1984 		struct pipe_ctx *pipe_ctx_old =
1985 			&dc->current_state->res_ctx.pipe_ctx[i];
1986 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1987 
1988 		if (!pipe_ctx_old->stream)
1989 			continue;
1990 
1991 		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
1992 			continue;
1993 
1994 		if (!pipe_ctx->stream ||
1995 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1996 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1997 
1998 			if (hws->funcs.reset_back_end_for_pipe)
1999 				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
2000 			if (hws->funcs.enable_stream_gating)
2001 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
2002 			if (old_clk)
2003 				old_clk->funcs->cs_power_down(old_clk);
2004 		}
2005 	}
2006 }
2007 
dcn401_calculate_vready_offset_for_group(struct pipe_ctx * pipe)2008 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
2009 {
2010 	struct pipe_ctx *other_pipe;
2011 	unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
2012 
2013 	/* Always use the largest vready_offset of all connected pipes */
2014 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
2015 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2016 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2017 	}
2018 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
2019 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2020 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2021 	}
2022 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
2023 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2024 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2025 	}
2026 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
2027 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2028 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2029 	}
2030 
2031 	return vready_offset;
2032 }
2033 
static void dcn401_program_tg(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dce_hwseq *hws)
{
	/* Program OTG global sync from the pipe's precomputed dcn4x values,
	 * using the group-wide maximum VREADY offset.
	 */
	pipe_ctx->stream_res.tg->funcs->program_global_sync(
		pipe_ctx->stream_res.tg,
		dcn401_calculate_vready_offset_for_group(pipe_ctx),
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

	/* Phantom (SubVP) pipes are not waited on for VACTIVE */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
		pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

	if (hws->funcs.setup_vupdate_interrupt)
		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
2057 
/* Program one pipe for the new context: blank/unblank, TG global sync, ODM,
 * plane enable, DET allocation, HUBP/DPP, transfer funcs, FMT, ABM and test
 * pattern. The order of these programming steps is deliberate.
 */
void dcn401_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.odm ||
			pipe_ctx->stream->update_flags.bits.abm_level)
			hws->funcs.blank_pixel_data(dc, pipe_ctx,
				!pipe_ctx->plane_state ||
				!pipe_ctx->plane_state->visible);
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
		&& !pipe_ctx->prev_odm_pipe)
		dcn401_program_tg(dc, pipe_ctx, context, hws);

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		/* Prefer the hwseq-private enable_plane when the platform provides one */
		if (hws->funcs.enable_plane)
			hws->funcs.enable_plane(dc, pipe_ctx, context);
		else
			dc->hwss.enable_plane(dc, pipe_ctx, context);

		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	/* Reprogram the DET allocation (size in KB and/or segments) */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size)
			dc->res_pool->hubbub->funcs->program_det_size(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		if (dc->res_pool->hubbub->funcs->program_det_segments)
			dc->res_pool->hubbub->funcs->program_det_segments(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
	}

	/* Any pipe/plane/stream update flag triggers HUBP/DPP reprogramming */
	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
	    pipe_ctx->plane_state->update_flags.raw ||
	    pipe_ctx->stream->update_flags.raw))
		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->plane_state->update_flags.bits.hdr_mult))
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	/* Input (degamma) transfer function */
	if (pipe_ctx->plane_state &&
		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable))
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
	    pipe_ctx->update_flags.bits.plane_changed ||
	    pipe_ctx->stream->update_flags.bits.out_tf)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interaction between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
		|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			dc->hwss.set_pipe(pipe_ctx);
			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
				pipe_ctx->stream->abm_level);
		}
	}

	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
		struct bit_depth_reduction_params params;

		/* Zeroed params: bypass bit-depth reduction while generating the pattern */
		memset(&params, 0, sizeof(params));
		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
		dc->hwss.set_disp_pattern_generator(dc,
			pipe_ctx,
			pipe_ctx->stream_res.test_pattern_params.test_pattern,
			pipe_ctx->stream_res.test_pattern_params.color_space,
			pipe_ctx->stream_res.test_pattern_params.color_depth,
			NULL,
			pipe_ctx->stream_res.test_pattern_params.width,
			pipe_ctx->stream_res.test_pattern_params.height,
			pipe_ctx->stream_res.test_pattern_params.offset);
	}
	if (pipe_ctx->plane_state
		&& pipe_ctx->plane_state->update_flags.bits.cm_hist_change
		&& hws->funcs.program_cm_hist)
		hws->funcs.program_cm_hist(dc, pipe_ctx, pipe_ctx->plane_state);
}
2177 
2178 /*
2179  * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
2180  *
2181  * This function creates a sequence-based version of the original dcn401_program_pipe
2182  * function. Instead of directly calling hardware programming functions, it appends
2183  * sequence steps to the provided block_sequence array that can later be executed
2184  * as part of hwss_execute_sequence.
2185  *
2186  */
void dcn401_program_pipe_sequence(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct block_sequence_state *seq_state)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
				pipe_ctx->update_flags.bits.odm ||
				pipe_ctx->stream->update_flags.bits.abm_level) {
			if (dc->hwseq->funcs.blank_pixel_data_sequence)
				dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
					 !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
					 seq_state);
		}
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
		&& !pipe_ctx->prev_odm_pipe) {

		/* Step 1: Program global sync (group-wide max VREADY offset) */
		hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
			dcn401_calculate_vready_offset_for_group(pipe_ctx),
			(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
			(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

		/* Step 2: Wait for VACTIVE state (if not phantom pipe) */
		if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
			hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

		/* Step 3: Set VTG params */
		hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		/* Step 4: Setup vupdate interrupt (if available) */
		if (hws->funcs.setup_vupdate_interrupt)
			dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
	}

	/* ODM topology change (combine/bypass plus DSC reconfiguration) */
	if (pipe_ctx->update_flags.bits.odm) {
		if (hws->funcs.update_odm_sequence)
			hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
	}

	if (pipe_ctx->update_flags.bits.enable) {
		if (dc->hwss.enable_plane_sequence)
			dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
	}

	/* DET reallocation (size in KB and/or segments) */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size) {
			hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
				pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		}

		if (dc->res_pool->hubbub->funcs->program_det_segments) {
			hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
				pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
		}
	}

	/* Any pipe/plane/stream update flag triggers HUBP/DPP reprogramming */
	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
	    pipe_ctx->plane_state->update_flags.raw ||
	    pipe_ctx->stream->update_flags.raw)) {

		if (dc->hwss.update_dchubp_dpp_sequence)
			dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
	}

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {

		hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
	}

	/* Input (degamma) transfer function */
	if (pipe_ctx->plane_state &&
		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable)) {

		hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
	}

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->stream->update_flags.bits.out_tf) {
		hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
	}

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interaction between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
		|| pipe_ctx->update_flags.bits.opp_changed) {

		hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);

		hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);

			hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
		}
	}

	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;

		/* true: bypass bit-depth reduction while generating the pattern */
		hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);

		hwss_add_opp_set_disp_pattern_generator(seq_state,
			odm_opp,
			pipe_ctx->stream_res.test_pattern_params.test_pattern,
			pipe_ctx->stream_res.test_pattern_params.color_space,
			pipe_ctx->stream_res.test_pattern_params.color_depth,
			(struct tg_color){0},
			false,
			pipe_ctx->stream_res.test_pattern_params.width,
			pipe_ctx->stream_res.test_pattern_params.height,
			pipe_ctx->stream_res.test_pattern_params.offset);
	}

	if (pipe_ctx->plane_state
			&& pipe_ctx->plane_state->update_flags.bits.cm_hist_change
			&& hws->funcs.program_cm_hist) {

		hwss_add_dpp_program_cm_hist(seq_state, pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_state->cm_hist_control, pipe_ctx->plane_state->color_space);
	}
}
2335 
/*
 * dcn401_program_front_end_for_ctx() - program front-end (HUBP/DPP/MPC/OPP)
 * hardware to move from dc->current_state to the new state in @context.
 *
 * The phases below are order sensitive: per-pipe update flags are detected
 * first, phantom OTGs being disabled are re-enabled so double-buffered
 * updates can latch, outgoing OTG masters are blanked, MPCCs are
 * disconnected, ODM is updated for blanked OTG masters, and finally each
 * pipe tree is programmed from the top pipe down so MPC blending is set up
 * in the correct order.
 */
void dcn401_program_front_end_for_ctx(
	struct dc *dc,
	struct dc_state *context)
{
	int i;
	unsigned int prev_hubp_count = 0;
	unsigned int hubp_count = 0;
	struct dce_hwseq *hws = dc->hwseq;
	struct pipe_ctx *pipe = NULL;

	if (resource_is_pipe_topology_changed(dc->current_state, context))
		resource_log_pipe_topology_update(dc, context);

	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];

			if (pipe->plane_state) {
				/* A full update is expected to arrive with triple
				 * buffering already off; flag the unexpected state.
				 */
				if (pipe->plane_state->triplebuffer_flips)
					BREAK_TO_DEBUGGER();

				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe, pipe->plane_state->triplebuffer_flips);
			}
		}
	}

	/* Count planes in the outgoing and incoming states to detect the
	 * zero-to-some-planes transition handled below.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			prev_hubp_count++;
		if (context->res_ctx.pipe_ctx[i].plane_state)
			hubp_count++;
	}

	if (prev_hubp_count == 0 && hubp_count > 0) {
		/* Force disallow p-state change while the first plane comes up;
		 * it is unforced again in dcn401_post_unlock_program_front_end.
		 */
		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, true, false);
		udelay(500);
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
			&context->res_ctx.pipe_ctx[i]);

	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
	 * buffer updates properly)
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;

		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
			dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;

			if (tg->funcs->enable_crtc) {
				/* Blank before enabling the phantom OTG so no stale
				 * pixel data is scanned out.
				 */
				if (dc->hwseq->funcs.blank_pixel_data)
					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);

				tg->funcs->enable_crtc(tg);
			}
		}
	}
	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			&& !context->res_ctx.pipe_ctx[i].top_pipe
			&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
			&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);

	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
			 * then we want to do the programming here (effectively it's being disabled). If we do
			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
			 * DET allocation.
			 */
			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
				(context->res_ctx.pipe_ctx[i].plane_state &&
				dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
				SUBVP_PHANTOM))) {
				if (hubbub->funcs->program_det_size)
					hubbub->funcs->program_det_size(hubbub,
						dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
				if (dc->res_pool->hubbub->funcs->program_det_segments)
					dc->res_pool->hubbub->funcs->program_det_segments(
						hubbub,	dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			}
			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
				&dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/* update ODM for blanked OTG master pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			!resource_is_pipe_type(pipe, DPP_PIPE) &&
			pipe->update_flags.bits.odm &&
			hws->funcs.update_odm)
			hws->funcs.update_odm(dc, context, pipe);
	}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* Walk the blending tree from top pipe to bottom pipe. */
			while (pipe) {
				if (hws->funcs.program_pipe)
					hws->funcs.program_pipe(dc, pipe, context);
				else {
					/* Don't program phantom pipes in the regular front end programming sequence.
					 * There is an MPO transition case where a pipe being used by a video plane is
					 * transitioned directly to be a phantom pipe when closing the MPO video.
					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
					 * right away) but the MPO still exists until the double buffered update of the
					 * main pipe so we will get a frame of underflow if the phantom pipe is
					 * programmed here.
					 */
					if (pipe->stream &&
						dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
						dcn401_program_pipe(dc, pipe, context);
				}

				pipe = pipe->bottom_pipe;
			}
		}

		/* Program secondary blending tree and writeback pipes */
		pipe = &context->res_ctx.pipe_ctx[i];
		if (!pipe->top_pipe && !pipe->prev_odm_pipe
			&& pipe->stream && pipe->stream->num_wb_info > 0
			&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
				|| pipe->stream->update_flags.raw)
			&& hws->funcs.program_all_writeback_pipes_in_tree)
			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);

		/* Avoid underflow by check of pipe line read when adding 2nd plane. */
		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
				!pipe->top_pipe &&
				pipe->stream &&
				pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
				dc->current_state->stream_status[0].plane_count == 1 &&
				context->stream_status[0].plane_count > 1) {
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
		}
	}
}
2498 
/*
 * dcn401_post_unlock_program_front_end() - finish front-end programming
 * after pipe locks have been released.
 *
 * Resets OPPs that lost their OPP-head role, disables planes flagged for
 * disable, waits for pending flips and ODM double-buffer updates to clear,
 * programs phantom (SubVP) pipes, then applies post-programming policy:
 * forced p-state handling, MALL configuration and hardware workarounds.
 */
void dcn401_post_unlock_program_front_end(
	struct dc *dc,
	struct dc_state *context)
{
	// Timeout for pipe enable
	unsigned int timeout_us = 100000;
	unsigned int polling_interval_us = 1;
	struct dce_hwseq *hwseq = dc->hwseq;
	int i;

	/* Reset OPPs that are OPP heads in the current state but no longer in
	 * the new state.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
			!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
			dc->hwss.post_unlock_reset_opp(dc,
				&dc->current_state->res_ctx.pipe_ctx[i]);

	/* Fully disable planes whose pipes were flagged for disable during
	 * front-end programming.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/*
	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
	 * part of the enable operation otherwise, DM may request an immediate flip which
	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
	 * is unsupported on DCN.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		// Don't check flip pending on phantom pipes
		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			struct hubp *hubp = pipe->plane_res.hubp;
			int j = 0;

			/* Poll (up to timeout_us) for the pending flip to clear. */
			for (j = 0; j < timeout_us / polling_interval_us
				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
				udelay(polling_interval_us);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* When going from a smaller ODM slice count to larger, we must ensure double
		 * buffer update completes before we return to ensure we don't reduce DISPCLK
		 * before we've transitioned to 2:1 or 4:1
		 */
		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
			resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			int j = 0;
			struct timing_generator *tg = pipe->stream_res.tg;

			if (tg->funcs->get_optc_double_buffer_pending) {
				for (j = 0; j < timeout_us / polling_interval_us
					&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
					udelay(polling_interval_us);
			}
		}
	}

	/* Release the p-state disallow that may have been forced when the
	 * first plane was enabled in dcn401_program_front_end_for_ctx.
	 */
	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
			dc->res_pool->hubbub, false, false);


	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
			 * programming sequence).
			 */
			while (pipe) {
				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
					/* When turning on the phantom pipe we want to run through the
					 * entire enable sequence, so apply all the "enable" flags.
					 */
					if (dc->hwss.apply_update_flags_for_phantom)
						dc->hwss.apply_update_flags_for_phantom(pipe);
					if (dc->hwss.update_phantom_vp_position)
						dc->hwss.update_phantom_vp_position(dc, context, pipe);
					dcn401_program_pipe(dc, pipe, context);
				}
				pipe = pipe->bottom_pipe;
			}
		}
	}

	if (!hwseq)
		return;

	/* P-State support transitions:
	 * Natural -> FPO:      P-State disabled in prepare, force disallow anytime is safe
	 * FPO -> Natural:      Unforce anytime after FW disable is safe (P-State will assert naturally)
	 * Unsupported -> FPO:  P-State enabled in optimize, force disallow anytime is safe
	 * FPO -> Unsupported:  P-State disabled in prepare, unforce disallow anytime is safe
	 * FPO <-> SubVP:       Force disallow is maintained on the FPO / SubVP pipes
	 */
	if (hwseq->funcs.update_force_pstate)
		dc->hwseq->funcs.update_force_pstate(dc, context);
	/* Only program the MALL registers after all the main and phantom pipes
	 * are done programming.
	 */
	if (hwseq->funcs.program_mall_pipe_config)
		hwseq->funcs.program_mall_pipe_config(dc, context);

	/* WA to apply WM setting*/
	if (hwseq->wa.DEGVIDCN21)
		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);


	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {

		if (dc->current_state->stream_status[0].plane_count == 1 &&
			context->stream_status[0].plane_count > 1) {

			struct timing_generator *tg = dc->res_pool->timing_generators[0];

			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);

			/* Record that the WA is active and on which frame it was
			 * applied, so it can be lifted later.
			 */
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
				tg->funcs->get_frame_count(tg);
		}
	}
}
2630 
dcn401_update_bandwidth(struct dc * dc,struct dc_state * context)2631 bool dcn401_update_bandwidth(
2632 	struct dc *dc,
2633 	struct dc_state *context)
2634 {
2635 	int i;
2636 	struct dce_hwseq *hws = dc->hwseq;
2637 
2638 	/* recalculate DML parameters */
2639 	if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
2640 		return false;
2641 
2642 	/* apply updated bandwidth parameters */
2643 	dc->hwss.prepare_bandwidth(dc, context);
2644 
2645 	/* update hubp configs for all pipes */
2646 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2647 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2648 
2649 		if (pipe_ctx->plane_state == NULL)
2650 			continue;
2651 
2652 		if (pipe_ctx->top_pipe == NULL) {
2653 			bool blank = !is_pipe_tree_visible(pipe_ctx);
2654 
2655 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
2656 				pipe_ctx->stream_res.tg,
2657 				dcn401_calculate_vready_offset_for_group(pipe_ctx),
2658 				(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2659 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2660 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2661 				(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2662 
2663 			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2664 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
2665 
2666 			if (pipe_ctx->prev_odm_pipe == NULL)
2667 				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2668 
2669 			if (hws->funcs.setup_vupdate_interrupt)
2670 				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2671 		}
2672 
2673 		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
2674 			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
2675 				pipe_ctx->plane_res.hubp,
2676 				&pipe_ctx->hubp_regs,
2677 				&pipe_ctx->global_sync,
2678 				&pipe_ctx->stream->timing);
2679 	}
2680 
2681 	return true;
2682 }
2683 
/*
 * dcn401_detect_pipe_changes() - diff @old_pipe (from @old_state) against
 * @new_pipe (from @new_state) and set update_flags on @new_pipe describing
 * the minimum reprogramming needed for the transition.
 */
void dcn401_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	/* Snapshot the global sync parameters compared below. */
	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;

	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
		new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
		resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* Newly enabled pipe: request full programming of all blocks. */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
			new_pipe->stream_res.test_pattern_params.width != 0 &&
			new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
		new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state)
		new_pipe->update_flags.bits.plane_changed = true;

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes */
		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
			|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
			|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
			|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
		|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
		|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
			&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* The old register sets are copied by value so that fields
		 * already flagged as "interdependent" below can be overwritten
		 * in the local copies, leaving the catch-all memcmp at the end
		 * to report only the remaining differences.
		 */
		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 old_rq_regs = old_pipe->hubp_regs.rq_regs;
		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 *new_rq_regs = &new_pipe->hubp_regs.rq_regs;

		/* Detect pipe interdependent updates */
		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
			|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
			|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
			|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
			|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
			|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
			|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
			|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
				new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
			|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
			|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
			/* Neutralize the already-flagged fields in the local
			 * snapshots so the memcmp below does not re-report them.
			 */
			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
			memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
			memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
		&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}
2880 
dcn401_plane_atomic_power_down(struct dc * dc,struct dpp * dpp,struct hubp * hubp)2881 void dcn401_plane_atomic_power_down(struct dc *dc,
2882 		struct dpp *dpp,
2883 		struct hubp *hubp)
2884 {
2885 	struct dce_hwseq *hws = dc->hwseq;
2886 	uint32_t org_ip_request_cntl = 0;
2887 
2888 	if (REG(DC_IP_REQUEST_CNTL)) {
2889 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
2890 		if (org_ip_request_cntl == 0)
2891 			REG_SET(DC_IP_REQUEST_CNTL, 0,
2892 				IP_REQUEST_EN, 1);
2893 	}
2894 
2895 	if (hws->funcs.dpp_pg_control)
2896 		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
2897 
2898 	if (hws->funcs.hubp_pg_control)
2899 		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
2900 
2901 	hubp->funcs->hubp_reset(hubp);
2902 	dpp->funcs->dpp_reset(dpp);
2903 
2904 	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
2905 		REG_SET(DC_IP_REQUEST_CNTL, 0,
2906 			IP_REQUEST_EN, 0);
2907 
2908 	DC_LOG_DEBUG(
2909 			"Power gated front end %d\n", hubp->inst);
2910 
2911 	if (hws->funcs.dpp_root_clock_control)
2912 		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
2913 }
2914 
/*
 * dcn401_update_cursor_offload_pipe() - mirror this pipe's cursor HUBP and
 * DPP register state into the next payload slot of the DMUB cursor offload
 * buffer (the buffer comes from the DMUB service and is accessed through
 * volatile pointers, as it appears to be shared with firmware).
 *
 * Payloads are grouped per OTG master (stream_idx); the pipe's bit is set
 * in pipe_mask once its data has been written.
 */
void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
{
	volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
	const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
	const struct hubp *hubp = pipe->plane_res.hubp;
	const struct dpp *dpp = pipe->plane_res.dpp;
	volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
	uint32_t stream_idx, write_idx, payload_idx;

	/* Nothing to report without an OTG master or cursor HW blocks. */
	if (!top_pipe || !hubp || !dpp)
		return;

	stream_idx = top_pipe->pipe_idx;
	write_idx = cs->offload_streams[stream_idx].write_idx + 1; /*  new payload (+1) */
	payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);

	p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;

	/* HUBP cursor surface, size, position, hotspot and control state. */
	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
	p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
	p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
	p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
	p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;

	/* DPP (CM block) cursor control; COLOR0/COLOR1 use fixed values. */
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
	p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
	p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;

	/* Fixed-point scale/bias for luma and chroma channels. */
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
		dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
		dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
		dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
		dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;

	/* HUBP request-side cursor settings and MALL usage. */
	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
	p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;

	/* Mark this pipe's data as present in the payload. */
	cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
}
2970 
/*
 * dcn401_plane_atomic_power_down_sequence() - block-sequence variant of
 * dcn401_plane_atomic_power_down(): queues the same reset/power-gating
 * steps into @seq_state for later execution instead of programming the
 * hardware directly.
 *
 * Note: DC_IP_REQUEST_CNTL is still read immediately (REG_GET) so we know
 * whether the enable/restore steps need to be part of the sequence.
 */
void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp,
		struct block_sequence_state *seq_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	uint32_t org_ip_request_cntl = 0;

	/* Check and set DC_IP_REQUEST_CNTL if needed */
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
		if (org_ip_request_cntl == 0)
			hwss_add_dc_ip_request_cntl(seq_state, dc, true);
	}

	/* DPP power gating control */
	hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);

	/* HUBP power gating control */
	hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);

	/* HUBP reset */
	hwss_add_hubp_reset(seq_state, hubp);

	/* DPP reset */
	hwss_add_dpp_reset(seq_state, dpp);

	/* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
		hwss_add_dc_ip_request_cntl(seq_state, dc, false);

	DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);

	/* DPP root clock control */
	hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
}
3007 
/*
 * dcn401_plane_atomic_disconnect_sequence() - queue the steps that trigger
 * HW to start disconnecting a plane from its stream on the next vsync,
 * using the block sequence.
 *
 * Looks up the MPCC currently bound to this pipe's DPP; if it has already
 * been removed, nothing is queued.
 */
void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx,
		struct block_sequence_state *seq_state)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	/* Step 1: Remove MPCC from MPC tree */
	hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);

	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
		/* Step 2: Set MPCC disconnect pending flag */
		hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
	}

	/* Step 3: Set optimized required flag */
	hwss_add_dc_set_optimized_required(seq_state, dc, true);

	/* Step 4: Disconnect HUBP if function exists */
	if (hubp->funcs->hubp_disconnect)
		hwss_add_hubp_disconnect(seq_state, hubp);

	/* Step 5: Verify pstate change high if debug sanity checks are enabled */
	if (dc->debug.sanity_checks)
		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
}
3049 
/* Append the steps that blank or unblank pixel data on every ODM slice of a
 * stream. Blanking programs the OPP display pattern generator with solid
 * black (or colour squares when visual confirm is enabled) and immediately
 * disables ABM; unblanking switches the generator back to VIDEOMODE and
 * restores the ABM pipe/level. No-op while a link test pattern is active.
 *
 * Fix: the pattern-generator call was duplicated - once inside the ODM walk
 * and once after it for the final pipe. A single loop over the whole chain
 * performs the identical per-slice programming without the duplicated tail.
 */
void dcn401_blank_pixel_data_sequence(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	bool blank,
	struct block_sequence_state *seq_state)
{
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space = stream->output_color_space;
	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
	struct pipe_ctx *odm_pipe;
	struct rect odm_slice_src;

	/* Don't disturb an active link test pattern */
	if (stream->link->test_pattern_enabled)
		return;

	/* get opp dpg blank color */
	color_space_to_black_color(dc, color_space, &black_color);

	if (blank) {
		/* Set ABM immediate disable */
		hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);

		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
		}
	} else {
		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
	}

	/* Set display pattern generator for every ODM pipe in the chain,
	 * each with its own ODM slice source rect
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);

		hwss_add_opp_set_disp_pattern_generator(seq_state,
			odm_pipe->stream_res.opp,
			test_pattern,
			test_pattern_color_space,
			stream->timing.display_color_depth,
			black_color,
			true,
			odm_slice_src.width,
			odm_slice_src.height,
			odm_slice_src.x);
	}

	/* Handle ABM level setting when not blanking */
	if (!blank) {
		if (stream_res->abm) {
			/* Set pipe for ABM */
			hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);

			/* Set ABM level */
			hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
		}
	}
}
3128 
dcn401_program_all_writeback_pipes_in_tree_sequence(struct dc * dc,const struct dc_stream_state * stream,struct dc_state * context,struct block_sequence_state * seq_state)3129 void dcn401_program_all_writeback_pipes_in_tree_sequence(
3130 		struct dc *dc,
3131 		const struct dc_stream_state *stream,
3132 		struct dc_state *context,
3133 		struct block_sequence_state *seq_state)
3134 {
3135 	struct dwbc *dwb;
3136 	int i_wb, i_pipe;
3137 
3138 	if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
3139 		return;
3140 
3141 	/* For each writeback pipe */
3142 	for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
3143 		/* Get direct pointer to writeback info */
3144 		struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
3145 		int mpcc_inst = -1;
3146 
3147 		if (wb_info->wb_enabled) {
3148 			/* Get the MPCC instance for writeback_source_plane */
3149 			for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
3150 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
3151 
3152 				if (!pipe_ctx->plane_state)
3153 					continue;
3154 
3155 				if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
3156 					mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
3157 					break;
3158 				}
3159 			}
3160 
3161 			if (mpcc_inst == -1) {
3162 				/* Disable writeback pipe and disconnect from MPCC
3163 				 * if source plane has been removed
3164 				 */
3165 				dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
3166 				continue;
3167 			}
3168 
3169 			ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
3170 			dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3171 
3172 			if (dwb->funcs->is_enabled(dwb)) {
3173 				/* Writeback pipe already enabled, only need to update */
3174 				dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
3175 			} else {
3176 				/* Enable writeback pipe and connect to MPCC */
3177 				dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
3178 			}
3179 		} else {
3180 			/* Disable writeback pipe and disconnect from MPCC */
3181 			dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
3182 		}
3183 	}
3184 }
3185 
/* Append the steps that bring up one writeback pipe, in order: program the
 * DWBC parameters, configure MCIF_WB buffers and arbitration, enable
 * MCIF_WB, route the source MPCC to the DWB through the MPC mux, and
 * finally enable the DWBC. No-op if the slot is not enabled or the DWB
 * instance is out of range.
 */
void dcn401_enable_writeback_sequence(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct dc_state *context,
		int mpcc_inst,
		struct block_sequence_state *seq_state)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;

	if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
		return;

	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];

	/* Update DWBC with new parameters */
	hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);

	/* Configure MCIF_WB buffer settings */
	hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);

	/* Configure MCIF_WB arbitration from the context's writeback BW data */
	hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);

	/* Enable MCIF_WB */
	hwss_add_mcif_wb_enable(seq_state, mcif_wb);

	/* Set DWB MUX to connect writeback to MPCC */
	hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);

	/* Enable DWBC last, once the data path behind it is ready */
	hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
}
3220 
/* Append the steps that tear down one writeback pipe, in the reverse order
 * of enable: disable the DWBC first, then disconnect the DWB mux from the
 * MPC, then disable MCIF_WB. No-op when the DWB instance is out of range.
 */
void dcn401_disable_writeback_sequence(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct block_sequence_state *seq_state)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;

	if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
		return;

	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];

	/* Disable DWBC */
	hwss_add_dwbc_disable(seq_state, dwb);

	/* Disable DWB MUX */
	hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);

	/* Disable MCIF_WB */
	hwss_add_mcif_wb_disable(seq_state, mcif_wb);
}
3244 
dcn401_update_writeback_sequence(struct dc * dc,struct dc_writeback_info * wb_info,struct dc_state * context,struct block_sequence_state * seq_state)3245 void dcn401_update_writeback_sequence(
3246 		struct dc *dc,
3247 		struct dc_writeback_info *wb_info,
3248 		struct dc_state *context,
3249 		struct block_sequence_state *seq_state)
3250 {
3251 	(void)context;
3252 	struct dwbc *dwb;
3253 	struct mcif_wb *mcif_wb;
3254 
3255 	if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
3256 		return;
3257 
3258 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3259 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
3260 
3261 	/* Update writeback pipe */
3262 	hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
3263 
3264 	/* Update MCIF_WB buffer settings if needed */
3265 	hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
3266 }
3267 
find_free_gsl_group(const struct dc * dc)3268 static int find_free_gsl_group(const struct dc *dc)
3269 {
3270 	if (dc->res_pool->gsl_groups.gsl_0 == 0)
3271 		return 1;
3272 	if (dc->res_pool->gsl_groups.gsl_1 == 0)
3273 		return 2;
3274 	if (dc->res_pool->gsl_groups.gsl_2 == 0)
3275 		return 3;
3276 
3277 	return 0;
3278 }
3279 
/* Acquire (enable=true) or release (enable=false) a GSL group for this
 * pipe and queue the timing-generator programming that applies it. The
 * group assignment is tracked in two places that are kept in sync here:
 * pipe_ctx->stream_res.gsl_group and the shared dc->res_pool->gsl_groups
 * usage flags.
 */
void dcn401_setup_gsl_group_as_lock_sequence(
		const struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool enable,
		struct block_sequence_state *seq_state)
{
	struct gsl_params gsl;
	int group_idx;

	memset(&gsl, 0, sizeof(struct gsl_params));

	if (enable) {
		/* return if group already assigned since GSL was set up
		 * for vsync flip, we would unassign so it can't be "left over"
		 */
		if (pipe_ctx->stream_res.gsl_group > 0)
			return;

		group_idx = find_free_gsl_group(dc);
		ASSERT(group_idx != 0);
		pipe_ctx->stream_res.gsl_group = group_idx;

		/* set gsl group reg field and mark resource used */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 1;
			dc->res_pool->gsl_groups.gsl_0 = 1;
			break;
		case 2:
			gsl.gsl1_en = 1;
			dc->res_pool->gsl_groups.gsl_1 = 1;
			break;
		case 3:
			gsl.gsl2_en = 1;
			dc->res_pool->gsl_groups.gsl_2 = 1;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return; // invalid case
		}
		gsl.gsl_master_en = 1;
	} else {
		group_idx = pipe_ctx->stream_res.gsl_group;
		if (group_idx == 0)
			return; // if not in use, just return

		pipe_ctx->stream_res.gsl_group = 0;

		/* unset gsl group reg field and mark resource free */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 0;
			dc->res_pool->gsl_groups.gsl_0 = 0;
			break;
		case 2:
			gsl.gsl1_en = 0;
			dc->res_pool->gsl_groups.gsl_1 = 0;
			break;
		case 3:
			gsl.gsl2_en = 0;
			dc->res_pool->gsl_groups.gsl_2 = 0;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return;
		}
		gsl.gsl_master_en = 0;
	}

	/* Queue the TG programming: gsl params, then the group source select
	 * (4 when enabling, 0 when disabling)
	 */
	hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
	hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
}
3352 
/* Append the full teardown of a plane's front end: wait for MPCC
 * disconnect, release its GSL group, gate HUBP/DPP clocks, power down the
 * DPP/HUBP pair, then clear the pipe_ctx bookkeeping. For a phantom
 * (SubVP) pipe the phantom OTG is switched off at the very end. No-op if
 * the HUBP is absent or already power gated.
 */
void dcn401_disable_plane_sequence(
		struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx,
		struct block_sequence_state *seq_state)
{
	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
	/* Capture the TG now; stream_res is memset below before it is needed */
	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	/* Wait for MPCC disconnect */
	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);

	/* In flip immediate with pipe splitting case GSL is used for synchronization
	 * so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);

	/* Update HUBP mall sel */
	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
		hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);

	/* Set flip control GSL */
	hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);

	/* HUBP clock control - disable */
	hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);

	/* DPP clock control - disable */
	hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);

	/* Plane atomic power down */
	if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
		dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp, seq_state);

	/* Clear all pipe_ctx state so the pipe reads as free */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->plane_state = NULL;

	/* Turn back off the phantom OTG after the phantom plane is fully disabled */
	if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
		hwss_add_disable_phantom_crtc(seq_state, tg);
}
3406 
/* Append the post-unlock OPP reset steps for an OPP head pipe: wait for
 * all DPPs in its MPC blending tree to finish their double-buffered
 * disconnect, then (if a DSC is attached) disable the DSC and drop its
 * reference DSCCLK.
 */
void dcn401_post_unlock_reset_opp_sequence(
		struct dc *dc,
		struct pipe_ctx *opp_head,
		struct block_sequence_state *seq_state)
{
	struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
	struct dccg *dccg = dc->res_pool->dccg;

	/* Wait for all DPP pipes in current mpc blending tree completes double
	 * buffered disconnection before resetting OPP
	 */
	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);

	if (dsc) {
		/* NOTE(review): is_ungated is passed to the steps below while
		 * still NULL here - presumably the dsc_pg_status step fills it
		 * in at execution time; confirm against the sequence executor.
		 */
		bool *is_ungated = NULL;
		/* Check DSC power gate status */
		if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
			hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);

		/* Seamless update specific where we will postpone non
		 * double buffered DSCCLK disable logic in post unlock
		 * sequence after DSC is disconnected from OPP but not
		 * yet power gated.
		 */

		/* DSC wait disconnect pending clear */
		hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);

		/* DSC disable */
		hwss_add_dsc_disable(seq_state, dsc, is_ungated);

		/* Set reference DSCCLK to 0 */
		if (dccg && dccg->funcs->set_ref_dscclk)
			hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
	}
}
3444 
/* Immediately program DC_IP_REQUEST_CNTL.IP_REQUEST_EN (a direct register
 * write, not a queued block-sequence step). No-op when the register does
 * not exist on this ASIC.
 */
void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
{
	struct dce_hwseq *hws = dc->hwseq;
	uint32_t request_en = enable ? 1 : 0;

	if (REG(DC_IP_REQUEST_CNTL))
		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, request_en);
}
3452 
dcn401_enable_plane_sequence(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context,struct block_sequence_state * seq_state)3453 void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
3454 				 struct dc_state *context,
3455 				 struct block_sequence_state *seq_state)
3456 {
3457 	(void)context;
3458 	struct dce_hwseq *hws = dc->hwseq;
3459 	uint32_t org_ip_request_cntl = 0;
3460 
3461 	if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
3462 		return;
3463 
3464 	if (REG(DC_IP_REQUEST_CNTL))
3465 		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
3466 
3467 	/* Step 1: DPP root clock control - enable clock */
3468 	if (hws->funcs.dpp_root_clock_control)
3469 		hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
3470 
3471 	/* Step 2: Enable DC IP request (if needed) */
3472 	if (hws->funcs.dc_ip_request_cntl)
3473 		hwss_add_dc_ip_request_cntl(seq_state, dc, true);
3474 
3475 	/* Step 3: DPP power gating control - power on */
3476 	if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
3477 		hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
3478 
3479 	/* Step 4: HUBP power gating control - power on */
3480 	if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
3481 		hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
3482 
3483 	/* Step 5: Disable DC IP request (restore state) */
3484 	if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
3485 		hwss_add_dc_ip_request_cntl(seq_state, dc, false);
3486 
3487 	/* Step 6: HUBP clock control - enable DCFCLK */
3488 	if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
3489 		hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
3490 
3491 	/* Step 7: HUBP initialization */
3492 	if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
3493 		hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
3494 
3495 	/* Step 8: OPP pipe clock control - enable */
3496 	if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
3497 		hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
3498 
3499 	/* Step 9: VM system aperture settings */
3500 	if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
3501 		hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
3502 			dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
3503 	}
3504 
3505 	/* Step 10: Flip interrupt setup */
3506 	if (!pipe_ctx->top_pipe
3507 			&& pipe_ctx->plane_state
3508 			&& pipe_ctx->plane_state->flip_int_enabled
3509 			&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
3510 		hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
3511 	}
3512 }
3513 
/* Append the full DCHUBP/DPP programming sequence for a pipe: clocks and
 * DTOs, DLG/TTU/RQ setup, input CSC and scaler, MPCC blending, viewport,
 * mcache, cursor, gamut remap/output CSC, surface configuration, and the
 * final plane address update and unblank. Every step is gated on the
 * relevant pipe/plane/stream update flags so only changed state is
 * reprogrammed. Statement order mirrors the required hardware programming
 * order and must be preserved.
 */
void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
				       struct pipe_ctx *pipe_ctx,
				       struct dc_state *context,
				       struct block_sequence_state *seq_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct dccg *dccg = dc->res_pool->dccg;
	bool viewport_changed = false;
	enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);

	if (!hubp || !dpp || !plane_state)
		return;

	/* Step 1: DPP DPPCLK control */
	if (pipe_ctx->update_flags.bits.dppclk)
		hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);

	/* Step 2: DCCG update DPP DTO */
	if (pipe_ctx->update_flags.bits.enable)
		hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);

	/* Step 3: HUBP VTG selection */
	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
		hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);

		/* Step 4: HUBP setup (newer hubp_setup2 preferred over hubp_setup) */
		if (hubp->funcs->hubp_setup2) {
			hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
				&pipe_ctx->global_sync, &pipe_ctx->stream->timing);
		} else if (hubp->funcs->hubp_setup) {
			hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
		}
	}

	/* Step 5: Set unbounded requesting */
	if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
		hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req)

	/* Step 6: HUBP interdependent setup (newer variant preferred) */
	if (pipe_ctx->update_flags.bits.hubp_interdependent) {
		if (hubp->funcs->hubp_setup_interdependent2)
			hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
		else if (hubp->funcs->hubp_setup_interdependent)
			hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
	}

	/* Step 7: DPP setup - input CSC and format setup */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.input_csc_change ||
			plane_state->update_flags.bits.color_space_change ||
			plane_state->update_flags.bits.coeff_reduction_change) {
		hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);

		/* Step 8: DPP cursor matrix setup */
		if (dpp->funcs->set_cursor_matrix) {
			hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
				&plane_state->cursor_csc_color_matrix);
		}

		/* Step 9: DPP program bias and scale */
		if (dpp->funcs->dpp_program_bias_and_scale)
			hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
	}

	/* Step 10: MPCC updates */
	if (pipe_ctx->update_flags.bits.mpcc ||
	     pipe_ctx->update_flags.bits.plane_changed ||
	     plane_state->update_flags.bits.global_alpha_change ||
	     plane_state->update_flags.bits.per_pixel_alpha_change) {

		/* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */
		if (hws->funcs.update_mpcc_sequence)
			hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
	}

	/* Step 11: DPP scaler setup */
	if (pipe_ctx->update_flags.bits.scaler ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			pipe_ctx->stream->update_flags.bits.scaling) {
		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
		/* Line buffer depth is expected to be fixed at 36bpp on this ASIC */
		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
		hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
	}

	/* Step 12: HUBP viewport programming (position/scaling changes only
	 * take effect immediately when programming the current state)
	 */
	if (pipe_ctx->update_flags.bits.viewport ||
	     (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
	     (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
	     (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
		hwss_add_hubp_mem_program_viewport(seq_state, hubp,
			&pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
		viewport_changed = true;
	}

	/* Step 13: HUBP program mcache if available */
	if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
		hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);

	/* Step 14: Cursor attribute setup (only when a cursor surface is set) */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
	     pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
	    pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {

		hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);

		hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);

		/* Step 15: Cursor position setup */
		hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);

		/* Step 16: Cursor SDR white level */
		if (dc->hwss.set_cursor_sdr_white_level)
			hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
	}

	/* Step 17: Gamut remap and output CSC */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->stream->update_flags.bits.gamut_remap ||
			plane_state->update_flags.bits.gamut_remap_change ||
			pipe_ctx->stream->update_flags.bits.out_csc) {

		/* Gamut remap */
		hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);

		/* Output CSC */
		hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
			pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
	}

	/* Step 18: HUBP surface configuration */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->update_flags.bits.opp_changed ||
			plane_state->update_flags.bits.pixel_format_change ||
			plane_state->update_flags.bits.horizontal_mirror_change ||
			plane_state->update_flags.bits.rotation_change ||
			plane_state->update_flags.bits.swizzle_change ||
			plane_state->update_flags.bits.dcc_change ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.plane_size_change) {
		struct plane_size size = plane_state->plane_size;

		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
		hwss_add_hubp_program_surface_config(seq_state, hubp,
				plane_state->format, &plane_state->tiling_info, size,
				plane_state->rotation, &plane_state->dcc,
				plane_state->horizontal_mirror, 0);
		hubp->power_gated = false;
	}

	/* Step 19: Update plane address (with SubVP support) */
	if (pipe_ctx->update_flags.bits.enable ||
	     pipe_ctx->update_flags.bits.plane_changed ||
	     plane_state->update_flags.bits.addr_update) {

		/* SubVP save surface address if needed */
		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
			hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
				&pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
		}

		/* Update plane address */
		hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
	}

	/* Step 20: HUBP set blank - enable plane */
	if (pipe_ctx->update_flags.bits.enable)
		hwss_add_hubp_set_blank(seq_state, hubp, false);

	/* Step 21: Phantom HUBP post enable */
	if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
		hwss_add_phantom_hubp_post_enable(seq_state, hubp);
}
3697 
/* Build the MPCC blend configuration for a pipe's plane and queue the MPC
 * programming. When neither a full plane update nor an mpcc update is
 * flagged, only the blend config and visual confirm color are refreshed;
 * otherwise the MPCC is removed from the tree (if present) and re-inserted
 * with the new blend config.
 */
void dcn401_update_mpcc_sequence(struct dc *dc,
				struct pipe_ctx *pipe_ctx,
				struct block_sequence_state *seq_state)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	bool per_pixel_alpha;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	if (!hubp || !pipe_ctx->plane_state)
		return;

	per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;

	/* Initialize blend configuration: select the alpha mode from the
	 * per-pixel/global alpha combination of the plane state
	 */
	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/* Fixed gain settings (0x1f000 appears to be the unity encoding for
	 * the gain fields - confirm against the MPC register spec)
	 */
	blnd_cfg.background_color_bpc = 4;
	blnd_cfg.bottom_gain_mode = 0;
	blnd_cfg.top_gain = 0x1f000;
	blnd_cfg.bottom_inside_gain = 0x1f000;
	blnd_cfg.bottom_outside_gain = 0x1f000;

	/* RGBE alpha cannot be treated as pre-multiplied */
	if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
		blnd_cfg.pre_multiplied_alpha = false;

	/* MPCC instance is equal to HUBP instance */
	mpcc_id = hubp->inst;

	/* Step 1: Update blending only if no full update needed */
	if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
	    !pipe_ctx->update_flags.bits.mpcc) {

		/* Update blending configuration */
		hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);

		/* Update visual confirm color */
		hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
		return;
	}

	/* Step 2: Get existing MPCC for DPP */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);

	/* Step 3: Remove MPCC if being used */
	if (new_mpcc != NULL) {
		hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
	} else {
		/* Step 4: Assert MPCC idle (debug only) */
		if (dc->debug.sanity_checks)
			hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
	}

	/* Step 5: Insert new plane into MPC tree */
	hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);

	/* Step 6: Update visual confirm color */
	hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);

	/* Step 7: Set HUBP OPP and MPCC IDs */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
3783 
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3784 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3785 {
3786 	int i;
3787 
3788 	for (i = 0; i < res_pool->pipe_count; i++) {
3789 		if (res_pool->hubps[i]->inst == mpcc_inst)
3790 			return res_pool->hubps[i];
3791 	}
3792 	ASSERT(false);
3793 	return NULL;
3794 }
3795 
/* Append the steps that wait out pending MPCC disconnects on this pipe's
 * OPP: for every MPCC flagged disconnect-pending, assert it idle (only if
 * the TG is running, since a stopped TG never completes the disconnect),
 * clear the pending flag, and blank the matching HUBP. Optionally wraps
 * the work in pstate-change sanity checks.
 */
void dcn401_wait_for_mpcc_disconnect_sequence(
		struct dc *dc,
		struct resource_pool *res_pool,
		struct pipe_ctx *pipe_ctx,
		struct block_sequence_state *seq_state)
{
	int mpcc_inst;

	if (dc->debug.sanity_checks)
		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);

	if (!pipe_ctx->stream_res.opp)
		return;

	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
			/* MPCC and HUBP instances are paired 1:1 */
			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

			if (pipe_ctx->stream_res.tg &&
				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
				hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
			}
			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
			if (hubp)
				hwss_add_hubp_set_blank(seq_state, hubp, true);
		}
	}

	if (dc->debug.sanity_checks)
		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
}
3827 
dcn401_setup_vupdate_interrupt_sequence(struct dc * dc,struct pipe_ctx * pipe_ctx,struct block_sequence_state * seq_state)3828 void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
3829 		struct block_sequence_state *seq_state)
3830 {
3831 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3832 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3833 
3834 	if (start_line < 0)
3835 		start_line = 0;
3836 
3837 	if (tg->funcs->setup_vertical_interrupt2)
3838 		hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
3839 }
3840 
dcn401_set_hdr_multiplier_sequence(struct pipe_ctx * pipe_ctx,struct block_sequence_state * seq_state)3841 void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
3842 		struct block_sequence_state *seq_state)
3843 {
3844 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
3845 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
3846 	struct custom_float_format fmt;
3847 
3848 	fmt.exponenta_bits = 6;
3849 	fmt.mantissa_bits = 12;
3850 	fmt.sign = true;
3851 
3852 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
3853 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
3854 
3855 	hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
3856 }
3857 
dcn401_program_mall_pipe_config_sequence(struct dc * dc,struct dc_state * context,struct block_sequence_state * seq_state)3858 void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
3859 		struct block_sequence_state *seq_state)
3860 {
3861 	int i;
3862 	unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
3863 	bool cache_cursor = false;
3864 
3865 	// Don't force p-state disallow -- can't block dummy p-state
3866 
3867 	// Update MALL_SEL register for each pipe (break down update_mall_sel call)
3868 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3869 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3870 		struct hubp *hubp = pipe->plane_res.hubp;
3871 
3872 		if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
3873 			int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
3874 
3875 			switch (hubp->curs_attr.color_format) {
3876 			case CURSOR_MODE_MONO:
3877 				cursor_size /= 2;
3878 				break;
3879 			case CURSOR_MODE_COLOR_1BIT_AND:
3880 			case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
3881 			case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
3882 				cursor_size *= 4;
3883 				break;
3884 
3885 			case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
3886 			case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
3887 			default:
3888 				cursor_size *= 8;
3889 				break;
3890 			}
3891 
3892 			if (cursor_size > 16384)
3893 				cache_cursor = true;
3894 
3895 			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3896 				hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
3897 			} else {
3898 				// MALL not supported with Stereo3D
3899 				uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
3900 					pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
3901 					pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
3902 					!pipe->plane_state->address.tmz_surface) ? 2 : 0;
3903 				hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
3904 			}
3905 		}
3906 	}
3907 
3908 	// Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
3909 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3910 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3911 		struct hubp *hubp = pipe->plane_res.hubp;
3912 
3913 		if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
3914 			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
3915 				hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
3916 		}
3917 	}
3918 }
3919 
dcn401_verify_allow_pstate_change_high_sequence(struct dc * dc,struct block_sequence_state * seq_state)3920 void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
3921 		struct block_sequence_state *seq_state)
3922 {
3923 	struct hubbub *hubbub = dc->res_pool->hubbub;
3924 
3925 	if (!hubbub->funcs->verify_allow_pstate_change_high)
3926 		return;
3927 
3928 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
3929 		/* Attempt hardware workaround force recovery */
3930 		dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
3931 	}
3932 }
3933 
dcn401_hw_wa_force_recovery_sequence(struct dc * dc,struct block_sequence_state * seq_state)3934 bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
3935 		struct block_sequence_state *seq_state)
3936 {
3937 	struct hubp *hubp;
3938 	unsigned int i;
3939 
3940 	if (!dc->debug.recovery_enabled)
3941 		return false;
3942 
3943 	/* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
3944 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3945 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3946 
3947 		if (pipe_ctx != NULL) {
3948 			hubp = pipe_ctx->plane_res.hubp;
3949 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
3950 				hwss_add_hubp_set_blank_en(seq_state, hubp, true);
3951 		}
3952 	}
3953 
3954 	/* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
3955 	hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
3956 
3957 	/* Step 3: Set HUBP_DISABLE=1 for all active pipes */
3958 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3959 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3960 
3961 		if (pipe_ctx != NULL) {
3962 			hubp = pipe_ctx->plane_res.hubp;
3963 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
3964 				hwss_add_hubp_disable_control(seq_state, hubp, true);
3965 		}
3966 	}
3967 
3968 	/* Step 4: Set HUBP_DISABLE=0 for all active pipes */
3969 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3970 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3971 
3972 		if (pipe_ctx != NULL) {
3973 			hubp = pipe_ctx->plane_res.hubp;
3974 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
3975 				hwss_add_hubp_disable_control(seq_state, hubp, false);
3976 		}
3977 	}
3978 
3979 	/* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
3980 	hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
3981 
3982 	/* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
3983 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3984 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3985 
3986 		if (pipe_ctx != NULL) {
3987 			hubp = pipe_ctx->plane_res.hubp;
3988 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
3989 				hwss_add_hubp_set_blank_en(seq_state, hubp, false);
3990 		}
3991 	}
3992 
3993 	return true;
3994 }
3995