xref: /linux/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c (revision 8185461e531c39d67aa4705d7f94873feb87adfd)
1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4 
5 
6 #include "os_types.h"
7 #include "dm_services.h"
8 #include "basics/dc_common.h"
9 #include "dm_helpers.h"
10 #include "core_types.h"
11 #include "resource.h"
12 #include "dccg.h"
13 #include "dce/dce_hwseq.h"
14 #include "reg_helper.h"
15 #include "abm.h"
16 #include "hubp.h"
17 #include "dchubbub.h"
18 #include "timing_generator.h"
19 #include "opp.h"
20 #include "ipp.h"
21 #include "mpc.h"
22 #include "mcif_wb.h"
23 #include "dc_dmub_srv.h"
24 #include "link_hwss.h"
25 #include "dpcd_defs.h"
26 #include "clk_mgr.h"
27 #include "dsc.h"
28 #include "link_service.h"
29 #include "custom_float.h"
30 
31 #include "dce/dmub_hw_lock_mgr.h"
32 #include "dcn10/dcn10_cm_common.h"
33 #include "dcn10/dcn10_hubbub.h"
34 #include "dcn20/dcn20_optc.h"
35 #include "dcn30/dcn30_cm_common.h"
36 #include "dcn32/dcn32_hwseq.h"
37 #include "dcn401_hwseq.h"
38 #include "dcn401/dcn401_resource.h"
39 #include "dc_state_priv.h"
40 #include "link_enc_cfg.h"
41 #include "../hw_sequencer.h"
42 
43 #define DC_LOGGER_INIT(logger)
44 
45 #define CTX \
46 	hws->ctx
47 #define REG(reg)\
48 	hws->regs->reg
49 #define DC_LOGGER \
50 	dc->ctx->logger
51 
52 
53 #undef FN
54 #define FN(reg_name, field_name) \
55 	hws->shifts->field_name, hws->masks->field_name
56 
dcn401_initialize_min_clocks(struct dc * dc)57 void dcn401_initialize_min_clocks(struct dc *dc)
58 {
59 	struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
60 
61 	clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
62 	clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
63 	clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
64 	clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
65 	clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
66 	if (dc->debug.disable_boot_optimizations) {
67 		clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
68 	} else {
69 		/* Even though DPG_EN = 1 for the connected display, it still requires the
70 		 * correct timing so we cannot set DISPCLK to min freq or it could cause
71 		 * audio corruption. Read current DISPCLK from DENTIST and request the same
72 		 * freq to ensure that the timing is valid and unchanged.
73 		 */
74 		clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
75 	}
76 	clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
77 	clocks->fclk_p_state_change_support = true;
78 	clocks->p_state_change_support = true;
79 
80 	dc->clk_mgr->funcs->update_clocks(
81 			dc->clk_mgr,
82 			dc->current_state,
83 			true);
84 }
85 
dcn401_program_gamut_remap(struct pipe_ctx * pipe_ctx)86 void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
87 {
88 	unsigned int i = 0;
89 	struct mpc_grph_gamut_adjustment mpc_adjust;
90 	unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
91 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
92 
93 	//For now assert if location is not pre-blend
94 	if (pipe_ctx->plane_state)
95 		ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
96 
97 	// program MPCC_MCM_FIRST_GAMUT_REMAP
98 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
99 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
100 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_FIRST_GAMUT_REMAP;
101 
102 	if (pipe_ctx->plane_state &&
103 		pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
104 		mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
105 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
106 			mpc_adjust.temperature_matrix[i] =
107 			pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
108 	}
109 
110 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
111 
112 	// program MPCC_MCM_SECOND_GAMUT_REMAP for Bypass / Disable for now
113 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
114 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_MCM_SECOND_GAMUT_REMAP;
115 
116 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
117 
118 	// program MPCC_OGAM_GAMUT_REMAP same as is currently used on DCN3x
119 	memset(&mpc_adjust, 0, sizeof(mpc_adjust));
120 	mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
121 	mpc_adjust.mpcc_gamut_remap_block_id = MPCC_OGAM_GAMUT_REMAP;
122 
123 	if (pipe_ctx->top_pipe == NULL) {
124 		if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
125 			mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
126 			for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
127 				mpc_adjust.temperature_matrix[i] =
128 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
129 		}
130 	}
131 
132 	mpc->funcs->set_gamut_remap(mpc, mpcc_id, &mpc_adjust);
133 }
134 
/*
 * dcn401_init_hw - one-time DCN4.01 hardware bring-up.
 *
 * Runs at driver load / resume before any mode set.  In order: initializes
 * clock manager and DCCG, applies low-power memory defaults, derives the
 * DCCG/DCHUB reference clocks from the VBIOS crystal frequency, powers up
 * link encoders, optionally powers down pipes/eDP left on by VBIOS,
 * initializes audio endpoints, backlight/ABM state and clock gating, and
 * finally queries DMCUB feature caps (PSR, FAMS).
 */
void dcn401_init_hw(struct dc *dc)
{
	struct abm **abms = dc->res_pool->multiple_abms;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	int i;
	int edp_num;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	uint32_t user_level = MAX_BACKLIGHT_LEVEL;
	int current_dchub_ref_freq = 0;

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) {
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

		// mark dcmode limits present if any clock has distinct AC and DC values from SMU
		dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present &&
				dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr);
	}

	// Initialize the dccg
	if (res_pool->dccg->funcs->dccg_init)
		res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	// Disable DMUB Initialization until IPS state programming is finalized
	//if (!dcb->funcs->is_accelerated_mode(dcb)) {
	//	hws->funcs.bios_golden_init(dc);
	//}

	// Set default OPTC memory power states
	if (dc->debug.enable_mem_low_power.bits.optc) {
		// Shutdown when unassigned and light sleep in VBLANK
		REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1);
	}

	if (dc->debug.enable_mem_low_power.bits.vga) {
		// Power down VGA memory
		REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
	}

	// Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->hubbub) {
			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			// Snapshot the previous DCHUB ref freq (in MHz) before it is
			// re-derived below, so a change can be detected at the end of
			// init and the bandwidth bounding box refreshed.
			current_dchub_ref_freq = res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (link->ep_type != DISPLAY_ENDPOINT_PHY)
			continue;

		link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* enable_power_gating_plane before dsc_pg_control because
	 * FORCEON = 1 with hw default value on bootup, resume from s3
	 */
	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		/* Disable boot optimizations means power down everything including PHY, DIG,
		 * and OTG (i.e. the boot is not optimized because we do a full power down).
		 */
		if (dc->hwss.enable_accelerated_mode && dc->debug.disable_boot_optimizations)
			dc->hwss.enable_accelerated_mode(dc, dc->current_state);
		else
			hws->funcs.init_pipes(dc, dc->current_state);

		if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
					!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

		dcn401_initialize_min_clocks(dc);

		/* On HW init, allow idle optimizations after pipes have been turned off.
		 *
		 * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
		 * is reset (i.e. not in idle at the time hw init is called), but software state
		 * still has idle_optimizations = true, so we must disable idle optimizations first
		 * (i.e. set false), then re-enable (set true).
		 */
		dc_allow_idle_optimizations(dc, false);
		dc_allow_idle_optimizations(dc, true);
	}

	/* In headless boot cases, DIG may be turned
	 * on which causes HW/SW discrepancies.
	 * To avoid this, power down hardware on boot
	 * if DIG is turned on and seamless boot not enabled
	 */
	if (!dc->config.seamless_boot_edp_requested) {
		struct dc_link *edp_links[MAX_NUM_EDP];
		struct dc_link *edp_link;

		dc_get_edp_links(dc, edp_links, &edp_num);
		if (edp_num) {
			// eDP present: power down backlight, pipes, then eDP power
			for (i = 0; i < edp_num; i++) {
				edp_link = edp_links[i];
				if (edp_link->link_enc->funcs->is_dig_enabled &&
						edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
						dc->hwss.edp_backlight_control &&
						hws->funcs.power_down &&
						dc->hwss.edp_power_control) {
					dc->hwss.edp_backlight_control(edp_link, false);
					hws->funcs.power_down(dc);
					dc->hwss.edp_power_control(edp_link, false);
				}
			}
		} else {
			// No eDP: one power_down is enough if any DIG is active
			for (i = 0; i < dc->link_count; i++) {
				struct dc_link *link = dc->links[i];

				if (link->link_enc->funcs->is_dig_enabled &&
						link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
						hws->funcs.power_down) {
					hws->funcs.power_down(dc);
					break;
				}

			}
		}
	}

	// Bring up azalia audio endpoints
	for (i = 0; i < res_pool->audio_count; i++) {
		struct audio *audio = res_pool->audios[i];

		audio->funcs->hw_init(audio);
	}

	// Restore backlight levels saved in the panel control hardware
	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (link->panel_cntl) {
			backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
			user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
		}
	}

	// Initialize ABM instances with the restored backlight levels
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (abms[i] != NULL && abms[i]->funcs != NULL)
			abms[i]->funcs->abm_init(abms[i], backlight, user_level);
	}

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	dcn401_setup_hpo_hw_control(hws, true);

	if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
		dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);

	if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);

	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, false, false);

	if (dc->res_pool->hubbub->funcs->init_crb)
		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);

	if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
		dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);

	// Get DMCUB capabilities
	if (dc->ctx->dmub_srv) {
		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0;
		dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver;
		dc->debug.fams2_config.bits.enable &=
				dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support
		if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box)
			|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
			/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
			if (dc->clk_mgr)
				dc->res_pool->funcs->update_bw_bounding_box(dc,
									    dc->clk_mgr->bw_params);
		}
	}
}
368 
dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc * dc,struct pipe_ctx * pipe_ctx,enum MCM_LUT_XABLE * shaper_xable,enum MCM_LUT_XABLE * lut3d_xable,enum MCM_LUT_XABLE * lut1d_xable)369 static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
370 		enum MCM_LUT_XABLE *shaper_xable,
371 		enum MCM_LUT_XABLE *lut3d_xable,
372 		enum MCM_LUT_XABLE *lut1d_xable)
373 {
374 	enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
375 	bool lut1d_enable = false;
376 	struct mpc *mpc = dc->res_pool->mpc;
377 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
378 
379 	if (!pipe_ctx->plane_state)
380 		return;
381 	shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
382 	lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
383 	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
384 	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
385 
386 	*lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
387 
388 	switch (shaper_3dlut_setting) {
389 	case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
390 		*lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
391 		break;
392 	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
393 		*lut3d_xable = MCM_LUT_DISABLE;
394 		*shaper_xable = MCM_LUT_ENABLE;
395 		break;
396 	case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
397 		*lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
398 		break;
399 	}
400 }
401 
/*
 * dcn401_populate_mcm_luts - program the MPC MCM 1D LUT, shaper and 3DLUT
 * for a pipe.
 *
 * The 1D LUT and shaper are written through register-based MPC paths.  The
 * 3DLUT is programmed either from system memory (register writes) or from
 * video memory via the HUBP 3DLUT fast-load (DMA) path, depending on
 * @mcm_luts lut3d_src.
 *
 * @dc:         display core state (provides the MPC)
 * @pipe_ctx:   pipe whose plane/hubp/mpcc is being programmed
 * @mcm_luts:   LUT sources and GPU-memory parameters (passed by value)
 * @lut_bank_a: selects which LUT RAM bank to program
 */
void dcn401_populate_mcm_luts(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_cm2_func_luts mcm_luts,
		bool lut_bank_a)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int mpcc_id = hubp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	union mcm_lut_params m_lut_params;
	enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
	enum hubp_3dlut_fl_format format = 0;
	enum hubp_3dlut_fl_mode mode;
	enum hubp_3dlut_fl_width width = 0;
	enum hubp_3dlut_fl_addressing_mode addr_mode;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
	enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
	enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
	enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
	bool rval;

	// Resolve per-stage enable/disable state from the plane state.
	dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);

	/* 1D LUT */
	if (mcm_luts.lut1d_func) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
		else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
			// Convert distributed points to the HW PWL layout.
			rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
					mcm_luts.lut1d_func,
					&dpp_base->regamma_params, false);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
		}
		// Enable only if requested AND a usable PWL was produced.
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
	}

	/* Shaper */
	if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
			m_lut_params.pwl = &mcm_luts.shaper->pwl;
		else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
			// Distributed-points shapers are not expected on this path.
			ASSERT(false);
			rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
					mcm_luts.shaper,
					&dpp_base->regamma_params, true);
			m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
		}
		if (m_lut_params.pwl) {
			if (mpc->funcs->mcm.populate_lut)
				mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
		}
	}

	/* 3DLUT */
	switch (lut3d_src) {
	case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
		memset(&m_lut_params, 0, sizeof(m_lut_params));
		// Register path: make sure the fast-load DMA path is off first.
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, false);

		if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
			m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
			if (mpc->funcs->populate_lut)
				mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
			if (mpc->funcs->program_lut_mode)
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
						mpcc_id);
		}
		break;
		case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
		// Map the requested LUT size onto a fast-load width.
		switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
		case DC_CM2_GPU_MEM_SIZE_171717:
			width = hubp_3dlut_fl_width_17;
			break;
		case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
			width = hubp_3dlut_fl_width_transformed;
			break;
		default:
			//TODO: handle default case
			break;
		}

		//check for support
		if (mpc->funcs->mcm.is_config_supported &&
			!mpc->funcs->mcm.is_config_supported(width))
			break;

		if (mpc->funcs->program_lut_read_write_control)
			mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
		if (mpc->funcs->program_lut_mode)
			mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);

		// Source address of the LUT surface in video memory.
		if (hubp->funcs->hubp_program_3dlut_fl_addr)
			hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);

		if (mpc->funcs->mcm.program_bit_depth)
			mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);

		// Translate the GPU memory layout into fast-load mode/addressing.
		switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
			mode = hubp_3dlut_fl_mode_native_1;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
			mode = hubp_3dlut_fl_mode_native_2;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
			mode = hubp_3dlut_fl_mode_transform;
			addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
			break;
		default:
			mode = hubp_3dlut_fl_mode_disable;
			addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_mode)
			hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);

		if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
			hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);

		switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
			format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
			format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
			break;
		case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
			format = hubp_3dlut_fl_format_float_fp1_5_10;
			break;
		}
		if (hubp->funcs->hubp_program_3dlut_fl_format)
			hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
		// Bias/scale must be programmed on both the MPC and HUBP sides.
		if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
				mpc->funcs->mcm.program_bias_scale) {
			mpc->funcs->mcm.program_bias_scale(mpc,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
				mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
				mpcc_id);
			hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
						mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
		}

		//navi 4x has a bug and r and blue are swapped and need to be worked around here in
		//TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
		switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
		case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
		default:
			crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
			crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
			crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
			break;
		}

		if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
			hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
					crossbar_bit_slice_cr_r,
					crossbar_bit_slice_y_g,
					crossbar_bit_slice_cb_b);

		if (mpc->funcs->mcm.program_lut_read_write_control)
			mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);

		if (mpc->funcs->mcm.program_3dlut_size)
			mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);

		if (mpc->funcs->update_3dlut_fast_load_select)
			mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);

		// Kick off the fast load; without fast-load support the whole
		// MCM chain is left disabled instead.
		if (hubp->funcs->hubp_enable_3dlut_fl)
			hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
		else {
			if (mpc->funcs->program_lut_mode) {
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
				mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
			}
		}
		break;

	}
}
598 
dcn401_trigger_3dlut_dma_load(struct dc * dc,struct pipe_ctx * pipe_ctx)599 void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
600 {
601 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
602 
603 	if (hubp->funcs->hubp_enable_3dlut_fl) {
604 		hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
605 	}
606 }
607 
dcn401_set_mcm_luts(struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)608 bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
609 				const struct dc_plane_state *plane_state)
610 {
611 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
612 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
613 	struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
614 	struct mpc *mpc = dc->res_pool->mpc;
615 	bool result;
616 	const struct pwl_params *lut_params = NULL;
617 	bool rval;
618 
619 	if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
620 		dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
621 		return true;
622 	}
623 
624 	mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
625 	pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
626 	// 1D LUT
627 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
628 		lut_params = &plane_state->blend_tf.pwl;
629 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
630 		rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
631 							       &plane_state->blend_tf,
632 							       &dpp_base->regamma_params, false);
633 		lut_params = rval ? &dpp_base->regamma_params : NULL;
634 	}
635 	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
636 	lut_params = NULL;
637 
638 	// Shaper
639 	if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
640 		lut_params = &plane_state->in_shaper_func.pwl;
641 	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
642 		// TODO: dpp_base replace
643 		rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
644 							       &plane_state->in_shaper_func,
645 							       &dpp_base->shaper_params, true);
646 		lut_params = rval ? &dpp_base->shaper_params : NULL;
647 	}
648 	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
649 
650 	// 3D
651 	if (mpc->funcs->program_3dlut) {
652 		if (plane_state->lut3d_func.state.bits.initialized == 1)
653 			result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
654 		else
655 			result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
656 	}
657 
658 	return result;
659 }
660 
dcn401_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)661 bool dcn401_set_output_transfer_func(struct dc *dc,
662 				struct pipe_ctx *pipe_ctx,
663 				const struct dc_stream_state *stream)
664 {
665 	int mpcc_id = pipe_ctx->plane_res.hubp->inst;
666 	struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
667 	const struct pwl_params *params = NULL;
668 	bool ret = false;
669 
670 	/* program OGAM or 3DLUT only for the top pipe*/
671 	if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) {
672 		/*program shaper and 3dlut in MPC*/
673 		ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream);
674 		if (ret == false && mpc->funcs->set_output_gamma) {
675 			if (stream->out_transfer_func.type == TF_TYPE_HWPWL)
676 				params = &stream->out_transfer_func.pwl;
677 			else if (pipe_ctx->stream->out_transfer_func.type ==
678 					TF_TYPE_DISTRIBUTED_POINTS &&
679 					cm3_helper_translate_curve_to_hw_format(stream->ctx,
680 					&stream->out_transfer_func,
681 					&mpc->blender_params, false))
682 				params = &mpc->blender_params;
683 			/* there are no ROM LUTs in OUTGAM */
684 			if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED)
685 				BREAK_TO_DEBUGGER();
686 		}
687 	}
688 
689 	if (mpc->funcs->set_output_gamma)
690 		mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
691 
692 	return ret;
693 }
694 
dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx * pipe_ctx,unsigned int * tmds_div)695 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx,
696 				unsigned int *tmds_div)
697 {
698 	struct dc_stream_state *stream = pipe_ctx->stream;
699 
700 	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
701 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
702 			*tmds_div = PIXEL_RATE_DIV_BY_2;
703 		else
704 			*tmds_div = PIXEL_RATE_DIV_BY_4;
705 	} else {
706 		*tmds_div = PIXEL_RATE_DIV_BY_1;
707 	}
708 
709 	if (*tmds_div == PIXEL_RATE_DIV_NA)
710 		ASSERT(false);
711 
712 }
713 
/*
 * Gather the derived parameters needed by dcn401_enable_stream_timing():
 * the TMDS pixel-rate divider, the OPP instances fed by this OTG master,
 * the link symclk bookkeeping for TMDS, the DRR (variable refresh) window,
 * and the surface-update event trigger when DRR is active.
 * (@manual_mode is currently left untouched.)
 */
static void enable_stream_timing_calc(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc,
		unsigned int *tmds_div,
		int *opp_inst,
		int *opp_cnt,
		struct pipe_ctx *opp_heads[MAX_PIPES],
		bool *manual_mode,
		struct drr_params *params,
		unsigned int *event_triggers)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	int idx;

	if (dc_is_tmds_signal(stream->signal) || dc_is_virtual_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);

	/* Collect the OPP instances driven by this OTG master. */
	*opp_cnt = resource_get_opp_heads_for_otg_master(pipe_ctx, &context->res_ctx, opp_heads);
	for (idx = 0; idx < *opp_cnt; idx++)
		opp_inst[idx] = opp_heads[idx]->stream_res.opp->inst;

	if (dc_is_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		stream->link->phy_state.symclk_state =
				(stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) ?
						SYMCLK_ON_TX_OFF : SYMCLK_ON_TX_ON;
	}

	params->vertical_total_min = stream->adjust.v_total_min;
	params->vertical_total_max = stream->adjust.v_total_max;
	params->vertical_total_mid = stream->adjust.v_total_mid;
	params->vertical_total_mid_frame_num = stream->adjust.v_total_mid_frame_num;

	// DRR should set trigger event to monitor surface update event
	if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0)
		*event_triggers = 0x80;
}
753 
/*
 * dcn401_enable_stream_timing - program and start OTG timing for a stream.
 *
 * Runs only on the OTG master pipe (returns DC_OK immediately otherwise).
 * Sequence: derive dividers/OPP heads/DRR params, program the pixel rate
 * divider and ODM combine, select DTBCLK_P source, enable the OPTC clock,
 * program the pixel PLL, program the crtc timing (with optional hblank
 * borrow padding), enable OPP clocks, blank pixel data, enable the CRTC,
 * then apply DRR and static-screen event settings.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if PLL programming or
 * CRTC enable fails.
 */
enum dc_status dcn401_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct drr_params params = {0};
	unsigned int event_triggers = 0;
	int opp_cnt = 1;
	int opp_inst[MAX_PIPES] = {0};
	struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
	struct dc_crtc_timing patched_crtc_timing = stream->timing;
	bool manual_mode = false;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	int odm_slice_width;
	int last_odm_slice_width;
	int i;

	/* timing is owned by the OTG master; nothing to do on other pipes */
	if (!resource_is_pipe_type(pipe_ctx, OTG_MASTER))
		return DC_OK;

	/* fills tmds_div, opp_inst/opp_cnt/opp_heads, DRR params and triggers */
	enable_stream_timing_calc(pipe_ctx, context, dc, &tmds_div, opp_inst,
			&opp_cnt, opp_heads, &manual_mode, &params, &event_triggers);

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg, pipe_ctx->stream_res.tg->inst,
			tmds_div, unused_div);
	}

	/* TODO check if timing_changed, disable stream if timing changed */

	/* more than one OPP head means ODM combine is in use */
	if (opp_cnt > 1) {
		odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, false);
		last_odm_slice_width = resource_get_odm_slice_dst_width(pipe_ctx, true);
		pipe_ctx->stream_res.tg->funcs->set_odm_combine(
				pipe_ctx->stream_res.tg,
				opp_inst, opp_cnt,
				odm_slice_width, last_odm_slice_width);
	}

	/* set DTBCLK_P */
	if (dc->res_pool->dccg->funcs->set_dtbclk_p_src) {
		if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, DPREFCLK, pipe_ctx->stream_res.tg->inst);
		}
	}

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* platform workaround, applies to non-DP signals only */
	if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
		dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);

	/* if we are padding, h_addressable needs to be adjusted */
	if (dc->debug.enable_hblank_borrow) {
		patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
		patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
		patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
		pipe_ctx->stream_res.tg,
		&patched_crtc_timing,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vready_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines,
		pipe_ctx->stream->signal,
		true);

	/* enable clocks and left-edge pixel handling on every OPP head */
	for (i = 0; i < opp_cnt; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	hws->funcs.blank_pixel_data(dc, pipe_ctx, true);

	/* VTG is  within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp);
	set_drr_and_clear_adjust_pending(pipe_ctx, stream, &params);

	/* Event triggers and num frames initialized for DRR, but can be
	 * later updated for PSR use. Note DRR trigger events are generated
	 * regardless of whether num frames met.
	 */
	if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control)
		pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
				pipe_ctx->stream_res.tg, event_triggers, 2);

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	/* phantom (SubVP) pipes get extra post-enable CRTC handling */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
		if (pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
			pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
	}

	return DC_OK;
}
887 
get_phyd32clk_src(struct dc_link * link)888 static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
889 {
890 	switch (link->link_enc->transmitter) {
891 	case TRANSMITTER_UNIPHY_A:
892 		return PHYD32CLKA;
893 	case TRANSMITTER_UNIPHY_B:
894 		return PHYD32CLKB;
895 	case TRANSMITTER_UNIPHY_C:
896 		return PHYD32CLKC;
897 	case TRANSMITTER_UNIPHY_D:
898 		return PHYD32CLKD;
899 	case TRANSMITTER_UNIPHY_E:
900 		return PHYD32CLKE;
901 	default:
902 		return PHYD32CLKA;
903 	}
904 }
905 
/*
 * Derive per-stream enable parameters: HPO stream encoder instance (only
 * for 128b/132b links), PHYD32CLK source, TMDS pixel rate divider, and the
 * early control value used to avoid corruption on DP monitors.
 */
static void dcn401_enable_stream_calc(
		struct pipe_ctx *pipe_ctx,
		int *dp_hpo_inst,
		enum phyd32clk_clock_source *phyd32clk,
		unsigned int *tmds_div,
		uint32_t *early_control)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc *dc = stream->ctx->dc;
	enum dc_lane_count lanes = stream->link->cur_link_settings.lane_count;
	uint32_t h_active_with_borders;

	/* HPO stream encoder instance only applies to 128b/132b links */
	if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx))
		*dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;

	*phyd32clk = get_phyd32clk_src(stream->link);

	if (dc_is_tmds_signal(stream->signal))
		dcn401_calculate_dccg_tmds_div_value(pipe_ctx, tmds_div);
	else
		*tmds_div = PIXEL_RATE_DIV_BY_1;

	/* enable early control to avoid corruption on DP monitor */
	h_active_with_borders = stream->timing.h_addressable +
			stream->timing.h_border_left +
			stream->timing.h_border_right;

	if (lanes != 0)
		*early_control = h_active_with_borders % lanes;

	if (*early_control == 0)
		*early_control = lanes;
}
943 
/*
 * dcn401_enable_stream - enable clocks and the stream encoder for a stream.
 *
 * Computes the HPO instance, PHYD32CLK source, TMDS divider and early
 * control value, enables the required SYMCLK/DPSTREAMCLK sources, programs
 * the pixel rate divider, attaches the stream encoder, updates info frames,
 * and finally programs early control on the timing generator.
 */
void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
{
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
	struct dc *dc = pipe_ctx->stream->ctx->dc;
	struct dccg *dccg = dc->res_pool->dccg;
	enum phyd32clk_clock_source phyd32clk;
	int dp_hpo_inst = 0;
	unsigned int tmds_div = PIXEL_RATE_DIV_NA;
	unsigned int unused_div = PIXEL_RATE_DIV_NA;
	struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
	struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;

	/* without unified link encoder assignment, look the encoder up by link */
	if (!dc->config.unify_link_enc_assignment)
		link_enc = link_enc_cfg_get_link_enc(link);

	dcn401_enable_stream_calc(pipe_ctx, &dp_hpo_inst, &phyd32clk,
				&tmds_div, &early_control);

	if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
		if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
			dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst);
			/* unknown link rate: keep SYMCLK32 SE gated for this encoder */
			if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) {
				dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
			} else {
				dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
			}
		} else {
			dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
					link_enc->transmitter - TRANSMITTER_UNIPHY_A);
		}
	}

	if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
		dc->res_pool->dccg->funcs->set_pixel_rate_div(
			dc->res_pool->dccg,
			pipe_ctx->stream_res.tg->inst,
			tmds_div,
			unused_div);
	}

	link_hwss->setup_stream_encoder(pipe_ctx);

	/* dmdata engine is only programmed when not doing an immediate flip */
	if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
		if (dc->hwss.program_dmdata_engine)
			dc->hwss.program_dmdata_engine(pipe_ctx);
	}

	dc->hwss.update_info_frame(pipe_ctx);

	if (dc_is_dp_signal(pipe_ctx->stream->signal))
		dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);

	tg->funcs->set_early_control(tg, early_control);
}
1001 
/* Toggle the HPO IO enable bit (HPO_IO_EN in HPO_TOP_HW_CONTROL). */
void dcn401_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
{
	REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, enable);
}
1006 
/*
 * With 2x cursor magnification enabled, halve the X hotspot and nudge it
 * so the cursor stays aligned when straddling two slices; cursors wider
 * than 128 pixels need the larger nudge.
 */
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy)
{
	uint32_t nudge = (cursor_width <= 128) ? 1 : 2;

	pos_cpy->x_hotspot = pos_cpy->x_hotspot / 2 + nudge;
}
1017 
disable_link_output_symclk_on_tx_off(struct dc_link * link,enum dp_link_encoding link_encoding)1018 static void disable_link_output_symclk_on_tx_off(struct dc_link *link, enum dp_link_encoding link_encoding)
1019 {
1020 	struct dc *dc = link->ctx->dc;
1021 	struct pipe_ctx *pipe_ctx = NULL;
1022 	uint8_t i;
1023 
1024 	for (i = 0; i < MAX_PIPES; i++) {
1025 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1026 		if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
1027 			pipe_ctx->clock_source->funcs->program_pix_clk(
1028 					pipe_ctx->clock_source,
1029 					&pipe_ctx->stream_res.pix_clk_params,
1030 					link_encoding,
1031 					&pipe_ctx->pll_settings);
1032 			break;
1033 		}
1034 	}
1035 }
1036 
dcn401_disable_link_output(struct dc_link * link,const struct link_resource * link_res,enum signal_type signal)1037 void dcn401_disable_link_output(struct dc_link *link,
1038 		const struct link_resource *link_res,
1039 		enum signal_type signal)
1040 {
1041 	struct dc *dc = link->ctx->dc;
1042 	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
1043 	struct dmcu *dmcu = dc->res_pool->dmcu;
1044 
1045 	if (signal == SIGNAL_TYPE_EDP &&
1046 			link->dc->hwss.edp_backlight_control &&
1047 			!link->skip_implict_edp_power_control)
1048 		link->dc->hwss.edp_backlight_control(link, false);
1049 	else if (dmcu != NULL && dmcu->funcs->lock_phy)
1050 		dmcu->funcs->lock_phy(dmcu);
1051 
1052 	if (dc_is_tmds_signal(signal) && link->phy_state.symclk_ref_cnts.otg > 0) {
1053 		disable_link_output_symclk_on_tx_off(link, DP_UNKNOWN_ENCODING);
1054 		link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
1055 	} else {
1056 		link_hwss->disable_link_output(link, link_res, signal);
1057 		link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
1058 	}
1059 
1060 	if (signal == SIGNAL_TYPE_EDP &&
1061 			link->dc->hwss.edp_backlight_control &&
1062 			!link->skip_implict_edp_power_control)
1063 		link->dc->hwss.edp_power_control(link, false);
1064 	else if (dmcu != NULL && dmcu->funcs->lock_phy)
1065 		dmcu->funcs->unlock_phy(dmcu);
1066 
1067 	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
1068 }
1069 
/*
 * dcn401_set_cursor_position - program HW cursor position for a pipe.
 *
 * Translates the cursor position from stream->src space into per-pipe
 * recout space (DCN4 composes the cursor after the scaler), adjusts for
 * ODM and MPC pipe splits, folds negative positions into the hotspot so
 * the cursor stays visible across slice boundaries, disables the cursor
 * when it is fully outside this pipe's recout, and finally programs both
 * the HUBP and DPP cursor position registers.
 */
void dcn401_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.recout = pipe_ctx->plane_res.scl_data.recout,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror,
		.stream = pipe_ctx->stream
	};
	struct rect odm_slice_src = { 0 };
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);
	int prev_odm_width = 0;
	struct pipe_ctx *prev_odm_pipe = NULL;
	bool mpc_combine_on = false;
	int  bottom_pipe_x_pos = 0;

	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;
	int recout_x_pos = 0;
	int recout_y_pos = 0;

	/* MPC combine with scaling: viewport differs from the plane src rect */
	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
			mpc_combine_on = true;
		}
	}

	/* DCN4 moved cursor composition after Scaler, so in HW it is in
	 * recout space and for HW Cursor position programming need to
	 * translate to recout space.
	 *
	 * Cursor X and Y position programmed into HW can't be negative,
	 * in fact it is X, Y coordinate shifted for the HW Cursor Hot spot
	 * position that goes into HW X and Y coordinates while HW Hot spot
	 * X and Y coordinates are length relative to the cursor top left
	 * corner, hotspot must be smaller than the cursor size.
	 *
	 * DMs/DC interface for Cursor position is in stream->src space, and
	 * DMs supposed to transform Cursor coordinates to stream->src space,
	 * then here we need to translate Cursor coordinates to stream->dst
	 * space, as now in HW, Cursor coordinates are in per pipe recout
	 * space, and for the given pipe valid coordinates are only in range
	 * from 0,0 - recout width, recout height space.
	 * If certain pipe combining is in place, need to further adjust per
	 * pipe to make sure each pipe enabling cursor on its part of the
	 * screen.
	 */
	x_pos = pipe_ctx->stream->dst.x + x_pos * pipe_ctx->stream->dst.width /
		pipe_ctx->stream->src.width;
	y_pos = pipe_ctx->stream->dst.y + y_pos * pipe_ctx->stream->dst.height /
		pipe_ctx->stream->src.height;

	/* If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/* Adjust for ODM Combine
	 * next/prev_odm_offset is to account for scaled modes that have underscan
	 */
	if (odm_combine_on) {
		prev_odm_pipe = pipe_ctx->prev_odm_pipe;

		/* subtract widths of all slices to the left of this pipe */
		while (prev_odm_pipe != NULL) {
			odm_slice_src = resource_get_odm_slice_src_rect(prev_odm_pipe);
			prev_odm_width += odm_slice_src.width;
			prev_odm_pipe = prev_odm_pipe->prev_odm_pipe;
		}

		x_pos -= (prev_odm_width);
	}

	/* If the position is negative then we need to add to the hotspot
	 * to fix cursor size between ODM slices
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
			adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	/* If the position on bottom MPC pipe is negative then we need to add to the hotspot and
	 * adjust x_pos on bottom pipe to make cursor visible when crossing between MPC slices.
	 */
	if (mpc_combine_on &&
		pipe_ctx->top_pipe &&
		(pipe_ctx == pipe_ctx->top_pipe->bottom_pipe)) {

		bottom_pipe_x_pos = x_pos - pipe_ctx->plane_res.scl_data.recout.x;
		if (bottom_pipe_x_pos < 0) {
			x_pos = pipe_ctx->plane_res.scl_data.recout.x;
			pos_cpy.x_hotspot -= bottom_pipe_x_pos;
			if (hubp->curs_attr.attribute_flags.bits.ENABLE_MAGNIFICATION)
				adjust_hotspot_between_slices_for_2x_magnify(hubp->curs_attr.width, &pos_cpy);
		}
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pos_cpy.enable && resource_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	/* move into this pipe's recout-local coordinates for visibility checks */
	x_pos = pos_cpy.x - param.recout.x;
	y_pos = pos_cpy.y - param.recout.y;

	recout_x_pos = x_pos - pos_cpy.x_hotspot;
	recout_y_pos = y_pos - pos_cpy.y_hotspot;

	if (recout_x_pos >= (int)param.recout.width)
		pos_cpy.enable = false;  /* not visible beyond right edge*/

	if (recout_y_pos >= (int)param.recout.height)
		pos_cpy.enable = false;  /* not visible beyond bottom edge*/

	if (recout_x_pos + (int)hubp->curs_attr.width <= 0)
		pos_cpy.enable = false;  /* not visible beyond left edge*/

	if (recout_y_pos + (int)hubp->curs_attr.height <= 0)
		pos_cpy.enable = false;  /* not visible beyond top edge*/

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
1221 
dcn401_check_no_memory_request_for_cab(struct dc * dc)1222 static bool dcn401_check_no_memory_request_for_cab(struct dc *dc)
1223 {
1224 	int i;
1225 
1226 	/* First, check no-memory-request case */
1227 	for (i = 0; i < dc->current_state->stream_count; i++) {
1228 		if ((dc->current_state->stream_status[i].plane_count) &&
1229 			(dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
1230 			/* Fail eligibility on a visible stream */
1231 			return false;
1232 	}
1233 
1234 	return true;
1235 }
1236 
dcn401_calculate_cab_allocation(struct dc * dc,struct dc_state * ctx)1237 static uint32_t dcn401_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
1238 {
1239 	int i;
1240 	uint8_t num_ways = 0;
1241 	uint32_t mall_ss_size_bytes = 0;
1242 
1243 	mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
1244 	// TODO add additional logic for PSR active stream exclusion optimization
1245 	// mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
1246 
1247 	// Include cursor size for CAB allocation
1248 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1249 		struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
1250 
1251 		if (!pipe->stream || !pipe->plane_state)
1252 			continue;
1253 
1254 		mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
1255 	}
1256 
1257 	// Convert number of cache lines required to number of ways
1258 	if (dc->debug.force_mall_ss_num_ways > 0)
1259 		num_ways = dc->debug.force_mall_ss_num_ways;
1260 	else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes)
1261 		num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes);
1262 	else
1263 		num_ways = 0;
1264 
1265 	return num_ways;
1266 }
1267 
/*
 * dcn401_apply_idle_power_optimizations - enable/disable MALL SS via DMUB.
 *
 * On enable, selects the CAB action: NO_DCN_REQ when no visible stream
 * needs memory requests, DCN_SS_FIT_IN_CAB when all surfaces (plus
 * cursors) fit in the available cache ways, otherwise
 * DCN_SS_NOT_FIT_IN_CAB.  MALL SS is refused when any stream uses PSR or
 * any plane is Stereo3D/TMZ.  On disable, sends NO_IDLE_OPTIMIZATION.
 *
 * Returns true if a DMUB command was sent, false if preconditions fail.
 */
bool dcn401_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
	union dmub_rb_cmd cmd;
	uint8_t ways, i;
	int j;
	bool mall_ss_unsupported = false;
	struct dc_plane_state *plane = NULL;

	if (!dc->ctx->dmub_srv || !dc->current_state)
		return false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		/* MALL SS messaging is not supported with PSR at this time */
		if (dc->current_state->streams[i] != NULL &&
				dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
			DC_LOG_MALL("MALL SS not supported with PSR at this time\n");
			return false;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
	cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);

	if (enable) {
		if (dcn401_check_no_memory_request_for_cab(dc)) {
			/* 1. Check no memory request case for CAB.
			 * If no memory request case, send CAB_ACTION NO_DCN_REQ DMUB message
			 */
			DC_LOG_MALL("sending CAB action NO_DCN_REQ\n");
			cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
		} else {
			/* 2. Check if all surfaces can fit in CAB.
			 * If surfaces can fit into CAB, send CAB_ACTION_ALLOW DMUB message
			 * and configure HUBP's to fetch from MALL
			 */
			ways = dcn401_calculate_cab_allocation(dc, dc->current_state);

			/* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
			 * or TMZ surface, don't try to enter MALL.
			 */
			for (i = 0; i < dc->current_state->stream_count; i++) {
				for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
					plane = dc->current_state->stream_status[i].plane_states[j];

					if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
							plane->address.tmz_surface) {
						mall_ss_unsupported = true;
						break;
					}
				}
				if (mall_ss_unsupported)
					break;
			}
			if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
				cmd.cab.cab_alloc_ways = ways;
				DC_LOG_MALL("cab allocation: %d ways. CAB action: DCN_SS_FIT_IN_CAB\n", ways);
			} else {
				cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_NOT_FIT_IN_CAB;
				DC_LOG_MALL("frame does not fit in CAB: %d ways required. CAB action: DCN_SS_NOT_FIT_IN_CAB\n", ways);
			}
		}
	} else {
		/* Disable CAB */
		cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION;
		DC_LOG_MALL("idle optimization disabled\n");
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
1341 
dcn401_wait_for_dcc_meta_propagation(const struct dc * dc,const struct pipe_ctx * top_pipe)1342 void dcn401_wait_for_dcc_meta_propagation(const struct dc *dc,
1343 		const struct pipe_ctx *top_pipe)
1344 {
1345 	bool is_wait_needed = false;
1346 	const struct pipe_ctx *pipe_ctx = top_pipe;
1347 
1348 	/* check if any surfaces are updating address while using flip immediate and dcc */
1349 	while (pipe_ctx != NULL) {
1350 		if (pipe_ctx->plane_state &&
1351 				pipe_ctx->plane_state->dcc.enable &&
1352 				pipe_ctx->plane_state->flip_immediate &&
1353 				pipe_ctx->plane_state->update_flags.bits.addr_update) {
1354 			is_wait_needed = true;
1355 			break;
1356 		}
1357 
1358 		/* check next pipe */
1359 		pipe_ctx = pipe_ctx->bottom_pipe;
1360 	}
1361 
1362 	if (is_wait_needed && dc->debug.dcc_meta_propagation_delay_us > 0) {
1363 		udelay(dc->debug.dcc_meta_propagation_delay_us);
1364 	}
1365 }
1366 
/*
 * dcn401_prepare_bandwidth - raise clocks/watermarks before a state change.
 *
 * Temporarily disables P-State support (restored at the end) so MCLK
 * switching is off during the transition, raises memclk past the DC-mode
 * softmax if needed, updates clocks, programs watermarks and arbiter
 * thresholds, shrinks the compressed buffer, and refreshes the FAMS2
 * config under the DMUB HW lock.  Sets dc->optimized_required whenever a
 * later optimize pass is needed.
 */
void dcn401_prepare_bandwidth(struct dc *dc,
	struct dc_state *context)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
	unsigned int compbuf_size = 0;

	/* Any transition into P-State support should disable MCLK switching first to avoid hangs */
	if (p_state_change_support) {
		dc->optimized_required = true;
		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	}

	/* lift the softmax cap when the new state needs more memclk than it allows */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

	/* Increase clocks */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* program dchubbub watermarks:
	 * For assigning optimized_required, use |= operator since we don't want
	 * to clear the value if the optimize has not happened yet
	 */
	dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					false);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
	}

	/* decrease compbuf size */
	if (hubbub->funcs->program_compbuf_segments) {
		compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
		dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);

		hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
	}

	/* FAMS2 config updates must happen under the DMUB HW lock */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_dmub_hw_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, false);
		dcn401_dmub_hw_control_lock(dc, context, false);
	}

	if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
		/* After disabling P-State, restore the original value to ensure we get the correct P-State
		 * on the next optimize. */
		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
	}
}
1424 
/*
 * dcn401_optimize_bandwidth - lower clocks/watermarks after a state change.
 *
 * Mirror of dcn401_prepare_bandwidth: re-enables FAMS2 (under the DMUB HW
 * lock), programs final watermarks and arbiter thresholds, re-applies the
 * DC-mode memclk softmax if it can now be honored, grows the compressed
 * buffer, lowers clocks, and programs extended blank for Z-state support
 * on fixed-refresh DRR pipes.
 */
void dcn401_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct hubbub *hubbub = dc->res_pool->hubbub;

	/* enable fams2 if needed */
	if (dc->debug.fams2_config.bits.enable) {
		dcn401_dmub_hw_control_lock(dc, context, true);
		dcn401_fams2_update_config(dc, context, true);
		dcn401_dmub_hw_control_lock(dc, context, false);
	}

	/* program dchubbub watermarks */
	hubbub->funcs->program_watermarks(hubbub,
					&context->bw_ctx.bw.dcn.watermarks,
					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
					true);
	/* update timeout thresholds */
	if (hubbub->funcs->program_arbiter) {
		hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, true);
	}

	/* re-apply the softmax cap once the new state fits under it */
	if (dc->clk_mgr->dc_mode_softmax_enabled)
		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);

	/* increase compbuf size */
	if (hubbub->funcs->program_compbuf_segments)
		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);
	if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
		/* extended blank applies only when DRR is pinned (min == max)
		 * above the nominal v_total
		 */
		for (i = 0; i < dc->res_pool->pipe_count; ++i) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
				&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
				&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
					pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
						pipe_ctx->dlg_regs.min_dst_y_next_start);
		}
	}
}
1474 
/*
 * Acquire or release the DMUB HW lock via an inbox0 command.  No-op when
 * there is no DMUB service or neither FAMS2 nor cursor offload is active.
 */
void dcn401_dmub_hw_control_lock(struct dc *dc,
		struct dc_state *context,
		bool lock)
{
	/* use always for now */
	union dmub_inbox0_cmd_lock_hw cmd = { 0 };

	if (!dc->ctx || !dc->ctx->dmub_srv)
		return;

	/* lock only matters when FAMS2 or cursor offload is in use */
	if (!dc->debug.fams2_config.bits.enable &&
			!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;

	cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
	cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
	cmd.bits.lock = lock;
	cmd.bits.should_release = !lock;

	dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, cmd);
}
1494 
dcn401_dmub_hw_control_lock_fast(union block_sequence_params * params)1495 void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
1496 {
1497 	struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
1498 	bool lock = params->dmub_hw_control_lock_fast_params.lock;
1499 
1500 	if (params->dmub_hw_control_lock_fast_params.is_required) {
1501 		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
1502 
1503 		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
1504 		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
1505 		hw_lock_cmd.bits.lock = lock;
1506 		hw_lock_cmd.bits.should_release = !lock;
1507 		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
1508 	}
1509 }
1510 
/*
 * Push the FAMS2 configuration for a state to DMUB.  The enable actually
 * sent is the caller's request ANDed with whether the state itself needs
 * FAMS2.  No-op without a DMUB service or with FAMS2 debug-disabled.
 */
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable)
{
	bool state_requires_fams2;

	if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
		return;

	state_requires_fams2 =
		context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable;

	dc_dmub_srv_fams2_update_config(dc, context, enable && state_requires_fams2);
}
1522 
/*
 * update_dsc_for_odm_change - reprogram DSC when the ODM configuration of
 * an OTG master changes between the current state and the new context.
 *
 * Updates DSC on the new OTG master's stream, then disconnects any DSC
 * instance that was attached to an old OPP head but is absent from the
 * corresponding pipe in the new context.
 */
static void update_dsc_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	int i;
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;

	/* same pipe index may not be the OTG master in the current state */
	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
									   &dc->current_state->res_ctx,
									   old_opp_heads);
	} else {
		// DC cannot assume that the current state and the new state
		// share the same OTG pipe since this is not true when called
		// in the context of a commit stream not checked. Hence, set
		// old_otg_master to NULL to skip the DSC configuration.
		old_otg_master = NULL;
	}


	if (otg_master->stream_res.dsc)
		dcn32_update_dsc_on_stream(otg_master,
				otg_master->stream->timing.flags.DSC);
	/* disconnect DSC instances dropped by the new configuration */
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc)
				old_pipe->stream_res.dsc->funcs->dsc_disconnect(
						old_pipe->stream_res.dsc);
		}
	}
}
1561 
/*
 * Apply an ODM combine configuration change for @otg_master.
 *
 * Programs OPTC ODM combine (or bypass when only a single OPP head is
 * left), enables OPP pipe clocks and left-edge extra pixel handling on
 * every ODM slice, updates DSC to match the new slice layout, and
 * re-programs the blank pattern when no plane is attached since the
 * pattern is generated by OPP and the OPP count just changed.
 */
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
	/* Combine when multiple OPP heads drive the OTG, bypass otherwise. */
	if (opp_head_count > 1)
		otg_master->stream_res.tg->funcs->set_odm_combine(
				otg_master->stream_res.tg,
				opp_inst, opp_head_count,
				odm_slice_width, last_odm_slice_width);
	else
		otg_master->stream_res.tg->funcs->set_odm_bypass(
				otg_master->stream_res.tg,
				&otg_master->stream->timing);

	for (i = 0; i < opp_head_count; i++) {
		opp_heads[i]->stream_res.opp->funcs->opp_pipe_clock_control(
				opp_heads[i]->stream_res.opp,
				true);
		opp_heads[i]->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
				opp_heads[i]->stream_res.opp,
				opp_heads[i]->stream->timing.pixel_encoding,
				resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	update_dsc_for_odm_change(dc, context, otg_master);

	if (!resource_is_pipe_type(otg_master, DPP_PIPE))
		/*
		 * blank pattern is generated by OPP, reprogram blank pattern
		 * due to OPP count change
		 */
		dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
1606 
/*
 * Append DSC (re)programming steps for an ODM change to @seq_state.
 *
 * Sequence-based counterpart of update_dsc_for_odm_change(): instead of
 * touching hardware directly it records the DTO DSCCLK, DSC config,
 * DSC enable, OPTC DSC config, and DSC disconnect operations into the
 * block sequence for later execution.
 */
static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
{
	struct pipe_ctx *old_pipe;
	struct pipe_ctx *new_pipe;
	struct pipe_ctx *old_opp_heads[MAX_PIPES];
	struct pipe_ctx *old_otg_master;
	int old_opp_head_count = 0;
	int i;

	old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];

	if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
		old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
			&dc->current_state->res_ctx,
			old_opp_heads);
	} else {
		/* Current and new state may not share the same OTG pipe;
		 * NULL means skip old-state DSC teardown below.
		 */
		old_otg_master = NULL;
	}

	/* Process new DSC configuration if DSC is enabled */
	if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
		struct dc_stream_state *stream = otg_master->stream;
		struct pipe_ctx *odm_pipe;
		int opp_cnt = 1;
		/* Index of the most recently added "calculate and set config"
		 * step; its computed dsc_optc_cfg is reused in Step 5.
		 */
		int last_dsc_calc = 0;
		/* DTO DSCCLK path is only taken above 48 MHz pixel clock. */
		bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
				stream->timing.pix_clk_100hz > 480000;

		/* Count ODM pipes */
		for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			opp_cnt++;

		/* Slices are divided evenly among the ODM pipes. */
		int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;

		/* Step 1: Set DTO DSCCLK for main DSC if needed */
		if (should_use_dto_dscclk) {
			hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
					otg_master->stream_res.dsc->inst, num_slices_h);
		}

		/* Step 2: Calculate and set DSC config for main DSC */
		last_dsc_calc = *seq_state->num_steps;
		hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);

		/* Step 3: Enable main DSC block */
		hwss_add_dsc_enable_with_opp(seq_state, otg_master);

		/* Step 4: Configure and enable ODM DSC blocks */
		for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
			if (!odm_pipe->stream_res.dsc)
				continue;

			/* Set DTO DSCCLK for ODM DSC if needed */
			if (should_use_dto_dscclk) {
				hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
						odm_pipe->stream_res.dsc->inst, num_slices_h);
			}

			/* Calculate and set DSC config for ODM DSC */
			last_dsc_calc = *seq_state->num_steps;
			hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);

			/* Enable ODM DSC block */
			hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
		}

		/* Step 5: Configure DSC in timing generator */
		hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
			&seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
	} else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
		/* Disable DSC in OPTC */
		hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);

		hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
	}

	/* Disable DSC for old pipes that no longer need it */
	if (old_otg_master && old_otg_master->stream_res.dsc) {
		for (i = 0; i < old_opp_head_count; i++) {
			old_pipe = old_opp_heads[i];
			new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];

			/* If old pipe had DSC but new pipe doesn't, disable the old DSC */
			if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
				/* Then disconnect DSC block */
				hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
			}
		}
	}
}
1698 
/*
 * Sequence-based counterpart of dcn401_update_odm().
 *
 * Appends the OPTC ODM combine/bypass, OPP clock/left-edge, DSC update
 * and (if needed) blank pixel data operations to @seq_state rather than
 * programming the hardware directly.
 */
void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
{
	struct pipe_ctx *opp_heads[MAX_PIPES];
	int opp_inst[MAX_PIPES] = {0};
	int opp_head_count;
	int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
	int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
	int i;

	opp_head_count = resource_get_opp_heads_for_otg_master(
			otg_master, &context->res_ctx, opp_heads);

	for (i = 0; i < opp_head_count; i++)
		opp_inst[i] = opp_heads[i]->stream_res.opp->inst;

	/* Add ODM combine/bypass operation to sequence */
	if (opp_head_count > 1) {
		hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
			opp_head_count, odm_slice_width, last_odm_slice_width);
	} else {
		hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
	}

	/* Add OPP operations to sequence */
	for (i = 0; i < opp_head_count; i++) {
		/* Add OPP pipe clock control operation */
		hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);

		/* Add OPP program left edge extra pixel operation */
		hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
			opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
	}

	/* Add DSC update operations to sequence */
	dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);

	/* Add blank pixel data operation if needed (blank pattern comes
	 * from OPP and the OPP count may have changed)
	 */
	if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
		if (dc->hwseq->funcs.blank_pixel_data_sequence)
			dc->hwseq->funcs.blank_pixel_data_sequence(
				dc, otg_master, true, seq_state);
	}
}
1743 
/*
 * Unblank the stream's encoder output and restore eDP backlight.
 *
 * Chooses the HPO (128b/132b) or DIO unblank path based on the link's
 * signal encoding; only DP-class signals are unblanked here. When the
 * local sink is eDP, backlight is turned back on afterwards.
 */
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = {0};
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* calculate parameters for unblank */
	params.opp_cnt = resource_get_odm_slice_count(pipe_ctx);

	params.timing = pipe_ctx->stream->timing;
	params.link_settings.link_rate = link_settings->link_rate;
	params.pix_per_cycle = pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle;

	if (link->dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
		/* HPO path: unblank via the hybrid DP stream encoder */
		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
				pipe_ctx->stream_res.hpo_dp_stream_enc,
				pipe_ctx->stream_res.tg->inst);
	} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP)
		hws->funcs.edp_backlight_control(link, true);
}
1770 
/*
 * Release hardware control (e.g. before handing the display off).
 *
 * Disables FAMS2 in firmware and, depending on the debug option,
 * either forces p-state support on in DCN or marks p-state change
 * unsupported, updating clocks accordingly.
 */
void dcn401_hardware_release(struct dc *dc)
{
	if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
		dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);

		/* If pstate is unsupported, or still supported by firmware,
		 * force it supported by dcn.
		 */
		if (dc->current_state) {
			if ((!dc->clk_mgr->clks.p_state_change_support ||
					dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
					dc->res_pool->hubbub->funcs->force_pstate_change_control)
				dc->res_pool->hubbub->funcs->force_pstate_change_control(
						dc->res_pool->hubbub, true, true);

			dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
		}
	} else {
		/* Debug path: report p-state unsupported instead of forcing. */
		if (dc->current_state) {
			dc->clk_mgr->clks.p_state_change_support = false;
			dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
		}
		dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
	}
}
1797 
dcn401_wait_for_det_buffer_update_under_otg_master(struct dc * dc,struct dc_state * context,struct pipe_ctx * otg_master)1798 void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master)
1799 {
1800 	struct pipe_ctx *opp_heads[MAX_PIPES];
1801 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1802 	struct hubbub *hubbub = dc->res_pool->hubbub;
1803 	int dpp_count = 0;
1804 
1805 	if (!otg_master->stream)
1806 		return;
1807 
1808 	int slice_count = resource_get_opp_heads_for_otg_master(otg_master,
1809 			&context->res_ctx, opp_heads);
1810 
1811 	for (int slice_idx = 0; slice_idx < slice_count; slice_idx++) {
1812 		if (opp_heads[slice_idx]->plane_state) {
1813 			dpp_count = resource_get_dpp_pipes_for_opp_head(
1814 					opp_heads[slice_idx],
1815 					&context->res_ctx,
1816 					dpp_pipes);
1817 			for (int dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) {
1818 				struct pipe_ctx *dpp_pipe = dpp_pipes[dpp_idx];
1819 					if (dpp_pipe && hubbub &&
1820 						dpp_pipe->plane_res.hubp &&
1821 						hubbub->funcs->wait_for_det_update)
1822 						hubbub->funcs->wait_for_det_update(hubbub, dpp_pipe->plane_res.hubp->inst);
1823 			}
1824 		} else {
1825 			if (hubbub && opp_heads[slice_idx]->plane_res.hubp && hubbub->funcs->wait_for_det_update)
1826 				hubbub->funcs->wait_for_det_update(hubbub, opp_heads[slice_idx]->plane_res.hubp->inst);
1827 		}
1828 	}
1829 }
1830 
/*
 * Lock or unlock all enabled, non-phantom OTG master pipes for an
 * interdependent update.
 *
 * Locking is a straight pass over all pipes. Unlocking is two-phase:
 * pipes flagged in dc->scratch.pipes_to_unlock_first are unlocked first
 * and their DET buffer updates waited on, so freed DET can be
 * redistributed before the remaining pipes are unlocked.
 */
void dcn401_interdependent_update_lock(struct dc *dc,
		struct dc_state *context, bool lock)
{
	unsigned int i = 0;
	struct pipe_ctx *pipe = NULL;
	struct timing_generator *tg = NULL;

	if (lock) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			/* Only OTG masters with an enabled TG, excluding
			 * SubVP phantom pipes, participate in the lock.
			 */
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
				continue;
			dc->hwss.pipe_control_lock(dc, pipe, true);
		}
	} else {
		/* Need to free DET being used first and have pipe update, then unlock the remaining pipes*/
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;

			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			if (dc->scratch.pipes_to_unlock_first[i]) {
				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
				dc->hwss.pipe_control_lock(dc, pipe, false);
				/* Assumes pipe of the same index in current_state is also an OTG_MASTER pipe*/
				dcn401_wait_for_det_buffer_update_under_otg_master(dc, dc->current_state, old_pipe);
			}
		}

		/* Unlocking the rest of the pipes */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			if (dc->scratch.pipes_to_unlock_first[i])
				continue;

			pipe = &context->res_ctx.pipe_ctx[i];
			tg = pipe->stream_res.tg;
			if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
					!tg->funcs->is_tg_enabled(tg) ||
					dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
				continue;
			}

			dc->hwss.pipe_control_lock(dc, pipe, false);
		}
	}
}
1886 
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
{
	/* If 3DLUT FL is enabled and 3DLUT is in use, follow the workaround sequence for pipe unlock to make sure that
	 * HUBP will properly fetch 3DLUT contents after unlock.
	 *
	 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless
	 * of whether OTG lock is currently being held or not.
	 */
	struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL };
	struct pipe_ctx *odm_pipe, *mpc_pipe;
	int i, wa_pipe_ct = 0;

	/* Collect every pipe in the ODM/MPC tree whose plane sources its
	 * 3DLUT from VIDMEM with shaper+3DLUT enabled — only those need
	 * the workaround.
	 */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
			if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
						== DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
					&& mpc_pipe->plane_state->mcm_shaper_3dlut_setting
						== DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
				wa_pipes[wa_pipe_ct++] = mpc_pipe;
			}
		}
	}

	if (wa_pipe_ct > 0) {
		/* Hold off VUPDATE while re-asserting 3DLUT fetch enable
		 * around the unlock so the enable is not cancelled.
		 */
		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true);

		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
		if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status)
			pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false);

		/* Re-assert 3DLUT fetch enable again after the unlock has
		 * landed, per the workaround sequence above.
		 */
		for (i = 0; i < wa_pipe_ct; ++i) {
			if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl)
				wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true);
		}

		if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout)
			pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false);
	} else {
		/* No affected pipes: plain unlock. */
		pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
	}
}
1934 
dcn401_program_outstanding_updates(struct dc * dc,struct dc_state * context)1935 void dcn401_program_outstanding_updates(struct dc *dc,
1936 		struct dc_state *context)
1937 {
1938 	struct hubbub *hubbub = dc->res_pool->hubbub;
1939 
1940 	/* update compbuf if required */
1941 	if (hubbub->funcs->program_compbuf_segments)
1942 		hubbub->funcs->program_compbuf_segments(hubbub, context->bw_ctx.bw.dcn.arb_regs.compbuf_size, true);
1943 }
1944 
/*
 * Tear down the back end (stream/link/TG side) for @pipe_ctx.
 *
 * Disables DPMS or audio as appropriate, frees the audio endpoint, and
 * — only on the parent pipe, since the back end is shared — disables
 * ABM, CRTC, OPTC clock and ODM, clears DRR, drops symclk and DTBCLK_P
 * sources. Finally severs the pipe's tree links and stream pointer.
 */
void dcn401_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	struct dc_link *link = pipe_ctx->stream->link;
	const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		pipe_ctx->stream = NULL;
		return;
	}

	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	/* free acquired resources */
	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
			pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
					pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);

		/* drop any pending DRR adjustment */
		set_drr_and_clear_adjust_pending(pipe_ctx, pipe_ctx->stream, NULL);

		/* TODO - convert symclk_ref_cnts for otg to a bit map to solve
		 * the case where the same symclk is shared across multiple otg
		 * instances
		 */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			link->phy_state.symclk_ref_cnts.otg = 0;
		if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
			link_hwss->disable_link_output(link,
					&pipe_ctx->link_res, pipe_ctx->stream->signal);
			link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
		}

		/* reset DTBCLK_P */
		if (dc->res_pool->dccg->funcs->set_dtbclk_p_src)
			dc->res_pool->dccg->funcs->set_dtbclk_p_src(dc->res_pool->dccg, REFCLK, pipe_ctx->stream_res.tg->inst);
	}

/*
 * In case of a dangling plane, setting this to NULL unconditionally
 * causes failures during reset hw ctx where, if stream is NULL,
 * it is expected that the pipe_ctx pointers to pipes and plane are NULL.
 */
	pipe_ctx->stream = NULL;
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
2032 
dcn401_reset_hw_ctx_wrap(struct dc * dc,struct dc_state * context)2033 void dcn401_reset_hw_ctx_wrap(
2034 		struct dc *dc,
2035 		struct dc_state *context)
2036 {
2037 	int i;
2038 	struct dce_hwseq *hws = dc->hwseq;
2039 
2040 	/* Reset Back End*/
2041 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
2042 		struct pipe_ctx *pipe_ctx_old =
2043 			&dc->current_state->res_ctx.pipe_ctx[i];
2044 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2045 
2046 		if (!pipe_ctx_old->stream)
2047 			continue;
2048 
2049 		if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
2050 			continue;
2051 
2052 		if (!pipe_ctx->stream ||
2053 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
2054 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
2055 
2056 			if (hws->funcs.reset_back_end_for_pipe)
2057 				hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
2058 			if (hws->funcs.enable_stream_gating)
2059 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
2060 			if (old_clk)
2061 				old_clk->funcs->cs_power_down(old_clk);
2062 		}
2063 	}
2064 }
2065 
dcn401_calculate_vready_offset_for_group(struct pipe_ctx * pipe)2066 static unsigned int dcn401_calculate_vready_offset_for_group(struct pipe_ctx *pipe)
2067 {
2068 	struct pipe_ctx *other_pipe;
2069 	unsigned int vready_offset = pipe->global_sync.dcn4x.vready_offset_pixels;
2070 
2071 	/* Always use the largest vready_offset of all connected pipes */
2072 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
2073 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2074 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2075 	}
2076 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
2077 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2078 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2079 	}
2080 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
2081 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2082 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2083 	}
2084 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
2085 		if (other_pipe->global_sync.dcn4x.vready_offset_pixels > vready_offset)
2086 			vready_offset = other_pipe->global_sync.dcn4x.vready_offset_pixels;
2087 	}
2088 
2089 	return vready_offset;
2090 }
2091 
/*
 * Program the timing generator global sync and VTG for @pipe_ctx.
 *
 * Uses the group-wide maximum VREADY offset, waits for VACTIVE on
 * non-phantom pipes before continuing, then sets VTG parameters and
 * hooks up the vupdate interrupt when the hwseq provides one.
 */
static void dcn401_program_tg(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context,
	struct dce_hwseq *hws)
{
	pipe_ctx->stream_res.tg->funcs->program_global_sync(
		pipe_ctx->stream_res.tg,
		dcn401_calculate_vready_offset_for_group(pipe_ctx),
		(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
		(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);

	/* phantom (SubVP) pipes must not stall waiting for VACTIVE */
	if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);

	pipe_ctx->stream_res.tg->funcs->set_vtg_params(
		pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

	if (hws->funcs.setup_vupdate_interrupt)
		hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
}
2115 
/*
 * Program one pipe for the new state @context.
 *
 * Ordering matters: blank/unblank and TG global sync are handled on the
 * top/OTG-master pipe only, ODM and plane enable come before DET sizing
 * and HUBP/DPP updates, color/transfer-function programming follows,
 * and ABM/test-pattern setup runs last after the pipe is configured.
 */
void dcn401_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only need to unblank on top pipe */
	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
		if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.odm ||
			pipe_ctx->stream->update_flags.bits.abm_level)
			hws->funcs.blank_pixel_data(dc, pipe_ctx,
				!pipe_ctx->plane_state ||
				!pipe_ctx->plane_state->visible);
	}

	/* Only update TG on top pipe */
	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
		&& !pipe_ctx->prev_odm_pipe)
		dcn401_program_tg(dc, pipe_ctx, context, hws);

	if (pipe_ctx->update_flags.bits.odm)
		hws->funcs.update_odm(dc, context, pipe_ctx);

	if (pipe_ctx->update_flags.bits.enable) {
		/* prefer the hwseq-specific enable_plane when provided */
		if (hws->funcs.enable_plane)
			hws->funcs.enable_plane(dc, pipe_ctx, context);
		else
			dc->hwss.enable_plane(dc, pipe_ctx, context);

		if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
			dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
	}

	/* reprogram DET allocation (size in KB and/or segments) */
	if (pipe_ctx->update_flags.bits.det_size) {
		if (dc->res_pool->hubbub->funcs->program_det_size)
			dc->res_pool->hubbub->funcs->program_det_size(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
		if (dc->res_pool->hubbub->funcs->program_det_segments)
			dc->res_pool->hubbub->funcs->program_det_segments(
				dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
	}

	/* any pipe/plane/stream flag change requires a HUBP+DPP update */
	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
	    pipe_ctx->plane_state->update_flags.raw ||
	    pipe_ctx->stream->update_flags.raw))
		dc->hwss.update_dchubp_dpp(dc, pipe_ctx, context);

	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
		pipe_ctx->plane_state->update_flags.bits.hdr_mult))
		hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state &&
		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
			pipe_ctx->update_flags.bits.enable))
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for powering on, internal memcmp to avoid
	 * updating on slave planes
	 */
	if (pipe_ctx->update_flags.bits.enable ||
	    pipe_ctx->update_flags.bits.plane_changed ||
	    pipe_ctx->stream->update_flags.bits.out_tf)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);

	/* If the pipe has been enabled or has a different opp, we
	 * should reprogram the fmt. This deals with cases where
	 * interation between mpc and odm combine on different streams
	 * causes a different pipe to be chosen to odm combine with.
	 */
	if (pipe_ctx->update_flags.bits.enable
		|| pipe_ctx->update_flags.bits.opp_changed) {

		pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
			pipe_ctx->stream_res.opp,
			COLOR_SPACE_YCBCR601,
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->signal);

		pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
			pipe_ctx->stream_res.opp,
			&pipe_ctx->stream->bit_depth_params,
			&pipe_ctx->stream->clamping);
	}

	/* Set ABM pipe after other pipe configurations done */
	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
		if (pipe_ctx->stream_res.abm) {
			dc->hwss.set_pipe(pipe_ctx);
			pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
				pipe_ctx->stream->abm_level);
		}
	}

	/* regenerate the display test pattern with fresh bit-depth params */
	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
		struct bit_depth_reduction_params params;

		memset(&params, 0, sizeof(params));
		odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
		dc->hwss.set_disp_pattern_generator(dc,
			pipe_ctx,
			pipe_ctx->stream_res.test_pattern_params.test_pattern,
			pipe_ctx->stream_res.test_pattern_params.color_space,
			pipe_ctx->stream_res.test_pattern_params.color_depth,
			NULL,
			pipe_ctx->stream_res.test_pattern_params.width,
			pipe_ctx->stream_res.test_pattern_params.height,
			pipe_ctx->stream_res.test_pattern_params.offset);
	}
}
2231 
2232 /*
2233  * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
2234  *
2235  * This function creates a sequence-based version of the original dcn401_program_pipe
2236  * function. Instead of directly calling hardware programming functions, it appends
2237  * sequence steps to the provided block_sequence array that can later be executed
2238  * as part of hwss_execute_sequence.
2239  *
2240  */
dcn401_program_pipe_sequence(struct dc * dc,struct pipe_ctx * pipe_ctx,struct dc_state * context,struct block_sequence_state * seq_state)2241 void dcn401_program_pipe_sequence(
2242 	struct dc *dc,
2243 	struct pipe_ctx *pipe_ctx,
2244 	struct dc_state *context,
2245 	struct block_sequence_state *seq_state)
2246 {
2247 	struct dce_hwseq *hws = dc->hwseq;
2248 
2249 	/* Only need to unblank on top pipe */
2250 	if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
2251 		if (pipe_ctx->update_flags.bits.enable ||
2252 				pipe_ctx->update_flags.bits.odm ||
2253 				pipe_ctx->stream->update_flags.bits.abm_level) {
2254 			if (dc->hwseq->funcs.blank_pixel_data_sequence)
2255 				dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
2256 					 !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
2257 					 seq_state);
2258 		}
2259 	}
2260 
2261 	/* Only update TG on top pipe */
2262 	if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
2263 		&& !pipe_ctx->prev_odm_pipe) {
2264 
2265 		/* Step 1: Program global sync */
2266 		hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
2267 			dcn401_calculate_vready_offset_for_group(pipe_ctx),
2268 			(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2269 			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2270 			(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2271 			(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2272 
2273 		/* Step 2: Wait for VACTIVE state (if not phantom pipe) */
2274 		if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
2275 			hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2276 
2277 		/* Step 3: Set VTG params */
2278 		hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2279 
2280 		/* Step 4: Setup vupdate interrupt (if available) */
2281 		if (hws->funcs.setup_vupdate_interrupt)
2282 			dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
2283 	}
2284 
2285 	if (pipe_ctx->update_flags.bits.odm) {
2286 		if (hws->funcs.update_odm_sequence)
2287 			hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
2288 	}
2289 
2290 	if (pipe_ctx->update_flags.bits.enable) {
2291 		if (dc->hwss.enable_plane_sequence)
2292 			dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
2293 	}
2294 
2295 	if (pipe_ctx->update_flags.bits.det_size) {
2296 		if (dc->res_pool->hubbub->funcs->program_det_size) {
2297 			hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
2298 				pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
2299 		}
2300 
2301 		if (dc->res_pool->hubbub->funcs->program_det_segments) {
2302 			hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
2303 				pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
2304 		}
2305 	}
2306 
2307 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
2308 	    pipe_ctx->plane_state->update_flags.raw ||
2309 	    pipe_ctx->stream->update_flags.raw)) {
2310 
2311 		if (dc->hwss.update_dchubp_dpp_sequence)
2312 			dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
2313 	}
2314 
2315 	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
2316 		pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
2317 
2318 		hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
2319 	}
2320 
2321 	if (pipe_ctx->plane_state &&
2322 		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2323 			pipe_ctx->plane_state->update_flags.bits.gamma_change ||
2324 			pipe_ctx->plane_state->update_flags.bits.lut_3d ||
2325 			pipe_ctx->update_flags.bits.enable)) {
2326 
2327 		hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
2328 	}
2329 
2330 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
2331 	 * only do gamma programming for powering on, internal memcmp to avoid
2332 	 * updating on slave planes
2333 	 */
2334 	if (pipe_ctx->update_flags.bits.enable ||
2335 			pipe_ctx->update_flags.bits.plane_changed ||
2336 			pipe_ctx->stream->update_flags.bits.out_tf) {
2337 		hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
2338 	}
2339 
2340 	/* If the pipe has been enabled or has a different opp, we
2341 	 * should reprogram the fmt. This deals with cases where
2342 	 * interation between mpc and odm combine on different streams
2343 	 * causes a different pipe to be chosen to odm combine with.
2344 	 */
2345 	if (pipe_ctx->update_flags.bits.enable
2346 		|| pipe_ctx->update_flags.bits.opp_changed) {
2347 
2348 		hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
2349 			pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
2350 
2351 		hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
2352 			&pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
2353 	}
2354 
2355 	/* Set ABM pipe after other pipe configurations done */
2356 	if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
2357 		if (pipe_ctx->stream_res.abm) {
2358 			hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
2359 
2360 			hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
2361 		}
2362 	}
2363 
2364 	if (pipe_ctx->update_flags.bits.test_pattern_changed) {
2365 		struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
2366 
2367 		hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
2368 
2369 		hwss_add_opp_set_disp_pattern_generator(seq_state,
2370 			odm_opp,
2371 			pipe_ctx->stream_res.test_pattern_params.test_pattern,
2372 			pipe_ctx->stream_res.test_pattern_params.color_space,
2373 			pipe_ctx->stream_res.test_pattern_params.color_depth,
2374 			(struct tg_color){0},
2375 			false,
2376 			pipe_ctx->stream_res.test_pattern_params.width,
2377 			pipe_ctx->stream_res.test_pattern_params.height,
2378 			pipe_ctx->stream_res.test_pattern_params.offset);
2379 	}
2380 
2381 }
2382 
/*
 * dcn401_program_front_end_for_ctx - program all front-end pipes (HUBP, DPP,
 * MPC, ODM) for the incoming state @context.
 *
 * The step order below is critical for correct double-buffered updates:
 *   1. Force triple buffering off for a full update.
 *   2. If going from zero active planes to one or more, temporarily force
 *      P-State disallow (released later in post_unlock_program_front_end).
 *   3. Detect per-pipe changes and populate update_flags.
 *   4. Re-enable the OTG of phantom pipes being disabled so their double
 *      buffered registers can still latch.
 *   5. Blank OTG master pipes being disabled, then disconnect their MPCCs
 *      (and zero phantom-pipe DET while the OTG is still reachable).
 *   6. Update ODM for blanked OTG master pipes.
 *   7. Program updated pipes top-down through each MPCC tree, skipping
 *      phantom pipes (those are programmed in post_unlock to avoid a frame
 *      of underflow during MPO transitions).
 *   8. Program writeback pipes and apply the MPO HUBP read-start workaround.
 */
void dcn401_program_front_end_for_ctx(
	struct dc *dc,
	struct dc_state *context)
{
	int i;
	unsigned int prev_hubp_count = 0;
	unsigned int hubp_count = 0;
	struct dce_hwseq *hws = dc->hwseq;
	struct pipe_ctx *pipe = NULL;

	DC_LOGGER_INIT(dc->ctx->logger);

	if (resource_is_pipe_topology_changed(dc->current_state, context))
		resource_log_pipe_topology_update(dc, context);

	if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			pipe = &context->res_ctx.pipe_ctx[i];

			if (pipe->plane_state) {
				/* triplebuffer_flips should already be cleared for a full update */
				if (pipe->plane_state->triplebuffer_flips)
					BREAK_TO_DEBUGGER();

				/*turn off triple buffer for full update*/
				dc->hwss.program_triplebuffer(
					dc, pipe, pipe->plane_state->triplebuffer_flips);
			}
		}
	}

	/* Count planes in the outgoing vs incoming state */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
			prev_hubp_count++;
		if (context->res_ctx.pipe_ctx[i].plane_state)
			hubp_count++;
	}

	/* First plane coming up: force P-State disallow and give HW time to settle */
	if (prev_hubp_count == 0 && hubp_count > 0) {
		if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
			dc->res_pool->hubbub->funcs->force_pstate_change_control(
				dc->res_pool->hubbub, true, false);
		udelay(500);
	}

	/* Set pipe update flags and lock pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		dc->hwss.detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
			&context->res_ctx.pipe_ctx[i]);

	/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
	 * buffer updates properly)
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;

		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
			dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
			struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;

			if (tg->funcs->enable_crtc) {
				/* Blank before enabling the CRTC so no stale content scans out */
				if (dc->hwseq->funcs.blank_pixel_data)
					dc->hwseq->funcs.blank_pixel_data(dc, pipe, true);

				tg->funcs->enable_crtc(tg);
			}
		}
	}
	/* OTG blank before disabling all front ends */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			&& !context->res_ctx.pipe_ctx[i].top_pipe
			&& !context->res_ctx.pipe_ctx[i].prev_odm_pipe
			&& context->res_ctx.pipe_ctx[i].stream)
			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);

	/* Disconnect mpcc */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
			|| context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) {
			struct hubbub *hubbub = dc->res_pool->hubbub;

			/* Phantom pipe DET should be 0, but if a pipe in use is being transitioned to phantom
			 * then we want to do the programming here (effectively it's being disabled). If we do
			 * the programming later the DET won't be updated until the OTG for the phantom pipe is
			 * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with
			 * DET allocation.
			 */
			if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
				(context->res_ctx.pipe_ctx[i].plane_state &&
				dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) ==
				SUBVP_PHANTOM))) {
				if (hubbub->funcs->program_det_size)
					hubbub->funcs->program_det_size(hubbub,
						dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
				if (dc->res_pool->hubbub->funcs->program_det_segments)
					dc->res_pool->hubbub->funcs->program_det_segments(
						hubbub,	dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
			}
			hws->funcs.plane_atomic_disconnect(dc, dc->current_state,
				&dc->current_state->res_ctx.pipe_ctx[i]);
			DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
		}

	/* update ODM for blanked OTG master pipes */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			!resource_is_pipe_type(pipe, DPP_PIPE) &&
			pipe->update_flags.bits.odm &&
			hws->funcs.update_odm)
			hws->funcs.update_odm(dc, context, pipe);
	}

	/*
	 * Program all updated pipes, order matters for mpcc setup. Start with
	 * top pipe and program all pipes that follow in order
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* Walk the blending tree from the top pipe downward */
			while (pipe) {
				if (hws->funcs.program_pipe)
					hws->funcs.program_pipe(dc, pipe, context);
				else {
					/* Don't program phantom pipes in the regular front end programming sequence.
					 * There is an MPO transition case where a pipe being used by a video plane is
					 * transitioned directly to be a phantom pipe when closing the MPO video.
					 * However the phantom pipe will program a new HUBP_VTG_SEL (update takes place
					 * right away) but the MPO still exists until the double buffered update of the
					 * main pipe so we will get a frame of underflow if the phantom pipe is
					 * programmed here.
					 */
					if (pipe->stream &&
						dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
						dcn401_program_pipe(dc, pipe, context);
				}

				pipe = pipe->bottom_pipe;
			}
		}

		/* Program secondary blending tree and writeback pipes */
		pipe = &context->res_ctx.pipe_ctx[i];
		if (!pipe->top_pipe && !pipe->prev_odm_pipe
			&& pipe->stream && pipe->stream->num_wb_info > 0
			&& (pipe->update_flags.raw || (pipe->plane_state && pipe->plane_state->update_flags.raw)
				|| pipe->stream->update_flags.raw)
			&& hws->funcs.program_all_writeback_pipes_in_tree)
			hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);

		/* Avoid underflow by check of pipe line read when adding 2nd plane. */
		if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
				!pipe->top_pipe &&
				pipe->stream &&
				pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
				dc->current_state->stream_status[0].plane_count == 1 &&
				context->stream_status[0].plane_count > 1) {
			pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
		}
	}
}
2547 
/*
 * dcn401_post_unlock_program_front_end - front-end programming that must run
 * only after the pipes have been unlocked.
 *
 * Steps (in order):
 *  - reset OPPs / disable planes removed from @context
 *  - poll (up to ~100 ms) for pending flips to clear on newly-enabled pipes
 *  - poll for OPTC double-buffer updates when ODM slice count grows, so
 *    DISPCLK is not reduced before the 2:1/4:1 transition completes
 *  - release the forced P-State disallow set in program_front_end_for_ctx
 *  - program phantom (SubVP) pipes with full "enable" flags
 *  - update forced P-State / MALL configuration
 *  - apply the DEGVIDCN21 and MPO stutter-underflow workarounds
 */
void dcn401_post_unlock_program_front_end(
	struct dc *dc,
	struct dc_state *context)
{
	// Timeout for pipe enable
	unsigned int timeout_us = 100000;
	unsigned int polling_interval_us = 1;
	struct dce_hwseq *hwseq = dc->hwseq;
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* Reset OPPs that are no longer OPP heads in the new state */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
			!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
			dc->hwss.post_unlock_reset_opp(dc,
				&dc->current_state->res_ctx.pipe_ctx[i]);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);

	/*
	 * If we are enabling a pipe, we need to wait for pending clear as this is a critical
	 * part of the enable operation otherwise, DM may request an immediate flip which
	 * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which
	 * is unsupported on DCN.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		// Don't check flip pending on phantom pipes
		if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			struct hubp *hubp = pipe->plane_res.hubp;
			int j = 0;

			for (j = 0; j < timeout_us / polling_interval_us
				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
				udelay(polling_interval_us);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* When going from a smaller ODM slice count to larger, we must ensure double
		 * buffer update completes before we return to ensure we don't reduce DISPCLK
		 * before we've transitioned to 2:1 or 4:1
		 */
		if (resource_is_pipe_type(old_pipe, OTG_MASTER) && resource_is_pipe_type(pipe, OTG_MASTER) &&
			resource_get_odm_slice_count(old_pipe) < resource_get_odm_slice_count(pipe) &&
			dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
			int j = 0;
			struct timing_generator *tg = pipe->stream_res.tg;

			if (tg->funcs->get_optc_double_buffer_pending) {
				for (j = 0; j < timeout_us / polling_interval_us
					&& tg->funcs->get_optc_double_buffer_pending(tg); j++)
					udelay(polling_interval_us);
			}
		}
	}

	/* Release the P-State force applied in program_front_end_for_ctx */
	if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
		dc->res_pool->hubbub->funcs->force_pstate_change_control(
			dc->res_pool->hubbub, false, false);


	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && !pipe->top_pipe) {
			/* Program phantom pipe here to prevent a frame of underflow in the MPO transition
			 * case (if a pipe being used for a video plane transitions to a phantom pipe, it
			 * can underflow due to HUBP_VTG_SEL programming if done in the regular front end
			 * programming sequence).
			 */
			while (pipe) {
				if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
					/* When turning on the phantom pipe we want to run through the
					 * entire enable sequence, so apply all the "enable" flags.
					 */
					if (dc->hwss.apply_update_flags_for_phantom)
						dc->hwss.apply_update_flags_for_phantom(pipe);
					if (dc->hwss.update_phantom_vp_position)
						dc->hwss.update_phantom_vp_position(dc, context, pipe);
					dcn401_program_pipe(dc, pipe, context);
				}
				pipe = pipe->bottom_pipe;
			}
		}
	}

	/* All remaining steps depend on the hw sequencer object */
	if (!hwseq)
		return;

	/* P-State support transitions:
	 * Natural -> FPO:      P-State disabled in prepare, force disallow anytime is safe
	 * FPO -> Natural:      Unforce anytime after FW disable is safe (P-State will assert naturally)
	 * Unsupported -> FPO:  P-State enabled in optimize, force disallow anytime is safe
	 * FPO -> Unsupported:  P-State disabled in prepare, unforce disallow anytime is safe
	 * FPO <-> SubVP:       Force disallow is maintained on the FPO / SubVP pipes
	 */
	if (hwseq->funcs.update_force_pstate)
		dc->hwseq->funcs.update_force_pstate(dc, context);
	/* Only program the MALL registers after all the main and phantom pipes
	 * are done programming.
	 */
	if (hwseq->funcs.program_mall_pipe_config)
		hwseq->funcs.program_mall_pipe_config(dc, context);

	/* WA to apply WM setting*/
	if (hwseq->wa.DEGVIDCN21)
		dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);


	/* WA for stutter underflow during MPO transitions when adding 2nd plane */
	if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {

		if (dc->current_state->stream_status[0].plane_count == 1 &&
			context->stream_status[0].plane_count > 1) {

			struct timing_generator *tg = dc->res_pool->timing_generators[0];

			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);

			/* Remember the frame the WA was applied on so it can be lifted later */
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame =
				tg->funcs->get_frame_count(tg);
		}
	}
}
2681 
dcn401_update_bandwidth(struct dc * dc,struct dc_state * context)2682 bool dcn401_update_bandwidth(
2683 	struct dc *dc,
2684 	struct dc_state *context)
2685 {
2686 	int i;
2687 	struct dce_hwseq *hws = dc->hwseq;
2688 
2689 	/* recalculate DML parameters */
2690 	if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK)
2691 		return false;
2692 
2693 	/* apply updated bandwidth parameters */
2694 	dc->hwss.prepare_bandwidth(dc, context);
2695 
2696 	/* update hubp configs for all pipes */
2697 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2698 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2699 
2700 		if (pipe_ctx->plane_state == NULL)
2701 			continue;
2702 
2703 		if (pipe_ctx->top_pipe == NULL) {
2704 			bool blank = !is_pipe_tree_visible(pipe_ctx);
2705 
2706 			pipe_ctx->stream_res.tg->funcs->program_global_sync(
2707 				pipe_ctx->stream_res.tg,
2708 				dcn401_calculate_vready_offset_for_group(pipe_ctx),
2709 				(unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
2710 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
2711 				(unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
2712 				(unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
2713 
2714 			pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2715 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
2716 
2717 			if (pipe_ctx->prev_odm_pipe == NULL)
2718 				hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2719 
2720 			if (hws->funcs.setup_vupdate_interrupt)
2721 				hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2722 		}
2723 
2724 		if (pipe_ctx->plane_res.hubp->funcs->hubp_setup2)
2725 			pipe_ctx->plane_res.hubp->funcs->hubp_setup2(
2726 				pipe_ctx->plane_res.hubp,
2727 				&pipe_ctx->hubp_regs,
2728 				&pipe_ctx->global_sync,
2729 				&pipe_ctx->stream->timing);
2730 	}
2731 
2732 	return true;
2733 }
2734 
/*
 * dcn401_detect_pipe_changes - diff @old_pipe (from @old_state) against
 * @new_pipe (from @new_state) and populate new_pipe->update_flags.
 *
 * The resulting flags decide which programming steps run later in the
 * front-end sequence. The flag set starts cleared; SubVP phantom-pipe
 * transitions are handled first (they short-circuit to disable/enable),
 * then individual resources and register groups are compared.
 */
void dcn401_detect_pipe_changes(struct dc_state *old_state,
	struct dc_state *new_state,
	struct pipe_ctx *old_pipe,
	struct pipe_ctx *new_pipe)
{
	bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
	bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;

	/* Snapshot dcn4x global sync fields up front to keep the comparison below readable */
	unsigned int old_pipe_vready_offset_pixels = old_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int new_pipe_vready_offset_pixels = new_pipe->global_sync.dcn4x.vready_offset_pixels;
	unsigned int old_pipe_vstartup_lines = old_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int new_pipe_vstartup_lines = new_pipe->global_sync.dcn4x.vstartup_lines;
	unsigned int old_pipe_vupdate_offset_pixels = old_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int new_pipe_vupdate_offset_pixels = new_pipe->global_sync.dcn4x.vupdate_offset_pixels;
	unsigned int old_pipe_vupdate_width_pixels = old_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;
	unsigned int new_pipe_vupdate_width_pixels = new_pipe->global_sync.dcn4x.vupdate_vupdate_width_pixels;

	new_pipe->update_flags.raw = 0;

	/* If non-phantom pipe is being transitioned to a phantom pipe,
	 * set disable and return immediately. This is because the pipe
	 * that was previously in use must be fully disabled before we
	 * can "enable" it as a phantom pipe (since the OTG will certainly
	 * be different). The post_unlock sequence will set the correct
	 * update flags to enable the phantom pipe.
	 */
	if (old_pipe->plane_state && !old_is_phantom &&
		new_pipe->plane_state && new_is_phantom) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
		resource_is_odm_topology_changed(new_pipe, old_pipe))
		/* Detect odm changes */
		new_pipe->update_flags.bits.odm = 1;

	/* Exit on unchanged, unused pipe */
	if (!old_pipe->plane_state && !new_pipe->plane_state)
		return;
	/* Detect pipe enable/disable */
	if (!old_pipe->plane_state && new_pipe->plane_state) {
		/* Fresh enable: set every flag needed for a full pipe bring-up */
		new_pipe->update_flags.bits.enable = 1;
		new_pipe->update_flags.bits.mpcc = 1;
		new_pipe->update_flags.bits.dppclk = 1;
		new_pipe->update_flags.bits.hubp_interdependent = 1;
		new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
		new_pipe->update_flags.bits.unbounded_req = 1;
		new_pipe->update_flags.bits.gamut_remap = 1;
		new_pipe->update_flags.bits.scaler = 1;
		new_pipe->update_flags.bits.viewport = 1;
		new_pipe->update_flags.bits.det_size = 1;
		if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
			new_pipe->stream_res.test_pattern_params.width != 0 &&
			new_pipe->stream_res.test_pattern_params.height != 0)
			new_pipe->update_flags.bits.test_pattern_changed = 1;
		if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
			new_pipe->update_flags.bits.odm = 1;
			new_pipe->update_flags.bits.global_sync = 1;
		}
		return;
	}

	/* For SubVP we need to unconditionally enable because any phantom pipes are
	 * always removed then newly added for every full updates whenever SubVP is in use.
	 * The remove-add sequence of the phantom pipe always results in the pipe
	 * being blanked in enable_stream_timing (DPG).
	 */
	if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
		new_pipe->update_flags.bits.enable = 1;

	/* Phantom pipes are effectively disabled, if the pipe was previously phantom
	 * we have to enable
	 */
	if (old_pipe->plane_state && old_is_phantom &&
		new_pipe->plane_state && !new_is_phantom)
		new_pipe->update_flags.bits.enable = 1;

	if (old_pipe->plane_state && !new_pipe->plane_state) {
		new_pipe->update_flags.bits.disable = 1;
		return;
	}

	/* Detect plane change */
	if (old_pipe->plane_state != new_pipe->plane_state)
		new_pipe->update_flags.bits.plane_changed = true;

	/* Detect top pipe only changes */
	if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
		/* Detect global sync changes */
		if ((old_pipe_vready_offset_pixels != new_pipe_vready_offset_pixels)
			|| (old_pipe_vstartup_lines != new_pipe_vstartup_lines)
			|| (old_pipe_vupdate_offset_pixels != new_pipe_vupdate_offset_pixels)
			|| (old_pipe_vupdate_width_pixels != new_pipe_vupdate_width_pixels))
			new_pipe->update_flags.bits.global_sync = 1;
	}

	if (old_pipe->det_buffer_size_kb != new_pipe->det_buffer_size_kb)
		new_pipe->update_flags.bits.det_size = 1;

	/*
	 * Detect opp / tg change, only set on change, not on enable
	 * Assume mpcc inst = pipe index, if not this code needs to be updated
	 * since mpcc is what is affected by these. In fact all of our sequence
	 * makes this assumption at the moment with how hubp reset is matched to
	 * same index mpcc reset.
	 */
	if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.opp_changed = 1;
	if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
		new_pipe->update_flags.bits.tg_changed = 1;

	/*
	 * Detect mpcc blending changes, only dpp inst and opp matter here,
	 * mpccs getting removed/inserted update connected ones during their own
	 * programming
	 */
	if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp
		|| old_pipe->stream_res.opp != new_pipe->stream_res.opp)
		new_pipe->update_flags.bits.mpcc = 1;

	/* Detect dppclk change */
	if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz)
		new_pipe->update_flags.bits.dppclk = 1;

	/* Check for scl update */
	if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
		new_pipe->update_flags.bits.scaler = 1;
	/* Check for vp update */
	if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
		|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
			&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
		new_pipe->update_flags.bits.viewport = 1;

	/* Detect dlg/ttu/rq updates */
	{
		/* old_* regs are local copies so the interdependent fields can be
		 * overwritten below without touching the actual old pipe state
		 */
		struct dml2_display_dlg_regs old_dlg_regs = old_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs old_ttu_regs = old_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 old_rq_regs = old_pipe->hubp_regs.rq_regs;
		struct dml2_display_dlg_regs *new_dlg_regs = &new_pipe->hubp_regs.dlg_regs;
		struct dml2_display_ttu_regs *new_ttu_regs = &new_pipe->hubp_regs.ttu_regs;
		struct dml2_display_rq_regs	 *new_rq_regs = &new_pipe->hubp_regs.rq_regs;

		/* Detect pipe interdependent updates */
		if ((old_dlg_regs.dst_y_prefetch != new_dlg_regs->dst_y_prefetch)
			|| (old_dlg_regs.vratio_prefetch != new_dlg_regs->vratio_prefetch)
			|| (old_dlg_regs.vratio_prefetch_c != new_dlg_regs->vratio_prefetch_c)
			|| (old_dlg_regs.dst_y_per_vm_vblank != new_dlg_regs->dst_y_per_vm_vblank)
			|| (old_dlg_regs.dst_y_per_row_vblank != new_dlg_regs->dst_y_per_row_vblank)
			|| (old_dlg_regs.dst_y_per_vm_flip != new_dlg_regs->dst_y_per_vm_flip)
			|| (old_dlg_regs.dst_y_per_row_flip != new_dlg_regs->dst_y_per_row_flip)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_l != new_dlg_regs->refcyc_per_meta_chunk_vblank_l)
			|| (old_dlg_regs.refcyc_per_meta_chunk_vblank_c != new_dlg_regs->refcyc_per_meta_chunk_vblank_c)
			|| (old_dlg_regs.refcyc_per_meta_chunk_flip_l != new_dlg_regs->refcyc_per_meta_chunk_flip_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_l != new_dlg_regs->refcyc_per_line_delivery_pre_l)
			|| (old_dlg_regs.refcyc_per_line_delivery_pre_c != new_dlg_regs->refcyc_per_line_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_l != new_ttu_regs->refcyc_per_req_delivery_pre_l)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_c != new_ttu_regs->refcyc_per_req_delivery_pre_c)
			|| (old_ttu_regs.refcyc_per_req_delivery_pre_cur0 !=
				new_ttu_regs->refcyc_per_req_delivery_pre_cur0)
			|| (old_ttu_regs.min_ttu_vblank != new_ttu_regs->min_ttu_vblank)
			|| (old_ttu_regs.qos_level_flip != new_ttu_regs->qos_level_flip)) {
			/* Copy the interdependent fields into the local snapshot so the
			 * memcmp below only flags *other* dlg/ttu/rq differences
			 */
			old_dlg_regs.dst_y_prefetch = new_dlg_regs->dst_y_prefetch;
			old_dlg_regs.vratio_prefetch = new_dlg_regs->vratio_prefetch;
			old_dlg_regs.vratio_prefetch_c = new_dlg_regs->vratio_prefetch_c;
			old_dlg_regs.dst_y_per_vm_vblank = new_dlg_regs->dst_y_per_vm_vblank;
			old_dlg_regs.dst_y_per_row_vblank = new_dlg_regs->dst_y_per_row_vblank;
			old_dlg_regs.dst_y_per_vm_flip = new_dlg_regs->dst_y_per_vm_flip;
			old_dlg_regs.dst_y_per_row_flip = new_dlg_regs->dst_y_per_row_flip;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_l = new_dlg_regs->refcyc_per_meta_chunk_vblank_l;
			old_dlg_regs.refcyc_per_meta_chunk_vblank_c = new_dlg_regs->refcyc_per_meta_chunk_vblank_c;
			old_dlg_regs.refcyc_per_meta_chunk_flip_l = new_dlg_regs->refcyc_per_meta_chunk_flip_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_l = new_dlg_regs->refcyc_per_line_delivery_pre_l;
			old_dlg_regs.refcyc_per_line_delivery_pre_c = new_dlg_regs->refcyc_per_line_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_l = new_ttu_regs->refcyc_per_req_delivery_pre_l;
			old_ttu_regs.refcyc_per_req_delivery_pre_c = new_ttu_regs->refcyc_per_req_delivery_pre_c;
			old_ttu_regs.refcyc_per_req_delivery_pre_cur0 = new_ttu_regs->refcyc_per_req_delivery_pre_cur0;
			old_ttu_regs.min_ttu_vblank = new_ttu_regs->min_ttu_vblank;
			old_ttu_regs.qos_level_flip = new_ttu_regs->qos_level_flip;
			new_pipe->update_flags.bits.hubp_interdependent = 1;
		}
		/* Detect any other updates to ttu/rq/dlg */
		if (memcmp(&old_dlg_regs, new_dlg_regs, sizeof(old_dlg_regs)) ||
			memcmp(&old_ttu_regs, new_ttu_regs, sizeof(old_ttu_regs)) ||
			memcmp(&old_rq_regs, new_rq_regs, sizeof(old_rq_regs)))
			new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
	}

	if (old_pipe->unbounded_req != new_pipe->unbounded_req)
		new_pipe->update_flags.bits.unbounded_req = 1;

	if (memcmp(&old_pipe->stream_res.test_pattern_params,
		&new_pipe->stream_res.test_pattern_params, sizeof(struct test_pattern_params))) {
		new_pipe->update_flags.bits.test_pattern_changed = 1;
	}
}
2931 
/*
 * dcn401_plane_atomic_power_down - power gate a front end (DPP + HUBP pair).
 *
 * IP_REQUEST_EN is forced on for the duration of the power-gate programming
 * and restored to its original value afterwards (only if it was originally
 * off). Both blocks are reset after gating, and the DPP root clock is gated
 * last.
 */
void dcn401_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	uint32_t org_ip_request_cntl = 0;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* Open the IP request window if it is not already open */
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
		if (org_ip_request_cntl == 0)
			REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);
	}

	if (hws->funcs.dpp_pg_control)
		hws->funcs.dpp_pg_control(hws, dpp->inst, false);

	if (hws->funcs.hubp_pg_control)
		hws->funcs.hubp_pg_control(hws, hubp->inst, false);

	hubp->funcs->hubp_reset(hubp);
	dpp->funcs->dpp_reset(dpp);

	/* Restore IP_REQUEST_EN only if this function turned it on */
	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
		REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	DC_LOG_DEBUG(
			"Power gated front end %d\n", hubp->inst);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
2967 
/*
 * dcn401_update_cursor_offload_pipe - Capture one pipe's cursor state into
 * the DMUB cursor-offload shared buffer.
 *
 * Copies the cursor register values cached in the pipe's HUBP and DPP shadow
 * state (hubp->att / hubp->pos / dpp->att) into the next payload slot of the
 * per-stream cursor offload ring, then marks this pipe as present in that
 * payload's pipe_mask. The buffer is shared with DMUB firmware, hence the
 * volatile qualifiers.
 *
 * NOTE(review): write_idx is read and incremented locally only to select the
 * payload slot; the stream's write_idx itself is not advanced here -
 * presumably the caller commits it once all pipes of the stream have been
 * written. Verify against the caller.
 *
 * @dc: display core instance (provides the DMUB service)
 * @pipe: pipe whose cursor state is captured; needs hubp and dpp assigned
 */
void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
{
	volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
	const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
	const struct hubp *hubp = pipe->plane_res.hubp;
	const struct dpp *dpp = pipe->plane_res.dpp;
	volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
	uint32_t stream_idx, write_idx, payload_idx;

	/* Nothing to capture without an OTG master or plane resources */
	if (!top_pipe || !hubp || !dpp)
		return;

	/* Streams are indexed by their OTG master pipe */
	stream_idx = top_pipe->pipe_idx;
	write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* next payload slot (+1) */
	payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);

	p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;

	/* HUBP cursor surface, geometry, position and control state */
	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
	p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
	p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
	p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
	p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
	p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
	p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
	p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;

	/* DPP (CM) cursor control; COLOR0/COLOR1 are fixed black/white */
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
	p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
	p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
	p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;

	/* Fixed-point scale/bias used for cursor color processing */
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
		dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
		dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
		dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
	p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
		dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;

	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
	p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
	p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;

	/* Record this pipe as contributing to the payload */
	cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
}
3023 
/*
 * dcn401_plane_atomic_power_down_sequence - Queue front-end power-down steps.
 *
 * Appends to @seq_state the block sequence that power gates a DPP/HUBP pair:
 * raise DC_IP_REQUEST_CNTL if it is not already set, gate the DPP and HUBP,
 * reset both blocks, restore DC_IP_REQUEST_CNTL to its original value, and
 * finally gate the DPP root clock. Steps are only recorded here; execution
 * happens when the caller runs the block sequence.
 *
 * @dc: display core instance
 * @dpp: DPP instance to power down
 * @hubp: HUBP instance to power down
 * @seq_state: block sequence being built
 */
void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp,
		struct block_sequence_state *seq_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	uint32_t org_ip_request_cntl = 0;

	DC_LOGGER_INIT(dc->ctx->logger);

	/* Check and set DC_IP_REQUEST_CNTL if needed (register may be unmapped) */
	if (REG(DC_IP_REQUEST_CNTL)) {
		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
		if (org_ip_request_cntl == 0)
			hwss_add_dc_ip_request_cntl(seq_state, dc, true);
	}

	/* DPP power gating control */
	hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);

	/* HUBP power gating control */
	hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);

	/* HUBP reset */
	hwss_add_hubp_reset(seq_state, hubp);

	/* DPP reset */
	hwss_add_dpp_reset(seq_state, dpp);

	/* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
	if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
		hwss_add_dc_ip_request_cntl(seq_state, dc, false);

	DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);

	/* DPP root clock control */
	hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
}
3062 
/*
 * dcn401_plane_atomic_disconnect_sequence - Queue steps that trigger HW to
 * start disconnecting a plane from its stream on the next vsync.
 *
 * Removes the plane's MPCC from the MPC blending tree and marks the
 * disconnect as pending so callers can later wait for the double-buffered
 * update to take effect. Returns early if the MPCC was already removed.
 *
 * @dc: display core instance
 * @state: DC state used to query the pipe's SubVP type
 * @pipe_ctx: pipe whose plane is being disconnected
 * @seq_state: block sequence being built
 */
void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx,
		struct block_sequence_state *seq_state)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	/* Step 1: Remove MPCC from MPC tree */
	hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);

	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
		/* Step 2: Set MPCC disconnect pending flag */
		hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
	}

	/* Step 3: Set optimized required flag */
	hwss_add_dc_set_optimized_required(seq_state, dc, true);

	/* Step 4: Disconnect HUBP if function exists */
	if (hubp->funcs->hubp_disconnect)
		hwss_add_hubp_disconnect(seq_state, hubp);

	/* Step 5: Verify pstate change high if debug sanity checks are enabled */
	if (dc->debug.sanity_checks)
		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
}
3104 
/*
 * dcn401_blank_pixel_data_sequence - Queue steps that blank or unblank pixel
 * data on a stream via the OPP display pattern generator.
 *
 * When blanking, ABM is immediately disabled and a solid color (or color
 * squares if visual confirm is on) is programmed; when unblanking, video mode
 * is restored and the ABM pipe/level are re-applied. Every pipe in the ODM
 * chain gets its pattern generator programmed. No-op while a link test
 * pattern is active.
 *
 * @dc: display core instance
 * @pipe_ctx: OTG master pipe of the stream
 * @blank: true to blank, false to restore video
 * @seq_state: block sequence being built
 */
void dcn401_blank_pixel_data_sequence(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	bool blank,
	struct block_sequence_state *seq_state)
{
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space = stream->output_color_space;
	enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
	enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
	struct pipe_ctx *odm_pipe;
	struct rect odm_slice_src;

	/* Do not disturb an active link test pattern */
	if (stream->link->test_pattern_enabled)
		return;

	/* get opp dpg blank color */
	color_space_to_black_color(dc, color_space, &black_color);

	if (blank) {
		/* Set ABM immediate disable */
		hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);

		if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
			test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
			test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
		}
	} else {
		test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
	}

	/* Program the display pattern generator on every pipe in the ODM chain */
	for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
		odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);

		hwss_add_opp_set_disp_pattern_generator(seq_state,
			odm_pipe->stream_res.opp,
			test_pattern,
			test_pattern_color_space,
			stream->timing.display_color_depth,
			black_color,
			true,
			odm_slice_src.width,
			odm_slice_src.height,
			odm_slice_src.x);
	}

	/* Re-apply ABM pipe and level when restoring video */
	if (!blank && stream_res->abm) {
		hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
		hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
	}
}
3183 
/*
 * dcn401_program_all_writeback_pipes_in_tree_sequence - Queue enable, update
 * or disable steps for every writeback pipe attached to a stream.
 *
 * For each writeback_info entry of @stream: if the pipe is enabled, find the
 * MPCC instance feeding it from the source plane and either update (already
 * running) or enable it; if the source plane is gone or the entry is
 * disabled, queue a disable/disconnect instead.
 *
 * @dc: display core instance
 * @stream: stream owning the writeback pipes; NULL or an out-of-range
 *          num_wb_info makes this a no-op
 * @context: DC state used to locate the source plane's pipe
 * @seq_state: block sequence being built
 */
void dcn401_program_all_writeback_pipes_in_tree_sequence(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct dc_state *context,
		struct block_sequence_state *seq_state)
{
	struct dwbc *dwb;
	int i_wb, i_pipe;

	if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
		return;

	/* For each writeback pipe */
	for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
		/* Get direct pointer to writeback info (cast drops const) */
		struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
		int mpcc_inst = -1;

		if (wb_info->wb_enabled) {
			/* Get the MPCC instance for writeback_source_plane */
			for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];

				if (!pipe_ctx->plane_state)
					continue;

				if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
					mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
					break;
				}
			}

			if (mpcc_inst == -1) {
				/* Disable writeback pipe and disconnect from MPCC
				 * if source plane has been removed
				 */
				dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
				continue;
			}

			ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
			dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];

			if (dwb->funcs->is_enabled(dwb)) {
				/* Writeback pipe already enabled, only need to update */
				dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
			} else {
				/* Enable writeback pipe and connect to MPCC */
				dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
			}
		} else {
			/* Disable writeback pipe and disconnect from MPCC */
			dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
		}
	}
}
3240 
/*
 * dcn401_enable_writeback_sequence - Queue steps that bring a writeback pipe
 * up and connect it to an MPCC.
 *
 * Order matters: DWBC parameters and MCIF_WB buffer/arbitration are
 * programmed first, MCIF_WB is enabled, the MPC writeback mux is connected,
 * and the DWBC is enabled last. No-op if the entry is disabled or the
 * dwb_pipe_inst is out of range.
 *
 * @dc: display core instance
 * @wb_info: writeback parameters (DWB and MCIF buffer configuration)
 * @context: DC state providing the MCIF_WB arbitration settings
 * @mpcc_inst: MPCC instance to route into the writeback path
 * @seq_state: block sequence being built
 */
void dcn401_enable_writeback_sequence(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct dc_state *context,
		int mpcc_inst,
		struct block_sequence_state *seq_state)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;

	if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
		return;

	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];

	/* Update DWBC with new parameters */
	hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);

	/* Configure MCIF_WB buffer settings */
	hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);

	/* Configure MCIF_WB arbitration */
	hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);

	/* Enable MCIF_WB */
	hwss_add_mcif_wb_enable(seq_state, mcif_wb);

	/* Set DWB MUX to connect writeback to MPCC */
	hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);

	/* Enable DWBC */
	hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
}
3275 
dcn401_disable_writeback_sequence(struct dc * dc,struct dc_writeback_info * wb_info,struct block_sequence_state * seq_state)3276 void dcn401_disable_writeback_sequence(
3277 		struct dc *dc,
3278 		struct dc_writeback_info *wb_info,
3279 		struct block_sequence_state *seq_state)
3280 {
3281 	struct dwbc *dwb;
3282 	struct mcif_wb *mcif_wb;
3283 
3284 	if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
3285 		return;
3286 
3287 	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
3288 	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
3289 
3290 	/* Disable DWBC */
3291 	hwss_add_dwbc_disable(seq_state, dwb);
3292 
3293 	/* Disable DWB MUX */
3294 	hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
3295 
3296 	/* Disable MCIF_WB */
3297 	hwss_add_mcif_wb_disable(seq_state, mcif_wb);
3298 }
3299 
/*
 * dcn401_update_writeback_sequence - Queue steps that update an already
 * enabled writeback pipe.
 *
 * Only the DWBC parameters and the MCIF_WB buffer settings are reprogrammed;
 * the pipe is assumed to be running, so no mux or enable steps are queued.
 * No-op if the entry is disabled or the dwb_pipe_inst is out of range.
 *
 * @dc: display core instance
 * @wb_info: updated writeback parameters
 * @context: DC state (unused here beyond the signature shared with enable)
 * @seq_state: block sequence being built
 */
void dcn401_update_writeback_sequence(
		struct dc *dc,
		struct dc_writeback_info *wb_info,
		struct dc_state *context,
		struct block_sequence_state *seq_state)
{
	struct dwbc *dwb;
	struct mcif_wb *mcif_wb;

	if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
		return;

	dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
	mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];

	/* Update writeback pipe */
	hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);

	/* Update MCIF_WB buffer settings if needed */
	hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
}
3321 
find_free_gsl_group(const struct dc * dc)3322 static int find_free_gsl_group(const struct dc *dc)
3323 {
3324 	if (dc->res_pool->gsl_groups.gsl_0 == 0)
3325 		return 1;
3326 	if (dc->res_pool->gsl_groups.gsl_1 == 0)
3327 		return 2;
3328 	if (dc->res_pool->gsl_groups.gsl_2 == 0)
3329 		return 3;
3330 
3331 	return 0;
3332 }
3333 
/*
 * dcn401_setup_gsl_group_as_lock_sequence - Acquire or release a GSL group
 * used as a pipe lock, and queue the timing-generator programming.
 *
 * On enable: allocates a free GSL group (asserting one exists), records it in
 * the pipe's stream_res and marks it used in the resource pool, then queues
 * TG programming with the master enable set. On disable: releases the group
 * and queues TG programming with everything cleared. Early-outs keep the
 * operation idempotent (already-assigned on enable, not-in-use on disable).
 *
 * @dc: display core instance (gsl_groups bookkeeping lives in its res_pool)
 * @pipe_ctx: pipe whose TG is programmed and which owns the group
 * @enable: true to set up the lock, false to tear it down
 * @seq_state: block sequence being built
 */
void dcn401_setup_gsl_group_as_lock_sequence(
		const struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool enable,
		struct block_sequence_state *seq_state)
{
	struct gsl_params gsl;
	int group_idx;

	memset(&gsl, 0, sizeof(struct gsl_params));

	if (enable) {
		/* return if group already assigned since GSL was set up
		 * for vsync flip, we would unassign so it can't be "left over"
		 */
		if (pipe_ctx->stream_res.gsl_group > 0)
			return;

		group_idx = find_free_gsl_group(dc);
		ASSERT(group_idx != 0);
		pipe_ctx->stream_res.gsl_group = group_idx;

		/* set gsl group reg field and mark resource used */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 1;
			dc->res_pool->gsl_groups.gsl_0 = 1;
			break;
		case 2:
			gsl.gsl1_en = 1;
			dc->res_pool->gsl_groups.gsl_1 = 1;
			break;
		case 3:
			gsl.gsl2_en = 1;
			dc->res_pool->gsl_groups.gsl_2 = 1;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return; // invalid case
		}
		gsl.gsl_master_en = 1;
	} else {
		group_idx = pipe_ctx->stream_res.gsl_group;
		if (group_idx == 0)
			return; // if not in use, just return

		pipe_ctx->stream_res.gsl_group = 0;

		/* unset gsl group reg field and mark resource free */
		switch (group_idx) {
		case 1:
			gsl.gsl0_en = 0;
			dc->res_pool->gsl_groups.gsl_0 = 0;
			break;
		case 2:
			gsl.gsl1_en = 0;
			dc->res_pool->gsl_groups.gsl_1 = 0;
			break;
		case 3:
			gsl.gsl2_en = 0;
			dc->res_pool->gsl_groups.gsl_2 = 0;
			break;
		default:
			BREAK_TO_DEBUGGER();
			return;
		}
		gsl.gsl_master_en = 0;
	}

	/* Queue TG programming; source select 4 on enable, 0 (off) on disable -
	 * NOTE(review): the meaning of source "4" is defined by the TG
	 * implementation; confirm against optc code.
	 */
	hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
	hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
}
3406 
/*
 * dcn401_disable_plane_sequence - Queue the full teardown of a plane's
 * front-end pipe.
 *
 * Waits for MPCC disconnect, releases any GSL lock group, gates MALL/flip
 * GSL/HUBP/DPP clocks, power gates the DPP+HUBP pair, then clears the
 * pipe_ctx bookkeeping. For phantom (SubVP) pipes, the phantom OTG is turned
 * back off after the plane is fully disabled. No-op if the HUBP is missing
 * or already power gated.
 *
 * @dc: display core instance
 * @state: DC state used to query the pipe's SubVP type
 * @pipe_ctx: pipe being disabled; its stream/plane resources are cleared here
 * @seq_state: block sequence being built
 */
void dcn401_disable_plane_sequence(
		struct dc *dc,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx,
		struct block_sequence_state *seq_state)
{
	bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
	/* Capture the TG before stream_res is wiped below */
	struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;

	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
		return;

	/* Wait for MPCC disconnect */
	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);

	/* In flip immediate with pipe splitting case GSL is used for synchronization
	 * so we must disable it when the plane is disabled.
	 */
	if (pipe_ctx->stream_res.gsl_group != 0)
		dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);

	/* Update HUBP mall sel */
	if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
		hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);

	/* Set flip control GSL */
	hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);

	/* HUBP clock control */
	hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);

	/* DPP clock control */
	hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);

	/* Plane atomic power down */
	if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
		dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp, seq_state);

	/* Clear software state; the pipe no longer owns any resources */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->prev_odm_pipe = NULL;
	pipe_ctx->next_odm_pipe = NULL;
	pipe_ctx->plane_state = NULL;

	/* Turn back off the phantom OTG after the phantom plane is fully disabled */
	if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
		hwss_add_disable_phantom_crtc(seq_state, tg);
}
3460 
/*
 * dcn401_post_unlock_reset_opp_sequence - Queue OPP reset steps that must run
 * after the pipe lock is released.
 *
 * Waits for all DPP pipes in the current MPC blending tree to finish their
 * double-buffered disconnect, then, if a DSC is attached to the OPP head,
 * queues its disconnect-pending wait, disable, and reference DSCCLK teardown.
 *
 * @dc: display core instance
 * @opp_head: OPP head pipe whose DSC (if any) is reset
 * @seq_state: block sequence being built
 */
void dcn401_post_unlock_reset_opp_sequence(
		struct dc *dc,
		struct pipe_ctx *opp_head,
		struct block_sequence_state *seq_state)
{
	struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
	struct dccg *dccg = dc->res_pool->dccg;

	/* Wait for all DPP pipes in current mpc blending tree completes double
	 * buffered disconnection before resetting OPP
	 */
	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);

	if (dsc) {
		/* NOTE(review): is_ungated stays NULL here; presumably the
		 * block-sequence dsc_pg_status step fills in the gating state
		 * consumed by the later DSC steps - verify against the
		 * hwss_add_dsc_* implementations.
		 */
		bool *is_ungated = NULL;
		/* Check DSC power gate status */
		if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
			hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);

		/* Seamless update specific where we will postpone non
		 * double buffered DSCCLK disable logic in post unlock
		 * sequence after DSC is disconnected from OPP but not
		 * yet power gated.
		 */

		/* DSC wait disconnect pending clear */
		hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);

		/* DSC disable */
		hwss_add_dsc_disable(seq_state, dsc, is_ungated);

		/* Set reference DSCCLK */
		if (dccg && dccg->funcs->set_ref_dscclk)
			hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
	}
}
3498 
/*
 * dcn401_dc_ip_request_cntl - Drive the DC_IP_REQUEST_CNTL enable bit.
 *
 * Writes IP_REQUEST_EN to 1 or 0 as requested; a no-op on configurations
 * where the register is not mapped.
 */
void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (!REG(DC_IP_REQUEST_CNTL))
		return;

	REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
}
3506 
/*
 * dcn401_enable_plane_sequence - Queue the front-end power-up steps for a
 * plane's pipe.
 *
 * Ungates the DPP root clock, powers on the DPP and HUBP behind a temporary
 * DC_IP_REQUEST_CNTL assertion (restored afterwards if it was originally 0),
 * enables HUBP/OPP clocks, initializes the HUBP, programs the VM system
 * aperture when a valid VM config exists, and arms the flip interrupt for
 * top pipes that request it. No-op if DPP, HUBP, or OPP is missing.
 *
 * @dc: display core instance
 * @pipe_ctx: pipe being enabled
 * @context: DC state (currently unused beyond the shared signature)
 * @seq_state: block sequence being built
 */
void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
				 struct dc_state *context,
				 struct block_sequence_state *seq_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	uint32_t org_ip_request_cntl = 0;

	if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
		return;

	/* Remember the current IP_REQUEST_EN so Step 5 can restore it */
	if (REG(DC_IP_REQUEST_CNTL))
		REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);

	/* Step 1: DPP root clock control - enable clock */
	if (hws->funcs.dpp_root_clock_control)
		hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);

	/* Step 2: Enable DC IP request (if needed) */
	if (hws->funcs.dc_ip_request_cntl)
		hwss_add_dc_ip_request_cntl(seq_state, dc, true);

	/* Step 3: DPP power gating control - power on */
	if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
		hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);

	/* Step 4: HUBP power gating control - power on */
	if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
		hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);

	/* Step 5: Disable DC IP request (restore state) */
	if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
		hwss_add_dc_ip_request_cntl(seq_state, dc, false);

	/* Step 6: HUBP clock control - enable DCFCLK */
	if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
		hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);

	/* Step 7: HUBP initialization */
	if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
		hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);

	/* Step 8: OPP pipe clock control - enable */
	if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
		hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);

	/* Step 9: VM system aperture settings */
	if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
		hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
			dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
	}

	/* Step 10: Flip interrupt setup (top pipe only) */
	if (!pipe_ctx->top_pipe
			&& pipe_ctx->plane_state
			&& pipe_ctx->plane_state->flip_int_enabled
			&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
		hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
	}
}
3566 
/*
 * dcn401_update_dchubp_dpp_sequence - Queue the full HUBP/DPP reprogramming
 * for a pipe, driven by its update flags.
 *
 * Each step below is gated on the pipe/plane/stream update flags so only the
 * state that actually changed is reprogrammed: clocks and DTOs, HUBP
 * RQ/DLG/TTU setup, DPP format/CSC, MPCC blending, scaler, viewport, mcache,
 * cursor, gamut remap/output CSC, surface config, plane address (with SubVP
 * surface-address save for main pipes), and finally unblank / phantom
 * post-enable. No-op if HUBP, DPP, or plane_state is missing.
 *
 * @dc: display core instance
 * @pipe_ctx: pipe to reprogram
 * @context: DC state being applied (also compared against dc->current_state
 *           for position/scaling updates)
 * @seq_state: block sequence being built
 */
void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
				       struct pipe_ctx *pipe_ctx,
				       struct dc_state *context,
				       struct block_sequence_state *seq_state)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct dccg *dccg = dc->res_pool->dccg;
	bool viewport_changed = false;
	enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);

	if (!hubp || !dpp || !plane_state)
		return;

	/* Step 1: DPP DPPCLK control */
	if (pipe_ctx->update_flags.bits.dppclk)
		hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);

	/* Step 2: DCCG update DPP DTO */
	if (pipe_ctx->update_flags.bits.enable)
		hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);

	/* Step 3: HUBP VTG selection */
	if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
		hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);

		/* Step 4: HUBP setup (prefer the newer setup2 interface) */
		if (hubp->funcs->hubp_setup2) {
			hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
				&pipe_ctx->global_sync, &pipe_ctx->stream->timing);
		} else if (hubp->funcs->hubp_setup) {
			hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
				&pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
		}
	}

	/* Step 5: Set unbounded requesting */
	if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
		hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req)

	/* Step 6: HUBP interdependent setup (prefer the newer interface) */
	if (pipe_ctx->update_flags.bits.hubp_interdependent) {
		if (hubp->funcs->hubp_setup_interdependent2)
			hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
		else if (hubp->funcs->hubp_setup_interdependent)
			hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
	}

	/* Step 7: DPP setup - input CSC and format setup */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.input_csc_change ||
			plane_state->update_flags.bits.color_space_change ||
			plane_state->update_flags.bits.coeff_reduction_change) {
		hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);

		/* Step 8: DPP cursor matrix setup */
		if (dpp->funcs->set_cursor_matrix) {
			hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
				&plane_state->cursor_csc_color_matrix);
		}

		/* Step 9: DPP program bias and scale */
		if (dpp->funcs->dpp_program_bias_and_scale)
			hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
	}

	/* Step 10: MPCC updates */
	if (pipe_ctx->update_flags.bits.mpcc ||
	     pipe_ctx->update_flags.bits.plane_changed ||
	     plane_state->update_flags.bits.global_alpha_change ||
	     plane_state->update_flags.bits.per_pixel_alpha_change) {

		/* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */
		if (hws->funcs.update_mpcc_sequence)
			hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
	}

	/* Step 11: DPP scaler setup */
	if (pipe_ctx->update_flags.bits.scaler ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			pipe_ctx->stream->update_flags.bits.scaling) {
		pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
		ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
		hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
	}

	/* Step 12: HUBP viewport programming (position/scaling changes only
	 * matter when applying to the current state)
	 */
	if (pipe_ctx->update_flags.bits.viewport ||
	     (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
	     (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
	     (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
		hwss_add_hubp_mem_program_viewport(seq_state, hubp,
			&pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
		viewport_changed = true;
	}

	/* Step 13: HUBP program mcache if available */
	if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
		hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);

	/* Step 14: Cursor attribute setup */
	if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
	     pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
	    pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {

		hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);

		hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);

		/* Step 15: Cursor position setup */
		hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);

		/* Step 16: Cursor SDR white level */
		if (dc->hwss.set_cursor_sdr_white_level)
			hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
	}

	/* Step 17: Gamut remap and output CSC */
	if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->stream->update_flags.bits.gamut_remap ||
			plane_state->update_flags.bits.gamut_remap_change ||
			pipe_ctx->stream->update_flags.bits.out_csc) {

		/* Gamut remap */
		hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);

		/* Output CSC */
		hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
			pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
	}

	/* Step 18: HUBP surface configuration */
	if (pipe_ctx->update_flags.bits.enable ||
			pipe_ctx->update_flags.bits.plane_changed ||
			pipe_ctx->update_flags.bits.opp_changed ||
			plane_state->update_flags.bits.pixel_format_change ||
			plane_state->update_flags.bits.horizontal_mirror_change ||
			plane_state->update_flags.bits.rotation_change ||
			plane_state->update_flags.bits.swizzle_change ||
			plane_state->update_flags.bits.dcc_change ||
			plane_state->update_flags.bits.bpp_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.plane_size_change) {
		struct plane_size size = plane_state->plane_size;

		size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
		hwss_add_hubp_program_surface_config(seq_state, hubp,
				plane_state->format, &plane_state->tiling_info, size,
				plane_state->rotation, &plane_state->dcc,
				plane_state->horizontal_mirror, 0);
		hubp->power_gated = false;
	}

	/* Step 19: Update plane address (with SubVP support) */
	if (pipe_ctx->update_flags.bits.enable ||
	     pipe_ctx->update_flags.bits.plane_changed ||
	     plane_state->update_flags.bits.addr_update) {

		/* SubVP save surface address if needed */
		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
			hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
				&pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
		}

		/* Update plane address */
		hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
	}

	/* Step 20: HUBP set blank - enable plane */
	if (pipe_ctx->update_flags.bits.enable)
		hwss_add_hubp_set_blank(seq_state, hubp, false);

	/* Step 21: Phantom HUBP post enable */
	if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
		hwss_add_phantom_hubp_post_enable(seq_state, hubp);
}
3750 
dcn401_update_mpcc_sequence(struct dc * dc,struct pipe_ctx * pipe_ctx,struct block_sequence_state * seq_state)3751 void dcn401_update_mpcc_sequence(struct dc *dc,
3752 				struct pipe_ctx *pipe_ctx,
3753 				struct block_sequence_state *seq_state)
3754 {
3755 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3756 	struct mpcc_blnd_cfg blnd_cfg = {0};
3757 	bool per_pixel_alpha;
3758 	int mpcc_id;
3759 	struct mpcc *new_mpcc;
3760 	struct mpc *mpc = dc->res_pool->mpc;
3761 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
3762 
3763 	if (!hubp || !pipe_ctx->plane_state)
3764 		return;
3765 
3766 	per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
3767 
3768 	/* Initialize blend configuration */
3769 	blnd_cfg.overlap_only = false;
3770 	blnd_cfg.global_gain = 0xff;
3771 
3772 	if (per_pixel_alpha) {
3773 		blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
3774 		if (pipe_ctx->plane_state->global_alpha) {
3775 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
3776 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
3777 		} else {
3778 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
3779 		}
3780 	} else {
3781 		blnd_cfg.pre_multiplied_alpha = false;
3782 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
3783 	}
3784 
3785 	if (pipe_ctx->plane_state->global_alpha)
3786 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
3787 	else
3788 		blnd_cfg.global_alpha = 0xff;
3789 
3790 	blnd_cfg.background_color_bpc = 4;
3791 	blnd_cfg.bottom_gain_mode = 0;
3792 	blnd_cfg.top_gain = 0x1f000;
3793 	blnd_cfg.bottom_inside_gain = 0x1f000;
3794 	blnd_cfg.bottom_outside_gain = 0x1f000;
3795 
3796 	if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
3797 		blnd_cfg.pre_multiplied_alpha = false;
3798 
3799 	/* MPCC instance is equal to HUBP instance */
3800 	mpcc_id = hubp->inst;
3801 
3802 	/* Step 1: Update blending if no full update needed */
3803 	if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
3804 	    !pipe_ctx->update_flags.bits.mpcc) {
3805 
3806 		/* Update blending configuration */
3807 		hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
3808 
3809 		/* Update visual confirm color */
3810 		hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
3811 		return;
3812 	}
3813 
3814 	/* Step 2: Get existing MPCC for DPP */
3815 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
3816 
3817 	/* Step 3: Remove MPCC if being used */
3818 	if (new_mpcc != NULL) {
3819 		hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
3820 	} else {
3821 		/* Step 4: Assert MPCC idle (debug only) */
3822 		if (dc->debug.sanity_checks)
3823 			hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
3824 	}
3825 
3826 	/* Step 5: Insert new plane into MPC tree */
3827 	hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
3828 
3829 	/* Step 6: Update visual confirm color */
3830 	hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
3831 
3832 	/* Step 7: Set HUBP OPP and MPCC IDs */
3833 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
3834 	hubp->mpcc_id = mpcc_id;
3835 }
3836 
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3837 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3838 {
3839 	int i;
3840 
3841 	for (i = 0; i < res_pool->pipe_count; i++) {
3842 		if (res_pool->hubps[i]->inst == mpcc_inst)
3843 			return res_pool->hubps[i];
3844 	}
3845 	ASSERT(false);
3846 	return NULL;
3847 }
3848 
dcn401_wait_for_mpcc_disconnect_sequence(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx,struct block_sequence_state * seq_state)3849 void dcn401_wait_for_mpcc_disconnect_sequence(
3850 		struct dc *dc,
3851 		struct resource_pool *res_pool,
3852 		struct pipe_ctx *pipe_ctx,
3853 		struct block_sequence_state *seq_state)
3854 {
3855 	int mpcc_inst;
3856 
3857 	if (dc->debug.sanity_checks)
3858 		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
3859 
3860 	if (!pipe_ctx->stream_res.opp)
3861 		return;
3862 
3863 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3864 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3865 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3866 
3867 			if (pipe_ctx->stream_res.tg &&
3868 				pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
3869 				hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
3870 			}
3871 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3872 			if (hubp)
3873 				hwss_add_hubp_set_blank(seq_state, hubp, true);
3874 		}
3875 	}
3876 
3877 	if (dc->debug.sanity_checks)
3878 		dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
3879 }
3880 
dcn401_setup_vupdate_interrupt_sequence(struct dc * dc,struct pipe_ctx * pipe_ctx,struct block_sequence_state * seq_state)3881 void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
3882 		struct block_sequence_state *seq_state)
3883 {
3884 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3885 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3886 
3887 	if (start_line < 0)
3888 		start_line = 0;
3889 
3890 	if (tg->funcs->setup_vertical_interrupt2)
3891 		hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
3892 }
3893 
dcn401_set_hdr_multiplier_sequence(struct pipe_ctx * pipe_ctx,struct block_sequence_state * seq_state)3894 void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
3895 		struct block_sequence_state *seq_state)
3896 {
3897 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
3898 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
3899 	struct custom_float_format fmt;
3900 
3901 	fmt.exponenta_bits = 6;
3902 	fmt.mantissa_bits = 12;
3903 	fmt.sign = true;
3904 
3905 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
3906 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
3907 
3908 	hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
3909 }
3910 
dcn401_program_mall_pipe_config_sequence(struct dc * dc,struct dc_state * context,struct block_sequence_state * seq_state)3911 void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
3912 		struct block_sequence_state *seq_state)
3913 {
3914 	int i;
3915 	unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
3916 	bool cache_cursor = false;
3917 
3918 	// Don't force p-state disallow -- can't block dummy p-state
3919 
3920 	// Update MALL_SEL register for each pipe (break down update_mall_sel call)
3921 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3922 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3923 		struct hubp *hubp = pipe->plane_res.hubp;
3924 
3925 		if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
3926 			int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
3927 
3928 			switch (hubp->curs_attr.color_format) {
3929 			case CURSOR_MODE_MONO:
3930 				cursor_size /= 2;
3931 				break;
3932 			case CURSOR_MODE_COLOR_1BIT_AND:
3933 			case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
3934 			case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
3935 				cursor_size *= 4;
3936 				break;
3937 
3938 			case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
3939 			case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
3940 			default:
3941 				cursor_size *= 8;
3942 				break;
3943 			}
3944 
3945 			if (cursor_size > 16384)
3946 				cache_cursor = true;
3947 
3948 			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
3949 				hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
3950 			} else {
3951 				// MALL not supported with Stereo3D
3952 				uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
3953 					pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
3954 					pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
3955 					!pipe->plane_state->address.tmz_surface) ? 2 : 0;
3956 				hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
3957 			}
3958 		}
3959 	}
3960 
3961 	// Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
3962 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3963 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3964 		struct hubp *hubp = pipe->plane_res.hubp;
3965 
3966 		if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
3967 			if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
3968 				hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
3969 		}
3970 	}
3971 }
3972 
dcn401_verify_allow_pstate_change_high_sequence(struct dc * dc,struct block_sequence_state * seq_state)3973 void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
3974 		struct block_sequence_state *seq_state)
3975 {
3976 	struct hubbub *hubbub = dc->res_pool->hubbub;
3977 
3978 	if (!hubbub->funcs->verify_allow_pstate_change_high)
3979 		return;
3980 
3981 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
3982 		/* Attempt hardware workaround force recovery */
3983 		dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
3984 	}
3985 }
3986 
dcn401_hw_wa_force_recovery_sequence(struct dc * dc,struct block_sequence_state * seq_state)3987 bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
3988 		struct block_sequence_state *seq_state)
3989 {
3990 	struct hubp *hubp;
3991 	unsigned int i;
3992 
3993 	if (!dc->debug.recovery_enabled)
3994 		return false;
3995 
3996 	/* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
3997 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3998 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
3999 
4000 		if (pipe_ctx != NULL) {
4001 			hubp = pipe_ctx->plane_res.hubp;
4002 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
4003 				hwss_add_hubp_set_blank_en(seq_state, hubp, true);
4004 		}
4005 	}
4006 
4007 	/* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
4008 	hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
4009 
4010 	/* Step 3: Set HUBP_DISABLE=1 for all active pipes */
4011 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4012 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
4013 
4014 		if (pipe_ctx != NULL) {
4015 			hubp = pipe_ctx->plane_res.hubp;
4016 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
4017 				hwss_add_hubp_disable_control(seq_state, hubp, true);
4018 		}
4019 	}
4020 
4021 	/* Step 4: Set HUBP_DISABLE=0 for all active pipes */
4022 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4023 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
4024 
4025 		if (pipe_ctx != NULL) {
4026 			hubp = pipe_ctx->plane_res.hubp;
4027 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
4028 				hwss_add_hubp_disable_control(seq_state, hubp, false);
4029 		}
4030 	}
4031 
4032 	/* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
4033 	hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
4034 
4035 	/* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
4036 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
4037 		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
4038 
4039 		if (pipe_ctx != NULL) {
4040 			hubp = pipe_ctx->plane_res.hubp;
4041 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
4042 				hwss_add_hubp_set_blank_en(seq_state, hubp, false);
4043 		}
4044 	}
4045 
4046 	return true;
4047 }
4048