xref: /linux/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c (revision 92c4c9fdc838d3b41a996bb700ea64b9e78fc7ea)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 #include "dcn32_fpu.h"
27 #include "dcn32/dcn32_resource.h"
28 #include "dcn20/dcn20_resource.h"
29 #include "display_mode_vba_util_32.h"
30 #include "dml/dcn32/display_mode_vba_32.h"
31 // We need this include for the WATERMARKS_* defines
32 #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
33 #include "dcn30/dcn30_resource.h"
34 #include "link_service.h"
35 #include "dc_state_priv.h"
36 
37 #define DC_LOGGER_INIT(logger)
38 
39 static const struct subvp_high_refresh_list subvp_high_refresh_list = {
40 			.min_refresh = 120,
41 			.max_refresh = 175,
42 			.res = {
43 				{.width = 3840, .height = 2160, },
44 				{.width = 3440, .height = 1440, },
45 				{.width = 2560, .height = 1440, },
46 				{.width = 1920, .height = 1080, }},
47 };
48 
49 static const struct subvp_active_margin_list subvp_active_margin_list = {
50 			.min_refresh = 55,
51 			.max_refresh = 65,
52 			.res = {
53 				{.width = 2560, .height = 1440, },
54 				{.width = 1920, .height = 1080, }},
55 };
56 
57 struct _vcs_dpi_ip_params_st dcn3_2_ip = {
58 	.gpuvm_enable = 0,
59 	.gpuvm_max_page_table_levels = 4,
60 	.hostvm_enable = 0,
61 	.rob_buffer_size_kbytes = 128,
62 	.det_buffer_size_kbytes = DCN3_2_DEFAULT_DET_SIZE,
63 	.config_return_buffer_size_in_kbytes = 1280,
64 	.compressed_buffer_segment_size_in_kbytes = 64,
65 	.meta_fifo_size_in_kentries = 22,
66 	.zero_size_buffer_entries = 512,
67 	.compbuf_reserved_space_64b = 256,
68 	.compbuf_reserved_space_zs = 64,
69 	.dpp_output_buffer_pixels = 2560,
70 	.opp_output_buffer_lines = 1,
71 	.pixel_chunk_size_kbytes = 8,
72 	.alpha_pixel_chunk_size_kbytes = 4,
73 	.min_pixel_chunk_size_bytes = 1024,
74 	.dcc_meta_buffer_size_bytes = 6272,
75 	.meta_chunk_size_kbytes = 2,
76 	.min_meta_chunk_size_bytes = 256,
77 	.writeback_chunk_size_kbytes = 8,
78 	.ptoi_supported = false,
79 	.num_dsc = 4,
80 	.maximum_dsc_bits_per_component = 12,
81 	.maximum_pixels_per_line_per_dsc_unit = 6016,
82 	.dsc422_native_support = true,
83 	.is_line_buffer_bpp_fixed = true,
84 	.line_buffer_fixed_bpp = 57,
85 	.line_buffer_size_bits = 1171920,
86 	.max_line_buffer_lines = 32,
87 	.writeback_interface_buffer_size_kbytes = 90,
88 	.max_num_dpp = 4,
89 	.max_num_otg = 4,
90 	.max_num_hdmi_frl_outputs = 1,
91 	.max_num_wb = 1,
92 	.max_dchub_pscl_bw_pix_per_clk = 4,
93 	.max_pscl_lb_bw_pix_per_clk = 2,
94 	.max_lb_vscl_bw_pix_per_clk = 4,
95 	.max_vscl_hscl_bw_pix_per_clk = 4,
96 	.max_hscl_ratio = 6,
97 	.max_vscl_ratio = 6,
98 	.max_hscl_taps = 8,
99 	.max_vscl_taps = 8,
100 	.dpte_buffer_size_in_pte_reqs_luma = 64,
101 	.dpte_buffer_size_in_pte_reqs_chroma = 34,
102 	.dispclk_ramp_margin_percent = 1,
103 	.max_inter_dcn_tile_repeaters = 8,
104 	.cursor_buffer_size = 16,
105 	.cursor_chunk_size = 2,
106 	.writeback_line_buffer_buffer_size = 0,
107 	.writeback_min_hscl_ratio = 1,
108 	.writeback_min_vscl_ratio = 1,
109 	.writeback_max_hscl_ratio = 1,
110 	.writeback_max_vscl_ratio = 1,
111 	.writeback_max_hscl_taps = 1,
112 	.writeback_max_vscl_taps = 1,
113 	.dppclk_delay_subtotal = 47,
114 	.dppclk_delay_scl = 50,
115 	.dppclk_delay_scl_lb_only = 16,
116 	.dppclk_delay_cnvc_formatter = 28,
117 	.dppclk_delay_cnvc_cursor = 6,
118 	.dispclk_delay_subtotal = 125,
119 	.dynamic_metadata_vm_enabled = false,
120 	.odm_combine_4to1_supported = false,
121 	.dcc_supported = true,
122 	.max_num_dp2p0_outputs = 2,
123 	.max_num_dp2p0_streams = 4,
124 };
125 
126 struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
127 	.clock_limits = {
128 		{
129 			.state = 0,
130 			.dcfclk_mhz = 1564.0,
131 			.fabricclk_mhz = 2500.0,
132 			.dispclk_mhz = 2150.0,
133 			.dppclk_mhz = 2150.0,
134 			.phyclk_mhz = 810.0,
135 			.phyclk_d18_mhz = 667.0,
136 			.phyclk_d32_mhz = 625.0,
137 			.socclk_mhz = 1200.0,
138 			.dscclk_mhz = 716.667,
139 			.dram_speed_mts = 18000.0,
140 			.dtbclk_mhz = 1564.0,
141 		},
142 	},
143 	.num_states = 1,
144 	.sr_exit_time_us = 42.97,
145 	.sr_enter_plus_exit_time_us = 49.94,
146 	.sr_exit_z8_time_us = 285.0,
147 	.sr_enter_plus_exit_z8_time_us = 320,
148 	.writeback_latency_us = 12.0,
149 	.round_trip_ping_latency_dcfclk_cycles = 263,
150 	.urgent_latency_pixel_data_only_us = 4.0,
151 	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
152 	.urgent_latency_vm_data_only_us = 4.0,
153 	.fclk_change_latency_us = 25,
154 	.usr_retraining_latency_us = 2,
155 	.smn_latency_us = 2,
156 	.mall_allocated_for_dcn_mbytes = 64,
157 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
158 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
159 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
160 	.pct_ideal_sdp_bw_after_urgent = 90.0,
161 	.pct_ideal_fabric_bw_after_urgent = 67.0,
162 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
163 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
164 	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
165 	.pct_ideal_dram_bw_after_urgent_strobe = 67.0,
166 	.max_avg_sdp_bw_use_normal_percent = 80.0,
167 	.max_avg_fabric_bw_use_normal_percent = 60.0,
168 	.max_avg_dram_bw_use_normal_strobe_percent = 50.0,
169 	.max_avg_dram_bw_use_normal_percent = 15.0,
170 	.num_chans = 24,
171 	.dram_channel_width_bytes = 2,
172 	.fabric_datapath_to_dcn_data_return_bytes = 64,
173 	.return_bus_width_bytes = 64,
174 	.downspread_percent = 0.38,
175 	.dcn_downspread_percent = 0.5,
176 	.dram_clock_change_latency_us = 400,
177 	.dispclk_dppclk_vco_speed_mhz = 4300.0,
178 	.do_urgent_latency_adjustment = true,
179 	.urgent_latency_adjustment_fabric_clock_component_us = 1.0,
180 	.urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
181 };
182 
183 static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context,
184 	bool *repopulate_pipes, int *split, bool *merge);
185 
186 void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
187 {
188 	/* defaults */
189 	double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
190 	double fclk_change_latency_us = clk_mgr->base.ctx->dc->dml.soc.fclk_change_latency_us;
191 	double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
192 	double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
193 	/* For min clocks, use the values reported by PM FW and report those as min */
194 	uint16_t min_uclk_mhz			= clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
195 	uint16_t min_dcfclk_mhz			= clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
196 	uint16_t setb_min_uclk_mhz		= min_uclk_mhz;
197 	uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->base.ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;
198 
199 	dc_assert_fp_enabled();
200 
201 	/* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
202 	if (dcfclk_mhz_for_the_second_state)
203 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
204 	else
205 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
206 
207 	if (clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz)
208 		setb_min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz;
209 
210 	/* Set A - Normal - default values */
211 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
212 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
213 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
214 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
215 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
216 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
217 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
218 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
219 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
220 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;
221 
222 	/* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
223 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
224 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
225 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
226 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
227 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
228 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
229 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
230 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
231 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;
232 
233 	/* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
234 	/* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
235 	if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
236 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
237 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50;
238 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
239 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
240 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
241 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
242 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
243 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
244 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
245 		clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
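		/* The dummy p-state table below stores DRAM speed in MT/s; the factor
		 * of 16 converts each memclk DPM entry from MHz into those units.
		 * E.g. a hypothetical 1250 MHz memclk entry would be stored as
		 * 1250 * 16 = 20000 MT/s.
		 */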
246 		clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
247 		clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50;
248 		clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
249 		clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
250 		clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
251 		clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
252 		clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[3].memclk_mhz * 16;
253 		clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
254 	}
255 	/* Set D - MALL - SR enter and exit times specific to MALL are TBD after bringup or a later phase; for now use the DRAM values / 2 */
256 	/* For MALL, DRAM clock change latency is N/A; for watermark calculations use the lowest dummy P-state latency */
257 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
258 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
259 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
260 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD
261 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD
262 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
263 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
264 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
265 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
266 	clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
267 }
268 
269 /*
270  * Finds dummy_latency_index when MCLK switching using firmware-based
271  * vblank stretch is enabled. This function iterates through the table
272  * of dummy pstate latencies until it finds the lowest value that
273  * allows dm_allow_self_refresh_and_mclk_switch to happen.
274  */
275 int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
276 							    struct dc_state *context,
277 							    display_e2e_pipe_params_st *pipes,
278 							    int pipe_cnt,
279 							    int vlevel)
280 {
281 	const int max_latency_table_entries = 4;
282 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
283 	int dummy_latency_index = 0;
284 	enum clock_change_support temp_clock_change_support = vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
285 
286 	dc_assert_fp_enabled();
287 
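	/* Illustrative walk-through, assuming the default dummy p-state table
	 * built in dcn32_build_wm_range_table_fpu ({50, 9, 8, 5} us): the loop
	 * revalidates with 50 us first, then 9, 8 and finally 5 us, stopping at
	 * the first latency for which DML still reports the DRAM clock change
	 * as supported.
	 */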
288 	while (dummy_latency_index < max_latency_table_entries) {
289 		if (temp_clock_change_support != dm_dram_clock_change_unsupported)
290 			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
291 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
292 				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
293 		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
294 
295 		/* For the subvp + DRR case, if subvp pipes are still present we support pstate */
296 		if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
297 				dcn32_subvp_in_use(dc, context))
298 			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
299 
300 		if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
301 				vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
302 			break;
303 
304 		dummy_latency_index++;
305 	}
306 
307 	if (dummy_latency_index == max_latency_table_entries) {
308 		ASSERT(dummy_latency_index != max_latency_table_entries);
309 		/* If the execution gets here, it means dummy p_states are
310 		 * not possible. This should never happen and would mean
311 		 * something is severely wrong.
312 		 * Here we reset dummy_latency_index to 3, because it is
313 		 * better to have underflows than system crashes.
314 		 */
315 		dummy_latency_index = max_latency_table_entries - 1;
316 	}
317 
318 	return dummy_latency_index;
319 }
320 
321 /**
322  * dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
323  * and populate pipe_ctx with those params.
324  * @dc: [in] current dc state
325  * @context: [in] new dc state
326  * @pipes: [in] DML pipe params array
327  * @pipe_cnt: [in] DML pipe count
328  *
329  * This function must be called AFTER the phantom pipes are added to context
330  * and run through DML (so that the DLG params for the phantom pipes can be
331  * populated), and BEFORE we program the timing for the phantom pipes.
332  */
333 void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
334 					      struct dc_state *context,
335 					      display_e2e_pipe_params_st *pipes,
336 					      int pipe_cnt)
337 {
338 	uint32_t i, pipe_idx;
339 
340 	dc_assert_fp_enabled();
341 
342 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
343 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
344 
345 		if (!pipe->stream)
346 			continue;
347 
348 		if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
349 			pipes[pipe_idx].pipe.dest.vstartup_start =
350 				get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
351 			pipes[pipe_idx].pipe.dest.vupdate_offset =
352 				get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
353 			pipes[pipe_idx].pipe.dest.vupdate_width =
354 				get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
355 			pipes[pipe_idx].pipe.dest.vready_offset =
356 				get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
357 			pipe->pipe_dlg_param = pipes[pipe_idx].pipe.dest;
358 		}
359 		pipe_idx++;
360 	}
361 }
362 
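/* The net bandwidth of a voltage state is the minimum of its derated DRAM,
 * fabric and SDP bandwidths. As an illustrative calculation with the
 * dcn3_2_soc defaults above, a hypothetical entry with dram_speed_mts = 18000
 * contributes a memory term of 18000 * 24 channels * 2 bytes * 0.20 derating,
 * and the smallest of the three terms is returned.
 */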
363 static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
364 {
365 	float memory_bw_kbytes_sec;
366 	float fabric_bw_kbytes_sec;
367 	float sdp_bw_kbytes_sec;
368 	float limiting_bw_kbytes_sec;
369 
370 	memory_bw_kbytes_sec = entry->dram_speed_mts *
371 				dcn3_2_soc.num_chans *
372 				dcn3_2_soc.dram_channel_width_bytes *
373 				((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
374 
375 	fabric_bw_kbytes_sec = entry->fabricclk_mhz *
376 				dcn3_2_soc.return_bus_width_bytes *
377 				((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);
378 
379 	sdp_bw_kbytes_sec = entry->dcfclk_mhz *
380 				dcn3_2_soc.return_bus_width_bytes *
381 				((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);
382 
383 	limiting_bw_kbytes_sec = memory_bw_kbytes_sec;
384 
385 	if (fabric_bw_kbytes_sec < limiting_bw_kbytes_sec)
386 		limiting_bw_kbytes_sec = fabric_bw_kbytes_sec;
387 
388 	if (sdp_bw_kbytes_sec < limiting_bw_kbytes_sec)
389 		limiting_bw_kbytes_sec = sdp_bw_kbytes_sec;
390 
391 	return limiting_bw_kbytes_sec;
392 }
393 
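/* Completes a partially specified voltage state so that all three data paths
 * deliver the same ideal bandwidth. For example, when only dcfclk_mhz is set,
 * the SDP bandwidth dcfclk * return_bus_width * 0.90 is computed first, and
 * fabricclk_mhz and dram_speed_mts are then back-solved from their own
 * derating factors so that each path matches that bandwidth.
 */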
394 static void get_optimal_ntuple(struct _vcs_dpi_voltage_scaling_st *entry)
395 {
396 	if (entry->dcfclk_mhz > 0) {
397 		float bw_on_sdp = entry->dcfclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100);
398 
399 		entry->fabricclk_mhz = bw_on_sdp / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
400 		entry->dram_speed_mts = bw_on_sdp / (dcn3_2_soc.num_chans *
401 				dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
402 	} else if (entry->fabricclk_mhz > 0) {
403 		float bw_on_fabric = entry->fabricclk_mhz * dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100);
404 
405 		entry->dcfclk_mhz = bw_on_fabric / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
406 		entry->dram_speed_mts = bw_on_fabric / (dcn3_2_soc.num_chans *
407 				dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100));
408 	} else if (entry->dram_speed_mts > 0) {
409 		float bw_on_dram = entry->dram_speed_mts * dcn3_2_soc.num_chans *
410 				dcn3_2_soc.dram_channel_width_bytes * ((float)dcn3_2_soc.pct_ideal_dram_sdp_bw_after_urgent_pixel_only / 100);
411 
412 		entry->fabricclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_fabric_bw_after_urgent / 100));
413 		entry->dcfclk_mhz = bw_on_dram / (dcn3_2_soc.return_bus_width_bytes * ((float)dcn3_2_soc.pct_ideal_sdp_bw_after_urgent / 100));
414 	}
415 }
416 
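/* Keeps the clock table ordered by ascending net_bw_in_kbytes_sec. As a small
 * illustration, inserting an entry with net bw 150 into a table holding
 * {100, 200} shifts 200 up by one slot and places the new entry at index 1,
 * yielding {100, 150, 200}.
 */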
417 static void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
418 				    unsigned int *num_entries,
419 				    struct _vcs_dpi_voltage_scaling_st *entry)
420 {
421 	int i = 0;
422 	int index = 0;
423 
424 	dc_assert_fp_enabled();
425 
426 	if (*num_entries == 0) {
427 		table[0] = *entry;
428 		(*num_entries)++;
429 	} else {
430 		while (entry->net_bw_in_kbytes_sec > table[index].net_bw_in_kbytes_sec) {
431 			index++;
432 			if (index >= *num_entries)
433 				break;
434 		}
435 
436 		for (i = *num_entries; i > index; i--)
437 			table[i] = table[i - 1];
438 
439 		table[index] = *entry;
440 		(*num_entries)++;
441 	}
442 }
443 
444 /**
445  * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
446  * @dc: current dc state
447  * @context: new dc state
448  * @ref_pipe: Main pipe for the phantom stream
449  * @phantom_stream: target phantom stream state
450  * @pipes: DML pipe params
451  * @pipe_cnt: number of DML pipes
452  * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
453  *
454  * Set timing params of the phantom stream based on calculated output from DML.
455  * This function first gets the DML pipe index using the DC pipe index, then
456  * calls into DML (get_subviewport_lines_needed_in_mall) to get the number of
457  * lines required for SubVP MCLK switching and assigns to the phantom stream
458  * accordingly.
459  *
460  * - The number of SubVP lines calculated in DML does not take into account
461  * FW processing delays and required pstate allow width, so we must include
462  * that separately.
463  *
464  * - Set phantom backporch = vstartup of main pipe
465  */
466 void dcn32_set_phantom_stream_timing(struct dc *dc,
467 				     struct dc_state *context,
468 				     struct pipe_ctx *ref_pipe,
469 				     struct dc_stream_state *phantom_stream,
470 				     display_e2e_pipe_params_st *pipes,
471 				     unsigned int pipe_cnt,
472 				     unsigned int dc_pipe_idx)
473 {
474 	unsigned int i, pipe_idx;
475 	struct pipe_ctx *pipe;
476 	uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
477 	unsigned int num_dpp;
478 	unsigned int vlevel = context->bw_ctx.dml.vba.VoltageLevel;
479 	unsigned int dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
480 	unsigned int socclk = context->bw_ctx.dml.vba.SOCCLKPerState[vlevel];
481 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
482 	struct dc_stream_state *main_stream = ref_pipe->stream;
483 
484 	dc_assert_fp_enabled();
485 
486 	// Find DML pipe index (pipe_idx) using dc_pipe_idx
487 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
488 		pipe = &context->res_ctx.pipe_ctx[i];
489 
490 		if (!pipe->stream)
491 			continue;
492 
493 		if (i == dc_pipe_idx)
494 			break;
495 
496 		pipe_idx++;
497 	}
498 
499 	// Calculate lines required for pstate allow width and FW processing delays
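	// Conversion sketch: delay_us / 1e6 * pixel_clock_hz gives pixels, and
	// dividing by h_total converts that to lines. E.g. a hypothetical 100 us
	// combined delay at a 594 MHz pixel clock with h_total = 4400 is
	// 0.0001 * 594000000 / 4400 = 13.5, truncated to 13 lines.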
500 	pstate_width_fw_delay_lines = ((double)(dc->caps.subvp_fw_processing_delay_us +
501 			dc->caps.subvp_pstate_allow_width_us) / 1000000) *
502 			(ref_pipe->stream->timing.pix_clk_100hz * 100) /
503 			(double)ref_pipe->stream->timing.h_total;
504 
505 	// Update clks_cfg for calling into recalculate
506 	pipes[0].clks_cfg.voltage = vlevel;
507 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
508 	pipes[0].clks_cfg.socclk_mhz = socclk;
509 
510 	// DML calculation for MALL region doesn't take into account FW delay
511 	// and required pstate allow width for multi-display cases
512 	/* Add 16 lines margin to the MALL REGION because SUB_VP_START_LINE must be aligned
513 	 * to 2 swaths (i.e. 16 lines)
514 	 */
515 	phantom_vactive = get_subviewport_lines_needed_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx) +
516 				pstate_width_fw_delay_lines + dc->caps.subvp_swath_height_margin_lines;
517 
518 	// W/A for DCC corruption with certain high resolution timings.
519 	// Determine if pipe split is used. If so, add meta_row_height to the phantom vactive.
520 	num_dpp = vba->NoOfDPP[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]];
521 	phantom_vactive += num_dpp > 1 ? vba->meta_row_height[vba->pipe_plane[pipe_idx]] : 0;
522 
523 	/* dc->debug.subvp_extra_lines is 0 by default */
524 	phantom_vactive += dc->debug.subvp_extra_lines;
525 
526 	// For backporch of phantom pipe, use vstartup of the main pipe
527 	phantom_bp = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
528 
529 	phantom_stream->dst.y = 0;
530 	phantom_stream->dst.height = phantom_vactive;
531 	/* When scaling, DML provides the end-to-end required number of lines for MALL.
532 	 * dst.height is always correct for this case, but src.height is not, which causes a
533 	 * delta between main and phantom pipe scaling outputs. Need to adjust src.height on
534 	 * phantom for this case.
535 	 */
536 	phantom_stream->src.y = 0;
537 	phantom_stream->src.height = (double)phantom_vactive * (double)main_stream->src.height / (double)main_stream->dst.height;
538 
539 	phantom_stream->timing.v_addressable = phantom_vactive;
540 	phantom_stream->timing.v_front_porch = 1;
541 	phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
542 						phantom_stream->timing.v_front_porch +
543 						phantom_stream->timing.v_sync_width +
544 						phantom_bp;
545 	phantom_stream->timing.flags.DSC = 0; // Don't need DSC for phantom timing
546 }
547 
548 /**
549  * dcn32_get_num_free_pipes - Calculate number of free pipes
550  * @dc: current dc state
551  * @context: new dc state
552  *
553  * This function assumes that a "used" pipe is a pipe that has
554  * both a stream and a plane assigned to it.
555  *
556  * Return: Number of free pipes available in the context
557  */
558 static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
559 {
560 	unsigned int i;
561 	unsigned int free_pipes = 0;
562 	unsigned int num_pipes = 0;
563 
564 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
565 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
566 
567 		if (pipe->stream && !pipe->top_pipe) {
568 			while (pipe) {
569 				num_pipes++;
570 				pipe = pipe->bottom_pipe;
571 			}
572 		}
573 	}
574 
575 	free_pipes = dc->res_pool->pipe_count - num_pipes;
576 	return free_pipes;
577 }
578 
579 /**
580  * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
581  * @dc: current dc state
582  * @context: new dc state
583  * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
584  *
585  * We enter this function if we are Sub-VP capable (i.e. enough pipes available)
586  * and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
587  * we are forcing SubVP P-State switching on the current config.
588  *
589  * The number of pipes used for the chosen surface must be less than or equal to the
590  * number of free pipes available.
591  *
592  * In general we choose surfaces with the longest frame time first (better for SubVP + VBLANK).
593  * For multi-display cases the ActiveDRAMClockChangeMargin doesn't provide enough info on its own
594  * for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
595  * support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
596  *
597  * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
598  */
599 static bool dcn32_assign_subvp_pipe(struct dc *dc,
600 				    struct dc_state *context,
601 				    unsigned int *index)
602 {
603 	unsigned int i, pipe_idx;
604 	unsigned int max_frame_time = 0;
605 	bool valid_assignment_found = false;
606 	unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
607 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
608 
609 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
610 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
611 		unsigned int num_pipes = 0;
612 		unsigned int refresh_rate = 0;
613 
614 		if (!pipe->stream)
615 			continue;
616 
617 		// Round up
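		// (e.g. a hypothetical timing with v_total * h_total = 2475100 at a
		// 148.5 MHz pixel clock is 59.9976 Hz, which the round-up stores as 60)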
618 		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
619 				pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
620 				/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
621 		/* SubVP pipe candidate requirements:
622 		 * - Refresh rate < 120 Hz
623 		 * - Not able to switch in vactive naturally (switching in active means the
624 		 *   DET provides enough buffer to hide the P-State switch latency -- trying
625 		 *   to combine this with SubVP can cause issues with the scheduling).
626 		 * - Not TMZ surface
627 		 */
628 		if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) &&
629 				!pipe->stream->hw_cursor_req &&
630 				!dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) &&
631 				!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
632 				(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
633 				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
634 				(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
635 				!pipe->plane_state->address.tmz_surface &&
636 				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
637 				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
638 						dcn32_allow_subvp_with_active_margin(pipe)))) {
639 			while (pipe) {
640 				num_pipes++;
641 				pipe = pipe->bottom_pipe;
642 			}
643 
644 			pipe = &context->res_ctx.pipe_ctx[i];
645 			if (num_pipes <= free_pipes) {
646 				struct dc_stream_state *stream = pipe->stream;
647 				unsigned int frame_us = (stream->timing.v_total * stream->timing.h_total /
648 						(double)(stream->timing.pix_clk_100hz * 100)) * 1000000;
649 				if (frame_us > max_frame_time) {
650 					*index = i;
651 					max_frame_time = frame_us;
652 					valid_assignment_found = true;
653 				}
654 			}
655 		}
656 		pipe_idx++;
657 	}
658 	return valid_assignment_found;
659 }
660 
661 /**
662  * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
663  * @dc: current dc state
664  * @context: new dc state
665  *
666  * This function returns true if there are enough free pipes
667  * to create the required phantom pipes for any given stream
668  * (that does not already have phantom pipe assigned).
669  *
670  * e.g. For a 2 stream config where the first stream uses one
671  * pipe and the second stream uses 2 pipes (i.e. pipe split),
672  * this function will return true because there is 1 remaining
673  * pipe which can be used as the phantom pipe for the non pipe
674  * split pipe.
675  *
676  * Return:
677  * True if there are enough free pipes to assign phantom pipes to at least one
678  * stream that does not already have phantom pipes assigned. Otherwise false.
679  */
680 static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context)
681 {
682 	unsigned int i, split_cnt, free_pipes;
683 	unsigned int min_pipe_split = dc->res_pool->pipe_count + 1; // init as max number of pipes + 1
684 	bool subvp_possible = false;
685 
686 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
687 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
688 
689 		// Find the minimum pipe split count for non SubVP pipes
690 		if (resource_is_pipe_type(pipe, OPP_HEAD) &&
691 			dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
692 			split_cnt = 0;
693 			while (pipe) {
694 				split_cnt++;
695 				pipe = pipe->bottom_pipe;
696 			}
697 
698 			if (split_cnt < min_pipe_split)
699 				min_pipe_split = split_cnt;
700 		}
701 	}
702 
703 	free_pipes = dcn32_get_num_free_pipes(dc, context);
704 
705 	// SubVP only possible if at least one pipe is being used (i.e. free_pipes
706 	// should not equal the pipe_count)
707 	if (free_pipes >= min_pipe_split && free_pipes < dc->res_pool->pipe_count)
708 		subvp_possible = true;
709 
710 	return subvp_possible;
711 }
712 
713 /**
714  * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
715  * @dc: current dc state
716  * @context: new dc state
717  *
718  * High level algorithm:
719  * 1. Find longest microschedule length (in us) between the two SubVP pipes
720  * 2. Check if the worst case overlap (VBLANK in middle of ACTIVE) for both
721  * pipes still allows for the maximum microschedule to fit in the active
722  * region for both pipes.
723  *
724  * Return: True if the SubVP + SubVP config is schedulable, false otherwise
725  */
726 static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
727 {
728 	struct pipe_ctx *subvp_pipes[2] = {0};
729 	struct dc_stream_state *phantom = NULL;
730 	uint32_t microschedule_lines = 0;
731 	uint32_t index = 0;
732 	uint32_t i;
733 	uint32_t max_microschedule_us = 0;
734 	int32_t vactive1_us, vactive2_us, vblank1_us, vblank2_us;
735 
736 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
737 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
738 		uint32_t time_us = 0;
739 
740 		/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
741 		 * and also to store the two main SubVP pipe pointers in subvp_pipes[2].
742 		 */
743 		phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
744 		if (phantom && pipe->stream && pipe->plane_state && !pipe->top_pipe &&
745 			dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
746 			microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
747 					phantom->timing.v_addressable;
748 
749 			// Round up when calculating microschedule time (+ 1 at the end)
750 			time_us = (microschedule_lines * phantom->timing.h_total) /
751 					(double)(phantom->timing.pix_clk_100hz * 100) * 1000000 +
752 						dc->caps.subvp_prefetch_end_to_mall_start_us +
753 						dc->caps.subvp_fw_processing_delay_us + 1;
754 			if (time_us > max_microschedule_us)
755 				max_microschedule_us = time_us;
756 
757 			subvp_pipes[index] = pipe;
758 			index++;
759 
760 			// Maximum 2 SubVP pipes
761 			if (index == 2)
762 				break;
763 		}
764 	}
765 	vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
766 			(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
767 	vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
768 				(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
769 	vblank1_us = (((subvp_pipes[0]->stream->timing.v_total - subvp_pipes[0]->stream->timing.v_addressable) *
770 			subvp_pipes[0]->stream->timing.h_total) /
771 			(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
772 	vblank2_us = (((subvp_pipes[1]->stream->timing.v_total - subvp_pipes[1]->stream->timing.v_addressable) *
773 			subvp_pipes[1]->stream->timing.h_total) /
774 			(double)(subvp_pipes[1]->stream->timing.pix_clk_100hz * 100)) * 1000000;
775 
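	/* Worst-case overlap check: either pipe's VBLANK can land in the middle
	 * of the other pipe's VACTIVE, so only half of (VACTIVE - other VBLANK)
	 * is guaranteed as drain time. With hypothetical values vactive1_us =
	 * 15000 and vblank2_us = 500, the left side is (15000 - 500) / 2 =
	 * 7250 us, which must exceed max_microschedule_us.
	 */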
776 	if ((vactive1_us - vblank2_us) / 2 > max_microschedule_us &&
777 	    (vactive2_us - vblank1_us) / 2 > max_microschedule_us)
778 		return true;
779 
780 	return false;
781 }
782 
783 /**
784  * subvp_drr_schedulable() - Determine if SubVP + DRR config is schedulable
785  * @dc: current dc state
786  * @context: new dc state
787  *
788  * High level algorithm:
789  * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
790  * 2. Determine the frame time for the DRR display when adding required margin for MCLK switching
791  * (the margin is equal to the MALL region + DRR margin (500us))
792  * 3. If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, stretched DRR VBLANK))
793  * then report the configuration as supported
794  *
795  * Return: True if the SubVP + DRR config is schedulable, false otherwise
796  */
797 static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
798 {
799 	bool schedulable = false;
800 	uint32_t i;
801 	struct pipe_ctx *pipe = NULL;
802 	struct pipe_ctx *drr_pipe = NULL;
803 	struct dc_crtc_timing *main_timing = NULL;
804 	struct dc_crtc_timing *phantom_timing = NULL;
805 	struct dc_crtc_timing *drr_timing = NULL;
806 	int16_t prefetch_us = 0;
807 	int16_t mall_region_us = 0;
808 	int16_t drr_frame_us = 0;	// nominal frame time
809 	int16_t subvp_active_us = 0;
810 	int16_t stretched_drr_us = 0;
811 	int16_t drr_stretched_vblank_us = 0;
812 	int16_t max_vblank_mallregion = 0;
813 	struct dc_stream_state *phantom_stream;
814 	bool subvp_found = false;
815 	bool drr_found = false;
816 
817 	// Find SubVP pipe
818 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
819 		pipe = &context->res_ctx.pipe_ctx[i];
820 
821 		// We check for master pipe, but it shouldn't matter since we only need
822 		// the pipe for timing info (stream should be same for any pipe splits)
823 		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
824 				!resource_is_pipe_type(pipe, DPP_PIPE))
825 			continue;
826 
827 		// Find the SubVP pipe
828 		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
829 			subvp_found = true;
830 			break;
831 		}
832 	}
833 
834 	// Find the DRR pipe
835 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
836 		drr_pipe = &context->res_ctx.pipe_ctx[i];
837 
838 		// We check for master pipe only
839 		if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
840 				!resource_is_pipe_type(drr_pipe, DPP_PIPE))
841 			continue;
842 
843 		if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
844 				(drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
845 			drr_found = true;
846 			break;
847 		}
848 	}
849 
850 	phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
851 	if (phantom_stream && subvp_found && drr_found) {
852 		main_timing = &pipe->stream->timing;
853 		phantom_timing = &phantom_stream->timing;
854 		drr_timing = &drr_pipe->stream->timing;
855 		prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
856 				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
857 				dc->caps.subvp_prefetch_end_to_mall_start_us;
858 		subvp_active_us = main_timing->v_addressable * main_timing->h_total /
859 				(double)(main_timing->pix_clk_100hz * 100) * 1000000;
860 		drr_frame_us = drr_timing->v_total * drr_timing->h_total /
861 				(double)(drr_timing->pix_clk_100hz * 100) * 1000000;
862 		// P-State allow width and FW delays are already included in phantom_timing->v_addressable
863 		mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
864 				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
865 		stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
866 		drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
867 				(double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
868 		max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
869 	}
870 
871 	/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
872 	 * highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
873 	 * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
874 	 * and the max of (VBLANK blanking time, MALL region)).
875 	 */
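	/* Numerical sketch with hypothetical values: a 120 Hz DRR frame of
	 * 8333 us plus a 600 us MALL region and the 500 us DRR margin noted
	 * above stretches the frame to 9433 us, effectively capping the DRR
	 * panel near 106 Hz while the SubVP switch is pending.
	 */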
876 	if (drr_timing &&
877 	    stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
878 	    subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
879 		schedulable = true;
880 
881 	return schedulable;
882 }
883 
884 
885 /**
886  * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
887  * @dc: current dc state
888  * @context: new dc state
889  *
890  * High level algorithm:
891  * 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
892  * 2. If (SubVP Active - Prefetch > Vblank Frame Time + max(MALL region, Vblank blanking time))
893  * then report the configuration as supported
894  * 3. If the VBLANK display is DRR, then take the DRR static schedulability path
895  *
896  * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
897  */
898 static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
899 {
900 	struct pipe_ctx *pipe = NULL;
901 	struct pipe_ctx *subvp_pipe = NULL;
902 	bool found = false;
903 	bool schedulable = false;
904 	uint32_t i = 0;
905 	uint8_t vblank_index = 0;
906 	uint16_t prefetch_us = 0;
907 	uint16_t mall_region_us = 0;
908 	uint16_t vblank_frame_us = 0;
909 	uint16_t subvp_active_us = 0;
910 	uint16_t vblank_blank_us = 0;
911 	uint16_t max_vblank_mallregion = 0;
912 	struct dc_crtc_timing *main_timing = NULL;
913 	struct dc_crtc_timing *phantom_timing = NULL;
914 	struct dc_crtc_timing *vblank_timing = NULL;
915 	struct dc_stream_state *phantom_stream;
916 	enum mall_stream_type pipe_mall_type;
917 
918 	/* For SubVP + VBLANK/DRR cases, we assume there can only be
919 	 * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
920 	 * is supported, it is either a single VBLANK case or two VBLANK
921 	 * displays which are synchronized (in which case they have identical
922 	 * timings).
923 	 */
924 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
925 		pipe = &context->res_ctx.pipe_ctx[i];
926 		pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
927 
928 		// We check for master pipe, but it shouldn't matter since we only need
929 		// the pipe for timing info (stream should be same for any pipe splits)
930 		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
931 				!resource_is_pipe_type(pipe, DPP_PIPE))
932 			continue;
933 
934 		if (!found && pipe_mall_type == SUBVP_NONE) {
935 			// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
936 			vblank_index = i;
937 			found = true;
938 		}
939 
940 		if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
941 			subvp_pipe = pipe;
942 	}
943 	if (found && subvp_pipe) {
944 		phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
945 		main_timing = &subvp_pipe->stream->timing;
946 		phantom_timing = &phantom_stream->timing;
947 		vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
948 		// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
949 		// Also include the prefetch end to mallstart delay time
950 		prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
951 				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
952 				dc->caps.subvp_prefetch_end_to_mall_start_us;
953 		// P-State allow width and FW delays are already included in phantom_timing->v_addressable
954 		mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
955 				(double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
956 		vblank_frame_us = vblank_timing->v_total * vblank_timing->h_total /
957 				(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
958 		vblank_blank_us =  (vblank_timing->v_total - vblank_timing->v_addressable) * vblank_timing->h_total /
959 				(double)(vblank_timing->pix_clk_100hz * 100) * 1000000;
960 		subvp_active_us = main_timing->v_addressable * main_timing->h_total /
961 				(double)(main_timing->pix_clk_100hz * 100) * 1000000;
962 		max_vblank_mallregion = vblank_blank_us > mall_region_us ? vblank_blank_us : mall_region_us;
963 
964 		// Schedulable if VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
965 		// and the max of (VBLANK blanking time, MALL region)
966 		// TODO: Possibly add some margin (i.e. the below conditions should be [...] > X instead of [...] > 0)
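		// E.g. with hypothetical round numbers subvp_active_us = 15000,
		// prefetch_us = 700, vblank_frame_us = 11000 and
		// max_vblank_mallregion = 600, 2700 us of slack remain and the
		// config is reported as schedulable.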
967 		if (subvp_active_us - prefetch_us - vblank_frame_us - max_vblank_mallregion > 0)
968 			schedulable = true;
969 	}
970 	return schedulable;
971 }
972 
973 /**
974  * subvp_subvp_admissable() - Determine if subvp + subvp config is admissible
975  *
976  * @dc: Current DC state
977  * @context: New DC state to be programmed
978  *
979  * SubVP + SubVP is admissible under the following conditions:
980  * - All SubVP pipes are < 120 Hz, OR
981  * - All SubVP pipes are >= 120 Hz
982  *
983  * Return: True if admissible, false otherwise
984  */
985 static bool subvp_subvp_admissable(struct dc *dc,
986 				struct dc_state *context)
987 {
988 	bool result = false;
989 	uint32_t i;
990 	uint8_t subvp_count = 0;
991 	uint32_t min_refresh = subvp_high_refresh_list.min_refresh, max_refresh = 0;
992 	uint64_t refresh_rate = 0;
993 
994 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
995 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
996 
997 		if (!pipe->stream)
998 			continue;
999 
1000 		if (pipe->plane_state && !pipe->top_pipe &&
1001 				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
1002 			refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
1003 				pipe->stream->timing.v_total * (uint64_t)pipe->stream->timing.h_total - (uint64_t)1);
1004 			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
1005 			refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
1006 
1007 			if ((uint32_t)refresh_rate < min_refresh)
1008 				min_refresh = (uint32_t)refresh_rate;
1009 			if ((uint32_t)refresh_rate > max_refresh)
1010 				max_refresh = (uint32_t)refresh_rate;
1011 			subvp_count++;
1012 		}
1013 	}
1014 
1015 	if (subvp_count == 2 && ((min_refresh < 120 && max_refresh < 120) ||
1016 		(min_refresh >= subvp_high_refresh_list.min_refresh &&
1017 				max_refresh <= subvp_high_refresh_list.max_refresh)))
1018 		result = true;
1019 
1020 	return result;
1021 }
1022 
1023 /**
1024  * subvp_validate_static_schedulability - Check which SubVP case is calculated
1025  * and handle static analysis based on the case.
1026  * @dc: current dc state
1027  * @context: new dc state
1028  * @vlevel: Voltage level calculated by DML
1029  *
1030  * Three cases:
1031  * 1. SubVP + SubVP
1032  * 2. SubVP + VBLANK (DRR checked internally)
1033  * 3. SubVP + VACTIVE (currently unsupported)
1034  *
1035  * Return: True if statically schedulable, false otherwise
1036  */
1037 static bool subvp_validate_static_schedulability(struct dc *dc,
1038 				struct dc_state *context,
1039 				int vlevel)
1040 {
1041 	bool schedulable = false;
1042 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1043 	uint32_t i, pipe_idx;
1044 	uint8_t subvp_count = 0;
1045 	uint8_t vactive_count = 0;
1046 	uint8_t non_subvp_pipes = 0;
1047 
1048 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1049 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1050 		enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
1051 
1052 		if (!pipe->stream)
1053 			continue;
1054 
1055 		if (pipe->plane_state && !pipe->top_pipe) {
1056 			if (pipe_mall_type == SUBVP_MAIN)
1057 				subvp_count++;
1058 			if (pipe_mall_type == SUBVP_NONE)
1059 				non_subvp_pipes++;
1060 		}
1061 
1062 		// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
1063 		// switching (SubVP + VACTIVE unsupported). In situations where we force
1064 		// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
1065 		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
1066 				pipe_mall_type == SUBVP_NONE) {
1067 			vactive_count++;
1068 		}
1069 		pipe_idx++;
1070 	}
1071 
1072 	if (subvp_count == 2) {
1073 		// Static schedulability check for SubVP + SubVP case
1074 		schedulable = subvp_subvp_admissable(dc, context) && subvp_subvp_schedulable(dc, context);
1075 	} else if (subvp_count == 1 && non_subvp_pipes == 0) {
1076 		// Single SubVP configs are supported by default as long as they are supported by DML
1077 		schedulable = true;
1078 	} else if (subvp_count == 1 && non_subvp_pipes == 1) {
1079 		if (dcn32_subvp_drr_admissable(dc, context))
1080 			schedulable = subvp_drr_schedulable(dc, context);
1081 		else if (dcn32_subvp_vblank_admissable(dc, context, vlevel))
1082 			schedulable = subvp_vblank_schedulable(dc, context);
1083 	} else if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vactive_w_mall_sub_vp &&
1084 			vactive_count > 0) {
1085 		// For single display SubVP cases, DML will output dm_dram_clock_change_vactive_w_mall_sub_vp by default.
1086 		// We tell the difference between SubVP vs. SubVP + VACTIVE by checking the vactive_count.
1087 		// SubVP + VACTIVE currently unsupported
1088 		schedulable = false;
1089 	}
1090 	return schedulable;
1091 }
1092 
1093 static void assign_subvp_index(struct dc *dc, struct dc_state *context)
1094 {
1095 	int i;
1096 	int index = 0;
1097 
1098 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1099 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1100 
1101 		if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
1102 				dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
1103 			pipe_ctx->subvp_index = index++;
1104 		} else {
1105 			pipe_ctx->subvp_index = 0;
1106 		}
1107 	}
1108 }
1109 
1110 struct pipe_slice_table {
1111 	struct {
1112 		struct dc_stream_state *stream;
1113 		int slice_count;
1114 	} odm_combines[MAX_STREAMS];
1115 	int odm_combine_count;
1116 
1117 	struct {
1118 		struct pipe_ctx *pri_pipe;
1119 		struct dc_plane_state *plane;
1120 		int slice_count;
1121 	} mpc_combines[MAX_PLANES];
1122 	int mpc_combine_count;
1123 };
1124 
1125 
1126 static void update_slice_table_for_stream(struct pipe_slice_table *table,
1127 		struct dc_stream_state *stream, int diff)
1128 {
1129 	int i;
1130 
1131 	for (i = 0; i < table->odm_combine_count; i++) {
1132 		if (table->odm_combines[i].stream == stream) {
1133 			table->odm_combines[i].slice_count += diff;
1134 			break;
1135 		}
1136 	}
1137 
1138 	if (i == table->odm_combine_count) {
1139 		table->odm_combine_count++;
1140 		table->odm_combines[i].stream = stream;
1141 		table->odm_combines[i].slice_count = diff;
1142 	}
1143 }
1144 
1145 static void update_slice_table_for_plane(struct pipe_slice_table *table,
1146 		struct pipe_ctx *dpp_pipe, struct dc_plane_state *plane, int diff)
1147 {
1148 	int i;
1149 	struct pipe_ctx *pri_dpp_pipe = resource_get_primary_dpp_pipe(dpp_pipe);
1150 
1151 	for (i = 0; i < table->mpc_combine_count; i++) {
1152 		if (table->mpc_combines[i].plane == plane &&
1153 				table->mpc_combines[i].pri_pipe == pri_dpp_pipe) {
1154 			table->mpc_combines[i].slice_count += diff;
1155 			break;
1156 		}
1157 	}
1158 
1159 	if (i == table->mpc_combine_count) {
1160 		table->mpc_combine_count++;
1161 		table->mpc_combines[i].plane = plane;
1162 		table->mpc_combines[i].pri_pipe = pri_dpp_pipe;
1163 		table->mpc_combines[i].slice_count = diff;
1164 	}
1165 }
1166 
1167 static void init_pipe_slice_table_from_context(
1168 		struct pipe_slice_table *table,
1169 		struct dc_state *context)
1170 {
1171 	int i, j;
1172 	struct pipe_ctx *otg_master;
1173 	struct pipe_ctx *dpp_pipes[MAX_PIPES];
1174 	struct dc_stream_state *stream;
1175 	int count;
1176 
1177 	memset(table, 0, sizeof(*table));
1178 
1179 	for (i = 0; i < context->stream_count; i++) {
1180 		stream = context->streams[i];
1181 		otg_master = resource_get_otg_master_for_stream(
1182 				&context->res_ctx, stream);
1183 		if (!otg_master)
1184 			continue;
1185 
1186 		count = resource_get_odm_slice_count(otg_master);
1187 		update_slice_table_for_stream(table, stream, count);
1188 
1189 		count = resource_get_dpp_pipes_for_opp_head(otg_master,
1190 				&context->res_ctx, dpp_pipes);
1191 		for (j = 0; j < count; j++)
1192 			if (dpp_pipes[j]->plane_state)
1193 				update_slice_table_for_plane(table, dpp_pipes[j],
1194 						dpp_pipes[j]->plane_state, 1);
1195 	}
1196 }
1197 
1198 static bool update_pipe_slice_table_with_split_flags(
1199 		struct pipe_slice_table *table,
1200 		struct dc *dc,
1201 		struct dc_state *context,
1202 		struct vba_vars_st *vba,
1203 		int split[MAX_PIPES],
1204 		bool merge[MAX_PIPES])
1205 {
1206 	/* NOTE: we are deprecating support for the concept of pipe splitting
1207 	 * or pipe merging. Instead we append slices to the end and remove
1208 	 * slices from the end. The following code converts a pipe split or
1209 	 * merge to an append or remove operation.
1210 	 *
1211 	 * For example:
1212 	 * When split flags describe the following pipe connection transition
1213 	 *
1214 	 * from:
1215 	 *  pipe 0 (split=2) -> pipe 1 (split=2)
1216 	 * to: (old behavior)
1217 	 *  pipe 0 -> pipe 2 -> pipe 1 -> pipe 3
1218 	 *
1219 	 * the code below actually does:
1220 	 *  pipe 0 -> pipe 1 -> pipe 2 -> pipe 3
1221 	 *
1222 	 * This is the new intended behavior and for future DCNs we will retire
1223 	 * the old concept completely.
1224 	 */
1225 	struct pipe_ctx *pipe;
1226 	bool odm;
1227 	int dc_pipe_idx, dml_pipe_idx = 0;
1228 	bool updated = false;
1229 
1230 	for (dc_pipe_idx = 0;
1231 			dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) {
1232 		pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
1233 		if (resource_is_pipe_type(pipe, FREE_PIPE))
1234 			continue;
1235 
1236 		if (merge[dc_pipe_idx]) {
1237 			if (resource_is_pipe_type(pipe, OPP_HEAD))
1238 				/* merging OPP head means reducing ODM slice
1239 				 * count by 1
1240 				 */
1241 				update_slice_table_for_stream(table, pipe->stream, -1);
1242 			else if (resource_is_pipe_type(pipe, DPP_PIPE) &&
1243 					resource_get_odm_slice_index(resource_get_opp_head(pipe)) == 0)
1244 				/* merging DPP pipe of the first ODM slice means
1245 				 * reducing MPC slice count by 1
1246 				 */
1247 				update_slice_table_for_plane(table, pipe, pipe->plane_state, -1);
1248 			updated = true;
1249 		}
1250 
1251 		if (split[dc_pipe_idx]) {
1252 			odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] !=
1253 					dm_odm_combine_mode_disabled;
1254 			if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
1255 				update_slice_table_for_stream(
1256 						table, pipe->stream, split[dc_pipe_idx] - 1);
1257 			else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
1258 				update_slice_table_for_plane(table, pipe,
1259 						pipe->plane_state, split[dc_pipe_idx] - 1);
1260 			updated = true;
1261 		}
1262 		dml_pipe_idx++;
1263 	}
1264 	return updated;
1265 }
1266 
1267 static void update_pipes_with_slice_table(struct dc *dc, struct dc_state *context,
1268 		struct pipe_slice_table *table)
1269 {
1270 	int i;
1271 
1272 	for (i = 0; i < table->odm_combine_count; i++)
1273 		resource_update_pipes_for_stream_with_slice_count(context,
1274 				dc->current_state, dc->res_pool,
1275 				table->odm_combines[i].stream,
1276 				table->odm_combines[i].slice_count);
1277 
1278 	for (i = 0; i < table->mpc_combine_count; i++)
1279 		resource_update_pipes_for_plane_with_slice_count(context,
1280 				dc->current_state, dc->res_pool,
1281 				table->mpc_combines[i].plane,
1282 				table->mpc_combines[i].slice_count);
1283 }
1284 
1285 static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *context,
1286 		struct vba_vars_st *vba, int split[MAX_PIPES],
1287 		bool merge[MAX_PIPES])
1288 {
1289 	struct pipe_slice_table slice_table;
1290 	bool updated;
1291 
1292 	init_pipe_slice_table_from_context(&slice_table, context);
1293 	updated = update_pipe_slice_table_with_split_flags(
1294 			&slice_table, dc, context, vba,
1295 			split, merge);
1296 	update_pipes_with_slice_table(dc, context, &slice_table);
1297 	return updated;
1298 }
1299 
1300 static bool should_apply_odm_power_optimization(struct dc *dc,
1301 		struct dc_state *context, struct vba_vars_st *v, int *split,
1302 		bool *merge)
1303 {
1304 	struct dc_stream_state *stream = context->streams[0];
1305 	struct pipe_slice_table slice_table;
1306 	int i;
1307 
1308 	/*
1309 	 * This debug flag lets us disable the ODM power optimization feature
1310 	 * unconditionally. We force the feature off when it is set to false.
1311 	 */
1312 	if (!dc->debug.enable_single_display_2to1_odm_policy)
1313 		return false;
1314 
1315 	/* The current design and test coverage only allow ODM power
1316 	 * optimization for a single stream. Supporting the multi-stream use
1317 	 * case would require an additional algorithm to decide how to
1318 	 * optimize power consumption when there are not enough free pipes to
1319 	 * allocate for all the streams. That level of optimization would
1320 	 * require multiple attempts of revalidation to make an optimized
1321 	 * decision. Unfortunately we do not support a revalidation flow in
1322 	 * the current version of DML.
1323 	 */
1324 	if (context->stream_count != 1)
1325 		return false;
1326 
1327 	/*
1328 	 * Our hardware doesn't support ODM for HDMI TMDS
1329 	 */
1330 	if (dc_is_hdmi_signal(stream->signal))
1331 		return false;
1332 
1333 	/*
1334 	 * ODM Combine 2:1 requires horizontal timing divisible by 2 so each
1335 	 * ODM segment has the same size.
1336 	 */
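	/* E.g. a 3840-pixel wide timing splits into two equal 1920-pixel ODM
	 * segments; an odd horizontal timing would leave unequal segments.
	 */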
1337 	if (!is_h_timing_divisible_by_2(stream))
1338 		return false;
1339 
1340 	/*
1341 	 * No power benefits if the timing's pixel clock is not high enough to
1342 	 * raise display clock from minimum power state.
1343 	 */
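	/* pix_clk_100hz is in units of 100 Hz, hence the multiply by 100 in
	 * the comparison against DCN3_2_VMIN_DISPCLK_HZ below.
	 */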
1344 	if (stream->timing.pix_clk_100hz * 100 <= DCN3_2_VMIN_DISPCLK_HZ)
1345 		return false;
1346 
1347 	if (dc->config.enable_windowed_mpo_odm) {
1348 		/*
1349 		 * ODM power optimization should only be allowed if the feature
1350 		 * can be seamlessly toggled off within an update. This would
1351 		 * require that the feature is applied on top of a minimal
1352 		 * state. A minimal state is defined as a state validated
1353 		 * without the need of pipe split. Therefore, when transition to
1354 		 * toggle the feature off, the same stream and plane
1355 		 * configuration can be supported by the pipe resource in the
1356 		 * first ODM slice alone without the need to acquire extra
1357 		 * resources.
1358 		 */
1359 		init_pipe_slice_table_from_context(&slice_table, context);
1360 		update_pipe_slice_table_with_split_flags(
1361 				&slice_table, dc, context, v,
1362 				split, merge);
1363 		for (i = 0; i < slice_table.mpc_combine_count; i++)
1364 			if (slice_table.mpc_combines[i].slice_count > 1)
1365 				return false;
1366 
1367 		for (i = 0; i < slice_table.odm_combine_count; i++)
1368 			if (slice_table.odm_combines[i].slice_count > 1)
1369 				return false;
1370 	} else {
1371 		/*
1372 		 * The new ODM power optimization feature removes a software
1373 		 * design limitation and allows ODM power optimization to be
1374 		 * supported even in the presence of overlay planes. The new
1375 		 * feature is enabled based on the enable_windowed_mpo_odm flag.
1376 		 * If the flag is not set, we limit our feature scope due to the
1377 		 * previous software design limitation.
1378 		 */
1379 		if (context->stream_status[0].plane_count != 1)
1380 			return false;
1381 
1382 		if (memcmp(&context->stream_status[0].plane_states[0]->clip_rect,
1383 				&stream->src, sizeof(struct rect)) != 0)
1384 			return false;
1385 
1386 		if (stream->src.width >= 5120 &&
1387 				stream->src.width > stream->dst.width)
1388 			return false;
1389 	}
1390 	return true;
1391 }
1392 
1393 static void try_odm_power_optimization_and_revalidate(
1394 		struct dc *dc,
1395 		struct dc_state *context,
1396 		display_e2e_pipe_params_st *pipes,
1397 		int *split,
1398 		bool *merge,
1399 		unsigned int *vlevel,
1400 		int pipe_cnt)
1401 {
1402 	int i;
1403 	unsigned int new_vlevel;
1404 	unsigned int cur_policy[MAX_PIPES];
1405 
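	/* Save each pipe's current ODM policy, then force 2:1 combine and
	 * revalidate. The forced policy is kept only if DML finds a supported
	 * voltage level; otherwise it is restored below.
	 */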
1406 	for (i = 0; i < pipe_cnt; i++) {
1407 		cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
1408 		pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
1409 	}
1410 
1411 	new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1412 
1413 	if (new_vlevel < context->bw_ctx.dml.soc.num_states) {
1414 		memset(split, 0, MAX_PIPES * sizeof(int));
1415 		memset(merge, 0, MAX_PIPES * sizeof(bool));
1416 		*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
1417 		context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
1418 	} else {
1419 		for (i = 0; i < pipe_cnt; i++)
1420 			pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
1421 	}
1422 }
1423 
1424 static bool is_test_pattern_enabled(
1425 		struct dc_state *context)
1426 {
1427 	int i;
1428 
1429 	for (i = 0; i < context->stream_count; i++) {
1430 		if (context->streams[i]->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
1431 			return true;
1432 	}
1433 
1434 	return false;
1435 }
1436 
1437 static bool dcn32_full_validate_bw_helper(struct dc *dc,
1438 				   struct dc_state *context,
1439 				   display_e2e_pipe_params_st *pipes,
1440 				   int *vlevel,
1441 				   int *split,
1442 				   bool *merge,
1443 				   int *pipe_cnt,
1444 				   bool *repopulate_pipes)
1445 {
1446 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1447 	unsigned int dc_pipe_idx = 0;
1448 	int i = 0;
1449 	bool found_supported_config = false;
1450 	int vlevel_temp = 0;
1451 
1452 	dc_assert_fp_enabled();
1453 
1454 	/*
1455 	 * DML favors voltage over p-state, but we're more interested in
1456 	 * supporting p-state over voltage. We can't support p-state in
1457 	 * prefetch mode > 0 so try capping the prefetch mode to start.
1458 	 * Override present for testing.
1459 	 */
1460 	if (dc->debug.dml_disallow_alternate_prefetch_modes)
1461 		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1462 			dm_prefetch_support_uclk_fclk_and_stutter;
1463 	else
1464 		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1465 			dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
1466 
1467 	*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1468 	/* This may adjust vlevel and maxMpcComb */
1469 	if (*vlevel < context->bw_ctx.dml.soc.num_states) {
1470 		*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1471 		vba->VoltageLevel = *vlevel;
1472 	}
1473 
1474 	/* Apply split and merge flags before checking for subvp */
1475 	if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge))
1476 		return false;
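	/* The split and merge flags were consumed above; clear them so the
	 * SubVP code below starts from a clean set.
	 */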
1477 	memset(split, 0, MAX_PIPES * sizeof(int));
1478 	memset(merge, 0, MAX_PIPES * sizeof(bool));
1479 
1480 	/* Conditions for setting up phantom pipes for SubVP:
1481 	 * 1. Not force disable SubVP
1482 	 * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING)
1483 	 * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
1484 	 * 4. Display configuration passes validation
1485 	 * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
1486 	 */
1487 	if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
1488 	    !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) &&
1489 		(*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] &&
1490 				vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) ||
1491 	    vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
1492 	    dc->debug.force_subvp_mclk_switch)) {
1493 
1494 		vlevel_temp = *vlevel;
1495 
1496 		while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
1497 			dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
1498 			/* For the case where *vlevel = num_states, bandwidth validation has failed for this config.
1499 			 * Adding phantom pipes won't change the validation result, so change the DML input param
1500 			 * for P-State support before adding phantom pipes and recalculating the DML result.
1501 			 * However, this case is only applicable for SubVP + DRR cases because the prefetch mode
1502 			 * will not allow for a switch in VBLANK. The DRR display must have its VBLANK stretched
1503 			 * enough to support MCLK switching.
1504 			 */
1505 			if (*vlevel == context->bw_ctx.dml.soc.num_states &&
1506 				context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final ==
1507 					dm_prefetch_support_uclk_fclk_and_stutter) {
1508 				context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
1509 								dm_prefetch_support_fclk_and_stutter;
1510 				/* There are params (such as FabricClock) that need to be recalculated
1511 				 * after validation fails (otherwise it will be 0). Calculation for
1512 				 * phantom vactive requires call into DML, so we must ensure all the
1513 				 * vba params are valid otherwise we'll get incorrect phantom vactive.
1514 				 */
1515 				*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1516 			}
1517 
1518 			dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx);
1519 
1520 			*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
1521 				DC_VALIDATE_MODE_AND_PROGRAMMING);
1522 			// Populate dppclk to trigger a recalculate in dml_get_voltage_level
1523 			// so the phantom pipe DLG params can be assigned correctly.
1524 			pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
1525 			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1526 
1527 			/* Check that vlevel requested supports pstate or not
1528 			 * if not, select the lowest vlevel that supports it
1529 			 */
1530 			for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
1531 				if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
1532 					*vlevel = i;
1533 					break;
1534 				}
1535 			}
1536 
1537 			if (*vlevel < context->bw_ctx.dml.soc.num_states
1538 			    && subvp_validate_static_schedulability(dc, context, *vlevel))
1539 				found_supported_config = true;
1540 			if (found_supported_config) {
1541 				// For SubVP + DRR cases, we can force the lowest vlevel that supports the mode
1542 				if (dcn32_subvp_drr_admissable(dc, context) && subvp_drr_schedulable(dc, context)) {
1543 					/* find lowest vlevel that supports the config */
1544 					for (i = *vlevel; i >= 0; i--) {
1545 						if (vba->ModeSupport[i][vba->maxMpcComb]) {
1546 							*vlevel = i;
1547 						} else {
1548 							break;
1549 						}
1550 					}
1551 				}
1552 			}
1553 		}
1554 
1555 		if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp])
1556 			found_supported_config = false;
1557 
1558 		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
1559 		// remove phantom pipes and repopulate dml pipes
1560 		if (!found_supported_config) {
1561 			dc_state_remove_phantom_streams_and_planes(dc, context);
1562 			dc_state_release_phantom_streams_and_planes(dc, context);
1563 			vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
1564 			*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes,
1565 				DC_VALIDATE_MODE_AND_PROGRAMMING);
1566 
1567 			*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
1568 			/* This may adjust vlevel and maxMpcComb */
1569 			if (*vlevel < context->bw_ctx.dml.soc.num_states) {
1570 				*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1571 				vba->VoltageLevel = *vlevel;
1572 			}
1573 		} else {
1574 			// Must populate phantom DLG params before programming hardware / timing for phantom pipe
1575 			dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
1576 
1577 			/* Call validate_apply_pipe_split flags after calling DML getters for
1578 			 * phantom dlg params, or some of the VBA params indicating pipe split
1579 			 * can be overwritten by the getters.
1580 			 *
1581 			 * When setting up SubVP config, all pipes are merged before attempting to
1582 			 * add phantom pipes. If pipe split (ODM / MPC) is required, both the main
1583 			 * and phantom pipes will be split in the regular pipe splitting sequence.
1584 			 */
1585 			*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
1586 			vba->VoltageLevel = *vlevel;
1587 			// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
1588 			// until driver has acquired the DMCUB lock to do it safely.
1589 			assign_subvp_index(dc, context);
1590 		}
1591 	}
1592 
1593 	if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
1594 		try_odm_power_optimization_and_revalidate(
1595 				dc, context, pipes, split, merge, vlevel, *pipe_cnt);
1596 
1597 	return true;
1598 }
1599 
1600 static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
1601 {
1602 	int i;
1603 
1604 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1605 		if (!context->res_ctx.pipe_ctx[i].stream)
1606 			continue;
1607 		if (dc->link_srv->dp_is_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
1608 			return true;
1609 	}
1610 	return false;
1611 }
1612 
1613 static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
1614 				       display_e2e_pipe_params_st *pipes,
1615 				       int pipe_cnt, int vlevel)
1616 {
1617 	int i, pipe_idx, active_hubp_count = 0;
1618 	bool usr_retraining_support = false;
1619 	bool unbounded_req_enabled = false;
1620 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1621 
1622 	dc_assert_fp_enabled();
1623 
1624 	/* Writeback MCIF_WB arbitration parameters */
1625 	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
1626 
1627 	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
1628 	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
1629 	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
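	/* vba.DRAMSpeed is in MT/s (see its use as min_dram_speed_mts later in
	 * this file); dividing by the 16x effective data-rate multiplier
	 * recovers the memory clock, and * 1000 converts MHz to kHz.
	 */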
1630 	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
1631 	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
1632 	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
1633 	context->bw_ctx.bw.dcn.clk.p_state_change_support =
1634 			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
1635 					!= dm_dram_clock_change_unsupported;
1636 
1637 	/* Pstate change might not be supported by hardware, but it might be
1638 	 * possible with firmware driven vertical blank stretching.
1639 	 */
1640 	context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
1641 
1642 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
1643 	context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
1644 	context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = context->bw_ctx.dml.vba.DTBCLKPerState[vlevel] * 1000;
1645 	if (context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_fclock_change_unsupported)
1646 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = false;
1647 	else
1648 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
1649 
1650 	usr_retraining_support = context->bw_ctx.dml.vba.USRRetrainingSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
1651 	ASSERT(usr_retraining_support);
1652 
1653 	if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
1654 		context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
1655 
1656 	unbounded_req_enabled = get_unbounded_request_enabled(&context->bw_ctx.dml, pipes, pipe_cnt);
1657 
1658 	if (unbounded_req_enabled && pipe_cnt > 1) {
1659 		// Unbounded requesting should not ever be used when more than 1 pipe is enabled.
1660 		ASSERT(false);
1661 		unbounded_req_enabled = false;
1662 	}
1663 
1664 	context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
1665 	context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
1666 	context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
1667 
1668 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1669 		if (!context->res_ctx.pipe_ctx[i].stream)
1670 			continue;
1671 		if (context->res_ctx.pipe_ctx[i].plane_state)
1672 			active_hubp_count++;
1673 		pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt,
1674 				pipe_idx);
1675 		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
1676 				pipe_idx);
1677 		pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt,
1678 				pipe_idx);
1679 		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
1680 				pipe_idx);
1681 
1682 		if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
1683 			// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
1684 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
1685 			context->res_ctx.pipe_ctx[i].unbounded_req = false;
1686 		} else {
1687 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt,
1688 							pipe_idx);
1689 			context->res_ctx.pipe_ctx[i].unbounded_req = unbounded_req_enabled;
1690 		}
1691 
1692 		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
1693 			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
1694 		if (context->res_ctx.pipe_ctx[i].plane_state)
1695 			context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
1696 		else
1697 			context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
1698 		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
1699 
1700 		context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1701 
1702 		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0)
1703 			context->res_ctx.pipe_ctx[i].has_vactive_margin = true;
1704 		else
1705 			context->res_ctx.pipe_ctx[i].has_vactive_margin = false;
1706 
1707 		/* MALL Allocation Sizes */
1708 		/* count from active, top pipes per plane only */
1709 		if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state &&
1710 				(context->res_ctx.pipe_ctx[i].top_pipe == NULL ||
1711 				context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
1712 				context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
1713 			/* SS: all active surfaces stored in MALL */
1714 			if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
1715 				context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1716 
1717 				if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
1718 					/* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */
1719 					context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1720 				}
1721 			} else {
1722 				/* SUBVP: phantom surfaces only stored in MALL */
1723 				context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
1724 			}
1725 		}
1726 
1727 		pipe_idx++;
1728 	}
1729 	/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
1730 	if (!active_hubp_count) {
1731 		context->bw_ctx.bw.dcn.clk.socclk_khz = 0;
1732 		context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
1733 		context->bw_ctx.bw.dcn.clk.dcfclk_khz = 0;
1734 		context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = 0;
1735 		context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
1736 		context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
1737 		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
1738 		context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
1739 	}
1740 	/* save an original dppclk copy */
1741 	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
1742 	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
1743 	context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz
1744 			* 1000;
1745 	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz
1746 			* 1000;
1747 
1748 	context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
1749 
1750 	context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;
1751 
1752 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1753 		if (context->res_ctx.pipe_ctx[i].stream)
1754 			context->bw_ctx.bw.dcn.compbuf_size_kb -= context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
1755 	}
1756 
1757 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1758 
1759 		if (!context->res_ctx.pipe_ctx[i].stream)
1760 			continue;
1761 
1762 		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg_v2(&context->bw_ctx.dml,
1763 				&context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, pipes,
1764 				pipe_cnt, pipe_idx);
1765 
1766 		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg_v2(&context->res_ctx.pipe_ctx[i].rq_regs,
1767 				&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1768 		pipe_idx++;
1769 	}
1770 }
1771 
1772 static struct pipe_ctx *dcn32_find_split_pipe(
1773 		struct dc *dc,
1774 		struct dc_state *context,
1775 		int old_index)
1776 {
1777 	struct pipe_ctx *pipe = NULL;
1778 	int i;
1779 
1780 	if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
1781 		pipe = &context->res_ctx.pipe_ctx[old_index];
1782 		pipe->pipe_idx = old_index;
1783 	}
1784 
1785 	if (!pipe)
1786 		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1787 			if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
1788 					&& dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
1789 				if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1790 					pipe = &context->res_ctx.pipe_ctx[i];
1791 					pipe->pipe_idx = i;
1792 					break;
1793 				}
1794 			}
1795 		}
1796 
1797 	/*
1798 	 * May need to fix pipes getting tossed from 1 opp to another on flip
1799 	 * Add for debugging transient underflow during topology updates:
1800 	 * ASSERT(pipe);
1801 	 */
1802 	if (!pipe)
1803 		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
1804 			if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1805 				pipe = &context->res_ctx.pipe_ctx[i];
1806 				pipe->pipe_idx = i;
1807 				break;
1808 			}
1809 		}
1810 
1811 	return pipe;
1812 }
1813 
1814 static bool dcn32_split_stream_for_mpc_or_odm(
1815 		const struct dc *dc,
1816 		struct resource_context *res_ctx,
1817 		struct pipe_ctx *pri_pipe,
1818 		struct pipe_ctx *sec_pipe,
1819 		bool odm)
1820 {
1821 	int pipe_idx = sec_pipe->pipe_idx;
1822 	const struct resource_pool *pool = dc->res_pool;
1823 
1824 	DC_LOGGER_INIT(dc->ctx->logger);
1825 
1826 	if (odm && pri_pipe->plane_state) {
1827 		/* ODM + window MPO, where MPO window is on left half only */
1828 		if (pri_pipe->plane_state->clip_rect.x + pri_pipe->plane_state->clip_rect.width <=
1829 				pri_pipe->stream->src.x + pri_pipe->stream->src.width/2) {
1830 
1831 			DC_LOG_SCALER("%s - ODM + window MPO(left). pri_pipe:%d\n",
1832 					__func__,
1833 					pri_pipe->pipe_idx);
1834 			return true;
1835 		}
1836 
1837 		/* ODM + window MPO, where MPO window is on right half only */
1838 		if (pri_pipe->plane_state->clip_rect.x >= pri_pipe->stream->src.x +  pri_pipe->stream->src.width/2) {
1839 
1840 			DC_LOG_SCALER("%s - ODM + window MPO(right). pri_pipe:%d\n",
1841 					__func__,
1842 					pri_pipe->pipe_idx);
1843 			return true;
1844 		}
1845 	}
1846 
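	/* Start the secondary pipe as a full copy of the primary, then fix up
	 * its per-pipe resources and tree linkage below.
	 */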
1847 	*sec_pipe = *pri_pipe;
1848 
1849 	sec_pipe->pipe_idx = pipe_idx;
1850 	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
1851 	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
1852 	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
1853 	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
1854 	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
1855 	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
1856 	sec_pipe->stream_res.dsc = NULL;
1857 	if (odm) {
1858 		if (pri_pipe->next_odm_pipe) {
1859 			ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
1860 			sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
1861 			sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
1862 		}
1863 		if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
1864 			pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
1865 			sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
1866 		}
1867 		if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
1868 			pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
1869 			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
1870 		}
1871 		pri_pipe->next_odm_pipe = sec_pipe;
1872 		sec_pipe->prev_odm_pipe = pri_pipe;
1873 		ASSERT(sec_pipe->top_pipe == NULL);
1874 
1875 		if (!sec_pipe->top_pipe)
1876 			sec_pipe->stream_res.opp = pool->opps[pipe_idx];
1877 		else
1878 			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
1879 		if (sec_pipe->stream->timing.flags.DSC == 1) {
1880 			dcn20_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
1881 			ASSERT(sec_pipe->stream_res.dsc);
1882 			if (sec_pipe->stream_res.dsc == NULL)
1883 				return false;
1884 		}
1885 	} else {
1886 		if (pri_pipe->bottom_pipe) {
1887 			ASSERT(pri_pipe->bottom_pipe != sec_pipe);
1888 			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
1889 			sec_pipe->bottom_pipe->top_pipe = sec_pipe;
1890 		}
1891 		pri_pipe->bottom_pipe = sec_pipe;
1892 		sec_pipe->top_pipe = pri_pipe;
1893 
1894 		ASSERT(pri_pipe->plane_state);
1895 	}
1896 
1897 	return true;
1898 }
1899 
1900 static bool dcn32_apply_merge_split_flags_helper(
1901 		struct dc *dc,
1902 		struct dc_state *context,
1903 		bool *repopulate_pipes,
1904 		int *split,
1905 		bool *merge)
1906 {
1907 	int i, pipe_idx;
1908 	bool newly_split[MAX_PIPES] = { false };
1909 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1910 
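	/* With windowed MPO + ODM enabled, split/merge is expressed through
	 * the slice table; the legacy path below edits the pipe tree directly.
	 */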
1911 	if (dc->config.enable_windowed_mpo_odm) {
1912 		if (update_pipes_with_split_flags(
1913 			dc, context, vba, split, merge))
1914 			*repopulate_pipes = true;
1915 	} else {
1916 
1917 		/* the code below will be removed once windowed mpo odm is fully
1918 		 * enabled.
1919 		 */
1920 		/* merge pipes if necessary */
1921 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
1922 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1923 
1924 			/* skip pipes that don't need merging */
1925 			if (!merge[i])
1926 				continue;
1927 
1928 			/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
1929 			if (pipe->prev_odm_pipe) {
1930 				/*split off odm pipe*/
1931 				pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
1932 				if (pipe->next_odm_pipe)
1933 					pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
1934 
1935 				/* 2:1 ODM + MPC split MPO to single pipe + MPC split MPO */
1936 				if (pipe->bottom_pipe) {
1937 					if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
1938 						/*MPC split rules will handle this case*/
1939 						pipe->bottom_pipe->top_pipe = NULL;
1940 					} else {
1941 						/* when merging an ODM pipe, the bottom MPC pipe must now point to
1942 						 * the previous ODM pipe and its associated stream assets
1943 						 */
1944 						if (pipe->prev_odm_pipe->bottom_pipe) {
1945 							/* 3 plane MPO*/
1946 							pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
1947 							pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
1948 						} else {
1949 							/* 2 plane MPO*/
1950 							pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
1951 							pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
1952 						}
1953 
1954 						memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
1955 					}
1956 				}
1957 
1958 				if (pipe->top_pipe) {
1959 					pipe->top_pipe->bottom_pipe = NULL;
1960 				}
1961 
1962 				pipe->bottom_pipe = NULL;
1963 				pipe->next_odm_pipe = NULL;
1964 				pipe->plane_state = NULL;
1965 				pipe->stream = NULL;
1966 				pipe->top_pipe = NULL;
1967 				pipe->prev_odm_pipe = NULL;
1968 				if (pipe->stream_res.dsc)
1969 					dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
1970 				memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
1971 				memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
1972 				memset(&pipe->link_res, 0, sizeof(pipe->link_res));
1973 				*repopulate_pipes = true;
1974 			} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
1975 				struct pipe_ctx *top_pipe = pipe->top_pipe;
1976 				struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
1977 
1978 				top_pipe->bottom_pipe = bottom_pipe;
1979 				if (bottom_pipe)
1980 					bottom_pipe->top_pipe = top_pipe;
1981 
1982 				pipe->top_pipe = NULL;
1983 				pipe->bottom_pipe = NULL;
1984 				pipe->plane_state = NULL;
1985 				pipe->stream = NULL;
1986 				memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
1987 				memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
1988 				memset(&pipe->link_res, 0, sizeof(pipe->link_res));
1989 				*repopulate_pipes = true;
1990 			} else
1991 				ASSERT(0); /* Should never try to merge master pipe */
1992 
1993 		}
1994 
1995 		for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
1996 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1997 			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1998 			struct pipe_ctx *hsplit_pipe = NULL;
1999 			bool odm;
2000 			int old_index = -1;
2001 
2002 			if (!pipe->stream || newly_split[i])
2003 				continue;
2004 
2005 			pipe_idx++;
2006 			odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
2007 
2008 			if (!pipe->plane_state && !odm)
2009 				continue;
2010 
2011 			if (split[i]) {
2012 				if (odm) {
2013 					if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
2014 						old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
2015 					else if (old_pipe->next_odm_pipe)
2016 						old_index = old_pipe->next_odm_pipe->pipe_idx;
2017 				} else {
2018 					if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
2019 							old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2020 						old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
2021 					else if (old_pipe->bottom_pipe &&
2022 							old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2023 						old_index = old_pipe->bottom_pipe->pipe_idx;
2024 				}
2025 				hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index);
2026 				ASSERT(hsplit_pipe);
2027 				if (!hsplit_pipe)
2028 					return false;
2029 
2030 				if (!dcn32_split_stream_for_mpc_or_odm(
2031 						dc, &context->res_ctx,
2032 						pipe, hsplit_pipe, odm))
2033 					return false;
2034 
2035 				newly_split[hsplit_pipe->pipe_idx] = true;
2036 				*repopulate_pipes = true;
2037 			}
2038 			if (split[i] == 4) {
2039 				struct pipe_ctx *pipe_4to1;
2040 
2041 				if (odm && old_pipe->next_odm_pipe)
2042 					old_index = old_pipe->next_odm_pipe->pipe_idx;
2043 				else if (!odm && old_pipe->bottom_pipe &&
2044 							old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2045 					old_index = old_pipe->bottom_pipe->pipe_idx;
2046 				else
2047 					old_index = -1;
2048 				pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
2049 				ASSERT(pipe_4to1);
2050 				if (!pipe_4to1)
2051 					return false;
2052 				if (!dcn32_split_stream_for_mpc_or_odm(
2053 						dc, &context->res_ctx,
2054 						pipe, pipe_4to1, odm))
2055 					return false;
2056 				newly_split[pipe_4to1->pipe_idx] = true;
2057 
2058 				if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
2059 						&& old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
2060 					old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
2061 				else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
2062 						old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
2063 						old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
2064 					old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
2065 				else
2066 					old_index = -1;
2067 				pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index);
2068 				ASSERT(pipe_4to1);
2069 				if (!pipe_4to1)
2070 					return false;
2071 				if (!dcn32_split_stream_for_mpc_or_odm(
2072 						dc, &context->res_ctx,
2073 						hsplit_pipe, pipe_4to1, odm))
2074 					return false;
2075 				newly_split[pipe_4to1->pipe_idx] = true;
2076 			}
2077 			if (odm)
2078 				dcn20_build_mapped_resource(dc, context, pipe->stream);
2079 		}
2080 
2081 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2082 			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2083 
2084 			if (pipe->plane_state) {
2085 				if (!resource_build_scaling_params(pipe))
2086 					return false;
2087 			}
2088 		}
2089 
2090 		for (i = 0; i < context->stream_count; i++) {
2091 			struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
2092 					context->streams[i]);
2093 
2094 			if (otg_master)
2095 				resource_build_test_pattern_params(&context->res_ctx, otg_master);
2096 		}
2097 	}
2098 	return true;
2099 }
2100 
2101 bool dcn32_internal_validate_bw(struct dc *dc,
2102 				struct dc_state *context,
2103 				display_e2e_pipe_params_st *pipes,
2104 				int *pipe_cnt_out,
2105 				int *vlevel_out,
2106 				enum dc_validate_mode validate_mode)
2107 {
2108 	bool out = false;
2109 	bool repopulate_pipes = false;
2110 	int split[MAX_PIPES] = { 0 };
2111 	bool merge[MAX_PIPES] = { false };
2112 	int pipe_cnt, i, pipe_idx;
2113 	int vlevel = context->bw_ctx.dml.soc.num_states;
2114 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
2115 
2116 	dc_assert_fp_enabled();
2117 
2118 	ASSERT(pipes);
2119 	if (!pipes)
2120 		return false;
2121 
2122 	/* For each full update, remove all existing phantom pipes first */
2123 	dc_state_remove_phantom_streams_and_planes(dc, context);
2124 	dc_state_release_phantom_streams_and_planes(dc, context);
2125 
2126 	dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
2127 
2128 	for (i = 0; i < context->stream_count; i++)
2129 		resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1);
2130 	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
2131 
2132 	if (!pipe_cnt) {
2133 		out = true;
2134 		goto validate_out;
2135 	}
2136 
2137 	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
2138 	context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
2139 
2140 	if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) {
2141 		if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge,
2142 			&pipe_cnt, &repopulate_pipes))
2143 			goto validate_fail;
2144 	}
2145 
2146 	if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING ||
2147 			(dc->debug.dml_disallow_alternate_prefetch_modes &&
2148 			(vlevel == context->bw_ctx.dml.soc.num_states ||
2149 				vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) {
2150 		/*
2151 		 * If dml_disallow_alternate_prefetch_modes is false, then we have already
2152 		 * tried alternate prefetch modes during full validation.
2153 		 *
2154 		 * If mode is unsupported or there is no p-state support, then
2155 		 * fall back to favouring voltage.
2156 		 *
2157 		 * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try
2158 		 * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
2159 		 */
2160 		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
2161 			dm_prefetch_support_none;
2162 
2163 		context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING);
2164 		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2165 
2166 		context->bw_ctx.dml.validate_max_state = false;
2167 
2168 		if (vlevel < context->bw_ctx.dml.soc.num_states) {
2169 			memset(split, 0, sizeof(split));
2170 			memset(merge, 0, sizeof(merge));
2171 			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
2172 			/* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */
2173 			vba->VoltageLevel = vlevel;
2174 		}
2175 	}
2176 
2177 	dml_log_mode_support_params(&context->bw_ctx.dml);
2178 
2179 	if (vlevel == context->bw_ctx.dml.soc.num_states)
2180 		goto validate_fail;
2181 
2182 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2183 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
2184 		struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
2185 
2186 		if (!pipe->stream)
2187 			continue;
2188 
2189 		if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
2190 				&& !dc->config.enable_windowed_mpo_odm
2191 				&& pipe->plane_state && mpo_pipe
2192 				&& memcmp(&mpo_pipe->plane_state->clip_rect,
2193 						&pipe->stream->src,
2194 						sizeof(struct rect)) != 0) {
2195 			ASSERT(mpo_pipe->plane_state != pipe->plane_state);
2196 			goto validate_fail;
2197 		}
2198 		pipe_idx++;
2199 	}
2200 
2201 	if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge))
2202 		goto validate_fail;
2203 
2204 	/* Actual dsc count per stream dsc validation*/
2205 	if (!dcn20_validate_dsc(dc, context)) {
2206 		vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
2207 		goto validate_fail;
2208 	}
2209 
2210 	if (repopulate_pipes) {
2211 		int flag_max_mpc_comb = vba->maxMpcComb;
2212 		int flag_vlevel = vlevel;
2213 		int i;
2214 
2215 		pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode);
2216 		if (!dc->config.enable_windowed_mpo_odm)
2217 			dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
2218 
2219 		/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
2220 		 * we have to re-calculate the DET allocation and run through DML once more to
2221 		 * ensure all the params are calculated correctly. We do not need to run the
2222 		 * pipe split check again after this call (pipes are already split / merged).
2223 		 */
2224 		context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
2225 					dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
2226 
2227 		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
2228 
2229 		if (vlevel == context->bw_ctx.dml.soc.num_states) {
2230 			/* failed after DET size changes */
2231 			goto validate_fail;
2232 		} else if (flag_max_mpc_comb == 0 &&
2233 				flag_max_mpc_comb != context->bw_ctx.dml.vba.maxMpcComb) {
2234 			/* check the context constructed with pipe split flags is still valid*/
2235 			bool flags_valid = false;
2236 			for (i = flag_vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
2237 				if (vba->ModeSupport[i][flag_max_mpc_comb]) {
2238 					vba->maxMpcComb = flag_max_mpc_comb;
2239 					vba->VoltageLevel = i;
2240 					vlevel = i;
2241 					flags_valid = true;
2242 					break;
2243 				}
2244 			}
2245 
2246 			/* this should never happen */
2247 			if (!flags_valid)
2248 				goto validate_fail;
2249 		}
2250 	}
2251 	*vlevel_out = vlevel;
2252 	*pipe_cnt_out = pipe_cnt;
2253 
2254 	out = true;
2255 	goto validate_out;
2256 
2257 validate_fail:
2258 	out = false;
2259 
2260 validate_out:
2261 	return out;
2262 }
2263 
2264 
2265 void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
2266 				display_e2e_pipe_params_st *pipes,
2267 				int pipe_cnt,
2268 				int vlevel)
2269 {
2270 	int i, pipe_idx, vlevel_temp = 0;
2271 	double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
2272 	double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2273 	double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
2274 	double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
2275 	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
2276 			dm_dram_clock_change_unsupported;
2277 	unsigned int dummy_latency_index = 0;
2278 	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
2279 	unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
2280 	bool subvp_in_use = dcn32_subvp_in_use(dc, context);
2281 	unsigned int min_dram_speed_mts_margin;
2282 	bool need_fclk_lat_as_dummy = false;
2283 	bool is_subvp_p_drr = false;
2284 	struct dc_stream_state *fpo_candidate_stream = NULL;
2285 	struct dc_stream_status *stream_status = NULL;
2286 
2287 	dc_assert_fp_enabled();
2288 
2289 	/* need to find dummy latency index for subvp */
2290 	if (subvp_in_use) {
2291 		/* Override DRAMClockChangeSupport for the SubVP + DRR case where the DRR cannot switch without stretching its VBLANK */
2292 		if (!pstate_en) {
2293 			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
2294 			context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter;
2295 			pstate_en = true;
2296 			is_subvp_p_drr = true;
2297 		}
2298 		dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
2299 						context, pipes, pipe_cnt, vlevel);
2300 
2301 		/* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so prefetch is
2302 		 * scheduled correctly to account for dummy pstate.
2303 		 */
2304 		if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
2305 			need_fclk_lat_as_dummy = true;
2306 			context->bw_ctx.dml.soc.fclk_change_latency_us =
2307 					dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2308 		}
2309 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2310 							dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2311 		dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING);
2312 		maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
2313 		if (is_subvp_p_drr) {
2314 			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
2315 		}
2316 	}
2317 
2318 	context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
2319 	for (i = 0; i < context->stream_count; i++) {
2320 		stream_status = NULL;
2321 		if (context->streams[i])
2322 			stream_status = dc_state_get_stream_status(context, context->streams[i]);
2323 		if (stream_status)
2324 			stream_status->fpo_in_use = false;
2325 	}
2326 
2327 	if (!pstate_en || (!dc->debug.disable_fpo_optimizations &&
2328 			pstate_en && vlevel != 0)) {
2329 		/* Only when the mclk switch cannot be natural is the FW-based vblank stretch attempted */
2330 		fpo_candidate_stream = dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
2331 		if (fpo_candidate_stream) {
2332 			stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
2333 			if (stream_status)
2334 				stream_status->fpo_in_use = true;
2335 			context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;
2336 		}
2337 
2338 		if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
2339 			dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
2340 				context, pipes, pipe_cnt, vlevel);
2341 
2342 			/* After calling dcn32_find_dummy_latency_index_for_fw_based_mclk_switch
2343 			 * we reinstate the original dram_clock_change_latency_us on the context
2344 			 * and all variables that may have changed up to this point, except the
2345 			 * newly found dummy_latency_index
2346 			 */
2347 			context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2348 					dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2349 			/* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so
2350 			 * prefetch is scheduled correctly to account for dummy pstate.
2351 			 */
2352 			if (context->bw_ctx.dml.soc.fclk_change_latency_us < dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us) {
2353 				need_fclk_lat_as_dummy = true;
2354 				context->bw_ctx.dml.soc.fclk_change_latency_us =
2355 						dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2356 			}
2357 			dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp,
2358 				DC_VALIDATE_MODE_AND_PROGRAMMING);
2359 			if (vlevel_temp < vlevel) {
2360 				vlevel = vlevel_temp;
2361 				maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
2362 				dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
2363 				pstate_en = true;
2364 				context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank;
2365 			} else {
2366 				/* Restore FCLK latency and re-run validation to go back to original validation
2367 				 * output if we find that enabling FPO does not give us any benefit (i.e. lower
2368 				 * voltage level)
2369 				 */
2370 				context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
2371 				for (i = 0; i < context->stream_count; i++) {
2372 					stream_status = NULL;
2373 					if (context->streams[i])
2374 						stream_status = dc_state_get_stream_status(context, context->streams[i]);
2375 					if (stream_status)
2376 						stream_status->fpo_in_use = false;
2377 				}
2378 				context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
2379 				dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel,
2380 					DC_VALIDATE_MODE_AND_PROGRAMMING);
2381 			}
2382 		}
2383 	}
2384 
2385 	/* Set B:
2386 	 * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
2387 	 * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark
2388 	 * calculations to cover bootup clocks.
2389 	 * DCFCLK: soc.clock_limits[2] when available
2390 	 * UCLK: soc.clock_limits[2] when available
2391 	 */
2392 	if (dcn3_2_soc.num_states > 2) {
2393 		vlevel_temp = 2;
2394 		dcfclk = dcn3_2_soc.clock_limits[2].dcfclk_mhz;
2395 	} else
2396 		dcfclk = 615; //DCFCLK Vmin_lv
2397 
2398 	pipes[0].clks_cfg.voltage = vlevel_temp;
2399 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
2400 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
2401 
2402 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
2403 		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
2404 		context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us;
2405 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
2406 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
2407 	}
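	/* DML latency getters return microseconds; watermarks are programmed
	 * in nanoseconds, hence the * 1000 conversions below.
	 */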
2408 	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2409 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2410 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2411 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2412 	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2413 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2414 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2415 	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2416 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2417 	context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2418 
2419 	/* Set D:
2420 	 * All clocks min.
2421 	 * DCFCLK: Min, as reported by PM FW when available
2422 	 * UCLK  : Min, as reported by PM FW when available
2423 	 * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
2424 	 */
2425 
2426 	/*
2427 	if (dcn3_2_soc.num_states > 2) {
2428 		vlevel_temp = 0;
2429 		dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
2430 	} else
2431 		dcfclk = 615; //DCFCLK Vmin_lv
2432 
2433 	pipes[0].clks_cfg.voltage = vlevel_temp;
2434 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
2435 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
2436 
2437 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
2438 		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
2439 		context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us;
2440 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
2441 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
2442 	}
2443 	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2444 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2445 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2446 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2447 	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2448 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2449 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2450 	context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2451 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2452 	context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2453 	*/
2454 
2455 	/* Set C, for Dummy P-State:
2456 	 * All clocks min.
2457 	 * DCFCLK: Min, as reported by PM FW, when available
2458 	 * UCLK  : Min,  as reported by PM FW, when available
2459 	 * pstate latency as per UCLK state dummy pstate latency
2460 	 */
2461 
2462 	// For Set A and Set C use values from validation
2463 	pipes[0].clks_cfg.voltage = vlevel;
2464 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
2465 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
2466 
2467 	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
2468 		pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
2469 	}
2470 
2471 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
2472 		min_dram_speed_mts = dram_speed_from_validation;
2473 		min_dram_speed_mts_margin = 160;
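		/* 160 MT/s of headroom is applied when matching against the
		 * dummy p-state table entries below.
		 */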
2474 
2475 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2476 			dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
2477 
2478 		if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] ==
2479 			dm_dram_clock_change_unsupported) {
2480 			int min_dram_speed_mts_offset = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1;
2481 
2482 			min_dram_speed_mts =
2483 				dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
2484 		}
2485 
2486 		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
2487 			/* Find the largest table entry that is lower than the DRAM
2488 			 * speed; anything lower than DPM0 still uses DPM0.
2489 			 */
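			/* E.g. (hypothetical values): with min_dram_speed_mts = 8000 and
			 * the 160 MT/s margin, the walk from index 3 downward stops at
			 * the first dummy_pstate_table entry whose dram_speed_mts is
			 * below 8160; if none is found, index 0 (DPM0) is used.
			 */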
2490 			for (dummy_latency_index = 3; dummy_latency_index > 0; dummy_latency_index--)
2491 				if (min_dram_speed_mts + min_dram_speed_mts_margin >
2492 					dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dram_speed_mts)
2493 					break;
2494 		}
2495 
2496 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2497 			dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2498 
2499 		context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us;
2500 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
2501 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
2502 	}
2503 
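	/* DML returns latencies in microseconds, while the watermark fields
	 * are in nanoseconds, hence the "* 1000" conversions below.
	 */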
2504 	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2505 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2506 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2507 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2508 	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2509 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2510 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2511 	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2512 	/* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state.
2513 	 * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM
2514 	 * value.
2515 	 */
2516 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2517 	context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2518 
2519 	if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
2520 		/* The only difference between A and C is p-state latency. If p-state is not supported
2521 		 * with the full p-state latency, we want to calculate DLG based on the dummy p-state latency.
2522 		 * The Set A p-state watermark is set to 0 on DCN30 when p-state is unsupported; for now keep the DCN30 behavior.
2523 		 */
2524 		context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
2525 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
2526 		/* Calculate the FCLK p-state change watermark based on the FCLK p-state change latency
2527 		 * when UCLK p-state is not supported, to avoid underflow when FCLK p-state is supported.
2528 		 */
2529 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2530 	} else {
2531 		/* Set A:
2532 		 * All clocks min.
2533 		 * DCFCLK: Min, as reported by PM FW, when available
2534 		 * UCLK: Min, as reported by PM FW, when available
2535 		 */
2536 
2537 		/* For set A set the correct latency values (i.e. non-dummy values) unconditionally
2538 		 */
2539 		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2540 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
2541 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
2542 
2543 		context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2544 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2545 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2546 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2547 		context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2548 		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2549 		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2550 		context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2551 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2552 		context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
2553 	}
2554 
2555 	/* Make Set D = Set A since we do not optimize watermarks for MALL */
2556 	context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
2557 
2558 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
2559 		if (!context->res_ctx.pipe_ctx[i].stream)
2560 			continue;
2561 
2562 		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
2563 		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
2564 
2565 		if (dc->config.forced_clocks) {
2566 			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
2567 			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
2568 		}
2569 		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
2570 			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
2571 		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
2572 			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
2573 
2574 		pipe_idx++;
2575 	}
2576 
2577 	context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
2578 
2579 	/* for proper prefetch calculations, if dummy lat > fclk lat, use fclk lat = dummy lat */
2580 	if (need_fclk_lat_as_dummy)
2581 		context->bw_ctx.dml.soc.fclk_change_latency_us =
2582 				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
2583 
2584 	dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
2585 
2586 	if (!pstate_en)
2587 		/* Restore full p-state latency */
2588 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
2589 				dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
2590 
2591 	/* revert fclk lat changes if required */
2592 	if (need_fclk_lat_as_dummy)
2593 		context->bw_ctx.dml.soc.fclk_change_latency_us =
2594 				dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
2595 }
2596 
2597 static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
2598 		unsigned int *optimal_dcfclk,
2599 		unsigned int *optimal_fclk)
2600 {
2601 	double bw_from_dram, bw_from_dram1, bw_from_dram2;
2602 
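	/* Usable DRAM bandwidth (MB/s) is the data rate (MT/s) times channel
	 * count times channel width in bytes, derated by an average-utilization
	 * percentage; the smaller of the DRAM-side and SDP-side derates wins.
	 * E.g. (hypothetical values): 16000 MT/s * 8 channels * 2 bytes * 0.6
	 * derate gives 153600 MB/s.
	 */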
2603 	bw_from_dram1 = uclk_mts * dcn3_2_soc.num_chans *
2604 		dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_dram_bw_use_normal_percent / 100);
2605 	bw_from_dram2 = uclk_mts * dcn3_2_soc.num_chans *
2606 		dcn3_2_soc.dram_channel_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100);
2607 
2608 	bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
2609 
2610 	if (optimal_fclk)
2611 		*optimal_fclk = bw_from_dram /
2612 		(dcn3_2_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
2613 
2614 	if (optimal_dcfclk)
2615 		*optimal_dcfclk =  bw_from_dram /
2616 		(dcn3_2_soc.return_bus_width_bytes * (dcn3_2_soc.max_avg_sdp_bw_use_normal_percent / 100));
2617 }
2618 
2619 static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries,
2620 		unsigned int index)
2621 {
2622 	int i;
2623 
2624 	if (*num_entries == 0)
2625 		return;
2626 
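	/* Shift every entry after the removed index down one slot, then zero
	 * the vacated final entry and decrement the count.
	 */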
2627 	for (i = index; i < *num_entries - 1; i++) {
2628 		table[i] = table[i + 1];
2629 	}
2630 	memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
2631 }
2632 
2633 void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
2634 {
2635 	int i;
2636 	unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
2637 			max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
2638 
2639 	for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
2640 		if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
2641 			max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
2642 		if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
2643 			max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2644 		if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
2645 			max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
2646 		if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
2647 			max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
2648 		if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
2649 			max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
2650 		if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
2651 			max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
2652 		if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
2653 			max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
2654 	}
2655 
2656 	/* Scan through the clock values we currently have; if any are 0,
2657 	 *  populate them with the dcn3_2_soc.clock_limits[] value.
2658 	 *
2659 	 * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being
2660 	 *  0 will cause the clock table build to be skipped.
2661 	 */
2662 	if (max_dcfclk_mhz == 0)
2663 		bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
2664 	if (max_dispclk_mhz == 0)
2665 		bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
2666 	if (max_dtbclk_mhz == 0)
2667 		bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
2668 	if (max_uclk_mhz == 0)
2669 		bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
2670 }
2671 
2672 static void swap_table_entries(struct _vcs_dpi_voltage_scaling_st *first_entry,
2673 		struct _vcs_dpi_voltage_scaling_st *second_entry)
2674 {
2675 	struct _vcs_dpi_voltage_scaling_st temp_entry = *first_entry;
2676 	*first_entry = *second_entry;
2677 	*second_entry = temp_entry;
2678 }
2679 
2680 /*
2681  * sort_entries_with_same_bw - Sort entries sharing the same bandwidth by DCFCLK
2682  */
2683 static void sort_entries_with_same_bw(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
2684 {
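	/* Scan for runs of consecutive entries with equal net bandwidth and
	 * bubble-sort each run in place by ascending DCFCLK, so the later
	 * consistency pass sees a deterministic order.
	 */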
2685 	unsigned int start_index = 0;
2686 	unsigned int end_index = 0;
2687 	unsigned int current_bw = 0;
2688 
2689 	for (int i = 0; i < (*num_entries - 1); i++) {
2690 		if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
2691 			current_bw = table[i].net_bw_in_kbytes_sec;
2692 			start_index = i;
2693 			end_index = ++i;
2694 
2695 			while ((i < (*num_entries - 1)) && (table[i+1].net_bw_in_kbytes_sec == current_bw))
2696 				end_index = ++i;
2697 		}
2698 
2699 		if (start_index != end_index) {
2700 			for (int j = start_index; j < end_index; j++) {
2701 				for (int k = start_index; k < end_index; k++) {
2702 					if (table[k].dcfclk_mhz > table[k+1].dcfclk_mhz)
2703 						swap_table_entries(&table[k], &table[k+1]);
2704 				}
2705 			}
2706 		}
2707 
2708 		start_index = 0;
2709 		end_index = 0;
2710 
2711 	}
2712 }
2713 
2714 /*
2715  * remove_inconsistent_entries - Ensure entries with the same bandwidth have MEMCLK and FCLK monotonically increasing
2716  *                               and remove entries that do not
2717  */
2718 static void remove_inconsistent_entries(struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
2719 {
2720 	for (int i = 0; i < (*num_entries - 1); i++) {
2721 		if (table[i].net_bw_in_kbytes_sec == table[i+1].net_bw_in_kbytes_sec) {
2722 			if ((table[i].dram_speed_mts > table[i+1].dram_speed_mts) ||
2723 				(table[i].fabricclk_mhz > table[i+1].fabricclk_mhz))
2724 				remove_entry_from_table_at_index(table, num_entries, i);
2725 		}
2726 	}
2727 }
2728 
2729 /*
2730  * override_max_clk_values - Overwrite the max clock frequencies with the max DC mode timings
2731  * Input:
2732  *	max_clk_limit - struct containing the desired clock timings
2733  * Output:
2734  *	curr_clk_limit  - struct containing the timings that need to be overwritten
2735  * Return: 0 upon success, non-zero for failure
2736  */
2737 static int override_max_clk_values(struct clk_limit_table_entry *max_clk_limit,
2738 		struct clk_limit_table_entry *curr_clk_limit)
2739 {
2740 	if (NULL == max_clk_limit || NULL == curr_clk_limit)
2741 		return -1; //invalid parameters
2742 
2743 	//only overwrite if desired max clock frequency is initialized
2744 	if (max_clk_limit->dcfclk_mhz != 0)
2745 		curr_clk_limit->dcfclk_mhz = max_clk_limit->dcfclk_mhz;
2746 
2747 	if (max_clk_limit->fclk_mhz != 0)
2748 		curr_clk_limit->fclk_mhz = max_clk_limit->fclk_mhz;
2749 
2750 	if (max_clk_limit->memclk_mhz != 0)
2751 		curr_clk_limit->memclk_mhz = max_clk_limit->memclk_mhz;
2752 
2753 	if (max_clk_limit->socclk_mhz != 0)
2754 		curr_clk_limit->socclk_mhz = max_clk_limit->socclk_mhz;
2755 
2756 	if (max_clk_limit->dtbclk_mhz != 0)
2757 		curr_clk_limit->dtbclk_mhz = max_clk_limit->dtbclk_mhz;
2758 
2759 	if (max_clk_limit->dispclk_mhz != 0)
2760 		curr_clk_limit->dispclk_mhz = max_clk_limit->dispclk_mhz;
2761 
2762 	return 0;
2763 }
2764 
2765 static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk_bw_params *bw_params,
2766 		struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
2767 {
2768 	int i, j;
2769 	struct _vcs_dpi_voltage_scaling_st entry = {0};
2770 	struct clk_limit_table_entry max_clk_data = {0};
2771 
2772 	unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
2773 
2774 	static const unsigned int num_dcfclk_stas = 5;
2775 	unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
2776 
2777 	unsigned int num_uclk_dpms = 0;
2778 	unsigned int num_fclk_dpms = 0;
2779 	unsigned int num_dcfclk_dpms = 0;
2780 
2781 	unsigned int num_dc_uclk_dpms = 0;
2782 	unsigned int num_dc_fclk_dpms = 0;
2783 	unsigned int num_dc_dcfclk_dpms = 0;
2784 
2785 	for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
2786 		if (bw_params->clk_table.entries[i].dcfclk_mhz > max_clk_data.dcfclk_mhz)
2787 			max_clk_data.dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
2788 		if (bw_params->clk_table.entries[i].fclk_mhz > max_clk_data.fclk_mhz)
2789 			max_clk_data.fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2790 		if (bw_params->clk_table.entries[i].memclk_mhz > max_clk_data.memclk_mhz)
2791 			max_clk_data.memclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
2792 		if (bw_params->clk_table.entries[i].dispclk_mhz > max_clk_data.dispclk_mhz)
2793 			max_clk_data.dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
2794 		if (bw_params->clk_table.entries[i].dppclk_mhz > max_clk_data.dppclk_mhz)
2795 			max_clk_data.dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
2796 		if (bw_params->clk_table.entries[i].phyclk_mhz > max_clk_data.phyclk_mhz)
2797 			max_clk_data.phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
2798 		if (bw_params->clk_table.entries[i].dtbclk_mhz > max_clk_data.dtbclk_mhz)
2799 			max_clk_data.dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
2800 
2801 		if (bw_params->clk_table.entries[i].memclk_mhz > 0) {
2802 			num_uclk_dpms++;
2803 			if (bw_params->clk_table.entries[i].memclk_mhz <= bw_params->dc_mode_limit.memclk_mhz)
2804 				num_dc_uclk_dpms++;
2805 		}
2806 		if (bw_params->clk_table.entries[i].fclk_mhz > 0) {
2807 			num_fclk_dpms++;
2808 			if (bw_params->clk_table.entries[i].fclk_mhz <= bw_params->dc_mode_limit.fclk_mhz)
2809 				num_dc_fclk_dpms++;
2810 		}
2811 		if (bw_params->clk_table.entries[i].dcfclk_mhz > 0) {
2812 			num_dcfclk_dpms++;
2813 			if (bw_params->clk_table.entries[i].dcfclk_mhz <= bw_params->dc_mode_limit.dcfclk_mhz)
2814 				num_dc_dcfclk_dpms++;
2815 		}
2816 	}
2817 
2818 	if (!disable_dc_mode_overwrite) {
2819 		//Overwrite max frequencies with max DC mode frequencies for DC mode systems
2820 		override_max_clk_values(&bw_params->dc_mode_limit, &max_clk_data);
2821 		num_uclk_dpms = num_dc_uclk_dpms;
2822 		num_fclk_dpms = num_dc_fclk_dpms;
2823 		num_dcfclk_dpms = num_dc_dcfclk_dpms;
2824 		bw_params->clk_table.num_entries_per_clk.num_memclk_levels = num_uclk_dpms;
2825 		bw_params->clk_table.num_entries_per_clk.num_fclk_levels = num_fclk_dpms;
2826 	}
2827 
2828 	if (num_dcfclk_dpms > 0 && bw_params->clk_table.entries[0].fclk_mhz > min_fclk_mhz)
2829 		min_fclk_mhz = bw_params->clk_table.entries[0].fclk_mhz;
2830 
2831 	if (!max_clk_data.dcfclk_mhz || !max_clk_data.dispclk_mhz || !max_clk_data.dtbclk_mhz)
2832 		return -1;
2833 
2834 	if (max_clk_data.dppclk_mhz == 0)
2835 		max_clk_data.dppclk_mhz = max_clk_data.dispclk_mhz;
2836 
2837 	if (max_clk_data.fclk_mhz == 0)
2838 		max_clk_data.fclk_mhz = max_clk_data.dcfclk_mhz *
2839 				dcn3_2_soc.pct_ideal_sdp_bw_after_urgent /
2840 				dcn3_2_soc.pct_ideal_fabric_bw_after_urgent;
2841 
2842 	if (max_clk_data.phyclk_mhz == 0)
2843 		max_clk_data.phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
2844 
2845 	*num_entries = 0;
2846 	entry.dispclk_mhz = max_clk_data.dispclk_mhz;
2847 	entry.dscclk_mhz = max_clk_data.dispclk_mhz / 3;
2848 	entry.dppclk_mhz = max_clk_data.dppclk_mhz;
2849 	entry.dtbclk_mhz = max_clk_data.dtbclk_mhz;
2850 	entry.phyclk_mhz = max_clk_data.phyclk_mhz;
2851 	entry.phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
2852 	entry.phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
2853 
2854 	// Insert all the DCFCLK STAs
2855 	for (i = 0; i < num_dcfclk_stas; i++) {
2856 		entry.dcfclk_mhz = dcfclk_sta_targets[i];
2857 		entry.fabricclk_mhz = 0;
2858 		entry.dram_speed_mts = 0;
2859 
2860 		get_optimal_ntuple(&entry);
2861 		entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2862 		insert_entry_into_table_sorted(table, num_entries, &entry);
2863 	}
2864 
2865 	// Insert the max DCFCLK
2866 	entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
2867 	entry.fabricclk_mhz = 0;
2868 	entry.dram_speed_mts = 0;
2869 
2870 	get_optimal_ntuple(&entry);
2871 	entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2872 	insert_entry_into_table_sorted(table, num_entries, &entry);
2873 
2874 	// Insert the UCLK DPMS
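	// (memclk_mhz * 16 converts UCLK in MHz to an effective DRAM data rate
	// in MT/s; the same 16:1 conversion is used throughout this file.)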
2875 	for (i = 0; i < num_uclk_dpms; i++) {
2876 		entry.dcfclk_mhz = 0;
2877 		entry.fabricclk_mhz = 0;
2878 		entry.dram_speed_mts = bw_params->clk_table.entries[i].memclk_mhz * 16;
2879 
2880 		get_optimal_ntuple(&entry);
2881 		entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2882 		insert_entry_into_table_sorted(table, num_entries, &entry);
2883 	}
2884 
2885 	// If FCLK is coarse grained, insert individual DPMs.
2886 	if (num_fclk_dpms > 2) {
2887 		for (i = 0; i < num_fclk_dpms; i++) {
2888 			entry.dcfclk_mhz = 0;
2889 			entry.fabricclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
2890 			entry.dram_speed_mts = 0;
2891 
2892 			get_optimal_ntuple(&entry);
2893 			entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2894 			insert_entry_into_table_sorted(table, num_entries, &entry);
2895 		}
2896 	}
2897 	// If FCLK is fine grained, only insert the max
2898 	else {
2899 		entry.dcfclk_mhz = 0;
2900 		entry.fabricclk_mhz = max_clk_data.fclk_mhz;
2901 		entry.dram_speed_mts = 0;
2902 
2903 		get_optimal_ntuple(&entry);
2904 		entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&entry);
2905 		insert_entry_into_table_sorted(table, num_entries, &entry);
2906 	}
2907 
2908 	// At this point, the table contains all "points of interest" based on
2909 	// DPMs from PMFW and STAs. The table is sorted by BW, and all clock
2910 	// ratios (by derate) are exact.
2911 
2912 	// Remove states that require higher clocks than are supported
2913 	for (i = *num_entries - 1; i >= 0 ; i--) {
2914 		if (table[i].dcfclk_mhz > max_clk_data.dcfclk_mhz ||
2915 				table[i].fabricclk_mhz > max_clk_data.fclk_mhz ||
2916 				table[i].dram_speed_mts > max_clk_data.memclk_mhz * 16)
2917 			remove_entry_from_table_at_index(table, num_entries, i);
2918 	}
2919 
2920 	// Insert entry with all max dc limits without bandwidth matching
2921 	if (!disable_dc_mode_overwrite) {
2922 		struct _vcs_dpi_voltage_scaling_st max_dc_limits_entry = entry;
2923 
2924 		max_dc_limits_entry.dcfclk_mhz = max_clk_data.dcfclk_mhz;
2925 		max_dc_limits_entry.fabricclk_mhz = max_clk_data.fclk_mhz;
2926 		max_dc_limits_entry.dram_speed_mts = max_clk_data.memclk_mhz * 16;
2927 
2928 		max_dc_limits_entry.net_bw_in_kbytes_sec = calculate_net_bw_in_kbytes_sec(&max_dc_limits_entry);
2929 		insert_entry_into_table_sorted(table, num_entries, &max_dc_limits_entry);
2930 
2931 		sort_entries_with_same_bw(table, num_entries);
2932 		remove_inconsistent_entries(table, num_entries);
2933 	}
2934 
2935 	// At this point, the table only contains supported points of interest.
2936 	// It could be used as is, but some states may be redundant due to the
2937 	// coarse-grained nature of some clocks, so we want to round up to
2938 	// coarse-grained DPMs and remove duplicates.
2939 
2940 	// Round up UCLKs
2941 	for (i = *num_entries - 1; i >= 0 ; i--) {
2942 		for (j = 0; j < num_uclk_dpms; j++) {
2943 			if (bw_params->clk_table.entries[j].memclk_mhz * 16 >= table[i].dram_speed_mts) {
2944 				table[i].dram_speed_mts = bw_params->clk_table.entries[j].memclk_mhz * 16;
2945 				break;
2946 			}
2947 		}
2948 	}
2949 
2950 	// If FCLK is coarse grained, round up to next DPMs
2951 	if (num_fclk_dpms > 2) {
2952 		for (i = *num_entries - 1; i >= 0 ; i--) {
2953 			for (j = 0; j < num_fclk_dpms; j++) {
2954 				if (bw_params->clk_table.entries[j].fclk_mhz >= table[i].fabricclk_mhz) {
2955 					table[i].fabricclk_mhz = bw_params->clk_table.entries[j].fclk_mhz;
2956 					break;
2957 				}
2958 			}
2959 		}
2960 	}
2961 	// Otherwise, round up to minimum.
2962 	else {
2963 		for (i = *num_entries - 1; i >= 0 ; i--) {
2964 			if (table[i].fabricclk_mhz < min_fclk_mhz) {
2965 				table[i].fabricclk_mhz = min_fclk_mhz;
2966 			}
2967 		}
2968 	}
2969 
2970 	// Round DCFCLKs up to minimum
2971 	for (i = *num_entries - 1; i >= 0 ; i--) {
2972 		if (table[i].dcfclk_mhz < min_dcfclk_mhz) {
2973 			table[i].dcfclk_mhz = min_dcfclk_mhz;
2974 		}
2975 	}
2976 
2977 	// Remove duplicate states; duplicates are always neighbouring since the table is sorted.
2978 	i = 0;
2979 	while (i < *num_entries - 1) {
2980 		if (table[i].dcfclk_mhz == table[i + 1].dcfclk_mhz &&
2981 				table[i].fabricclk_mhz == table[i + 1].fabricclk_mhz &&
2982 				table[i].dram_speed_mts == table[i + 1].dram_speed_mts)
2983 			remove_entry_from_table_at_index(table, num_entries, i + 1);
2984 		else
2985 			i++;
2986 	}
2987 
2988 	// Fix up the state indices
2989 	for (i = *num_entries - 1; i >= 0 ; i--) {
2990 		table[i].state = i;
2991 	}
2992 
2993 	return 0;
2994 }
2995 
2996 /*
2997  * dcn32_update_bw_bounding_box
2998  *
2999  * This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
3000  * spreadsheet with actual values as per dGPU SKU:
3001  * - with passed few options from dc->config
3002  * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might
3003  *   need to get it from PM FW)
3004  * - with passed latency values (passed in ns units) in dc-> bb override for
3005  *   debugging purposes
3006  * - with passed latencies from VBIOS (in 100_ns units) if available for
3007  *   certain dGPU SKU
3008  * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU
3009  *   of the same ASIC)
3010  * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM
3011  *   FW for different clocks (which might differ for certain dGPU SKU of the
3012  *   same ASIC)
3013  */
3014 void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params)
3015 {
3016 	dc_assert_fp_enabled();
3017 
3018 	/* Overrides from dc->config options */
3019 	dcn3_2_ip.clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
3020 
3021 	/* Override from passed dc->bb_overrides if available */
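	/* Each override below applies only when the debug value is non-zero
	 * and differs from the current bounding-box value (compared in ns).
	 */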
3022 	if ((int)(dcn3_2_soc.sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns
3023 			&& dc->bb_overrides.sr_exit_time_ns) {
3024 		dc->dml2_options.bbox_overrides.sr_exit_latency_us =
3025 		dcn3_2_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
3026 	}
3027 
3028 	if ((int)(dcn3_2_soc.sr_enter_plus_exit_time_us * 1000)
3029 			!= dc->bb_overrides.sr_enter_plus_exit_time_ns
3030 			&& dc->bb_overrides.sr_enter_plus_exit_time_ns) {
3031 		dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
3032 		dcn3_2_soc.sr_enter_plus_exit_time_us =
3033 			dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
3034 	}
3035 
3036 	if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
3037 		&& dc->bb_overrides.urgent_latency_ns) {
3038 		dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
3039 		dc->dml2_options.bbox_overrides.urgent_latency_us =
3040 		dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
3041 	}
3042 
3043 	if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
3044 			!= dc->bb_overrides.dram_clock_change_latency_ns
3045 			&& dc->bb_overrides.dram_clock_change_latency_ns) {
3046 		dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
3047 		dcn3_2_soc.dram_clock_change_latency_us =
3048 			dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
3049 	}
3050 
3051 	if ((int)(dcn3_2_soc.fclk_change_latency_us * 1000)
3052 			!= dc->bb_overrides.fclk_clock_change_latency_ns
3053 			&& dc->bb_overrides.fclk_clock_change_latency_ns) {
3054 		dc->dml2_options.bbox_overrides.fclk_change_latency_us =
3055 		dcn3_2_soc.fclk_change_latency_us =
3056 			dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
3057 	}
3058 
3059 	if ((int)(dcn3_2_soc.dummy_pstate_latency_us * 1000)
3060 			!= dc->bb_overrides.dummy_clock_change_latency_ns
3061 			&& dc->bb_overrides.dummy_clock_change_latency_ns) {
3062 		dcn3_2_soc.dummy_pstate_latency_us =
3063 			dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
3064 	}
3065 
3066 	/* Override from VBIOS if VBIOS bb_info available */
3067 	if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
3068 		struct bp_soc_bb_info bb_info = {0};
3069 
3070 		if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
3071 			if (bb_info.dram_clock_change_latency_100ns > 0)
3072 				dc->dml2_options.bbox_overrides.dram_clock_change_latency_us =
3073 				dcn3_2_soc.dram_clock_change_latency_us =
3074 					bb_info.dram_clock_change_latency_100ns * 10;
3075 
3076 			if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
3077 				dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us =
3078 				dcn3_2_soc.sr_enter_plus_exit_time_us =
3079 					bb_info.dram_sr_enter_exit_latency_100ns * 10;
3080 
3081 			if (bb_info.dram_sr_exit_latency_100ns > 0)
3082 				dc->dml2_options.bbox_overrides.sr_exit_latency_us =
3083 				dcn3_2_soc.sr_exit_time_us =
3084 					bb_info.dram_sr_exit_latency_100ns * 10;
3085 		}
3086 	}
3087 
3088 	/* Override from VBIOS for num_chan */
3089 	if (dc->ctx->dc_bios->vram_info.num_chans) {
3090 		dc->dml2_options.bbox_overrides.dram_num_chan =
3091 		dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
3092 		dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
3093 			dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
3094 	}
3095 
3096 	if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
3097 		dc->dml2_options.bbox_overrides.dram_chanel_width_bytes =
3098 		dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
3099 
3100 	/* DML DSC delay factor workaround */
3101 	dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0;
3102 
3103 	dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0;
3104 
3105 	/* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */
3106 	dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3107 	dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3108 	dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
3109 	dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0;
3110 	dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
3111 	dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0;
3112 
3113 	/* Override clock levels from Clk Mgr table entries as reported by PM FW */
3114 	if (bw_params->clk_table.entries[0].memclk_mhz) {
3115 		if (dc->debug.use_legacy_soc_bb_mechanism) {
3116 			unsigned int i = 0, j = 0, num_states = 0;
3117 
3118 			unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
3119 			unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
3120 			unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
3121 			unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
3122 			unsigned int min_dcfclk = UINT_MAX;
3123 			/* Set 199 as the first value in the STA target array to have a minimum DCFCLK value.
3124 			 * For DCN32 we set the min to 199 so minimum FCLK DPM0 (300MHz) can be achieved. */
3125 			unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
3126 			unsigned int num_dcfclk_sta_targets = 4, num_uclk_states = 0;
3127 			unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
3128 
3129 			for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
3130 				if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
3131 					max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
3132 				if (bw_params->clk_table.entries[i].dcfclk_mhz != 0 &&
3133 						bw_params->clk_table.entries[i].dcfclk_mhz < min_dcfclk)
3134 					min_dcfclk = bw_params->clk_table.entries[i].dcfclk_mhz;
3135 				if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
3136 					max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
3137 				if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
3138 					max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
3139 				if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
3140 					max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
3141 			}
3142 			if (min_dcfclk > dcfclk_sta_targets[0])
3143 				dcfclk_sta_targets[0] = min_dcfclk;
3144 			if (!max_dcfclk_mhz)
3145 				max_dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
3146 			if (!max_dispclk_mhz)
3147 				max_dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
3148 			if (!max_dppclk_mhz)
3149 				max_dppclk_mhz = dcn3_2_soc.clock_limits[0].dppclk_mhz;
3150 			if (!max_phyclk_mhz)
3151 				max_phyclk_mhz = dcn3_2_soc.clock_limits[0].phyclk_mhz;
3152 
3153 			if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3154 				// If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array
3155 				dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
3156 				num_dcfclk_sta_targets++;
3157 			} else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
3158 				// If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates
3159 				for (i = 0; i < num_dcfclk_sta_targets; i++) {
3160 					if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
3161 						dcfclk_sta_targets[i] = max_dcfclk_mhz;
3162 						break;
3163 					}
3164 				}
3165 				// Update size of array since we "removed" duplicates
3166 				num_dcfclk_sta_targets = i + 1;
3167 			}
3168 
3169 			num_uclk_states = bw_params->clk_table.num_entries;
3170 
3171 			// Calculate optimal dcfclk for each uclk
3172 			for (i = 0; i < num_uclk_states; i++) {
3173 				dcn32_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
3174 						&optimal_dcfclk_for_uclk[i], NULL);
3175 				if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
3176 					optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
3177 				}
3178 			}
3179 
3180 			// Calculate optimal uclk for each dcfclk sta target
3181 			for (i = 0; i < num_dcfclk_sta_targets; i++) {
3182 				for (j = 0; j < num_uclk_states; j++) {
3183 					if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
3184 						optimal_uclk_for_dcfclk_sta_targets[i] =
3185 								bw_params->clk_table.entries[j].memclk_mhz * 16;
3186 						break;
3187 					}
3188 				}
3189 			}
3190 
3191 			i = 0;
3192 			j = 0;
3193 			// create the final dcfclk and uclk table
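			// Merge the two ascending lists: DCFCLK STA targets (paired with
			// the lowest UCLK able to feed them) and UCLK DPMs (paired with
			// their optimal DCFCLK), dropping UCLK states whose optimal
			// DCFCLK exceeds the max supported DCFCLK.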
3194 			while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
3195 				if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
3196 					dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
3197 					dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
3198 				} else {
3199 					if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3200 						dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3201 						dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3202 					} else {
3203 						j = num_uclk_states;
3204 					}
3205 				}
3206 			}
3207 
3208 			while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
3209 				dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
3210 				dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
3211 			}
3212 
3213 			while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
3214 					optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
3215 				dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
3216 				dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
3217 			}
3218 
3219 			/* bw_params->clk_table.entries[MAX_NUM_DPM_LVL].
3220 			 * MAX_NUM_DPM_LVL is 8.
3221 			 * dcn3_2_soc.clock_limits[DC__VOLTAGE_STATES].
3222 			 * DC__VOLTAGE_STATES is 40.
3223 			 */
3224 			if (num_states > MAX_NUM_DPM_LVL) {
3225 				ASSERT(0);
3226 				return;
3227 			}
3228 
3229 			dcn3_2_soc.num_states = num_states;
3230 			for (i = 0; i < dcn3_2_soc.num_states; i++) {
3231 				dcn3_2_soc.clock_limits[i].state = i;
3232 				dcn3_2_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
3233 				dcn3_2_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
3234 
3235 				/* Fill all states with max values of all these clocks */
3236 				dcn3_2_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
3237 				dcn3_2_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
3238 				dcn3_2_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
3239 				dcn3_2_soc.clock_limits[i].dscclk_mhz  = max_dispclk_mhz / 3;
3240 
3241 				/* Populate from bw_params for DTBCLK, SOCCLK */
3242 				if (i > 0) {
3243 					if (!bw_params->clk_table.entries[i].dtbclk_mhz) {
3244 						dcn3_2_soc.clock_limits[i].dtbclk_mhz  = dcn3_2_soc.clock_limits[i-1].dtbclk_mhz;
3245 					} else {
3246 						dcn3_2_soc.clock_limits[i].dtbclk_mhz  = bw_params->clk_table.entries[i].dtbclk_mhz;
3247 					}
3248 				} else if (bw_params->clk_table.entries[i].dtbclk_mhz) {
3249 					dcn3_2_soc.clock_limits[i].dtbclk_mhz  = bw_params->clk_table.entries[i].dtbclk_mhz;
3250 				}
3251 
3252 				if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
3253 					dcn3_2_soc.clock_limits[i].socclk_mhz = dcn3_2_soc.clock_limits[i-1].socclk_mhz;
3254 				else
3255 					dcn3_2_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
3256 
3257 				if (!dram_speed_mts[i] && i > 0)
3258 					dcn3_2_soc.clock_limits[i].dram_speed_mts = dcn3_2_soc.clock_limits[i-1].dram_speed_mts;
3259 				else
3260 					dcn3_2_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
3261 
3262 				/* These clocks cannot come from bw_params, always fill from dcn3_2_soc[0] */
3263 				/* PHYCLK_D18, PHYCLK_D32 */
3264 				dcn3_2_soc.clock_limits[i].phyclk_d18_mhz = dcn3_2_soc.clock_limits[0].phyclk_d18_mhz;
3265 				dcn3_2_soc.clock_limits[i].phyclk_d32_mhz = dcn3_2_soc.clock_limits[0].phyclk_d32_mhz;
3266 			}
3267 		} else {
3268 			build_synthetic_soc_states(dc->debug.disable_dc_mode_overwrite, bw_params,
3269 					dcn3_2_soc.clock_limits, &dcn3_2_soc.num_states);
3270 		}
3271 
3272 		/* Re-init DML with updated bb */
3273 		dml_init_instance(&dc->dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
3274 		if (dc->current_state)
3275 			dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_2_soc, &dcn3_2_ip, DML_PROJECT_DCN32);
3276 	}
3277 
3278 	if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) {
3279 		unsigned int i = 0;
3280 
3281 		dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries;
3282 
3283 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
3284 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels;
3285 
3286 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
3287 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels;
3288 
3289 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
3290 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
3291 
3292 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
3293 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels;
3294 
3295 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
3296 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels;
3297 
3298 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
3299 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
3300 
3301 		dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
3302 			dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
3303 
3304 		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) {
3305 			if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz)
3306 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
3307 					dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz;
3308 		}
3309 
3310 		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) {
3311 			if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz)
3312 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
3313 					dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz;
3314 		}
3315 
3316 		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) {
3317 			if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz)
3318 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
3319 					dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
3320 		}
3321 
3322 		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) {
3323 			if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz)
3324 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
3325 					dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz;
3326 		}
3327 
3328 		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) {
3329 			if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz)
3330 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
3331 					dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz;
3332 		}
3333 
3334 		for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) {
3335 			if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) {
3336 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
3337 					dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
3338 				dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
3339 					dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz;
3340 			}
3341 		}
3342 	}
3343 }
3344 
3345 void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
3346 				  int pipe_cnt)
3347 {
3348 	dc_assert_fp_enabled();
3349 
3350 	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
3351 	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
3352 }
3353 
3354 bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
3355 {
3356 	bool allow = false;
3357 	uint32_t refresh_rate = 0;
3358 	uint32_t min_refresh = subvp_active_margin_list.min_refresh;
3359 	uint32_t max_refresh = subvp_active_margin_list.max_refresh;
3360 	uint32_t i;
3361 
3362 	for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) {
3363 		uint32_t width = subvp_active_margin_list.res[i].width;
3364 		uint32_t height = subvp_active_margin_list.res[i].height;
3365 
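		/* Refresh rate = ceil(pixel clock / (h_total * v_total)); the
		 * "+ v_total * h_total - 1" makes the integer division round up.
		 */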
3366 		refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
3367 			(uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
3368 		refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
3369 		refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
3370 
3371 		if (refresh_rate >= min_refresh && refresh_rate <= max_refresh &&
3372 				dcn32_check_native_scaling_for_res(pipe, width, height)) {
3373 			allow = true;
3374 			break;
3375 		}
3376 	}
3377 	return allow;
3378 }
3379 
3380 /**
3381  * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp
3382  *
3383  * @dc: Current DC state
3384  * @context: New DC state to be programmed
3385  * @pipe: Pipe to be considered for use in subvp
3386  *
3387  * On high refresh rate display configs, we will allow subvp under the following conditions:
3388  * 1. Resolution is 3840x2160, 3440x1440, 2560x1440, or 1920x1080
3389  * 2. Refresh rate is between 120hz and 175hz (matching subvp_high_refresh_list)
3390  * 3. No scaling
3391  * 4. Freesync is inactive
3392  * 5. For single display cases, freesync must be disabled
3393  *
3394  * Return: True if pipe can be used for subvp, false otherwise
3395  */
3396 bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe)
3397 {
3398 	bool allow = false;
3399 	uint32_t refresh_rate = 0;
3400 	uint32_t subvp_min_refresh = subvp_high_refresh_list.min_refresh;
3401 	uint32_t subvp_max_refresh = subvp_high_refresh_list.max_refresh;
3402 	uint32_t min_refresh = subvp_max_refresh;
3403 	uint32_t i;
3404 
3405 	/* Only allow SubVP on high refresh displays if all connected displays
3406 	 * are considered "high refresh" (i.e. >= 120hz). We do not want to
3407 	 * allow mixed combinations such as 120hz (SubVP) + 60hz.
3408 	 */
3409 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3410 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3411 
3412 		if (!pipe_ctx->stream)
3413 			continue;
3414 		refresh_rate = (pipe_ctx->stream->timing.pix_clk_100hz * 100 +
3415 				pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total - 1)
3416 						/ (double)(pipe_ctx->stream->timing.v_total * pipe_ctx->stream->timing.h_total);
3417 
3418 		if (refresh_rate < min_refresh)
3419 			min_refresh = refresh_rate;
3420 	}
3421 
3422 	if (!dc->debug.disable_subvp_high_refresh && min_refresh >= subvp_min_refresh && pipe->stream &&
3423 			pipe->plane_state && !(pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed)) {
3424 		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
3425 						pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
3426 						/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
3427 		if (refresh_rate >= subvp_min_refresh && refresh_rate <= subvp_max_refresh) {
3428 			for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) {
3429 				uint32_t width = subvp_high_refresh_list.res[i].width;
3430 				uint32_t height = subvp_high_refresh_list.res[i].height;
3431 
3432 				if (dcn32_check_native_scaling_for_res(pipe, width, height)) {
3433 					if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) {
3434 						allow = true;
3435 						break;
3436 					}
3437 				}
3438 			}
3439 		}
3440 	}
3441 	return allow;
3442 }
3443 
3444 /**
3445  * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
3446  *
3447  * @dc: Current DC state
3448  * @context: New DC state to be programmed
3449  *
3450  * Return: Max vratio for prefetch
3451  */
3452 double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
3453 {
3454 	(void)dc;
3455 	double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
3456 	int i;
3457 
3458 	/* For single display MPO configs, allow the max vratio to be 8
3459 	 * if any plane is YUV420 format
3460 	 */
3461 	if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) {
3462 		for (i = 0; i < context->stream_status[0].plane_count; i++) {
3463 			if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr ||
3464 					context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) {
3465 				max_vratio_pre = __DML_MAX_VRATIO_PRE__;
3466 			}
3467 		}
3468 	}
3469 	return max_vratio_pre;
3470 }
3471 
3472 /**
3473  * dcn32_assign_fpo_vactive_candidate - Assign the FPO stream candidate for FPO + VActive case
3474  *
3475  * This function chooses the FPO candidate stream for FPO + VActive cases (2 stream config).
3476  * For FPO + VActive cases, the assumption is that one display has ActiveMargin > 0, and the
3477  * other display has ActiveMargin <= 0. This function will choose the pipe/stream that has
3478  * ActiveMargin <= 0 to be the FPO stream candidate if found.
3479  *
3480  *
3481  * @dc: current dc state
3482  * @context: new dc state
3483  * @fpo_candidate_stream: pointer to FPO stream candidate if one is found
3484  *
3485  * Return: void
3486  */
3487 void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *context, struct dc_stream_state **fpo_candidate_stream)
3488 {
3489 	unsigned int i, pipe_idx;
3490 	const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
3491 
3492 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
3493 		const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3494 
3495 		/* In DCN32/321, FPO uses per-pipe P-State force.
3496 		 * If there are no planes, HUBP is power gated and
3497 		 * therefore programming UCLK_PSTATE_FORCE does
3498 		 * nothing (P-State will always be asserted naturally
3499 		 * on a pipe that has HUBP power gated). Therefore we
3500 		 * only want to enable FPO if the FPO pipe has both
3501 		 * a stream and a plane.
3502 		 */
3503 		if (!pipe->stream || !pipe->plane_state)
3504 			continue;
3505 
3506 		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
3507 			*fpo_candidate_stream = pipe->stream;
3508 			break;
3509 		}
3510 		pipe_idx++;
3511 	}
3512 }
3513 
3514 /**
3515  * dcn32_find_vactive_pipe - Determines if the config has a pipe that can switch in VACTIVE
3516  *
3517  * @dc: current dc state
3518  * @context: new dc state
3519  * @fpo_candidate_stream: candidate stream to be chosen for FPO
3520  * @vactive_margin_req_us: The vactive margin required for a vactive pipe to be considered "found"
3521  *
3522  * Return: True if VACTIVE display is found, false otherwise
3523  */
3524 bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, struct dc_stream_state *fpo_candidate_stream, uint32_t vactive_margin_req_us)
3525 {
3526 	unsigned int i, pipe_idx;
3527 	const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
3528 	bool vactive_found = true;
3529 	unsigned int blank_us = 0;
3530 
3531 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
3532 		const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3533 
3534 		if (!pipe->stream)
3535 			continue;
3536 
3537 		/* Don't need to check for vactive margin on the FPO candidate stream */
3538 		if (fpo_candidate_stream && pipe->stream == fpo_candidate_stream) {
3539 			pipe_idx++;
3540 			continue;
3541 		}
3542 
3543 		/* Every plane (apart from the ones driven by the FPO pipes) needs to have active margin
3544 		 * in order for us to have found a valid "vactive" config for FPO + Vactive
3545 		 */
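		/* blank_us is the vertical blanking duration in microseconds:
		 * blank lines * line time.
		 */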
3546 		blank_us = ((pipe->stream->timing.v_total - pipe->stream->timing.v_addressable) * pipe->stream->timing.h_total /
3547 				(double)(pipe->stream->timing.pix_clk_100hz * 100)) * 1000000;
3548 		if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] < vactive_margin_req_us ||
3549 				pipe->stream->vrr_active_variable || pipe->stream->vrr_active_fixed || blank_us >= dc->debug.fpo_vactive_max_blank_us) {
3550 			vactive_found = false;
3551 			break;
3552 		}
3553 		pipe_idx++;
3554 	}
3555 	return vactive_found;
3556 }
3557 
3558 void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
3559 {
3560 	(void)soc_bb;
3561 	dc_assert_fp_enabled();
3562 	dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
3563 }
3564 
3565 void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
3566 {
3567 	// WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
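	/* If validation landed on memclk DPM0, bump the requested DRAM speed
	 * to DPM1 so the strobe-mode level is not used.
	 */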
3568 	if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
3569 			dc->dml.soc.num_chans <= 8) {
3570 		int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
3571 
3572 		if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 &&
3573 				num_mclk_levels > 1) {
3574 			context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
3575 			context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
3576 		}
3577 	}
3578 }
3579