/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "display_mode_core.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
#include "dml2_policy.h"
#include "dml2_translation_helper.h"
#include "dml2_mall_phantom.h"
#include "dml2_dc_resource_mgmt.h"

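/* The three helpers below choose between native DML2 construction of the IP
 * parameters, SoC bounding box and SoC states, and translation from the
 * existing DC structures, depending on the wrapper configuration.
 */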
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_ip_params(dml2, in_dc, out);
	else
		dml2_translate_ip_params(in_dc, out);
}

static void initialize_dml2_soc_bbox(struct dml2_context *dml2, const struct dc *in_dc, struct soc_bounding_box_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_socbb_params(dml2, in_dc, out);
	else
		dml2_translate_socbb_params(in_dc, out);
}

static void initialize_dml2_soc_states(struct dml2_context *dml2,
	const struct dc *in_dc, const struct soc_bounding_box_st *in_bbox, struct soc_states_st *out)
{
	if (dml2->config.use_native_soc_bb_construction)
		dml2_init_soc_states(dml2, in_dc, in_bbox, out);
	else
		dml2_translate_soc_states(in_dc, out, in_dc->dml.soc.num_states);
}

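/* Copy the mode-support results (ODM mode, DPPs per surface, DSC state) into
 * the hardware section of the display config, and record which stream/plane ID
 * each DML pipe index belongs to, for later DC pipe assignment.
 */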
static void map_hw_resources(struct dml2_context *dml2,
		struct dml_display_cfg_st *in_out_display_cfg, struct dml_mode_support_info_st *mode_support_info)
{
	unsigned int num_pipes = 0;
	int i, j;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		in_out_display_cfg->hw.ODMMode[i] = mode_support_info->ODMMode[i];
		in_out_display_cfg->hw.DPPPerSurface[i] = mode_support_info->DPPPerSurface[i];
		in_out_display_cfg->hw.DSCEnabled[i] = mode_support_info->DSCEnabled[i];
		in_out_display_cfg->hw.NumberOfDSCSlices[i] = mode_support_info->NumberOfDSCSlices[i];
		in_out_display_cfg->hw.DLGRefClkFreqMHz = 24;
		if (dml2->v20.dml_core_ctx.project != dml_project_dcn35 &&
			dml2->v20.dml_core_ctx.project != dml_project_dcn351) {
			/* dGPU defaults to 50 MHz */
			in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
		}
		for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
			dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id_valid[num_pipes] = true;
			num_pipes++;
		}
	}
}

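/* Fill the persistent mode-support parameter block in scratch and call the
 * DML core's mode support check. Returns nonzero when the config is
 * supported; the lowest supported state index is left in scratch for later
 * mode programming.
 */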
static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2,
	const struct dml_display_cfg_st *display_cfg,
	struct dml_mode_support_info_st *evaluation_info)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx;
	s->mode_support_params.in_display_cfg = display_cfg;
	s->mode_support_params.out_evaluation_info = evaluation_info;

	memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st));
	s->mode_support_params.out_lowest_state_idx = 0;

	return dml_mode_support_ex(&s->mode_support_params);
}

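/* Attempt one optimization pass on the current display config. With native
 * p-state optimization enabled, try to add a SubVP phantom timing/surface for
 * the best sub-120Hz candidate; failing that, try ODM combine to minimize
 * dispclk. Returns true if a new config/policy was produced and should be
 * re-checked with mode support.
 */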
static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p)
{
	int unused_dpps = p->ip_params->max_num_dpp;
	int i, j;
	int odms_needed, refresh_rate_hz, dpps_needed, subvp_height, pstate_width_fw_delay_lines, surface_count;
	int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index;
	float frame_time_sec, max_frame_time_sec;
	int largest_blend_and_timing = 0;
	bool optimization_done = false;

	for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
		if (p->cur_display_config->plane.BlendingAndTiming[i] > largest_blend_and_timing)
			largest_blend_and_timing = p->cur_display_config->plane.BlendingAndTiming[i];
	}

	if (p->new_policy != p->cur_policy)
		*p->new_policy = *p->cur_policy;

	if (p->new_display_config != p->cur_display_config)
		*p->new_display_config = *p->cur_display_config;

	// Optimize P-State Support
	if (dml2->config.use_native_pstate_optimization) {
		if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) {
			// Find the display with the highest refresh rate below 120Hz that is not already SubVP
			subvp_timing_to_add = -1;
			subvp_surface_to_add = -1;
			max_frame_time_sec = 0;
			surface_count = 0;
			for (i = 0; i < (int) p->cur_display_config->num_timings; i++) {
				refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000,
					(p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i]));
				if (refresh_rate_hz < 120) {
					// Check its upstream surfaces to see if this one could be converted to subvp.
					dpps_needed = 0;
					for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) {
						if (p->cur_display_config->plane.BlendingAndTiming[j] == i &&
							p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) {
							dpps_needed += p->cur_mode_support_info->DPPPerSurface[j];
							subvp_surface_to_add = j;
							surface_count++;
						}
					}

					if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) {
						frame_time_sec = (float)1 / refresh_rate_hz;
						if (frame_time_sec > max_frame_time_sec) {
							max_frame_time_sec = frame_time_sec;
							subvp_timing_to_add = i;
						}
					}
				}
			}
			if (subvp_timing_to_add >= 0) {
				new_timing_index = p->new_display_config->num_timings++;
				new_surface_index = p->new_display_config->num_surfaces++;
				// Add a phantom pipe reflecting the main pipe's timing
				dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add);

				pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us +
					p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) *
					(p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) /
					(double)p->new_display_config->timing.HTotal[subvp_timing_to_add]);

				subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines;

				p->new_display_config->timing.VActive[new_timing_index] = subvp_height;
				p->new_display_config->timing.VTotal[new_timing_index] = subvp_height +
					p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add];

				p->new_display_config->output.OutputDisabled[new_timing_index] = true;

				p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport;

				dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add);
				dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add);

				p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height;
				p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height;
				p->new_display_config->plane.ViewportStationary[new_surface_index] = false;

				p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable;
				p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe;

				p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0;

				p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required;

				p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index;

				optimization_done = true;
			}
		}
	}

	// Optimize Clocks
	if (!optimization_done) {
		if (largest_blend_and_timing == 0 && p->cur_policy->ODMUse[0] == dml_odm_use_policy_combine_as_needed && dml2->config.minimize_dispclk_using_odm) {
			odms_needed = dml2_util_get_maximum_odm_combine_for_output(dml2->config.optimize_odm_4to1,
				p->cur_display_config->output.OutputEncoder[0], p->cur_mode_support_info->DSCEnabled[0]) - 1;

			if (odms_needed <= unused_dpps) {
				unused_dpps -= odms_needed;

				if (odms_needed == 1) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_2to1;
					optimization_done = true;
				} else if (odms_needed == 3) {
					p->new_policy->ODMUse[0] = dml_odm_use_policy_combine_4to1;
					optimization_done = true;
				} else
					optimization_done = false;
			}
		}
	}

	return optimization_done;
}

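/* Find the lowest DML state that still supports v-active DRAM clock change
 * when the dummy (temperature-read) p-state latencies are plugged in; this is
 * the floor used to keep "g6" (GDDR6) temperature reads serviceable. The real
 * per-state latencies are restored before returning. Returns -1 if no state
 * qualifies.
 */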
static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state)
{
	struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch;
	struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch;

	unsigned int dml_result = 0;
	int result = -1, i, j;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out scratch data before each call */
	memset(s, 0, sizeof(struct dml2_calculate_lowest_supported_state_for_temp_read_scratch));
	memset(&s_global->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s_global->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations.
		 * This is also done in the DML1 driver code path, hence display_state
		 * cannot be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return -1; /* error, not a valid state index */
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		s->uclk_change_latencies[i] = dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us;
	}

	for (i = 0; i < 4; i++) {
		for (j = 0; j < dml2->v20.dml_core_ctx.states.num_states; j++) {
			dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us;
		}

		dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info);

		if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) {
			map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info);
			dml_result = dml_mode_programming(&dml2->v20.dml_core_ctx, s_global->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			ASSERT(dml_result);

			dml2_extract_watermark_set(&dml2->v20.g6_temp_read_watermark_set, &dml2->v20.dml_core_ctx);
			dml2->v20.g6_temp_read_watermark_set.cstate_pstate.fclk_pstate_change_ns = dml2->v20.g6_temp_read_watermark_set.cstate_pstate.pstate_change_ns;

			result = s_global->mode_support_params.out_lowest_state_idx;

			while (dml2->v20.dml_core_ctx.states.state_array[result].dram_speed_mts < s_global->dummy_pstate_table[i].dram_speed_mts)
				result++;

			break;
		}
	}

	for (i = 0; i < dml2->v20.dml_core_ctx.states.num_states; i++) {
		dml2->v20.dml_core_ctx.states.state_array[i].dram_clock_change_latency_us = s->uclk_change_latencies[i];
	}

	return result;
}

static void copy_dummy_pstate_table(struct dummy_pstate_entry *dest, struct dummy_pstate_entry *src, unsigned int num_entries)
{
	for (int i = 0; i < num_entries; i++) {
		dest[i] = src[i];
	}
}

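/* Returns true if any timing that blends multiple planes (MPO) is also using
 * an ODM mode other than bypass.
 */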
static bool are_timings_requiring_odm_doing_blending(const struct dml_display_cfg_st *display_cfg,
		const struct dml_mode_support_info_st *evaluation_info)
{
	unsigned int planes_per_timing[__DML_NUM_PLANES__] = {0};
	int i;

	for (i = 0; i < display_cfg->num_surfaces; i++)
		planes_per_timing[display_cfg->plane.BlendingAndTiming[i]]++;

	for (i = 0; i < __DML_NUM_PLANES__; i++) {
		if (planes_per_timing[i] > 1 && evaluation_info->ODMMode[i] != dml_odm_mode_bypass)
			return true;
	}

	return false;
}

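/* Software policy gate: reject configurations that need ODM combine on a
 * blended (multi-plane) timing unless windowed MPO + ODM is enabled.
 */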
static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const struct dml_display_cfg_st *display_cfg,
	const struct dml_mode_support_info_st *evaluation_info)
{
	bool pass = true;

	if (!ctx->config.enable_windowed_mpo_odm) {
		if (are_timings_requiring_odm_doing_blending(display_cfg, evaluation_info))
			pass = false;
	}

	return pass;
}

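/* Build the DML display config from the DC state, run mode support and the
 * software policy checks, then repeatedly apply optimize_configuration()
 * until a pass fails, keeping the last passing config and policy. On success
 * the hardware resource mapping is recorded in scratch.
 */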
static bool dml_mode_support_wrapper(struct dml2_context *dml2,
		struct dc_state *display_state)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	unsigned int result = 0, i;
	unsigned int optimized_result = true;

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	/* Zero out scratch data before each call */
	memset(&s->cur_display_config, 0, sizeof(struct dml_display_cfg_st));
	memset(&s->mode_support_params, 0, sizeof(struct dml_mode_support_ex_params_st));
	memset(&s->dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping));
	memset(&s->optimize_configuration_params, 0, sizeof(struct dml2_wrapper_optimize_configuration_params));

	for (i = 0; i < dml2->config.dcn_pipe_count; i++) {
		/* Calling resource_build_scaling_params will populate the pipe params
		 * with the information needed for correct DML calculations.
		 * This is also done in the DML1 driver code path, hence display_state
		 * cannot be const.
		 */
		struct pipe_ctx *pipe = &display_state->res_ctx.pipe_ctx[i];

		if (pipe->plane_state) {
			if (!dml2->config.callbacks.build_scaling_params(pipe)) {
				ASSERT(false);
				return false;
			}
		}
	}

	map_dc_state_into_dml_display_cfg(dml2, display_state, &s->cur_display_config);
	if (!dml2->config.skip_hw_state_mapping)
		dml2_apply_det_buffer_allocation_policy(dml2, &s->cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&s->cur_display_config,
		&s->mode_support_info);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info);

	// Try to optimize
	if (result) {
		s->cur_policy = dml2->v20.dml_core_ctx.policy;
		s->optimize_configuration_params.dml_core_ctx = &dml2->v20.dml_core_ctx;
		s->optimize_configuration_params.config = &dml2->config;
		s->optimize_configuration_params.ip_params = &dml2->v20.dml_core_ctx.ip;
		s->optimize_configuration_params.cur_display_config = &s->cur_display_config;
		s->optimize_configuration_params.cur_mode_support_info = &s->mode_support_info;
		s->optimize_configuration_params.cur_policy = &s->cur_policy;
		s->optimize_configuration_params.new_display_config = &s->new_display_config;
		s->optimize_configuration_params.new_policy = &s->new_policy;

		while (optimized_result && optimize_configuration(dml2, &s->optimize_configuration_params)) {
			dml2->v20.dml_core_ctx.policy = s->new_policy;
			optimized_result = pack_and_call_dml_mode_support_ex(dml2,
				&s->new_display_config,
				&s->mode_support_info);

			if (optimized_result)
				optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info);

			// If the new optimized state is supported, then set current = new
			if (optimized_result) {
				s->cur_display_config = s->new_display_config;
				s->cur_policy = s->new_policy;
			} else {
				// Else, restore policy to current
				dml2->v20.dml_core_ctx.policy = s->cur_policy;
			}
		}

		// Optimization ended with a failing config, so restore DML state to the last passing one
		if (!optimized_result) {
			result = pack_and_call_dml_mode_support_ex(dml2,
				&s->cur_display_config,
				&s->mode_support_info);
		}
	}

	if (result)
		map_hw_resources(dml2, &s->cur_display_config, &s->mode_support_info);

	return result;
}

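/* Return the index of the first non-SubVP stream that is DRR-capable, or -1
 * if there is none.
 */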
static int find_drr_eligible_stream(struct dc_state *display_state)
{
	int i;

	for (i = 0; i < display_state->stream_count; i++) {
		if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
			&& display_state->streams[i]->ignore_msa_timing_param) {
			// Use the ignore_msa_timing_param flag to identify DRR
			return i;
		}
	}

	return -1;
}

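/* P-state optimization for the non-native path. A supported vote is accepted
 * as-is while advanced p-state switching is disabled (the current default),
 * and a single-stream config may additionally try fw-based vblank stretch.
 * The SubVP/DRR loop adds phantom pipes one at a time, optionally pairing the
 * last remaining non-SubVP stream with DRR, until static schedulability is
 * proven; all phantom pipes are removed again if optimization fails.
 */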
static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state)
{
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	bool pstate_optimization_done = false;
	bool pstate_optimization_success = false;
	bool result = false;
	int drr_display_index = 0, non_svp_streams = 0;
	bool force_svp = dml2->config.svp_pstate.force_enable_subvp;
	bool advanced_pstate_switching = false;

	display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
	display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;

	result = dml_mode_support_wrapper(dml2, display_state);

	if (!result) {
		pstate_optimization_done = true;
	} else if (!advanced_pstate_switching ||
		(s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp)) {
		pstate_optimization_success = true;
		pstate_optimization_done = true;
	}

	if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) {
		display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true;

		result = dml_mode_support_wrapper(dml2, display_state);
	} else {
		non_svp_streams = display_state->stream_count;

		while (!pstate_optimization_done) {
			result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true);

			// Always try adding SVP first
			if (result)
				result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info);
			else
				pstate_optimization_done = true;

			if (result) {
				result = dml_mode_support_wrapper(dml2, display_state);
			} else {
				pstate_optimization_done = true;
			}

			if (result) {
				non_svp_streams--;

				if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
					if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) {
						pstate_optimization_success = true;
						pstate_optimization_done = true;
					} else {
						pstate_optimization_success = false;
						pstate_optimization_done = false;
					}
				} else {
					drr_display_index = find_drr_eligible_stream(display_state);

					// If there is only 1 remaining non SubVP pipe that is DRR, check static
					// schedulability for SubVP + DRR.
					if (non_svp_streams == 1 && drr_display_index >= 0) {
						if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) {
							display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true;
							display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index;
							result = dml_mode_support_wrapper(dml2, display_state);
						}

						if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) {
							pstate_optimization_success = true;
							pstate_optimization_done = true;
						} else {
							pstate_optimization_success = false;
							pstate_optimization_done = false;
						}
					}

					if (pstate_optimization_success) {
						pstate_optimization_done = true;
					} else {
						pstate_optimization_done = false;
					}
				}
			}
		}
	}

	if (!pstate_optimization_success) {
		dml2_svp_remove_all_phantom_pipes(dml2, display_state);
		display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
		display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false;
		result = dml_mode_support_wrapper(dml2, display_state);
	}

	return result;
}

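/* Run mode support (using p-state optimization where enabled), then program
 * the mode at the higher of the lowest supported state and the floor required
 * for GDDR6 temperature reads, when that floor is valid.
 */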
static bool call_dml_mode_support_and_programming(struct dc_state *context)
{
	unsigned int result = 0;
	unsigned int min_state;
	int min_state_for_g6_temp_read = 0;
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;

	min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context);

	ASSERT(min_state_for_g6_temp_read >= 0);

	if (!dml2->config.use_native_pstate_optimization) {
		result = optimize_pstate_with_svp_and_drr(dml2, context);
	} else {
		result = dml_mode_support_wrapper(dml2, context);
	}

	/* When trying to set certain FRL frequencies, min_state_for_g6_temp_read is
	 * reported as -1. This leads to an invalid value of min_state, causing crashes
	 * later on. Use the default logic for min_state only when
	 * min_state_for_g6_temp_read is valid; otherwise, use the value calculated by
	 * the DML directly.
	 */
	if (min_state_for_g6_temp_read >= 0)
		min_state = min_state_for_g6_temp_read > s->mode_support_params.out_lowest_state_idx ? min_state_for_g6_temp_read : s->mode_support_params.out_lowest_state_idx;
	else
		min_state = s->mode_support_params.out_lowest_state_idx;

	if (result)
		result = dml_mode_programming(&dml2->v20.dml_core_ctx, min_state, &s->cur_display_config, true);

	return result;
}

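/* Full validation path: run mode support and programming, map the DML pipes
 * onto DC pipes, re-run if the DET buffer configuration needed to change,
 * then extract clocks, watermarks and RQ/DLG parameters into the DC state.
 */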
static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context)
{
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	struct dml2_wrapper_scratch *s = &dml2->v20.scratch;
	struct dml2_dcn_clocks out_clks;
	unsigned int result = 0;
	bool need_recalculation = false;

	if (!context || context->stream_count == 0)
		return true;

	/* Zero out scratch data before each call */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	/* Initialize DET scratch */
	dml2_initialize_det_scratch(dml2);

	copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4);

	result = call_dml_mode_support_and_programming(context);
	/* Call dml2_map_dc_pipes to map the pipes based on the DML output. To correctly
	 * determine whether recalculation is required, the resource context needs to
	 * reflect the number of active pipes, which is only known after
	 * dml2_map_dc_pipes is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping)
		dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);

	/* Verify and update the DET buffer configuration if needed. dml2_verify_det_buffer_configuration
	 * checks whether the DET buffer size needs to be updated; if so, it updates the DETOverride
	 * variable and sets the need_recalculation flag, in which case mode support is run again.
	 * Verification must run after dml_mode_programming because the getters return correct DET
	 * buffer values only after dml_mode_programming is called.
	 */
	if (result && !dml2->config.skip_hw_state_mapping) {
		need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
		if (need_recalculation) {
			/* Engage the DML again if recalculation is required. */
			call_dml_mode_support_and_programming(context);
			if (!dml2->config.skip_hw_state_mapping) {
				dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state);
			}
			need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch);
			ASSERT(need_recalculation == false);
		}
	}

	if (result) {
		unsigned int lowest_state_idx = s->mode_support_params.out_lowest_state_idx;
		out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.mp.Dispclk_calculated * 1000;
		out_clks.p_state_supported = s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported;
		if (in_dc->config.use_default_clock_table &&
			(lowest_state_idx < dml2->v20.dml_core_ctx.states.num_states - 1)) {
			lowest_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1;
			out_clks.dispclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dispclk_mhz * 1000;
		}

		out_clks.dcfclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dcfclk_mhz * 1000;
		out_clks.fclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].fabricclk_mhz * 1000;
		out_clks.uclk_mts = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dram_speed_mts;
		out_clks.phyclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].phyclk_mhz * 1000;
		out_clks.socclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].socclk_mhz * 1000;
		out_clks.ref_dtbclk_khz = (unsigned int)dml2->v20.dml_core_ctx.states.state_array[lowest_state_idx].dtbclk_mhz * 1000;
		context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(in_dc, context);

		if (!dml2->config.skip_hw_state_mapping) {
			/* Extract the RQ and DLG register parameters from the DML output */
			dml2_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml2, in_dc->res_pool->pipe_count);
		}

		dml2_copy_clocks_to_dc_state(&out_clks, context);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.a, &dml2->v20.dml_core_ctx);
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
		memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
		dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
		// Copy for deciding z-state use
		context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
	}

	return result;
}

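/* Fast validation path: run DML mode support and the software policy checks
 * only, with no hardware state mapping or mode programming.
 */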
static bool dml2_validate_only(struct dc_state *context)
{
	struct dml2_context *dml2 = context->bw_ctx.dml2;
	unsigned int result = 0;

	if (!context || context->stream_count == 0)
		return true;

	/* Zero out scratch data before each call */
	memset(&dml2->v20.scratch, 0, sizeof(struct dml2_wrapper_scratch));
	memset(&dml2->v20.dml_core_ctx.policy, 0, sizeof(struct dml_mode_eval_policy_st));
	memset(&dml2->v20.dml_core_ctx.ms, 0, sizeof(struct mode_support_st));
	memset(&dml2->v20.dml_core_ctx.mp, 0, sizeof(struct mode_program_st));

	build_unoptimized_policy_settings(dml2->v20.dml_core_ctx.project, &dml2->v20.dml_core_ctx.policy);

	map_dc_state_into_dml_display_cfg(dml2, context, &dml2->v20.scratch.cur_display_config);

	result = pack_and_call_dml_mode_support_ex(dml2,
		&dml2->v20.scratch.cur_display_config,
		&dml2->v20.scratch.mode_support_info);

	if (result)
		result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);

	return result == 1;
}

static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
{
	if (dc->debug.override_odm_optimization) {
		dml2->config.minimize_dispclk_using_odm = dc->debug.minimize_dispclk_using_odm;
	}
}

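/* Entry point for DC validation: fast_validate runs mode support only, while
 * the full path also programs the mode and builds the DC resource state.
 */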
bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate)
{
	bool out = false;

	if (!(context->bw_ctx.dml2))
		return false;
	dml2_apply_debug_options(in_dc, context->bw_ctx.dml2);

	/* Use dml2_validate_only for the fast_validate path */
	if (fast_validate)
		out = dml2_validate_only(context);
	else
		out = dml2_validate_and_build_resource(in_dc, context);
	return out;
}

static inline struct dml2_context *dml2_allocate_memory(void)
{
	return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}

bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
	// Allocate Mode Lib Ctx
	*dml2 = dml2_allocate_memory();

	if (!(*dml2))
		return false;

	// Store config options
	(*dml2)->config = *config;

	switch (in_dc->ctx->dce_version) {
	case DCN_VERSION_3_5:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn35;
		break;
	case DCN_VERSION_3_51:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn351;
		break;
	case DCN_VERSION_3_2:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn32;
		break;
	case DCN_VERSION_3_21:
		(*dml2)->v20.dml_core_ctx.project = dml_project_dcn321;
		break;
	default:
		(*dml2)->v20.dml_core_ctx.project = dml_project_default;
		break;
	}

	initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip);

	initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);

	initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);

	/* Initialize DML20 instance, which calls dml2_core_create and core_dcn3_populate_informative */
	//dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
	return true;
}

void dml2_destroy(struct dml2_context *dml2)
{
	if (!dml2)
		return;

	kfree(dml2);
}

void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
	unsigned int *fclk_change_support, unsigned int *dram_clk_change_support)
{
	*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
	*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}

void dml2_copy(struct dml2_context *dst_dml2,
	struct dml2_context *src_dml2)
{
	/* Copy Mode Lib Ctx */
	memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
}

bool dml2_create_copy(struct dml2_context **dst_dml2,
	struct dml2_context *src_dml2)
{
	/* Allocate Mode Lib Ctx */
	*dst_dml2 = dml2_allocate_memory();

	if (!(*dst_dml2))
		return false;

	/* Copy Mode Lib Ctx */
	dml2_copy(*dst_dml2, src_dml2);

	return true;
}