// SPDX-License-Identifier: MIT
//
// Copyright 2024 Advanced Micro Devices, Inc.

#include "dml2_pmo_factory.h"
#include "dml2_debug.h"
#include "lib_float_math.h"
#include "dml2_pmo_dcn4_fams2.h"

static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
static const double MIN_BLANK_STUTTER_FACTOR = 3.0;

static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
	// VActive Preferred
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then SVP
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Finally VBlank, but allow base clocks for latency to increase
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
	*/
};

static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy);

static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
	// VActive only is preferred
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then VActive + VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then VBlank only
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then SVP + VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then SVP + DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then SVP + SVP
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then VActive + DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then DRR + DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Finally VBlank, but allow base clocks for latency to increase
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
	*/
};

static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy);

static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
	// All VActive
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// VActive + 1 VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// All VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// All DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// All VBlank, with state increase allowed
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
	*/
};

static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy);

static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
	// All VActive
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive },
		.allow_state_increase = true,
	},

	// VActive + 1 VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
		.allow_state_increase = false,
	},

	// All VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
		.allow_state_increase = false,
	},

	// All DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr },
		.allow_state_increase = true,
	},

	// All VBlank, with state increase allowed
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
		.allow_state_increase = true,
	},
	*/
};

static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy);
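
/*
 * Note: at init time these base tables are expanded into per-stream
 * permutations and DRR variants (see expand_base_strategy() and
 * expand_variant_strategy() below). Illustrative example: the 2-display base
 * strategy { vactive, vblank } expands into the orderings { vactive, vblank }
 * and { vblank, vactive }, plus DRR variants such as { fw_vactive_drr, vblank }.
 */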

static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
{
	bool result = true;

	if (*odm_mode == dml2_odm_mode_auto) {
		switch (odms_calculated) {
		case 1:
			*odm_mode = dml2_odm_mode_bypass;
			break;
		case 2:
			*odm_mode = dml2_odm_mode_combine_2to1;
			break;
		case 3:
			*odm_mode = dml2_odm_mode_combine_3to1;
			break;
		case 4:
			*odm_mode = dml2_odm_mode_combine_4to1;
			break;
		default:
			result = false;
			break;
		}
	}

	if (result) {
		if (*odm_mode == dml2_odm_mode_bypass) {
			*odm_mode = dml2_odm_mode_combine_2to1;
		} else if (*odm_mode == dml2_odm_mode_combine_2to1) {
			*odm_mode = dml2_odm_mode_combine_3to1;
		} else if (*odm_mode == dml2_odm_mode_combine_3to1) {
			*odm_mode = dml2_odm_mode_combine_4to1;
		} else {
			result = false;
		}
	}

	return result;
}

static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
{
	if (*mpc_combine_factor < limit) {
		(*mpc_combine_factor)++;
		return true;
	}

	return false;
}

static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index)
{
	unsigned int i, count;

	count = 0;
	for (i = 0; i < display_cfg->num_planes; i++) {
		if (display_cfg->plane_descriptors[i].stream_index == stream_index)
			count++;
	}

	return count;
}

static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out,
	int free_pipes)
{
	struct dml2_pmo_instance *pmo = in_out->instance;

	unsigned int i;
	bool result = true;

	for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
		// For planes that failed the DCC mcache check, we want to increase the pipe count.
		// The logic for doing this depends on how many pipes are already in use,
		// and whether it's MPCC or ODM combine.
		if (!in_out->dcc_mcache_supported[i]) {
			// For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1
			if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) {
				in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor =
						in_out->cfg_support_info->plane_support_info[i].dpps_used;
				// For each plane that is not passing mcache validation, just add another pipe to it, up to the limit.
				if (free_pipes > 0) {
					if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor,
							pmo->mpc_combine_limit)) {
						// We've reached the max pipes allocatable to a single plane, so we fail.
						result = false;
						break;
					} else {
						// Successfully added another pipe to this failing plane.
						free_pipes--;
					}
				} else {
					// No free pipes to add.
					result = false;
					break;
				}
			} else {
				// If the stream of this plane needs ODM combine, no further optimization can be done.
				result = false;
				break;
			}
		}
	}

	return result;
}

bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;

	unsigned int i, used_pipes, free_pipes, planes_on_stream;
	bool result;

	if (in_out->display_config != in_out->optimized_display_cfg) {
		memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg));
	}

	// Count the number of free pipes, and check if any ODM combine is in use.
	used_pipes = 0;
	for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
		used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used;
	}
	free_pipes = pmo->ip_caps->pipe_count - used_pipes;

	// Optimization loop
	// The goal here is to add more pipes to any planes
	// which are failing mcache admissibility
	result = true;

	// The optimization logic depends on whether ODM combine is enabled, and the stream count.
	if (in_out->optimized_display_cfg->num_streams > 1 || in_out->instance->options->disable_dyn_odm) {
		// If there are multiple streams, we can only optimize mcache failures on planes
		// which are not ODM combined.

		result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
	} else if (in_out->optimized_display_cfg->num_streams == 1) {
		// In single stream cases, we still optimize mcache failures when there's ODM combine, with some
		// additional logic.

		if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) {
			// If ODM combine is enabled, then the logic is to increase the ODM combine factor.

			// Optimization for streams with > 1 ODM combine factor is only supported for single display.
			planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0);

			for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
				// For planes that failed the DCC mcache check, we want to increase the pipe count.
				// The logic for doing this depends on how many pipes are already in use,
				// and whether it's MPCC or ODM combine.
				if (!in_out->dcc_mcache_supported[i]) {
					// Increasing the ODM combine factor on a stream requires a free pipe for each plane on the stream.
					if (free_pipes >= planes_on_stream) {
						if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode,
								in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
							result = false;
						} else {
							break;
						}
					} else {
						result = false;
						break;
					}
				}
			}
		} else {
			// If ODM combine is not enabled, then we can actually use the same logic as before.

			result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
		}
	} else {
		result = true;
	}

	return result;
}

static enum dml2_pstate_method convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)
{
	enum dml2_pstate_method variant_strategy = 0;

	switch (base_strategy) {
	case dml2_pstate_method_vactive:
		variant_strategy = dml2_pstate_method_fw_vactive_drr;
		break;
	case dml2_pstate_method_vblank:
		variant_strategy = dml2_pstate_method_fw_vblank_drr;
		break;
	case dml2_pstate_method_fw_svp:
		variant_strategy = dml2_pstate_method_fw_svp_drr;
		break;
	case dml2_pstate_method_fw_vactive_drr:
	case dml2_pstate_method_fw_vblank_drr:
	case dml2_pstate_method_fw_svp_drr:
	case dml2_pstate_method_fw_drr:
	case dml2_pstate_method_reserved_hw:
	case dml2_pstate_method_reserved_fw:
	case dml2_pstate_method_reserved_fw_drr_clamped:
	case dml2_pstate_method_reserved_fw_drr_var:
	case dml2_pstate_method_count:
	case dml2_pstate_method_na:
	default:
		/* no variant for this mode */
		variant_strategy = base_strategy;
	}

	return variant_strategy;
}

static struct dml2_pmo_pstate_strategy *get_expanded_strategy_list(struct dml2_pmo_init_data *init_data, int stream_count)
{
	struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;

	switch (stream_count) {
	case 1:
		expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_1_display;
		break;
	case 2:
		expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_2_display;
		break;
	case 3:
		expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_3_display;
		break;
	case 4:
		expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_4_display;
		break;
	default:
		break;
	}

	return expanded_strategy_list;
}

static unsigned int get_num_expanded_strategies(
	struct dml2_pmo_init_data *init_data,
	int stream_count)
{
	return init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1];
}

static void insert_strategy_into_expanded_list(
	const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
	const int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
{
	if (expanded_strategy_list && num_expanded_strategies) {
		memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));

		(*num_expanded_strategies)++;
	}
}

static void expand_base_strategy(
	const struct dml2_pmo_pstate_strategy *base_strategy,
	const unsigned int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
{
	bool skip_to_next_stream;
	bool expanded_strategy_added;
	bool skip_iteration;
	unsigned int i, j;
	unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	unsigned int stream_iteration_indices[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	struct dml2_pmo_pstate_strategy cur_strategy_list = { 0 };

	/* determine number of displays per method */
	for (i = 0; i < stream_count; i++) {
		/* increment the count of the earliest index with the same method */
		for (j = 0; j < stream_count; j++) {
			if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
				num_streams_per_method[j] = num_streams_per_method[j] + 1;
				break;
			}
		}
	}
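
	/*
	 * At this point num_streams_per_method[j] counts the streams whose
	 * method was first seen at stream index j (0 elsewhere), so the loop
	 * below can treat it as a multiset of methods to permute.
	 */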
	cur_strategy_list.allow_state_increase = base_strategy->allow_state_increase;

	i = 0;
	/* uses a while loop instead of recursion to build permutations of base strategy */
	while (stream_iteration_indices[0] < stream_count) {
		skip_to_next_stream = false;
		expanded_strategy_added = false;
		skip_iteration = false;

		/* determine what to do for this iteration */
		if (stream_iteration_indices[i] < stream_count && num_streams_per_method[stream_iteration_indices[i]] != 0) {
			/* decrement count and assign method */
			cur_strategy_list.per_stream_pstate_method[i] = base_strategy->per_stream_pstate_method[stream_iteration_indices[i]];
			num_streams_per_method[stream_iteration_indices[i]] -= 1;

			if (i >= stream_count - 1) {
				/* insert into strategy list */
				insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, expanded_strategy_list, num_expanded_strategies);
				expanded_strategy_added = true;
			} else {
				/* skip to next stream */
				skip_to_next_stream = true;
			}
		} else {
			skip_iteration = true;
		}

		/* prepare for next iteration */
		if (skip_to_next_stream) {
			i++;
		} else {
			/* restore count */
			if (!skip_iteration) {
				num_streams_per_method[stream_iteration_indices[i]] += 1;
			}

			/* increment iteration count */
			stream_iteration_indices[i]++;

			/* if iterations are complete, or last stream was reached */
			if ((stream_iteration_indices[i] >= stream_count || expanded_strategy_added) && i > 0) {
				/* reset per stream index, decrement i */
				stream_iteration_indices[i] = 0;
				i--;

				/* restore previous stream's count and increment index */
				num_streams_per_method[stream_iteration_indices[i]] += 1;
				stream_iteration_indices[i]++;
			}
		}
	}
}


static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
	const struct dml2_pmo_pstate_strategy *variant_strategy,
	const unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
	const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
	const unsigned int stream_count)
{
	bool valid = true;
	unsigned int i;

	/* check all restrictions are met */
	for (i = 0; i < stream_count; i++) {
		/* vblank + vblank_drr variants are invalid */
		if (base_strategy->per_stream_pstate_method[i] == dml2_pstate_method_vblank &&
				((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
				num_streams_per_variant_method[i] > 1)) {
			valid = false;
			break;
		}
	}

	return valid;
}

static void expand_variant_strategy(
	const struct dml2_pmo_pstate_strategy *base_strategy,
	const unsigned int stream_count,
	const bool should_permute,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
{
	bool variant_found;
	unsigned int i, j;
	unsigned int method_index;
	unsigned int stream_index;
	unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	enum dml2_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
	struct dml2_pmo_pstate_strategy variant_strategy = { 0 };

	/* determine number of displays per method */
	for (i = 0; i < stream_count; i++) {
		/* increment the count of the earliest index with the same method */
		for (j = 0; j < stream_count; j++) {
			if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
				num_streams_per_method[j] = num_streams_per_method[j] + 1;
				break;
			}
		}

		per_stream_variant_method[i] = convert_strategy_to_drr_variant(base_strategy->per_stream_pstate_method[i]);
	}
	memcpy(num_streams_per_base_method, num_streams_per_method, sizeof(unsigned int) * PMO_DCN4_MAX_DISPLAYS);

	memcpy(&variant_strategy, base_strategy, sizeof(struct dml2_pmo_pstate_strategy));
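
	/*
	 * The loop below steps num_streams_per_base_method /
	 * num_streams_per_variant_method like an odometer: each pass converts
	 * one more stream of the current method from its base form to its DRR
	 * variant, enumerating every possible base/variant split per method.
	 */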
	method_index = 0;
	/* uses a while loop instead of recursion to build permutations of base strategy */
	while (num_streams_per_base_method[0] > 0 || method_index != 0) {
		if (method_index == stream_count) {
			/* construct variant strategy */
			variant_found = false;
			stream_index = 0;

			for (i = 0; i < stream_count; i++) {
				for (j = 0; j < num_streams_per_base_method[i]; j++) {
					variant_strategy.per_stream_pstate_method[stream_index++] = base_strategy->per_stream_pstate_method[i];
				}

				for (j = 0; j < num_streams_per_variant_method[i]; j++) {
					variant_strategy.per_stream_pstate_method[stream_index++] = per_stream_variant_method[i];
					if (base_strategy->per_stream_pstate_method[i] != per_stream_variant_method[i]) {
						variant_found = true;
					}
				}
			}

			if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
				if (should_permute) {
					/* permutations are permitted, proceed to expand */
					expand_base_strategy(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
				} else {
					/* no permutations allowed, so add to list now */
					insert_strategy_into_expanded_list(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
				}
			}

			/* rollback to earliest method with bases remaining */
			for (method_index = stream_count - 1; method_index > 0; method_index--) {
				if (num_streams_per_base_method[method_index]) {
					/* bases remaining */
					break;
				} else {
					/* reset counters */
					num_streams_per_base_method[method_index] = num_streams_per_method[method_index];
					num_streams_per_variant_method[method_index] = 0;
				}
			}
		}

		if (num_streams_per_base_method[method_index]) {
			num_streams_per_base_method[method_index]--;
			num_streams_per_variant_method[method_index]++;

			method_index++;
		} else if (method_index != 0) {
			method_index++;
		}
	}
}

void pmo_dcn4_fams2_expand_base_pstate_strategies(
	const struct dml2_pmo_pstate_strategy *base_strategies_list,
	const unsigned int num_base_strategies,
	const unsigned int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
{
	unsigned int i;

	/* expand every explicit base strategy (except all DRR) */
	for (i = 0; i < num_base_strategies; i++) {
		expand_base_strategy(&base_strategies_list[i], stream_count, expanded_strategy_list, num_expanded_strategies);
		expand_variant_strategy(&base_strategies_list[i], stream_count, true, expanded_strategy_list, num_expanded_strategies);
	}
}

bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
{
	int i = 0;
	struct dml2_pmo_instance *pmo = in_out->instance;

	pmo->soc_bb = in_out->soc_bb;
	pmo->ip_caps = in_out->ip_caps;
	pmo->mpc_combine_limit = 2;
	pmo->odm_combine_limit = 4;
	pmo->mcg_clock_table_size = in_out->mcg_clock_table_size;

	pmo->fams_params.v2.subvp.refresh_rate_limit_max = 175;
	pmo->fams_params.v2.subvp.refresh_rate_limit_min = 0;
	pmo->fams_params.v2.drr.refresh_rate_limit_max = 1000;
	pmo->fams_params.v2.drr.refresh_rate_limit_min = 119;

	pmo->options = in_out->options;

	/* generate permutations of p-state configs from base strategy list */
	for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
		switch (i) {
		case 1:
			DML2_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);

			/* populate list */
			pmo_dcn4_fams2_expand_base_pstate_strategies(
					base_strategy_list_1_display,
					base_strategy_list_1_display_size,
					i,
					pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
					&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
			break;
		case 2:
			DML2_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);

			/* populate list */
			pmo_dcn4_fams2_expand_base_pstate_strategies(
					base_strategy_list_2_display,
					base_strategy_list_2_display_size,
					i,
					pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
					&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
			break;
		case 3:
			DML2_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);

			/* populate list */
			pmo_dcn4_fams2_expand_base_pstate_strategies(
					base_strategy_list_3_display,
					base_strategy_list_3_display_size,
					i,
					pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
					&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
			break;
		case 4:
			DML2_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);

			/* populate list */
			pmo_dcn4_fams2_expand_base_pstate_strategies(
					base_strategy_list_4_display,
					base_strategy_list_4_display_size,
					i,
					pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
					&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
			break;
		}
	}

	return true;
}

static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator)
{
	/*
	 * Htotal, Hblank start/end, and Hsync start/end must all be divisible
	 * by the denominator for the horizontal timing params to be considered
	 * divisible by it. Hsync start is always 0.
	 */
	unsigned long h_blank_start = timing->h_total - timing->h_front_porch;

	return (timing->h_total % denominator == 0) &&
			(h_blank_start % denominator == 0) &&
			(timing->h_blank_end % denominator == 0) &&
			(timing->h_sync_width % denominator == 0);
}

static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type)
{
	switch (encoder_type) {
	case dml2_dp:
	case dml2_edp:
	case dml2_dp2p0:
	case dml2_none:
		return true;
	case dml2_hdmi:
	case dml2_hdmifrl:
	default:
		return false;
	}
}

bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
{
	unsigned int i;
	const struct dml2_display_cfg *display_config =
			&in_out->base_display_config->display_config;
	const struct dml2_core_mode_support_result *mode_support_result =
			&in_out->base_display_config->mode_support_result;
	struct dml2_optimization_stage4_state *state =
			&in_out->base_display_config->stage4;

	if (in_out->instance->options->disable_dyn_odm ||
			(in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
		return false;

	for (i = 0; i < display_config->num_planes; i++)
		/*
		 * vmin optimization is required to be seamlessly switched off
		 * at any time when the new configuration is no longer
		 * supported. However switching from ODM combine to MPC combine
		 * is not always seamless. When there are not enough free pipes,
		 * we will have to use the same secondary OPP heads as secondary
		 * DPP pipes in MPC combine in the new state. This transition is
		 * expected to cause glitches. To avoid the transition, we only
		 * allow vmin optimization if the stream's base configuration
		 * doesn't require MPC combine. This condition checks if MPC
		 * combine is enabled. If so do not optimize the stream.
		 */
		if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
				mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
			state->unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;

	for (i = 0; i < display_config->num_streams; i++) {
		if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
			state->unoptimizable_streams[i] = true;
		else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
				in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
			state->unoptimizable_streams[i] = true;
		/*
		 * ODM Combine requires horizontal timing divisible by 2 so each
		 * ODM segment has the same size.
		 */
		else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
			state->unoptimizable_streams[i] = true;
		/*
		 * Our hardware supports seamless ODM transitions for DP
		 * encoders only.
		 */
		else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
			state->unoptimizable_streams[i] = true;
	}

	state->performed = true;

	return true;
}

bool pmo_dcn4_fams2_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out)
{
	bool is_vmin = true;

	if (in_out->vmin_limits->dispclk_khz > 0 &&
		in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz)
		is_vmin = false;

	return is_vmin;
}

static int find_highest_odm_load_stream_index(
		const struct dml2_display_cfg *display_config,
		const struct dml2_core_mode_support_result *mode_support_result)
{
	unsigned int i;
	int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;

	for (i = 0; i < display_config->num_streams; i++) {
		if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
			odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
					/ mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
		else
			odm_load = 0;

		if (odm_load > highest_odm_load) {
			highest_odm_load_index = i;
			highest_odm_load = odm_load;
		}
	}

	return highest_odm_load_index;
}

bool pmo_dcn4_fams2_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out)
{
	int stream_index;
	const struct dml2_display_cfg *display_config =
			&in_out->base_display_config->display_config;
	const struct dml2_core_mode_support_result *mode_support_result =
			&in_out->base_display_config->mode_support_result;
	unsigned int odms_used;
	struct dml2_stream_parameters *stream_descriptor;
	bool optimizable = false;

	/*
	 * highest odm load stream must be optimizable to continue as dispclk is
	 * bounded by it.
	 */
	stream_index = find_highest_odm_load_stream_index(display_config,
			mode_support_result);

	if (stream_index < 0 ||
			in_out->base_display_config->stage4.unoptimizable_streams[stream_index])
		return false;

	odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used;
	if ((int)odms_used >= in_out->instance->odm_combine_limit)
		return false;

	memcpy(in_out->optimized_display_config,
			in_out->base_display_config,
			sizeof(struct display_configuation_with_meta));

	stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index];
	while (!optimizable && increase_odm_combine_factor(
			&stream_descriptor->overrides.odm_mode,
			odms_used)) {
		switch (stream_descriptor->overrides.odm_mode) {
		case dml2_odm_mode_combine_2to1:
			optimizable = true;
			break;
		case dml2_odm_mode_combine_3to1:
			/*
			 * In ODM Combine 3:1 OTG_valid_pixel rate is 1/4 of
			 * actual pixel rate. Therefore horizontal timing must
			 * be divisible by 4.
			 */
			if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
				if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
					/*
					 * DSC h slice count must be divisible
					 * by 3.
					 */
					if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0)
						optimizable = true;
				} else {
					optimizable = true;
				}
			}
			break;
		case dml2_odm_mode_combine_4to1:
			/*
			 * In ODM Combine 4:1 OTG_valid_pixel rate is 1/4 of
			 * actual pixel rate. Therefore horizontal timing must
			 * be divisible by 4.
			 */
			if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
				if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
					/*
					 * DSC h slice count must be divisible
					 * by 4.
					 */
					if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0)
						optimizable = true;
				} else {
					optimizable = true;
				}
			}
			break;
		case dml2_odm_mode_auto:
		case dml2_odm_mode_bypass:
		case dml2_odm_mode_split_1to2:
		case dml2_odm_mode_mso_1to2:
		case dml2_odm_mode_mso_1to4:
		default:
			break;
		}
	}

	return optimizable;
}

static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
{
	*bit_field = *bit_field | (0x1 << bit_offset);
}

static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
{
	if (bit_field & (0x1 << bit_offset))
		return true;

	return false;
}

static void build_synchronized_timing_groups(
	struct dml2_pmo_instance *pmo,
	struct display_configuation_with_meta *display_config)
{
	unsigned int i, j;
	struct dml2_timing_cfg *master_timing;

	unsigned int stream_mapped_mask = 0;
	unsigned int num_timing_groups = 0;
	unsigned int timing_group_idx = 0;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	/* clear all group masks */
	memset(s->pmo_dcn4.synchronized_timing_group_masks, 0, sizeof(s->pmo_dcn4.synchronized_timing_group_masks));
	memset(s->pmo_dcn4.group_is_drr_enabled, 0, sizeof(s->pmo_dcn4.group_is_drr_enabled));
	memset(s->pmo_dcn4.group_is_drr_active, 0, sizeof(s->pmo_dcn4.group_is_drr_active));
	memset(s->pmo_dcn4.group_line_time_us, 0, sizeof(s->pmo_dcn4.group_line_time_us));
	s->pmo_dcn4.num_timing_groups = 0;
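
	/*
	 * Greedily group streams whose timings are byte-identical (memcmp
	 * below); e.g. two identical non-DRR 60Hz streams share one timing
	 * group, while a DRR-enabled stream always gets a group of its own.
	 */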
	for (i = 0; i < display_config->display_config.num_streams; i++) {
		master_timing = &display_config->display_config.stream_descriptors[i].timing;

		/* only need to build a group if this stream is not in one already */
		if (is_bit_set_in_bitfield(stream_mapped_mask, i)) {
			continue;
		}
		set_bit_in_bitfield(&stream_mapped_mask, i);
		timing_group_idx = num_timing_groups;
		num_timing_groups++;

		/* trivially set default timing group to itself */
		set_bit_in_bitfield(&s->pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i);
		s->pmo_dcn4.group_line_time_us[timing_group_idx] = (double)master_timing->h_total / master_timing->pixel_clock_khz * 1000.0;

		/* if drr is in use, timing is not synchronizable */
		if (master_timing->drr_config.enabled) {
			s->pmo_dcn4.group_is_drr_enabled[timing_group_idx] = true;
			s->pmo_dcn4.group_is_drr_active[timing_group_idx] = !master_timing->drr_config.disallowed &&
					(master_timing->drr_config.drr_active_fixed || master_timing->drr_config.drr_active_variable);
			continue;
		}

		/* find synchronizable timing groups */
		for (j = i + 1; j < display_config->display_config.num_streams; j++) {
			if (memcmp(master_timing,
					&display_config->display_config.stream_descriptors[j].timing,
					sizeof(struct dml2_timing_cfg)) == 0) {
				set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
				set_bit_in_bitfield(&stream_mapped_mask, j);
			}
		}
	}

	s->pmo_dcn4.num_timing_groups = num_timing_groups;
}

static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
		const struct display_configuation_with_meta *display_config,
		unsigned int mask)
{
	unsigned int i;
	bool valid = true;

	// Check that every masked stream index has enough vactive margin
	for (i = 0; i < display_config->display_config.num_streams; i++) {
		if (is_bit_set_in_bitfield(mask, i)) {
			/* check if stream has enough vactive margin */
			valid &= is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.stream_vactive_capability_mask, i);
		}
	}

	return valid;
}

static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
		const struct display_configuation_with_meta *display_config,
		unsigned int mask)
{
	unsigned int i;

	bool synchronizable = true;

	/* find first vblank stream index and compare the timing group mask */
	for (i = 0; i < display_config->display_config.num_streams; i++) {
		if (is_bit_set_in_bitfield(mask, i)) {
			if (mask != pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[i]) {
				/* vblank streams are not synchronizable */
				synchronizable = false;
			}
			break;
		}
	}

	return synchronizable;
}

static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
{
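	/*
	 * SubVP microschedule length in OTG vlines; it must fit within the
	 * main stream's vactive (checked in all_timings_support_svp()).
	 */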
	return fams2_meta->contention_delay_otg_vlines +
		fams2_meta->method_subvp.programming_delay_otg_vlines +
		fams2_meta->method_subvp.phantom_vtotal +
		fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
		fams2_meta->dram_clk_change_blackout_otg_vlines;
}

static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
		const struct display_configuation_with_meta *display_config,
		unsigned int mask)
{
	unsigned int i;
	for (i = 0; i < DML2_MAX_PLANES; i++) {
		const struct dml2_stream_parameters *stream_descriptor;
		const struct dml2_fams2_meta *stream_fams2_meta;

		if (is_bit_set_in_bitfield(mask, i)) {
			stream_descriptor = &display_config->display_config.stream_descriptors[i];
			stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];

			if (!stream_descriptor->timing.drr_config.enabled)
				return false;

			/* cannot support required vtotal */
			if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
				return false;
			}

			/* check refresh rate is within bounds */
			if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
					stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
				return false;
			}

			/* check required stretch is allowed */
			if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
					stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
				return false;
			}
		}
	}

	return true;
}

static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
		const struct display_configuation_with_meta *display_config,
		unsigned int mask)
{
	const struct dml2_stream_parameters *stream_descriptor;
	const struct dml2_plane_parameters *plane_descriptor;
	const struct dml2_fams2_meta *stream_fams2_meta;
	unsigned int microschedule_vlines;
	unsigned int i;

	unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };

	/* confirm the timing is not a centered timing */
	for (i = 0; i < display_config->display_config.num_planes; i++) {
		plane_descriptor = &display_config->display_config.plane_descriptors[i];

		if (is_bit_set_in_bitfield(mask, (unsigned char)plane_descriptor->stream_index)) {
			num_planes_per_stream[plane_descriptor->stream_index]++;

			/* check recout height covers entire otg vactive, and single plane */
			if (num_planes_per_stream[plane_descriptor->stream_index] > 1 ||
					!plane_descriptor->composition.rect_out_height_spans_vactive ||
					plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
				return false;
			}
		}
	}

	for (i = 0; i < DML2_MAX_PLANES; i++) {
		if (is_bit_set_in_bitfield(mask, i)) {
			stream_descriptor = &display_config->display_config.stream_descriptors[i];
			stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];

			if (stream_descriptor->overrides.disable_subvp) {
				return false;
			}

			microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);

			/* block if using an interlaced timing */
			if (stream_descriptor->timing.interlaced) {
				return false;
			}

			/* 1) svp main stream's vactive must be able to fit the microschedule
			 * 2) refresh rate must be within the allowed bounds
			 */
			if (microschedule_vlines >= stream_descriptor->timing.v_active ||
					(stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
					stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
				return false;
			}
		}
	}

	return true;
}

static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
{
	scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy;
	scratch->pmo_dcn4.num_pstate_candidates++;
}

static enum dml2_pstate_method uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)
{
	enum dml2_pstate_method method = dml2_pstate_method_na;

	switch (override_strategy) {
	case dml2_uclk_pstate_change_strategy_force_vactive:
		method = dml2_pstate_method_vactive;
		break;
	case dml2_uclk_pstate_change_strategy_force_vblank:
		method = dml2_pstate_method_vblank;
		break;
	case dml2_uclk_pstate_change_strategy_force_drr:
		method = dml2_pstate_method_fw_drr;
		break;
	case dml2_uclk_pstate_change_strategy_force_mall_svp:
		method = dml2_pstate_method_fw_svp;
		break;
	case dml2_uclk_pstate_change_strategy_force_mall_full_frame:
	case dml2_uclk_pstate_change_strategy_auto:
	default:
		method = dml2_pstate_method_na;
	}

	return method;
}

static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)
{
	enum dml2_uclk_pstate_change_strategy override_strategy = dml2_uclk_pstate_change_strategy_auto;

	switch (method) {
	case dml2_pstate_method_vactive:
	case dml2_pstate_method_fw_vactive_drr:
		override_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
		break;
	case dml2_pstate_method_vblank:
	case dml2_pstate_method_fw_vblank_drr:
		override_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
		break;
	case dml2_pstate_method_fw_svp:
	case dml2_pstate_method_fw_svp_drr:
		override_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
		break;
	case dml2_pstate_method_fw_drr:
		override_strategy = dml2_uclk_pstate_change_strategy_force_drr;
		break;
	case dml2_pstate_method_reserved_hw:
	case dml2_pstate_method_reserved_fw:
	case dml2_pstate_method_reserved_fw_drr_clamped:
	case dml2_pstate_method_reserved_fw_drr_var:
	case dml2_pstate_method_count:
	case dml2_pstate_method_na:
	default:
		override_strategy = dml2_uclk_pstate_change_strategy_auto;
	}

	return override_strategy;
}
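
/*
 * Note: the two helpers above invert each other for the four forced
 * strategies; DRR-variant methods map back to their base forced strategy,
 * and all other methods map to auto/na.
 */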

static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
{
	unsigned int i;

	for (i = 0; i < DML2_MAX_PLANES; i++) {
		if (is_bit_set_in_bitfield(plane_mask, i)) {
			if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
				display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != pstate_method_to_uclk_pstate_strategy_override(method))
				return false;
		}
	}

	return true;
}

static void build_method_scheduling_params(
	struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
	struct dml2_fams2_meta *stream_fams2_meta)
{
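	/*
	 * Convert the allow window from OTG vlines to microseconds; the rest
	 * of the frame period is this method's disallow time.
	 */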
	stream_method_fams2_meta->allow_time_us =
			(double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
			stream_fams2_meta->otg_vline_time_us;
	if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
		/* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this) */
		stream_method_fams2_meta->disallow_time_us = 0.0;
	} else {
		stream_method_fams2_meta->disallow_time_us =
				stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
	}
}

static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
	struct dml2_pmo_instance *pmo,
	enum dml2_pstate_method stream_pstate_method,
	int stream_idx)
{
	struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;

	switch (stream_pstate_method) {
	case dml2_pstate_method_vactive:
	case dml2_pstate_method_fw_vactive_drr:
		stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
		break;
	case dml2_pstate_method_vblank:
	case dml2_pstate_method_fw_vblank_drr:
		stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
		break;
	case dml2_pstate_method_fw_svp:
	case dml2_pstate_method_fw_svp_drr:
		stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
		break;
	case dml2_pstate_method_fw_drr:
		stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
		break;
	case dml2_pstate_method_reserved_hw:
	case dml2_pstate_method_reserved_fw:
	case dml2_pstate_method_reserved_fw_drr_clamped:
	case dml2_pstate_method_reserved_fw_drr_var:
	case dml2_pstate_method_count:
	case dml2_pstate_method_na:
	default:
		stream_method_fams2_meta = NULL;
	}

	return stream_method_fams2_meta;
}

static bool is_timing_group_schedulable(
		struct dml2_pmo_instance *pmo,
		const struct display_configuation_with_meta *display_cfg,
		const struct dml2_pmo_pstate_strategy *pstate_strategy,
		const unsigned int timing_group_idx,
		struct dml2_fams2_per_method_common_meta *group_fams2_meta)
{
	unsigned int i;
	struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;

	unsigned int base_stream_idx = 0;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	/* find base stream idx */
	for (base_stream_idx = 0; base_stream_idx < display_cfg->display_config.num_streams; base_stream_idx++) {
		if (is_bit_set_in_bitfield(s->pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], base_stream_idx)) {
			/* master stream found */
			break;
		}
	}

	/* init allow start and end lines for timing group */
	stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
	if (!stream_method_fams2_meta)
		return false;

	group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
	group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
	group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
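	/* narrow to the intersection of every member stream's allow window */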
	for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
		if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
			stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
			if (!stream_method_fams2_meta)
				continue;

			if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
				/* set group allow start to larger otg vline */
				group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
			}

			if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
				/* set group allow end to smaller otg vline */
				group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
			}

			/* check waveform still has positive width */
			if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
				/* timing group is not schedulable */
				return false;
			}
		}
	}

	/* calculate the rest of the meta */
	build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);

	return group_fams2_meta->allow_time_us > 0.0 &&
			group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}

static bool is_config_schedulable(
	struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_cfg,
	const struct dml2_pmo_pstate_strategy *pstate_strategy)
{
	unsigned int i, j;
	bool schedulable;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	double max_allow_delay_us = 0.0;

	memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
	memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);

	/* search for a general solution to the schedule */

	/* STAGE 0: Early return for special cases */
	if (display_cfg->display_config.num_streams == 0) {
		return true;
	}

	/* STAGE 1: confirm allow waves overlap for synchronizable streams */
	schedulable = true;
	for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
		s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
		s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
		if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
			/* synchronized timing group was not schedulable */
			schedulable = false;
			break;
		}
		max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
	}

	if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
		/* 1. the only timing group was schedulable, so early pass
		 * 2. one of the timing groups was not schedulable, so early fail */
		return schedulable;
	}

	/* STAGE 2: Check allow can't be masked entirely by other disallows */
	schedulable = true;

	/* sort disallow times from greatest to least */
	for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
		bool swapped = false;

		for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
			double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
			double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
			if (j_disallow_us < jp1_disallow_us) {
				/* swap as A < B */
				swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
					s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
				swapped = true;
			}
		}

		/* sorted, exit early */
		if (!swapped)
			break;
	}

	/* Check the worst case, where a disallow region occurs in the middle of
	 * the other display's allow, or where >2 streams successively halve the
	 * remaining allow time.
	 */
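	/*
	 * Illustrative numbers: if group i allows 8us per period and group j
	 * has 3us allow / 2us disallow, the worst-case allow left for i is
	 * min(3, (8 - 2) / 2) = 3us, matching the math_min2() below.
	 */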
1392 for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
1393 if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
1394 /* this timing group always allows */
1395 continue;
1396 }
1397
1398 double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
1399 for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
1400 unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
1401 /* stream can't overlap itself */
1402 if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
1403 max_allow_time_us = math_min2(
1404 s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
1405 (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
1406
1407 if (max_allow_time_us < 0.0) {
1408 /* failed; exit early */
1409 break;
1410 }
1411 }
1412 }
1413
1414 if (max_allow_time_us <= 0.0) {
1415 /* not enough time for microschedule in the worst case */
1416 schedulable = false;
1417 break;
1418 }
1419 }
1420
1421 if (schedulable && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
1422 return true;
1423 }
1424
1425 /* STAGE 3: check larger allow can fit period of all other streams */
1426 schedulable = true;
1427
1428 /* sort periods from greatest to least */
1429 for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
1430 bool swapped = false;
1431
1432 for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
1433 double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
1434 double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
1435 if (j_period_us < jp1_period_us) {
1436 /* swap: j's period is smaller than j+1's */
1437 swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
1438 s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
1439 swapped = true;
1440 }
1441 }
1442
1443 /* sorted, exit early */
1444 if (!swapped)
1445 break;
1446 }
1447
1448 /* check larger allow can fit period of all other streams */
1449 for (i = 0; i < s->pmo_dcn4.num_timing_groups - 1; i++) {
1450 unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
1451 unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
1452
1453 if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
1454 (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
1455 schedulable = false;
1456 break;
1457 }
1458 }
1459
1460 if (schedulable && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
1461 return true;
1462 }
1463
1464 /* STAGE 4: When using HW exclusive modes, check disallow alignments are within allowed threshold */
1465 if (s->pmo_dcn4.num_timing_groups == 2 &&
1466 !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[0]) &&
1467 !is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[1])) {
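/* With exactly two HW-exclusive (non-FW) groups, firmware does not
 * re-synchronize them, so their relative phase drifts by the fractional
 * part of the period ratio once per large-group period. The math below
 * bounds how long that drift can keep the small group's allow window
 * hidden inside the large group's disallow region. (Interpretive summary
 * of the code below, not normative.)
 */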
1468 double period_ratio;
1469 double max_shift_us;
1470 double shift_per_period;
1471
1472 /* default period_0 > period_1 */
1473 unsigned int lrg_idx = 0;
1474 unsigned int sml_idx = 1;
1475 if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
1476 /* period_0 < period_1 */
1477 lrg_idx = 1;
1478 sml_idx = 0;
1479 }
1480 period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
1481 shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
1482 max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
1483 max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
1484
1485 if (shift_per_period > 0.0 &&
1486 shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
1487 max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
1488 schedulable = true;
1489 }
1490 }
1491
1492 return schedulable;
1493 }
1494
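/* Check whether a stream's DRR configuration is compatible with the proposed
 * p-state method under the current policy options. Returns false when, e.g.,
 * variable DRR is active but the method cannot tolerate it, or the method
 * requires FAMS2 while FAMS2 is disabled.
 */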
1495 static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
1496 const struct display_configuation_with_meta *display_cfg,
1497 const enum dml2_pstate_method stream_pstate_method,
1498 unsigned int stream_index)
1499 {
1500 const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
1501 bool strategy_matches_drr_requirements = true;
1502
1503 /* check if strategy is compatible with stream drr capability and strategy */
1504 if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
1505 display_cfg->display_config.num_streams > 1 &&
1506 stream_descriptor->timing.drr_config.enabled &&
1507 (stream_descriptor->timing.drr_config.drr_active_fixed || stream_descriptor->timing.drr_config.drr_active_variable)) {
1508 /* DRR is active, so config may become unschedulable */
1509 strategy_matches_drr_requirements = false;
1510 } else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
1511 is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
1512 stream_descriptor->timing.drr_config.enabled &&
1513 stream_descriptor->timing.drr_config.drr_active_variable) {
1514 /* DRR is variable, fw exclusive methods require DRR to be clamped */
1515 strategy_matches_drr_requirements = false;
1516 } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
1517 pmo->options->disable_drr_var_when_var_active &&
1518 stream_descriptor->timing.drr_config.enabled &&
1519 stream_descriptor->timing.drr_config.drr_active_variable) {
1520 /* DRR variable is active, but policy blocks DRR for p-state when this happens */
1521 strategy_matches_drr_requirements = false;
1522 } else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
1523 (pmo->options->disable_drr_var ||
1524 !stream_descriptor->timing.drr_config.enabled ||
1525 stream_descriptor->timing.drr_config.disallowed)) {
1526 /* DRR variable strategies are disallowed due to settings or policy */
1527 strategy_matches_drr_requirements = false;
1528 } else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_method) &&
1529 (pmo->options->disable_drr_clamped ||
1530 (!stream_descriptor->timing.drr_config.enabled ||
1531 (!stream_descriptor->timing.drr_config.drr_active_fixed && !stream_descriptor->timing.drr_config.drr_active_variable)) ||
1532 (pmo->options->disable_drr_clamped_when_var_active &&
1533 stream_descriptor->timing.drr_config.enabled &&
1534 stream_descriptor->timing.drr_config.drr_active_variable))) {
1535 /* DRR fixed strategies are disallowed due to settings or policy */
1536 strategy_matches_drr_requirements = false;
1537 } else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
1538 pmo->options->disable_fams2) {
1539 /* FW modes require FAMS2 */
1540 strategy_matches_drr_requirements = false;
1541 }
1542
1543 return strategy_matches_drr_requirements;
1544 }
1545
1546 static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
1547 const struct display_configuation_with_meta *display_cfg,
1548 const struct dml2_pmo_pstate_strategy *pstate_strategy)
1549 {
1550 struct dml2_pmo_scratch *s = &pmo->scratch;
1551
1552 unsigned int stream_index = 0;
1553
1554 unsigned int svp_count = 0;
1555 unsigned int svp_stream_mask = 0;
1556 unsigned int drr_count = 0;
1557 unsigned int drr_stream_mask = 0;
1558 unsigned int vactive_count = 0;
1559 unsigned int vactive_stream_mask = 0;
1560 unsigned int vblank_count = 0;
1561 unsigned int vblank_stream_mask = 0;
1562
1563 bool strategy_matches_forced_requirements = true;
1564 bool strategy_matches_drr_requirements = true;
1565
1566 // Tabulate everything
1567 for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
1568
1569 if (!all_planes_match_method(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
1570 pstate_strategy->per_stream_pstate_method[stream_index])) {
1571 strategy_matches_forced_requirements = false;
1572 break;
1573 }
1574
1575 strategy_matches_drr_requirements &=
1576 stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
1577
1578 if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
1579 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
1580 svp_count++;
1581 set_bit_in_bitfield(&svp_stream_mask, stream_index);
1582 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
1583 drr_count++;
1584 set_bit_in_bitfield(&drr_stream_mask, stream_index);
1585 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
1586 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
1587 vactive_count++;
1588 set_bit_in_bitfield(&vactive_stream_mask, stream_index);
1589 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
1590 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
1591 vblank_count++;
1592 set_bit_in_bitfield(&vblank_stream_mask, stream_index);
1593 }
1594 }
1595
1596 if (!strategy_matches_forced_requirements || !strategy_matches_drr_requirements)
1597 return false;
1598
1599 if (vactive_count > 0 && !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask))
1600 return false;
1601
1602 if (vblank_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vblank(pmo, display_cfg, vblank_stream_mask)))
1603 return false;
1604
1605 if (drr_count > 0 && (pmo->options->disable_drr_var || !all_timings_support_drr(pmo, display_cfg, drr_stream_mask)))
1606 return false;
1607
1608 if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
1609 return false;
1610
1611 return is_config_schedulable(pmo, display_cfg, pstate_strategy);
1612 }
1613
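/* Minimum DRAM-change latency-hiding margin (in us) across all planes set in
 * plane_mask. */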
1614 static int get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
1615 {
1616 unsigned int i;
1617 int min_vactive_margin_us = 0xFFFFFFF;
1618
1619 for (i = 0; i < DML2_MAX_PLANES; i++) {
1620 if (is_bit_set_in_bitfield(plane_mask, i)) {
1621 if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active < min_vactive_margin_us)
1622 min_vactive_margin_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active;
1623 }
1624 }
1625
1626 return min_vactive_margin_us;
1627 }
1628
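/* Maximum vactive DET fill delay (in us) across all planes set in plane_mask. */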
1629 static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
1630 {
1631 unsigned char i;
1632 unsigned int max_vactive_fill_us = 0;
1633
1634 for (i = 0; i < DML2_MAX_PLANES; i++) {
1635 if (is_bit_set_in_bitfield(plane_mask, i)) {
1636 if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us)
1637 max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us;
1638 }
1639 }
1640
1641 return max_vactive_fill_us;
1642 }
1643
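/* Precompute per-stream FAMS2 scheduling metadata (OTG vline timings, allow
 * windows, and per-method parameters) consumed by the schedulability checks
 * above. */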
1644 static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
1645 struct display_configuation_with_meta *display_config,
1646 int stream_index)
1647 {
1648 const struct dml2_ip_capabilities *ip_caps = pmo->ip_caps;
1649 const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
1650 const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
1651 const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
1652 struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
1653
1654 /* worst case: all other streams require some programming at the same time; 0 if there is only 1 stream */
1655 unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
1656 (unsigned int)math_max3(ip_caps->fams2.subvp_programming_delay_us, ip_caps->fams2.drr_programming_delay_us, ip_caps->fams2.allow_programming_delay_us)) *
1657 (display_config->display_config.num_streams - 1);
1658
1659 /* common */
1660 stream_fams2_meta->valid = true;
1661 stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
1662 stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
1663 stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
1664 (stream_fams2_meta->nom_vtotal * timing->h_total);
1665 stream_fams2_meta->nom_frame_time_us =
1666 (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
1667 stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
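/* For illustration (hypothetical 1080p60-style timing): with h_total = 2200
 * and pixel_clock_khz = 148500, one OTG vline above takes
 * 2200 / 148500 * 1000 ~= 14.81us; with nom_vtotal = 1125 the nominal frame
 * time is 1125 * 14.81us ~= 16.67ms (60Hz).
 */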
1668
1669 if (stream_descriptor->timing.drr_config.enabled == true) {
1670 if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
1671 stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
1672 ((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
1673 } else {
1674 /* assume min of 48Hz */
1675 stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
1676 (48000000.0 * stream_descriptor->timing.h_total) * 1e9);
1677 }
1678 } else {
1679 stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
1680 }
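/* For illustration of the DRR branch above (hypothetical values): with
 * pixel_clock_khz = 148500, h_total = 2200 and min_refresh_uhz = 48000000
 * (48Hz), max_vtotal = floor(148500 * 1e9 / (48000000.0 * 2200)) = 1406,
 * and indeed 148500000 / (1406 * 2200) ~= 48.0Hz.
 */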
1681 stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
1682 (stream_fams2_meta->max_vtotal * timing->h_total);
1683 stream_fams2_meta->max_frame_time_us =
1684 (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
1685
1686 stream_fams2_meta->scheduling_delay_otg_vlines =
1687 (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
1688 stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
1689 (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
1690 stream_fams2_meta->contention_delay_otg_vlines =
1691 (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
1692 /* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
1693 stream_fams2_meta->allow_to_target_delay_otg_vlines =
1694 (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
1695 stream_fams2_meta->min_allow_width_otg_vlines =
1696 (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
1697 /* this value should account for urgent latency */
1698 stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
1699 (unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
1700 stream_fams2_meta->otg_vline_time_us);
1701
1702 /* scheduling params should be built based on the worst case for allow_time:disallow_time */
1703
1704 /* vactive */
1705 if (display_config->display_config.num_streams == 1) {
1706 /* for single stream, guarantee at least an instant of allow */
1707 stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
1708 math_max2(0.0,
1709 timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
1710 } else {
1711 /* for multi stream, bound to a max fill time defined by IP caps */
1712 stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
1713 (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
1714 }
1715 stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
1716
1717 if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
1718 stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
1719 timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
1720 stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
1721 stream_fams2_meta->vblank_start -
1722 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1723 } else {
1724 stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
1725 stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
1726 }
1727 stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
1728 build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
1729
1730 /* vblank */
1731 stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
1732 stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
1733 stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
1734 stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
1735 build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
1736
1737 /* subvp */
1738 stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
1739 (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
1740 stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
1741 (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
1742 stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
1743 (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
1744 stream_fams2_meta->method_subvp.phantom_vactive =
1745 stream_fams2_meta->allow_to_target_delay_otg_vlines +
1746 stream_fams2_meta->min_allow_width_otg_vlines +
1747 stream_info->phantom_min_v_active;
1748 stream_fams2_meta->method_subvp.phantom_vfp =
1749 stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
1750 /* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive) */
1751 stream_fams2_meta->method_subvp.phantom_vtotal =
1752 stream_info->phantom_v_startup +
1753 stream_fams2_meta->method_subvp.phantom_vfp +
1754 1 +
1755 stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
1756 stream_fams2_meta->method_subvp.phantom_vactive;
1757 stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
1758 stream_descriptor->timing.v_blank_end +
1759 stream_fams2_meta->contention_delay_otg_vlines +
1760 stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
1761 stream_fams2_meta->method_subvp.phantom_vtotal +
1762 stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
1763 stream_fams2_meta->allow_to_target_delay_otg_vlines;
1764 stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
1765 stream_fams2_meta->vblank_start -
1766 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1767 stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
1768 build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
1769
1770 /* drr */
1771 stream_fams2_meta->method_drr.programming_delay_otg_vlines =
1772 (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
1773 stream_fams2_meta->method_drr.common.allow_start_otg_vline =
1774 stream_fams2_meta->vblank_start +
1775 stream_fams2_meta->allow_to_target_delay_otg_vlines;
1776 stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
1777 if (display_config->display_config.num_streams <= 1) {
1778 /* only need to stretch vblank for blackout time */
1779 stream_fams2_meta->method_drr.stretched_vtotal =
1780 stream_fams2_meta->nom_vtotal +
1781 stream_fams2_meta->allow_to_target_delay_otg_vlines +
1782 stream_fams2_meta->min_allow_width_otg_vlines +
1783 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1784 } else {
1785 /* multi-display configs must always be schedulable */
1786 stream_fams2_meta->method_drr.stretched_vtotal =
1787 stream_fams2_meta->nom_vtotal * 2 +
1788 stream_fams2_meta->allow_to_target_delay_otg_vlines +
1789 stream_fams2_meta->min_allow_width_otg_vlines +
1790 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1791 }
1792 stream_fams2_meta->method_drr.common.allow_end_otg_vline =
1793 stream_fams2_meta->method_drr.stretched_vtotal -
1794 stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
1795 build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
1796 }
1797
1798 static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
1799 struct display_configuation_with_meta *display_config,
1800 int stream_index)
1801 {
1802 struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
1803 struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
1804
1805 stream_svp_meta->valid = true;
1806
1807 /* PMO FAMS2 precalculates these values */
1808 stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
1809 stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
1810 stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
1811 }
1812
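/* Build the candidate p-state strategy list for this display config: map
 * planes to streams, precompute FAMS2/SVP metadata, select either the expanded
 * per-plane override strategies or the predefined list, then keep only the
 * strategies that pass the cofunctionality checks.
 */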
1813 bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
1814 {
1815 struct dml2_pmo_instance *pmo = in_out->instance;
1816 struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3;
1817 struct dml2_pmo_scratch *s = &pmo->scratch;
1818
1819 struct display_configuation_with_meta *display_config;
1820 const struct dml2_plane_parameters *plane_descriptor;
1821 const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
1822 struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
1823 unsigned int strategy_list_size = 0;
1824 unsigned int plane_index, stream_index, i;
1825 bool build_override_strategy = true;
1826
1827 state->performed = true;
1828 in_out->base_display_config->stage3.min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;
1829
1830 display_config = in_out->base_display_config;
1831 display_config->display_config.overrides.enable_subvp_implicit_pmo = true;
1832
1833 memset(s, 0, sizeof(struct dml2_pmo_scratch));
1834
1835 if (display_config->display_config.overrides.all_streams_blanked) {
1836 return true;
1837 }
1838
1839 pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
1840 pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size;
1841 pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
1842
1843 // First build the stream plane mask (array of bitfields indexed by stream, indicating plane mapping)
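// e.g. (hypothetical) planes 0 and 1 on stream 0 and plane 2 on stream 1 yield
// stream_plane_mask[0] = 0b011 and stream_plane_mask[1] = 0b100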
1844 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1845 plane_descriptor = &display_config->display_config.plane_descriptors[plane_index];
1846
1847 set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);
1848
1849 state->pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
1850
1851 build_override_strategy &= plane_descriptor->overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto;
1852 override_base_strategy.per_stream_pstate_method[plane_descriptor->stream_index] =
1853 uclk_pstate_strategy_override_to_pstate_method(plane_descriptor->overrides.uclk_pstate_change_strategy);
1854 }
1855
1856 // Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
1857 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
1858 if (get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us))
1859 set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
1860
1861 /* FAMS2 meta */
1862 build_fams2_meta_per_stream(pmo, display_config, stream_index);
1863
1864 /* SVP meta */
1865 build_subvp_meta_per_stream(pmo, display_config, stream_index);
1866 }
1867
1868 /* get synchronized timing groups */
1869 build_synchronized_timing_groups(pmo, display_config);
1870
1871 if (build_override_strategy) {
1872 /* build expanded override strategy list (no permutations) */
1873 override_base_strategy.allow_state_increase = true;
1874 s->pmo_dcn4.num_expanded_override_strategies = 0;
1875 insert_strategy_into_expanded_list(&override_base_strategy,
1876 display_config->display_config.num_streams,
1877 s->pmo_dcn4.expanded_override_strategy_list,
1878 &s->pmo_dcn4.num_expanded_override_strategies);
1879 expand_variant_strategy(&override_base_strategy,
1880 display_config->display_config.num_streams,
1881 false,
1882 s->pmo_dcn4.expanded_override_strategy_list,
1883 &s->pmo_dcn4.num_expanded_override_strategies);
1884
1885 /* use override strategy list */
1886 strategy_list = s->pmo_dcn4.expanded_override_strategy_list;
1887 strategy_list_size = s->pmo_dcn4.num_expanded_override_strategies;
1888 } else {
1889 /* use predefined strategy list */
1890 strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
1891 strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
1892 }
1893
1894 if (!strategy_list || strategy_list_size == 0)
1895 return false;
1896
1897 s->pmo_dcn4.num_pstate_candidates = 0;
1898
1899 for (i = 0; i < strategy_list_size && s->pmo_dcn4.num_pstate_candidates < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
1900 if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, &strategy_list[i])) {
1901 insert_into_candidate_list(&strategy_list[i], display_config->display_config.num_streams, s);
1902 }
1903 }
1904
1905 if (s->pmo_dcn4.num_pstate_candidates > 0) {
1906 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates-1].allow_state_increase = true;
1907 s->pmo_dcn4.cur_pstate_candidate = -1;
1908 return true;
1909 } else {
1910 return false;
1911 }
1912 }
1913
1914 static void reset_display_configuration(struct display_configuation_with_meta *display_config)
1915 {
1916 unsigned int plane_index;
1917 unsigned int stream_index;
1918 struct dml2_plane_parameters *plane;
1919
1920 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
1921 display_config->stage3.stream_svp_meta[stream_index].valid = false;
1922
1923 display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false;
1924 display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0;
1925 }
1926
1927 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1928 plane = &display_config->display_config.plane_descriptors[plane_index];
1929
1930 // Unset SubVP
1931 plane->overrides.legacy_svp_config = dml2_svp_mode_override_auto;
1932
1933 // Remove reserve time
1934 plane->overrides.reserved_vblank_time_ns = 0;
1935
1936 // Reset strategy to auto
1937 plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
1938
1939 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_na;
1940 }
1941 }
1942
1943 static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config,
1944 struct dml2_pmo_instance *pmo,
1945 int plane_mask)
1946 {
1947 unsigned int plane_index;
1948 struct dml2_plane_parameters *plane;
1949
1950 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1951 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
1952 plane = &display_config->display_config.plane_descriptors[plane_index];
1953
1954 plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
1955
1956 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
1957
1958 }
1959 }
1960 }
1961
1962 static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *display_config,
1963 struct dml2_pmo_instance *pmo,
1964 int plane_mask)
1965 {
1966 struct dml2_pmo_scratch *scratch = &pmo->scratch;
1967
1968 unsigned int plane_index;
1969 int stream_index = -1;
1970
1971 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1972 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
1973 stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
1974 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp;
1975 }
1976 }
1977
1978 if (stream_index >= 0) {
1979 memcpy(&display_config->stage3.stream_svp_meta[stream_index],
1980 &scratch->pmo_dcn4.stream_svp_meta[stream_index],
1981 sizeof(struct dml2_implicit_svp_meta));
1982 }
1983 }
1984
1985 static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_meta *display_config,
1986 struct dml2_pmo_instance *pmo,
1987 int plane_mask)
1988 {
1989 struct dml2_pmo_scratch *scratch = &pmo->scratch;
1990
1991 unsigned int plane_index;
1992 int stream_index = -1;
1993
1994 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1995 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
1996 stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
1997 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp_drr;
1998 }
1999 }
2000
2001 if (stream_index >= 0) {
2002 memcpy(&display_config->stage3.stream_svp_meta[stream_index],
2003 &scratch->pmo_dcn4.stream_svp_meta[stream_index],
2004 sizeof(struct dml2_implicit_svp_meta));
2005 }
2006 }
2007
2008 static void setup_planes_for_vblank_by_mask(struct display_configuation_with_meta *display_config,
2009 struct dml2_pmo_instance *pmo,
2010 int plane_mask)
2011 {
2012 unsigned int plane_index;
2013 struct dml2_plane_parameters *plane;
2014
2015 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2016 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2017 plane = &display_config->display_config.plane_descriptors[plane_index];
2018
2019 plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
2020 plane->overrides.reserved_vblank_time_ns);
2021
2022 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
2023
2024 }
2025 }
2026 }
2027
2028 static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with_meta *display_config,
2029 struct dml2_pmo_instance *pmo,
2030 int plane_mask)
2031 {
2032 unsigned int plane_index;
2033 struct dml2_plane_parameters *plane;
2034
2035 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2036 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2037 plane = &display_config->display_config.plane_descriptors[plane_index];
2038 plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
2039
2040 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
2041 }
2042 }
2043 }
2044
2045 static void setup_planes_for_vactive_by_mask(struct display_configuation_with_meta *display_config,
2046 struct dml2_pmo_instance *pmo,
2047 int plane_mask)
2048 {
2049 unsigned int plane_index;
2050 unsigned int stream_index;
2051
2052 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2053 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2054 stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
2055
2056 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
2057
2058 if (!pmo->options->disable_vactive_det_fill_bw_pad) {
2059 display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
2060 (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
2061 }
2062 }
2063 }
2064 }
2065
2066 static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_with_meta *display_config,
2067 struct dml2_pmo_instance *pmo,
2068 int plane_mask)
2069 {
2070 unsigned int plane_index;
2071 unsigned int stream_index;
2072
2073 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2074 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2075 stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
2076
2077 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
2078
2079 if (!pmo->options->disable_vactive_det_fill_bw_pad) {
2080 display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
2081 (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
2082 }
2083 }
2084 }
2085 }
2086
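/* Apply one candidate strategy to the display config: reset all overrides,
 * then program each stream's planes for its chosen method via the helpers
 * above. FW-based methods (SVP/DRR variants) additionally mark FAMS2 as
 * required so the FAMS2 metadata is copied into stage3.
 */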
2087 static bool setup_display_config(struct display_configuation_with_meta *display_config, struct dml2_pmo_instance *pmo, int strategy_index)
2088 {
2089 struct dml2_pmo_scratch *scratch = &pmo->scratch;
2090
2091 bool fams2_required = false;
2092 bool success = true;
2093 unsigned int stream_index;
2094
2095 reset_display_configuration(display_config);
2096
2097 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
2098
2099 if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
2100 success = false;
2101 break;
2102 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive) {
2103 setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2104 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
2105 setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2106 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp) {
2107 fams2_required = true;
2108 setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2109 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
2110 fams2_required = true;
2111 setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2112 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
2113 fams2_required = true;
2114 setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2115 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
2116 fams2_required = true;
2117 setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2118 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
2119 fams2_required = true;
2120 setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2121 }
2122 }
2123
2124 /* copy FAMS2 meta */
2125 if (success) {
2126 display_config->stage3.fams2_required = fams2_required;
2127 memcpy(&display_config->stage3.stream_fams2_meta,
2128 &scratch->pmo_dcn4.stream_fams2_meta,
2129 sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
2130 }
2131
2132 return success;
2133 }
2134
2135 static int get_minimum_reserved_time_us_for_planes(struct display_configuation_with_meta *display_config, int plane_mask)
2136 {
2137 int min_time_us = 0xFFFFFF;
2138 unsigned int plane_index = 0;
2139
2140 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2141 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2142 if (min_time_us > (display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000))
2143 min_time_us = display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
2144 }
2145 }
2146 return min_time_us;
2147 }
2148
2149 bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
2150 {
2151 bool p_state_supported = true;
2152 unsigned int stream_index;
2153 struct dml2_pmo_scratch *s = &in_out->instance->scratch;
2154
2155 int MIN_VACTIVE_MARGIN_VBLANK = 0;
2156 int MIN_VACTIVE_MARGIN_DRR = 0;
2157 int REQUIRED_RESERVED_TIME = 0;
2158
2159 if (in_out->base_display_config->display_config.overrides.all_streams_blanked) {
2160 return true;
2161 }
2162
2163 MIN_VACTIVE_MARGIN_VBLANK = INT_MIN;
2164 MIN_VACTIVE_MARGIN_DRR = INT_MIN;
2165 REQUIRED_RESERVED_TIME = (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
2166
2167 if (s->pmo_dcn4.cur_pstate_candidate < 0)
2168 return false;
2169
2170 for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
2171 struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
2172
2173 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
2174 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
2175 if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
2176 get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
2177 p_state_supported = false;
2178 break;
2179 }
2180 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
2181 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
2182 if (get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
2183 REQUIRED_RESERVED_TIME ||
2184 get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
2185 p_state_supported = false;
2186 break;
2187 }
2188 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
2189 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
2190 if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
2191 p_state_supported = false;
2192 break;
2193 }
2194 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
2195 if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
2196 get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
2197 p_state_supported = false;
2198 break;
2199 }
2200 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
2201 p_state_supported = false;
2202 break;
2203 }
2204 }
2205
2206 return p_state_supported;
2207 }
2208
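/* If the last candidate failed and it allows a state increase, retry it at a
 * higher latency clock index; otherwise move on to the next candidate
 * strategy, starting again from the minimum latency index.
 */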
2209 bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out)
2210 {
2211 bool success = false;
2212 struct dml2_pmo_scratch *s = &in_out->instance->scratch;
2213
2214 memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
2215
2216 if (in_out->last_candidate_failed) {
2217 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].allow_state_increase &&
2218 s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index - 1) {
2219 s->pmo_dcn4.cur_latency_index++;
2220
2221 success = true;
2222 }
2223 }
2224
2225 if (!success) {
2226 s->pmo_dcn4.cur_latency_index = s->pmo_dcn4.min_latency_index;
2227 s->pmo_dcn4.cur_pstate_candidate++;
2228
2229 if (s->pmo_dcn4.cur_pstate_candidate < s->pmo_dcn4.num_pstate_candidates) {
2230 success = true;
2231 }
2232 }
2233
2234 if (success) {
2235 in_out->optimized_display_config->stage3.min_clk_index_for_latency = s->pmo_dcn4.cur_latency_index;
2236 setup_display_config(in_out->optimized_display_config, in_out->instance, in_out->instance->scratch.pmo_dcn4.cur_pstate_candidate);
2237 }
2238
2239 return success;
2240 }
2241
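/* Stutter optimization is only attempted when each stream's nominal vblank
 * time exceeds MIN_BLANK_STUTTER_FACTOR x the relevant stutter latency. For
 * illustration (hypothetical latency): a Z8 exit latency of 500us requires at
 * least 1500us of nominal vblank time per frame.
 */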
2242 bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
2243 {
2244 bool success = true;
2245 struct dml2_pmo_instance *pmo = in_out->instance;
2246 bool stutter_period_meets_z8_eco = true;
2247 bool z8_stutter_optimization_too_expensive = false;
2248 bool stutter_optimization_too_expensive = false;
2249 double line_time_us, vblank_nom_time_us;
2250
2251 unsigned int i;
2252
2253 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
2254 pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
2255 pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us)
2256 return false; // Unexpected SoCBB setup
2257
2258 for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
2259 if (in_out->base_display_config->mode_support_result.cfg_support_info.plane_support_info[i].active_latency_hiding_us <
2260 pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us + pmo->soc_bb->power_management_parameters.z8_min_idle_time) {
2261 stutter_period_meets_z8_eco = false;
2262 break;
2263 }
2264 }
2265
2266 for (i = 0; i < in_out->base_display_config->display_config.num_streams; i++) {
2267 line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
2268 vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
2269
2270 if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
2271 z8_stutter_optimization_too_expensive = true;
2272 break;
2273 }
2274
2275 if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
2276 stutter_optimization_too_expensive = true;
2277 break;
2278 }
2279 }
2280
2281 pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
2282 pmo->scratch.pmo_dcn4.cur_stutter_candidate = 0;
2283
2284 if (stutter_period_meets_z8_eco && !z8_stutter_optimization_too_expensive) {
2285 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0) {
2286 pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us;
2287 pmo->scratch.pmo_dcn4.num_stutter_candidates++;
2288 pmo->scratch.pmo_dcn4.z8_vblank_optimizable = true;
2289 }
2290 } else {
2291 pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
2292 }
2293
2294 if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
2295 pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
2296 pmo->scratch.pmo_dcn4.num_stutter_candidates++;
2297 }
2298
2299 if (pmo->scratch.pmo_dcn4.num_stutter_candidates == 0)
2300 success = false;
2301
2302 return success;
2303 }
2304
2305 bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out)
2306 {
2307 bool success = true;
2308 struct dml2_pmo_instance *pmo = in_out->instance;
2309
2310 unsigned int i;
2311
2312 for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
2313 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
2314 pmo->scratch.pmo_dcn4.z8_vblank_optimizable &&
2315 in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * 1000) {
2316 success = false;
2317 break;
2318 }
2319 if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
2320 in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * 1000) {
2321 success = false;
2322 break;
2323 }
2324 }
2325
2326 return success;
2327 }
2328
2329 bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out)
2330 {
2331 bool success = false;
2332 struct dml2_pmo_instance *pmo = in_out->instance;
2333 unsigned int i;
2334
2335 memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
2336
2337 if (!in_out->last_candidate_failed) {
2338 if (pmo->scratch.pmo_dcn4.cur_stutter_candidate < pmo->scratch.pmo_dcn4.num_stutter_candidates) {
2339 for (i = 0; i < in_out->optimized_display_config->display_config.num_planes; i++) {
2340 /* take the max of the current and the optimal reserved time */
2341 in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns =
2342 (long)math_max2(pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate] * 1000,
2343 in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns);
2344 }
2345
2346 success = true;
2347 }
2348 }
2349
2350 return success;
2351 }
2352