1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4
5 #include "dml2_pmo_factory.h"
6 #include "dml2_debug.h"
7 #include "lib_float_math.h"
8 #include "dml2_pmo_dcn4_fams2.h"
9
// We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
static const double MIN_VACTIVE_MARGIN_PCT = 0.25;
// Blank-time-to-stutter-period ratio threshold; consumed outside this chunk — confirm exact semantics at use site
static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
12
/*
 * Base p-state strategy candidates for 1-display configs, ordered from most
 * to least preferred. Only stream slot 0 is populated; the remaining slots
 * are dml2_pstate_method_na. allow_state_increase marks entries permitted to
 * raise base clocks to cover latency.
 */
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
	// VActive Preferred
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then SVP
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Finally VBlank, but allow base clocks for latency to increase
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
	*/
};

// Number of entries in the 1-display base strategy table
static const int base_strategy_list_1_display_size = sizeof(base_strategy_list_1_display) / sizeof(struct dml2_pmo_pstate_strategy);
49
/*
 * Base p-state strategy candidates for 2-display configs, ordered from most
 * to least preferred. Stream slots 0 and 1 are populated; the remaining
 * slots are dml2_pstate_method_na.
 */
static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
	// VActive only is preferred
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then VActive + VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then VBlank only
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then SVP + VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// Then SVP + DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then SVP + SVP
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_svp, dml2_pstate_method_fw_svp, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then VActive + DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Then DRR + DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// Finally VBlank, but allow base clocks for latency to increase
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
	*/
};

// Number of entries in the 2-display base strategy table
static const int base_strategy_list_2_display_size = sizeof(base_strategy_list_2_display) / sizeof(struct dml2_pmo_pstate_strategy);
110
/*
 * Base p-state strategy candidates for 3-display configs, ordered from most
 * to least preferred. Stream slots 0-2 are populated; slot 3 is
 * dml2_pstate_method_na.
 */
static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
	// All VActive
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// VActive + 1 VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// All VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = false,
	},

	// All DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_na },
		.allow_state_increase = true,
	},

	// All VBlank, with state increase allowed
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_na },
		.allow_state_increase = true,
	},
	*/
};

// Number of entries in the 3-display base strategy table
static const int base_strategy_list_3_display_size = sizeof(base_strategy_list_3_display) / sizeof(struct dml2_pmo_pstate_strategy);
147
/*
 * Base p-state strategy candidates for 4-display configs, ordered from most
 * to least preferred. All four stream slots are populated.
 */
static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
	// All VActive
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive },
		.allow_state_increase = true,
	},

	// VActive + 1 VBlank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
		.allow_state_increase = false,
	},

	// All Vblank
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
		.allow_state_increase = false,
	},

	// All DRR
	{
		.per_stream_pstate_method = { dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr, dml2_pstate_method_fw_drr },
		.allow_state_increase = true,
	},

	// All VBlank, with state increase allowed
	/*
	{
		.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank, dml2_pstate_method_vblank },
		.allow_state_increase = true,
	},
	*/
};

// Number of entries in the 4-display base strategy table
static const int base_strategy_list_4_display_size = sizeof(base_strategy_list_4_display) / sizeof(struct dml2_pmo_pstate_strategy);
184
185
increase_odm_combine_factor(enum dml2_odm_mode * odm_mode,int odms_calculated)186 static bool increase_odm_combine_factor(enum dml2_odm_mode *odm_mode, int odms_calculated)
187 {
188 bool result = true;
189
190 if (*odm_mode == dml2_odm_mode_auto) {
191 switch (odms_calculated) {
192 case 1:
193 *odm_mode = dml2_odm_mode_bypass;
194 break;
195 case 2:
196 *odm_mode = dml2_odm_mode_combine_2to1;
197 break;
198 case 3:
199 *odm_mode = dml2_odm_mode_combine_3to1;
200 break;
201 case 4:
202 *odm_mode = dml2_odm_mode_combine_4to1;
203 break;
204 default:
205 result = false;
206 break;
207 }
208 }
209
210 if (result) {
211 if (*odm_mode == dml2_odm_mode_bypass) {
212 *odm_mode = dml2_odm_mode_combine_2to1;
213 } else if (*odm_mode == dml2_odm_mode_combine_2to1) {
214 *odm_mode = dml2_odm_mode_combine_3to1;
215 } else if (*odm_mode == dml2_odm_mode_combine_3to1) {
216 *odm_mode = dml2_odm_mode_combine_4to1;
217 } else {
218 result = false;
219 }
220 }
221
222 return result;
223 }
224
/*
 * Increment the MPC combine factor if it is still below the given limit.
 * Returns true when the factor was incremented, false when the limit has
 * already been reached (the factor is left untouched).
 */
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
{
	if (*mpc_combine_factor >= limit)
		return false;

	*mpc_combine_factor += 1;
	return true;
}
234
count_planes_with_stream_index(const struct dml2_display_cfg * display_cfg,unsigned int stream_index)235 static int count_planes_with_stream_index(const struct dml2_display_cfg *display_cfg, unsigned int stream_index)
236 {
237 unsigned int i, count;
238
239 count = 0;
240 for (i = 0; i < display_cfg->num_planes; i++) {
241 if (display_cfg->plane_descriptors[i].stream_index == stream_index)
242 count++;
243 }
244
245 return count;
246 }
247
/*
 * Attempt to fix DCC mcache admissibility failures by granting each failing
 * plane one extra DPP (MPC combine), consuming from the pool of free pipes.
 * Only planes whose stream uses no ODM combine (odms_used == 1) can be
 * optimized here. Returns true if every failing plane received an extra
 * pipe, false as soon as one cannot (no free pipes, per-plane pipe limit
 * reached, or the plane's stream is ODM combined).
 */
static bool optimize_dcc_mcache_no_odm(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out,
	int free_pipes)
{
	struct dml2_pmo_instance *pmo = in_out->instance;

	unsigned int i;
	bool result = true;

	for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
		// For pipes that failed dcc mcache check, we want to increase the pipe count.
		// The logic for doing this depends on how many pipes is already being used,
		// and whether it's mpcc or odm combine.
		if (!in_out->dcc_mcache_supported[i]) {
			// For the general case of "n displays", we can only optimize streams with an ODM combine factor of 1
			if (in_out->cfg_support_info->stream_support_info[in_out->optimized_display_cfg->plane_descriptors[i].stream_index].odms_used == 1) {
				// Seed the override with the pipe count the plane already uses
				in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor =
					in_out->cfg_support_info->plane_support_info[i].dpps_used;
				// For each plane that is not passing mcache validation, just add another pipe to it, up to the limit.
				if (free_pipes > 0) {
					if (!increase_mpc_combine_factor(&in_out->optimized_display_cfg->plane_descriptors[i].overrides.mpcc_combine_factor,
						pmo->mpc_combine_limit)) {
						// We've reached max pipes allocatable to a single plane, so we fail.
						result = false;
						break;
					} else {
						// Successfully added another pipe to this failing plane.
						free_pipes--;
					}
				} else {
					// No free pipes to add.
					result = false;
					break;
				}
			} else {
				// If the stream of this plane needs ODM combine, no further optimization can be done.
				result = false;
				break;
			}
		}
	}

	return result;
}
291
/*
 * Entry point for mcache admissibility optimization: copies the incoming
 * display config into the optimized config (when they are distinct buffers),
 * computes the free pipe budget, then either adds MPC pipes to failing
 * planes (multi-stream or dynamic-ODM-disabled case) or raises the ODM
 * combine factor (single-stream case with ODM already active). Returns true
 * when all mcache failures could be addressed.
 */
bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;

	unsigned int i, used_pipes, free_pipes, planes_on_stream;
	bool result;

	// Work on a copy so the caller's base config is preserved
	if (in_out->display_config != in_out->optimized_display_cfg) {
		memcpy(in_out->optimized_display_cfg, in_out->display_config, sizeof(struct dml2_display_cfg));
	}

	//Count number of free pipes, and check if any odm combine is in use.
	used_pipes = 0;
	for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
		used_pipes += in_out->cfg_support_info->plane_support_info[i].dpps_used;
	}
	free_pipes = pmo->ip_caps->pipe_count - used_pipes;

	// Optimization loop
	// The goal here is to add more pipes to any planes
	// which are failing mcache admissibility
	result = true;

	// The optimization logic depends on whether ODM combine is enabled, and the stream count.
	if (in_out->optimized_display_cfg->num_streams > 1 || in_out->instance->options->disable_dyn_odm) {
		// If there are multiple streams, we are limited to only be able to optimize mcache failures on planes
		// which are not ODM combined.

		result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
	} else if (in_out->optimized_display_cfg->num_streams == 1) {
		// In single stream cases, we still optimize mcache failures when there's ODM combine with some
		// additional logic.

		if (in_out->cfg_support_info->stream_support_info[0].odms_used > 1) {
			// If ODM combine is enabled, then the logic is to increase ODM combine factor.

			// Optimization for streams with > 1 ODM combine factor is only supported for single display.
			planes_on_stream = count_planes_with_stream_index(in_out->optimized_display_cfg, 0);

			for (i = 0; i < in_out->optimized_display_cfg->num_planes; i++) {
				// For pipes that failed dcc mcache check, we want to increase the pipe count.
				// The logic for doing this depends on how many pipes is already being used,
				// and whether it's mpcc or odm combine.
				if (!in_out->dcc_mcache_supported[i]) {
					// Increasing ODM combine factor on a stream requires a free pipe for each plane on the stream.
					if (free_pipes >= planes_on_stream) {
						/*
						 * NOTE(review): stream_descriptors is indexed here with the
						 * plane loop variable i, and plane_support_info[i].dpps_used
						 * is passed as the calculated ODM count. With a single
						 * stream this only aliases stream 0 when i == 0 — confirm
						 * the intended indexing for failing planes with i > 0.
						 */
						if (!increase_odm_combine_factor(&in_out->optimized_display_cfg->stream_descriptors[i].overrides.odm_mode,
							in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
							result = false;
						} else {
							break;
						}
					} else {
						result = false;
						break;
					}
				}
			}
		} else {
			// If ODM combine is not enabled, then we can actually use the same logic as before.

			result = optimize_dcc_mcache_no_odm(in_out, free_pipes);
		}
	} else {
		// Zero streams: nothing to optimize
		result = true;
	}

	return result;
}
361
convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)362 static enum dml2_pstate_method convert_strategy_to_drr_variant(const enum dml2_pstate_method base_strategy)
363 {
364 enum dml2_pstate_method variant_strategy = 0;
365
366 switch (base_strategy) {
367 case dml2_pstate_method_vactive:
368 variant_strategy = dml2_pstate_method_fw_vactive_drr;
369 break;
370 case dml2_pstate_method_vblank:
371 variant_strategy = dml2_pstate_method_fw_vblank_drr;
372 break;
373 case dml2_pstate_method_fw_svp:
374 variant_strategy = dml2_pstate_method_fw_svp_drr;
375 break;
376 case dml2_pstate_method_fw_vactive_drr:
377 case dml2_pstate_method_fw_vblank_drr:
378 case dml2_pstate_method_fw_svp_drr:
379 case dml2_pstate_method_fw_drr:
380 case dml2_pstate_method_reserved_hw:
381 case dml2_pstate_method_reserved_fw:
382 case dml2_pstate_method_reserved_fw_drr_clamped:
383 case dml2_pstate_method_reserved_fw_drr_var:
384 case dml2_pstate_method_count:
385 case dml2_pstate_method_na:
386 default:
387 /* no variant for this mode */
388 variant_strategy = base_strategy;
389 }
390
391 return variant_strategy;
392 }
393
get_expanded_strategy_list(struct dml2_pmo_init_data * init_data,int stream_count)394 static struct dml2_pmo_pstate_strategy *get_expanded_strategy_list(struct dml2_pmo_init_data *init_data, int stream_count)
395 {
396 struct dml2_pmo_pstate_strategy *expanded_strategy_list = NULL;
397
398 switch (stream_count) {
399 case 1:
400 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_1_display;
401 break;
402 case 2:
403 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_2_display;
404 break;
405 case 3:
406 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_3_display;
407 break;
408 case 4:
409 expanded_strategy_list = init_data->pmo_dcn4.expanded_strategy_list_4_display;
410 break;
411 default:
412 break;
413 }
414
415 return expanded_strategy_list;
416 }
417
/*
 * Return the number of expanded strategies recorded for the given (1-based)
 * stream count. No bounds checking is performed — the caller must pass a
 * valid stream count.
 */
static unsigned int get_num_expanded_strategies(
	struct dml2_pmo_init_data *init_data,
	int stream_count)
{
	return init_data->pmo_dcn4.num_expanded_strategies_per_list[stream_count - 1];
}
424
insert_strategy_into_expanded_list(const struct dml2_pmo_pstate_strategy * per_stream_pstate_strategy,const int stream_count,struct dml2_pmo_pstate_strategy * expanded_strategy_list,unsigned int * num_expanded_strategies)425 static void insert_strategy_into_expanded_list(
426 const struct dml2_pmo_pstate_strategy *per_stream_pstate_strategy,
427 const int stream_count,
428 struct dml2_pmo_pstate_strategy *expanded_strategy_list,
429 unsigned int *num_expanded_strategies)
430 {
431 (void)stream_count;
432 if (expanded_strategy_list && num_expanded_strategies) {
433 memcpy(&expanded_strategy_list[*num_expanded_strategies], per_stream_pstate_strategy, sizeof(struct dml2_pmo_pstate_strategy));
434
435 (*num_expanded_strategies)++;
436 }
437 }
438
/*
 * Expand one base strategy into every unique per-stream ordering (multiset
 * permutation) of its p-state methods and insert each ordering into the
 * expanded list. allow_state_increase is copied unchanged into every
 * generated entry.
 */
static void expand_base_strategy(
	const struct dml2_pmo_pstate_strategy *base_strategy,
	const unsigned int stream_count,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
{
	bool skip_to_next_stream;
	bool expanded_strategy_added;
	bool skip_iteration;
	unsigned int i, j;
	/* remaining uses of each method, bucketed at the first stream index using it */
	unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	/* per-stream cursor over the candidate method buckets */
	unsigned int stream_iteration_indices[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	struct dml2_pmo_pstate_strategy cur_strategy_list = { 0 };

	/* determine number of displays per method */
	for (i = 0; i < stream_count; i++) {
		/* increment the count of the earliest index with the same method */
		for (j = 0; j < stream_count; j++) {
			if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
				num_streams_per_method[j] = num_streams_per_method[j] + 1;
				break;
			}
		}
	}

	cur_strategy_list.allow_state_increase = base_strategy->allow_state_increase;

	i = 0;
	/* uses a while loop instead of recursion to build permutations of base strategy */
	while (stream_iteration_indices[0] < stream_count) {
		skip_to_next_stream = false;
		expanded_strategy_added = false;
		skip_iteration = false;

		/* determine what to do for this iteration */
		if (stream_iteration_indices[i] < stream_count && num_streams_per_method[stream_iteration_indices[i]] != 0) {
			/* decrement count and assign method */
			cur_strategy_list.per_stream_pstate_method[i] = base_strategy->per_stream_pstate_method[stream_iteration_indices[i]];
			num_streams_per_method[stream_iteration_indices[i]] -= 1;

			if (i >= stream_count - 1) {
				/* insert into strategy list */
				insert_strategy_into_expanded_list(&cur_strategy_list, stream_count, expanded_strategy_list, num_expanded_strategies);
				expanded_strategy_added = true;
			} else {
				/* skip to next stream */
				skip_to_next_stream = true;
			}
		} else {
			/* bucket exhausted or cursor past the end — just advance the cursor */
			skip_iteration = true;
		}

		/* prepare for next iteration */
		if (skip_to_next_stream) {
			i++;
		} else {
			/* restore count */
			if (!skip_iteration) {
				num_streams_per_method[stream_iteration_indices[i]] += 1;
			}

			/* increment iteration count */
			stream_iteration_indices[i]++;

			/* if iterations are complete, or last stream was reached */
			if ((stream_iteration_indices[i] >= stream_count || expanded_strategy_added) && i > 0) {
				/* reset per stream index, decrement i */
				stream_iteration_indices[i] = 0;
				i--;

				/* restore previous stream's count and increment index */
				num_streams_per_method[stream_iteration_indices[i]] += 1;
				stream_iteration_indices[i]++;
			}
		}
	}
}
516
517
/*
 * Validate a candidate base/variant split of a strategy. A split is invalid
 * when, for a stream bucket whose base method is vblank, the vblank method
 * would be mixed with its DRR variant across streams, or more than one
 * stream would use the vblank DRR variant.
 */
static bool is_variant_method_valid(const struct dml2_pmo_pstate_strategy *base_strategy,
	const struct dml2_pmo_pstate_strategy *variant_strategy,
	const unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS],
	const unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS],
	const unsigned int stream_count)
{
	(void)variant_strategy; /* currently unused; kept for interface symmetry */
	bool valid = true;
	unsigned int i;

	/* check all restrictions are met */
	for (i = 0; i < stream_count; i++) {
		/* vblank + vblank_drr variants are invalid */
		if (base_strategy->per_stream_pstate_method[i] == dml2_pstate_method_vblank &&
			((num_streams_per_base_method[i] > 0 && num_streams_per_variant_method[i] > 0) ||
			num_streams_per_variant_method[i] > 1)) {
			valid = false;
			break;
		}
	}

	return valid;
}
541
/*
 * Generate every combination of the base strategy in which some streams of
 * each method are swapped for that method's DRR variant (see
 * convert_strategy_to_drr_variant). Combinations identical to the base
 * strategy (no variant used) are skipped, and invalid vblank/vblank_drr
 * mixes are filtered by is_variant_method_valid(). When should_permute is
 * set, each surviving variant is further expanded into all stream-order
 * permutations via expand_base_strategy(); otherwise it is inserted
 * directly into the expanded list.
 */
static void expand_variant_strategy(
	const struct dml2_pmo_pstate_strategy *base_strategy,
	const unsigned int stream_count,
	const bool should_permute,
	struct dml2_pmo_pstate_strategy *expanded_strategy_list,
	unsigned int *num_expanded_strategies)
{
	bool variant_found;
	unsigned int i, j;
	unsigned int method_index;
	unsigned int stream_index;
	/* total streams per method, bucketed at the first stream index using it */
	unsigned int num_streams_per_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	/* streams of each bucket still using the base method */
	unsigned int num_streams_per_base_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	/* streams of each bucket swapped to the DRR variant */
	unsigned int num_streams_per_variant_method[PMO_DCN4_MAX_DISPLAYS] = { 0 };
	enum dml2_pstate_method per_stream_variant_method[DML2_MAX_PLANES];
	struct dml2_pmo_pstate_strategy variant_strategy = { 0 };

	/* determine number of displays per method */
	for (i = 0; i < stream_count; i++) {
		/* increment the count of the earliest index with the same method */
		for (j = 0; j < stream_count; j++) {
			if (base_strategy->per_stream_pstate_method[i] == base_strategy->per_stream_pstate_method[j]) {
				num_streams_per_method[j] = num_streams_per_method[j] + 1;
				break;
			}
		}

		per_stream_variant_method[i] = convert_strategy_to_drr_variant(base_strategy->per_stream_pstate_method[i]);
	}
	memcpy(num_streams_per_base_method, num_streams_per_method, sizeof(unsigned int) * PMO_DCN4_MAX_DISPLAYS);

	memcpy(&variant_strategy, base_strategy, sizeof(struct dml2_pmo_pstate_strategy));

	method_index = 0;
	/* uses a while loop instead of recursion to build permutations of base strategy */
	while (num_streams_per_base_method[0] > 0 || method_index != 0) {
		if (method_index == stream_count) {
			/* construct variant strategy */
			variant_found = false;
			stream_index = 0;

			/* lay out base methods first, then variant methods, per bucket */
			for (i = 0; i < stream_count; i++) {
				for (j = 0; j < num_streams_per_base_method[i]; j++) {
					variant_strategy.per_stream_pstate_method[stream_index++] = base_strategy->per_stream_pstate_method[i];
				}

				for (j = 0; j < num_streams_per_variant_method[i]; j++) {
					variant_strategy.per_stream_pstate_method[stream_index++] = per_stream_variant_method[i];
					if (base_strategy->per_stream_pstate_method[i] != per_stream_variant_method[i]) {
						variant_found = true;
					}
				}
			}

			if (variant_found && is_variant_method_valid(base_strategy, &variant_strategy, num_streams_per_base_method, num_streams_per_variant_method, stream_count)) {
				if (should_permute) {
					/* permutations are permitted, proceed to expand */
					expand_base_strategy(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
				} else {
					/* no permutations allowed, so add to list now */
					insert_strategy_into_expanded_list(&variant_strategy, stream_count, expanded_strategy_list, num_expanded_strategies);
				}
			}

			/* rollback to earliest method with bases remaining */
			for (method_index = stream_count - 1; method_index > 0; method_index--) {
				if (num_streams_per_base_method[method_index]) {
					/* bases remaining */
					break;
				} else {
					/* reset counters */
					num_streams_per_base_method[method_index] = num_streams_per_method[method_index];
					num_streams_per_variant_method[method_index] = 0;
				}
			}
		}

		if (num_streams_per_base_method[method_index]) {
			/* move one stream of this bucket from base to variant */
			num_streams_per_base_method[method_index]--;
			num_streams_per_variant_method[method_index]++;

			method_index++;
		} else if (method_index != 0) {
			method_index++;
		}
	}
}
629
pmo_dcn4_fams2_expand_base_pstate_strategies(const struct dml2_pmo_pstate_strategy * base_strategies_list,const unsigned int num_base_strategies,const unsigned int stream_count,struct dml2_pmo_pstate_strategy * expanded_strategy_list,unsigned int * num_expanded_strategies)630 void pmo_dcn4_fams2_expand_base_pstate_strategies(
631 const struct dml2_pmo_pstate_strategy *base_strategies_list,
632 const unsigned int num_base_strategies,
633 const unsigned int stream_count,
634 struct dml2_pmo_pstate_strategy *expanded_strategy_list,
635 unsigned int *num_expanded_strategies)
636 {
637 unsigned int i;
638
639 /* expand every explicit base strategy (except all DRR) */
640 for (i = 0; i < num_base_strategies; i++) {
641 expand_base_strategy(&base_strategies_list[i], stream_count, expanded_strategy_list, num_expanded_strategies);
642 expand_variant_strategy(&base_strategies_list[i], stream_count, true, expanded_strategy_list, num_expanded_strategies);
643 }
644 }
645
pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out * in_out)646 bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
647 {
648 int i = 0;
649 struct dml2_pmo_instance *pmo = in_out->instance;
650
651 unsigned int base_list_size = 0;
652 const struct dml2_pmo_pstate_strategy *base_list = NULL;
653 unsigned int *expanded_list_size = NULL;
654 struct dml2_pmo_pstate_strategy *expanded_list = NULL;
655
656 pmo->soc_bb = in_out->soc_bb;
657 pmo->ip_caps = in_out->ip_caps;
658 pmo->mpc_combine_limit = 2;
659 pmo->odm_combine_limit = 4;
660 pmo->mcg_clock_table_size = in_out->mcg_clock_table_size;
661
662 pmo->fams_params.v2.subvp.refresh_rate_limit_max = 175;
663 pmo->fams_params.v2.subvp.refresh_rate_limit_min = 0;
664 pmo->fams_params.v2.drr.refresh_rate_limit_max = 1000;
665 pmo->fams_params.v2.drr.refresh_rate_limit_min = 119;
666
667 pmo->options = in_out->options;
668
669 /* generate permutations of p-state configs from base strategy list */
670 for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
671 switch (i+1) {
672 case 1:
673 if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
674 base_list = pmo->options->override_strategy_lists[i];
675 base_list_size = pmo->options->num_override_strategies_per_list[i];
676 } else {
677 base_list = base_strategy_list_1_display;
678 base_list_size = base_strategy_list_1_display_size;
679 }
680
681 expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
682 expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
683
684 break;
685 case 2:
686 if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
687 base_list = pmo->options->override_strategy_lists[i];
688 base_list_size = pmo->options->num_override_strategies_per_list[i];
689 } else {
690 base_list = base_strategy_list_2_display;
691 base_list_size = base_strategy_list_2_display_size;
692 }
693
694 expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
695 expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
696
697 break;
698 case 3:
699 if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
700 base_list = pmo->options->override_strategy_lists[i];
701 base_list_size = pmo->options->num_override_strategies_per_list[i];
702 } else {
703 base_list = base_strategy_list_3_display;
704 base_list_size = base_strategy_list_3_display_size;
705 }
706
707 expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
708 expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
709
710 break;
711 case 4:
712 if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
713 base_list = pmo->options->override_strategy_lists[i];
714 base_list_size = pmo->options->num_override_strategies_per_list[i];
715 } else {
716 base_list = base_strategy_list_4_display;
717 base_list_size = base_strategy_list_4_display_size;
718 }
719
720 expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
721 expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
722
723 break;
724 }
725
726 DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
727
728 /* populate list */
729 pmo_dcn4_fams2_expand_base_pstate_strategies(
730 base_list,
731 base_list_size,
732 i + 1,
733 expanded_list,
734 expanded_list_size);
735 }
736
737 return true;
738 }
739
is_h_timing_divisible_by(const struct dml2_timing_cfg * timing,unsigned char denominator)740 static bool is_h_timing_divisible_by(const struct dml2_timing_cfg *timing, unsigned char denominator)
741 {
742 /*
743 * Htotal, Hblank start/end, and Hsync start/end all must be divisible
744 * in order for the horizontal timing params to be considered divisible
745 * by 2. Hsync start is always 0.
746 */
747 unsigned long h_blank_start = timing->h_total - timing->h_front_porch;
748
749 return (timing->h_total % denominator == 0) &&
750 (h_blank_start % denominator == 0) &&
751 (timing->h_blank_end % denominator == 0) &&
752 (timing->h_sync_width % denominator == 0);
753 }
754
is_dp_encoder(enum dml2_output_encoder_class encoder_type)755 static bool is_dp_encoder(enum dml2_output_encoder_class encoder_type)
756 {
757 switch (encoder_type) {
758 case dml2_dp:
759 case dml2_edp:
760 case dml2_dp2p0:
761 case dml2_none:
762 return true;
763 case dml2_hdmi:
764 case dml2_hdmifrl:
765 default:
766 return false;
767 }
768 }
769
/*
 * Gate the vmin (dynamic ODM) optimization stage: mark streams that must
 * not be optimized in stage4 state and record that this stage ran. Returns
 * false without doing anything when dynamic ODM is globally disabled, or is
 * disabled for multi-stream configs and more than one stream is present.
 */
bool pmo_dcn4_fams2_init_for_vmin(struct dml2_pmo_init_for_vmin_in_out *in_out)
{
	unsigned int i;
	const struct dml2_display_cfg *display_config =
		&in_out->base_display_config->display_config;
	const struct dml2_core_mode_support_result *mode_support_result =
		&in_out->base_display_config->mode_support_result;
	struct dml2_optimization_stage4_state *state =
		&in_out->base_display_config->stage4;

	if (in_out->instance->options->disable_dyn_odm ||
		(in_out->instance->options->disable_dyn_odm_for_multi_stream && display_config->num_streams > 1))
		return false;

	/* exclude streams whose base config already needs MPC combine */
	for (i = 0; i < display_config->num_planes; i++)
		/*
		 * vmin optimization is required to be seamlessly switched off
		 * at any time when the new configuration is no longer
		 * supported. However switching from ODM combine to MPC combine
		 * is not always seamless. When there not enough free pipes, we
		 * will have to use the same secondary OPP heads as secondary
		 * DPP pipes in MPC combine in new state. This transition is
		 * expected to cause glitches. To avoid the transition, we only
		 * allow vmin optimization if the stream's base configuration
		 * doesn't require MPC combine. This condition checks if MPC
		 * combine is enabled. If so do not optimize the stream.
		 */
		if (mode_support_result->cfg_support_info.plane_support_info[i].dpps_used > 1 &&
			mode_support_result->cfg_support_info.stream_support_info[display_config->plane_descriptors[i].stream_index].odms_used == 1)
			state->unoptimizable_streams[display_config->plane_descriptors[i].stream_index] = true;

	/* per-stream exclusions: overrides, SVP, timing divisibility, encoder type */
	for (i = 0; i < display_config->num_streams; i++) {
		if (display_config->stream_descriptors[i].overrides.disable_dynamic_odm)
			state->unoptimizable_streams[i] = true;
		else if (in_out->base_display_config->stage3.stream_svp_meta[i].valid &&
			in_out->instance->options->disable_dyn_odm_for_stream_with_svp)
			state->unoptimizable_streams[i] = true;
		/*
		 * ODM Combine requires horizontal timing divisible by 2 so each
		 * ODM segment has the same size.
		 */
		else if (!is_h_timing_divisible_by(&display_config->stream_descriptors[i].timing, 2))
			state->unoptimizable_streams[i] = true;
		/*
		 * Our hardware support seamless ODM transitions for DP encoders
		 * only.
		 */
		else if (!is_dp_encoder(display_config->stream_descriptors[i].output.output_encoder))
			state->unoptimizable_streams[i] = true;
	}

	state->performed = true;

	return true;
}
825
pmo_dcn4_fams2_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out * in_out)826 bool pmo_dcn4_fams2_test_for_vmin(struct dml2_pmo_test_for_vmin_in_out *in_out)
827 {
828 bool is_vmin = true;
829
830 if (in_out->vmin_limits->dispclk_khz > 0 &&
831 in_out->display_config->mode_support_result.global.dispclk_khz > in_out->vmin_limits->dispclk_khz)
832 is_vmin = false;
833
834 return is_vmin;
835 }
836
find_highest_odm_load_stream_index(const struct dml2_display_cfg * display_config,const struct dml2_core_mode_support_result * mode_support_result)837 static int find_highest_odm_load_stream_index(
838 const struct dml2_display_cfg *display_config,
839 const struct dml2_core_mode_support_result *mode_support_result)
840 {
841 unsigned int i;
842 int odm_load, highest_odm_load = -1, highest_odm_load_index = -1;
843
844 for (i = 0; i < display_config->num_streams; i++) {
845 if (mode_support_result->cfg_support_info.stream_support_info[i].odms_used > 0)
846 odm_load = display_config->stream_descriptors[i].timing.pixel_clock_khz
847 / mode_support_result->cfg_support_info.stream_support_info[i].odms_used;
848 else
849 odm_load = 0;
850
851 if (odm_load > highest_odm_load) {
852 highest_odm_load_index = i;
853 highest_odm_load = odm_load;
854 }
855 }
856
857 return highest_odm_load_index;
858 }
859
/*
 * pmo_dcn4_fams2_optimize_for_vmin() - Try to raise the ODM combine factor of
 * the highest-ODM-load stream so the configuration can meet vmin.
 *
 * Dispclk is bounded by the stream with the highest pixel rate per ODM
 * segment, so only that stream is optimized. The base config is copied to
 * in_out->optimized_display_config and the stream's ODM override is stepped
 * up until a combine factor compatible with its timing and DSC slicing is
 * found.
 *
 * Return: true if a valid higher combine factor was written to the optimized
 * config, false otherwise (stream unoptimizable, already at the ODM limit,
 * or no timing-compatible factor available).
 */
bool pmo_dcn4_fams2_optimize_for_vmin(struct dml2_pmo_optimize_for_vmin_in_out *in_out)
{
	int stream_index;
	const struct dml2_display_cfg *display_config =
		&in_out->base_display_config->display_config;
	const struct dml2_core_mode_support_result *mode_support_result =
		&in_out->base_display_config->mode_support_result;
	unsigned int odms_used;
	struct dml2_stream_parameters *stream_descriptor;
	bool optimizable = false;

	/*
	 * highest odm load stream must be optimizable to continue as dispclk is
	 * bounded by it.
	 */
	stream_index = find_highest_odm_load_stream_index(display_config,
			mode_support_result);

	if (stream_index < 0 ||
			in_out->base_display_config->stage4.unoptimizable_streams[stream_index])
		return false;

	/* already at (or beyond) the ODM combine limit: nothing left to raise */
	odms_used = mode_support_result->cfg_support_info.stream_support_info[stream_index].odms_used;
	if ((int)odms_used >= in_out->instance->odm_combine_limit)
		return false;

	memcpy(in_out->optimized_display_config,
			in_out->base_display_config,
			sizeof(struct display_configuation_with_meta));

	/* step the ODM override upward until a compatible factor is found */
	stream_descriptor = &in_out->optimized_display_config->display_config.stream_descriptors[stream_index];
	while (!optimizable && increase_odm_combine_factor(
			&stream_descriptor->overrides.odm_mode,
			odms_used)) {
		switch (stream_descriptor->overrides.odm_mode) {
		case dml2_odm_mode_combine_2to1:
			optimizable = true;
			break;
		case dml2_odm_mode_combine_3to1:
			/*
			 * In ODM Combine 3:1 OTG_valid_pixel rate is 1/4 of
			 * actual pixel rate. Therefore horizontal timing must
			 * be divisible by 4.
			 */
			if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
				if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
					/*
					 * DSC h slice count must be divisible
					 * by 3.
					 */
					if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 3 == 0)
						optimizable = true;
				} else {
					optimizable = true;
				}
			}
			break;
		case dml2_odm_mode_combine_4to1:
			/*
			 * In ODM Combine 4:1 OTG_valid_pixel rate is 1/4 of
			 * actual pixel rate. Therefore horizontal timing must
			 * be divisible by 4.
			 */
			if (is_h_timing_divisible_by(&display_config->stream_descriptors[stream_index].timing, 4)) {
				if (mode_support_result->cfg_support_info.stream_support_info[stream_index].dsc_enable) {
					/*
					 * DSC h slice count must be divisible
					 * by 4.
					 */
					if (mode_support_result->cfg_support_info.stream_support_info[stream_index].num_dsc_slices % 4 == 0)
						optimizable = true;
				} else {
					optimizable = true;
				}
			}
			break;
		case dml2_odm_mode_auto:
		case dml2_odm_mode_bypass:
		case dml2_odm_mode_split_1to2:
		case dml2_odm_mode_mso_1to2:
		case dml2_odm_mode_mso_1to4:
		default:
			/* non-combine modes cannot satisfy vmin optimization */
			break;
		}
	}

	return optimizable;
}
948
/*
 * set_bit_in_bitfield() - Set a single bit in a 32-bit mask.
 * @bit_field: pointer to the mask to modify
 * @bit_offset: zero-based position of the bit to set
 *
 * Uses an unsigned constant for the shift: left-shifting the signed int
 * constant 0x1 by 31 is undefined behavior in C, while 0x1U is well defined
 * for every valid bit position of the mask.
 */
static void set_bit_in_bitfield(unsigned int *bit_field, unsigned int bit_offset)
{
	*bit_field |= (0x1U << bit_offset);
}
953
/*
 * is_bit_set_in_bitfield() - Test a single bit in a 32-bit mask.
 * @bit_field: mask to test
 * @bit_offset: zero-based position of the bit to test
 *
 * Uses an unsigned constant for the shift: left-shifting the signed int
 * constant 0x1 by 31 is undefined behavior in C, while 0x1U is well defined.
 *
 * Return: true if the bit is set, false otherwise.
 */
static bool is_bit_set_in_bitfield(unsigned int bit_field, unsigned int bit_offset)
{
	return (bit_field & (0x1U << bit_offset)) != 0;
}
961
/*
 * build_synchronized_timing_groups() - Partition streams into groups of
 * identical (and therefore synchronizable) timings.
 *
 * Each unmapped stream seeds a new timing group; every later stream whose
 * timing config compares byte-equal (memcmp) joins that group. Streams with
 * DRR enabled always form a single-member group, since a variable timing
 * cannot be synchronized with others. Results are written into the pmo
 * scratch: per-group stream masks, per-group line time, DRR enable/active
 * flags, and the group count.
 */
static void build_synchronized_timing_groups(
	struct dml2_pmo_instance *pmo,
	struct display_configuation_with_meta *display_config)
{
	unsigned int i, j;
	struct dml2_timing_cfg *master_timing;

	unsigned int stream_mapped_mask = 0;
	unsigned int num_timing_groups = 0;
	unsigned int timing_group_idx = 0;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	/* clear all group masks */
	memset(s->pmo_dcn4.synchronized_timing_group_masks, 0, sizeof(s->pmo_dcn4.synchronized_timing_group_masks));
	memset(s->pmo_dcn4.group_is_drr_enabled, 0, sizeof(s->pmo_dcn4.group_is_drr_enabled));
	memset(s->pmo_dcn4.group_is_drr_active, 0, sizeof(s->pmo_dcn4.group_is_drr_active));
	memset(s->pmo_dcn4.group_line_time_us, 0, sizeof(s->pmo_dcn4.group_line_time_us));
	s->pmo_dcn4.num_timing_groups = 0;

	for (i = 0; i < display_config->display_config.num_streams; i++) {
		master_timing = &display_config->display_config.stream_descriptors[i].timing;

		/* only need to build group if this stream is not in a group already */
		if (is_bit_set_in_bitfield(stream_mapped_mask, i)) {
			continue;
		}
		set_bit_in_bitfield(&stream_mapped_mask, i);
		timing_group_idx = num_timing_groups;
		num_timing_groups++;

		/* trivially set default timing group to itself */
		set_bit_in_bitfield(&s->pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i);
		s->pmo_dcn4.group_line_time_us[timing_group_idx] = (double)master_timing->h_total / master_timing->pixel_clock_khz * 1000.0;

		/* if drr is in use, timing is not synchronizable: leave as a
		 * single-member group and record the DRR state */
		if (master_timing->drr_config.enabled) {
			s->pmo_dcn4.group_is_drr_enabled[timing_group_idx] = true;
			s->pmo_dcn4.group_is_drr_active[timing_group_idx] = !master_timing->drr_config.disallowed &&
					(master_timing->drr_config.drr_active_fixed || master_timing->drr_config.drr_active_variable);
			continue;
		}

		/* find synchronizable timing groups: any later stream with a
		 * byte-identical timing config joins this group */
		for (j = i + 1; j < display_config->display_config.num_streams; j++) {
			if (memcmp(master_timing,
					&display_config->display_config.stream_descriptors[j].timing,
					sizeof(struct dml2_timing_cfg)) == 0) {
				set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
				set_bit_in_bitfield(&stream_mapped_mask, j);
			}
		}
	}

	s->pmo_dcn4.num_timing_groups = num_timing_groups;
}
1017
all_timings_support_vactive(const struct dml2_pmo_instance * pmo,const struct display_configuation_with_meta * display_config,unsigned int mask)1018 static bool all_timings_support_vactive(const struct dml2_pmo_instance *pmo,
1019 const struct display_configuation_with_meta *display_config,
1020 unsigned int mask)
1021 {
1022 unsigned int i;
1023 bool valid = true;
1024
1025 // Create a remap array to enable simple iteration through only masked stream indicies
1026 for (i = 0; i < display_config->display_config.num_streams; i++) {
1027 if (is_bit_set_in_bitfield(mask, i)) {
1028 /* check if stream has enough vactive margin */
1029 valid &= is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.stream_vactive_capability_mask, i);
1030 }
1031 }
1032
1033 return valid;
1034 }
1035
all_timings_support_vblank(const struct dml2_pmo_instance * pmo,const struct display_configuation_with_meta * display_config,unsigned int mask)1036 static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
1037 const struct display_configuation_with_meta *display_config,
1038 unsigned int mask)
1039 {
1040 unsigned int i;
1041
1042 bool synchronizable = true;
1043
1044 /* find first vblank stream index and compare the timing group mask */
1045 for (i = 0; i < display_config->display_config.num_streams; i++) {
1046 if (is_bit_set_in_bitfield(mask, i)) {
1047 if (mask != pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[i]) {
1048 /* vblank streams are not synchronizable */
1049 synchronizable = false;
1050 }
1051 break;
1052 }
1053 }
1054
1055 return synchronizable;
1056 }
1057
calc_svp_microschedule(const struct dml2_pstate_meta * pstate_meta)1058 static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta)
1059 {
1060 return pstate_meta->contention_delay_otg_vlines +
1061 pstate_meta->method_subvp.programming_delay_otg_vlines +
1062 pstate_meta->method_subvp.phantom_vtotal +
1063 pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
1064 pstate_meta->blackout_otg_vlines;
1065 }
1066
all_timings_support_drr(const struct dml2_pmo_instance * pmo,const struct display_configuation_with_meta * display_config,unsigned int mask)1067 static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
1068 const struct display_configuation_with_meta *display_config,
1069 unsigned int mask)
1070 {
1071 unsigned int i;
1072 for (i = 0; i < DML2_MAX_PLANES; i++) {
1073 const struct dml2_stream_parameters *stream_descriptor;
1074 const struct dml2_pstate_meta *stream_pstate_meta;
1075
1076 if (is_bit_set_in_bitfield(mask, i)) {
1077 stream_descriptor = &display_config->display_config.stream_descriptors[i];
1078 stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
1079
1080 if (!stream_descriptor->timing.drr_config.enabled)
1081 return false;
1082
1083 /* cannot support required vtotal */
1084 if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) {
1085 return false;
1086 }
1087
1088 /* check rr is within bounds */
1089 if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
1090 stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
1091 return false;
1092 }
1093
1094 /* check required stretch is allowed */
1095 if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
1096 stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
1097 return false;
1098 }
1099 }
1100 }
1101
1102 return true;
1103 }
1104
/*
 * all_timings_support_svp() - Check every stream selected by @mask can use
 * the SubVP p-state method.
 *
 * Two passes:
 *  1. Per plane: SVP streams must drive exactly one plane, that plane must
 *     span the full OTG vactive and be unrotated, and total mcache demand
 *     (phantom planes double their main plane's mcaches) must fit the SoC.
 *  2. Per stream: SubVP must not be disabled by override, the timing must
 *     not be interlaced, the vactive must fit the SVP microschedule, and the
 *     refresh rate must be within the FAMS2 SubVP bounds.
 */
static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_config,
	unsigned int mask)
{
	const struct dml2_stream_parameters *stream_descriptor;
	const struct dml2_plane_parameters *plane_descriptor;
	const struct dml2_pstate_meta *stream_pstate_meta;
	unsigned int microschedule_vlines;
	unsigned int i;
	unsigned int mcaches_per_plane;
	unsigned int total_mcaches_required = 0;

	unsigned int num_planes_per_stream[DML2_MAX_PLANES] = { 0 };

	/* confirm timing it is not a centered timing */
	for (i = 0; i < display_config->display_config.num_planes; i++) {
		plane_descriptor = &display_config->display_config.plane_descriptors[i];
		mcaches_per_plane = 0;

		if (plane_descriptor->surface.dcc.enable) {
			/* plane0 + plane1 mcaches, minus one if the last slice is shared */
			mcaches_per_plane += display_config->stage2.mcache_allocations[i].num_mcaches_plane0 +
					display_config->stage2.mcache_allocations[i].num_mcaches_plane1 -
					(display_config->stage2.mcache_allocations[i].last_slice_sharing.plane0_plane1 ? 1 : 0);
		}

		if (is_bit_set_in_bitfield(mask, (unsigned char)plane_descriptor->stream_index)) {
			num_planes_per_stream[plane_descriptor->stream_index]++;

			/* check recout height covers entire otg vactive, and single plane */
			if (num_planes_per_stream[plane_descriptor->stream_index] > 1 ||
					!plane_descriptor->composition.rect_out_height_spans_vactive ||
					plane_descriptor->composition.rotation_angle != dml2_rotation_0) {
				return false;
			}

			/* phantom requires same number of mcaches as main */
			if (plane_descriptor->surface.dcc.enable) {
				mcaches_per_plane *= 2;
			}
		}
		total_mcaches_required += mcaches_per_plane;
	}

	if (total_mcaches_required > pmo->soc_bb->num_dcc_mcaches) {
		/* too many mcaches required */
		return false;
	}

	for (i = 0; i < DML2_MAX_PLANES; i++) {
		if (is_bit_set_in_bitfield(mask, i)) {
			stream_descriptor = &display_config->display_config.stream_descriptors[i];
			stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];

			if (stream_descriptor->overrides.disable_subvp) {
				return false;
			}

			microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]);

			/* block if using an interlaced timing */
			if (stream_descriptor->timing.interlaced) {
				return false;
			}

			/* 1) svp main stream's vactive must be able to fit the microschedule
			 * 2) refresh rate must be within the allowed bounds
			 */
			if (microschedule_vlines >= stream_descriptor->timing.v_active ||
					(stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
					stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
				return false;
			}
		}
	}

	return true;
}
1182
insert_into_candidate_list(const struct dml2_pmo_pstate_strategy * pstate_strategy,int stream_count,struct dml2_pmo_scratch * scratch)1183 static void insert_into_candidate_list(const struct dml2_pmo_pstate_strategy *pstate_strategy, int stream_count, struct dml2_pmo_scratch *scratch)
1184 {
1185 (void)stream_count;
1186 scratch->pmo_dcn4.pstate_strategy_candidates[scratch->pmo_dcn4.num_pstate_candidates] = *pstate_strategy;
1187 scratch->pmo_dcn4.num_pstate_candidates++;
1188 }
1189
uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)1190 static enum dml2_pstate_method uclk_pstate_strategy_override_to_pstate_method(const enum dml2_uclk_pstate_change_strategy override_strategy)
1191 {
1192 enum dml2_pstate_method method = dml2_pstate_method_na;
1193
1194 switch (override_strategy) {
1195 case dml2_uclk_pstate_change_strategy_force_vactive:
1196 method = dml2_pstate_method_vactive;
1197 break;
1198 case dml2_uclk_pstate_change_strategy_force_vblank:
1199 method = dml2_pstate_method_vblank;
1200 break;
1201 case dml2_uclk_pstate_change_strategy_force_drr:
1202 method = dml2_pstate_method_fw_drr;
1203 break;
1204 case dml2_uclk_pstate_change_strategy_force_mall_svp:
1205 method = dml2_pstate_method_fw_svp;
1206 break;
1207 case dml2_uclk_pstate_change_strategy_force_mall_full_frame:
1208 case dml2_uclk_pstate_change_strategy_auto:
1209 default:
1210 method = dml2_pstate_method_na;
1211 }
1212
1213 return method;
1214 }
1215
pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)1216 static enum dml2_uclk_pstate_change_strategy pstate_method_to_uclk_pstate_strategy_override(const enum dml2_pstate_method method)
1217 {
1218 enum dml2_uclk_pstate_change_strategy override_strategy = dml2_uclk_pstate_change_strategy_auto;
1219
1220 switch (method) {
1221 case dml2_pstate_method_vactive:
1222 case dml2_pstate_method_fw_vactive_drr:
1223 override_strategy = dml2_uclk_pstate_change_strategy_force_vactive;
1224 break;
1225 case dml2_pstate_method_vblank:
1226 case dml2_pstate_method_fw_vblank_drr:
1227 override_strategy = dml2_uclk_pstate_change_strategy_force_vblank;
1228 break;
1229 case dml2_pstate_method_fw_svp:
1230 case dml2_pstate_method_fw_svp_drr:
1231 override_strategy = dml2_uclk_pstate_change_strategy_force_mall_svp;
1232 break;
1233 case dml2_pstate_method_fw_drr:
1234 override_strategy = dml2_uclk_pstate_change_strategy_force_drr;
1235 break;
1236 case dml2_pstate_method_reserved_hw:
1237 case dml2_pstate_method_reserved_fw:
1238 case dml2_pstate_method_reserved_fw_drr_clamped:
1239 case dml2_pstate_method_reserved_fw_drr_var:
1240 case dml2_pstate_method_count:
1241 case dml2_pstate_method_na:
1242 default:
1243 override_strategy = dml2_uclk_pstate_change_strategy_auto;
1244 }
1245
1246 return override_strategy;
1247 }
1248
all_planes_match_method(const struct display_configuation_with_meta * display_cfg,int plane_mask,enum dml2_pstate_method method)1249 static bool all_planes_match_method(const struct display_configuation_with_meta *display_cfg, int plane_mask, enum dml2_pstate_method method)
1250 {
1251 unsigned int i;
1252
1253 for (i = 0; i < DML2_MAX_PLANES; i++) {
1254 if (is_bit_set_in_bitfield(plane_mask, i)) {
1255 if (display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto &&
1256 display_cfg->display_config.plane_descriptors[i].overrides.uclk_pstate_change_strategy != pstate_method_to_uclk_pstate_strategy_override(method))
1257 return false;
1258 }
1259 }
1260
1261 return true;
1262 }
1263
build_method_scheduling_params(struct dml2_pstate_per_method_common_meta * stream_method_pstate_meta,struct dml2_pstate_meta * stream_pstate_meta)1264 static void build_method_scheduling_params(
1265 struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta,
1266 struct dml2_pstate_meta *stream_pstate_meta)
1267 {
1268 stream_method_pstate_meta->allow_time_us =
1269 (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) *
1270 stream_pstate_meta->otg_vline_time_us;
1271 if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) {
1272 /* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/
1273 stream_method_pstate_meta->disallow_time_us = 0.0;
1274 } else {
1275 stream_method_pstate_meta->disallow_time_us =
1276 stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us;
1277 }
1278 }
1279
get_per_method_common_meta(struct dml2_pmo_instance * pmo,enum dml2_pstate_method stream_pstate_method,int stream_idx)1280 static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta(
1281 struct dml2_pmo_instance *pmo,
1282 enum dml2_pstate_method stream_pstate_method,
1283 int stream_idx)
1284 {
1285 struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL;
1286
1287 switch (stream_pstate_method) {
1288 case dml2_pstate_method_vactive:
1289 case dml2_pstate_method_fw_vactive_drr:
1290 stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common;
1291 break;
1292 case dml2_pstate_method_vblank:
1293 case dml2_pstate_method_fw_vblank_drr:
1294 stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common;
1295 break;
1296 case dml2_pstate_method_fw_svp:
1297 case dml2_pstate_method_fw_svp_drr:
1298 stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common;
1299 break;
1300 case dml2_pstate_method_fw_drr:
1301 stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common;
1302 break;
1303 case dml2_pstate_method_reserved_hw:
1304 case dml2_pstate_method_reserved_fw:
1305 case dml2_pstate_method_reserved_fw_drr_clamped:
1306 case dml2_pstate_method_reserved_fw_drr_var:
1307 case dml2_pstate_method_count:
1308 case dml2_pstate_method_na:
1309 default:
1310 stream_method_pstate_meta = NULL;
1311 }
1312
1313 return stream_method_pstate_meta;
1314 }
1315
/*
 * is_timing_group_schedulable() - Intersect the allow windows of all streams
 * in a synchronized timing group and check the result is usable.
 *
 * The group window starts from the first (base) stream's per-method allow
 * window; every other member stream narrows it (latest start, earliest end).
 * The group is unschedulable if the base stream has no per-method meta, if
 * the intersection collapses to zero or negative width, or if the resulting
 * disallow time exceeds the FAMS2 max allow delay.
 *
 * On success, @group_pstate_meta holds the combined window plus derived
 * allow/disallow durations.
 */
static bool is_timing_group_schedulable(
	struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_cfg,
	const struct dml2_pmo_pstate_strategy *pstate_strategy,
	const unsigned int timing_group_idx,
	struct dml2_pstate_per_method_common_meta *group_pstate_meta)
{
	unsigned int i;
	struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta;

	unsigned int base_stream_idx = 0;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	/* find base stream idx */
	for (base_stream_idx = 0; base_stream_idx < display_cfg->display_config.num_streams; base_stream_idx++) {
		if (is_bit_set_in_bitfield(s->pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], base_stream_idx)) {
			/* master stream found */
			break;
		}
	}

	/* init allow start and end lines for timing group */
	stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
	if (!stream_method_pstate_meta)
		return false;

	group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
	group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
	group_pstate_meta->period_us = stream_method_pstate_meta->period_us;
	for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
		if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
			/* streams without per-method meta do not constrain the group */
			stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
			if (!stream_method_pstate_meta)
				continue;

			if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) {
				/* set group allow start to larger otg vline */
				group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
			}

			if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) {
				/* set group allow end to smaller otg vline */
				group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
			}

			/* check waveform still has positive width */
			if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) {
				/* timing group is not schedulable */
				return false;
			}
		}
	}

	/* calculate the rest of the meta */
	build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]);

	return group_pstate_meta->allow_time_us > 0.0 &&
			group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}
1375
/*
 * is_config_schedulable() - Determine whether all timing groups' p-state
 * allow windows can coexist on one schedule.
 *
 * Works in escalating stages, returning early on a definite pass or fail:
 *  Stage 0: zero streams is trivially schedulable.
 *  Stage 1: each synchronized timing group must be individually schedulable;
 *           a single group passes outright.
 *  Stage 2: worst-case masking check — no group's allow window may be
 *           entirely covered by the other groups' disallow regions.
 *  Stage 3: a group's allow window fitting the full period of every other
 *           (non-DRR-active) group also passes.
 *  Stage 4: for exactly two non-FW groups, check whether the periodic drift
 *           between their disallow regions stays within the allowed delay.
 */
static bool is_config_schedulable(
	struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_cfg,
	const struct dml2_pmo_pstate_strategy *pstate_strategy)
{
	unsigned int i, j;
	bool schedulable;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	double max_allow_delay_us = 0.0;

	memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta));
	memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);

	/* search for a general solution to the schedule */

	/* STAGE 0: Early return for special cases */
	if (display_cfg->display_config.num_streams == 0) {
		return true;
	}

	/* STAGE 1: confirm allow waves overlap for synchronizable streams */
	schedulable = true;
	for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
		/* seed both sort index arrays with the identity permutation */
		s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
		s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
		if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) {
			/* synchronized timing group was not schedulable */
			schedulable = false;
			break;
		}
		max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us;
	}

	if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
		/* 1. the only timing group was schedulable, so early pass
		 * 2. one of the timing groups was not schedulable, so early fail */
		return schedulable;
	}

	/* STAGE 2: Check allow can't be masked entirely by other disallows */
	schedulable = true;

	/* sort disallow times from greatest to least (bubble sort, early exit
	 * when a pass makes no swap) */
	for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
		bool swapped = false;

		for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
			double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
			double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
			if (j_disallow_us < jp1_disallow_us) {
				/* swap as A < B */
				swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
					s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]);
				swapped = true;
			}
		}

		/* sorted, exit early */
		if (!swapped)
			break;
	}

	/* Check worst case disallow region occurs in the middle of allow for the
	 * other display, or when >2 streams continue to halve the remaining allow time.
	 */
	for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
		if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) {
			/* this timing group always allows */
			continue;
		}

		double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us;
		for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
			unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
			/* stream can't overlap itself */
			if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) {
				max_allow_time_us = math_min2(
					s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us,
					(max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2);

				if (max_allow_time_us < 0.0) {
					/* failed exit early */
					break;
				}
			}
		}

		if (max_allow_time_us <= 0.0) {
			/* not enough time for microschedule in the worst case */
			schedulable = false;
			break;
		}
	}

	if (schedulable && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
		return true;
	}

	/* STAGE 3: check larger allow can fit period of all other streams */
	schedulable = true;

	/* sort periods from greatest to least (same bubble-sort scheme as above) */
	for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
		bool swapped = false;

		for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
			double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
			double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
			if (j_period_us < jp1_period_us) {
				/* swap as A < B */
				swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
					s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]);
				swapped = true;
			}
		}

		/* sorted, exit early */
		if (!swapped)
			break;
	}

	/* check larger allow can fit period of all other streams */
	for (i = 0; i < s->pmo_dcn4.num_timing_groups - 1; i++) {
		unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
		unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];

		if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us ||
			(s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
			schedulable = false;
			break;
		}
	}

	if (schedulable && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
		return true;
	}

	/* STAGE 4: When using HW exclusive modes, check disallow alignments are within allowed threshold */
	if (s->pmo_dcn4.num_timing_groups == 2 &&
		!is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[0]) &&
		!is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, pstate_strategy->per_stream_pstate_method[1])) {
		double period_ratio;
		double max_shift_us;
		double shift_per_period;

		/* default period_0 > period_1 */
		unsigned int lrg_idx = 0;
		unsigned int sml_idx = 1;
		if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) {
			/* period_0 < period_1 */
			lrg_idx = 1;
			sml_idx = 0;
		}
		/* fractional period mismatch drifts the windows by shift_per_period
		 * each cycle of the smaller period */
		period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us;
		shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
		max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us;
		max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us;

		if (shift_per_period > 0.0 &&
			shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us &&
			max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
			schedulable = true;
		}
	}

	return schedulable;
}
1544
/*
 * stream_matches_drr_policy() - Check a stream's p-state method against the
 * DRR capability of the stream and the PMO's DRR policy options.
 *
 * Walks an ordered else-if chain of disqualifying conditions; the first
 * matching rule rejects the method. Covers: non-DRR methods colliding with
 * active DRR in multi-stream configs, FW-exclusive methods needing DRR
 * clamped, DRR-variable and DRR-clamped strategies disabled by policy, and
 * FW methods requiring FAMS2 to be enabled.
 *
 * Return: true if the method is compatible with the stream's DRR state and
 * current policy, false otherwise.
 */
static bool stream_matches_drr_policy(struct dml2_pmo_instance *pmo,
	const struct display_configuation_with_meta *display_cfg,
	const enum dml2_pstate_method stream_pstate_method,
	unsigned int stream_index)
{
	const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[stream_index];
	bool strategy_matches_drr_requirements = true;

	/* check if strategy is compatible with stream drr capability and strategy */
	if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
			display_cfg->display_config.num_streams > 1 &&
			stream_descriptor->timing.drr_config.enabled &&
			(stream_descriptor->timing.drr_config.drr_active_fixed || stream_descriptor->timing.drr_config.drr_active_variable)) {
		/* DRR is active, so config may become unschedulable */
		strategy_matches_drr_requirements = false;
	} else if (is_bit_set_in_bitfield(PMO_NO_DRR_STRATEGY_MASK, stream_pstate_method) &&
			is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
			stream_descriptor->timing.drr_config.enabled &&
			stream_descriptor->timing.drr_config.drr_active_variable) {
		/* DRR is variable, fw exclusive methods require DRR to be clamped */
		strategy_matches_drr_requirements = false;
	} else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
			pmo->options->disable_drr_var_when_var_active &&
			stream_descriptor->timing.drr_config.enabled &&
			stream_descriptor->timing.drr_config.drr_active_variable) {
		/* DRR variable is active, but policy blocks DRR for p-state when this happens */
		strategy_matches_drr_requirements = false;
	} else if (is_bit_set_in_bitfield(PMO_DRR_VAR_STRATEGY_MASK, stream_pstate_method) &&
			(pmo->options->disable_drr_var ||
			!stream_descriptor->timing.drr_config.enabled ||
			stream_descriptor->timing.drr_config.disallowed)) {
		/* DRR variable strategies are disallowed due to settings or policy */
		strategy_matches_drr_requirements = false;
	} else if (is_bit_set_in_bitfield(PMO_DRR_CLAMPED_STRATEGY_MASK, stream_pstate_method) &&
			(pmo->options->disable_drr_clamped ||
			(!stream_descriptor->timing.drr_config.enabled ||
			(!stream_descriptor->timing.drr_config.drr_active_fixed && !stream_descriptor->timing.drr_config.drr_active_variable)) ||
			(pmo->options->disable_drr_clamped_when_var_active &&
			stream_descriptor->timing.drr_config.enabled &&
			stream_descriptor->timing.drr_config.drr_active_variable))) {
		/* DRR fixed strategies are disallowed due to settings or policy */
		strategy_matches_drr_requirements = false;
	} else if (is_bit_set_in_bitfield(PMO_FW_STRATEGY_MASK, stream_pstate_method) &&
			pmo->options->disable_fams2) {
		/* FW modes require FAMS2 */
		strategy_matches_drr_requirements = false;
	}

	return strategy_matches_drr_requirements;
}
1595
validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance * pmo,const struct display_configuation_with_meta * display_cfg,const struct dml2_pmo_pstate_strategy * pstate_strategy)1596 static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_instance *pmo,
1597 const struct display_configuation_with_meta *display_cfg,
1598 const struct dml2_pmo_pstate_strategy *pstate_strategy)
1599 {
1600 struct dml2_pmo_scratch *s = &pmo->scratch;
1601
1602 unsigned int stream_index = 0;
1603
1604 unsigned int svp_count = 0;
1605 unsigned int svp_stream_mask = 0;
1606 unsigned int drr_count = 0;
1607 unsigned int drr_stream_mask = 0;
1608 unsigned int vactive_count = 0;
1609 unsigned int vactive_stream_mask = 0;
1610 unsigned int vblank_count = 0;
1611 unsigned int vblank_stream_mask = 0;
1612
1613 bool strategy_matches_forced_requirements = true;
1614 bool strategy_matches_drr_requirements = true;
1615
1616 // Tabulate everything
1617 for (stream_index = 0; stream_index < display_cfg->display_config.num_streams; stream_index++) {
1618
1619 if (!all_planes_match_method(display_cfg, s->pmo_dcn4.stream_plane_mask[stream_index],
1620 pstate_strategy->per_stream_pstate_method[stream_index])) {
1621 strategy_matches_forced_requirements = false;
1622 break;
1623 }
1624
1625 strategy_matches_drr_requirements &=
1626 stream_matches_drr_policy(pmo, display_cfg, pstate_strategy->per_stream_pstate_method[stream_index], stream_index);
1627
1628 if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
1629 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
1630 svp_count++;
1631 set_bit_in_bitfield(&svp_stream_mask, stream_index);
1632 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
1633 drr_count++;
1634 set_bit_in_bitfield(&drr_stream_mask, stream_index);
1635 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
1636 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
1637 vactive_count++;
1638 set_bit_in_bitfield(&vactive_stream_mask, stream_index);
1639 } else if (pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
1640 pstate_strategy->per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
1641 vblank_count++;
1642 set_bit_in_bitfield(&vblank_stream_mask, stream_index);
1643 }
1644 }
1645
1646 if (!strategy_matches_forced_requirements || !strategy_matches_drr_requirements)
1647 return false;
1648
1649 if (vactive_count > 0 && !all_timings_support_vactive(pmo, display_cfg, vactive_stream_mask))
1650 return false;
1651
1652 if (vblank_count > 0 && (pmo->options->disable_vblank || !all_timings_support_vblank(pmo, display_cfg, vblank_stream_mask)))
1653 return false;
1654
1655 if (drr_count > 0 && (pmo->options->disable_drr_var || !all_timings_support_drr(pmo, display_cfg, drr_stream_mask)))
1656 return false;
1657
1658 if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
1659 return false;
1660
1661
1662 return is_config_schedulable(pmo, display_cfg, pstate_strategy);
1663 }
1664
dcn4_get_vactive_pstate_margin(const struct display_configuation_with_meta * display_cfg,int plane_mask)1665 int dcn4_get_vactive_pstate_margin(const struct display_configuation_with_meta *display_cfg, int plane_mask)
1666 {
1667 unsigned int i;
1668 int min_vactive_margin_us = 0xFFFFFFF;
1669
1670 for (i = 0; i < DML2_MAX_PLANES; i++) {
1671 if (is_bit_set_in_bitfield(plane_mask, i)) {
1672 if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active < min_vactive_margin_us)
1673 min_vactive_margin_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_latency_hiding_margin_in_active;
1674 }
1675 }
1676
1677 return min_vactive_margin_us;
1678 }
1679
get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta * display_cfg,int plane_mask)1680 static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
1681 {
1682 unsigned char i;
1683 int max_vactive_fill_us = 0;
1684
1685 for (i = 0; i < DML2_MAX_PLANES; i++) {
1686 if (is_bit_set_in_bitfield(plane_mask, i)) {
1687 if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us)
1688 max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk];
1689 }
1690 }
1691
1692 return max_vactive_fill_us;
1693 }
1694
/*
 * Precompute the per-stream FAMS2 p-state scheduling metadata (timing-derived
 * vline delays and per-method allow windows) used by the schedulability checks
 * and strategy setup.  Fills pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].
 */
static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo,
	struct display_configuation_with_meta *display_config,
	int stream_index)
{
	const struct dml2_ip_capabilities *ip_caps = pmo->ip_caps;
	const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
	const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
	const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
	struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];

	/* worst case all other streams require some programming at the same time, 0 if only 1 stream */
	unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
		(unsigned int)math_max3(ip_caps->fams2.subvp_programming_delay_us, ip_caps->fams2.drr_programming_delay_us, ip_caps->fams2.allow_programming_delay_us)) *
		(display_config->display_config.num_streams - 1);

	/* common */
	stream_pstate_meta->valid = true;
	/* one OTG line time in us, derived from htotal and pixel clock */
	stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
	stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
	stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
		(stream_pstate_meta->nom_vtotal * timing->h_total);
	stream_pstate_meta->nom_frame_time_us =
		(double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us;
	stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active;

	if (stream_descriptor->timing.drr_config.enabled == true) {
		if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
			/* max vtotal corresponds to the stream's minimum DRR refresh rate (uHz -> Hz via 1e9/khz scaling) */
			stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
				((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
		} else {
			/* assume min of 48Hz */
			stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
				(48000000.0 * stream_descriptor->timing.h_total) * 1e9);
		}
	} else {
		/* no DRR: frame length is fixed at nominal */
		stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal;
	}
	stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
		(stream_pstate_meta->max_vtotal * timing->h_total);
	stream_pstate_meta->max_frame_time_us =
		(double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us;

	/* convert the IP-cap microsecond delays into OTG vline counts (ceil = worst case) */
	stream_pstate_meta->scheduling_delay_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us);
	stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us);
	stream_pstate_meta->contention_delay_otg_vlines =
		(unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us);
	/* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
	stream_pstate_meta->allow_to_target_delay_otg_vlines =
		(unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1;
	stream_pstate_meta->min_allow_width_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us);
	/* this value should account for urgent latency */
	stream_pstate_meta->blackout_otg_vlines =
		(unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
			stream_pstate_meta->otg_vline_time_us);

	/* scheduling params should be built based on the worst case for allow_time:disallow_time */

	/* vactive */
	if (display_config->display_config.num_streams == 1) {
		/* for single stream, guarantee at least an instant of allow */
		stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
			math_max2(0.0,
			timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines));
	} else {
		/* for multi stream, bound to a max fill time defined by IP caps */
		stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
			(unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us);
	}
	stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us;

	if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
		/* allow window opens once the DET can be filled and closes one blackout before vblank */
		stream_pstate_meta->method_vactive.common.allow_start_otg_vline =
			timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
		stream_pstate_meta->method_vactive.common.allow_end_otg_vline =
			stream_pstate_meta->vblank_start -
			stream_pstate_meta->blackout_otg_vlines;
	} else {
		/* no usable vactive allow window */
		stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0;
		stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0;
	}
	stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us;
	build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta);

	/* vblank */
	stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start;
	/* vblank allow window is a single line at vblank start */
	stream_pstate_meta->method_vblank.common.allow_end_otg_vline =
		stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1;
	stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us;
	build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta);

	/* subvp */
	stream_pstate_meta->method_subvp.programming_delay_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
	stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us);
	stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us);
	/* phantom active must cover the allow-to-target delay plus the minimum allow width */
	stream_pstate_meta->method_subvp.phantom_vactive =
		stream_pstate_meta->allow_to_target_delay_otg_vlines +
		stream_pstate_meta->min_allow_width_otg_vlines +
		stream_info->phantom_min_v_active;
	stream_pstate_meta->method_subvp.phantom_vfp =
		stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines;
	/* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/
	/* NOTE(review): df_throttle_delay_otg_vlines is added here in addition to
	 * phantom_vfp, which was just set to the same value above — the throttle
	 * delay appears to be counted twice relative to the formula stated in the
	 * comment. Confirm against the FW scheduling requirements. */
	stream_pstate_meta->method_subvp.phantom_vtotal =
		stream_info->phantom_v_startup +
		stream_pstate_meta->method_subvp.phantom_vfp +
		1 +
		stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines +
		stream_pstate_meta->method_subvp.phantom_vactive;
	/* allow starts only after the phantom frame has been fully programmed and prefetched */
	stream_pstate_meta->method_subvp.common.allow_start_otg_vline =
		stream_descriptor->timing.v_blank_end +
		stream_pstate_meta->contention_delay_otg_vlines +
		stream_pstate_meta->method_subvp.programming_delay_otg_vlines +
		stream_pstate_meta->method_subvp.phantom_vtotal +
		stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
		stream_pstate_meta->allow_to_target_delay_otg_vlines;
	stream_pstate_meta->method_subvp.common.allow_end_otg_vline =
		stream_pstate_meta->vblank_start -
		stream_pstate_meta->blackout_otg_vlines;
	stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us;
	build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta);

	/* drr */
	stream_pstate_meta->method_drr.programming_delay_otg_vlines =
		(unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
	stream_pstate_meta->method_drr.common.allow_start_otg_vline =
		stream_pstate_meta->vblank_start +
		stream_pstate_meta->allow_to_target_delay_otg_vlines;
	stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us;
	if (display_config->display_config.num_streams <= 1) {
		/* only need to stretch vblank for blackout time */
		stream_pstate_meta->method_drr.stretched_vtotal =
			stream_pstate_meta->nom_vtotal +
			stream_pstate_meta->allow_to_target_delay_otg_vlines +
			stream_pstate_meta->min_allow_width_otg_vlines +
			stream_pstate_meta->blackout_otg_vlines;
	} else {
		/* multi display needs to always be schedulable */
		stream_pstate_meta->method_drr.stretched_vtotal =
			stream_pstate_meta->nom_vtotal * 2 +
			stream_pstate_meta->allow_to_target_delay_otg_vlines +
			stream_pstate_meta->min_allow_width_otg_vlines +
			stream_pstate_meta->blackout_otg_vlines;
	}
	stream_pstate_meta->method_drr.common.allow_end_otg_vline =
		stream_pstate_meta->method_drr.stretched_vtotal -
		stream_pstate_meta->blackout_otg_vlines;
	build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta);
}
1848
build_subvp_meta_per_stream(struct dml2_pmo_instance * pmo,struct display_configuation_with_meta * display_config,int stream_index)1849 static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
1850 struct display_configuation_with_meta *display_config,
1851 int stream_index)
1852 {
1853 (void)display_config;
1854 struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
1855 struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
1856
1857 stream_svp_meta->valid = true;
1858
1859 /* PMO FAMS2 precaulcates these values */
1860 stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive;
1861 stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp;
1862 stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal;
1863 }
1864
/*
 * Stage-3 (p-state support) initialization entry point for the DCN4 FAMS2 PMO.
 * Builds the stream/plane mapping, per-stream FAMS2 and SVP metadata, timing
 * groups, and the list of cofunctional p-state strategy candidates.
 * Returns true when at least one valid candidate strategy was found (or all
 * streams are blanked, in which case there is nothing to optimize).
 */
bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
{
	struct dml2_pmo_instance *pmo = in_out->instance;
	struct dml2_optimization_stage3_state *state = &in_out->base_display_config->stage3;
	struct dml2_pmo_scratch *s = &pmo->scratch;

	struct display_configuation_with_meta *display_config;
	const struct dml2_plane_parameters *plane_descriptor;
	const struct dml2_pmo_pstate_strategy *strategy_list = NULL;
	struct dml2_pmo_pstate_strategy override_base_strategy = { 0 };
	unsigned int strategy_list_size = 0;
	unsigned int plane_index, stream_index, i;
	/* stays true only if EVERY plane carries an explicit (non-auto) override */
	bool build_override_strategy = true;

	state->performed = true;
	in_out->base_display_config->stage3.min_clk_index_for_latency = in_out->base_display_config->stage1.min_clk_index_for_latency;

	display_config = in_out->base_display_config;
	display_config->display_config.overrides.enable_subvp_implicit_pmo = true;

	memset(s, 0, sizeof(struct dml2_pmo_scratch));

	/* nothing to optimize when all streams are blanked */
	if (display_config->display_config.overrides.all_streams_blanked) {
		return true;
	}

	/* latency index sweep starts at stage-1's minimum and may climb to the table end */
	pmo->scratch.pmo_dcn4.min_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;
	pmo->scratch.pmo_dcn4.max_latency_index = pmo->mcg_clock_table_size;
	pmo->scratch.pmo_dcn4.cur_latency_index = in_out->base_display_config->stage1.min_clk_index_for_latency;

	// First build the stream plane mask (array of bitfields indexed by stream, indicating plane mapping)
	for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
		plane_descriptor = &display_config->display_config.plane_descriptors[plane_index];

		set_bit_in_bitfield(&s->pmo_dcn4.stream_plane_mask[plane_descriptor->stream_index], plane_index);

		/* default every plane to vactive until a strategy is applied */
		state->pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;

		/* any plane left on auto disables the override-strategy path */
		build_override_strategy &= plane_descriptor->overrides.uclk_pstate_change_strategy != dml2_uclk_pstate_change_strategy_auto;
		override_base_strategy.per_stream_pstate_method[plane_descriptor->stream_index] =
			uclk_pstate_strategy_override_to_pstate_method(plane_descriptor->overrides.uclk_pstate_change_strategy);
	}

	// Figure out which streams can do vactive, and also build up implicit SVP and FAMS2 meta
	for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
		/* vactive-capable = margin covers at least MIN_VACTIVE_MARGIN_PCT of the blackout time */
		if (dcn4_get_vactive_pstate_margin(display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) >= (int)(MIN_VACTIVE_MARGIN_PCT * pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us))
			set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);

		/* FAMS2 meta */
		build_pstate_meta_per_stream(pmo, display_config, stream_index);

		/* SVP meta */
		build_subvp_meta_per_stream(pmo, display_config, stream_index);
	}

	/* get synchronized timing groups */
	build_synchronized_timing_groups(pmo, display_config);

	if (build_override_strategy) {
		/* build expanded override strategy list (no permutations) */
		override_base_strategy.allow_state_increase = true;
		s->pmo_dcn4.num_expanded_override_strategies = 0;
		insert_strategy_into_expanded_list(&override_base_strategy,
			display_config->display_config.num_streams,
			s->pmo_dcn4.expanded_override_strategy_list,
			&s->pmo_dcn4.num_expanded_override_strategies);
		expand_variant_strategy(&override_base_strategy,
			display_config->display_config.num_streams,
			false,
			s->pmo_dcn4.expanded_override_strategy_list,
			&s->pmo_dcn4.num_expanded_override_strategies);

		/* use override strategy list */
		strategy_list = s->pmo_dcn4.expanded_override_strategy_list;
		strategy_list_size = s->pmo_dcn4.num_expanded_override_strategies;
	} else {
		/* use predefined strategy list */
		strategy_list = get_expanded_strategy_list(&pmo->init_data, display_config->display_config.num_streams);
		strategy_list_size = get_num_expanded_strategies(&pmo->init_data, display_config->display_config.num_streams);
	}

	if (!strategy_list || strategy_list_size == 0)
		return false;

	s->pmo_dcn4.num_pstate_candidates = 0;

	/* keep only strategies that are cofunctional, up to the candidate list capacity */
	for (i = 0; i < strategy_list_size && s->pmo_dcn4.num_pstate_candidates < DML2_PMO_PSTATE_CANDIDATE_LIST_SIZE; i++) {
		if (validate_pstate_support_strategy_cofunctionality(pmo, display_config, &strategy_list[i])) {
			insert_into_candidate_list(&strategy_list[i], display_config->display_config.num_streams, s);
		}
	}

	if (s->pmo_dcn4.num_pstate_candidates > 0) {
		/* the last (least preferred) candidate may always raise clocks to meet latency */
		s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.num_pstate_candidates-1].allow_state_increase = true;
		s->pmo_dcn4.cur_pstate_candidate = -1;
		return true;
	} else {
		return false;
	}
}
1965
reset_display_configuration(struct display_configuation_with_meta * display_config)1966 static void reset_display_configuration(struct display_configuation_with_meta *display_config)
1967 {
1968 unsigned int plane_index;
1969 unsigned int stream_index;
1970 struct dml2_plane_parameters *plane;
1971
1972 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
1973 display_config->stage3.stream_svp_meta[stream_index].valid = false;
1974 }
1975
1976 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
1977 plane = &display_config->display_config.plane_descriptors[plane_index];
1978
1979 // Unset SubVP
1980 plane->overrides.legacy_svp_config = dml2_svp_mode_override_auto;
1981
1982 // Remove reserve time
1983 plane->overrides.reserved_vblank_time_ns = 0;
1984
1985 // Reset strategy to auto
1986 plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_auto;
1987
1988 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_na;
1989 }
1990 }
1991
1992
setup_planes_for_drr_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)1993 static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config,
1994 struct dml2_pmo_instance *pmo,
1995 int plane_mask)
1996 {
1997 (void)pmo;
1998 unsigned int plane_index;
1999 struct dml2_plane_parameters *plane;
2000
2001 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2002 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2003 plane = &display_config->display_config.plane_descriptors[plane_index];
2004
2005 plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
2006
2007 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
2008 }
2009 }
2010 }
2011
setup_planes_for_svp_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)2012 static void setup_planes_for_svp_by_mask(struct display_configuation_with_meta *display_config,
2013 struct dml2_pmo_instance *pmo,
2014 int plane_mask)
2015 {
2016 struct dml2_pmo_scratch *scratch = &pmo->scratch;
2017
2018 unsigned int plane_index;
2019 int stream_index = -1;
2020
2021 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2022 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2023 stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
2024 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp;
2025 }
2026 }
2027
2028 if (stream_index >= 0) {
2029 memcpy(&display_config->stage3.stream_svp_meta[stream_index],
2030 &scratch->pmo_dcn4.stream_svp_meta[stream_index],
2031 sizeof(struct dml2_implicit_svp_meta));
2032 }
2033 }
2034
setup_planes_for_svp_drr_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)2035 static void setup_planes_for_svp_drr_by_mask(struct display_configuation_with_meta *display_config,
2036 struct dml2_pmo_instance *pmo,
2037 int plane_mask)
2038 {
2039 struct dml2_pmo_scratch *scratch = &pmo->scratch;
2040
2041 unsigned int plane_index;
2042 int stream_index = -1;
2043
2044 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2045 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2046 stream_index = (char)display_config->display_config.plane_descriptors[plane_index].stream_index;
2047 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_svp_drr;
2048 }
2049 }
2050
2051 if (stream_index >= 0) {
2052 memcpy(&display_config->stage3.stream_svp_meta[stream_index],
2053 &scratch->pmo_dcn4.stream_svp_meta[stream_index],
2054 sizeof(struct dml2_implicit_svp_meta));
2055 }
2056 }
2057
setup_planes_for_vblank_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)2058 static void setup_planes_for_vblank_by_mask(struct display_configuation_with_meta *display_config,
2059 struct dml2_pmo_instance *pmo,
2060 int plane_mask)
2061 {
2062 unsigned int plane_index;
2063 struct dml2_plane_parameters *plane;
2064
2065 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2066 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2067 plane = &display_config->display_config.plane_descriptors[plane_index];
2068
2069 plane->overrides.reserved_vblank_time_ns = (long)math_max2(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000.0,
2070 plane->overrides.reserved_vblank_time_ns);
2071
2072 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
2073 }
2074 }
2075 }
2076
setup_planes_for_vblank_drr_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)2077 static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with_meta *display_config,
2078 struct dml2_pmo_instance *pmo,
2079 int plane_mask)
2080 {
2081 unsigned int plane_index;
2082 struct dml2_plane_parameters *plane;
2083
2084 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2085 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2086 plane = &display_config->display_config.plane_descriptors[plane_index];
2087
2088 plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
2089
2090 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
2091 }
2092 }
2093 }
2094
setup_planes_for_vactive_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)2095 static void setup_planes_for_vactive_by_mask(struct display_configuation_with_meta *display_config,
2096 struct dml2_pmo_instance *pmo,
2097 int plane_mask)
2098 {
2099 unsigned int plane_index;
2100 unsigned int stream_index;
2101
2102 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2103 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2104 stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
2105
2106 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
2107
2108 if (!pmo->options->disable_vactive_det_fill_bw_pad) {
2109 display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
2110 (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
2111 }
2112 }
2113 }
2114 }
2115
setup_planes_for_vactive_drr_by_mask(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int plane_mask)2116 static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_with_meta *display_config,
2117 struct dml2_pmo_instance *pmo,
2118 int plane_mask)
2119 {
2120 unsigned int plane_index;
2121 unsigned int stream_index;
2122
2123 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2124 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2125 stream_index = display_config->display_config.plane_descriptors[plane_index].stream_index;
2126
2127 display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
2128
2129 if (!pmo->options->disable_vactive_det_fill_bw_pad) {
2130 display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
2131 (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
2132 }
2133 }
2134 }
2135 }
2136
setup_display_config(struct display_configuation_with_meta * display_config,struct dml2_pmo_instance * pmo,int strategy_index)2137 static bool setup_display_config(struct display_configuation_with_meta *display_config, struct dml2_pmo_instance *pmo, int strategy_index)
2138 {
2139 struct dml2_pmo_scratch *scratch = &pmo->scratch;
2140
2141 bool fams2_required = false;
2142 bool success = true;
2143 unsigned int stream_index;
2144
2145 reset_display_configuration(display_config);
2146
2147 for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
2148
2149 if (pmo->scratch.pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
2150 success = false;
2151 break;
2152 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive) {
2153 setup_planes_for_vactive_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2154 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank) {
2155 setup_planes_for_vblank_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2156 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp) {
2157 fams2_required = true;
2158 setup_planes_for_svp_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2159 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
2160 fams2_required = true;
2161 setup_planes_for_vactive_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2162 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
2163 fams2_required = true;
2164 setup_planes_for_vblank_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2165 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
2166 fams2_required = true;
2167 setup_planes_for_svp_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2168 } else if (scratch->pmo_dcn4.pstate_strategy_candidates[strategy_index].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
2169 fams2_required = true;
2170 setup_planes_for_drr_by_mask(display_config, pmo, scratch->pmo_dcn4.stream_plane_mask[stream_index]);
2171 }
2172 }
2173
2174 /* copy FAMS2 meta */
2175 if (success) {
2176 display_config->stage3.fams2_required = fams2_required;
2177 memcpy(&display_config->stage3.stream_pstate_meta,
2178 &scratch->pmo_dcn4.stream_pstate_meta,
2179 sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES);
2180 }
2181
2182 return success;
2183 }
2184
dcn4_get_minimum_reserved_time_us_for_planes(const struct display_configuation_with_meta * display_config,int plane_mask)2185 int dcn4_get_minimum_reserved_time_us_for_planes(
2186 const struct display_configuation_with_meta *display_config,
2187 int plane_mask)
2188 {
2189 int min_time_us = 0xFFFFFF;
2190 unsigned int plane_index = 0;
2191
2192 for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
2193 if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
2194 if (min_time_us > (display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000))
2195 min_time_us = display_config->display_config.plane_descriptors[plane_index].overrides.reserved_vblank_time_ns / 1000;
2196 }
2197 }
2198 return min_time_us;
2199 }
2200
pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out * in_out)2201 bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_support_in_out *in_out)
2202 {
2203 bool p_state_supported = true;
2204 unsigned int stream_index;
2205 struct dml2_pmo_scratch *s = &in_out->instance->scratch;
2206
2207 int MIN_VACTIVE_MARGIN_VBLANK = 0;
2208 int MIN_VACTIVE_MARGIN_DRR = 0;
2209 int REQUIRED_RESERVED_TIME = 0;
2210
2211 if (in_out->base_display_config->display_config.overrides.all_streams_blanked) {
2212 return true;
2213 }
2214
2215 MIN_VACTIVE_MARGIN_VBLANK = INT_MIN;
2216 MIN_VACTIVE_MARGIN_DRR = INT_MIN;
2217 REQUIRED_RESERVED_TIME = (int)in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us;
2218
2219 if (s->pmo_dcn4.cur_pstate_candidate < 0)
2220 return false;
2221
2222 for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
2223 struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index];
2224
2225 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
2226 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
2227 if (dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
2228 get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
2229 p_state_supported = false;
2230 break;
2231 }
2232 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vblank ||
2233 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vblank_drr) {
2234 if (dcn4_get_minimum_reserved_time_us_for_planes(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) <
2235 REQUIRED_RESERVED_TIME ||
2236 dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_VBLANK) {
2237 p_state_supported = false;
2238 break;
2239 }
2240 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp ||
2241 s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_svp_drr) {
2242 if (in_out->base_display_config->stage3.stream_svp_meta[stream_index].valid == false) {
2243 p_state_supported = false;
2244 break;
2245 }
2246 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_drr) {
2247 if (!all_planes_match_method(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index], dml2_pstate_method_fw_drr) ||
2248 dcn4_get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < MIN_VACTIVE_MARGIN_DRR) {
2249 p_state_supported = false;
2250 break;
2251 }
2252 } else if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_na) {
2253 p_state_supported = false;
2254 break;
2255 }
2256 }
2257
2258 return p_state_supported;
2259 }
2260
pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out * in_out)2261 bool pmo_dcn4_fams2_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_support_in_out *in_out)
2262 {
2263 bool success = false;
2264 struct dml2_pmo_scratch *s = &in_out->instance->scratch;
2265
2266 memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
2267
2268 if (in_out->last_candidate_failed) {
2269 if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].allow_state_increase &&
2270 s->pmo_dcn4.cur_latency_index < s->pmo_dcn4.max_latency_index - 1) {
2271 s->pmo_dcn4.cur_latency_index++;
2272
2273 success = true;
2274 }
2275 }
2276
2277 if (!success) {
2278 s->pmo_dcn4.cur_latency_index = s->pmo_dcn4.min_latency_index;
2279 s->pmo_dcn4.cur_pstate_candidate++;
2280
2281 if (s->pmo_dcn4.cur_pstate_candidate < s->pmo_dcn4.num_pstate_candidates) {
2282 success = true;
2283 }
2284 }
2285
2286 if (success) {
2287 in_out->optimized_display_config->stage3.min_clk_index_for_latency = s->pmo_dcn4.cur_latency_index;
2288 setup_display_config(in_out->optimized_display_config, in_out->instance, in_out->instance->scratch.pmo_dcn4.cur_pstate_candidate);
2289 }
2290
2291 return success;
2292 }
2293
pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out * in_out)2294 bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
2295 {
2296 bool success = true;
2297 struct dml2_pmo_instance *pmo = in_out->instance;
2298 bool stutter_period_meets_z8_eco = true;
2299 bool z8_stutter_optimization_too_expensive = false;
2300 bool stutter_optimization_too_expensive = false;
2301 double line_time_us, vblank_nom_time_us;
2302
2303 unsigned int i;
2304
2305 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
2306 pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
2307 pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us)
2308 return false; // Unexpected SoCBB setup
2309
2310 for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
2311 if (in_out->base_display_config->mode_support_result.cfg_support_info.plane_support_info[i].active_latency_hiding_us <
2312 pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us + pmo->soc_bb->power_management_parameters.z8_min_idle_time) {
2313 stutter_period_meets_z8_eco = false;
2314 break;
2315 }
2316 }
2317
2318 for (i = 0; i < in_out->base_display_config->display_config.num_streams; i++) {
2319 line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
2320 vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
2321
2322 if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
2323 z8_stutter_optimization_too_expensive = true;
2324 break;
2325 }
2326
2327 if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
2328 stutter_optimization_too_expensive = true;
2329 break;
2330 }
2331 }
2332
2333 pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
2334 pmo->scratch.pmo_dcn4.cur_stutter_candidate = 0;
2335
2336 if (stutter_period_meets_z8_eco && !z8_stutter_optimization_too_expensive) {
2337 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0) {
2338 pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us;
2339 pmo->scratch.pmo_dcn4.num_stutter_candidates++;
2340 pmo->scratch.pmo_dcn4.z8_vblank_optimizable = true;
2341 }
2342 } else {
2343 pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
2344 }
2345
2346 if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
2347 pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
2348 pmo->scratch.pmo_dcn4.num_stutter_candidates++;
2349 }
2350
2351 if (pmo->scratch.pmo_dcn4.num_stutter_candidates == 0)
2352 success = false;
2353
2354 return success;
2355 }
2356
pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out * in_out)2357 bool pmo_dcn4_fams2_test_for_stutter(struct dml2_pmo_test_for_stutter_in_out *in_out)
2358 {
2359 bool success = true;
2360 struct dml2_pmo_instance *pmo = in_out->instance;
2361
2362 unsigned int i;
2363
2364 for (i = 0; i < in_out->base_display_config->display_config.num_planes; i++) {
2365 if (pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us > 0 &&
2366 pmo->scratch.pmo_dcn4.z8_vblank_optimizable &&
2367 in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * 1000) {
2368 success = false;
2369 break;
2370 }
2371 if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0 &&
2372 in_out->base_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns < (int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * 1000) {
2373 success = false;
2374 break;
2375 }
2376 }
2377
2378 return success;
2379 }
2380
pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out * in_out)2381 bool pmo_dcn4_fams2_optimize_for_stutter(struct dml2_pmo_optimize_for_stutter_in_out *in_out)
2382 {
2383 bool success = false;
2384 struct dml2_pmo_instance *pmo = in_out->instance;
2385 unsigned int i;
2386
2387 memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
2388
2389 if (!in_out->last_candidate_failed) {
2390 if (pmo->scratch.pmo_dcn4.cur_stutter_candidate < pmo->scratch.pmo_dcn4.num_stutter_candidates) {
2391 for (i = 0; i < in_out->optimized_display_config->display_config.num_planes; i++) {
2392 /* take the max of the current and the optimal reserved time */
2393 in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns =
2394 (long)math_max2(pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.cur_stutter_candidate] * 1000,
2395 in_out->optimized_display_config->display_config.plane_descriptors[i].overrides.reserved_vblank_time_ns);
2396 }
2397
2398 success = true;
2399 }
2400 }
2401
2402 return success;
2403 }
2404