/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
#define GPINT_RETRY_NUM 20

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}
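
/*
 * Typical lifecycle of the service (illustrative sketch only; the actual
 * call sites and error handling live in the DM layer):
 *
 *	dc->ctx->dmub_srv = dc_dmub_srv_create(dc, dmub);
 *	...
 *	dc_dmub_srv_destroy(&dc->ctx->dmub_srv);
 */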

bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	do {
		status = dmub_srv_wait_for_pending(dmub, 100000);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}

	return status == DMUB_STATUS_OK;
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status = DMUB_STATUS_OK;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		/* confirm no messages pending */
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		/* queue command */
		if (status == DMUB_STATUS_OK)
			status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);

		/* check for errors */
		if (status != DMUB_STATUS_OK)
			break;
	}

	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
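		/*
		 * Commands flagged multi_cmd_pending must reach the firmware
		 * as one contiguous group, so only queue the next one when
		 * the remainder of the list is guaranteed to fit in the ring
		 * buffer; otherwise report the queue as full and flush first.
		 */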
		if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
				dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		} else {
			status = DMUB_STATUS_QUEUE_FULL;
		}

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_fb_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_fb_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	bool res = false;

	if (dc_dmub_srv && dc_dmub_srv->dmub) {
		if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
			res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		} else {
			res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		}

		if (res)
			res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
	}

	return res;
}

bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
		enum dm_dmub_wait_type wait_type,
		union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			if (!dmub->debug.timeout_info.timeout_occured) {
				dmub->debug.timeout_info.timeout_occured = true;
				if (cmd_list)
					dmub->debug.timeout_info.timeout_cmd = *cmd_list;
				dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
			}
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list)
			dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
	}

	return true;
}

bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
		return false;

	return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}
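
/*
 * Typical single-command usage (illustrative sketch only; it mirrors the
 * pattern used by the DRR helpers later in this file):
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 *	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
 *	...
 *	dc_dmub_srv_cmd_run(dc->ctx->dmub_srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */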

bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

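/*
 * Note: despite the plural name, this helper returns the index of the last
 * pipe in the current state whose stream matches -- a single pipe index,
 * not a bitmask of pipes.
 */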
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}

	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *head_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
		}
	}
	fams_pipe_data->pipe_count = pipe_idx;
}

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
	union dmub_rb_cmd cmd = { 0 };
	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
	int i = 0, k = 0;
	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Re-enable it.
	uint8_t visual_confirm_enabled;
	int pipe_idx = 0;
	struct dc_stream_status *stream_status = NULL;

	if (dc == NULL)
		return false;

	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

	// Format command.
	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

	if (should_manage_pstate) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->stream)
				continue;

			/* If FAMS is being used to support P-State and there is a stream
			 * that does not use FAMS, we are in an FPO + VActive scenario.
			 * Assign the VActive stretch margin in this case.
			 */
			stream_status = dc_state_get_stream_status(context, pipe->stream);
			if (stream_status && !stream_status->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!resource_is_pipe_type(pipe, OTG_MASTER))
			continue;

		stream_status = dc_state_get_stream_status(context, pipe->stream);
		if (stream_status && stream_status->fpo_in_use) {
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
			dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
		return;

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy the visual confirm color to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
		cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
			&cmd.visual_confirm_color.visual_confirm_color_data,
			sizeof(struct dmub_visual_confirm_color));
	}
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 * @context: [in] DC state for access to phantom stream
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
			(((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}
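
/*
 * Worked example for the conversions above (illustrative numbers only):
 * with drr_timing->v_total = 1125, h_total = 2200 and
 * pix_clk_100hz = 1485000 (148.5 MHz),
 *
 *	drr_frame_us = 1125 * 2200 * 1000000 / 148500000 ~= 16666 us
 *
 * i.e. a 60 Hz frame. min_vtotal_supported then converts the minimum
 * supportable frame time (frame + MALL region + margin) back into lines
 * using the inverse formula with the same pixel clock and h_total.
 */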

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for the SubVP + VBLANK case is
 * stored in the pipe_data (subvp_data and vblank_data). Also check if the
 * VBLANK pipe is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for the master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
							vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
			vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for the SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	if (!phantom_stream0)
		return;

	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	if (!phantom_stream1)
		return;

	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP pipe has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing1->h_total * 1000000));

	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing0->h_total * 1000000));
	}
}
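
/*
 * Illustrative sketch of the balancing above (hypothetical values): if
 * subvp0_prefetch_us = 100 and subvp1_prefetch_us = 80, then
 * prefetch_delta_us = 20 and pipe 1's prefetch_to_mall_start_lines is
 * stretched to
 *
 *	ceil((subvp_prefetch_end_to_mall_start_us + 20) * pix_clk_hz /
 *	     (h_total * 1000000))
 *
 * lines, so that both MALL regions begin at the same time. The
 * "add (divisor - 1) before dividing" pattern in the code above is
 * integer ceiling division.
 */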

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
			main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
			main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor".
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
			phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up: adding (divisor - 1) before dividing gives ceiling division
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find the phantom pipe index based on the phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
				phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true, populate the per-pipe SubVP info
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
			sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For the SubVP pipe count, only count the top-most (ODM / MPC) pipe
		 */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				resource_is_pipe_type(pipe, DPP_PIPE) &&
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the top-most (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
			pipe_idx++;
		}
		if (subvp_count == 2)
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);

		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	uint32_t i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
		DC_LOG_DEBUG("    pc[%d]             : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
	DC_LOG_DEBUG("    outbox1_rptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
	DC_LOG_DEBUG("    outbox1_wptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
	DC_LOG_DEBUG("    outbox1_size       : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
	DC_LOG_DEBUG("    is_enabled         : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
			resource_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
		pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
		pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
		pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: the cursor rect is built from position & attributes.
	 * x & y are obtained from the position.
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from the attributes */
	payload->cursor_rect.width  = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx    = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw     = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor-related information and passes it to the
 * DMUB.
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
					&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for the dmub command, the 2nd command
	 * is only assigned to store cursor attribute info.
	 * The 1st command can be viewed as 2 parts: one for PSR/Replay data,
	 * the other to store cursor position info.
	 *
	 * The command header type must be the same type if using
	 * multi_cmd_pending. Besides, while processing the 2nd command in the
	 * DMU, the sub type is unused, so it is meaningless to pass a sub type
	 * header with a different type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; // To combine multiple dmu cmds; this is the 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; // Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Send both chained update_cursor_info commands to the DMU */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;

	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else {
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);
	}

	return true;
}

static int count_active_streams(const struct dc *dc)
{
	int i, count = 0;

	for (i = 0; i < dc->current_state->stream_count; ++i) {
		struct dc_stream_state *stream = dc->current_state->streams[i];

		if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
			count += 1;
	}

	return count;
}

static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;
	ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;

	if (dc->work_arounds.skip_psr_ips_crtc_disable)
		cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;

	if (allow_idle) {
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals new_signals;

		DC_LOG_IPS(
			"%s wait idle (ips1_commit=%u ips2_commit=%u)",
			__func__,
			ips_fw->signals.bits.ips1_commit,
			ips_fw->signals.bits.ips2_commit);

		dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);

		memset(&new_signals, 0, sizeof(new_signals));

		new_signals.bits.allow_idle = 1; /* always set */

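		/*
		 * Summary of the mapping below, from the disable_ips debug
		 * config to the idle signals granted to firmware:
		 *   ENABLE / DISABLE_DYNAMIC:  pg, ips1, ips2, z10
		 *   DISABLE_IPS1:              ips1
		 *   DISABLE_IPS2:              pg, ips1
		 *   DISABLE_IPS2_Z10:          pg, ips1, ips2
		 *   RCG_IN_ACTIVE_IPS2_IN_OFF: everything when there are no
		 *                              active streams, otherwise ips1
		 *                              only (RCG)
		 */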
		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
			/* TODO: Move this logic out to hwseq */
			if (count_active_streams(dc) == 0) {
				/* IPS2 - Display off */
				new_signals.bits.allow_pg = 1;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 1;
				new_signals.bits.allow_z10 = 1;
			} else {
				/* RCG only */
				new_signals.bits.allow_pg = 0;
				new_signals.bits.allow_ips1 = 1;
				new_signals.bits.allow_ips2 = 0;
				new_signals.bits.allow_z10 = 0;
			}
		}

		ips_driver->signals = new_signals;
		dc_dmub_srv->driver_signals = ips_driver->signals;
	}

	DC_LOG_IPS(
		"%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
		__func__,
		allow_idle,
		ips_fw->signals.bits.ips1_commit,
		ips_fw->signals.bits.ips2_commit);

	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);

	/* Register access should stop at this point. */
	if (allow_idle)
		dc_dmub_srv->needs_idle_wake = true;
}
1317 
1318 static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
1319 {
1320 	struct dc_dmub_srv *dc_dmub_srv;
1321 	uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0;
1322 
1323 	if (dc->debug.dmcub_emulation)
1324 		return;
1325 
1326 	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
1327 		return;
1328 
1329 	dc_dmub_srv = dc->ctx->dmub_srv;
1330 
1331 	if (dc->clk_mgr->funcs->exit_low_power_state) {
1332 		volatile const struct dmub_shared_state_ips_fw *ips_fw =
1333 			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1334 		volatile struct dmub_shared_state_ips_driver *ips_driver =
1335 			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1336 		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
1337 
1338 		rcg_exit_count = ips_fw->rcg_exit_count;
1339 		ips1_exit_count = ips_fw->ips1_exit_count;
1340 		ips2_exit_count = ips_fw->ips2_exit_count;
1341 
1342 		ips_driver->signals.all = 0;
1343 		dc_dmub_srv->driver_signals = ips_driver->signals;
1344 
1345 		DC_LOG_IPS(
1346 			"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)",
1347 			__func__,
1348 			ips_driver->signals.bits.allow_ips1,
1349 			ips_driver->signals.bits.allow_ips2,
1350 			ips_fw->signals.bits.ips1_commit,
1351 			ips_fw->signals.bits.ips2_commit,
1352 			ips_fw->rcg_entry_count,
1353 			ips_fw->ips1_entry_count,
1354 			ips_fw->ips2_entry_count);
1355 
1356 		/* Note: register access has technically not resumed for DCN here, but we
1357 		 * need to be message PMFW through our standard register interface.
1358 		 */
1359 		dc_dmub_srv->needs_idle_wake = false;
1360 
1361 		if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
1362 		    (!dc->debug.optimize_ips_handshake ||
1363 		     ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) {
1364 			DC_LOG_IPS(
1365 				"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
1366 				ips_fw->signals.bits.ips1_commit,
1367 				ips_fw->signals.bits.ips2_commit);
1368 
1369 			if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
1370 				udelay(dc->debug.ips2_eval_delay_us);
1371 
1372 			DC_LOG_IPS(
1373 				"exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
1374 				ips_fw->signals.bits.ips1_commit,
1375 				ips_fw->signals.bits.ips2_commit);
1376 
1377 			// Tell PMFW to exit low power state
1378 			dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1379 
1380 			if (ips_fw->signals.bits.ips2_commit) {
1381 
1382 				DC_LOG_IPS(
1383 					"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
1384 					ips_fw->signals.bits.ips1_commit,
1385 					ips_fw->signals.bits.ips2_commit);
1386 
1387 				// Wait for IPS2 entry upper bound
1388 				udelay(dc->debug.ips2_entry_delay_us);
1389 
1390 				DC_LOG_IPS(
1391 					"exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
1392 					ips_fw->signals.bits.ips1_commit,
1393 					ips_fw->signals.bits.ips2_commit);
1394 
1395 				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1396 
1397 				DC_LOG_IPS(
1398 					"wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
1399 					ips_fw->signals.bits.ips1_commit,
1400 					ips_fw->signals.bits.ips2_commit);
1401 
1402 				while (ips_fw->signals.bits.ips2_commit)
1403 					udelay(1);
1404 
1405 				DC_LOG_IPS(
1406 					"wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
1407 					ips_fw->signals.bits.ips1_commit,
1408 					ips_fw->signals.bits.ips2_commit);
1409 
1410 				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1411 					ASSERT(0);
1412 
1413 				DC_LOG_IPS(
1414 					"resync inbox1 (ips1_commit=%u ips2_commit=%u)",
1415 					ips_fw->signals.bits.ips1_commit,
1416 					ips_fw->signals.bits.ips2_commit);
1417 
1418 				dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
1419 			}
1420 		}
1421 
1422 		dc_dmub_srv_notify_idle(dc, false);
1423 		if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
1424 			DC_LOG_IPS(
1425 				"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)",
1426 				ips_fw->signals.bits.ips1_commit,
1427 				ips_fw->signals.bits.ips2_commit);
1428 
1429 			while (ips_fw->signals.bits.ips1_commit)
1430 				udelay(1);
1431 
1432 			DC_LOG_IPS(
1433 				"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)",
1434 				ips_fw->signals.bits.ips1_commit,
1435 				ips_fw->signals.bits.ips2_commit);
1436 		}
1437 	}
1438 
1439 	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1440 		ASSERT(0);
1441 
1442 	DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)",
1443 		__func__,
1444 		rcg_exit_count,
1445 		ips1_exit_count,
1446 		ips2_exit_count);
1447 }
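
/*
 * Summary of the exit sequence above, for reference (no new logic):
 *
 *	1. Clear all driver allow signals in shared state.
 *	2. If IPS2 was allowed, message PMFW via
 *	   clk_mgr->funcs->exit_low_power_state(); if FW committed IPS2,
 *	   wait the entry upper bound, message PMFW again, poll
 *	   ips2_commit until clear, then resync the DMCUB inboxes.
 *	3. Re-send the idle notification (now disallowing) and, if IPS1
 *	   was allowed, poll ips1_commit until clear.
 *	4. Assert that hardware power-up completed.
 */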
1448 
1449 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
1450 {
1451 	struct dmub_srv *dmub;
1452 
1453 	if (!dc_dmub_srv)
1454 		return;
1455 
1456 	dmub = dc_dmub_srv->dmub;
1457 
1458 	if (power_state == DC_ACPI_CM_POWER_STATE_D0)
1459 		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
1460 	else
1461 		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
1462 }
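
/*
 * Usage sketch (illustrative; real call sites live in the DM layer):
 *
 *	dc_dmub_srv_set_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
 *	...
 *	dc_dmub_srv_set_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
 *
 * Note this only forwards the state to dmub_srv; unlike
 * dc_dmub_srv_notify_fw_dc_power_state() below, it does not queue a ring
 * buffer command to the firmware.
 */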
1463 
1464 void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
1465 					  enum dc_acpi_cm_power_state power_state)
1466 {
1467 	union dmub_rb_cmd cmd;
1468 
1469 	if (!dc_dmub_srv)
1470 		return;
1471 
1472 	memset(&cmd, 0, sizeof(cmd));
1473 
1474 	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
1475 	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
1476 	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
1477 		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
1478 
1479 	if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
1480 		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
1481 	} else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
1482 		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
1483 	} else {
1484 		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
1485 	}
1486 
1487 	dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1488 }
1489 
1490 bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
1491 {
1492 	volatile const struct dmub_shared_state_ips_fw *ips_fw;
1493 	bool reallow_idle = false, should_detect = false;
1494 
1495 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1496 		return false;
1497 
1498 	if (dc_dmub_srv->dmub->shared_state &&
1499 	    dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
1500 		ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1501 		return ips_fw->signals.bits.detection_required;
1502 	}
1503 
1504 	/* Detection may require reading scratch 0 - exit out of idle prior to the read. */
1505 	if (dc_dmub_srv->idle_allowed) {
1506 		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
1507 		reallow_idle = true;
1508 	}
1509 
1510 	should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);
1511 
1512 	/* Re-enter idle if we're not about to immediately redetect links. */
1513 	if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1514 	    !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
1515 		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);
1516 
1517 	return should_detect;
1518 }
1519 
1520 void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
1521 {
1522 	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
1523 
1524 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1525 		return;
1526 
1527 	allow_idle &= (!dc->debug.ips_disallow_entry);
1528 
1529 	if (dc_dmub_srv->idle_allowed == allow_idle)
1530 		return;
1531 
1532 	DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
1533 
1534 	/*
1535 	 * Entering a low power state requires a driver notification.
1536 	 * Powering up the hardware requires notifying PMFW and DMCUB.
1537 	 * Clearing the driver's idle allow signal requires a DMCUB command.
1538 	 * DMCUB commands require the DMCUB to be powered up and restored.
1539 	 */
1540 
1541 	if (!allow_idle) {
1542 		dc_dmub_srv->idle_exit_counter += 1;
1543 
1544 		dc_dmub_srv_exit_low_power_state(dc);
1545 		/*
1546 		 * Idle is considered fully exited only after the sequence above
1547 		 * fully completes. If two threads race to exit at the same
1548 		 * time, it's safe to perform the sequence twice, as long as
1549 		 * we're not re-entering.
1550 		 *
1551 		 * Infinite command submission is avoided by using the
1552 		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
1553 		 */
1554 		dc_dmub_srv->idle_allowed = false;
1555 
1556 		dc_dmub_srv->idle_exit_counter -= 1;
1557 		if (dc_dmub_srv->idle_exit_counter < 0) {
1558 			ASSERT(0);
1559 			dc_dmub_srv->idle_exit_counter = 0;
1560 		}
1561 	} else {
1562 		/* Consider idle as notified prior to the actual submission to
1563 		 * prevent multiple entries. */
1564 		dc_dmub_srv->idle_allowed = true;
1565 
1566 		dc_dmub_srv_notify_idle(dc, allow_idle);
1567 	}
1568 }
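
/*
 * Typical bracket pattern (illustrative): callers that need register or
 * DMCUB access while idle is allowed first force an exit, then re-allow
 * idle on the way out:
 *
 *	dc_dmub_srv_apply_idle_power_optimizations(dc, false);	// wake
 *	... register access / command submission ...
 *	dc_dmub_srv_apply_idle_power_optimizations(dc, true);	// re-allow
 *
 * The dc_wake_and_execute_*() helpers below implement exactly this
 * bracket around command and GPINT submission.
 */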
1569 
1570 bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
1571 				  enum dm_dmub_wait_type wait_type)
1572 {
1573 	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
1574 }
1575 
1576 bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
1577 				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
1578 {
1579 	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1580 	bool result = false, reallow_idle = false;
1581 
1582 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1583 		return false;
1584 
1585 	if (count == 0)
1586 		return true;
1587 
1588 	if (dc_dmub_srv->idle_allowed) {
1589 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1590 		reallow_idle = true;
1591 	}
1592 
1593 	/*
1594 	 * Single-command and command-list submission may have different
1595 	 * implementations in DM, so route each case to the expected helper.
1596 	 */
1597 	if (count > 1)
1598 		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
1599 	else
1600 		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
1601 
1602 	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1603 	    !ctx->dc->debug.disable_dmub_reallow_idle)
1604 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1605 
1606 	return result;
1607 }
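
/*
 * Usage note: dc_dmub_srv_notify_fw_dc_power_state() above is a canonical
 * caller: it fills a single union dmub_rb_cmd and submits it via
 * dc_wake_and_execute_dmub_cmd() with DM_DMUB_WAIT_TYPE_WAIT, relying on
 * the idle exit/re-allow bracket implemented here rather than managing
 * IPS state itself.
 */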
1608 
1609 static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1610 				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1611 {
1612 	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1613 	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
1614 	enum dmub_status status;
1615 
1616 	if (response)
1617 		*response = 0;
1618 
1619 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1620 		return false;
1621 
1622 	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
1623 	if (status != DMUB_STATUS_OK) {
1624 		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
1625 			return true;
1626 
1627 		return false;
1628 	}
1629 
1630 	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
1631 		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
1632 
1633 	return true;
1634 }
1635 
1636 bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1637 			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1638 {
1639 	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1640 	bool result = false, reallow_idle = false;
1641 
1642 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1643 		return false;
1644 
1645 	if (dc_dmub_srv->idle_allowed) {
1646 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1647 		reallow_idle = true;
1648 	}
1649 
1650 	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
1651 
1652 	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1653 	    !ctx->dc->debug.disable_dmub_reallow_idle)
1654 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1655 
1656 	return result;
1657 }
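
/*
 * Usage sketch (illustrative): a GPINT query that expects a reply. With
 * DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY the response register is read back;
 * with DM_DMUB_WAIT_TYPE_NO_WAIT a timeout is treated as success since
 * no acknowledgement was requested:
 *
 *	uint32_t percent = 0;
 *
 *	if (dc_wake_and_execute_gpint(ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
 *				      (uint16_t)DMUB_IPS_MODE_IPS2, &percent,
 *				      DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 *		// percent now holds the FW-reported residency
 */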
1658 
1659 void dc_dmub_srv_fams2_update_config(struct dc *dc,
1660 		struct dc_state *context,
1661 		bool enable)
1662 {
1663 	uint8_t num_cmds = 1;
1664 	uint32_t i;
1665 	union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
1666 	struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
1667 
1668 	memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));
1669 	/* fill in generic command header */
1670 	global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1671 	global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1672 	global_cmd->header.payload_bytes =
1673 			sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1674 
1675 	if (enable) {
1676 		/* send global configuration parameters */
1677 		memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));
1678 
1679 		/* copy static feature configuration overrides */
1680 		global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
1681 		global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
1682 		global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
1683 
1684 		/* construct per-stream configs */
1685 		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
1686 			struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
1687 			struct dmub_rb_cmd_fams2 *stream_sub_state_cmd = &cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;
1688 
1689 			/* configure command header */
1690 			stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1691 			stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1692 			stream_base_cmd->header.payload_bytes =
1693 					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1694 			stream_base_cmd->header.multi_cmd_pending = 1;
1695 			stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1696 			stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1697 			stream_sub_state_cmd->header.payload_bytes =
1698 					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1699 			stream_sub_state_cmd->header.multi_cmd_pending = 1;
1700 			/* copy stream static base state */
1701 			memcpy(&stream_base_cmd->config,
1702 					&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
1703 					sizeof(union dmub_cmd_fams2_config));
1704 			/* copy stream static sub state */
1705 			memcpy(&stream_sub_state_cmd->config,
1706 					&context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
1707 					sizeof(union dmub_cmd_fams2_config));
1708 		}
1709 	}
1710 
1711 	/* apply feature configuration based on current driver state */
1712 	global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
1713 	global_cmd->config.global.features.bits.enable = enable;
1714 
1715 	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
1716 		/* set multi pending for global, and unset for last stream cmd */
1717 		global_cmd->header.multi_cmd_pending = 1;
1718 		cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
1719 		num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
1720 	}
1721 
1722 	dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
1723 }
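
/*
 * Command buffer layout used above, for N = num_streams (illustrative):
 *
 *	cmd[0]			global config
 *	cmd[1] .. cmd[N]	per-stream base state
 *	cmd[N+1] .. cmd[2N]	per-stream sub state
 *
 * multi_cmd_pending is set on every entry except the last so that DMCUB
 * consumes the whole chain as one logical sequence.
 */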
1724 
1725 void dc_dmub_srv_fams2_drr_update(struct dc *dc,
1726 		uint32_t tg_inst,
1727 		uint32_t vtotal_min,
1728 		uint32_t vtotal_max,
1729 		uint32_t vtotal_mid,
1730 		uint32_t vtotal_mid_frame_num,
1731 		bool program_manual_trigger)
1732 {
1733 	union dmub_rb_cmd cmd = { 0 };
1734 
1735 	cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1736 	cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
1737 	cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
1738 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
1739 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
1740 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
1741 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
1742 	cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
1743 
1744 	cmd.fams2_drr_update.header.payload_bytes =
1745 			sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
1746 
1747 	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1748 }
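
/*
 * Usage sketch (illustrative values): clamp a stream's DRR range by
 * programming OPTC state through DMCUB instead of direct register writes:
 *
 *	dc_dmub_srv_fams2_drr_update(dc, tg_inst,
 *				     vtotal_min, vtotal_max,
 *				     0, 0,	// no mid-point vtotal
 *				     false);	// no manual trigger
 */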
1749 
1750 void dc_dmub_srv_fams2_passthrough_flip(
1751 		struct dc *dc,
1752 		struct dc_state *state,
1753 		struct dc_stream_state *stream,
1754 		struct dc_surface_update *srf_updates,
1755 		int surface_count)
1756 {
1757 	int plane_index;
1758 	union dmub_rb_cmd cmds[MAX_PLANES];
1759 	struct dc_plane_address *address;
1760 	struct dc_plane_state *plane_state;
1761 	int num_cmds = 0;
1762 	struct dc_stream_status *stream_status = dc_stream_get_status(stream);
1763 
1764 	if (surface_count <= 0 || stream_status == NULL)
1765 		return;
1766 
1767 	memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);
1768 
1769 	/* build command for each surface update */
1770 	for (plane_index = 0; plane_index < surface_count; plane_index++) {
1771 		plane_state = srf_updates[plane_index].surface;
1772 		address = &plane_state->address;
1773 
1774 		/* skip if there is no address update for plane */
1775 		if (!srf_updates[plane_index].flip_addr)
1776 			continue;
1777 
1778 		/* build command header */
1779 		cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1780 		cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
1781 		cmds[num_cmds].fams2_flip.header.payload_bytes =
1782 				sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
1783 
1784 		/* for chaining multiple commands, all but the last command should set this to 1 */
1785 		cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
1786 
1787 		/* set topology info */
1788 		cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
1789 		cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;
1791 
1792 		cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;
1793 
1794 		/* build address info for command */
1795 		switch (address->type) {
1796 		case PLN_ADDR_TYPE_GRAPHICS:
1797 			if (address->grph.addr.quad_part == 0) {
1798 				BREAK_TO_DEBUGGER();
1799 				break;
1800 			}
1801 
1802 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
1803 					address->grph.meta_addr.low_part;
1804 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
1805 					(uint16_t)address->grph.meta_addr.high_part;
1806 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
1807 					address->grph.addr.low_part;
1808 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
1809 					(uint16_t)address->grph.addr.high_part;
1810 			break;
1811 		case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
1812 			if (address->video_progressive.luma_addr.quad_part == 0 ||
1813 				address->video_progressive.chroma_addr.quad_part == 0) {
1814 				BREAK_TO_DEBUGGER();
1815 				break;
1816 			}
1817 
1818 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
1819 					address->video_progressive.luma_meta_addr.low_part;
1820 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
1821 					(uint16_t)address->video_progressive.luma_meta_addr.high_part;
1822 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
1823 					address->video_progressive.chroma_meta_addr.low_part;
1824 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
1825 					(uint16_t)address->video_progressive.chroma_meta_addr.high_part;
1826 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
1827 					address->video_progressive.luma_addr.low_part;
1828 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
1829 					(uint16_t)address->video_progressive.luma_addr.high_part;
1830 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
1831 					address->video_progressive.chroma_addr.low_part;
1832 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
1833 					(uint16_t)address->video_progressive.chroma_addr.high_part;
1834 			break;
1835 		default:
1836 			// Should never be hit
1837 			BREAK_TO_DEBUGGER();
1838 			break;
1839 		}
1840 
1841 		num_cmds++;
1842 	}
1843 
1844 	if (num_cmds > 0) {
1845 		cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
1846 		dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
1847 	}
1848 }
1849 
1850 bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement)
1851 {
1852 	bool result;
1853 
1854 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1855 		return false;
1856 
1857 	result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY,
1858 					   start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT);
1859 
1860 	return result;
1861 }
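
/*
 * Usage sketch (illustrative): measure IPS residency over a window, then
 * pull the results:
 *
 *	struct ips_residency_info info = { .ips_mode = DMUB_IPS_MODE_IPS2 };
 *
 *	dc_dmub_srv_ips_residency_cntl(dc_dmub_srv, true);	// start
 *	msleep(1000);						// window
 *	dc_dmub_srv_ips_residency_cntl(dc_dmub_srv, false);	// stop
 *	dc_dmub_srv_ips_query_residency_info(dc_dmub_srv, &info);
 */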
1862 
1863 void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output)
1864 {
1865 	uint32_t i;
1866 	enum dmub_gpint_command command_code;
1867 
1868 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1869 		return;
1870 
1871 	switch (output->ips_mode) {
1872 	case DMUB_IPS_MODE_IPS1_MAX:
1873 		command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER;
1874 		break;
1875 	case DMUB_IPS_MODE_IPS2:
1876 		command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER;
1877 		break;
1878 	case DMUB_IPS_MODE_IPS1_RCG:
1879 		command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER;
1880 		break;
1881 	case DMUB_IPS_MODE_IPS1_ONO2_ON:
1882 		command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER;
1883 		break;
1884 	default:
1885 		command_code = DMUB_GPINT__INVALID_COMMAND;
1886 		break;
1887 	}
1888 
1889 	if (command_code == DMUB_GPINT__INVALID_COMMAND)
1890 		return;
1891 
1892 	for (i = 0; i < GPINT_RETRY_NUM; i++) {
1893 		// false could mean GPINT timeout, in which case we should retry
1894 		if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT,
1895 					      (uint16_t)(output->ips_mode), &output->residency_percent,
1896 					      DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1897 			break;
1898 		udelay(100);
1899 	}
1900 
1901 	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER,
1902 				      (uint16_t)(output->ips_mode),
1903 				       &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1904 		output->entry_counter = 0;
1905 
1906 	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO,
1907 				      (uint16_t)(output->ips_mode),
1908 				       &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1909 		output->total_active_time_us[0] = 0;
1910 	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI,
1911 				      (uint16_t)(output->ips_mode),
1912 				       &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1913 		output->total_active_time_us[1] = 0;
1914 
1915 	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO,
1916 				      (uint16_t)(output->ips_mode),
1917 				       &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1918 		output->total_inactive_time_us[0] = 0;
1919 	if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI,
1920 				      (uint16_t)(output->ips_mode),
1921 				       &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1922 		output->total_inactive_time_us[1] = 0;
1923 
1924 	// query all histogram buckets (NUM_IPS_HISTOGRAM_BUCKETS = 16)
1925 	for (i = 0; i < 16; i++)
1926 		if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i],
1927 					       DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
1928 			output->histogram[i] = 0;
1929 }
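
/*
 * Note: total_active_time_us[] and total_inactive_time_us[] hold the low
 * and high 32 bits of 64-bit microsecond counters (the *_LO/*_HI GPINTs
 * above); a consumer would reassemble them as, e.g.:
 *
 *	uint64_t active_us = ((uint64_t)info.total_active_time_us[1] << 32) |
 *			     info.total_active_time_us[0];
 */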
1930