xref: /linux/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c (revision 8c6a0234739e33c8be8830c2eee13a49acfd59ea)
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_plane_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
#define GPINT_RETRY_NUM 20

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}

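/*
 * Illustrative sketch only (kept out of the build): the typical lifetime of a
 * dc_dmub_srv as driven by the DM layer. example_dmub_srv_lifetime and the
 * placeholder command payload are hypothetical; real callers populate one of
 * the dmub_rb_cmd sub-structures before running it.
 */
#if 0
static void example_dmub_srv_lifetime(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *srv = dc_dmub_srv_create(dc, dmub);
	union dmub_rb_cmd cmd = { 0 };

	if (!srv)
		return;

	/* ... fill in a specific cmd.<sub_command> here ... */
	dc_dmub_srv_cmd_run(srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	dc_dmub_srv_destroy(&srv); /* frees and NULLs the caller's pointer */
}
#endif
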
bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	do {
		status = dmub_srv_wait_for_pending(dmub, 100000);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}

	return status == DMUB_STATUS_OK;
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

static bool dc_dmub_srv_reg_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status = DMUB_STATUS_OK;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		/* confirm no messages pending */
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		/* queue command */
		if (status == DMUB_STATUS_OK)
			status = dmub_srv_reg_cmd_execute(dmub, &cmd_list[i]);

		/* check for errors */
		if (status != DMUB_STATUS_OK) {
			break;
		}
	}

	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

static bool dc_dmub_srv_fb_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
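		/*
		 * Commands flagged multi_cmd_pending form a group that must not
		 * be split across a ring-buffer flush; only queue one when the
		 * rest of the list still fits, otherwise drain the ring first.
		 */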
		if (!cmd_list[i].cmd_common.header.multi_cmd_pending ||
				dmub_rb_num_free(&dmub->inbox1.rb) >= count - i) {
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		} else {
			status = DMUB_STATUS_QUEUE_FULL;
		}

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_fb_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_inbox_free(dmub, 100000, count - i);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_fb_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_fb_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
		unsigned int count,
		union dmub_rb_cmd *cmd_list)
{
	bool res = false;

	if (dc_dmub_srv && dc_dmub_srv->dmub) {
		if (dc_dmub_srv->dmub->inbox_type == DMUB_CMD_INTERFACE_REG) {
			res = dc_dmub_srv_reg_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		} else {
			res = dc_dmub_srv_fb_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list);
		}

		if (res)
			res = dmub_srv_update_inbox_status(dc_dmub_srv->dmub) == DMUB_STATUS_OK;
	}

	return res;
}

bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
		enum dm_dmub_wait_type wait_type,
		union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			if (!dmub->debug.timeout_info.timeout_occured) {
				dmub->debug.timeout_info.timeout_occured = true;
				if (cmd_list)
					dmub->debug.timeout_info.timeout_cmd = *cmd_list;
				dmub->debug.timeout_info.timestamp = dm_get_timestamp(dc_dmub_srv->ctx);
			}
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY && cmd_list) {
			dmub_srv_cmd_get_response(dc_dmub_srv->dmub, cmd_list);
		}
	}

	return true;
}

bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	if (!dc_dmub_srv_cmd_list_queue_execute(dc_dmub_srv, count, cmd_list))
		return false;

	return dc_dmub_srv_wait_for_idle(dc_dmub_srv, wait_type, cmd_list);
}

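/*
 * Note on wait types, as handled by dc_dmub_srv_wait_for_idle() above:
 *   DM_DMUB_WAIT_TYPE_NO_WAIT         - queue and execute, return immediately
 *   DM_DMUB_WAIT_TYPE_WAIT            - additionally wait for DMUB idle
 *   DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY - also copy the response back into cmd
 */
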
bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

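	/* By convention, payload_bytes counts only the bytes after the header. */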
	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

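/*
 * Note: despite the plural name, this returns the index of the last pipe
 * found driving the stream, not a bitmask of pipes.
 */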
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}
	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *head_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
		}
	}
	fams_pipe_data->pipe_count = pipe_idx;
}

bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
	union dmub_rb_cmd cmd = { 0 };
	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
	int i = 0, k = 0;
	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Re-enable it.
	uint8_t visual_confirm_enabled;
	int pipe_idx = 0;
	struct dc_stream_status *stream_status = NULL;

	if (dc == NULL)
		return false;

	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

	// Format command.
	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

	if (should_manage_pstate) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->stream)
				continue;

			/* If FAMS is being used to support P-State and there is a stream
			 * that does not use FAMS, we are in an FPO + VActive scenario.
			 * Assign vactive stretch margin in this case.
			 */
			stream_status = dc_state_get_stream_status(context, pipe->stream);
			if (stream_status && !stream_status->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!resource_is_pipe_type(pipe, OTG_MASTER))
			continue;

		stream_status = dc_state_get_stream_status(context, pipe->stream);
		if (stream_status && stream_status->fpo_in_use) {
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst) &&
			dc->debug.visual_confirm == VISUAL_CONFIRM_DISABLE)
		return;

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy the visual confirm color to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
		cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
			&cmd.visual_confirm_color.visual_confirm_color_data,
			sizeof(struct dmub_visual_confirm_color));
	}
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 * @context: [in] DC state for access to phantom stream
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

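	/*
	 * Worked example with illustrative numbers (not from real hardware):
	 * for a DRR timing with v_total = 1125, h_total = 2200 and
	 * pix_clk_100hz = 1485000 (148.5 MHz),
	 *
	 *   drr_frame_us = v_total * h_total * 1e6 / (pix_clk_100hz * 100)
	 *                = 1125 * 2200 * 1e6 / 148500000 ~= 16667 us.
	 *
	 * min_vtotal_supported below converts the minimum supported frame
	 * time back into lines using the same h_total and pixel clock.
	 */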
	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
			(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
			(((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
			(((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
			dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
			(((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for the SubVP + VBLANK case is
 * stored in the pipe_data (subvp_data and vblank_data). Also check if the
 * VBLANK pipe is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for the master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (the stream should be the same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
				!resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
							vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
			vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
		(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for the SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	if (!phantom_stream0)
		return;

	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	if (!phantom_stream1)
		return;

	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP pipe has the smaller prefetch (including the prefetch end to mall
	// start time) should increase its prefetch time to match the other. The
	// (h_total * 1000000 - 1) term below makes the division round up to whole lines.
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing1->h_total * 1000000));

	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
				div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
					((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
					((uint64_t)phantom_timing0->h_total * 1000000));
	}
}

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
			&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	if (!phantom_stream)
		return;

	phantom_timing = &phantom_stream->timing;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
			main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
			main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 is downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor".
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
			phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up to the nearest whole line
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
					((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
				phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true, populate the pipe data; otherwise send an empty config
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
			sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For the SubVP pipe count, only count the top-most (ODM / MPC) pipe */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				resource_is_pipe_type(pipe, DPP_PIPE) &&
				dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the top-most (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
					resource_is_pipe_type(pipe, DPP_PIPE) &&
					pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
			pipe_idx++;
		}
		if (subvp_count == 2) {
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
		}
		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
				(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;
	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	uint32_t i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__);

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", dc_dmub_srv->dmub->debug.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", dc_dmub_srv->dmub->debug.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", dc_dmub_srv->dmub->debug.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", dc_dmub_srv->dmub->debug.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", dc_dmub_srv->dmub->debug.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", dc_dmub_srv->dmub->debug.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", dc_dmub_srv->dmub->debug.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", dc_dmub_srv->dmub->debug.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", dc_dmub_srv->dmub->debug.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", dc_dmub_srv->dmub->debug.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", dc_dmub_srv->dmub->debug.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", dc_dmub_srv->dmub->debug.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", dc_dmub_srv->dmub->debug.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", dc_dmub_srv->dmub->debug.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", dc_dmub_srv->dmub->debug.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", dc_dmub_srv->dmub->debug.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", dc_dmub_srv->dmub->debug.scratch[15]);
	for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++)
		DC_LOG_DEBUG("    pc[%d]             : %08x", i, dc_dmub_srv->dmub->debug.pc[i]);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", dc_dmub_srv->dmub->debug.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", dc_dmub_srv->dmub->debug.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", dc_dmub_srv->dmub->debug.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", dc_dmub_srv->dmub->debug.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", dc_dmub_srv->dmub->debug.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", dc_dmub_srv->dmub->debug.inbox0_size);
	DC_LOG_DEBUG("    outbox1_rptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_rptr);
	DC_LOG_DEBUG("    outbox1_wptr       : %08x", dc_dmub_srv->dmub->debug.outbox1_wptr);
	DC_LOG_DEBUG("    outbox1_size       : %08x", dc_dmub_srv->dmub->debug.outbox1_size);
	DC_LOG_DEBUG("    is_enabled         : %d", dc_dmub_srv->dmub->debug.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", dc_dmub_srv->dmub->debug.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", dc_dmub_srv->dmub->debug.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", dc_dmub_srv->dmub->debug.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", dc_dmub_srv->dmub->debug.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", dc_dmub_srv->dmub->debug.is_cw6_enabled);
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
			resource_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
		pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
		pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
		pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: the cursor rect is built from position & attributes;
	 * x & y are obtained from the position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from the attributes */
	payload->cursor_rect.width  = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable      = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx    = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst  = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw  = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw  = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw     = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor-related information and passes it to the
 * DMUB.
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
					&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for the dmub command, the 2nd command
	 * is only assigned to store the cursor attributes info. The 1st command
	 * can be viewed as 2 parts: the first is for PSR/Replay data, the other
	 * is to store the cursor position info.
	 *
	 * The command header type must be the same when using multi_cmd_pending.
	 * Besides, while processing the 2nd command in the DMU, the sub type is
	 * unused, so it is meaningless to pass a sub type header with a
	 * different type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
				sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; // 1st of multiple combined dmu cmds

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; // Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Send both update_cursor_info commands to the DMU. */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;
	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

void dc_dmub_srv_cursor_offload_init(struct dc *dc)
{
	struct dmub_rb_cmd_cursor_offload_init *init;
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
	union dmub_rb_cmd cmd;

	if (!dc->config.enable_cursor_offload)
		return;

	if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
		return;

	if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
		return;

	if (!dc_dmub_srv->dmub->cursor_offload_v1)
		return;

	if (!dc_dmub_srv->dmub->shared_state)
		return;

	memset(&cmd, 0, sizeof(cmd));

	init = &cmd.cursor_offload_init;
	init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
	init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
	init->header.payload_bytes = sizeof(init->init_data);
	init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
	init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	dc_dmub_srv->cursor_offload_enabled = true;
}

void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
					const struct dc_stream_state *stream, bool enable)
{
	struct pipe_ctx const *pipe_ctx;
	struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;

	if (!stream)
		return;

	pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
	if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cntl = &cmd.cursor_offload_stream_ctnl;
	cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
	cntl->header.sub_type =
		enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
	cntl->header.payload_bytes = sizeof(cntl->data);

	cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
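	/*
	 * line_time_in_ns = h_total / pixel_clock, scaled to nanoseconds:
	 * pix_clk_100hz / 10 is the pixel clock in kHz, so h_total * 1e6 / kHz
	 * yields ns; the +1 rounds up and guards against a zero line time.
	 */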
	cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
							       stream->timing.pix_clk_100hz / 10));

	cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
					 stream->adjust.v_total_max :
					 stream->timing.v_total;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
				     enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
{
	struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
	union dmub_rb_cmd cmd;

	if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
		return;

	if (!pipe || !pipe->stream || !pipe->stream_res.tg)
		return;

	memset(&cmd, 0, sizeof(cmd));

	cntl = &cmd.cursor_offload_stream_ctnl;
	cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
	cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
	cntl->header.payload_bytes = sizeof(cntl->data);
	cntl->data.otg_inst = pipe->stream_res.tg->inst;

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

	return true;
}

static int count_active_streams(const struct dc *dc)
{
	int i, count = 0;

	for (i = 0; i < dc->current_state->stream_count; ++i) {
		struct dc_stream_state *stream = dc->current_state->streams[i];

		if (stream && (!stream->dpms_off || dc->config.disable_ips_in_dpms_off))
			count += 1;
	}

	return count;
}

static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	volatile const struct dmub_shared_state_ips_fw *ips_fw;
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;
	ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
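
	/*
	 * IPS handshake over shared state: the driver publishes "allow" bits in
	 * the DRIVER region (ips_driver->signals below) while the firmware
	 * reports progress through the "commit" bits in the FW region logged
	 * below.
	 */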
1330 
1331 	memset(&cmd, 0, sizeof(cmd));
1332 	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
1333 	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
1334 	cmd.idle_opt_notify_idle.header.payload_bytes =
1335 		sizeof(cmd.idle_opt_notify_idle) -
1336 		sizeof(cmd.idle_opt_notify_idle.header);
1337 
1338 	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
1339 
1340 	if (dc->work_arounds.skip_psr_ips_crtc_disable)
1341 		cmd.idle_opt_notify_idle.cntl_data.skip_otg_disable = true;
1342 
1343 	if (allow_idle) {
1344 		volatile struct dmub_shared_state_ips_driver *ips_driver =
1345 			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1346 		union dmub_shared_state_ips_driver_signals new_signals;
1347 
1348 		DC_LOG_IPS(
1349 			"%s wait idle (ips1_commit=%u ips2_commit=%u)",
1350 			__func__,
1351 			ips_fw->signals.bits.ips1_commit,
1352 			ips_fw->signals.bits.ips2_commit);
1353 
1354 		dc_dmub_srv_wait_for_idle(dc->ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
1355 
1356 		memset(&new_signals, 0, sizeof(new_signals));
1357 
1358 		new_signals.bits.allow_idle = 1; /* always set */
1359 
1360 		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
1361 		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
1362 			new_signals.bits.allow_pg = 1;
1363 			new_signals.bits.allow_ips1 = 1;
1364 			new_signals.bits.allow_ips2 = 1;
1365 			new_signals.bits.allow_z10 = 1;
1366 			// New in IPSv2.0
1367 			new_signals.bits.allow_ips1z8 = 1;
1368 		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
1369 			new_signals.bits.allow_ips1 = 1;
1370 		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
1371 			// IPSv1.0 only
1372 			new_signals.bits.allow_pg = 1;
1373 			new_signals.bits.allow_ips1 = 1;
1374 		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
1375 			// IPSv1.0 only
1376 			new_signals.bits.allow_pg = 1;
1377 			new_signals.bits.allow_ips1 = 1;
1378 			new_signals.bits.allow_ips2 = 1;
1379 		} else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) {
1380 			/* TODO: Move this logic out to hwseq */
1381 			if (count_active_streams(dc) == 0) {
1382 				/* IPS2 - Display off */
1383 				new_signals.bits.allow_pg = 1;
1384 				new_signals.bits.allow_ips1 = 1;
1385 				new_signals.bits.allow_ips2 = 1;
1386 				new_signals.bits.allow_z10 = 1;
1387 				// New in IPSv2.0
1388 				new_signals.bits.allow_ips1z8 = 1;
1389 			} else {
1390 				/* RCG only */
1391 				new_signals.bits.allow_pg = 0;
1392 				new_signals.bits.allow_ips1 = 1;
1393 				new_signals.bits.allow_ips2 = 0;
1394 				new_signals.bits.allow_z10 = 0;
1395 			}
1396 		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) {
1397 			new_signals.bits.allow_pg = 1;
1398 			new_signals.bits.allow_ips1 = 1;
1399 			new_signals.bits.allow_ips2 = 1;
1400 			new_signals.bits.allow_z10 = 1;
1401 		}
1402 		// Setting RCG allow bits (IPSv2.0)
1403 		if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) {
1404 			new_signals.bits.allow_ips0_rcg = 1;
1405 			new_signals.bits.allow_ips1_rcg = 1;
1406 		} else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) {
1407 			new_signals.bits.allow_ips1_rcg = 1;
1408 		} else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) {
1409 			new_signals.bits.allow_ips0_rcg = 1;
1410 		}
1411 		// IPS dynamic allow bits (IPSv2 change, vpb use case)
1412 		if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) {
1413 			new_signals.bits.allow_dynamic_ips1 = 1;
1414 		} else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) {
1415 			new_signals.bits.allow_dynamic_ips1 = 1;
1416 			new_signals.bits.allow_dynamic_ips1_z8 = 1;
1417 		}
1418 		ips_driver->signals = new_signals;
1419 		dc_dmub_srv->driver_signals = ips_driver->signals;
1420 	}
1421 
1422 	DC_LOG_IPS(
1423 		"%s send allow_idle=%d (ips1_commit=%u ips2_commit=%u)",
1424 		__func__,
1425 		allow_idle,
1426 		ips_fw->signals.bits.ips1_commit,
1427 		ips_fw->signals.bits.ips2_commit);
1428 
1429 	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
1430 	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
1431 	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
1432 
1433 	/* Register access should stop at this point. */
1434 	if (allow_idle)
1435 		dc_dmub_srv->needs_idle_wake = true;
1436 }
1437 
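/*
 * Exit any IPS/RCG low power state and block until it is safe to access DCN
 * registers again. Summary of the sequence below: clear the driver allow
 * signals, then (for IPS2 on pre-IPSv2 hardware) wait the evaluation delay,
 * ask PMFW to exit, wait the entry upper bound and ask again if the firmware
 * had already committed, spin until the commit bits clear, wait for HW
 * power-up and resync the DMUB inboxes before notifying DMCUB that idle is
 * over.
 */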
1438 static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
1439 {
1440 	struct dc_dmub_srv *dc_dmub_srv;
1441 	uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0;
1442 
1443 	if (dc->debug.dmcub_emulation)
1444 		return;
1445 
1446 	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
1447 		return;
1448 
1449 	dc_dmub_srv = dc->ctx->dmub_srv;
1450 
1451 	if (dc->clk_mgr->funcs->exit_low_power_state) {
1452 		volatile const struct dmub_shared_state_ips_fw *ips_fw =
1453 			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1454 		volatile struct dmub_shared_state_ips_driver *ips_driver =
1455 			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
1456 		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
1457 
1458 		rcg_exit_count = ips_fw->rcg_exit_count;
1459 		ips1_exit_count = ips_fw->ips1_exit_count;
1460 		ips2_exit_count = ips_fw->ips2_exit_count;
1461 		ips1z8_exit_count = ips_fw->ips1_z8ret_exit_count;
1462 
1463 		ips_driver->signals.all = 0;
1464 		dc_dmub_srv->driver_signals = ips_driver->signals;
1465 
1466 		DC_LOG_IPS(
1467 			"%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)",
1468 			__func__,
1469 			ips_driver->signals.bits.allow_ips1,
1470 			ips_driver->signals.bits.allow_ips2,
1471 			ips_fw->signals.bits.ips1_commit,
1472 			ips_fw->signals.bits.ips2_commit,
1473 			ips_fw->signals.bits.ips1z8_commit,
1474 			ips_fw->rcg_entry_count,
1475 			ips_fw->ips1_entry_count,
1476 			ips_fw->ips2_entry_count,
1477 			ips_fw->ips1_z8ret_entry_count);
1478 
1479 		/* Note: register access has technically not resumed for DCN here, but we
1480 		 * need to message PMFW through our standard register interface.
1481 		 */
1482 		dc_dmub_srv->needs_idle_wake = false;
1483 
1484 		if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) &&
1485 		    (!dc->debug.optimize_ips_handshake ||
1486 		     ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) {
1487 			DC_LOG_IPS(
1488 				"wait IPS2 eval (ips1_commit=%u ips2_commit=%u)",
1489 				ips_fw->signals.bits.ips1_commit,
1490 				ips_fw->signals.bits.ips2_commit);
1491 
1492 			if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit)
1493 				udelay(dc->debug.ips2_eval_delay_us);
1494 
1495 			DC_LOG_IPS(
1496 				"exit IPS2 #1 (ips1_commit=%u ips2_commit=%u)",
1497 				ips_fw->signals.bits.ips1_commit,
1498 				ips_fw->signals.bits.ips2_commit);
1499 
1500 			// Tell PMFW to exit low power state
1501 			dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1502 
1503 			if (ips_fw->signals.bits.ips2_commit) {
1505 				DC_LOG_IPS(
1506 					"wait IPS2 entry delay (ips1_commit=%u ips2_commit=%u)",
1507 					ips_fw->signals.bits.ips1_commit,
1508 					ips_fw->signals.bits.ips2_commit);
1509 
1510 				// Wait for IPS2 entry upper bound
1511 				udelay(dc->debug.ips2_entry_delay_us);
1512 
1513 				DC_LOG_IPS(
1514 					"exit IPS2 #2 (ips1_commit=%u ips2_commit=%u)",
1515 					ips_fw->signals.bits.ips1_commit,
1516 					ips_fw->signals.bits.ips2_commit);
1517 
1518 				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
1519 
1520 				DC_LOG_IPS(
1521 					"wait IPS2 commit clear (ips1_commit=%u ips2_commit=%u)",
1522 					ips_fw->signals.bits.ips1_commit,
1523 					ips_fw->signals.bits.ips2_commit);
1524 
1525 				while (ips_fw->signals.bits.ips2_commit)
1526 					udelay(1);
1527 
1528 				DC_LOG_IPS(
1529 					"wait hw_pwr_up (ips1_commit=%u ips2_commit=%u)",
1530 					ips_fw->signals.bits.ips1_commit,
1531 					ips_fw->signals.bits.ips2_commit);
1532 
1533 				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1534 					ASSERT(0);
1535 
1536 				DC_LOG_IPS(
1537 					"resync inbox1 (ips1_commit=%u ips2_commit=%u)",
1538 					ips_fw->signals.bits.ips1_commit,
1539 					ips_fw->signals.bits.ips2_commit);
1540 
1541 				dmub_srv_sync_inboxes(dc->ctx->dmub_srv->dmub);
1542 			}
1543 		}
1544 
1545 		dc_dmub_srv_notify_idle(dc, false);
1546 		if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) {
1547 			DC_LOG_IPS(
1548 				"wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
1549 				ips_fw->signals.bits.ips1_commit,
1550 				ips_fw->signals.bits.ips2_commit,
1551 				ips_fw->signals.bits.ips1z8_commit);
1552 
1553 			while (ips_fw->signals.bits.ips1_commit)
1554 				udelay(1);
1555 
1556 			DC_LOG_IPS(
1557 				"wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)",
1558 				ips_fw->signals.bits.ips1_commit,
1559 				ips_fw->signals.bits.ips2_commit,
1560 				ips_fw->signals.bits.ips1z8_commit);
1561 		}
1562 	}
1563 
1564 	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
1565 		ASSERT(0);
1566 
1567 	DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)",
1568 		__func__,
1569 		rcg_exit_count,
1570 		ips1_exit_count,
1571 		ips2_exit_count,
1572 		ips1z8_exit_count);
1573 }
1574 
1575 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
1576 {
1577 	struct dmub_srv *dmub;
1578 
1579 	if (!dc_dmub_srv)
1580 		return;
1581 
1582 	dmub = dc_dmub_srv->dmub;
1583 
1584 	if (power_state == DC_ACPI_CM_POWER_STATE_D0)
1585 		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
1586 	else
1587 		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
1588 }
1589 
1590 void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
1591 					  enum dc_acpi_cm_power_state power_state)
1592 {
1593 	union dmub_rb_cmd cmd;
1594 
1595 	if (!dc_dmub_srv)
1596 		return;
1597 
1598 	memset(&cmd, 0, sizeof(cmd));
1599 
1600 	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
1601 	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
1602 	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
1603 		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
1604 
1605 	if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
1606 		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
1607 	} else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
1608 		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
1609 	} else {
1610 		cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
1611 	}
1612 
1613 	dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1614 }
1615 
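/*
 * Illustrative sketch (assumption, not a fixed driver sequence): a DM suspend
 * path would typically pair the firmware notification with the DMCUB-internal
 * power state, e.g. for D3 entry:
 *
 *	dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
 *	dc_dmub_srv_set_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
 *
 * with the D0 equivalents on resume; exact ordering and call sites are
 * DM-specific.
 */
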
1616 bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
1617 {
1618 	volatile const struct dmub_shared_state_ips_fw *ips_fw;
1619 	bool reallow_idle = false, should_detect = false;
1620 
1621 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1622 		return false;
1623 
1624 	if (dc_dmub_srv->dmub->shared_state &&
1625 	    dc_dmub_srv->dmub->meta_info.feature_bits.bits.shared_state_link_detection) {
1626 		ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
1627 		return ips_fw->signals.bits.detection_required;
1628 	}
1629 
1630 	/* Detection may require reading scratch 0 - exit out of idle prior to the read. */
1631 	if (dc_dmub_srv->idle_allowed) {
1632 		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, false);
1633 		reallow_idle = true;
1634 	}
1635 
1636 	should_detect = dmub_srv_should_detect(dc_dmub_srv->dmub);
1637 
1638 	/* Re-enter idle if we're not about to immediately redetect links. */
1639 	if (!should_detect && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1640 	    !dc_dmub_srv->ctx->dc->debug.disable_dmub_reallow_idle)
1641 		dc_dmub_srv_apply_idle_power_optimizations(dc_dmub_srv->ctx->dc, true);
1642 
1643 	return should_detect;
1644 }
1645 
1646 void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
1647 {
1648 	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
1649 
1650 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1651 		return;
1652 
1653 	allow_idle &= (!dc->debug.ips_disallow_entry);
1654 
1655 	if (dc_dmub_srv->idle_allowed == allow_idle)
1656 		return;
1657 
1658 	DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle);
1659 
1660 	/*
1661 	 * Entering a low power state requires a driver notification.
1662 	 * Powering up the hardware requires notifying PMFW and DMCUB.
1663 	 * Clearing the driver idle allow requires a DMCUB command.
1664 	 * DMCUB commands require the DMCUB to be powered up and restored.
1665 	 */
1666 
1667 	if (!allow_idle) {
1668 		dc_dmub_srv->idle_exit_counter += 1;
1669 
1670 		dc_dmub_srv_exit_low_power_state(dc);
1671 		/*
1672 		 * Idle is considered fully exited only after the sequence above
1673 		 * fully completes. If we have a race of two threads exiting
1674 		 * at the same time then it's safe to perform the sequence
1675 		 * twice as long as we're not re-entering.
1676 		 *
1677 		 * Infinite command submission is avoided by using the
1678 		 * dm_execute_dmub_cmd submission instead of the "wake" helpers.
1679 		 */
1680 		dc_dmub_srv->idle_allowed = false;
1681 
1682 		dc_dmub_srv->idle_exit_counter -= 1;
1683 		if (dc_dmub_srv->idle_exit_counter < 0) {
1684 			ASSERT(0);
1685 			dc_dmub_srv->idle_exit_counter = 0;
1686 		}
1687 	} else {
1688 		/* Consider idle as notified prior to the actual submission to
1689 		 * prevent multiple entries. */
1690 		dc_dmub_srv->idle_allowed = true;
1691 
1692 		dc_dmub_srv_notify_idle(dc, allow_idle);
1693 	}
1694 }
1695 
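/*
 * Illustrative sketch (hypothetical call site): code that must touch DCN
 * registers directly can bracket the access with an idle exit/re-entry:
 *
 *	dc_dmub_srv_apply_idle_power_optimizations(dc, false);	// exit IPS
 *	// ... register access is safe here ...
 *	dc_dmub_srv_apply_idle_power_optimizations(dc, true);	// allow re-entry
 *
 * The dc_wake_and_execute_*() helpers below wrap exactly this pattern around
 * DMUB command submission.
 */
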
1696 bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
1697 				  enum dm_dmub_wait_type wait_type)
1698 {
1699 	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
1700 }
1701 
1702 bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
1703 				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
1704 {
1705 	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1706 	bool result = false, reallow_idle = false;
1707 
1708 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1709 		return false;
1710 
1711 	if (count == 0)
1712 		return true;
1713 
1714 	if (dc_dmub_srv->idle_allowed) {
1715 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1716 		reallow_idle = true;
1717 	}
1718 
1719 	/*
1720 	 * These may have different implementations in DM, so ensure
1721 	 * that we guide it to the expected helper.
1722 	 */
1723 	if (count > 1)
1724 		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
1725 	else
1726 		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
1727 
1728 	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1729 	    !ctx->dc->debug.disable_dmub_reallow_idle)
1730 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1731 
1732 	return result;
1733 }
1734 
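/*
 * Usage sketch (for illustration; mirrors the set_dc_power_state command used
 * elsewhere in this file): building and submitting a single command through
 * the wake helper. Note that payload_bytes always excludes the header.
 *
 *	union dmub_rb_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
 *	cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
 *	cmd.idle_opt_set_dc_power_state.header.payload_bytes =
 *		sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
 *	cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
 *
 *	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT))
 *		DC_ERROR("DMUB cmd submission failed\n");
 */
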
1735 static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1736 				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1737 {
1738 	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1739 	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
1740 	enum dmub_status status;
1741 
1742 	if (response)
1743 		*response = 0;
1744 
1745 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1746 		return false;
1747 
1748 	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
1749 	if (status != DMUB_STATUS_OK) {
1750 		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
1751 			return true;
1752 
1753 		return false;
1754 	}
1755 
1756 	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
1757 		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);
1758 
1759 	return true;
1760 }
1761 
1762 bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
1763 			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
1764 {
1765 	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
1766 	bool result = false, reallow_idle = false;
1767 
1768 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
1769 		return false;
1770 
1771 	if (dc_dmub_srv->idle_allowed) {
1772 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
1773 		reallow_idle = true;
1774 	}
1775 
1776 	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
1777 
1778 	if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 &&
1779 	    !ctx->dc->debug.disable_dmub_reallow_idle)
1780 		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
1781 
1782 	return result;
1783 }
1784 
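/*
 * Usage sketch (the command id is an assumption drawn from the DMUB GPINT
 * enum, not from this file): reading a value back over GPINT.
 *
 *	uint32_t fw_version = 0;
 *
 *	if (dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__GET_FW_VERSION, 0,
 *				      &fw_version, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 *		DC_LOG_DEBUG("DMCUB fw version 0x%08x\n", fw_version);
 */
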
1785 static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc,
1786 		struct dc_state *context,
1787 		bool enable)
1788 {
1789 	uint8_t num_cmds = 1;
1790 	uint32_t i;
1791 	union dmub_rb_cmd cmd[2 * MAX_STREAMS + 1];
1792 	struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
1793 
1794 	memset(cmd, 0, sizeof(union dmub_rb_cmd) * (2 * MAX_STREAMS + 1));
1795 	/* fill in generic command header */
1796 	global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1797 	global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1798 	global_cmd->header.payload_bytes =
1799 			sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1800 
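	/*
	 * Command buffer layout when enabled (N = num_streams):
	 *   cmd[0]        - global config
	 *   cmd[1..N]     - per-stream base state
	 *   cmd[N+1..2N]  - per-stream sub state
	 * All but the last command in the chain set multi_cmd_pending.
	 */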
1801 	if (enable) {
1802 		/* send global configuration parameters */
1803 		memcpy(&global_cmd->config.global, &context->bw_ctx.bw.dcn.fams2_global_config, sizeof(struct dmub_cmd_fams2_global_config));
1804 
1805 		/* copy static feature configuration overrides */
1806 		global_cmd->config.global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
1807 		global_cmd->config.global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
1808 		global_cmd->config.global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
1809 
1810 		/* construct per-stream configs */
1811 		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
1812 			struct dmub_rb_cmd_fams2 *stream_base_cmd = &cmd[i+1].fams2_config;
1813 			struct dmub_rb_cmd_fams2 *stream_sub_state_cmd = &cmd[i+1+context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config;
1814 
1815 			/* configure command header */
1816 			stream_base_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1817 			stream_base_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1818 			stream_base_cmd->header.payload_bytes =
1819 					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1820 			stream_base_cmd->header.multi_cmd_pending = 1;
1821 			stream_sub_state_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1822 			stream_sub_state_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
1823 			stream_sub_state_cmd->header.payload_bytes =
1824 					sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
1825 			stream_sub_state_cmd->header.multi_cmd_pending = 1;
1826 			/* copy stream static base state */
1827 			memcpy(&stream_base_cmd->config,
1828 					&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
1829 					sizeof(union dmub_cmd_fams2_config));
1830 			/* copy stream static sub state */
1831 			memcpy(&stream_sub_state_cmd->config,
1832 					&context->bw_ctx.bw.dcn.fams2_stream_sub_params[i],
1833 					sizeof(union dmub_cmd_fams2_config));
1834 		}
1835 	}
1836 
1837 	/* apply feature configuration based on current driver state */
1838 	global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
1839 	global_cmd->config.global.features.bits.enable = enable;
1840 
1841 	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
1842 		/* set multi pending for global, and unset for last stream cmd */
1843 		global_cmd->header.multi_cmd_pending = 1;
1844 		cmd[2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams].fams2_config.header.multi_cmd_pending = 0;
1845 		num_cmds += 2 * context->bw_ctx.bw.dcn.fams2_global_config.num_streams;
1846 	}
1847 
1848 	dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
1849 }
1850 
1851 static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc,
1852 		struct dc_state *context,
1853 		bool enable)
1854 {
1855 	struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr;
1856 	union dmub_rb_cmd cmd;
1857 	uint32_t i;
1858 
1859 	memset(config, 0, sizeof(*config));
1860 	memset(&cmd, 0, sizeof(cmd));
1861 
1862 	cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1863 	cmd.ib_fams2_config.header.sub_type = DMUB_CMD__FAMS2_IB_CONFIG;
1864 
1865 	cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr;
1866 	cmd.ib_fams2_config.ib_data.size = sizeof(*config);
1867 
1868 	if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) {
1869 		/* send global configuration parameters */
1870 		memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config,
1871 			sizeof(struct dmub_cmd_fams2_global_config));
1872 
1873 		/* apply static feature overrides after the global memcpy so they are not clobbered */
1874 		config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery;
1875 		config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip;
1876 		config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug;
1877 
1878 		/* construct per-stream configs */
1879 		for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) {
1880 			/* copy stream static base state */
1881 			memcpy(&config->stream_v1[i].base,
1882 				&context->bw_ctx.bw.dcn.fams2_stream_base_params[i],
1883 				sizeof(config->stream_v1[i].base));
1884 
1885 			/* copy stream static sub-state */
1886 			memcpy(&config->stream_v1[i].sub_state,
1887 				&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i],
1888 				sizeof(config->stream_v1[i].sub_state));
1889 		}
1890 	}
1891 
1892 	config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
1893 	config->global.features.bits.enable = enable;
1894 
1895 	dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1896 }
1897 
1898 void dc_dmub_srv_fams2_update_config(struct dc *dc,
1899 		struct dc_state *context,
1900 		bool enable)
1901 {
1902 	if (dc->debug.fams_version.major == 2)
1903 		dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable);
1904 	else if (dc->debug.fams_version.major == 3)
1905 		dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable);
1906 }
1907 
1908 void dc_dmub_srv_fams2_drr_update(struct dc *dc,
1909 		uint32_t tg_inst,
1910 		uint32_t vtotal_min,
1911 		uint32_t vtotal_max,
1912 		uint32_t vtotal_mid,
1913 		uint32_t vtotal_mid_frame_num,
1914 		bool program_manual_trigger)
1915 {
1916 	union dmub_rb_cmd cmd = { 0 };
1917 
1918 	cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1919 	cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
1920 	cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
1921 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
1922 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
1923 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
1924 	cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
1925 	cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
1926 
1927 	cmd.fams2_drr_update.header.payload_bytes =
1928 			sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
1929 
1930 	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
1931 }
1932 
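/*
 * Illustrative sketch (hypothetical values): clamping an OTG to a DRR window
 * without mid-point programming or a manual trigger.
 *
 *	dc_dmub_srv_fams2_drr_update(dc, tg_inst,
 *				     vtotal_nominal,	// vtotal_min (hypothetical)
 *				     vtotal_stretched,	// vtotal_max (hypothetical)
 *				     0, 0,		// no mid-point frame
 *				     false);		// no manual trigger
 */
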
1933 void dc_dmub_srv_fams2_passthrough_flip(
1934 		struct dc *dc,
1935 		struct dc_state *state,
1936 		struct dc_stream_state *stream,
1937 		struct dc_surface_update *srf_updates,
1938 		int surface_count)
1939 {
1940 	int plane_index;
1941 	union dmub_rb_cmd cmds[MAX_PLANES];
1942 	struct dc_plane_address *address;
1943 	struct dc_plane_state *plane_state;
1944 	int num_cmds = 0;
1945 	struct dc_stream_status *stream_status = dc_stream_get_status(stream);
1946 
1947 	if (surface_count <= 0 || stream_status == NULL)
1948 		return;
1949 
1950 	memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);
1951 
1952 	/* build command for each surface update */
1953 	for (plane_index = 0; plane_index < surface_count; plane_index++) {
1954 		plane_state = srf_updates[plane_index].surface;
1955 		address = &plane_state->address;
1956 
1957 		/* skip if there is no address update for plane */
1958 		if (!srf_updates[plane_index].flip_addr)
1959 			continue;
1960 
1961 		/* build command header */
1962 		cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
1963 		cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
1964 		cmds[num_cmds].fams2_flip.header.payload_bytes =
1965 				sizeof(struct dmub_rb_cmd_fams2_flip) - sizeof(struct dmub_cmd_header);
1966 
1967 		/* for chaining multiple commands, all but the last command sets this to 1 */
1968 		cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
1969 
1970 		/* set topology info */
1971 		cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
1972 		/* stream_status is guaranteed non-NULL by the early return above */
1973 		cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;
1974 
1975 		cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;
1976 
1977 		/* build address info for command */
1978 		switch (address->type) {
1979 		case PLN_ADDR_TYPE_GRAPHICS:
1980 			if (address->grph.addr.quad_part == 0) {
1981 				BREAK_TO_DEBUGGER();
1982 				break;
1983 			}
1984 
1985 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
1986 					address->grph.meta_addr.low_part;
1987 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
1988 					(uint16_t)address->grph.meta_addr.high_part;
1989 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
1990 					address->grph.addr.low_part;
1991 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
1992 					(uint16_t)address->grph.addr.high_part;
1993 			break;
1994 		case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
1995 			if (address->video_progressive.luma_addr.quad_part == 0 ||
1996 				address->video_progressive.chroma_addr.quad_part == 0) {
1997 				BREAK_TO_DEBUGGER();
1998 				break;
1999 			}
2000 
2001 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
2002 					address->video_progressive.luma_meta_addr.low_part;
2003 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
2004 					(uint16_t)address->video_progressive.luma_meta_addr.high_part;
2005 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
2006 					address->video_progressive.chroma_meta_addr.low_part;
2007 			cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
2008 					(uint16_t)address->video_progressive.chroma_meta_addr.high_part;
2009 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
2010 					address->video_progressive.luma_addr.low_part;
2011 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
2012 					(uint16_t)address->video_progressive.luma_addr.high_part;
2013 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
2014 					address->video_progressive.chroma_addr.low_part;
2015 			cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
2016 					(uint16_t)address->video_progressive.chroma_addr.high_part;
2017 			break;
2018 		default:
2019 			// Should never be hit
2020 			BREAK_TO_DEBUGGER();
2021 			break;
2022 		}
2023 
2024 		num_cmds++;
2025 	}
2026 
2027 	if (num_cmds > 0) {
2028 		cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
2029 		dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
2030 	}
2031 }
2032 
2034 bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement)
2035 {
2036 	union dmub_rb_cmd cmd;
2037 
2038 	memset(&cmd, 0, sizeof(cmd));
2039 
2040 	cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS;
2041 	cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL;
2042 	cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data);
2043 
2044 	// only panel_inst=0 is supported at the moment
2045 	cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst;
2046 	cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement;
2047 
2048 	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
2049 		return false;
2050 
2051 	return true;
2052 }
2053 
2054 bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info,
2055 					  enum ips_residency_mode ips_mode)
2056 {
2057 	union dmub_rb_cmd cmd;
2058 	uint32_t bytes = sizeof(struct dmub_ips_residency_info);
2059 
2060 	dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb);
2061 	memset(&cmd, 0, sizeof(cmd));
2062 
2063 	cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS;
2064 	cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO;
2065 	cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct dmub_cmd_ips_query_residency_info_data);
2066 
2067 	cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
2068 	cmd.ips_query_residency_info.info_data.size = bytes;
2069 	cmd.ips_query_residency_info.info_data.panel_inst = panel_inst;
2070 	cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode;
2071 
2072 	if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) ||
2073 					  cmd.ips_query_residency_info.header.ret_status == 0)
2074 		return false;
2075 
2076 	// copy the result to the output since ret_status != 0 means the command returned data
2077 	memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes);
2078 
2079 	return true;
2080 }
2081 
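/*
 * Usage sketch (illustrative; report_residency() is a hypothetical consumer
 * and the mode enumerator is an assumption): bracket a scenario with the
 * control command, then pull the results out of scratch memory.
 *
 *	struct dmub_ips_residency_info info;
 *
 *	dc_dmub_srv_ips_residency_cntl(dc->ctx, 0, true);	// start measurement
 *	// ... run the scenario under test ...
 *	dc_dmub_srv_ips_residency_cntl(dc->ctx, 0, false);	// stop measurement
 *	if (dc_dmub_srv_ips_query_residency_info(dc->ctx, 0, &info,
 *						 IPS_RESIDENCY__IPS2))
 *		report_residency(&info);
 */
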
2082 bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
2083 {
2084 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2085 	union dmub_rb_cmd cmd;
2086 	enum dm_dmub_wait_type wait_type;
2087 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2088 	bool result;
2089 
2090 	memset(&cmd, 0, sizeof(cmd));
2091 
2092 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2093 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG;
2094 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2095 
2096 	lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr;
2097 	lsdma_data->u.init_data.ring_size               = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size;
2098 
2099 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2100 
2101 	if (!result)
2102 		DC_ERROR("LSDMA Init failed in DMUB\n");
2103 
2104 	return result;
2105 }
2106 
2107 bool dmub_lsdma_send_linear_copy_command(
2108 	struct dc_dmub_srv *dc_dmub_srv,
2109 	uint64_t src_addr,
2110 	uint64_t dst_addr,
2111 	uint32_t count
2112 )
2113 {
2114 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2115 	union dmub_rb_cmd cmd;
2116 	enum dm_dmub_wait_type wait_type;
2117 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2118 	bool result;
2119 
2120 	memset(&cmd, 0, sizeof(cmd));
2121 
2122 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2123 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY;
2124 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2125 
2126 	lsdma_data->u.linear_copy_data.count   = count - 1; // LSDMA controller expects byte count minus 1
2127 	lsdma_data->u.linear_copy_data.src_lo  = src_addr & 0xFFFFFFFF;
2128 	lsdma_data->u.linear_copy_data.src_hi  = (src_addr >> 32) & 0xFFFFFFFF;
2129 	lsdma_data->u.linear_copy_data.dst_lo  = dst_addr & 0xFFFFFFFF;
2130 	lsdma_data->u.linear_copy_data.dst_hi  = (dst_addr >> 32) & 0xFFFFFFFF;
2131 
2132 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2133 
2134 	if (!result)
2135 		DC_ERROR("LSDMA Linear Copy failed in DMUB\n");
2136 
2137 	return result;
2138 }
2139 
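/*
 * Usage sketch (addresses and sizes are placeholders): the LSDMA ring must be
 * initialized once before any copy commands are queued.
 *
 *	if (dmub_lsdma_init(dc->ctx->dmub_srv))
 *		dmub_lsdma_send_linear_copy_command(dc->ctx->dmub_srv,
 *						    src_gpu_addr,	// hypothetical GPU VA
 *						    dst_gpu_addr,	// hypothetical GPU VA
 *						    num_bytes);
 */
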
2140 bool dmub_lsdma_send_linear_sub_window_copy_command(
2141 	struct dc_dmub_srv *dc_dmub_srv,
2142 	struct lsdma_linear_sub_window_copy_params copy_data
2143 )
2144 {
2145 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2146 	union dmub_rb_cmd cmd;
2147 	enum dm_dmub_wait_type wait_type;
2148 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2149 	bool result;
2150 
2151 	memset(&cmd, 0, sizeof(cmd));
2152 
2153 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2154 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
2155 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2156 
2157 	lsdma_data->u.linear_sub_window_copy_data.tmz              = copy_data.tmz;
2158 	lsdma_data->u.linear_sub_window_copy_data.element_size     = copy_data.element_size;
2159 	lsdma_data->u.linear_sub_window_copy_data.src_lo           = copy_data.src_lo;
2160 	lsdma_data->u.linear_sub_window_copy_data.src_hi           = copy_data.src_hi;
2161 	lsdma_data->u.linear_sub_window_copy_data.src_x            = copy_data.src_x;
2162 	lsdma_data->u.linear_sub_window_copy_data.src_y            = copy_data.src_y;
2163 	lsdma_data->u.linear_sub_window_copy_data.src_pitch        = copy_data.src_pitch;
2164 	lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch  = copy_data.src_slice_pitch;
2165 	lsdma_data->u.linear_sub_window_copy_data.dst_lo           = copy_data.dst_lo;
2166 	lsdma_data->u.linear_sub_window_copy_data.dst_hi           = copy_data.dst_hi;
2167 	lsdma_data->u.linear_sub_window_copy_data.dst_x            = copy_data.dst_x;
2168 	lsdma_data->u.linear_sub_window_copy_data.dst_y            = copy_data.dst_y;
2169 	lsdma_data->u.linear_sub_window_copy_data.dst_pitch        = copy_data.dst_pitch;
2170 	lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch  = copy_data.dst_slice_pitch;
2171 	lsdma_data->u.linear_sub_window_copy_data.rect_x           = copy_data.rect_x;
2172 	lsdma_data->u.linear_sub_window_copy_data.rect_y           = copy_data.rect_y;
2173 	lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
2174 	lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
2175 
2176 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2177 
2178 	if (!result)
2179 		DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB\n");
2180 
2181 	return result;
2182 }
2183 
2184 bool dmub_lsdma_send_tiled_to_tiled_copy_command(
2185 	struct dc_dmub_srv *dc_dmub_srv,
2186 	struct lsdma_send_tiled_to_tiled_copy_command_params params
2187 )
2188 {
2189 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2190 	union dmub_rb_cmd cmd;
2191 	enum dm_dmub_wait_type wait_type;
2192 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2193 	bool result;
2194 
2195 	memset(&cmd, 0, sizeof(cmd));
2196 
2197 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2198 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY;
2199 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2200 
2201 	lsdma_data->u.tiled_copy_data.src_addr_lo      = params.src_addr & 0xFFFFFFFF;
2202 	lsdma_data->u.tiled_copy_data.src_addr_hi      = (params.src_addr >> 32) & 0xFFFFFFFF;
2203 	lsdma_data->u.tiled_copy_data.dst_addr_lo      = params.dst_addr & 0xFFFFFFFF;
2204 	lsdma_data->u.tiled_copy_data.dst_addr_hi      = (params.dst_addr >> 32) & 0xFFFFFFFF;
2205 	lsdma_data->u.tiled_copy_data.src_x            = params.src_x;
2206 	lsdma_data->u.tiled_copy_data.src_y            = params.src_y;
2207 	lsdma_data->u.tiled_copy_data.dst_x            = params.dst_x;
2208 	lsdma_data->u.tiled_copy_data.dst_y            = params.dst_y;
2209 	lsdma_data->u.tiled_copy_data.src_width        = params.src_width;
2210 	lsdma_data->u.tiled_copy_data.dst_width        = params.dst_width;
2211 	lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
2212 	lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
2213 	lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
2214 	lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size;
2215 	lsdma_data->u.tiled_copy_data.rect_x           = params.rect_x;
2216 	lsdma_data->u.tiled_copy_data.rect_y           = params.rect_y;
2217 	lsdma_data->u.tiled_copy_data.dcc              = params.dcc;
2218 	lsdma_data->u.tiled_copy_data.tmz              = params.tmz;
2219 	lsdma_data->u.tiled_copy_data.read_compress    = params.read_compress;
2220 	lsdma_data->u.tiled_copy_data.write_compress   = params.write_compress;
2221 	lsdma_data->u.tiled_copy_data.src_height       = params.src_height;
2222 	lsdma_data->u.tiled_copy_data.dst_height       = params.dst_height;
2223 	lsdma_data->u.tiled_copy_data.data_format      = params.data_format;
2224 	lsdma_data->u.tiled_copy_data.max_com          = params.max_com;
2225 	lsdma_data->u.tiled_copy_data.max_uncom        = params.max_uncom;
2226 
2227 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2228 
2229 	if (!result)
2230 		DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB\n");
2231 
2232 	return result;
2233 }
2234 
2235 bool dmub_lsdma_send_pio_copy_command(
2236 	struct dc_dmub_srv *dc_dmub_srv,
2237 	uint64_t src_addr,
2238 	uint64_t dst_addr,
2239 	uint32_t byte_count,
2240 	uint32_t overlap_disable
2241 )
2242 {
2243 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2244 	union dmub_rb_cmd cmd;
2245 	enum dm_dmub_wait_type wait_type;
2246 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2247 	bool result;
2248 
2249 	memset(&cmd, 0, sizeof(cmd));
2250 
2251 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2252 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY;
2253 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2254 
2255 	lsdma_data->u.pio_copy_data.packet.fields.byte_count      = byte_count;
2256 	lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable;
2257 	lsdma_data->u.pio_copy_data.src_lo                        = src_addr & 0xFFFFFFFF;
2258 	lsdma_data->u.pio_copy_data.src_hi                        = (src_addr >> 32) & 0xFFFFFFFF;
2259 	lsdma_data->u.pio_copy_data.dst_lo                        = dst_addr & 0xFFFFFFFF;
2260 	lsdma_data->u.pio_copy_data.dst_hi                        = (dst_addr >> 32) & 0xFFFFFFFF;
2261 
2262 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2263 
2264 	if (!result)
2265 		DC_ERROR("LSDMA PIO Copy failed in DMUB\n");
2266 
2267 	return result;
2268 }
2269 
2270 bool dmub_lsdma_send_pio_constfill_command(
2271 	struct dc_dmub_srv *dc_dmub_srv,
2272 	uint64_t dst_addr,
2273 	uint32_t byte_count,
2274 	uint32_t data
2275 )
2276 {
2277 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2278 	union dmub_rb_cmd cmd;
2279 	enum dm_dmub_wait_type wait_type;
2280 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2281 	bool result;
2282 
2283 	memset(&cmd, 0, sizeof(cmd));
2284 
2285 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2286 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL;
2287 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2288 
2289 	lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1;
2290 	lsdma_data->u.pio_constfill_data.packet.fields.byte_count    = byte_count;
2291 	lsdma_data->u.pio_constfill_data.dst_lo                      = dst_addr & 0xFFFFFFFF;
2292 	lsdma_data->u.pio_constfill_data.dst_hi                      = (dst_addr >> 32) & 0xFFFFFFFF;
2293 	lsdma_data->u.pio_constfill_data.data                        = data;
2294 
2295 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2296 
2297 	if (!result)
2298 		DC_ERROR("LSDMA PIO Constfill failed in DMUB\n");
2299 
2300 	return result;
2301 }
2302 
2303 bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data)
2304 {
2305 	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
2306 	union dmub_rb_cmd cmd;
2307 	enum dm_dmub_wait_type wait_type;
2308 	struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
2309 	bool result;
2310 
2311 	memset(&cmd, 0, sizeof(cmd));
2312 
2313 	cmd.cmd_common.header.type     = DMUB_CMD__LSDMA;
2314 	cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE;
2315 	wait_type                      = DM_DMUB_WAIT_TYPE_NO_WAIT;
2316 
2317 	lsdma_data->u.reg_write_data.reg_addr = reg_addr;
2318 	lsdma_data->u.reg_write_data.reg_data = reg_data;
2319 
2320 	result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
2321 
2322 	if (!result)
2323 		DC_ERROR("LSDMA Poll Reg failed in DMUB\n");
2324 
2325 	return result;
2326 }
2327 
2328 bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc)
2329 {
2330 	return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled;
2331 }
2332 
2333 void dc_dmub_srv_release_hw(const struct dc *dc)
2334 {
2335 	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
2336 	union dmub_rb_cmd cmd;
2337 
2338 	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
2339 		return;
2340 
2341 	memset(&cmd, 0, sizeof(cmd));
2342 	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
2343 	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW;
2344 	cmd.idle_opt_notify_idle.header.payload_bytes =
2345 		sizeof(cmd.idle_opt_notify_idle) -
2346 		sizeof(cmd.idle_opt_notify_idle.header);
2347 
2348 	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
2349 }
2350