xref: /linux/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include "../dmub_srv.h"
27 #include "dmub_dcn20.h"
28 #include "dmub_dcn21.h"
29 #include "dmub_cmd.h"
30 #include "dmub_dcn30.h"
31 #include "dmub_dcn301.h"
32 #include "dmub_dcn302.h"
33 #include "dmub_dcn303.h"
34 #include "dmub_dcn31.h"
35 #include "dmub_dcn314.h"
36 #include "dmub_dcn315.h"
37 #include "dmub_dcn316.h"
38 #include "dmub_dcn32.h"
39 #include "dmub_dcn35.h"
40 #include "dmub_dcn351.h"
41 #include "dmub_dcn36.h"
42 #include "dmub_dcn401.h"
43 #include "os_types.h"
44 /*
45  * Note: the DMUB service is standalone. No additional headers should be
46  * added below or above this line unless they reside within the DMUB
47  * folder.
48  */
49 
50 /* Alignment for framebuffer memory. */
51 #define DMUB_FB_ALIGNMENT (1024 * 1024)
52 
53 /* Stack size. */
54 #define DMUB_STACK_SIZE (128 * 1024)
55 
56 /* Context size. */
57 #define DMUB_CONTEXT_SIZE (512 * 1024)
58 
59 /* Mailbox size : Ring buffers are required for both inbox and outbox */
60 #define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE))
61 
62 /* Default state size if meta is absent. */
63 #define DMUB_FW_STATE_SIZE (64 * 1024)
64 
65 /* Default scratch mem size. */
66 #define DMUB_SCRATCH_MEM_SIZE (1024)
67 
68 /* Default indirect buffer size. */
69 #define DMUB_IB_MEM_SIZE (sizeof(struct dmub_fams2_config_v2))
70 
71 /* Default LSDMA ring buffer size. */
72 #define DMUB_LSDMA_RB_SIZE (64 * 1024)
73 
74 /* Number of windows in use. */
75 #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
76 /* Base addresses. */
77 
78 #define DMUB_CW0_BASE (0x60000000)
79 #define DMUB_CW1_BASE (0x61000000)
80 #define DMUB_CW3_BASE (0x63000000)
81 #define DMUB_CW4_BASE (0x64000000)
82 #define DMUB_CW5_BASE (0x65000000)
83 #define DMUB_CW6_BASE (0x66000000)
84 
85 #define DMUB_REGION5_BASE (0xA0000000)
86 #define DMUB_REGION6_BASE (0xC0000000)
87 
88 static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
89 static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
90 
/* Round @val up to the next multiple of @factor (factor must be non-zero). */
static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	uint32_t units = (val + factor - 1) / factor;

	return units * factor;
}
95 
96 void dmub_flush_buffer_mem(const struct dmub_fb *fb)
97 {
98 	const uint8_t *base = (const uint8_t *)fb->cpu_addr;
99 	uint8_t buf[64];
100 	uint32_t pos, end;
101 
102 	/**
103 	 * Read 64-byte chunks since we don't want to store a
104 	 * large temporary buffer for this purpose.
105 	 */
106 	end = fb->size / sizeof(buf) * sizeof(buf);
107 
108 	for (pos = 0; pos < end; pos += sizeof(buf))
109 		dmub_memcpy(buf, base + pos, sizeof(buf));
110 
111 	/* Read anything leftover into the buffer. */
112 	if (end < fb->size)
113 		dmub_memcpy(buf, base + pos, fb->size - end);
114 }
115 
116 static const struct dmub_fw_meta_info *
117 dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)
118 {
119 	const union dmub_fw_meta *meta;
120 
121 	if (!blob || !blob_size)
122 		return NULL;
123 
124 	if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
125 		return NULL;
126 
127 	meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
128 					    sizeof(union dmub_fw_meta));
129 
130 	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
131 		return NULL;
132 
133 	return &meta->info;
134 }
135 
136 static const struct dmub_fw_meta_info *
137 dmub_get_fw_meta_info(const struct dmub_srv_fw_meta_info_params *params)
138 {
139 	const struct dmub_fw_meta_info *info = NULL;
140 
141 	if (params->fw_bss_data && params->bss_data_size) {
142 		/* Legacy metadata region. */
143 		info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data,
144 						       params->bss_data_size,
145 						       DMUB_FW_META_OFFSET);
146 	} else if (params->fw_inst_const && params->inst_const_size) {
147 		/* Combined metadata region - can be aligned to 16-bytes. */
148 		uint32_t i;
149 
150 		for (i = 0; i < 16; ++i) {
151 			info = dmub_get_fw_meta_info_from_blob(
152 				params->fw_inst_const, params->inst_const_size, i);
153 
154 			if (info)
155 				break;
156 		}
157 	}
158 
159 	return info;
160 }
161 
/*
 * Locate the firmware meta info in a raw firmware image by probing several
 * candidate PSP footer sizes in order: the caller-supplied custom size (if
 * any), then 256 bytes, then 512 bytes total.
 *
 * Side effect: params->inst_const_size is mutated during probing.  On success
 * it is left reduced by the footer size that matched; on failure it is set to
 * the original value minus the default 256-byte PSP footer.
 *
 * Returns DMUB_STATUS_OK with the meta copied into @fw_info_out on a match,
 * DMUB_STATUS_INVALID otherwise.
 */
enum dmub_status
dmub_srv_get_fw_meta_info_from_raw_fw(struct dmub_srv_fw_meta_info_params *params,
				      struct dmub_fw_meta_info *fw_info_out)
{
	const struct dmub_fw_meta_info *fw_info = NULL;
	/* Saved so inst_const_size can be restored between probes. */
	uint32_t inst_const_size_temp = params->inst_const_size;

	/* First try custom psp footer size, if present */
	if (params->custom_psp_footer_size) {
		params->inst_const_size -= params->custom_psp_footer_size;
		fw_info = dmub_get_fw_meta_info(params);
		if (fw_info) {
			memcpy(fw_info_out, fw_info, sizeof(*fw_info));
			return DMUB_STATUS_OK;
		}
		/* Custom footer didn't match; restore before the next probe. */
		params->inst_const_size = inst_const_size_temp;
	}

	/* Try 256-byte psp footer size */
	params->inst_const_size -= PSP_FOOTER_BYTES_256;
	fw_info = dmub_get_fw_meta_info(params);
	if (fw_info) {
		memcpy(fw_info_out, fw_info, sizeof(*fw_info));
		return DMUB_STATUS_OK;
	}

	/* Try 512-byte psp footer size - final attempt */
	params->inst_const_size -= PSP_FOOTER_BYTES_256; // 256 bytes already subtracted, subtract 256 again
	fw_info = dmub_get_fw_meta_info(params);
	if (fw_info) {
		memcpy(fw_info_out, fw_info, sizeof(*fw_info));
		return DMUB_STATUS_OK;
	}

	/* Restore original inst_const_size and subtract default PSP footer size - default behaviour */
	params->inst_const_size = inst_const_size_temp - PSP_FOOTER_BYTES_256;

	return DMUB_STATUS_INVALID;
}
201 
202 static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
203 {
204 	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
205 
206 	/* default to specifying now inbox type */
207 	enum dmub_inbox_cmd_interface_type default_inbox_type = DMUB_CMD_INTERFACE_DEFAULT;
208 
209 	switch (asic) {
210 	case DMUB_ASIC_DCN20:
211 	case DMUB_ASIC_DCN21:
212 	case DMUB_ASIC_DCN30:
213 	case DMUB_ASIC_DCN301:
214 	case DMUB_ASIC_DCN302:
215 	case DMUB_ASIC_DCN303:
216 		dmub->regs = &dmub_srv_dcn20_regs;
217 
218 		funcs->reset = dmub_dcn20_reset;
219 		funcs->reset_release = dmub_dcn20_reset_release;
220 		funcs->backdoor_load = dmub_dcn20_backdoor_load;
221 		funcs->setup_windows = dmub_dcn20_setup_windows;
222 		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
223 		funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
224 		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
225 		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
226 		funcs->is_supported = dmub_dcn20_is_supported;
227 		funcs->is_hw_init = dmub_dcn20_is_hw_init;
228 		funcs->set_gpint = dmub_dcn20_set_gpint;
229 		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
230 		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
231 		funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
232 		funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
233 		funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence;
234 		funcs->get_current_time = dmub_dcn20_get_current_time;
235 
236 		// Out mailbox register access functions for RN and above
237 		funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox;
238 		funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr;
239 		funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr;
240 
241 		//outbox0 call stacks
242 		funcs->setup_outbox0 = dmub_dcn20_setup_outbox0;
243 		funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
244 		funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;
245 
246 		funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;
247 
248 		if (asic == DMUB_ASIC_DCN21)
249 			dmub->regs = &dmub_srv_dcn21_regs;
250 
251 		if (asic == DMUB_ASIC_DCN30) {
252 			dmub->regs = &dmub_srv_dcn30_regs;
253 
254 			funcs->backdoor_load = dmub_dcn30_backdoor_load;
255 			funcs->setup_windows = dmub_dcn30_setup_windows;
256 		}
257 		if (asic == DMUB_ASIC_DCN301) {
258 			dmub->regs = &dmub_srv_dcn301_regs;
259 
260 			funcs->backdoor_load = dmub_dcn30_backdoor_load;
261 			funcs->setup_windows = dmub_dcn30_setup_windows;
262 		}
263 		if (asic == DMUB_ASIC_DCN302) {
264 			dmub->regs = &dmub_srv_dcn302_regs;
265 
266 			funcs->backdoor_load = dmub_dcn30_backdoor_load;
267 			funcs->setup_windows = dmub_dcn30_setup_windows;
268 		}
269 		if (asic == DMUB_ASIC_DCN303) {
270 			dmub->regs = &dmub_srv_dcn303_regs;
271 
272 			funcs->backdoor_load = dmub_dcn30_backdoor_load;
273 			funcs->setup_windows = dmub_dcn30_setup_windows;
274 		}
275 		break;
276 
277 	case DMUB_ASIC_DCN31:
278 	case DMUB_ASIC_DCN31B:
279 	case DMUB_ASIC_DCN314:
280 	case DMUB_ASIC_DCN315:
281 	case DMUB_ASIC_DCN316:
282 		if (asic == DMUB_ASIC_DCN314) {
283 			dmub->regs_dcn31 = &dmub_srv_dcn314_regs;
284 			funcs->is_psrsu_supported = dmub_dcn314_is_psrsu_supported;
285 		} else if (asic == DMUB_ASIC_DCN315) {
286 			dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
287 		} else if (asic == DMUB_ASIC_DCN316) {
288 			dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
289 		} else {
290 			dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
291 			funcs->is_psrsu_supported = dmub_dcn31_is_psrsu_supported;
292 		}
293 		funcs->reset = dmub_dcn31_reset;
294 		funcs->reset_release = dmub_dcn31_reset_release;
295 		funcs->backdoor_load = dmub_dcn31_backdoor_load;
296 		funcs->setup_windows = dmub_dcn31_setup_windows;
297 		funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
298 		funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
299 		funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
300 		funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
301 		funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
302 		funcs->get_outbox1_wptr = dmub_dcn31_get_outbox1_wptr;
303 		funcs->set_outbox1_rptr = dmub_dcn31_set_outbox1_rptr;
304 		funcs->is_supported = dmub_dcn31_is_supported;
305 		funcs->is_hw_init = dmub_dcn31_is_hw_init;
306 		funcs->set_gpint = dmub_dcn31_set_gpint;
307 		funcs->is_gpint_acked = dmub_dcn31_is_gpint_acked;
308 		funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
309 		funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;
310 		funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
311 		funcs->get_fw_boot_option = dmub_dcn31_get_fw_boot_option;
312 		funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
313 		funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
314 		//outbox0 call stacks
315 		funcs->setup_outbox0 = dmub_dcn31_setup_outbox0;
316 		funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr;
317 		funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
318 
319 		funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
320 		funcs->should_detect = dmub_dcn31_should_detect;
321 		funcs->get_current_time = dmub_dcn31_get_current_time;
322 
323 		break;
324 
325 	case DMUB_ASIC_DCN32:
326 	case DMUB_ASIC_DCN321:
327 		dmub->regs_dcn32 = &dmub_srv_dcn32_regs;
328 		funcs->configure_dmub_in_system_memory = dmub_dcn32_configure_dmub_in_system_memory;
329 		funcs->send_inbox0_cmd = dmub_dcn32_send_inbox0_cmd;
330 		funcs->clear_inbox0_ack_register = dmub_dcn32_clear_inbox0_ack_register;
331 		funcs->read_inbox0_ack_register = dmub_dcn32_read_inbox0_ack_register;
332 		funcs->subvp_save_surf_addr = dmub_dcn32_save_surf_addr;
333 		funcs->reset = dmub_dcn32_reset;
334 		funcs->reset_release = dmub_dcn32_reset_release;
335 		funcs->backdoor_load = dmub_dcn32_backdoor_load;
336 		funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
337 		funcs->setup_windows = dmub_dcn32_setup_windows;
338 		funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
339 		funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
340 		funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
341 		funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
342 		funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
343 		funcs->get_outbox1_wptr = dmub_dcn32_get_outbox1_wptr;
344 		funcs->set_outbox1_rptr = dmub_dcn32_set_outbox1_rptr;
345 		funcs->is_supported = dmub_dcn32_is_supported;
346 		funcs->is_hw_init = dmub_dcn32_is_hw_init;
347 		funcs->set_gpint = dmub_dcn32_set_gpint;
348 		funcs->is_gpint_acked = dmub_dcn32_is_gpint_acked;
349 		funcs->get_gpint_response = dmub_dcn32_get_gpint_response;
350 		funcs->get_gpint_dataout = dmub_dcn32_get_gpint_dataout;
351 		funcs->get_fw_status = dmub_dcn32_get_fw_boot_status;
352 		funcs->enable_dmub_boot_options = dmub_dcn32_enable_dmub_boot_options;
353 		funcs->skip_dmub_panel_power_sequence = dmub_dcn32_skip_dmub_panel_power_sequence;
354 
355 		/* outbox0 call stacks */
356 		funcs->setup_outbox0 = dmub_dcn32_setup_outbox0;
357 		funcs->get_outbox0_wptr = dmub_dcn32_get_outbox0_wptr;
358 		funcs->set_outbox0_rptr = dmub_dcn32_set_outbox0_rptr;
359 		funcs->get_current_time = dmub_dcn32_get_current_time;
360 		funcs->get_diagnostic_data = dmub_dcn32_get_diagnostic_data;
361 		funcs->init_reg_offsets = dmub_srv_dcn32_regs_init;
362 
363 		break;
364 
365 	case DMUB_ASIC_DCN35:
366 	case DMUB_ASIC_DCN351:
367 	case DMUB_ASIC_DCN36:
368 			dmub->regs_dcn35 = &dmub_srv_dcn35_regs;
369 			funcs->configure_dmub_in_system_memory = dmub_dcn35_configure_dmub_in_system_memory;
370 			funcs->send_inbox0_cmd = dmub_dcn35_send_inbox0_cmd;
371 			funcs->clear_inbox0_ack_register = dmub_dcn35_clear_inbox0_ack_register;
372 			funcs->read_inbox0_ack_register = dmub_dcn35_read_inbox0_ack_register;
373 			funcs->reset = dmub_dcn35_reset;
374 			funcs->reset_release = dmub_dcn35_reset_release;
375 			funcs->backdoor_load = dmub_dcn35_backdoor_load;
376 			funcs->backdoor_load_zfb_mode = dmub_dcn35_backdoor_load_zfb_mode;
377 			funcs->setup_windows = dmub_dcn35_setup_windows;
378 			funcs->setup_mailbox = dmub_dcn35_setup_mailbox;
379 			funcs->get_inbox1_wptr = dmub_dcn35_get_inbox1_wptr;
380 			funcs->get_inbox1_rptr = dmub_dcn35_get_inbox1_rptr;
381 			funcs->set_inbox1_wptr = dmub_dcn35_set_inbox1_wptr;
382 			funcs->setup_out_mailbox = dmub_dcn35_setup_out_mailbox;
383 			funcs->get_outbox1_wptr = dmub_dcn35_get_outbox1_wptr;
384 			funcs->set_outbox1_rptr = dmub_dcn35_set_outbox1_rptr;
385 			funcs->is_supported = dmub_dcn35_is_supported;
386 			funcs->is_hw_init = dmub_dcn35_is_hw_init;
387 			funcs->set_gpint = dmub_dcn35_set_gpint;
388 			funcs->is_gpint_acked = dmub_dcn35_is_gpint_acked;
389 			funcs->get_gpint_response = dmub_dcn35_get_gpint_response;
390 			funcs->get_gpint_dataout = dmub_dcn35_get_gpint_dataout;
391 			funcs->get_fw_status = dmub_dcn35_get_fw_boot_status;
392 			funcs->get_fw_boot_option = dmub_dcn35_get_fw_boot_option;
393 			funcs->enable_dmub_boot_options = dmub_dcn35_enable_dmub_boot_options;
394 			funcs->skip_dmub_panel_power_sequence = dmub_dcn35_skip_dmub_panel_power_sequence;
395 			//outbox0 call stacks
396 			funcs->setup_outbox0 = dmub_dcn35_setup_outbox0;
397 			funcs->get_outbox0_wptr = dmub_dcn35_get_outbox0_wptr;
398 			funcs->set_outbox0_rptr = dmub_dcn35_set_outbox0_rptr;
399 
400 			funcs->get_current_time = dmub_dcn35_get_current_time;
401 			funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
402 			funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info;
403 
404 			funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
405 			if (asic == DMUB_ASIC_DCN351)
406 				funcs->init_reg_offsets = dmub_srv_dcn351_regs_init;
407 			if (asic == DMUB_ASIC_DCN36)
408 				funcs->init_reg_offsets = dmub_srv_dcn36_regs_init;
409 
410 			funcs->is_hw_powered_up = dmub_dcn35_is_hw_powered_up;
411 			funcs->should_detect = dmub_dcn35_should_detect;
412 			break;
413 
414 	case DMUB_ASIC_DCN401:
415 		dmub->regs_dcn401 = &dmub_srv_dcn401_regs;
416 		funcs->configure_dmub_in_system_memory = dmub_dcn401_configure_dmub_in_system_memory;
417 		funcs->send_inbox0_cmd = dmub_dcn401_send_inbox0_cmd;
418 		funcs->clear_inbox0_ack_register = dmub_dcn401_clear_inbox0_ack_register;
419 		funcs->read_inbox0_ack_register = dmub_dcn401_read_inbox0_ack_register;
420 		funcs->reset = dmub_dcn401_reset;
421 		funcs->reset_release = dmub_dcn401_reset_release;
422 		funcs->backdoor_load = dmub_dcn401_backdoor_load;
423 		funcs->backdoor_load_zfb_mode = dmub_dcn401_backdoor_load_zfb_mode;
424 		funcs->setup_windows = dmub_dcn401_setup_windows;
425 		funcs->setup_mailbox = dmub_dcn401_setup_mailbox;
426 		funcs->get_inbox1_wptr = dmub_dcn401_get_inbox1_wptr;
427 		funcs->get_inbox1_rptr = dmub_dcn401_get_inbox1_rptr;
428 		funcs->set_inbox1_wptr = dmub_dcn401_set_inbox1_wptr;
429 		funcs->setup_out_mailbox = dmub_dcn401_setup_out_mailbox;
430 		funcs->get_outbox1_wptr = dmub_dcn401_get_outbox1_wptr;
431 		funcs->set_outbox1_rptr = dmub_dcn401_set_outbox1_rptr;
432 		funcs->is_supported = dmub_dcn401_is_supported;
433 		funcs->is_hw_init = dmub_dcn401_is_hw_init;
434 		funcs->set_gpint = dmub_dcn401_set_gpint;
435 		funcs->is_gpint_acked = dmub_dcn401_is_gpint_acked;
436 		funcs->get_gpint_response = dmub_dcn401_get_gpint_response;
437 		funcs->get_gpint_dataout = dmub_dcn401_get_gpint_dataout;
438 		funcs->get_fw_status = dmub_dcn401_get_fw_boot_status;
439 		funcs->enable_dmub_boot_options = dmub_dcn401_enable_dmub_boot_options;
440 		funcs->skip_dmub_panel_power_sequence = dmub_dcn401_skip_dmub_panel_power_sequence;
441 		//outbox0 call stacks
442 		funcs->setup_outbox0 = dmub_dcn401_setup_outbox0;
443 		funcs->get_outbox0_wptr = dmub_dcn401_get_outbox0_wptr;
444 		funcs->set_outbox0_rptr = dmub_dcn401_set_outbox0_rptr;
445 
446 		funcs->get_current_time = dmub_dcn401_get_current_time;
447 		funcs->get_diagnostic_data = dmub_dcn401_get_diagnostic_data;
448 
449 		funcs->send_reg_inbox0_cmd_msg = dmub_dcn401_send_reg_inbox0_cmd_msg;
450 		funcs->read_reg_inbox0_rsp_int_status = dmub_dcn401_read_reg_inbox0_rsp_int_status;
451 		funcs->read_reg_inbox0_cmd_rsp = dmub_dcn401_read_reg_inbox0_cmd_rsp;
452 		funcs->write_reg_inbox0_rsp_int_ack = dmub_dcn401_write_reg_inbox0_rsp_int_ack;
453 		funcs->clear_reg_inbox0_rsp_int_ack = dmub_dcn401_clear_reg_inbox0_rsp_int_ack;
454 		funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
455 		default_inbox_type = DMUB_CMD_INTERFACE_FB; // still default to FB for now
456 
457 		funcs->write_reg_outbox0_rdy_int_ack = dmub_dcn401_write_reg_outbox0_rdy_int_ack;
458 		funcs->read_reg_outbox0_msg = dmub_dcn401_read_reg_outbox0_msg;
459 		funcs->write_reg_outbox0_rsp = dmub_dcn401_write_reg_outbox0_rsp;
460 		funcs->read_reg_outbox0_rdy_int_status = dmub_dcn401_read_reg_outbox0_rdy_int_status;
461 		funcs->read_reg_outbox0_rsp_int_status = dmub_dcn401_read_reg_outbox0_rsp_int_status;
462 		funcs->enable_reg_inbox0_rsp_int = dmub_dcn401_enable_reg_inbox0_rsp_int;
463 		funcs->enable_reg_outbox0_rdy_int = dmub_dcn401_enable_reg_outbox0_rdy_int;
464 		break;
465 	default:
466 		return false;
467 	}
468 
469 	/* set default inbox type if not overriden */
470 	if (dmub->inbox_type == DMUB_CMD_INTERFACE_DEFAULT) {
471 		if (default_inbox_type != DMUB_CMD_INTERFACE_DEFAULT) {
472 			/* use default inbox type as specified by DCN rev */
473 			dmub->inbox_type = default_inbox_type;
474 		} else if (funcs->send_reg_inbox0_cmd_msg) {
475 			/* prefer reg as default inbox type if present */
476 			dmub->inbox_type = DMUB_CMD_INTERFACE_REG;
477 		} else {
478 			/* use fb as fallback */
479 			dmub->inbox_type = DMUB_CMD_INTERFACE_FB;
480 		}
481 	}
482 
483 	return true;
484 }
485 
486 enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
487 				 const struct dmub_srv_create_params *params)
488 {
489 	enum dmub_status status = DMUB_STATUS_OK;
490 
491 	dmub_memset(dmub, 0, sizeof(*dmub));
492 
493 	dmub->funcs = params->funcs;
494 	dmub->user_ctx = params->user_ctx;
495 	dmub->asic = params->asic;
496 	dmub->fw_version = params->fw_version;
497 	dmub->is_virtual = params->is_virtual;
498 	dmub->inbox_type = params->inbox_type;
499 
500 	/* Setup asic dependent hardware funcs. */
501 	if (!dmub_srv_hw_setup(dmub, params->asic)) {
502 		status = DMUB_STATUS_INVALID;
503 		goto cleanup;
504 	}
505 
506 	/* Override (some) hardware funcs based on user params. */
507 	if (params->hw_funcs) {
508 		if (params->hw_funcs->emul_get_inbox1_rptr)
509 			dmub->hw_funcs.emul_get_inbox1_rptr =
510 				params->hw_funcs->emul_get_inbox1_rptr;
511 
512 		if (params->hw_funcs->emul_set_inbox1_wptr)
513 			dmub->hw_funcs.emul_set_inbox1_wptr =
514 				params->hw_funcs->emul_set_inbox1_wptr;
515 
516 		if (params->hw_funcs->is_supported)
517 			dmub->hw_funcs.is_supported =
518 				params->hw_funcs->is_supported;
519 	}
520 
521 	/* Sanity checks for required hw func pointers. */
522 	if (!dmub->hw_funcs.get_inbox1_rptr ||
523 	    !dmub->hw_funcs.set_inbox1_wptr) {
524 		status = DMUB_STATUS_INVALID;
525 		goto cleanup;
526 	}
527 
528 cleanup:
529 	if (status == DMUB_STATUS_OK)
530 		dmub->sw_init = true;
531 	else
532 		dmub_srv_destroy(dmub);
533 
534 	return status;
535 }
536 
/* Tear down a DMUB service instance by zeroing all of its software state. */
void dmub_srv_destroy(struct dmub_srv *dmub)
{
	dmub_memset(dmub, 0, sizeof(*dmub));
}
541 
542 static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
543 	struct dmub_srv_region_info *out,
544 	const uint32_t *window_sizes,
545 	enum dmub_window_memory_type memory_type)
546 {
547 	uint32_t i, top = 0;
548 
549 	for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
550 		if (params->window_memory_type[i] == memory_type) {
551 			struct dmub_region *region = &out->regions[i];
552 
553 			region->base = dmub_align(top, 256);
554 			region->top = region->base + dmub_align(window_sizes[i], 64);
555 			top = region->top;
556 		}
557 	}
558 
559 	return dmub_align(top, 4096);
560 }
561 
562 enum dmub_status
563 	dmub_srv_calc_region_info(struct dmub_srv *dmub,
564 		const struct dmub_srv_region_params *params,
565 		struct dmub_srv_region_info *out)
566 {
567 	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
568 	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
569 	uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE;
570 	uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
571 
572 	if (!dmub->sw_init)
573 		return DMUB_STATUS_INVALID;
574 
575 	memset(out, 0, sizeof(*out));
576 	memset(window_sizes, 0, sizeof(window_sizes));
577 
578 	out->num_regions = DMUB_NUM_WINDOWS;
579 
580 	if (params->fw_info) {
581 		memcpy(&dmub->meta_info, params->fw_info, sizeof(*params->fw_info));
582 
583 		fw_state_size = params->fw_info->fw_region_size;
584 		trace_buffer_size = params->fw_info->trace_buffer_size;
585 		shared_state_size = params->fw_info->shared_state_size;
586 
587 		/**
588 		 * If DM didn't fill in a version, then fill it in based on
589 		 * the firmware meta now that we have it.
590 		 *
591 		 * TODO: Make it easier for driver to extract this out to
592 		 * pass during creation.
593 		 */
594 		if (dmub->fw_version == 0)
595 			dmub->fw_version = params->fw_info->fw_version;
596 	}
597 
598 	window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
599 	window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
600 	window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
601 	window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
602 	window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
603 	window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
604 	window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
605 	window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64);
606 	window_sizes[DMUB_WINDOW_IB_MEM] = dmub_align(DMUB_IB_MEM_SIZE, 64);
607 	window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
608 	window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
609 	window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64);
610 
611 	out->fb_size =
612 		dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
613 
614 	out->gart_size =
615 		dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
616 
617 	return DMUB_STATUS_OK;
618 }
619 
620 enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
621 				       const struct dmub_srv_memory_params *params,
622 				       struct dmub_srv_fb_info *out)
623 {
624 	uint32_t i;
625 
626 	if (!dmub->sw_init)
627 		return DMUB_STATUS_INVALID;
628 
629 	memset(out, 0, sizeof(*out));
630 
631 	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
632 		return DMUB_STATUS_INVALID;
633 
634 	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
635 		const struct dmub_region *reg =
636 			&params->region_info->regions[i];
637 
638 		if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
639 			out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
640 			out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
641 		} else {
642 			out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
643 			out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
644 		}
645 
646 		out->fb[i].size = reg->top - reg->base;
647 	}
648 
649 	out->num_fb = DMUB_NUM_WINDOWS;
650 
651 	return DMUB_STATUS_OK;
652 }
653 
654 enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
655 					 bool *is_supported)
656 {
657 	*is_supported = false;
658 
659 	if (!dmub->sw_init)
660 		return DMUB_STATUS_INVALID;
661 
662 	if (dmub->hw_funcs.is_supported)
663 		*is_supported = dmub->hw_funcs.is_supported(dmub);
664 
665 	return DMUB_STATUS_OK;
666 }
667 
668 enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
669 {
670 	*is_hw_init = false;
671 
672 	if (!dmub->sw_init)
673 		return DMUB_STATUS_INVALID;
674 
675 	if (!dmub->hw_init)
676 		return DMUB_STATUS_OK;
677 
678 	if (dmub->hw_funcs.is_hw_init)
679 		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);
680 
681 	return DMUB_STATUS_OK;
682 }
683 
/*
 * Bring up the DMUB hardware: reset the controller, optionally backdoor-load
 * the firmware, program the cache windows and regions, set up all mailboxes
 * and ring buffers, then release reset.  The ordering of these steps is
 * significant and must not be rearranged.
 *
 * Requires that dmub_srv_create() has succeeded and that every entry of
 * params->fb[] is populated.  Returns DMUB_STATUS_INVALID otherwise.
 */
enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params)
{
	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
	struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];

	struct dmub_rb_init_params rb_params, outbox0_rb_params;
	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
	struct dmub_region inbox1, outbox1, outbox0;

	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	/* Every window must have a framebuffer before hardware setup begins. */
	for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
		if (!params->fb[i]) {
			ASSERT(0);
			return DMUB_STATUS_INVALID;
		}
	}

	memcpy(&dmub->soc_fb_info, &params->soc_fb_info, sizeof(params->soc_fb_info));
	dmub->psp_version = params->psp_version;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	/* reset the cache of the last wptr as well now that hw is reset */
	dmub->inbox1_last_wptr = 0;

	/*
	 * NOTE(review): cw0/cw1 use an inclusive top (size - 1) while the
	 * windows below use base + size; presumably intentional per-window
	 * register semantics — confirm before changing.
	 */
	cw0.offset.quad_part = inst_fb->gpu_addr;
	cw0.region.base = DMUB_CW0_BASE;
	cw0.region.top = cw0.region.base + inst_fb->size - 1;

	cw1.offset.quad_part = stack_fb->gpu_addr;
	cw1.region.base = DMUB_CW1_BASE;
	cw1.region.top = cw1.region.base + stack_fb->size - 1;

	if (params->fw_in_system_memory && dmub->hw_funcs.configure_dmub_in_system_memory)
		dmub->hw_funcs.configure_dmub_in_system_memory(dmub);

	if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
		/**
		 * Read back all the instruction memory so we don't hang the
		 * DMCUB when backdoor loading if the write from x86 hasn't been
		 * flushed yet. This only occurs in backdoor loading.
		 */
		if (params->mem_access_type == DMUB_MEMORY_ACCESS_CPU)
			dmub_flush_buffer_mem(inst_fb)
;
		if (params->fw_in_system_memory && dmub->hw_funcs.backdoor_load_zfb_mode)
			dmub->hw_funcs.backdoor_load_zfb_mode(dmub, &cw0, &cw1);
		else
			dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
	}

	/* Data window sits immediately after the instruction window in CW0. */
	cw2.offset.quad_part = data_fb->gpu_addr;
	cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
	cw2.region.top = cw2.region.base + data_fb->size;

	cw3.offset.quad_part = bios_fb->gpu_addr;
	cw3.region.base = DMUB_CW3_BASE;
	cw3.region.top = cw3.region.base + bios_fb->size;

	cw4.offset.quad_part = mail_fb->gpu_addr;
	cw4.region.base = DMUB_CW4_BASE;
	cw4.region.top = cw4.region.base + mail_fb->size;

	/**
	 * Doubled the mailbox region to accommodate inbox and outbox.
	 * Note: Currently, total mailbox size is 16KB. It is split
	 * equally into 8KB between inbox and outbox. If this config is
	 * changed, then uncached base address configuration of outbox1
	 * has to be updated in funcs->setup_out_mailbox.
	 */
	inbox1.base = cw4.region.base;
	inbox1.top = cw4.region.base + DMUB_RB_SIZE;
	outbox1.base = inbox1.top;
	outbox1.top = inbox1.top + DMUB_RB_SIZE;

	cw5.offset.quad_part = tracebuff_fb->gpu_addr;
	cw5.region.base = DMUB_CW5_BASE;
	cw5.region.top = cw5.region.base + tracebuff_fb->size;

	/* outbox0 shares the trace buffer window, past the entry header. */
	outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
	outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;

	cw6.offset.quad_part = fw_state_fb->gpu_addr;
	cw6.region.base = DMUB_CW6_BASE;
	cw6.region.top = cw6.region.base + fw_state_fb->size;

	dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);

	region6.offset.quad_part = shared_state_fb->gpu_addr;
	region6.region.base = DMUB_CW6_BASE;
	region6.region.top = region6.region.base + shared_state_fb->size;

	dmub->shared_state = shared_state_fb->cpu_addr;

	dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
	dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM];

	dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD];
	dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr;

	if (dmub->hw_funcs.setup_windows)
		dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);

	if (dmub->hw_funcs.setup_outbox0)
		dmub->hw_funcs.setup_outbox0(dmub, &outbox0);

	if (dmub->hw_funcs.setup_mailbox)
		dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
	if (dmub->hw_funcs.setup_out_mailbox)
		dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
	if (dmub->hw_funcs.enable_reg_inbox0_rsp_int)
		dmub->hw_funcs.enable_reg_inbox0_rsp_int(dmub, true);
	if (dmub->hw_funcs.enable_reg_outbox0_rdy_int)
		dmub->hw_funcs.enable_reg_outbox0_rdy_int(dmub, true);

	/* Initialize the inbox1 ring buffer over the first half of CW4. */
	dmub_memset(&rb_params, 0, sizeof(rb_params));
	rb_params.ctx = dmub;
	rb_params.base_address = mail_fb->cpu_addr;
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->inbox1.rb, &rb_params);

	// Initialize outbox1 ring buffer
	rb_params.ctx = dmub;
	rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->outbox1_rb, &rb_params);

	/* outbox0 ring lives in the trace buffer, past its entry header. */
	dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
	outbox0_rb_params.ctx = dmub;
	outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
	outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
	dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);

	/* Report to DMUB what features are supported by current driver */
	if (dmub->hw_funcs.enable_dmub_boot_options)
		dmub->hw_funcs.enable_dmub_boot_options(dmub, params);

	if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
		dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub,
			params->skip_panel_power_sequence);

	/* All windows/mailboxes programmed; let the firmware start running. */
	if (dmub->hw_funcs.reset_release && !dmub->is_virtual)
		dmub->hw_funcs.reset_release(dmub);

	dmub->hw_init = true;
	dmub->power_state = DMUB_POWER_STATE_D0;

	return DMUB_STATUS_OK;
}
845 
846 enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
847 {
848 	if (!dmub->sw_init)
849 		return DMUB_STATUS_INVALID;
850 
851 	if (dmub->hw_funcs.reset)
852 		dmub->hw_funcs.reset(dmub);
853 
854 	/* mailboxes have been reset in hw, so reset the sw state as well */
855 	dmub->inbox1_last_wptr = 0;
856 	dmub->inbox1.rb.wrpt = 0;
857 	dmub->inbox1.rb.rptr = 0;
858 	dmub->inbox1.num_reported = 0;
859 	dmub->inbox1.num_submitted = 0;
860 	dmub->reg_inbox0.num_reported = 0;
861 	dmub->reg_inbox0.num_submitted = 0;
862 	dmub->reg_inbox0.is_pending = 0;
863 	dmub->outbox0_rb.wrpt = 0;
864 	dmub->outbox0_rb.rptr = 0;
865 	dmub->outbox1_rb.wrpt = 0;
866 	dmub->outbox1_rb.rptr = 0;
867 
868 	dmub->hw_init = false;
869 
870 	return DMUB_STATUS_OK;
871 }
872 
873 enum dmub_status dmub_srv_fb_cmd_queue(struct dmub_srv *dmub,
874 				    const union dmub_rb_cmd *cmd)
875 {
876 	if (!dmub->hw_init)
877 		return DMUB_STATUS_INVALID;
878 
879 	if (dmub->power_state != DMUB_POWER_STATE_D0)
880 		return DMUB_STATUS_POWER_STATE_D3;
881 
882 	if (dmub->inbox1.rb.rptr > dmub->inbox1.rb.capacity ||
883 	    dmub->inbox1.rb.wrpt > dmub->inbox1.rb.capacity) {
884 		return DMUB_STATUS_HW_FAILURE;
885 	}
886 
887 	if (dmub_rb_push_front(&dmub->inbox1.rb, cmd)) {
888 		dmub->inbox1.num_submitted++;
889 		return DMUB_STATUS_OK;
890 	}
891 
892 	return DMUB_STATUS_QUEUE_FULL;
893 }
894 
895 enum dmub_status dmub_srv_fb_cmd_execute(struct dmub_srv *dmub)
896 {
897 	struct dmub_rb flush_rb;
898 
899 	if (!dmub->hw_init)
900 		return DMUB_STATUS_INVALID;
901 
902 	if (dmub->power_state != DMUB_POWER_STATE_D0)
903 		return DMUB_STATUS_POWER_STATE_D3;
904 
905 	/**
906 	 * Read back all the queued commands to ensure that they've
907 	 * been flushed to framebuffer memory. Otherwise DMCUB might
908 	 * read back stale, fully invalid or partially invalid data.
909 	 */
910 	flush_rb = dmub->inbox1.rb;
911 	flush_rb.rptr = dmub->inbox1_last_wptr;
912 	dmub_rb_flush_pending(&flush_rb);
913 
914 		dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1.rb.wrpt);
915 
916 	dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
917 
918 	return DMUB_STATUS_OK;
919 }
920 
921 bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub)
922 {
923 	if (!dmub->hw_funcs.is_hw_powered_up)
924 		return true;
925 
926 	if (!dmub->hw_funcs.is_hw_powered_up(dmub))
927 		return false;
928 
929 	return true;
930 }
931 
932 enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
933 					     uint32_t timeout_us)
934 {
935 	uint32_t i;
936 
937 	if (!dmub->hw_init)
938 		return DMUB_STATUS_INVALID;
939 
940 	for (i = 0; i <= timeout_us; i += 100) {
941 		if (dmub_srv_is_hw_pwr_up(dmub))
942 			return DMUB_STATUS_OK;
943 
944 		udelay(100);
945 	}
946 
947 	return DMUB_STATUS_TIMEOUT;
948 }
949 
950 enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
951 					     uint32_t timeout_us)
952 {
953 	uint32_t i;
954 	bool hw_on = true;
955 
956 	if (!dmub->hw_init)
957 		return DMUB_STATUS_INVALID;
958 
959 	for (i = 0; i <= timeout_us; i += 100) {
960 		union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);
961 
962 		if (dmub->hw_funcs.is_hw_powered_up)
963 			hw_on = dmub->hw_funcs.is_hw_powered_up(dmub);
964 
965 		if (status.bits.dal_fw && status.bits.mailbox_rdy && hw_on)
966 			return DMUB_STATUS_OK;
967 
968 		udelay(100);
969 	}
970 
971 	return DMUB_STATUS_TIMEOUT;
972 }
973 
974 static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub)
975 {
976 	if (dmub->reg_inbox0.is_pending) {
977 		dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
978 				!dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
979 
980 		if (!dmub->reg_inbox0.is_pending) {
981 			/* ack the rsp interrupt */
982 			if (dmub->hw_funcs.write_reg_inbox0_rsp_int_ack)
983 				dmub->hw_funcs.write_reg_inbox0_rsp_int_ack(dmub);
984 
985 			/* only update the reported count if commands aren't being batched */
986 			if (!dmub->reg_inbox0.is_pending && !dmub->reg_inbox0.is_multi_pending) {
987 				dmub->reg_inbox0.num_reported = dmub->reg_inbox0.num_submitted;
988 			}
989 		}
990 	}
991 }
992 
993 enum dmub_status dmub_srv_wait_for_pending(struct dmub_srv *dmub,
994 					uint32_t timeout_us)
995 {
996 	uint32_t i;
997 	const uint32_t polling_interval_us = 1;
998 	struct dmub_srv_inbox scratch_reg_inbox0 = dmub->reg_inbox0;
999 	struct dmub_srv_inbox scratch_inbox1 = dmub->inbox1;
1000 	const volatile struct dmub_srv_inbox *reg_inbox0 = &dmub->reg_inbox0;
1001 	const volatile struct dmub_srv_inbox *inbox1 = &dmub->inbox1;
1002 
1003 	if (!dmub->hw_init ||
1004 			!dmub->hw_funcs.get_inbox1_wptr)
1005 		return DMUB_STATUS_INVALID;
1006 
1007 	for (i = 0; i <= timeout_us; i += polling_interval_us) {
1008 			scratch_inbox1.rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
1009 			scratch_inbox1.rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
1010 
1011 		scratch_reg_inbox0.is_pending = scratch_reg_inbox0.is_pending &&
1012 				dmub->hw_funcs.read_reg_inbox0_rsp_int_status &&
1013 				!dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub);
1014 
1015 		if (scratch_inbox1.rb.rptr > dmub->inbox1.rb.capacity)
1016 			return DMUB_STATUS_HW_FAILURE;
1017 
1018 		/* check current HW state first, but use command submission vs reported as a fallback */
1019 		if ((dmub_rb_empty(&scratch_inbox1.rb) ||
1020 				inbox1->num_reported >= scratch_inbox1.num_submitted) &&
1021 				(!scratch_reg_inbox0.is_pending ||
1022 				reg_inbox0->num_reported >= scratch_reg_inbox0.num_submitted))
1023 			return DMUB_STATUS_OK;
1024 
1025 		udelay(polling_interval_us);
1026 	}
1027 
1028 	return DMUB_STATUS_TIMEOUT;
1029 }
1030 
1031 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
1032 					uint32_t timeout_us)
1033 {
1034 	enum dmub_status status;
1035 	uint32_t i;
1036 	const uint32_t polling_interval_us = 1;
1037 
1038 	if (!dmub->hw_init)
1039 		return DMUB_STATUS_INVALID;
1040 
1041 	for (i = 0; i < timeout_us; i += polling_interval_us) {
1042 		status = dmub_srv_update_inbox_status(dmub);
1043 
1044 		if (status != DMUB_STATUS_OK)
1045 			return status;
1046 
1047 		/* check for idle */
1048 		if (dmub_rb_empty(&dmub->inbox1.rb) && !dmub->reg_inbox0.is_pending)
1049 			return DMUB_STATUS_OK;
1050 
1051 		udelay(polling_interval_us);
1052 	}
1053 
1054 	return DMUB_STATUS_TIMEOUT;
1055 }
1056 
1057 enum dmub_status
1058 dmub_srv_send_gpint_command(struct dmub_srv *dmub,
1059 			    enum dmub_gpint_command command_code,
1060 			    uint16_t param, uint32_t timeout_us)
1061 {
1062 	union dmub_gpint_data_register reg;
1063 	uint32_t i;
1064 
1065 	if (!dmub->sw_init)
1066 		return DMUB_STATUS_INVALID;
1067 
1068 	if (!dmub->hw_funcs.set_gpint)
1069 		return DMUB_STATUS_INVALID;
1070 
1071 	if (!dmub->hw_funcs.is_gpint_acked)
1072 		return DMUB_STATUS_INVALID;
1073 
1074 	reg.bits.status = 1;
1075 	reg.bits.command_code = command_code;
1076 	reg.bits.param = param;
1077 
1078 	dmub->hw_funcs.set_gpint(dmub, reg);
1079 
1080 	for (i = 0; i < timeout_us; ++i) {
1081 		udelay(1);
1082 
1083 		if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
1084 			return DMUB_STATUS_OK;
1085 	}
1086 
1087 	return DMUB_STATUS_TIMEOUT;
1088 }
1089 
1090 enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
1091 					     uint32_t *response)
1092 {
1093 	*response = 0;
1094 
1095 	if (!dmub->sw_init)
1096 		return DMUB_STATUS_INVALID;
1097 
1098 	if (!dmub->hw_funcs.get_gpint_response)
1099 		return DMUB_STATUS_INVALID;
1100 
1101 	*response = dmub->hw_funcs.get_gpint_response(dmub);
1102 
1103 	return DMUB_STATUS_OK;
1104 }
1105 
1106 enum dmub_status dmub_srv_get_gpint_dataout(struct dmub_srv *dmub,
1107 					     uint32_t *dataout)
1108 {
1109 	*dataout = 0;
1110 
1111 	if (!dmub->sw_init)
1112 		return DMUB_STATUS_INVALID;
1113 
1114 	if (!dmub->hw_funcs.get_gpint_dataout)
1115 		return DMUB_STATUS_INVALID;
1116 
1117 	*dataout = dmub->hw_funcs.get_gpint_dataout(dmub);
1118 
1119 	return DMUB_STATUS_OK;
1120 }
1121 
1122 enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
1123 					     union dmub_fw_boot_status *status)
1124 {
1125 	status->all = 0;
1126 
1127 	if (!dmub->sw_init)
1128 		return DMUB_STATUS_INVALID;
1129 
1130 	if (dmub->hw_funcs.get_fw_status)
1131 		*status = dmub->hw_funcs.get_fw_status(dmub);
1132 
1133 	return DMUB_STATUS_OK;
1134 }
1135 
1136 enum dmub_status dmub_srv_get_fw_boot_option(struct dmub_srv *dmub,
1137 					     union dmub_fw_boot_options *option)
1138 {
1139 	option->all = 0;
1140 
1141 	if (!dmub->sw_init)
1142 		return DMUB_STATUS_INVALID;
1143 
1144 	if (dmub->hw_funcs.get_fw_boot_option)
1145 		*option = dmub->hw_funcs.get_fw_boot_option(dmub);
1146 
1147 	return DMUB_STATUS_OK;
1148 }
1149 
1150 enum dmub_status dmub_srv_set_skip_panel_power_sequence(struct dmub_srv *dmub,
1151 					     bool skip)
1152 {
1153 	if (!dmub->sw_init)
1154 		return DMUB_STATUS_INVALID;
1155 
1156 	if (dmub->hw_funcs.skip_dmub_panel_power_sequence && !dmub->is_virtual)
1157 		dmub->hw_funcs.skip_dmub_panel_power_sequence(dmub, skip);
1158 
1159 	return DMUB_STATUS_OK;
1160 }
1161 
1162 static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
1163 				 void *entry)
1164 {
1165 	const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
1166 	uint64_t *dst = (uint64_t *)entry;
1167 	uint8_t i;
1168 	uint8_t loop_count;
1169 
1170 	if (rb->rptr == rb->wrpt)
1171 		return false;
1172 
1173 	loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t);
1174 	// copying data
1175 	for (i = 0; i < loop_count; i++)
1176 		*dst++ = *src++;
1177 
1178 	rb->rptr += sizeof(struct dmcub_trace_buf_entry);
1179 
1180 	rb->rptr %= rb->capacity;
1181 
1182 	return true;
1183 }
1184 
1185 bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry)
1186 {
1187 	dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub);
1188 
1189 	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
1190 }
1191 
1192 bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub)
1193 {
1194 	if (!dmub || !dmub->hw_funcs.get_diagnostic_data)
1195 		return false;
1196 	dmub->hw_funcs.get_diagnostic_data(dmub);
1197 	return true;
1198 }
1199 
1200 bool dmub_srv_should_detect(struct dmub_srv *dmub)
1201 {
1202 	if (!dmub->hw_init || !dmub->hw_funcs.should_detect)
1203 		return false;
1204 
1205 	return dmub->hw_funcs.should_detect(dmub);
1206 }
1207 
1208 enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
1209 {
1210 	if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
1211 		return DMUB_STATUS_INVALID;
1212 
1213 	dmub->hw_funcs.clear_inbox0_ack_register(dmub);
1214 	return DMUB_STATUS_OK;
1215 }
1216 
1217 enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
1218 {
1219 	uint32_t i = 0;
1220 	uint32_t ack = 0;
1221 
1222 	if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
1223 		return DMUB_STATUS_INVALID;
1224 
1225 	for (i = 0; i <= timeout_us; i++) {
1226 		ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
1227 		if (ack)
1228 			return DMUB_STATUS_OK;
1229 		udelay(1);
1230 	}
1231 	return DMUB_STATUS_TIMEOUT;
1232 }
1233 
1234 enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
1235 		union dmub_inbox0_data_register data)
1236 {
1237 	if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
1238 		return DMUB_STATUS_INVALID;
1239 
1240 	dmub->hw_funcs.send_inbox0_cmd(dmub, data);
1241 	return DMUB_STATUS_OK;
1242 }
1243 
1244 void dmub_srv_subvp_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index)
1245 {
1246 	if (dmub->hw_funcs.subvp_save_surf_addr) {
1247 		dmub->hw_funcs.subvp_save_surf_addr(dmub,
1248 				addr,
1249 				subvp_index);
1250 	}
1251 }
1252 
1253 void dmub_srv_set_power_state(struct dmub_srv *dmub, enum dmub_srv_power_state_type dmub_srv_power_state)
1254 {
1255 	if (!dmub || !dmub->hw_init)
1256 		return;
1257 
1258 	dmub->power_state = dmub_srv_power_state;
1259 }
1260 
/**
 * dmub_srv_reg_cmd_execute() - send a command through the register inbox0
 * @dmub: the dmub service
 * @cmd: the command to send
 *
 * Sends @cmd via the register-based inbox0 interface while tracking the
 * number of outstanding commands so the queue depth never exceeds
 * DMUB_REG_INBOX0_RB_MAX_ENTRY.
 *
 * Return: DMUB_STATUS_OK on success, DMUB_STATUS_INVALID if the service or
 * required hw hooks are missing, DMUB_STATUS_POWER_STATE_D3 when not in D0,
 * or DMUB_STATUS_QUEUE_FULL when no inbox entry is free.
 */
enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd)
{
	uint32_t num_pending = 0;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->power_state != DMUB_POWER_STATE_D0)
		return DMUB_STATUS_POWER_STATE_D3;

	if (!dmub->hw_funcs.send_reg_inbox0_cmd_msg ||
			!dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack)
		return DMUB_STATUS_INVALID;

	/* Compute how many submitted commands the FW has not yet reported. */
	if (dmub->reg_inbox0.num_submitted >= dmub->reg_inbox0.num_reported)
		num_pending = dmub->reg_inbox0.num_submitted - dmub->reg_inbox0.num_reported;
	else
		/* num_submitted wrapped */
		num_pending = DMUB_REG_INBOX0_RB_MAX_ENTRY -
				(dmub->reg_inbox0.num_reported - dmub->reg_inbox0.num_submitted);

	if (num_pending >= DMUB_REG_INBOX0_RB_MAX_ENTRY)
		return DMUB_STATUS_QUEUE_FULL;

	/* clear last rsp ack and send message */
	dmub->hw_funcs.clear_reg_inbox0_rsp_int_ack(dmub);
	dmub->hw_funcs.send_reg_inbox0_cmd_msg(dmub, cmd);

	dmub->reg_inbox0.num_submitted++;
	dmub->reg_inbox0.is_pending = true;
	/* multi_cmd_pending means more batched commands follow before one rsp */
	dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending;

	return DMUB_STATUS_OK;
}
1295 
1296 void dmub_srv_cmd_get_response(struct dmub_srv *dmub,
1297 		union dmub_rb_cmd *cmd_rsp)
1298 {
1299 	if (dmub) {
1300 		if (dmub->inbox_type == DMUB_CMD_INTERFACE_REG &&
1301 				dmub->hw_funcs.read_reg_inbox0_cmd_rsp) {
1302 			dmub->hw_funcs.read_reg_inbox0_cmd_rsp(dmub, cmd_rsp);
1303 		} else {
1304 			dmub_rb_get_return_data(&dmub->inbox1.rb, cmd_rsp);
1305 		}
1306 	}
1307 }
1308 
1309 static enum dmub_status dmub_srv_sync_reg_inbox0(struct dmub_srv *dmub)
1310 {
1311 	if (!dmub || !dmub->sw_init)
1312 		return DMUB_STATUS_INVALID;
1313 
1314 	dmub->reg_inbox0.is_pending = 0;
1315 	dmub->reg_inbox0.is_multi_pending = 0;
1316 
1317 	return DMUB_STATUS_OK;
1318 }
1319 
1320 static enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
1321 {
1322 	if (!dmub->sw_init)
1323 		return DMUB_STATUS_INVALID;
1324 
1325 	if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
1326 		uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
1327 		uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
1328 
1329 		if (rptr > dmub->inbox1.rb.capacity || wptr > dmub->inbox1.rb.capacity) {
1330 			return DMUB_STATUS_HW_FAILURE;
1331 		} else {
1332 			dmub->inbox1.rb.rptr = rptr;
1333 			dmub->inbox1.rb.wrpt = wptr;
1334 			dmub->inbox1_last_wptr = dmub->inbox1.rb.wrpt;
1335 		}
1336 	}
1337 
1338 	return DMUB_STATUS_OK;
1339 }
1340 
1341 enum dmub_status dmub_srv_sync_inboxes(struct dmub_srv *dmub)
1342 {
1343 	enum dmub_status status;
1344 
1345 	status = dmub_srv_sync_reg_inbox0(dmub);
1346 	if (status != DMUB_STATUS_OK)
1347 		return status;
1348 
1349 	status = dmub_srv_sync_inbox1(dmub);
1350 	if (status != DMUB_STATUS_OK)
1351 		return status;
1352 
1353 	return DMUB_STATUS_OK;
1354 }
1355 
1356 enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
1357 		uint32_t timeout_us,
1358 		uint32_t num_free_required)
1359 {
1360 	enum dmub_status status;
1361 	uint32_t i;
1362 	const uint32_t polling_interval_us = 1;
1363 
1364 	if (!dmub->hw_init)
1365 		return DMUB_STATUS_INVALID;
1366 
1367 	for (i = 0; i < timeout_us; i += polling_interval_us) {
1368 		status = dmub_srv_update_inbox_status(dmub);
1369 
1370 		if (status != DMUB_STATUS_OK)
1371 			return status;
1372 
1373 		/* check for space in inbox1 */
1374 		if (dmub_rb_num_free(&dmub->inbox1.rb) >= num_free_required)
1375 			return DMUB_STATUS_OK;
1376 
1377 		udelay(polling_interval_us);
1378 	}
1379 
1380 	return DMUB_STATUS_TIMEOUT;
1381 }
1382 
1383 enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
1384 {
1385 	uint32_t rptr;
1386 
1387 	if (!dmub->hw_init)
1388 		return DMUB_STATUS_INVALID;
1389 
1390 	if (dmub->power_state != DMUB_POWER_STATE_D0)
1391 		return DMUB_STATUS_POWER_STATE_D3;
1392 
1393 	/* update inbox1 state */
1394 	rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
1395 
1396 	if (rptr > dmub->inbox1.rb.capacity)
1397 		return DMUB_STATUS_HW_FAILURE;
1398 
1399 	if (dmub->inbox1.rb.rptr > rptr) {
1400 		/* rb wrapped */
1401 		dmub->inbox1.num_reported += (rptr + dmub->inbox1.rb.capacity - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
1402 	} else {
1403 		dmub->inbox1.num_reported += (rptr - dmub->inbox1.rb.rptr) / DMUB_RB_CMD_SIZE;
1404 	}
1405 	dmub->inbox1.rb.rptr = rptr;
1406 
1407 	/* update reg_inbox0 */
1408 	dmub_srv_update_reg_inbox0_status(dmub);
1409 
1410 	return DMUB_STATUS_OK;
1411 }
1412 
1413 bool dmub_srv_get_preos_info(struct dmub_srv *dmub)
1414 {
1415 	if (!dmub || !dmub->hw_funcs.get_preos_fw_info)
1416 		return false;
1417 
1418 	return dmub->hw_funcs.get_preos_fw_info(dmub);
1419 }
1420