xref: /linux/drivers/gpu/drm/amd/display/dc/dc_helper.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 /*
24  * dc_helper.c
25  *
26  *  Created on: Aug 30, 2016
27  *      Author: agrodzov
28  */
29 
30 #include <linux/delay.h>
31 #include <linux/stdarg.h>
32 
33 #include "dm_services.h"
34 
35 #include "dc.h"
36 #include "dc_dmub_srv.h"
37 #include "reg_helper.h"
38 
39 #define DC_LOGGER \
40 	ctx->logger
41 
42 static inline void submit_dmub_read_modify_write(
43 	struct dc_reg_helper_state *offload,
44 	const struct dc_context *ctx)
45 {
46 	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
47 
48 	offload->should_burst_write =
49 			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
50 	cmd_buf->header.payload_bytes =
51 			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
52 
53 	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
54 
55 	memset(cmd_buf, 0, sizeof(*cmd_buf));
56 
57 	offload->reg_seq_count = 0;
58 	offload->same_addr_count = 0;
59 }
60 
61 static inline void submit_dmub_burst_write(
62 	struct dc_reg_helper_state *offload,
63 	const struct dc_context *ctx)
64 {
65 	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
66 
67 	cmd_buf->header.payload_bytes =
68 			sizeof(uint32_t) * offload->reg_seq_count;
69 
70 	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
71 
72 	memset(cmd_buf, 0, sizeof(*cmd_buf));
73 
74 	offload->reg_seq_count = 0;
75 }
76 
77 static inline void submit_dmub_reg_wait(
78 		struct dc_reg_helper_state *offload,
79 		const struct dc_context *ctx)
80 {
81 	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
82 
83 	dc_wake_and_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
84 
85 	memset(cmd_buf, 0, sizeof(*cmd_buf));
86 	offload->reg_seq_count = 0;
87 }
88 
/* Accumulated state for a multi-field register update: 'value' holds the
 * already-shifted field values, 'mask' the union of all field masks
 * touched so far.
 */
struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};
93 
94 static inline void set_reg_field_value_masks(
95 	struct dc_reg_value_masks *field_value_mask,
96 	uint32_t value,
97 	uint32_t mask,
98 	uint8_t shift)
99 {
100 	ASSERT(mask != 0);
101 
102 	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
103 	field_value_mask->mask = field_value_mask->mask | mask;
104 }
105 
/*
 * Gather all field value/mask updates for a single register into
 * @field_value_mask.
 *
 * The first field comes from the explicit shift1/mask1/field_value1
 * arguments; the remaining n-1 fields are consumed from @ap as
 * (shift, mask, value) uint32_t triplets.
 *
 * @addr is unused here; it is kept so the signature mirrors the
 * callers' variadic argument lists.
 */
static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i;

	/* previously the cast preceded the declarations, tripping the
	 * kernel's -Wdeclaration-after-statement rule */
	(void)addr;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	for (i = 1; i < n; i++) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
	}
}
129 
/* Flush any gathered read-modify-write sequence to DMUB. */
static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
}
136 
/* Flush any gathered burst-write command to DMUB. */
static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
}
143 
/*
 * Try to append one register write to the gathered DMUB burst-write
 * command.  Returns true when the value was queued; returns false when
 * the write targets a different address than the burst currently being
 * gathered (the pending burst is flushed first and the caller is
 * expected to fall back to read-modify-write packing).
 */
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	/* a burst to another address is already being gathered; flushing
	 * clears the header type, so this cannot trigger twice in a row */
	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	/* (re)label the gathered command as a burst write and queue the value */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}
168 
/*
 * Append one read-modify-write step for @addr to the gathered DMUB
 * command, switching to burst-write packing when the previous sequence
 * ended in a long run of writes to a single address.  Returns the
 * merged value, mirroring what the direct-MMIO path would return.
 */
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	/* set by submit_dmub_read_modify_write() when the last sequence
	 * was dominated by writes to one address */
	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	/* count consecutive same-address writes to decide whether the
	 * next sequence should use burst-write packing */
	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}
207 
208 static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
209 		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
210 {
211 	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
212 	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
213 
214 	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
215 	cmd_buf->header.sub_type = 0;
216 	cmd_buf->reg_wait.addr = addr;
217 	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
218 	cmd_buf->reg_wait.mask = mask;
219 	cmd_buf->reg_wait.time_out_us = time_out_us;
220 }
221 
222 uint32_t generic_reg_update_ex(const struct dc_context *ctx,
223 		uint32_t addr, int n,
224 		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
225 		...)
226 {
227 	struct dc_reg_value_masks field_value_mask = {0};
228 	uint32_t reg_val;
229 	va_list ap;
230 
231 	va_start(ap, field_value1);
232 
233 	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
234 			field_value1, ap);
235 
236 	va_end(ap);
237 
238 	if (ctx->dmub_srv &&
239 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
240 		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
241 		/* todo: return void so we can decouple code running in driver from register states */
242 
243 	/* mmio write directly */
244 	reg_val = dm_read_reg(ctx, addr);
245 	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
246 	dm_write_reg(ctx, addr, reg_val);
247 	return reg_val;
248 }
249 
250 uint32_t generic_reg_set_ex(const struct dc_context *ctx,
251 		uint32_t addr, uint32_t reg_val, int n,
252 		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
253 		...)
254 {
255 	struct dc_reg_value_masks field_value_mask = {0};
256 	va_list ap;
257 
258 	va_start(ap, field_value1);
259 
260 	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
261 			field_value1, ap);
262 
263 	va_end(ap);
264 
265 	/* mmio write directly */
266 	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
267 
268 	if (ctx->dmub_srv &&
269 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
270 		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
271 		/* todo: return void so we can decouple code running in driver from register states */
272 	}
273 
274 	dm_write_reg(ctx, addr, reg_val);
275 	return reg_val;
276 }
277 
/* Read @addr once and extract a single field; returns the raw value. */
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	const uint32_t reg_val = dm_read_reg(ctx, addr);

	*field_value = get_reg_field_value_ex(reg_val, mask, shift);

	return reg_val;
}
285 
/* Read @addr once and extract two fields; returns the raw value. */
uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t *out[] = { field_value1, field_value2 };
	const uint8_t shift[] = { shift1, shift2 };
	const uint32_t mask[] = { mask1, mask2 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 2; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
295 
/* Read @addr once and extract three fields; returns the raw value. */
uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t *out[] = { field_value1, field_value2, field_value3 };
	const uint8_t shift[] = { shift1, shift2, shift3 };
	const uint32_t mask[] = { mask1, mask2, mask3 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 3; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
307 
/* Read @addr once and extract four fields; returns the raw value. */
uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t *out[] = { field_value1, field_value2, field_value3,
			field_value4 };
	const uint8_t shift[] = { shift1, shift2, shift3, shift4 };
	const uint32_t mask[] = { mask1, mask2, mask3, mask4 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 4; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
321 
/* Read @addr once and extract five fields; returns the raw value. */
uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t *out[] = { field_value1, field_value2, field_value3,
			field_value4, field_value5 };
	const uint8_t shift[] = { shift1, shift2, shift3, shift4, shift5 };
	const uint32_t mask[] = { mask1, mask2, mask3, mask4, mask5 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 5; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
337 
/* Read @addr once and extract six fields; returns the raw value. */
uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t *out[] = { field_value1, field_value2, field_value3,
			field_value4, field_value5, field_value6 };
	const uint8_t shift[] = { shift1, shift2, shift3, shift4, shift5,
			shift6 };
	const uint32_t mask[] = { mask1, mask2, mask3, mask4, mask5, mask6 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 6; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
355 
/* Read @addr once and extract seven fields; returns the raw value. */
uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t *out[] = { field_value1, field_value2, field_value3,
			field_value4, field_value5, field_value6,
			field_value7 };
	const uint8_t shift[] = { shift1, shift2, shift3, shift4, shift5,
			shift6, shift7 };
	const uint32_t mask[] = { mask1, mask2, mask3, mask4, mask5, mask6,
			mask7 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 7; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
375 
/* Read @addr once and extract eight fields; returns the raw value. */
uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t *out[] = { field_value1, field_value2, field_value3,
			field_value4, field_value5, field_value6,
			field_value7, field_value8 };
	const uint8_t shift[] = { shift1, shift2, shift3, shift4, shift5,
			shift6, shift7, shift8 };
	const uint32_t mask[] = { mask1, mask2, mask3, mask4, mask5, mask6,
			mask7, mask8 };
	uint32_t reg_val = dm_read_reg(ctx, addr);
	int i;

	for (i = 0; i < 8; i++)
		*out[i] = get_reg_field_value_ex(reg_val, mask[i], shift[i]);

	return reg_val;
}
/* note: a va_args version of this would be a pretty bad idea: since the output parameters are
 * passed by pointer, the compiler cannot check for size matches, making it prone to stack-corruption bugs
399 
400 uint32_t generic_reg_get(const struct dc_context *ctx,
401 		uint32_t addr, int n, ...)
402 {
403 	uint32_t shift, mask;
404 	uint32_t *field_value;
405 	uint32_t reg_val;
406 	int i = 0;
407 
408 	reg_val = dm_read_reg(ctx, addr);
409 
410 	va_list ap;
411 	va_start(ap, n);
412 
413 	while (i < n) {
414 		shift = va_arg(ap, uint32_t);
415 		mask = va_arg(ap, uint32_t);
416 		field_value = va_arg(ap, uint32_t *);
417 
418 		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
419 		i++;
420 	}
421 
422 	va_end(ap);
423 
424 	return reg_val;
425 }
426 */
427 
/*
 * Poll the register @addr until the field selected by @mask/@shift
 * equals @condition_value, sleeping @delay_between_poll_us between
 * reads for up to @time_out_num_tries attempts.
 *
 * When DMUB offload gather is active, the wait is packed as a DMUB
 * reg-wait command (executed later by firmware) instead of polling
 * here.  On timeout a warning naming @func_name:@line is logged and
 * BREAK_TO_DEBUGGER() is invoked.
 */
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		/* total firmware timeout = per-poll delay * tries */
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if time out is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 *
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	/* first iteration polls immediately; later ones delay first */
	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			/* sleep for millisecond-scale delays, busy-wait
			 * for sub-millisecond ones */
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			/* succeeded, but note waits that took > 1ms */
			if (i * delay_between_poll_us > 1000)
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	BREAK_TO_DEBUGGER();
}
479 
/* Program an indirect register: select it through the index register,
 * then write the payload through the data register (order matters). */
void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}
487 
488 uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
489 		uint32_t addr_index, uint32_t addr_data,
490 		uint32_t index)
491 {
492 	uint32_t value = 0;
493 
494 	// when reg read, there should not be any offload.
495 	if (ctx->dmub_srv &&
496 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
497 		ASSERT(false);
498 	}
499 
500 	dm_write_reg(ctx, addr_index, index);
501 	value = dm_read_reg(ctx, addr_data);
502 
503 	return value;
504 }
505 
/*
 * Read an indirect register and extract n fields, the first given
 * explicitly and the rest as (shift, mask, pointer) triplets in the
 * variadic list.  Returns the raw register value.
 */
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t value;
	va_list ap;
	int i;

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	va_start(ap, field_value1);
	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t *field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
	}
	va_end(ap);

	return value;
}
536 
/*
 * Merge n field updates into @reg_val and write the result to an
 * indirect register, with extra fields taken from the variadic list as
 * (shift, mask, value) triplets.  Returns the value written.
 */
uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	va_list ap;
	int i;

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	va_start(ap, field_value1);
	for (i = 1; i < n; i++) {
		uint32_t shift = va_arg(ap, uint32_t);
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
	}
	va_end(ap);

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);

	return reg_val;
}
566 
567 
568 uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
569 		uint32_t index, uint32_t reg_val, int n,
570 		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
571 		...)
572 {
573 	uint32_t shift, mask, field_value;
574 	int i = 1;
575 
576 	va_list ap;
577 
578 	va_start(ap, field_value1);
579 
580 	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
581 
582 	while (i < n) {
583 		shift = va_arg(ap, uint32_t);
584 		mask = va_arg(ap, uint32_t);
585 		field_value = va_arg(ap, uint32_t);
586 
587 		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
588 		i++;
589 	}
590 
591 	dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
592 	va_end(ap);
593 
594 	return reg_val;
595 }
596 
597 uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
598 		uint32_t index, int n,
599 		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
600 		...)
601 {
602 	uint32_t shift, mask, *field_value;
603 	uint32_t value = 0;
604 	int i = 1;
605 
606 	va_list ap;
607 
608 	va_start(ap, field_value1);
609 
610 	value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
611 	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);
612 
613 	while (i < n) {
614 		shift = va_arg(ap, uint32_t);
615 		mask = va_arg(ap, uint32_t);
616 		field_value = va_arg(ap, uint32_t *);
617 
618 		*field_value = get_reg_field_value_ex(value, mask, shift);
619 		i++;
620 	}
621 
622 	va_end(ap);
623 
624 	return value;
625 }
626 
627 void reg_sequence_start_gather(const struct dc_context *ctx)
628 {
629 	/* if reg sequence is supported and enabled, set flag to
630 	 * indicate we want to have REG_SET, REG_UPDATE macro build
631 	 * reg sequence command buffer rather than MMIO directly.
632 	 */
633 
634 	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
635 		struct dc_reg_helper_state *offload =
636 			&ctx->dmub_srv->reg_helper_offload;
637 
638 		/* caller sequence mismatch.  need to debug caller.  offload will not work!!! */
639 		ASSERT(!offload->gather_in_progress);
640 
641 		offload->gather_in_progress = true;
642 	}
643 }
644 
/*
 * Stop gathering and submit whatever command was built up by the
 * REG_SET/REG_UPDATE/REG_WAIT macros since reg_sequence_start_gather().
 */
void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		/* submit based on whichever command kind was last packed */
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}
	}
}
672 
673 void reg_sequence_wait_done(const struct dc_context *ctx)
674 {
675 	/* callback to DM to poll for last submission done*/
676 	struct dc_reg_helper_state *offload;
677 
678 	if (!ctx->dmub_srv)
679 		return;
680 
681 	offload = &ctx->dmub_srv->reg_helper_offload;
682 
683 	if (offload &&
684 	    ctx->dc->debug.dmub_offload_enabled &&
685 	    !ctx->dc->debug.dmcub_emulation) {
686 		dc_dmub_srv_wait_for_idle(ctx->dmub_srv, DM_DMUB_WAIT_TYPE_WAIT, NULL);
687 	}
688 }
689 
690 char *dce_version_to_string(const int version)
691 {
692 	switch (version) {
693 	case DCE_VERSION_6_0:
694 		return "DCE 6.0";
695 	case DCE_VERSION_6_1:
696 		return "DCE 6.1";
697 	case DCE_VERSION_6_4:
698 		return "DCE 6.4";
699 	case DCE_VERSION_8_0:
700 		return "DCE 8.0";
701 	case DCE_VERSION_8_1:
702 		return "DCE 8.1";
703 	case DCE_VERSION_8_3:
704 		return "DCE 8.3";
705 	case DCE_VERSION_10_0:
706 		return "DCE 10.0";
707 	case DCE_VERSION_11_0:
708 		return "DCE 11.0";
709 	case DCE_VERSION_11_2:
710 		return "DCE 11.2";
711 	case DCE_VERSION_11_22:
712 		return "DCE 11.22";
713 	case DCE_VERSION_12_0:
714 		return "DCE 12.0";
715 	case DCE_VERSION_12_1:
716 		return "DCE 12.1";
717 	case DCN_VERSION_1_0:
718 		return "DCN 1.0";
719 	case DCN_VERSION_1_01:
720 		return "DCN 1.0.1";
721 	case DCN_VERSION_2_0:
722 		return "DCN 2.0";
723 	case DCN_VERSION_2_1:
724 		return "DCN 2.1";
725 	case DCN_VERSION_2_01:
726 		return "DCN 2.0.1";
727 	case DCN_VERSION_3_0:
728 		return "DCN 3.0";
729 	case DCN_VERSION_3_01:
730 		return "DCN 3.0.1";
731 	case DCN_VERSION_3_02:
732 		return "DCN 3.0.2";
733 	case DCN_VERSION_3_03:
734 		return "DCN 3.0.3";
735 	case DCN_VERSION_3_1:
736 		return "DCN 3.1.2";
737 	case DCN_VERSION_3_14:
738 		return "DCN 3.1.4";
739 	case DCN_VERSION_3_15:
740 		return "DCN 3.1.5";
741 	case DCN_VERSION_3_16:
742 		return "DCN 3.1.6";
743 	case DCN_VERSION_3_2:
744 		return "DCN 3.2";
745 	case DCN_VERSION_3_21:
746 		return "DCN 3.2.1";
747 	case DCN_VERSION_3_5:
748 		return "DCN 3.5";
749 	case DCN_VERSION_3_51:
750 		return "DCN 3.5.1";
751 	case DCN_VERSION_3_6:
752 		return "DCN 3.6";
753 	case DCN_VERSION_4_01:
754 		return "DCN 4.0.1";
755 	case DCN_VERSION_4_2:
756 		return "DCN 4.2";
757 	default:
758 		return "Unknown";
759 	}
760 }
761 
762 bool dc_supports_vrr(const enum dce_version v)
763 {
764 	return v >= DCE_VERSION_8_0;
765 }
766