xref: /linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c (revision 92c4c9fdc838d3b41a996bb700ea64b9e78fc7ea)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2015 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: AMD
24  *
25  */
26 
27 #include <acpi/video.h>
28 
29 #include <linux/string.h>
30 #include <linux/acpi.h>
31 #include <linux/i2c.h>
32 
33 #include <drm/drm_atomic.h>
34 #include <drm/drm_probe_helper.h>
35 #include <drm/amdgpu_drm.h>
36 #include <drm/drm_edid.h>
37 #include <drm/drm_fixed.h>
38 
39 #include "dm_services.h"
40 #include "amdgpu.h"
41 #include "dc.h"
42 #include "amdgpu_dm.h"
43 #include "amdgpu_dm_irq.h"
44 #include "amdgpu_dm_mst_types.h"
45 #include "dpcd_defs.h"
46 #include "dc/inc/core_types.h"
47 
48 #include "dm_helpers.h"
49 #include "ddc_service_types.h"
50 #include "clk_mgr.h"
51 
/* DDC/CI (MCCS/VCP) protocol constants.
 * The 8-bit DDC/CI destination address 0x6E is shifted down to its 7-bit
 * i2c form; 0x51 is the host ("source") address used in DDC/CI packets.
 */
#define MCCS_DEST_ADDR (0x6E >> 1)
#define MCCS_SRC_ADDR	0x51
#define MCCS_LENGTH_OFFSET 0x80	/* length byte carries this offset (see union vcp_reply) */
#define MCCS_MAX_DATA_SIZE 0x20

/* DDC/CI op-codes for VCP feature and capability transactions. */
enum mccs_op_code {
	MCCS_OP_CODE_VCP_REQUEST = 0x01,
	MCCS_OP_CODE_VCP_REPLY = 0x02,
	MCCS_OP_CODE_VCP_SET = 0x03,
	MCCS_OP_CODE_VCP_RESET = 0x09,
	MCCS_OP_CODE_CAP_REQUEST = 0xF3,
	MCCS_OP_CODE_CAP_REPLY = 0xE3
};

/* Message buffer sizes for the supported VCP operations. */
enum mccs_op_buff_size {
	MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST = 5,
	MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST = 11,
	MCCS_OP_BUFF_SIZE_WR_VCP_SET = 7,
};

/* Bit flags carried in a VCP reply value. */
enum vcp_reply_mask {
	FREESYNC_SUPPORTED = 0x1
};

/* Wire layout of an 11-byte DDC/CI VCP reply packet. */
union vcp_reply {
	struct {
		unsigned char src_addr;
		unsigned char length;			/* Length is offset by MccsLengthOffs = 0x80 */
		unsigned char reply_op_code;	/* Should return MCCS_OP_CODE_VCP_REPLY = 0x02 */
		unsigned char result_code;		/* 00h No Error, 01h Unsupported VCP Code */
		unsigned char request_code;		/* Should return mccs vcp code sent in the vcp request */
		unsigned char type_code;		/* VCP type code: 00h Set parameter, 01h Momentary */
		unsigned char max_value[2];		/* 2 bytes returning max value current value */
		unsigned char present_value[2];	/* NOTE: Byte0 is MSB, Byte1 is LSB */
		unsigned char check_sum;
	} bytes;
	unsigned char raw[11];
};
90 
edid_extract_panel_id(struct edid * edid)91 static u32 edid_extract_panel_id(struct edid *edid)
92 {
93 	return (u32)edid->mfg_id[0] << 24   |
94 	       (u32)edid->mfg_id[1] << 16   |
95 	       (u32)EDID_PRODUCT_ID(edid);
96 }
97 
apply_edid_quirks(struct drm_device * dev,struct edid * edid,struct dc_edid_caps * edid_caps)98 static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps)
99 {
100 	uint32_t panel_id = edid_extract_panel_id(edid);
101 
102 	switch (panel_id) {
103 	/* Workaround for monitors that need a delay after detecting the link */
104 	case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
105 		drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id);
106 		edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000;
107 		break;
108 	/* Workaround for some monitors which does not work well with FAMS */
109 	case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
110 	case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
111 	case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
112 		drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id);
113 		edid_caps->panel_patch.disable_fams = true;
114 		break;
115 	/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
116 	case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
117 	case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
118 	case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
119 	case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
120 	case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
121 		drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
122 		edid_caps->panel_patch.remove_sink_ext_caps = true;
123 		break;
124 	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
125 	case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
126 		drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
127 		edid_caps->panel_patch.disable_colorimetry = true;
128 		break;
129 	default:
130 		return;
131 	}
132 }
133 
/**
 * dm_helpers_parse_edid_caps() - Parse edid caps
 *
 * @link: current detected link
 * @edid:	[in] pointer to edid
 * @edid_caps:	[out] parsed capabilities (IDs, audio modes, speaker flags)
 *
 * Return: EDID_OK on success, EDID_BAD_INPUT when @edid or @edid_caps is
 * NULL, EDID_BAD_CHECKSUM when the blob fails drm_edid_is_valid() (parsing
 * still continues best-effort in that case).
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	/* Record a bad checksum but keep parsing what we were given. */
	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	/* Manufacturer/product IDs are stored little-endian across two bytes. */
	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
					((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
					((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;
	edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);

	drm_edid_get_monitor_name(edid_buf,
				  edid_caps->display_name,
				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	if (edid_caps->edid_hdmi)
		populate_hdmi_info_from_connector(&connector->display_info.hdmi, edid_caps);

	apply_edid_quirks(dev, edid_buf, edid_caps);

	/* Short Audio Descriptors: no SADs means no audio caps to fill. */
	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	/* Clamp to what dc_edid_caps can hold. */
	edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	/* Fall back to the default speaker layout if no SADB was found. */
	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}
217 
/* Convert DRM's MST payload state for @target_payload into DC's stream
 * allocation table format.
 *
 * Works on a local copy of link->mst_stream_alloc_table: when enabling,
 * the target VCPI/slot count is appended; when disabling, the matching
 * entry is zeroed. Surviving entries are then compacted into @table in
 * the order they appear in the HW table.
 */
static void
fill_dc_mst_payload_table_from_drm(struct dc_link *link,
				   bool enable,
				   struct drm_dp_mst_atomic_payload *target_payload,
				   struct dc_dp_mst_stream_allocation_table *table)
{
	struct dc_dp_mst_stream_allocation_table new_table = { 0 };
	struct dc_dp_mst_stream_allocation *sa;
	struct link_mst_stream_allocation_table copy_of_link_table =
										link->mst_stream_alloc_table;

	int i;
	int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
	struct link_mst_stream_allocation *dc_alloc;

	/* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
	if (enable) {
		/* Append the new allocation after the existing entries. */
		dc_alloc =
		&copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
		dc_alloc->vcp_id = target_payload->vcpi;
		dc_alloc->slot_count = target_payload->time_slots;
	} else {
		/* Locate and clear the entry matching the removed VCPI. */
		for (i = 0; i < copy_of_link_table.stream_count; i++) {
			dc_alloc =
			&copy_of_link_table.stream_allocations[i];

			if (dc_alloc->vcp_id == target_payload->vcpi) {
				dc_alloc->vcp_id = 0;
				dc_alloc->slot_count = 0;
				break;
			}
		}
		/* The VCPI being removed must have been in the HW table. */
		ASSERT(i != copy_of_link_table.stream_count);
	}

	/* Fill payload info: keep only entries that are still allocated. */
	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
		dc_alloc =
			&copy_of_link_table.stream_allocations[i];
		if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
			sa = &new_table.stream_allocations[new_table.stream_count];
			sa->slot_count = dc_alloc->slot_count;
			sa->vcp_id = dc_alloc->vcp_id;
			new_table.stream_count++;
		}
	}

	/* Overwrite the old table */
	*table = new_table;
}
268 
/* Intentionally a no-op: this DM hook needs no work on amdgpu. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
273 
/* Reconstruct the payload as it existed before removal started.
 *
 * drm_dp_remove_payload_part2() needs the old time-slot count and PBN.
 * They are derived from the VC start-slot gap: this payload's allocation
 * extends from its start slot to the closest start slot of any other
 * payload, or to the manager's next free slot if it was the last payload.
 */
static void dm_helpers_construct_old_payload(
			struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_topology_state *mst_state,
			struct drm_dp_mst_atomic_payload *new_payload,
			struct drm_dp_mst_atomic_payload *old_payload)
{
	struct drm_dp_mst_atomic_payload *pos;
	int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
	u8 next_payload_vc_start = mgr->next_start_slot;
	u8 payload_vc_start = new_payload->vc_start_slot;
	u8 allocated_time_slots;

	*old_payload = *new_payload;

	/* Set correct time_slots/PBN of old payload.
	 * other fields (delete & dsc_enabled) in
	 * struct drm_dp_mst_atomic_payload are don't care fields
	 * while calling drm_dp_remove_payload_part2()
	 */
	list_for_each_entry(pos, &mst_state->payloads, next) {
		/* Find the nearest start slot above ours. */
		if (pos != new_payload &&
		    pos->vc_start_slot > payload_vc_start &&
		    pos->vc_start_slot < next_payload_vc_start)
			next_payload_vc_start = pos->vc_start_slot;
	}

	allocated_time_slots = next_payload_vc_start - payload_vc_start;

	old_payload->time_slots = allocated_time_slots;
	old_payload->pbn = allocated_time_slots * pbn_per_slot;
}
305 
/*
 * Writes payload allocation table in immediate downstream device.
 *
 * Updates DRM's MST payload state (part1 add/remove) and mirrors the
 * resulting allocation into DC's @proposed_table so hardware programming
 * follows the same slot layout. Returns false only when the stream has no
 * valid MST connector.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dc_dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
	struct drm_dp_mst_topology_mgr *mst_mgr;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and directly relies on behaviour in commit check
	 * that blocks before commit guaranteeing that the state
	 * is not gonna be swapped while still in use in commit tail
	 */

	if (!aconnector || !aconnector->mst_root)
		return false;

	mst_mgr = &aconnector->mst_root->mst_mgr;
	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);

	if (enable) {
		target_payload = new_payload;

		/* It's OK for this to fail */
		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
	} else {
		/* construct old payload by VCPI; DRM has already forgotten
		 * the pre-removal slot count (see dm_helpers_construct_old_payload)
		 */
		dm_helpers_construct_old_payload(mst_mgr, mst_state,
						 new_payload, &old_payload);
		target_payload = &old_payload;

		drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload);
	}

	/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
	 * AUX message. The sequence is slot 1-63 allocated sequence for each
	 * stream. AMD ASIC stream slot allocation should follow the same
	 * sequence. copy DRM MST allocation to dc
	 */
	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);

	return true;
}
357 
/*
 * poll pending down reply
 *
 * Intentionally a no-op on amdgpu: down-reply handling is done elsewhere.
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
365 
/*
 * Clear payload allocation table before enable MST DP link.
 *
 * Intentionally a no-op on amdgpu.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
373 
374 /*
375  * Polls for ACT (allocation change trigger) handled and sends
376  * ALLOCATE_PAYLOAD message.
377  */
dm_helpers_dp_mst_poll_for_allocation_change_trigger(struct dc_context * ctx,const struct dc_stream_state * stream)378 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
379 		struct dc_context *ctx,
380 		const struct dc_stream_state *stream)
381 {
382 	struct amdgpu_dm_connector *aconnector;
383 	struct drm_dp_mst_topology_mgr *mst_mgr;
384 	int ret;
385 
386 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
387 
388 	if (!aconnector || !aconnector->mst_root)
389 		return ACT_FAILED;
390 
391 	mst_mgr = &aconnector->mst_root->mst_mgr;
392 
393 	if (!mst_mgr->mst_state)
394 		return ACT_FAILED;
395 
396 	ret = drm_dp_check_act_status(mst_mgr);
397 
398 	if (ret)
399 		return ACT_FAILED;
400 
401 	return ACT_SUCCESS;
402 }
403 
dm_helpers_dp_mst_send_payload_allocation(struct dc_context * ctx,const struct dc_stream_state * stream)404 void dm_helpers_dp_mst_send_payload_allocation(
405 		struct dc_context *ctx,
406 		const struct dc_stream_state *stream)
407 {
408 	struct amdgpu_dm_connector *aconnector;
409 	struct drm_dp_mst_topology_state *mst_state;
410 	struct drm_dp_mst_topology_mgr *mst_mgr;
411 	struct drm_dp_mst_atomic_payload *new_payload;
412 	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
413 	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
414 	int ret = 0;
415 
416 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
417 
418 	if (!aconnector || !aconnector->mst_root)
419 		return;
420 
421 	mst_mgr = &aconnector->mst_root->mst_mgr;
422 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
423 	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
424 
425 	ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
426 
427 	if (ret) {
428 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
429 			set_flag, false);
430 	} else {
431 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
432 			set_flag, true);
433 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
434 			clr_flag, false);
435 	}
436 }
437 
dm_helpers_dp_mst_update_mst_mgr_for_deallocation(struct dc_context * ctx,const struct dc_stream_state * stream)438 void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
439 		struct dc_context *ctx,
440 		const struct dc_stream_state *stream)
441 {
442 	struct amdgpu_dm_connector *aconnector;
443 	struct drm_dp_mst_topology_state *mst_state;
444 	struct drm_dp_mst_topology_mgr *mst_mgr;
445 	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
446 	enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
447 	enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
448 
449 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
450 
451 	if (!aconnector || !aconnector->mst_root)
452 		return;
453 
454 	mst_mgr = &aconnector->mst_root->mst_mgr;
455 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
456 	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
457 	dm_helpers_construct_old_payload(mst_mgr, mst_state,
458 					 new_payload, &old_payload);
459 
460 	drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
461 
462 	amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
463 	amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
464  }
465 
/* Emit the DTN log prologue: to @log_ctx when present, otherwise dmesg. */
void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
478 
479 __printf(3, 4)
dm_dtn_log_append_v(struct dc_context * ctx,struct dc_log_buffer_ctx * log_ctx,const char * msg,...)480 void dm_dtn_log_append_v(struct dc_context *ctx,
481 	struct dc_log_buffer_ctx *log_ctx,
482 	const char *msg, ...)
483 {
484 	va_list args;
485 	size_t total;
486 	int n;
487 
488 	if (!log_ctx) {
489 		/* No context, redirect to dmesg. */
490 		struct va_format vaf;
491 
492 		vaf.fmt = msg;
493 		vaf.va = &args;
494 
495 		va_start(args, msg);
496 		pr_info("%pV", &vaf);
497 		va_end(args);
498 
499 		return;
500 	}
501 
502 	/* Measure the output. */
503 	va_start(args, msg);
504 	n = vsnprintf(NULL, 0, msg, args);
505 	va_end(args);
506 
507 	if (n <= 0)
508 		return;
509 
510 	/* Reallocate the string buffer as needed. */
511 	total = log_ctx->pos + n + 1;
512 
513 	if (total > log_ctx->size) {
514 		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);
515 
516 		if (buf) {
517 			memcpy(buf, log_ctx->buf, log_ctx->pos);
518 			kfree(log_ctx->buf);
519 
520 			log_ctx->buf = buf;
521 			log_ctx->size = total;
522 		}
523 	}
524 
525 	if (!log_ctx->buf)
526 		return;
527 
528 	/* Write the formatted string to the log buffer. */
529 	va_start(args, msg);
530 	n = vscnprintf(
531 		log_ctx->buf + log_ctx->pos,
532 		log_ctx->size - log_ctx->pos,
533 		msg,
534 		args);
535 	va_end(args);
536 
537 	if (n > 0)
538 		log_ctx->pos += n;
539 }
540 
/* Emit the DTN log epilogue: to @log_ctx when present, otherwise dmesg. */
void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
553 
/* Start the MST topology manager on @link's connector.
 *
 * During @boot the actual start is deferred (the topology is brought up
 * later in the resume/boot path), so only a message is logged. Returns
 * false when the connector is missing or MST mode cannot be enabled.
 */
bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	int ret;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		/* Fix the typo in the original message: the start is being
		 * deferred, not "differing".
		 */
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
					aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
			aconnector, aconnector->base.base.id);

	ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
	if (ret < 0) {
		DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
		return false;
	}

	DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
		aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);

	return true;
}
587 
dm_helpers_dp_mst_stop_top_mgr(struct dc_context * ctx,struct dc_link * link)588 bool dm_helpers_dp_mst_stop_top_mgr(
589 		struct dc_context *ctx,
590 		struct dc_link *link)
591 {
592 	struct amdgpu_dm_connector *aconnector = link->priv;
593 
594 	if (!aconnector) {
595 		DRM_ERROR("Failed to find connector for link!");
596 		return false;
597 	}
598 
599 	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
600 			aconnector, aconnector->base.base.id);
601 
602 	if (aconnector->mst_mgr.mst_state == true) {
603 		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
604 		link->cur_link_settings.lane_count = 0;
605 	}
606 
607 	return false;
608 }
609 
dm_helpers_dp_read_dpcd(struct dc_context * ctx,const struct dc_link * link,uint32_t address,uint8_t * data,uint32_t size)610 bool dm_helpers_dp_read_dpcd(
611 		struct dc_context *ctx,
612 		const struct dc_link *link,
613 		uint32_t address,
614 		uint8_t *data,
615 		uint32_t size)
616 {
617 
618 	struct amdgpu_dm_connector *aconnector = link->priv;
619 
620 	if (!aconnector)
621 		return false;
622 
623 	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
624 				size) == size;
625 }
626 
dm_helpers_dp_write_dpcd(struct dc_context * ctx,const struct dc_link * link,uint32_t address,const uint8_t * data,uint32_t size)627 bool dm_helpers_dp_write_dpcd(
628 		struct dc_context *ctx,
629 		const struct dc_link *link,
630 		uint32_t address,
631 		const uint8_t *data,
632 		uint32_t size)
633 {
634 	struct amdgpu_dm_connector *aconnector = link->priv;
635 
636 	if (!aconnector)
637 		return false;
638 
639 	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
640 			address, (uint8_t *)data, size) > 0;
641 }
642 
dm_helpers_submit_i2c(struct dc_context * ctx,const struct dc_link * link,struct i2c_command * cmd)643 bool dm_helpers_submit_i2c(
644 		struct dc_context *ctx,
645 		const struct dc_link *link,
646 		struct i2c_command *cmd)
647 {
648 	struct amdgpu_dm_connector *aconnector = link->priv;
649 	struct i2c_msg *msgs;
650 	int i = 0;
651 	int num = cmd->number_of_payloads;
652 	bool result;
653 
654 	if (!aconnector) {
655 		DRM_ERROR("Failed to find connector for link!");
656 		return false;
657 	}
658 
659 	msgs = kzalloc_objs(struct i2c_msg, num);
660 
661 	if (!msgs)
662 		return false;
663 
664 	for (i = 0; i < num; i++) {
665 		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
666 		msgs[i].addr = cmd->payloads[i].address;
667 		msgs[i].len = cmd->payloads[i].length;
668 		msgs[i].buf = cmd->payloads[i].data;
669 	}
670 
671 	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
672 
673 	kfree(msgs);
674 
675 	return result;
676 }
677 
dm_helpers_execute_fused_io(struct dc_context * ctx,struct dc_link * link,union dmub_rb_cmd * commands,uint8_t count,uint32_t timeout_us)678 bool dm_helpers_execute_fused_io(
679 		struct dc_context *ctx,
680 		struct dc_link *link,
681 		union dmub_rb_cmd *commands,
682 		uint8_t count,
683 		uint32_t timeout_us
684 )
685 {
686 	struct amdgpu_device *dev = ctx->driver_context;
687 
688 	return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
689 }
690 
/* Execute one Synaptics vendor "RC" transaction over DPCD.
 *
 * Sequence: optionally stage write data, then program offset, length and
 * the command byte (bit 7 = "active"), poll until the hub clears the
 * active bit, and read back the result code (plus data, for reads).
 *
 * @aux:          AUX channel of the Synaptics branch device
 * @is_write_cmd: true to send @data, false to read back into @data
 * @cmd:          RC command op-code (without the active bit)
 * @length:       payload length in bytes; writes must fit the 16-byte
 *                staging window
 * @offset:       RC register offset, transmitted little-endian
 * @data:         payload buffer; may be NULL when @length is 0
 *
 * Return: true when the hub reports success (result code 0).
 */
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	int ret;

	if (is_write_cmd) {
		/* Bound the copy to the fixed staging buffer: an oversized
		 * request would otherwise overflow rc_data on the stack.
		 * (Existing callers pass at most 5 bytes.)
		 */
		if (length > sizeof(rc_data))
			return false;

		// write rc data
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
		if (ret < 0)
			goto err;
	}

	// write rc offset
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
	if (ret < 0)
		goto err;

	// write rc length
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
	if (ret < 0)
		goto err;

	// write rc cmd (bit 7 marks the command as active)
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
	if (ret < 0)
		goto err;

	// poll until active is 0 (up to 10 x 10ms)
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			// active is 0
			break;
		msleep(10);
	}

	// read rc result
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		// read rc data
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	drm_dbg_dp(aux->drm_dev, "success = %d\n", success);

	return success;

err:
	DRM_ERROR("%s: write cmd ..., err = %d\n",  __func__, ret);
	return false;
}
763 
/* Reset the Synaptics hub's SDP fifo via its vendor RC interface.
 *
 * The numbered "steps" follow the vendor-provided workaround sequence:
 * authenticate ("PRIUS"), clear bit 1 of three control registers, then set
 * it again, and finally issue the commit command. Any failure aborts the
 * sequence early.
 */
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
	unsigned char data[16] = {0};

	drm_dbg_dp(aux->drm_dev, "Start\n");

	// Step 2: send the vendor unlock key
	data[0] = 'P';
	data[1] = 'R';
	data[2] = 'I';
	data[3] = 'U';
	data[4] = 'S';

	if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
		return;

	// Step 3 and 4: read-modify-write, clearing bit 1 in each register
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 3 and 5: read-modify-write again, setting bit 1
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	/* NOTE(review): unlike the other registers, 0x220D98 is read but its
	 * modified value is never written back in this step — looks like it
	 * may be intentional per the vendor sequence, but worth confirming.
	 */

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 6: commit the workaround
	if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
		return;

	drm_dbg_dp(aux->drm_dev, "Done\n");
}
828 
/* MST Dock */
/* First four bytes of dpcd_caps.branch_dev_name identifying a Synaptics hub. */
static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
831 
write_dsc_enable_synaptics_non_virtual_dpcd_mst(struct drm_dp_aux * aux,const struct dc_stream_state * stream,bool enable)832 static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
833 		struct drm_dp_aux *aux,
834 		const struct dc_stream_state *stream,
835 		bool enable)
836 {
837 	uint8_t ret = 0;
838 
839 	drm_dbg_dp(aux->drm_dev,
840 		   "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");
841 
842 	if (enable) {
843 		/* When DSC is enabled on previous boot and reboot with the hub,
844 		 * there is a chance that Synaptics hub gets stuck during reboot sequence.
845 		 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream
846 		 */
847 		if (!stream->link->link_status.link_active &&
848 			memcmp(stream->link->dpcd_caps.branch_dev_name,
849 				(int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
850 			apply_synaptics_fifo_reset_wa(aux);
851 
852 		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
853 		DRM_INFO("MST_DSC Send DSC enable to synaptics\n");
854 
855 	} else {
856 		/* Synaptics hub not support virtual dpcd,
857 		 * external monitor occur garbage while disable DSC,
858 		 * Disable DSC only when entire link status turn to false,
859 		 */
860 		if (!stream->link->link_status.link_active) {
861 			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
862 			DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
863 		}
864 	}
865 
866 	return ret;
867 }
868 
/* Enable or disable DSC decoding for @stream's sink.
 *
 * For MST: applies the Synaptics non-virtual-DPCD workaround when needed;
 * otherwise writes DP_DSC_ENABLE to the DSC AUX target, and additionally
 * toggles DSC pass-through on the port's passthrough AUX (enabled before
 * decoding on enable, disabled after decoding on disable).
 * For SST/eDP: writes DP_DSC_ENABLE directly when the sink is a native DP
 * device or a DP-to-HDMI PCON.
 *
 * NOTE(review): the bool result is derived from "ret", which holds a
 * byte count (or dm_helpers_dp_write_dpcd()'s bool) from the last write —
 * i.e. nonzero means the final write succeeded.
 */
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	static const uint8_t DSC_DISABLE;
	static const uint8_t DSC_DECODING = 0x01;
	static const uint8_t DSC_PASSTHROUGH = 0x02;

	struct amdgpu_dm_connector *aconnector =
		(struct amdgpu_dm_connector *)stream->dm_stream_context;
	struct drm_device *dev = aconnector->base.dev;
	struct drm_dp_mst_port *port;
	uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
	uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
	uint8_t ret = 0;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		if (!aconnector->dsc_aux)
			return false;

		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);

		port = aconnector->mst_output_port;

		if (enable) {
			/* Enable pass-through on the branch before enabling
			 * decoding downstream.
			 */
			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
					   ret);
			}

			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);
		} else {
			/* Disable decoding first, then pass-through — the
			 * mirror image of the enable ordering above.
			 */
			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);

			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
					   ret);
			}
		}
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to SST RX\n",
				   enable_dsc ? "enable" : "disable");
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to DP-HDMI PCON\n",
				   enable_dsc ? "enable" : "disable");
		}
	}

	return ret;
}
951 
/* Stub: DPCD programming for hblank reduction is not implemented yet. */
bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
{
	// TODO
	return false;
}
957 
dm_helpers_is_dp_sink_present(struct dc_link * link)958 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
959 {
960 	bool dp_sink_present;
961 	struct amdgpu_dm_connector *aconnector = link->priv;
962 
963 	if (!aconnector) {
964 		BUG_ON("Failed to find connector for link!");
965 		return true;
966 	}
967 
968 	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
969 	dp_sink_present = dc_link_is_dp_sink_present(link);
970 	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
971 	return dp_sink_present;
972 }
973 
974 static int
dm_helpers_probe_acpi_edid(void * data,u8 * buf,unsigned int block,size_t len)975 dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
976 {
977 	struct drm_connector *connector = data;
978 	struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
979 	unsigned short start = block * EDID_LENGTH;
980 	struct edid *edid;
981 	int r;
982 
983 	if (!acpidev)
984 		return -ENODEV;
985 
986 	/* fetch the entire edid from BIOS */
987 	r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid);
988 	if (r < 0) {
989 		drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
990 		return r;
991 	}
992 	if (len > r || start > r || start + len > r) {
993 		r = -EINVAL;
994 		goto cleanup;
995 	}
996 
997 	/* sanity check */
998 	if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) ||
999 	    (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) {
1000 		r = -EINVAL;
1001 		goto cleanup;
1002 	}
1003 
1004 	memcpy(buf, (void *)edid + start, len);
1005 	r = 0;
1006 
1007 cleanup:
1008 	kfree(edid);
1009 
1010 	return r;
1011 }
1012 
1013 static const struct drm_edid *
dm_helpers_read_acpi_edid(struct amdgpu_dm_connector * aconnector)1014 dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector)
1015 {
1016 	struct drm_connector *connector = &aconnector->base;
1017 
1018 	if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID)
1019 		return NULL;
1020 
1021 	switch (connector->connector_type) {
1022 	case DRM_MODE_CONNECTOR_LVDS:
1023 	case DRM_MODE_CONNECTOR_eDP:
1024 		break;
1025 	default:
1026 		return NULL;
1027 	}
1028 
1029 	if (connector->force == DRM_FORCE_OFF)
1030 		return NULL;
1031 
1032 	return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector);
1033 }
1034 
/* Copy HDMI sink capabilities DC cares about from the DRM connector info. */
void populate_hdmi_info_from_connector(struct drm_hdmi_info *hdmi, struct dc_edid_caps *edid_caps)
{
	/* SCDC (Status and Control Data Channel) support parsed from the EDID */
	edid_caps->scdc_present = hdmi->scdc.supported;
}
1039 
dm_helpers_read_local_edid(struct dc_context * ctx,struct dc_link * link,struct dc_sink * sink)1040 enum dc_edid_status dm_helpers_read_local_edid(
1041 		struct dc_context *ctx,
1042 		struct dc_link *link,
1043 		struct dc_sink *sink)
1044 {
1045 	struct amdgpu_dm_connector *aconnector = link->priv;
1046 	struct drm_connector *connector = &aconnector->base;
1047 	struct i2c_adapter *ddc;
1048 	int retry = 25;
1049 	enum dc_edid_status edid_status = EDID_NO_RESPONSE;
1050 	const struct drm_edid *drm_edid;
1051 	const struct edid *edid;
1052 
1053 	if (link->aux_mode)
1054 		ddc = &aconnector->dm_dp_aux.aux.ddc;
1055 	else
1056 		ddc = &aconnector->i2c->base;
1057 
1058 	if (link->dc->hwss.prepare_ddc)
1059 		link->dc->hwss.prepare_ddc(link);
1060 
1061 	/* some dongles read edid incorrectly the first time,
1062 	 * do check sum and retry to make sure read correct edid.
1063 	 */
1064 	do {
1065 		drm_edid = dm_helpers_read_acpi_edid(aconnector);
1066 		if (drm_edid)
1067 			drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name);
1068 		else
1069 			drm_edid = drm_edid_read_ddc(connector, ddc);
1070 		drm_edid_connector_update(connector, drm_edid);
1071 
1072 		/* DP Compliance Test 4.2.2.6 */
1073 		if (link->aux_mode && connector->edid_corrupt)
1074 			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
1075 
1076 		if (!drm_edid && connector->edid_corrupt) {
1077 			connector->edid_corrupt = false;
1078 			return EDID_BAD_CHECKSUM;
1079 		}
1080 
1081 		if (!drm_edid)
1082 			continue;
1083 
1084 		edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
1085 		if (!edid ||
1086 		    edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
1087 			return EDID_BAD_INPUT;
1088 
1089 		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
1090 		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
1091 
1092 		/* We don't need the original edid anymore */
1093 		drm_edid_free(drm_edid);
1094 
1095 		edid_status = dm_helpers_parse_edid_caps(
1096 						link,
1097 						&sink->dc_edid,
1098 						&sink->edid_caps);
1099 
1100 	} while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
1101 
1102 	if (edid_status != EDID_OK)
1103 		DRM_ERROR("EDID err: %d, on connector: %s",
1104 				edid_status,
1105 				aconnector->base.name);
1106 	if (link->aux_mode) {
1107 		union test_request test_request = {0};
1108 		union test_response test_response = {0};
1109 
1110 		dm_helpers_dp_read_dpcd(ctx,
1111 					link,
1112 					DP_TEST_REQUEST,
1113 					&test_request.raw,
1114 					sizeof(union test_request));
1115 
1116 		if (!test_request.bits.EDID_READ)
1117 			return edid_status;
1118 
1119 		test_response.bits.EDID_CHECKSUM_WRITE = 1;
1120 
1121 		dm_helpers_dp_write_dpcd(ctx,
1122 					link,
1123 					DP_TEST_EDID_CHECKSUM,
1124 					&sink->dc_edid.raw_edid[sink->dc_edid.length-1],
1125 					1);
1126 
1127 		dm_helpers_dp_write_dpcd(ctx,
1128 					link,
1129 					DP_TEST_RESPONSE,
1130 					&test_response.raw,
1131 					sizeof(test_response));
1132 
1133 	}
1134 
1135 	return edid_status;
1136 }
/*
 * Perform a synchronous AUX transaction through DMUB firmware.
 *
 * Fails immediately with -1 (and AUX_RET_ERROR_HPD_DISCON in
 * @operation_result) when the link has no HPD; otherwise forwards to
 * amdgpu_dm_process_dmub_aux_transfer_sync() and returns its result.
 */
int dm_helper_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	/* no point in talking AUX to a disconnected sink */
	if (!link->hpd_status) {
		*operation_result = AUX_RET_ERROR_HPD_DISCON;
		return -1;
	}

	return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
			operation_result);
}
1151 
/* Forward a DPIA SET_CONFIG request to the DM layer; thin pass-through. */
int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
		const struct dc_link *link,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
			operation_result);
}
1160 
/* Stub: DM currently takes no action when DC reports new DCN clocks. */
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}
1165 
/* Stub: intended hook for DMUB timeout recovery; GPU reset not wired up yet. */
void dm_helpers_dmu_timeout(struct dc_context *ctx)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
1171 
/* Stub: intended hook for SMU message timeout recovery; not wired up yet. */
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	// TODO:
	//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
1177 
/*
 * Seed the per-panel configuration with defaults, pulling EDID-based panel
 * quirks (extra power-sequencing delays) from the parsed sink caps.
 */
void dm_helpers_init_panel_settings(
	struct dc_context *ctx,
	struct dc_panel_config *panel_config,
	struct dc_sink *sink)
{
	// Extra Panel Power Sequence
	panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
	panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
	panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
	panel_config->pps.extra_post_t7_ms = 0;
	panel_config->pps.extra_pre_t11_ms = 0;
	panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
	panel_config->pps.extra_post_OUI_ms = 0;
	// Feature DSC
	panel_config->dsc.disable_dsc_edp = false;
	panel_config->dsc.force_dsc_edp_policy = 0;
}
1195 
/*
 * Apply driver-level overrides on top of the default panel config:
 * honour the DC_DISABLE_DSC debug mask and disable PSR / PSR-SU / Replay
 * on eDP panel instance 1.
 */
void dm_helpers_override_panel_settings(
	struct dc_context *ctx,
	struct dc_link *link)
{
	unsigned int panel_inst = 0;

	// Feature DSC
	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		link->panel_config.dsc.disable_dsc_edp = true;

	/* NOTE(review): only the second eDP panel (panel_inst == 1) loses
	 * PSR/Replay here — presumably a dual-eDP limitation; confirm. */
	if (dc_get_edp_link_panel_inst(ctx->dc, link, &panel_inst) && panel_inst == 1) {
		link->panel_config.psr.disable_psr = true;
		link->panel_config.psr.disallow_psrsu = true;
		link->panel_config.psr.disallow_replay = true;
	}
}
1212 
/*
 * Allocate GPU-accessible memory on DC's behalf.
 * Thin wrapper around dm_allocate_gpu_mem(); @addr receives an address for
 * the allocation (presumably the GPU VA — see dm_allocate_gpu_mem()), and
 * the CPU-side pointer is returned.
 */
void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	struct amdgpu_device *adev = ctx->driver_context;

	return dm_allocate_gpu_mem(adev, type, size, addr);
}
1223 
/* Free memory previously obtained via dm_helpers_allocate_gpu_mem(). */
void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		void *pvMem)
{
	struct amdgpu_device *adev = ctx->driver_context;

	dm_free_gpu_mem(adev, type, pvMem);
}
1233 
/*
 * Enable or disable the DMCUB outbox interrupt source.
 * Returns the result of dc_interrupt_set().
 */
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	bool ret;

	/* the DMCUB outbox is serviced via a dedicated interrupt source */
	ret = dc_interrupt_set(ctx->dc, DC_IRQ_SOURCE_DMCUB_OUTBOX, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}
1247 
dm_helpers_mst_enable_stream_features(const struct dc_stream_state * stream)1248 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
1249 {
1250 	/* TODO: virtual DPCD */
1251 	struct dc_link *link = stream->link;
1252 	union down_spread_ctrl old_downspread;
1253 	union down_spread_ctrl new_downspread;
1254 
1255 	if (link->aux_access_disabled)
1256 		return;
1257 
1258 	if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
1259 				     &old_downspread.raw,
1260 				     sizeof(old_downspread)))
1261 		return;
1262 
1263 	new_downspread.raw = old_downspread.raw;
1264 	new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
1265 		(stream->ignore_msa_timing_param) ? 1 : 0;
1266 
1267 	if (new_downspread.raw != old_downspread.raw)
1268 		dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
1269 					 &new_downspread.raw,
1270 					 sizeof(new_downspread));
1271 }
1272 
/*
 * Service a DP compliance PHY/link test-pattern request from the sink.
 *
 * Translates the DPCD-encoded pattern, colour space, bit depth and pixel
 * encoding into DC types, retimes the stream if the requested format
 * differs from the current one, bumps DRAM clocks (temporary workaround
 * for compliance failures) and finally programs the pattern via
 * dc_link_dp_set_test_pattern().
 *
 * Always returns false (callers treat the request as fully handled here;
 * the return value is not used to signal success).
 */
bool dm_helpers_dp_handle_test_pattern_request(
		struct dc_context *ctx,
		const struct dc_link *link,
		union link_test_pattern dpcd_test_pattern,
		union test_misc dpcd_test_params)
{
	enum dp_test_pattern test_pattern;
	enum dp_test_pattern_color_space test_pattern_color_space =
			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = NULL;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_device *dev = aconnector->base.dev;
	struct dc_state *dc_state = ctx->dc->current_state;
	struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
	int i;

	/* find the head pipe (not a top/ODM-secondary pipe) driving this link */
	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == NULL)
			continue;

		if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
			!pipes[i].prev_odm_pipe) {
			pipe_ctx = &pipes[i];
			break;
		}
	}

	if (pipe_ctx == NULL)
		return false;

	/* map the DPCD pattern code to DC's test-pattern enum */
	switch (dpcd_test_pattern.bits.PATTERN) {
	case LINK_TEST_PATTERN_COLOR_RAMP:
		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
	break;
	case LINK_TEST_PATTERN_VERTICAL_BARS:
		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
	break; /* black and white */
	case LINK_TEST_PATTERN_COLOR_SQUARES:
		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
				TEST_DYN_RANGE_VESA ?
				DP_TEST_PATTERN_COLOR_SQUARES :
				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
	break;
	default:
		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
	break;
	}

	/* CLR_FORMAT 0 means RGB; otherwise pick 601/709 by YCBCR_COEFS */
	if (dpcd_test_params.bits.CLR_FORMAT == 0)
		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
	else
		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;

	switch (dpcd_test_params.bits.BPC) {
	case 0: // 6 bits
		requestColorDepth = COLOR_DEPTH_666;
		break;
	case 1: // 8 bits
		requestColorDepth = COLOR_DEPTH_888;
		break;
	case 2: // 10 bits
		requestColorDepth = COLOR_DEPTH_101010;
		break;
	case 3: // 12 bits
		requestColorDepth = COLOR_DEPTH_121212;
		break;
	default:
		break;
	}

	switch (dpcd_test_params.bits.CLR_FORMAT) {
	case 0:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	case 1:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
		break;
	case 2:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
		break;
	default:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	}

	/* retime the stream if the requested format differs from the current one */
	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
		&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
		|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
		drm_dbg(dev,
			"original bpc %d pix encoding %d, changing to %d  %d\n",
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->timing.pixel_encoding,
			requestColorDepth,
			requestPixelEncoding);
		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;

		dc_link_update_dsc_config(pipe_ctx);

		aconnector->timing_changed = true;
		/* store current timing */
		if (aconnector->timing_requested)
			*aconnector->timing_requested = pipe_ctx->stream->timing;
		else
			drm_err(dev, "timing storage failed\n");

	}

	pipe_ctx->stream->test_pattern.type = test_pattern;
	pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;

	/* Temp W/A for compliance test failure */
	dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
		clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
	dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
	ctx->dc->clk_mgr->funcs->update_clocks(
			ctx->dc->clk_mgr,
			dc_state,
			false);

	dc_link_dp_set_test_pattern(
		(struct dc_link *) link,
		test_pattern,
		test_pattern_color_space,
		NULL,
		NULL,
		0);

	return false;
}
1410 
/* Stub: PHYD32CLK frequency programming is not implemented in DM. */
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	// TODO
}
1415 
/*
 * Toggle the idle-detection workqueue.  When enabling on a headless device
 * with the worker not yet running, kick it off immediately.
 */
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (!adev->dm.idle_workqueue)
		return;

	adev->dm.idle_workqueue->enable = enable;
	if (enable && !adev->dm.idle_workqueue->running && amdgpu_dm_is_headless(adev))
		schedule_work(&adev->dm.idle_workqueue->work);
}
1426 
/* Stub: MST branch-device bandwidth bookkeeping is not implemented yet. */
void dm_helpers_dp_mst_update_branch_bandwidth(
		struct dc_context *ctx,
		struct dc_link *link)
{
	// TODO
}
1433 
dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)1434 static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
1435 {
1436 	bool ret_val = false;
1437 
1438 	switch (branch_dev_id) {
1439 	case DP_BRANCH_DEVICE_ID_0060AD:
1440 	case DP_BRANCH_DEVICE_ID_00E04C:
1441 	case DP_BRANCH_DEVICE_ID_90CC24:
1442 	case DP_BRANCH_DEVICE_ID_001CF8:
1443 	case DP_BRANCH_DEVICE_ID_001FF2:
1444 		ret_val = true;
1445 		break;
1446 	default:
1447 		break;
1448 	}
1449 
1450 	return ret_val;
1451 }
1452 
dm_get_adaptive_sync_support_type(struct dc_link * link)1453 enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
1454 {
1455 	struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
1456 	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
1457 
1458 	switch (dpcd_caps->dongle_type) {
1459 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
1460 		if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
1461 			dpcd_caps->allow_invalid_MSA_timing_param == true &&
1462 			dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
1463 			as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
1464 		break;
1465 	default:
1466 		break;
1467 	}
1468 
1469 	return as_type;
1470 }
1471 
/* Stub: fullscreen detection not implemented; conservatively report false. */
bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}
1477 
/* Stub: HDR-active detection not implemented; conservatively report false. */
bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
{
	// TODO
	return false;
}
1483 
/*
 * Issue a DDC/CI (MCCS) "Get VCP Feature" request for @vcp_code over the
 * link's I2C (or AUX-tunnelled I2C) channel and read the reply into @reply.
 *
 * The write frame is checksummed per DDC/CI: XOR of the destination
 * address byte and all preceding frame bytes.  Retries up to 5 times with
 * 40 ms pacing, since sinks may answer with a null message when they
 * cannot reply in time.
 *
 * Returns 0 on success, -EIO when all retries are exhausted.
 */
static int mccs_operation_vcp_request(unsigned int vcp_code, struct dc_link *link,
				union vcp_reply *reply)
{
	const unsigned char retry_interval_ms = 40;
	unsigned char retry = 5;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	struct i2c_msg msg = {0};
	int ret = 0;
	int idx;

	unsigned char wr_data[MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST] = {
		MCCS_SRC_ADDR,				/* Byte0 - Src Addr */
		MCCS_LENGTH_OFFSET + 2,		/* Byte1 - Length */
		MCCS_OP_CODE_VCP_REQUEST,	/* Byte2 - MCCS Command */
		(unsigned char) vcp_code,	/* Byte3 - VCP Code */
		MCCS_DEST_ADDR << 1			/* Byte4 - CheckSum */
	};

	/* calculate checksum: XOR dest-address seed with all prior bytes */
	for (idx = 0; idx < (MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST - 1); idx++)
		wr_data[(MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST-1)] ^= wr_data[idx];

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	do {
		msg.addr = MCCS_DEST_ADDR;
		msg.flags = 0;
		msg.len = MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST;
		msg.buf = wr_data;

		ret = i2c_transfer(ddc, &msg, 1);
		if (ret != 1)
			goto mccs_retry;

		/* give the sink time to prepare its reply */
		msleep(retry_interval_ms);

		msg.addr = MCCS_DEST_ADDR;
		msg.flags = I2C_M_RD;
		msg.len = MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST;
		msg.buf = reply->raw;

		ret = i2c_transfer(ddc, &msg, 1);

		/* sink might reply with null msg if it can't reply in time */
		if (ret == 1 && reply->bytes.length > MCCS_LENGTH_OFFSET)
			break;
mccs_retry:
		retry--;
		msleep(retry_interval_ms);
	} while (retry);

	if (!retry) {
		drm_dbg_driver(aconnector->base.dev,
			"%s: MCCS VCP request failed after retries", __func__);
		return -EIO;
	}

	return 0;
}
1547 
/*
 * Determine whether the sink supports Freesync via MCCS (DDC/CI VCP) and
 * record the answer in sink->mccs_caps.freesync_supported.
 *
 * Only consulted when the EDID advertised a Freesync VCP opcode.  Applies
 * to whitelisted DP-to-HDMI PCONs, legacy (non-HDMI) PCONs and native
 * HDMI sinks; MST links are skipped (Freesync over MST not implemented).
 */
void dm_helpers_read_mccs_caps(struct dc_context *ctx, struct dc_link *link,
		struct dc_sink *sink)
{
	bool mccs_op = false;
	struct dpcd_caps *dpcd_caps;
	struct drm_device *dev;
	uint16_t freesync_vcp_value = 0;
	union vcp_reply vcp_reply_value = {0};

	if (!ctx)
		return;
	dev = adev_to_drm(ctx->driver_context);

	if (!link || !sink) {
		drm_dbg_driver(dev, "%s: link or sink is NULL", __func__);
		return;
	}

	sink->mccs_caps.freesync_supported = false;
	dpcd_caps = &link->dpcd_caps;

	if (sink->edid_caps.freesync_vcp_code != 0) {
		if (dc_is_dp_signal(link->connector_signal)) {
			/* whitelisted DP-to-HDMI PCON with adaptive-sync SDP support */
			if ((dpcd_caps->dpcd_rev.raw >= DPCD_REV_14) &&
				(dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) &&
				dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id) &&
				(dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true))
				mccs_op = true;

			if ((dpcd_caps->dongle_type != DISPLAY_DONGLE_NONE &&
				dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) {
				if (mccs_op == false)
					drm_dbg_driver(dev, "%s: Legacy Pcon support", __func__);
				mccs_op = true;
			}

			if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
				// Todo: Freesync over MST
				mccs_op = false;
			}
		}

		if (dc_is_hdmi_signal(link->connector_signal)) {
			drm_dbg_driver(dev, "%s: Local HDMI sink", __func__);
			mccs_op = true;
		}

		if (mccs_op == true) {
			// MCCS VCP request to get VCP value
			if (!mccs_operation_vcp_request(sink->edid_caps.freesync_vcp_code, link,
					&vcp_reply_value)) {
				/* present value is big-endian in the reply */
				freesync_vcp_value = vcp_reply_value.bytes.present_value[1];
				freesync_vcp_value |= (uint16_t) vcp_reply_value.bytes.present_value[0] << 8;
			}
			// If VCP Value bit 0 is 1, freesyncSupport = true
			sink->mccs_caps.freesync_supported =
				(freesync_vcp_value & FREESYNC_SUPPORTED) ? true : false;
		}
	}
}
1608 
/*
 * Issue a DDC/CI (MCCS) "Set VCP Feature" write for @vcp_code with the
 * 16-bit @value (sent high byte first) over the link's I2C channel.
 *
 * Uses the same DDC/CI XOR checksum and 5x/40 ms retry policy as
 * mccs_operation_vcp_request(), but needs no read-back.
 *
 * Returns 0 on success, -EIO when all retries are exhausted.
 */
static int mccs_operation_vcp_set(unsigned int vcp_code, struct dc_link *link, uint16_t value)
{
	const unsigned char retry_interval_ms = 40;
	unsigned char retry = 5;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	struct i2c_msg msg = {0};
	int ret = 0;
	int idx;

	unsigned char wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET] = {
		MCCS_SRC_ADDR,				/* Byte0 - Src Addr */
		MCCS_LENGTH_OFFSET + 4,		/* Byte1 - Length */
		MCCS_OP_CODE_VCP_SET,		/* Byte2 - MCCS Command */
		(unsigned char)vcp_code,	/* Byte3 - VCP Code */
		(unsigned char)(value >> 8),	/* Byte4 - Value High Byte */
		(unsigned char)(value & 0xFF),	/* Byte5 - Value Low Byte */
		MCCS_DEST_ADDR << 1		/* Byte6 - CheckSum */
	};

	/* calculate checksum: XOR dest-address seed with all prior bytes */
	for (idx = 0; idx < (MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1); idx++)
		wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1] ^= wr_data[idx];

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	do {
		msg.addr = MCCS_DEST_ADDR;
		msg.flags = 0;
		msg.len = MCCS_OP_BUFF_SIZE_WR_VCP_SET;
		msg.buf = wr_data;

		ret = i2c_transfer(ddc, &msg, 1);
		if (ret == 1)
			break;

		retry--;
		msleep(retry_interval_ms);
	} while (retry);

	if (!retry)
		return -EIO;

	return 0;
}
1657 
/*
 * Enable Freesync on the sink through its MCCS VCP opcode.
 *
 * No-op (with a debug message) when arguments are missing or the sink did
 * not report MCCS Freesync support during dm_helpers_read_mccs_caps().
 * The 0x0101 payload is the value written to the Freesync VCP code —
 * presumably "enable" per the sink vendor's MCCS spec; confirm.
 */
void dm_helpers_mccs_vcp_set(struct dc_context *ctx, struct dc_link *link,
		struct dc_sink *sink)
{
	struct drm_device *dev;
	const uint16_t enable = 0x0101;

	if (!ctx)
		return;
	dev = adev_to_drm(ctx->driver_context);

	if (!link || !sink) {
		drm_dbg_driver(dev, "%s: link or sink is NULL", __func__);
		return;
	}

	if (!sink->mccs_caps.freesync_supported) {
		drm_dbg_driver(dev, "%s: MCCS freesync not supported on this sink", __func__);
		return;
	}

	if (mccs_operation_vcp_set(sink->edid_caps.freesync_vcp_code, link, enable))
		drm_dbg_driver(dev, "%s: Failed to set VCP code %d", __func__,
				sink->edid_caps.freesync_vcp_code);
}
1682 
1683