1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27 #include <acpi/video.h>
28
29 #include <linux/string.h>
30 #include <linux/acpi.h>
31 #include <linux/i2c.h>
32
33 #include <drm/drm_atomic.h>
34 #include <drm/drm_probe_helper.h>
35 #include <drm/amdgpu_drm.h>
36 #include <drm/drm_edid.h>
37 #include <drm/drm_fixed.h>
38
39 #include "dm_services.h"
40 #include "amdgpu.h"
41 #include "dc.h"
42 #include "amdgpu_dm.h"
43 #include "amdgpu_dm_irq.h"
44 #include "amdgpu_dm_mst_types.h"
45 #include "dpcd_defs.h"
46 #include "dc/inc/core_types.h"
47
48 #include "dm_helpers.h"
49 #include "ddc_service_types.h"
50 #include "clk_mgr.h"
51
/* DDC/CI (MCCS) destination: 0x6E is the 8-bit display address; the i2c
 * layer wants the 7-bit form, hence the shift.
 */
#define MCCS_DEST_ADDR (0x6E >> 1)
/* Source address byte placed in every host-originated MCCS command. */
#define MCCS_SRC_ADDR 0x51
/* Per DDC/CI, the length byte is OR'ed with 0x80 on the wire. */
#define MCCS_LENGTH_OFFSET 0x80
/* Maximum payload bytes carried in a single MCCS fragment. */
#define MCCS_MAX_DATA_SIZE 0x20

/* VESA Monitor Control Command Set opcodes used over DDC/CI. */
enum mccs_op_code {
	MCCS_OP_CODE_VCP_REQUEST = 0x01,
	MCCS_OP_CODE_VCP_REPLY = 0x02,
	MCCS_OP_CODE_VCP_SET = 0x03,
	MCCS_OP_CODE_VCP_RESET = 0x09,
	MCCS_OP_CODE_CAP_REQUEST = 0xF3,
	MCCS_OP_CODE_CAP_REPLY = 0xE3
};

/* On-the-wire buffer sizes for the MCCS transactions.
 * NOTE(review): the double underscore in MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST
 * looks like a typo for a single underscore; the identifier may be used
 * later in this file, so verify all users before renaming.
 */
enum mccs_op_buff_size {
	MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST = 5,
	MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST = 11,
	MCCS_OP_BUFF_SIZE_WR_VCP_SET = 7,
};

/* Bit masks for decoding a VCP reply value. */
enum vcp_reply_mask {
	FREESYNC_SUPPORTED = 0x1
};

/* Layout of the 11-byte MCCS "Get VCP Feature" reply, overlaid on the raw
 * bytes read back from the display.
 */
union vcp_reply {
	struct {
		unsigned char src_addr;
		unsigned char length; /* Length is offset by MccsLengthOffs = 0x80 */
		unsigned char reply_op_code; /* Should return MCCS_OP_CODE_VCP_REPLY = 0x02 */
		unsigned char result_code; /* 00h No Error, 01h Unsupported VCP Code */
		unsigned char request_code; /* Should return mccs vcp code sent in the vcp request */
		unsigned char type_code; /* VCP type code: 00h Set parameter, 01h Momentary */
		unsigned char max_value[2]; /* 2 bytes returning max value current value */
		unsigned char present_value[2]; /* NOTE: Byte0 is MSB, Byte1 is LSB */
		unsigned char check_sum;
	} bytes;
	unsigned char raw[11];
};
90
edid_extract_panel_id(struct edid * edid)91 static u32 edid_extract_panel_id(struct edid *edid)
92 {
93 return (u32)edid->mfg_id[0] << 24 |
94 (u32)edid->mfg_id[1] << 16 |
95 (u32)EDID_PRODUCT_ID(edid);
96 }
97
apply_edid_quirks(struct drm_device * dev,struct edid * edid,struct dc_edid_caps * edid_caps)98 static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct dc_edid_caps *edid_caps)
99 {
100 uint32_t panel_id = edid_extract_panel_id(edid);
101
102 switch (panel_id) {
103 /* Workaround for monitors that need a delay after detecting the link */
104 case drm_edid_encode_panel_id('G', 'B', 'T', 0x3215):
105 drm_dbg_driver(dev, "Add 10s delay for link detection for panel id %X\n", panel_id);
106 edid_caps->panel_patch.wait_after_dpcd_poweroff_ms = 10000;
107 break;
108 /* Workaround for some monitors which does not work well with FAMS */
109 case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
110 case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
111 case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
112 drm_dbg_driver(dev, "Disabling FAMS on monitor with panel id %X\n", panel_id);
113 edid_caps->panel_patch.disable_fams = true;
114 break;
115 /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
116 case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
117 case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
118 case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
119 case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
120 case drm_edid_encode_panel_id('M', 'S', 'F', 0x1003):
121 drm_dbg_driver(dev, "Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
122 edid_caps->panel_patch.remove_sink_ext_caps = true;
123 break;
124 case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
125 case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
126 drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
127 edid_caps->panel_patch.disable_colorimetry = true;
128 break;
129 default:
130 return;
131 }
132 }
133
134 /**
135 * dm_helpers_parse_edid_caps() - Parse edid caps
136 *
137 * @link: current detected link
138 * @edid: [in] pointer to edid
139 * @edid_caps: [in] pointer to edid caps
140 *
141 * Return: void
142 */
dm_helpers_parse_edid_caps(struct dc_link * link,const struct dc_edid * edid,struct dc_edid_caps * edid_caps)143 enum dc_edid_status dm_helpers_parse_edid_caps(
144 struct dc_link *link,
145 const struct dc_edid *edid,
146 struct dc_edid_caps *edid_caps)
147 {
148 struct amdgpu_dm_connector *aconnector = link->priv;
149 struct drm_connector *connector = &aconnector->base;
150 struct drm_device *dev = connector->dev;
151 struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
152 struct cea_sad *sads;
153 int sad_count = -1;
154 int sadb_count = -1;
155 int i = 0;
156 uint8_t *sadb = NULL;
157
158 enum dc_edid_status result = EDID_OK;
159
160 if (!edid_caps || !edid)
161 return EDID_BAD_INPUT;
162
163 if (!drm_edid_is_valid(edid_buf))
164 result = EDID_BAD_CHECKSUM;
165
166 edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
167 ((uint16_t) edid_buf->mfg_id[1])<<8;
168 edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
169 ((uint16_t) edid_buf->prod_code[1])<<8;
170 edid_caps->serial_number = edid_buf->serial;
171 edid_caps->manufacture_week = edid_buf->mfg_week;
172 edid_caps->manufacture_year = edid_buf->mfg_year;
173 edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
174
175 drm_edid_get_monitor_name(edid_buf,
176 edid_caps->display_name,
177 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
178
179 edid_caps->edid_hdmi = connector->display_info.is_hdmi;
180
181 if (edid_caps->edid_hdmi)
182 populate_hdmi_info_from_connector(&connector->display_info.hdmi, edid_caps);
183
184 apply_edid_quirks(dev, edid_buf, edid_caps);
185
186 sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
187 if (sad_count <= 0)
188 return result;
189
190 edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
191 for (i = 0; i < edid_caps->audio_mode_count; ++i) {
192 struct cea_sad *sad = &sads[i];
193
194 edid_caps->audio_modes[i].format_code = sad->format;
195 edid_caps->audio_modes[i].channel_count = sad->channels + 1;
196 edid_caps->audio_modes[i].sample_rate = sad->freq;
197 edid_caps->audio_modes[i].sample_size = sad->byte2;
198 }
199
200 sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
201
202 if (sadb_count < 0) {
203 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
204 sadb_count = 0;
205 }
206
207 if (sadb_count)
208 edid_caps->speaker_flags = sadb[0];
209 else
210 edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
211
212 kfree(sads);
213 kfree(sadb);
214
215 return result;
216 }
217
218 static void
fill_dc_mst_payload_table_from_drm(struct dc_link * link,bool enable,struct drm_dp_mst_atomic_payload * target_payload,struct dc_dp_mst_stream_allocation_table * table)219 fill_dc_mst_payload_table_from_drm(struct dc_link *link,
220 bool enable,
221 struct drm_dp_mst_atomic_payload *target_payload,
222 struct dc_dp_mst_stream_allocation_table *table)
223 {
224 struct dc_dp_mst_stream_allocation_table new_table = { 0 };
225 struct dc_dp_mst_stream_allocation *sa;
226 struct link_mst_stream_allocation_table copy_of_link_table =
227 link->mst_stream_alloc_table;
228
229 int i;
230 int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
231 struct link_mst_stream_allocation *dc_alloc;
232
233 /* TODO: refactor to set link->mst_stream_alloc_table directly if possible.*/
234 if (enable) {
235 dc_alloc =
236 ©_of_link_table.stream_allocations[current_hw_table_stream_cnt];
237 dc_alloc->vcp_id = target_payload->vcpi;
238 dc_alloc->slot_count = target_payload->time_slots;
239 } else {
240 for (i = 0; i < copy_of_link_table.stream_count; i++) {
241 dc_alloc =
242 ©_of_link_table.stream_allocations[i];
243
244 if (dc_alloc->vcp_id == target_payload->vcpi) {
245 dc_alloc->vcp_id = 0;
246 dc_alloc->slot_count = 0;
247 break;
248 }
249 }
250 ASSERT(i != copy_of_link_table.stream_count);
251 }
252
253 /* Fill payload info*/
254 for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
255 dc_alloc =
256 ©_of_link_table.stream_allocations[i];
257 if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
258 sa = &new_table.stream_allocations[new_table.stream_count];
259 sa->slot_count = dc_alloc->slot_count;
260 sa->vcp_id = dc_alloc->vcp_id;
261 new_table.stream_count++;
262 }
263 }
264
265 /* Overwrite the old table */
266 *table = new_table;
267 }
268
/* Intentionally a no-op stub of the dm_helpers interface on amdgpu. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
273
dm_helpers_construct_old_payload(struct drm_dp_mst_topology_mgr * mgr,struct drm_dp_mst_topology_state * mst_state,struct drm_dp_mst_atomic_payload * new_payload,struct drm_dp_mst_atomic_payload * old_payload)274 static void dm_helpers_construct_old_payload(
275 struct drm_dp_mst_topology_mgr *mgr,
276 struct drm_dp_mst_topology_state *mst_state,
277 struct drm_dp_mst_atomic_payload *new_payload,
278 struct drm_dp_mst_atomic_payload *old_payload)
279 {
280 struct drm_dp_mst_atomic_payload *pos;
281 int pbn_per_slot = dfixed_trunc(mst_state->pbn_div);
282 u8 next_payload_vc_start = mgr->next_start_slot;
283 u8 payload_vc_start = new_payload->vc_start_slot;
284 u8 allocated_time_slots;
285
286 *old_payload = *new_payload;
287
288 /* Set correct time_slots/PBN of old payload.
289 * other fields (delete & dsc_enabled) in
290 * struct drm_dp_mst_atomic_payload are don't care fields
291 * while calling drm_dp_remove_payload_part2()
292 */
293 list_for_each_entry(pos, &mst_state->payloads, next) {
294 if (pos != new_payload &&
295 pos->vc_start_slot > payload_vc_start &&
296 pos->vc_start_slot < next_payload_vc_start)
297 next_payload_vc_start = pos->vc_start_slot;
298 }
299
300 allocated_time_slots = next_payload_vc_start - payload_vc_start;
301
302 old_payload->time_slots = allocated_time_slots;
303 old_payload->pbn = allocated_time_slots * pbn_per_slot;
304 }
305
306 /*
307 * Writes payload allocation table in immediate downstream device.
308 */
dm_helpers_dp_mst_write_payload_allocation_table(struct dc_context * ctx,const struct dc_stream_state * stream,struct dc_dp_mst_stream_allocation_table * proposed_table,bool enable)309 bool dm_helpers_dp_mst_write_payload_allocation_table(
310 struct dc_context *ctx,
311 const struct dc_stream_state *stream,
312 struct dc_dp_mst_stream_allocation_table *proposed_table,
313 bool enable)
314 {
315 struct amdgpu_dm_connector *aconnector;
316 struct drm_dp_mst_topology_state *mst_state;
317 struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
318 struct drm_dp_mst_topology_mgr *mst_mgr;
319
320 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
321 /* Accessing the connector state is required for vcpi_slots allocation
322 * and directly relies on behaviour in commit check
323 * that blocks before commit guaranteeing that the state
324 * is not gonna be swapped while still in use in commit tail
325 */
326
327 if (!aconnector || !aconnector->mst_root)
328 return false;
329
330 mst_mgr = &aconnector->mst_root->mst_mgr;
331 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
332 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
333
334 if (enable) {
335 target_payload = new_payload;
336
337 /* It's OK for this to fail */
338 drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
339 } else {
340 /* construct old payload by VCPI*/
341 dm_helpers_construct_old_payload(mst_mgr, mst_state,
342 new_payload, &old_payload);
343 target_payload = &old_payload;
344
345 drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload);
346 }
347
348 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
349 * AUX message. The sequence is slot 1-63 allocated sequence for each
350 * stream. AMD ASIC stream slot allocation should follow the same
351 * sequence. copy DRM MST allocation to dc
352 */
353 fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
354
355 return true;
356 }
357
358 /*
359 * poll pending down reply
360 */
dm_helpers_dp_mst_poll_pending_down_reply(struct dc_context * ctx,const struct dc_link * link)361 void dm_helpers_dp_mst_poll_pending_down_reply(
362 struct dc_context *ctx,
363 const struct dc_link *link)
364 {}
365
366 /*
367 * Clear payload allocation table before enable MST DP link.
368 */
dm_helpers_dp_mst_clear_payload_allocation_table(struct dc_context * ctx,const struct dc_link * link)369 void dm_helpers_dp_mst_clear_payload_allocation_table(
370 struct dc_context *ctx,
371 const struct dc_link *link)
372 {}
373
374 /*
375 * Polls for ACT (allocation change trigger) handled and sends
376 * ALLOCATE_PAYLOAD message.
377 */
dm_helpers_dp_mst_poll_for_allocation_change_trigger(struct dc_context * ctx,const struct dc_stream_state * stream)378 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
379 struct dc_context *ctx,
380 const struct dc_stream_state *stream)
381 {
382 struct amdgpu_dm_connector *aconnector;
383 struct drm_dp_mst_topology_mgr *mst_mgr;
384 int ret;
385
386 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
387
388 if (!aconnector || !aconnector->mst_root)
389 return ACT_FAILED;
390
391 mst_mgr = &aconnector->mst_root->mst_mgr;
392
393 if (!mst_mgr->mst_state)
394 return ACT_FAILED;
395
396 ret = drm_dp_check_act_status(mst_mgr);
397
398 if (ret)
399 return ACT_FAILED;
400
401 return ACT_SUCCESS;
402 }
403
dm_helpers_dp_mst_send_payload_allocation(struct dc_context * ctx,const struct dc_stream_state * stream)404 void dm_helpers_dp_mst_send_payload_allocation(
405 struct dc_context *ctx,
406 const struct dc_stream_state *stream)
407 {
408 struct amdgpu_dm_connector *aconnector;
409 struct drm_dp_mst_topology_state *mst_state;
410 struct drm_dp_mst_topology_mgr *mst_mgr;
411 struct drm_dp_mst_atomic_payload *new_payload;
412 enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
413 enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
414 int ret = 0;
415
416 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
417
418 if (!aconnector || !aconnector->mst_root)
419 return;
420
421 mst_mgr = &aconnector->mst_root->mst_mgr;
422 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
423 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
424
425 ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
426
427 if (ret) {
428 amdgpu_dm_set_mst_status(&aconnector->mst_status,
429 set_flag, false);
430 } else {
431 amdgpu_dm_set_mst_status(&aconnector->mst_status,
432 set_flag, true);
433 amdgpu_dm_set_mst_status(&aconnector->mst_status,
434 clr_flag, false);
435 }
436 }
437
dm_helpers_dp_mst_update_mst_mgr_for_deallocation(struct dc_context * ctx,const struct dc_stream_state * stream)438 void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
439 struct dc_context *ctx,
440 const struct dc_stream_state *stream)
441 {
442 struct amdgpu_dm_connector *aconnector;
443 struct drm_dp_mst_topology_state *mst_state;
444 struct drm_dp_mst_topology_mgr *mst_mgr;
445 struct drm_dp_mst_atomic_payload *new_payload, old_payload;
446 enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
447 enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
448
449 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
450
451 if (!aconnector || !aconnector->mst_root)
452 return;
453
454 mst_mgr = &aconnector->mst_root->mst_mgr;
455 mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
456 new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
457 dm_helpers_construct_old_payload(mst_mgr, mst_state,
458 new_payload, &old_payload);
459
460 drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
461
462 amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
463 amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
464 }
465
/* Emit the DTN log header, either into @log_ctx or to dmesg. */
void dm_dtn_log_begin(struct dc_context *ctx,
		      struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
478
479 __printf(3, 4)
dm_dtn_log_append_v(struct dc_context * ctx,struct dc_log_buffer_ctx * log_ctx,const char * msg,...)480 void dm_dtn_log_append_v(struct dc_context *ctx,
481 struct dc_log_buffer_ctx *log_ctx,
482 const char *msg, ...)
483 {
484 va_list args;
485 size_t total;
486 int n;
487
488 if (!log_ctx) {
489 /* No context, redirect to dmesg. */
490 struct va_format vaf;
491
492 vaf.fmt = msg;
493 vaf.va = &args;
494
495 va_start(args, msg);
496 pr_info("%pV", &vaf);
497 va_end(args);
498
499 return;
500 }
501
502 /* Measure the output. */
503 va_start(args, msg);
504 n = vsnprintf(NULL, 0, msg, args);
505 va_end(args);
506
507 if (n <= 0)
508 return;
509
510 /* Reallocate the string buffer as needed. */
511 total = log_ctx->pos + n + 1;
512
513 if (total > log_ctx->size) {
514 char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);
515
516 if (buf) {
517 memcpy(buf, log_ctx->buf, log_ctx->pos);
518 kfree(log_ctx->buf);
519
520 log_ctx->buf = buf;
521 log_ctx->size = total;
522 }
523 }
524
525 if (!log_ctx->buf)
526 return;
527
528 /* Write the formatted string to the log buffer. */
529 va_start(args, msg);
530 n = vscnprintf(
531 log_ctx->buf + log_ctx->pos,
532 log_ctx->size - log_ctx->pos,
533 msg,
534 args);
535 va_end(args);
536
537 if (n > 0)
538 log_ctx->pos += n;
539 }
540
/* Emit the DTN log trailer, either into @log_ctx or to dmesg. */
void dm_dtn_log_end(struct dc_context *ctx,
		    struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	if (log_ctx)
		dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
	else
		pr_info("%s", msg);
}
553
/*
 * Start the MST topology manager on @link's connector. During boot the
 * start is only reported, not performed. Returns true unless the
 * connector is missing or enabling MST mode fails.
 */
bool dm_helpers_dp_mst_start_top_mgr(
	struct dc_context *ctx,
	const struct dc_link *link,
	bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) < 0) {
		DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
		return false;
	}

	DRM_INFO("DM_MST: DP%x, %d-lane link detected\n",
		 aconnector->mst_mgr.dpcd[0],
		 aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);

	return true;
}
587
dm_helpers_dp_mst_stop_top_mgr(struct dc_context * ctx,struct dc_link * link)588 bool dm_helpers_dp_mst_stop_top_mgr(
589 struct dc_context *ctx,
590 struct dc_link *link)
591 {
592 struct amdgpu_dm_connector *aconnector = link->priv;
593
594 if (!aconnector) {
595 DRM_ERROR("Failed to find connector for link!");
596 return false;
597 }
598
599 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
600 aconnector, aconnector->base.base.id);
601
602 if (aconnector->mst_mgr.mst_state == true) {
603 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
604 link->cur_link_settings.lane_count = 0;
605 }
606
607 return false;
608 }
609
dm_helpers_dp_read_dpcd(struct dc_context * ctx,const struct dc_link * link,uint32_t address,uint8_t * data,uint32_t size)610 bool dm_helpers_dp_read_dpcd(
611 struct dc_context *ctx,
612 const struct dc_link *link,
613 uint32_t address,
614 uint8_t *data,
615 uint32_t size)
616 {
617
618 struct amdgpu_dm_connector *aconnector = link->priv;
619
620 if (!aconnector)
621 return false;
622
623 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
624 size) == size;
625 }
626
dm_helpers_dp_write_dpcd(struct dc_context * ctx,const struct dc_link * link,uint32_t address,const uint8_t * data,uint32_t size)627 bool dm_helpers_dp_write_dpcd(
628 struct dc_context *ctx,
629 const struct dc_link *link,
630 uint32_t address,
631 const uint8_t *data,
632 uint32_t size)
633 {
634 struct amdgpu_dm_connector *aconnector = link->priv;
635
636 if (!aconnector)
637 return false;
638
639 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
640 address, (uint8_t *)data, size) > 0;
641 }
642
dm_helpers_submit_i2c(struct dc_context * ctx,const struct dc_link * link,struct i2c_command * cmd)643 bool dm_helpers_submit_i2c(
644 struct dc_context *ctx,
645 const struct dc_link *link,
646 struct i2c_command *cmd)
647 {
648 struct amdgpu_dm_connector *aconnector = link->priv;
649 struct i2c_msg *msgs;
650 int i = 0;
651 int num = cmd->number_of_payloads;
652 bool result;
653
654 if (!aconnector) {
655 DRM_ERROR("Failed to find connector for link!");
656 return false;
657 }
658
659 msgs = kzalloc_objs(struct i2c_msg, num);
660
661 if (!msgs)
662 return false;
663
664 for (i = 0; i < num; i++) {
665 msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
666 msgs[i].addr = cmd->payloads[i].address;
667 msgs[i].len = cmd->payloads[i].length;
668 msgs[i].buf = cmd->payloads[i].data;
669 }
670
671 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
672
673 kfree(msgs);
674
675 return result;
676 }
677
dm_helpers_execute_fused_io(struct dc_context * ctx,struct dc_link * link,union dmub_rb_cmd * commands,uint8_t count,uint32_t timeout_us)678 bool dm_helpers_execute_fused_io(
679 struct dc_context *ctx,
680 struct dc_link *link,
681 union dmub_rb_cmd *commands,
682 uint8_t count,
683 uint32_t timeout_us
684 )
685 {
686 struct amdgpu_device *dev = ctx->driver_context;
687
688 return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us);
689 }
690
/*
 * Execute one Synaptics vendor "remote command" (RC) transaction over
 * DPCD: optionally write @length bytes of @data, program the target
 * @offset and @length, issue @cmd, poll until the busy bit clears, and
 * (for reads) copy the result back into @data.
 *
 * Returns true when the device reports success (RC result register == 0).
 */
static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
		bool is_write_cmd,
		unsigned char cmd,
		unsigned int length,
		unsigned int offset,
		unsigned char *data)
{
	bool success = false;
	unsigned char rc_data[16] = {0};
	unsigned char rc_offset[4] = {0};
	unsigned char rc_length[2] = {0};
	unsigned char rc_cmd = 0;
	unsigned char rc_result = 0xFF;
	unsigned char i = 0;
	int ret;

	if (is_write_cmd) {
		// write rc data
		memmove(rc_data, data, length);
		ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
		if (ret < 0)
			goto err;
	}

	// write rc offset (little-endian byte order)
	rc_offset[0] = (unsigned char) offset & 0xFF;
	rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
	rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
	rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
	if (ret < 0)
		goto err;

	// write rc length (little-endian byte order)
	rc_length[0] = (unsigned char) length & 0xFF;
	rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
	if (ret < 0)
		goto err;

	// write rc cmd; bit 7 (0x80) acts as the busy/active flag
	rc_cmd = cmd | 0x80;
	ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
	if (ret < 0)
		goto err;

	// poll until active is 0 (device clears bit 7 when done;
	// up to 10 tries with 10 ms between reads)
	for (i = 0; i < 10; i++) {
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
		if (rc_cmd == cmd)
			// active is 0
			break;
		msleep(10);
	}

	// read rc result; 0 indicates success
	drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
	success = (rc_result == 0);

	if (success && !is_write_cmd) {
		// read rc data
		drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
	}

	drm_dbg_dp(aux->drm_dev, "success = %d\n", success);

	return success;

err:
	DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
	return false;
}
763
/*
 * Synaptics SDP FIFO reset workaround, issued as a fixed sequence of
 * vendor RC commands (see caller for when it is applied). Any failing
 * step silently aborts the remainder of the sequence.
 */
static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
	unsigned char data[16] = {0};

	drm_dbg_dp(aux->drm_dev, "Start\n");

	// Step 2: unlock the vendor command interface with the "PRIUS" key
	data[0] = 'P';
	data[1] = 'R';
	data[2] = 'I';
	data[3] = 'U';
	data[4] = 'S';

	if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
		return;

	// Step 3 and 4: read-modify-write, clearing bit 1 in three registers
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] &= (~(1 << 1)); // set bit 1 to 0
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 3 and 5: read-modify-write, setting bit 1 back to 1
	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	/* NOTE(review): unlike the other registers in this step, the
	 * modified value for 0x220D98 is never written back (no 0x21
	 * command here). This matches the sequence as provided; confirm
	 * against the vendor workaround spec before changing it.
	 */

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
	if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
		return;

	// Step 6: re-lock the vendor command interface
	if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
		return;

	drm_dbg_dp(aux->drm_dev, "Done\n");
}
828
/* MST Dock */
static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";

/*
 * Enable/disable DSC on a Synaptics MST hub that does not expose a
 * virtual DPCD, by writing DP_DSC_ENABLE directly on @aux.
 *
 * Returns the drm_dp_dpcd_write() result truncated to uint8_t, or 0 when
 * no write was issued (disable while the link is still active).
 */
static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
		struct drm_dp_aux *aux,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t ret = 0;

	drm_dbg_dp(aux->drm_dev,
		   "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");

	if (enable) {
		/* When DSC is enabled on previous boot and reboot with the hub,
		 * there is a chance that Synaptics hub gets stuck during reboot sequence.
		 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream
		 */
		if (!stream->link->link_status.link_active &&
		    memcmp(stream->link->dpcd_caps.branch_dev_name,
			   (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
			apply_synaptics_fifo_reset_wa(aux);

		ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
		DRM_INFO("MST_DSC Send DSC enable to synaptics\n");

	} else {
		/* Synaptics hub not support virtual dpcd,
		 * external monitor occur garbage while disable DSC,
		 * Disable DSC only when entire link status turn to false,
		 */
		if (!stream->link->link_status.link_active) {
			ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
			DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
		}
	}

	return ret;
}
868
/*
 * Write the DSC enable/disable DPCD register(s) for @stream.
 *
 * MST: applies the Synaptics non-virtual-DPCD workaround where needed,
 * and orders the writes so the pass-through port is enabled before the
 * decoder and disabled after it.
 * SST/eDP: writes DP_DSC_ENABLE directly to the RX or a DP-HDMI PCON.
 *
 * Returns the last DPCD write result truncated to uint8_t, converted to
 * bool. NOTE(review): a negative errno from drm_dp_dpcd_write() truncates
 * to an arbitrary u8 (possibly 0) — confirm the intended success
 * semantics with callers before relying on the return value.
 */
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	static const uint8_t DSC_DISABLE;
	static const uint8_t DSC_DECODING = 0x01;
	static const uint8_t DSC_PASSTHROUGH = 0x02;

	struct amdgpu_dm_connector *aconnector =
		(struct amdgpu_dm_connector *)stream->dm_stream_context;
	struct drm_device *dev = aconnector->base.dev;
	struct drm_dp_mst_port *port;
	uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
	uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
	uint8_t ret = 0;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		if (!aconnector->dsc_aux)
			return false;

		// apply w/a to synaptics
		if (needs_dsc_aux_workaround(aconnector->dc_link) &&
		    (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
			return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
				aconnector->dsc_aux, stream, enable_dsc);

		port = aconnector->mst_output_port;

		if (enable) {
			/* Enable pass-through on the branch first, then the decoder. */
			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
					   ret);
			}

			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);
		} else {
			/* Disable the decoder first, then pass-through (reverse order). */
			ret = drm_dp_dpcd_write(aconnector->dsc_aux,
						DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
				   (port->passthrough_aux) ? "remote RX" :
				   "virtual dpcd",
				   ret);

			if (port->passthrough_aux) {
				ret = drm_dp_dpcd_write(port->passthrough_aux,
							DP_DSC_ENABLE,
							&enable_passthrough, 1);
				drm_dbg_dp(dev,
					   "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
					   ret);
			}
		}
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to SST RX\n",
				   enable_dsc ? "enable" : "disable");
		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
			drm_dbg_dp(dev,
				   "SST_DSC Send DSC %s to DP-HDMI PCON\n",
				   enable_dsc ? "enable" : "disable");
		}
	}

	return ret;
}
951
/* Not implemented on amdgpu yet; reports that nothing was written. */
bool dm_helpers_dp_write_hblank_reduction(struct dc_context *ctx, const struct dc_stream_state *stream)
{
	// TODO
	return false;
}
957
dm_helpers_is_dp_sink_present(struct dc_link * link)958 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
959 {
960 bool dp_sink_present;
961 struct amdgpu_dm_connector *aconnector = link->priv;
962
963 if (!aconnector) {
964 BUG_ON("Failed to find connector for link!");
965 return true;
966 }
967
968 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
969 dp_sink_present = dc_link_is_dp_sink_present(link);
970 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
971 return dp_sink_present;
972 }
973
974 static int
dm_helpers_probe_acpi_edid(void * data,u8 * buf,unsigned int block,size_t len)975 dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
976 {
977 struct drm_connector *connector = data;
978 struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
979 unsigned short start = block * EDID_LENGTH;
980 struct edid *edid;
981 int r;
982
983 if (!acpidev)
984 return -ENODEV;
985
986 /* fetch the entire edid from BIOS */
987 r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid);
988 if (r < 0) {
989 drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
990 return r;
991 }
992 if (len > r || start > r || start + len > r) {
993 r = -EINVAL;
994 goto cleanup;
995 }
996
997 /* sanity check */
998 if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) ||
999 (edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) {
1000 r = -EINVAL;
1001 goto cleanup;
1002 }
1003
1004 memcpy(buf, (void *)edid + start, len);
1005 r = 0;
1006
1007 cleanup:
1008 kfree(edid);
1009
1010 return r;
1011 }
1012
1013 static const struct drm_edid *
dm_helpers_read_acpi_edid(struct amdgpu_dm_connector * aconnector)1014 dm_helpers_read_acpi_edid(struct amdgpu_dm_connector *aconnector)
1015 {
1016 struct drm_connector *connector = &aconnector->base;
1017
1018 if (amdgpu_dc_debug_mask & DC_DISABLE_ACPI_EDID)
1019 return NULL;
1020
1021 switch (connector->connector_type) {
1022 case DRM_MODE_CONNECTOR_LVDS:
1023 case DRM_MODE_CONNECTOR_eDP:
1024 break;
1025 default:
1026 return NULL;
1027 }
1028
1029 if (connector->force == DRM_FORCE_OFF)
1030 return NULL;
1031
1032 return drm_edid_read_custom(connector, dm_helpers_probe_acpi_edid, connector);
1033 }
1034
1035 static const struct drm_edid *
dm_helpers_read_vbios_hardcoded_edid(struct dc_link * link,struct amdgpu_dm_connector * aconnector)1036 dm_helpers_read_vbios_hardcoded_edid(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
1037 {
1038 struct dc_bios *bios = link->ctx->dc_bios;
1039 struct embedded_panel_info info;
1040 const struct drm_edid *edid;
1041 enum bp_result r;
1042
1043 if (!dc_is_embedded_signal(link->connector_signal) ||
1044 !bios->funcs->get_embedded_panel_info)
1045 return NULL;
1046
1047 memset(&info, 0, sizeof(info));
1048 r = bios->funcs->get_embedded_panel_info(bios, &info);
1049
1050 if (r != BP_RESULT_OK) {
1051 dm_error("Error when reading embedded panel info: %u\n", r);
1052 return NULL;
1053 }
1054
1055 if (!info.fake_edid || !info.fake_edid_size) {
1056 dm_error("Embedded panel info doesn't contain an EDID\n");
1057 return NULL;
1058 }
1059
1060 edid = drm_edid_alloc(info.fake_edid, info.fake_edid_size);
1061
1062 if (!drm_edid_valid(edid)) {
1063 dm_error("EDID from embedded panel info is invalid\n");
1064 drm_edid_free(edid);
1065 return NULL;
1066 }
1067
1068 aconnector->base.display_info.width_mm = info.panel_width_mm;
1069 aconnector->base.display_info.height_mm = info.panel_height_mm;
1070
1071 return edid;
1072 }
1073
populate_hdmi_info_from_connector(struct drm_hdmi_info * hdmi,struct dc_edid_caps * edid_caps)1074 void populate_hdmi_info_from_connector(struct drm_hdmi_info *hdmi, struct dc_edid_caps *edid_caps)
1075 {
1076 edid_caps->scdc_present = hdmi->scdc.supported;
1077 }
1078
dm_helpers_read_local_edid(struct dc_context * ctx,struct dc_link * link,struct dc_sink * sink)1079 enum dc_edid_status dm_helpers_read_local_edid(
1080 struct dc_context *ctx,
1081 struct dc_link *link,
1082 struct dc_sink *sink)
1083 {
1084 struct amdgpu_dm_connector *aconnector = link->priv;
1085 struct drm_connector *connector = &aconnector->base;
1086 struct i2c_adapter *ddc;
1087 int retry = 25;
1088 enum dc_edid_status edid_status = EDID_NO_RESPONSE;
1089 const struct drm_edid *drm_edid;
1090 const struct edid *edid;
1091
1092 if (link->aux_mode)
1093 ddc = &aconnector->dm_dp_aux.aux.ddc;
1094 else if (link->ddc_hw_inst == GPIO_DDC_LINE_UNKNOWN &&
1095 dc_is_embedded_signal(link->connector_signal))
1096 ddc = NULL;
1097 else
1098 ddc = &aconnector->i2c->base;
1099
1100 if (link->dc->hwss.prepare_ddc)
1101 link->dc->hwss.prepare_ddc(link);
1102
1103 /* some dongles read edid incorrectly the first time,
1104 * do check sum and retry to make sure read correct edid.
1105 */
1106 do {
1107 drm_edid = dm_helpers_read_acpi_edid(aconnector);
1108 if (drm_edid)
1109 drm_info(connector->dev, "Using ACPI provided EDID for %s\n", connector->name);
1110 else if (!ddc)
1111 drm_edid = dm_helpers_read_vbios_hardcoded_edid(link, aconnector);
1112 else
1113 drm_edid = drm_edid_read_ddc(connector, ddc);
1114 drm_edid_connector_update(connector, drm_edid);
1115
1116 /* DP Compliance Test 4.2.2.6 */
1117 if (link->aux_mode && connector->edid_corrupt)
1118 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
1119
1120 if (!drm_edid && connector->edid_corrupt) {
1121 connector->edid_corrupt = false;
1122 return EDID_BAD_CHECKSUM;
1123 }
1124
1125 if (!drm_edid)
1126 continue;
1127
1128 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
1129 if (!edid ||
1130 edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
1131 return EDID_BAD_INPUT;
1132
1133 sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
1134 memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
1135
1136 /* We don't need the original edid anymore */
1137 drm_edid_free(drm_edid);
1138
1139 edid_status = dm_helpers_parse_edid_caps(
1140 link,
1141 &sink->dc_edid,
1142 &sink->edid_caps);
1143
1144 } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
1145
1146 if (edid_status != EDID_OK)
1147 DRM_ERROR("EDID err: %d, on connector: %s",
1148 edid_status,
1149 aconnector->base.name);
1150 if (link->aux_mode) {
1151 union test_request test_request = {0};
1152 union test_response test_response = {0};
1153
1154 dm_helpers_dp_read_dpcd(ctx,
1155 link,
1156 DP_TEST_REQUEST,
1157 &test_request.raw,
1158 sizeof(union test_request));
1159
1160 if (!test_request.bits.EDID_READ)
1161 return edid_status;
1162
1163 test_response.bits.EDID_CHECKSUM_WRITE = 1;
1164
1165 dm_helpers_dp_write_dpcd(ctx,
1166 link,
1167 DP_TEST_EDID_CHECKSUM,
1168 &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
1169 1);
1170
1171 dm_helpers_dp_write_dpcd(ctx,
1172 link,
1173 DP_TEST_RESPONSE,
1174 &test_response.raw,
1175 sizeof(test_response));
1176
1177 }
1178
1179 return edid_status;
1180 }
dm_helper_dmub_aux_transfer_sync(struct dc_context * ctx,const struct dc_link * link,struct aux_payload * payload,enum aux_return_code_type * operation_result)1181 int dm_helper_dmub_aux_transfer_sync(
1182 struct dc_context *ctx,
1183 const struct dc_link *link,
1184 struct aux_payload *payload,
1185 enum aux_return_code_type *operation_result)
1186 {
1187 if (!link->hpd_status) {
1188 *operation_result = AUX_RET_ERROR_HPD_DISCON;
1189 return -1;
1190 }
1191
1192 return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload,
1193 operation_result);
1194 }
1195
dm_helpers_dmub_set_config_sync(struct dc_context * ctx,const struct dc_link * link,struct set_config_cmd_payload * payload,enum set_config_status * operation_result)1196 int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
1197 const struct dc_link *link,
1198 struct set_config_cmd_payload *payload,
1199 enum set_config_status *operation_result)
1200 {
1201 return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload,
1202 operation_result);
1203 }
1204
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* Intentionally empty: DCN clock updates are not handled here yet (TODO). */
}
1209
void dm_helpers_dmu_timeout(struct dc_context *ctx)
{
	/*
	 * TODO: hook up GPU recovery here, e.g.
	 * amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
	 */
}
1215
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us)
{
	/*
	 * TODO: hook up GPU recovery here, e.g.
	 * amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
	 */
}
1221
dm_helpers_init_panel_settings(struct dc_context * ctx,struct dc_panel_config * panel_config,struct dc_sink * sink)1222 void dm_helpers_init_panel_settings(
1223 struct dc_context *ctx,
1224 struct dc_panel_config *panel_config,
1225 struct dc_sink *sink)
1226 {
1227 // Extra Panel Power Sequence
1228 panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
1229 panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
1230 panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
1231 panel_config->pps.extra_post_t7_ms = 0;
1232 panel_config->pps.extra_pre_t11_ms = 0;
1233 panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
1234 panel_config->pps.extra_post_OUI_ms = 0;
1235 // Feature DSC
1236 panel_config->dsc.disable_dsc_edp = false;
1237 panel_config->dsc.force_dsc_edp_policy = 0;
1238 }
1239
dm_helpers_override_panel_settings(struct dc_context * ctx,struct dc_link * link)1240 void dm_helpers_override_panel_settings(
1241 struct dc_context *ctx,
1242 struct dc_link *link)
1243 {
1244 unsigned int panel_inst = 0;
1245
1246 // Feature DSC
1247 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1248 link->panel_config.dsc.disable_dsc_edp = true;
1249
1250 if (dc_get_edp_link_panel_inst(ctx->dc, link, &panel_inst) && panel_inst == 1) {
1251 link->panel_config.psr.disable_psr = true;
1252 link->panel_config.psr.disallow_psrsu = true;
1253 link->panel_config.psr.disallow_replay = true;
1254 }
1255 }
1256
/* Delegate a DC GPU-memory allocation to the DM-level allocator. */
void *dm_helpers_allocate_gpu_mem(
	struct dc_context *ctx,
	enum dc_gpu_mem_alloc_type type,
	size_t size,
	long long *addr)
{
	return dm_allocate_gpu_mem(ctx->driver_context, type, size, addr);
}
1267
dm_helpers_free_gpu_mem(struct dc_context * ctx,enum dc_gpu_mem_alloc_type type,void * pvMem)1268 void dm_helpers_free_gpu_mem(
1269 struct dc_context *ctx,
1270 enum dc_gpu_mem_alloc_type type,
1271 void *pvMem)
1272 {
1273 struct amdgpu_device *adev = ctx->driver_context;
1274
1275 dm_free_gpu_mem(adev, type, pvMem);
1276 }
1277
/* Enable or disable the DMCUB outbox interrupt; returns the dc result. */
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable)
{
	bool ret = dc_interrupt_set(ctx->dc, DC_IRQ_SOURCE_DMCUB_OUTBOX, enable);

	DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
			 enable ? "en" : "dis", ret);
	return ret;
}
1291
dm_helpers_mst_enable_stream_features(const struct dc_stream_state * stream)1292 void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
1293 {
1294 /* TODO: virtual DPCD */
1295 struct dc_link *link = stream->link;
1296 union down_spread_ctrl old_downspread;
1297 union down_spread_ctrl new_downspread;
1298
1299 if (link->aux_access_disabled)
1300 return;
1301
1302 if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
1303 &old_downspread.raw,
1304 sizeof(old_downspread)))
1305 return;
1306
1307 new_downspread.raw = old_downspread.raw;
1308 new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
1309 (stream->ignore_msa_timing_param) ? 1 : 0;
1310
1311 if (new_downspread.raw != old_downspread.raw)
1312 dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
1313 &new_downspread.raw,
1314 sizeof(new_downspread));
1315 }
1316
/*
 * Handle a DPCD automated-test request for a link test pattern
 * (DP compliance testing).
 *
 * Translates the DPCD-requested pattern, color space, color depth and
 * pixel encoding into DC types, retimes the active stream if the test
 * asks for a different format, bumps memory clocks (temporary compliance
 * workaround), and programs the pattern on the link.
 *
 * NOTE(review): this function always returns false, even on the success
 * path — callers presumably ignore the return value; confirm before
 * relying on it.
 */
bool dm_helpers_dp_handle_test_pattern_request(
		struct dc_context *ctx,
		const struct dc_link *link,
		union link_test_pattern dpcd_test_pattern,
		union test_misc dpcd_test_params)
{
	enum dp_test_pattern test_pattern;
	enum dp_test_pattern_color_space test_pattern_color_space =
			DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
	enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
	enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
	struct pipe_ctx *pipe_ctx = NULL;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_device *dev = aconnector->base.dev;
	struct dc_state *dc_state = ctx->dc->current_state;
	struct clk_mgr *clk_mgr = ctx->dc->clk_mgr;
	int i;

	/* Find the head pipe (no top pipe, no preceding ODM pipe) driving this link. */
	for (i = 0; i < MAX_PIPES; i++) {
		if (pipes[i].stream == NULL)
			continue;

		if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
			!pipes[i].prev_odm_pipe) {
			pipe_ctx = &pipes[i];
			break;
		}
	}

	if (pipe_ctx == NULL)
		return false;

	/* Map the DPCD-requested pattern onto DC's test-pattern enum. */
	switch (dpcd_test_pattern.bits.PATTERN) {
	case LINK_TEST_PATTERN_COLOR_RAMP:
		test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
		break;
	case LINK_TEST_PATTERN_VERTICAL_BARS:
		test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
		break; /* black and white */
	case LINK_TEST_PATTERN_COLOR_SQUARES:
		test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
				TEST_DYN_RANGE_VESA ?
				DP_TEST_PATTERN_COLOR_SQUARES :
				DP_TEST_PATTERN_COLOR_SQUARES_CEA);
		break;
	default:
		test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
		break;
	}

	/* CLR_FORMAT == 0 means RGB; otherwise the coefs bit selects the YCbCr matrix. */
	if (dpcd_test_params.bits.CLR_FORMAT == 0)
		test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
	else
		test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
				DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;

	switch (dpcd_test_params.bits.BPC) {
	case 0: // 6 bits
		requestColorDepth = COLOR_DEPTH_666;
		break;
	case 1: // 8 bits
		requestColorDepth = COLOR_DEPTH_888;
		break;
	case 2: // 10 bits
		requestColorDepth = COLOR_DEPTH_101010;
		break;
	case 3: // 12 bits
		requestColorDepth = COLOR_DEPTH_121212;
		break;
	default:
		break;
	}

	switch (dpcd_test_params.bits.CLR_FORMAT) {
	case 0:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	case 1:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
		break;
	case 2:
		requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
		break;
	default:
		requestPixelEncoding = PIXEL_ENCODING_RGB;
		break;
	}

	/* Retime the stream when the test asks for a different format than active. */
	if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
		&& pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
		|| (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
		&& pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
		drm_dbg(dev,
			"original bpc %d pix encoding %d, changing to %d %d\n",
			pipe_ctx->stream->timing.display_color_depth,
			pipe_ctx->stream->timing.pixel_encoding,
			requestColorDepth,
			requestPixelEncoding);
		pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
		pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;

		/* DSC config depends on color depth/encoding; keep it in sync. */
		dc_link_update_dsc_config(pipe_ctx);

		aconnector->timing_changed = true;
		/* store current timing */
		if (aconnector->timing_requested)
			*aconnector->timing_requested = pipe_ctx->stream->timing;
		else
			drm_err(dev, "timing storage failed\n");

	}

	pipe_ctx->stream->test_pattern.type = test_pattern;
	pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;

	/* Temp W/A for compliance test failure */
	dc_state->bw_ctx.bw.dcn.clk.p_state_change_support = false;
	dc_state->bw_ctx.bw.dcn.clk.dramclk_khz = clk_mgr->dc_mode_softmax_enabled ?
		clk_mgr->bw_params->dc_mode_softmax_memclk : clk_mgr->bw_params->max_memclk_mhz;
	dc_state->bw_ctx.bw.dcn.clk.idle_dramclk_khz = dc_state->bw_ctx.bw.dcn.clk.dramclk_khz;
	ctx->dc->clk_mgr->funcs->update_clocks(
			ctx->dc->clk_mgr,
			dc_state,
			false);

	dc_link_dp_set_test_pattern(
		(struct dc_link *) link,
		test_pattern,
		test_pattern_color_space,
		NULL,
		NULL,
		0);

	return false;
}
1454
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
	/* Not implemented: PHYD32CLK programming is still a TODO. */
}
1459
/*
 * Toggle the idle-detection worker.  The work item is only kicked when
 * enabling, the worker is not already running, and the device is headless.
 */
void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
{
	struct amdgpu_device *adev = ctx->driver_context;

	if (!adev->dm.idle_workqueue)
		return;

	adev->dm.idle_workqueue->enable = enable;

	if (enable && !adev->dm.idle_workqueue->running &&
	    amdgpu_dm_is_headless(adev))
		schedule_work(&adev->dm.idle_workqueue->work);
}
1470
void dm_helpers_dp_mst_update_branch_bandwidth(
	struct dc_context *ctx,
	struct dc_link *link)
{
	/* Not implemented: MST branch bandwidth tracking is still a TODO. */
}
1477
dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)1478 static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
1479 {
1480 bool ret_val = false;
1481
1482 switch (branch_dev_id) {
1483 case DP_BRANCH_DEVICE_ID_0060AD:
1484 case DP_BRANCH_DEVICE_ID_00E04C:
1485 case DP_BRANCH_DEVICE_ID_90CC24:
1486 case DP_BRANCH_DEVICE_ID_001CF8:
1487 case DP_BRANCH_DEVICE_ID_001FF2:
1488 ret_val = true;
1489 break;
1490 default:
1491 break;
1492 }
1493
1494 return ret_val;
1495 }
1496
dm_get_adaptive_sync_support_type(struct dc_link * link)1497 enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
1498 {
1499 struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
1500 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
1501
1502 switch (dpcd_caps->dongle_type) {
1503 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
1504 if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
1505 dpcd_caps->allow_invalid_MSA_timing_param == true &&
1506 dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
1507 as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
1508 break;
1509 default:
1510 break;
1511 }
1512
1513 return as_type;
1514 }
1515
dm_helpers_is_fullscreen(struct dc_context * ctx,struct dc_stream_state * stream)1516 bool dm_helpers_is_fullscreen(struct dc_context *ctx, struct dc_stream_state *stream)
1517 {
1518 // TODO
1519 return false;
1520 }
1521
dm_helpers_is_hdr_on(struct dc_context * ctx,struct dc_stream_state * stream)1522 bool dm_helpers_is_hdr_on(struct dc_context *ctx, struct dc_stream_state *stream)
1523 {
1524 // TODO
1525 return false;
1526 }
1527
/*
 * Issue a DDC/CI "Get VCP Feature" request for @vcp_code over the link's
 * I2C channel (native DDC or DP AUX-backed) and read the sink's reply
 * into @reply.
 *
 * Returns 0 on success, -EIO when no valid reply arrives within the
 * retry budget.
 */
static int mccs_operation_vcp_request(unsigned int vcp_code, struct dc_link *link,
				      union vcp_reply *reply)
{
	const unsigned char retry_interval_ms = 40;	/* delay between attempts */
	unsigned char retry = 5;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	struct i2c_msg msg = {0};
	int ret = 0;
	int idx;

	unsigned char wr_data[MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST] = {
		MCCS_SRC_ADDR,			/* Byte0 - Src Addr */
		MCCS_LENGTH_OFFSET + 2,		/* Byte1 - Length */
		MCCS_OP_CODE_VCP_REQUEST,	/* Byte2 - MCCS Command */
		(unsigned char) vcp_code,	/* Byte3 - VCP Code */
		MCCS_DEST_ADDR << 1		/* Byte4 - CheckSum */
	};

	/* calculate checksum: XOR all preceding bytes into the last byte,
	 * which was seeded above with the 8-bit destination address
	 * (0x6E, per the DDC/CI checksum convention).
	 */
	for (idx = 0; idx < (MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST - 1); idx++)
		wr_data[(MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST-1)] ^= wr_data[idx];

	/* DP sinks are reached through the AUX-backed DDC adapter. */
	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	do {
		/* Write the VCP request to the sink... */
		msg.addr = MCCS_DEST_ADDR;
		msg.flags = 0;
		msg.len = MCCS_OP_BUFF_SIZE__WR_VCP_REQUEST;
		msg.buf = wr_data;

		ret = i2c_transfer(ddc, &msg, 1);
		if (ret != 1)	/* one message transferred == success */
			goto mccs_retry;

		/* ...give the sink time to prepare the reply... */
		msleep(retry_interval_ms);

		/* ...then read the reply back into the caller's buffer. */
		msg.addr = MCCS_DEST_ADDR;
		msg.flags = I2C_M_RD;
		msg.len = MCCS_OP_BUFF_SIZE_RD_VCP_REQUEST;
		msg.buf = reply->raw;

		ret = i2c_transfer(ddc, &msg, 1);

		/* sink might reply with null msg if it can't reply in time */
		if (ret == 1 && reply->bytes.length > MCCS_LENGTH_OFFSET)
			break;
mccs_retry:
		retry--;
		msleep(retry_interval_ms);
	} while (retry);

	if (!retry) {
		drm_dbg_driver(aconnector->base.dev,
			"%s: MCCS VCP request failed after retries", __func__);
		return -EIO;
	}

	return 0;
}
1591
dm_helpers_read_mccs_caps(struct dc_context * ctx,struct dc_link * link,struct dc_sink * sink)1592 void dm_helpers_read_mccs_caps(struct dc_context *ctx, struct dc_link *link,
1593 struct dc_sink *sink)
1594 {
1595 bool mccs_op = false;
1596 struct dpcd_caps *dpcd_caps;
1597 struct drm_device *dev;
1598 uint16_t freesync_vcp_value = 0;
1599 union vcp_reply vcp_reply_value = {0};
1600
1601 if (!ctx)
1602 return;
1603 dev = adev_to_drm(ctx->driver_context);
1604
1605 if (!link || !sink) {
1606 drm_dbg_driver(dev, "%s: link or sink is NULL", __func__);
1607 return;
1608 }
1609
1610 sink->mccs_caps.freesync_supported = false;
1611 dpcd_caps = &link->dpcd_caps;
1612
1613 if (sink->edid_caps.freesync_vcp_code != 0) {
1614 if (dc_is_dp_signal(link->connector_signal)) {
1615 if ((dpcd_caps->dpcd_rev.raw >= DPCD_REV_14) &&
1616 (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) &&
1617 dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id) &&
1618 (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true))
1619 mccs_op = true;
1620
1621 if ((dpcd_caps->dongle_type != DISPLAY_DONGLE_NONE &&
1622 dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)) {
1623 if (mccs_op == false)
1624 drm_dbg_driver(dev, "%s: Legacy Pcon support", __func__);
1625 mccs_op = true;
1626 }
1627
1628 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1629 // Todo: Freesync over MST
1630 mccs_op = false;
1631 }
1632 }
1633
1634 if (dc_is_hdmi_signal(link->connector_signal)) {
1635 drm_dbg_driver(dev, "%s: Local HDMI sink", __func__);
1636 mccs_op = true;
1637 }
1638
1639 if (mccs_op == true) {
1640 // MCCS VCP request to get VCP value
1641 if (!mccs_operation_vcp_request(sink->edid_caps.freesync_vcp_code, link,
1642 &vcp_reply_value)) {
1643 freesync_vcp_value = vcp_reply_value.bytes.present_value[1];
1644 freesync_vcp_value |= (uint16_t) vcp_reply_value.bytes.present_value[0] << 8;
1645 }
1646 // If VCP Value bit 0 is 1, freesyncSupport = true
1647 sink->mccs_caps.freesync_supported =
1648 (freesync_vcp_value & FREESYNC_SUPPORTED) ? true : false;
1649 }
1650 }
1651 }
1652
/*
 * Issue a DDC/CI "Set VCP Feature" write for @vcp_code with @value over
 * the link's I2C channel (native DDC or DP AUX-backed).
 *
 * Returns 0 once the write transfer succeeds, -EIO after all retries fail.
 */
static int mccs_operation_vcp_set(unsigned int vcp_code, struct dc_link *link, uint16_t value)
{
	const unsigned char retry_interval_ms = 40;	/* delay between attempts */
	unsigned char retry = 5;
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_adapter *ddc;
	struct i2c_msg msg = {0};
	int ret = 0;
	int idx;

	unsigned char wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET] = {
		MCCS_SRC_ADDR,			/* Byte0 - Src Addr */
		MCCS_LENGTH_OFFSET + 4,		/* Byte1 - Length */
		MCCS_OP_CODE_VCP_SET,		/* Byte2 - MCCS Command */
		(unsigned char)vcp_code,	/* Byte3 - VCP Code */
		(unsigned char)(value >> 8),	/* Byte4 - Value High Byte */
		(unsigned char)(value & 0xFF),	/* Byte5 - Value Low Byte */
		MCCS_DEST_ADDR << 1		/* Byte6 - CheckSum */
	};

	/* calculate checksum: XOR all preceding bytes into the last byte,
	 * which was seeded above with the 8-bit destination address
	 * (0x6E, per the DDC/CI checksum convention).
	 */
	for (idx = 0; idx < (MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1); idx++)
		wr_data[MCCS_OP_BUFF_SIZE_WR_VCP_SET - 1] ^= wr_data[idx];

	/* DP sinks are reached through the AUX-backed DDC adapter. */
	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	do {
		msg.addr = MCCS_DEST_ADDR;
		msg.flags = 0;
		msg.len = MCCS_OP_BUFF_SIZE_WR_VCP_SET;
		msg.buf = wr_data;

		ret = i2c_transfer(ddc, &msg, 1);
		if (ret == 1)	/* one message transferred == success */
			break;

		retry--;
		msleep(retry_interval_ms);
	} while (retry);

	if (!retry)
		return -EIO;

	return 0;
}
1701
dm_helpers_mccs_vcp_set(struct dc_context * ctx,struct dc_link * link,struct dc_sink * sink)1702 void dm_helpers_mccs_vcp_set(struct dc_context *ctx, struct dc_link *link,
1703 struct dc_sink *sink)
1704 {
1705 struct drm_device *dev;
1706 const uint16_t enable = 0x0101;
1707
1708 if (!ctx)
1709 return;
1710 dev = adev_to_drm(ctx->driver_context);
1711
1712 if (!link || !sink) {
1713 drm_dbg_driver(dev, "%s: link or sink is NULL", __func__);
1714 return;
1715 }
1716
1717 if (!sink->mccs_caps.freesync_supported) {
1718 drm_dbg_driver(dev, "%s: MCCS freesync not supported on this sink", __func__);
1719 return;
1720 }
1721
1722 if (mccs_operation_vcp_set(sink->edid_caps.freesync_vcp_code, link, enable))
1723 drm_dbg_driver(dev, "%s: Failed to set VCP code %d", __func__,
1724 sink->edid_caps.freesync_vcp_code);
1725 }
1726
1727