xref: /linux/drivers/gpu/drm/i915/display/intel_dp_link_training.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/debugfs.h>
25 
26 #include <drm/display/drm_dp_helper.h>
27 #include <drm/drm_print.h>
28 
29 #include "i915_utils.h"
30 #include "intel_display_core.h"
31 #include "intel_display_types.h"
32 #include "intel_dp.h"
33 #include "intel_dp_link_training.h"
34 #include "intel_encoder.h"
35 #include "intel_hotplug.h"
36 #include "intel_panel.h"
37 
/*
 * Common prefix and argument list for link training log messages, identifying
 * the connector, the encoder and the PHY (DPRX or an LTTPR) being trained.
 */
#define LT_MSG_PREFIX			"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy)	(_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

/* Debug-level link training message, tagged with the connector/encoder/PHY. */
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(to_intel_display(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)

/*
 * Error-level link training message. A failure on a disconnected sink is
 * expected, so it is demoted to a debug message in that case.
 */
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(to_intel_display(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)

/*
 * NOTE(review): limit on consecutive link training failures — presumably used
 * by the retraining/fallback logic elsewhere in this file; confirm at callers.
 */
#define MAX_SEQ_TRAIN_FAILURES 2
60 
61 static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
62 {
63 	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
64 }
65 
66 static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
67 {
68 	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
69 				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
70 }
71 
72 static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
73 				   enum drm_dp_phy dp_phy)
74 {
75 	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
76 }
77 
78 static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
79 					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
80 					 enum drm_dp_phy dp_phy)
81 {
82 	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
83 
84 	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
85 		lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n");
86 		return;
87 	}
88 
89 	lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n",
90 	       (int)sizeof(intel_dp->lttpr_phy_caps[0]),
91 	       phy_caps);
92 }
93 
94 static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
95 					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
96 {
97 	int ret;
98 
99 	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
100 					    intel_dp->lttpr_common_caps);
101 	if (ret < 0)
102 		goto reset_caps;
103 
104 	lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n",
105 	       (int)sizeof(intel_dp->lttpr_common_caps),
106 	       intel_dp->lttpr_common_caps);
107 
108 	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
109 	if (intel_dp->lttpr_common_caps[0] < 0x14)
110 		goto reset_caps;
111 
112 	return true;
113 
114 reset_caps:
115 	intel_dp_reset_lttpr_common_caps(intel_dp);
116 	return false;
117 }
118 
119 static bool
120 intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
121 {
122 	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
123 			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
124 
125 	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
126 				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;
127 
128 	return true;
129 }
130 
131 bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
132 {
133 	return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
134 					   DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
135 		DP_PHY_REPEATER_MODE_TRANSPARENT;
136 }
137 
/*
 * Read the LTTPR common capabilities and switch the LTTPR PHYs to
 * non-transparent mode if this is supported. Preserve the
 * transparent/non-transparent mode on an active link.
 *
 * Return the number of detected LTTPRs in non-transparent mode or 0 if the
 * LTTPRs are in transparent mode or the detection failed.
 */
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;
	int ret;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	/* A negative count indicates an invalid repeater count field. */
	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * Don't change the mode on an active link, to prevent a loss of link
	 * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
	 * resetting its internal state when the mode is changed from
	 * non-transparent to transparent.
	 */
	if (intel_dp->link.active) {
		if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
			goto out_reset_lttpr_count;

		return lttpr_count;
	}

	ret = drm_dp_lttpr_init(&intel_dp->aux, lttpr_count);
	if (ret) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		/* Keep the cached mode consistent with the fallback. */
		intel_dp_set_lttpr_transparent_mode(intel_dp, true);

		goto out_reset_lttpr_count;
	}

	intel_dp_set_lttpr_transparent_mode(intel_dp, false);

	return lttpr_count;

out_reset_lttpr_count:
	intel_dp_reset_lttpr_count(intel_dp);

	return 0;
}
195 
196 static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
197 {
198 	int lttpr_count;
199 	int i;
200 
201 	lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);
202 
203 	for (i = 0; i < lttpr_count; i++) {
204 		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));
205 		drm_dp_dump_lttpr_desc(&intel_dp->aux, DP_PHY_LTTPR(i));
206 	}
207 
208 	return lttpr_count;
209 }
210 
211 int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
212 {
213 	struct intel_display *display = to_intel_display(intel_dp);
214 
215 	if (intel_dp_is_edp(intel_dp))
216 		return 0;
217 
218 	/*
219 	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
220 	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
221 	 */
222 	if (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)
223 		if (drm_dp_dpcd_probe(&intel_dp->aux,
224 				      DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
225 			return -EIO;
226 
227 	if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
228 		return -EIO;
229 
230 	return 0;
231 }
232 
/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(display) >= 10 && !display->platform.geminilake)) {
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		int err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err != 0)
			return err;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		/* Without valid DPRX caps the LTTPR state is stale too. */
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}
282 
283 static u8 dp_voltage_max(u8 preemph)
284 {
285 	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
286 	case DP_TRAIN_PRE_EMPH_LEVEL_0:
287 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
288 	case DP_TRAIN_PRE_EMPH_LEVEL_1:
289 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
290 	case DP_TRAIN_PRE_EMPH_LEVEL_2:
291 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
292 	case DP_TRAIN_PRE_EMPH_LEVEL_3:
293 	default:
294 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
295 	}
296 }
297 
298 static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
299 				     enum drm_dp_phy dp_phy)
300 {
301 	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
302 
303 	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
304 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
305 	else
306 		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
307 }
308 
309 static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
310 				     enum drm_dp_phy dp_phy)
311 {
312 	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
313 
314 	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
315 		return DP_TRAIN_PRE_EMPH_LEVEL_3;
316 	else
317 		return DP_TRAIN_PRE_EMPH_LEVEL_2;
318 }
319 
/*
 * Return whether the DPTX driving @dp_phy is the source itself, i.e. whether
 * @dp_phy is the DPRX with no LTTPRs detected, or the LTTPR adjacent to the
 * source (LTTPR(lttpr_count - 1) per the numbering used here).
 */
static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	/* Training a non-DPRX PHY with no LTTPRs detected is a driver bug. */
	drm_WARN_ON_ONCE(display->drm,
			 lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
332 
333 static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
334 				   const struct intel_crtc_state *crtc_state,
335 				   enum drm_dp_phy dp_phy)
336 {
337 	struct intel_display *display = to_intel_display(intel_dp);
338 	u8 voltage_max;
339 
340 	/*
341 	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
342 	 * the DPRX_PHY we train.
343 	 */
344 	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
345 		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
346 	else
347 		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
348 
349 	drm_WARN_ON_ONCE(display->drm,
350 			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
351 			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
352 
353 	return voltage_max;
354 }
355 
356 static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
357 				   enum drm_dp_phy dp_phy)
358 {
359 	struct intel_display *display = to_intel_display(intel_dp);
360 	u8 preemph_max;
361 
362 	/*
363 	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
364 	 * the DPRX_PHY we train.
365 	 */
366 	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
367 		preemph_max = intel_dp->preemph_max(intel_dp);
368 	else
369 		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
370 
371 	drm_WARN_ON_ONCE(display->drm,
372 			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
373 			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
374 
375 	return preemph_max;
376 }
377 
378 static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
379 				       enum drm_dp_phy dp_phy)
380 {
381 	struct intel_display *display = to_intel_display(intel_dp);
382 
383 	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
384 		DISPLAY_VER(display) >= 10 || display->platform.broxton;
385 }
386 
387 /* 128b/132b */
388 static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
389 						 const struct intel_crtc_state *crtc_state,
390 						 enum drm_dp_phy dp_phy,
391 						 const u8 link_status[DP_LINK_STATUS_SIZE],
392 						 int lane)
393 {
394 	u8 tx_ffe = 0;
395 
396 	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
397 		lane = min(lane, crtc_state->lane_count - 1);
398 		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
399 	} else {
400 		for (lane = 0; lane < crtc_state->lane_count; lane++)
401 			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
402 	}
403 
404 	return tx_ffe;
405 }
406 
/*
 * 8b/10b: return the combined vswing/pre-emphasis value to program for @lane,
 * based on the sink's adjust requests in @link_status, clamped to what the
 * upstream DPTX PHY supports.
 */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		/* One common level for all lanes: use the max requested. */
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	/*
	 * Clamp pre-emphasis first: the vswing limit depends on the final
	 * pre-emphasis level (see dp_voltage_max()).
	 */
	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}
443 
444 static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
445 					 const struct intel_crtc_state *crtc_state,
446 					 enum drm_dp_phy dp_phy,
447 					 const u8 link_status[DP_LINK_STATUS_SIZE],
448 					 int lane)
449 {
450 	if (intel_dp_is_uhbr(crtc_state))
451 		return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
452 							      dp_phy, link_status, lane);
453 	else
454 		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
455 							       dp_phy, link_status, lane);
456 }
457 
/*
 * Helpers to log the sink's per-lane adjust requests; each TRAIN_REQ_*_ARGS
 * macro expands to the four per-lane values matching TRAIN_REQ_FMT.
 */
#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
480 
/*
 * Update intel_dp->train_set for all (up to 4) lanes from the sink's adjust
 * requests in @link_status, logging the requested levels.
 */
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing request: " TRAIN_REQ_FMT ", "
		       "pre-emphasis request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_VSWING_ARGS(link_status),
		       TRAIN_REQ_PREEMPH_ARGS(link_status));
	}

	/* All 4 entries are updated even if fewer lanes are active. */
	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] =
			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
						       dp_phy, link_status, lane);
}
510 
511 static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
512 					     enum drm_dp_phy dp_phy)
513 {
514 	return dp_phy == DP_PHY_DPRX ?
515 		DP_TRAINING_PATTERN_SET :
516 		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
517 }
518 
/*
 * Program the training pattern on the source and write the pattern together
 * with the per-lane levels to the (DPRX or LTTPR) DPCD. Returns true if the
 * whole DPCD write succeeded.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_phy, dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}
539 
540 static char dp_training_pattern_name(u8 train_pat)
541 {
542 	switch (train_pat) {
543 	case DP_TRAINING_PATTERN_1:
544 	case DP_TRAINING_PATTERN_2:
545 	case DP_TRAINING_PATTERN_3:
546 		return '0' + train_pat;
547 	case DP_TRAINING_PATTERN_4:
548 		return '4';
549 	default:
550 		MISSING_CASE(train_pat);
551 		return '?';
552 	}
553 }
554 
/*
 * Program the given training pattern on the source side, logging it unless
 * training is being disabled.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n",
		       dp_training_pattern_name(train_pat));

	/* Platform-specific hook set up at encoder init. */
	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
569 
/*
 * Helpers to log the programmed train_set values; each TRAIN_SET_*_ARGS
 * macro expands to four value/"(max)"-suffix pairs matching TRAIN_SET_FMT.
 */
#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
#define TRAIN_SET_VSWING_ARGS(train_set) \
	_TRAIN_SET_VSWING_ARGS((train_set)[0]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[1]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[2]), \
	_TRAIN_SET_VSWING_ARGS((train_set)[3])
#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
	((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
	(train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
#define TRAIN_SET_PREEMPH_ARGS(train_set) \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
	_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
	((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
#define TRAIN_SET_TX_FFE_ARGS(train_set) \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
	_TRAIN_SET_TX_FFE_ARGS((train_set)[3])
594 
/*
 * Log the levels in intel_dp->train_set and, when the source drives the
 * PHY under training, program them on the source side. For LTTPR-driven
 * PHYs the levels are only written to the DPCD by the callers.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state,
				enum drm_dp_phy dp_phy)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE presets: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing levels: " TRAIN_SET_FMT ", "
		       "pre-emphasis levels: " TRAIN_SET_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
		       TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
	}

	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		encoder->set_signal_levels(encoder, crtc_state);
}
620 
/*
 * Start link training from the lowest signal levels on all lanes and enable
 * the given training pattern. Returns true if the DPCD write succeeded.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  u8 dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
	return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
}
631 
632 static bool
633 intel_dp_update_link_train(struct intel_dp *intel_dp,
634 			   const struct intel_crtc_state *crtc_state,
635 			   enum drm_dp_phy dp_phy)
636 {
637 	int reg = dp_phy == DP_PHY_DPRX ?
638 			    DP_TRAINING_LANE0_SET :
639 			    DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
640 	int ret;
641 
642 	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
643 
644 	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
645 				intel_dp->train_set, crtc_state->lane_count);
646 
647 	return ret == crtc_state->lane_count;
648 }
649 
650 /* 128b/132b */
651 static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
652 {
653 	return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
654 		DP_TX_FFE_PRESET_VALUE_MASK;
655 }
656 
657 /*
658  * 8b/10b
659  *
660  * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
661  * have self contradicting tests around this area.
662  *
663  * In lieu of better ideas let's just stop when we've reached the max supported
664  * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
665  * whether vswing level 3 is supported or not.
666  */
667 static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
668 {
669 	u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
670 		DP_TRAIN_VOLTAGE_SWING_SHIFT;
671 	u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
672 		DP_TRAIN_PRE_EMPHASIS_SHIFT;
673 
674 	if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
675 		return false;
676 
677 	if (v + p != 3)
678 		return false;
679 
680 	return true;
681 }
682 
683 static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
684 					     const struct intel_crtc_state *crtc_state)
685 {
686 	int lane;
687 
688 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
689 		u8 train_set_lane = intel_dp->train_set[lane];
690 
691 		if (intel_dp_is_uhbr(crtc_state)) {
692 			if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
693 				return false;
694 		} else {
695 			if (!intel_dp_lane_max_vswing_reached(train_set_lane))
696 				return false;
697 		}
698 	}
699 
700 	return true;
701 }
702 
703 void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, bool is_vrr)
704 {
705 	u8 link_config[2];
706 
707 	link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
708 	link_config[1] = drm_dp_is_uhbr_rate(link_rate) ?
709 			 DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
710 	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
711 }
712 
/* Apply the crtc state's VRR/channel-coding config via DP_DOWNSPREAD_CTRL. */
static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state)
{
	 /*
	  * Currently, we set the MSA ignore bit based on vrr.in_range.
	  * We can't really read that out during driver load since we don't have
	  * the connector information read in yet. So if we do end up doing a
	  * modeset during initial_commit() we'll clear the MSA ignore bit.
	  * GOP likely wouldn't have set this bit so after the initial commit,
	  * if there are no modesets and we enable VRR mode seamlessly
	  * (without a full modeset), the MSA ignore bit might never get set.
	  *
	  * #TODO: Implement readout of vrr.in_range.
	  * We need fastset support for setting the MSA ignore bit in DPCD,
	  * especially on the first real commit when clearing the inherited flag.
	  */
	intel_dp_link_training_set_mode(intel_dp,
					crtc_state->port_clock, crtc_state->vrr.in_range);
}
732 
/*
 * Program the link rate and lane count to the sink, using either the
 * LINK_BW_SET (classic DP) or the LINK_RATE_SET (eDP v1.4+) method.
 */
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
				   int link_bw, int rate_select, int lane_count,
				   bool enhanced_framing)
{
	if (enhanced_framing)
		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	if (link_bw) {
		/* DP and eDP v1.3 and earlier link bw set method. */
		u8 link_config[] = { link_bw, lane_count };

		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
				  ARRAY_SIZE(link_config));
	} else {
		/*
		 * eDP v1.4 and later link rate set method.
		 *
		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
		 *
		 * eDP v1.5 sinks allow choosing either, and the last choice
		 * shall be active.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
	}
}
760 
761 static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
762 					const struct intel_crtc_state *crtc_state,
763 					u8 link_bw, u8 rate_select)
764 {
765 	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count,
766 				      crtc_state->enhanced_framing);
767 }
768 
/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	/* Optional platform hook; may enable the port for retraining. */
	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		/* The read is done purely for the MUX to snoop; result unused. */
		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}
822 
823 static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
824 					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
825 					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
826 {
827 	int lane;
828 
829 	for (lane = 0; lane < crtc_state->lane_count; lane++) {
830 		u8 old, new;
831 
832 		if (intel_dp_is_uhbr(crtc_state)) {
833 			old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
834 			new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
835 		} else {
836 			old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
837 				drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
838 			new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
839 				drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
840 		}
841 
842 		if (old != new)
843 			return true;
844 	}
845 
846 	return false;
847 }
848 
/* Dump the first six DP link status bytes for debugging a failed training. */
void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}
858 
/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	/* Per-PHY wait between setting levels and reading back the status. */
	delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
						    intel_dp->dpcd, dp_phy,
						    intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * x 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sync from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		fsleep(delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		/* Give up after 5 tries with unchanged adjust requests. */
		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		/* Nothing left to try once every lane hit its max level. */
		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		/* Count consecutive iterations with an unchanged request. */
		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}
951 
952 /*
953  * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
954  * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
955  * 1.2 devices that support it, TPS2 otherwise.
956  */
957 static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
958 				     const struct intel_crtc_state *crtc_state,
959 				     enum drm_dp_phy dp_phy)
960 {
961 	struct intel_display *display = to_intel_display(intel_dp);
962 	bool source_tps3, sink_tps3, source_tps4, sink_tps4;
963 
964 	/* UHBR+ use separate 128b/132b TPS2 */
965 	if (intel_dp_is_uhbr(crtc_state))
966 		return DP_TRAINING_PATTERN_2;
967 
968 	/*
969 	 * TPS4 support is mandatory for all downstream devices that
970 	 * support HBR3. There are no known eDP panels that support
971 	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
972 	 * LTTPRs must support TPS4.
973 	 */
974 	source_tps4 = intel_dp_source_supports_tps4(display);
975 	sink_tps4 = dp_phy != DP_PHY_DPRX ||
976 		    drm_dp_tps4_supported(intel_dp->dpcd);
977 	if (source_tps4 && sink_tps4) {
978 		return DP_TRAINING_PATTERN_4;
979 	} else if (crtc_state->port_clock == 810000) {
980 		if (!source_tps4)
981 			lt_dbg(intel_dp, dp_phy,
982 			       "8.1 Gbps link rate without source TPS4 support\n");
983 		if (!sink_tps4)
984 			lt_dbg(intel_dp, dp_phy,
985 			       "8.1 Gbps link rate without sink TPS4 support\n");
986 	}
987 
988 	/*
989 	 * TPS3 support is mandatory for downstream devices that
990 	 * support HBR2. However, not all sinks follow the spec.
991 	 */
992 	source_tps3 = intel_dp_source_supports_tps3(display);
993 	sink_tps3 = dp_phy != DP_PHY_DPRX ||
994 		    drm_dp_tps3_supported(intel_dp->dpcd);
995 	if (source_tps3 && sink_tps3) {
996 		return  DP_TRAINING_PATTERN_3;
997 	} else if (crtc_state->port_clock >= 540000) {
998 		if (!source_tps3)
999 			lt_dbg(intel_dp, dp_phy,
1000 			       ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
1001 		if (!sink_tps3)
1002 			lt_dbg(intel_dp, dp_phy,
1003 			       ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
1004 	}
1005 
1006 	return DP_TRAINING_PATTERN_2;
1007 }
1008 
/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 *
 * Returns true once channel equalization completed on all lanes while clock
 * recovery stayed locked, false otherwise.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;
	int delay_us;

	/* Per-PHY delay to apply before each link status poll below. */
	delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
						intel_dp->dpcd, dp_phy,
						intel_dp_is_uhbr(crtc_state));

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n");
		return false;
	}

	/* Up to 5 attempts, adjusting the training set after each miss. */
	for (tries = 0; tries < 5; tries++) {
		fsleep(delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy,
			       "Clock recovery check failed, cannot continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
		lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}
1083 
1084 static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
1085 						   enum drm_dp_phy dp_phy)
1086 {
1087 	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
1088 	u8 val = DP_TRAINING_PATTERN_DISABLE;
1089 
1090 	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
1091 }
1092 
1093 static int
1094 intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
1095 			    const struct intel_crtc_state *crtc_state)
1096 {
1097 	u8 sink_status;
1098 	int ret;
1099 
1100 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
1101 	if (ret != 1) {
1102 		lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n");
1103 		return ret < 0 ? ret : -EIO;
1104 	}
1105 
1106 	return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
1107 }
1108 
/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	intel_dp->link.active = true;

	/* Disable the training pattern in the sink's DPCD first, then on the port. */
	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
					       DP_TRAINING_PATTERN_DISABLE);

	/*
	 * On 128b/132b links wait (up to 500ms) for any intra-hop AUX reply
	 * indication to clear before proceeding.
	 */
	if (intel_dp_is_uhbr(crtc_state) &&
	    wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
	}

	/* HPD processing was blocked in intel_dp_start_link_train(). */
	intel_hpd_unblock(encoder);

	/*
	 * Queue a link check unless retries are exhausted or long HPDs are
	 * being ignored; the first check (no prior failures) is delayed by
	 * 2 seconds, retries after a failure run immediately.
	 */
	if (!display->hotplug.ignore_long_hpd &&
	    intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) {
		int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000;

		intel_encoder_link_check_queue_work(encoder, delay_ms);
	}
}
1151 
1152 static bool
1153 intel_dp_link_train_phy(struct intel_dp *intel_dp,
1154 			const struct intel_crtc_state *crtc_state,
1155 			enum drm_dp_phy dp_phy)
1156 {
1157 	bool ret = false;
1158 
1159 	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
1160 		goto out;
1161 
1162 	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
1163 		goto out;
1164 
1165 	ret = true;
1166 
1167 out:
1168 	lt_dbg(intel_dp, dp_phy,
1169 	       "Link Training %s at link rate = %d, lane count = %d\n",
1170 	       ret ? "passed" : "failed",
1171 	       crtc_state->port_clock, crtc_state->lane_count);
1172 
1173 	return ret;
1174 }
1175 
1176 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
1177 						     int link_rate,
1178 						     u8 lane_count)
1179 {
1180 	/* FIXME figure out what we actually want here */
1181 	const struct drm_display_mode *fixed_mode =
1182 		intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
1183 	int mode_rate, max_rate;
1184 
1185 	mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
1186 	max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
1187 	if (mode_rate > max_rate)
1188 		return false;
1189 
1190 	return true;
1191 }
1192 
/*
 * Step down to the next lower entry in the (link rate, lane count)
 * configuration table (see intel_dp_link_config_index/get), skipping
 * entries that don't match a user-forced rate or lane count.
 *
 * Returns true with *new_link_rate / *new_lane_count set on success,
 * false when no lower configuration is available.
 */
static bool reduce_link_params_in_bw_order(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   int *new_link_rate, int *new_lane_count)
{
	int link_rate;
	int lane_count;
	int i;

	/* Start from the entry just below the currently used config. */
	i = intel_dp_link_config_index(intel_dp, crtc_state->port_clock, crtc_state->lane_count);
	for (i--; i >= 0; i--) {
		intel_dp_link_config_get(intel_dp, i, &link_rate, &lane_count);

		/* Honor any debugfs-forced link rate / lane count. */
		if ((intel_dp->link.force_rate &&
		     intel_dp->link.force_rate != link_rate) ||
		    (intel_dp->link.force_lane_count &&
		     intel_dp->link.force_lane_count != lane_count))
			continue;

		break;
	}

	/* Ran off the bottom of the table without finding a usable config. */
	if (i < 0)
		return false;

	*new_link_rate = link_rate;
	*new_lane_count = lane_count;

	return true;
}
1222 
1223 static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate)
1224 {
1225 	int rate_index;
1226 	int new_rate;
1227 
1228 	if (intel_dp->link.force_rate)
1229 		return -1;
1230 
1231 	rate_index = intel_dp_rate_index(intel_dp->common_rates,
1232 					 intel_dp->num_common_rates,
1233 					 current_rate);
1234 
1235 	if (rate_index <= 0)
1236 		return -1;
1237 
1238 	new_rate = intel_dp_common_rate(intel_dp, rate_index - 1);
1239 
1240 	/* TODO: Make switching from UHBR to non-UHBR rates work. */
1241 	if (drm_dp_is_uhbr_rate(current_rate) != drm_dp_is_uhbr_rate(new_rate))
1242 		return -1;
1243 
1244 	return new_rate;
1245 }
1246 
1247 static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count)
1248 {
1249 	if (intel_dp->link.force_lane_count)
1250 		return -1;
1251 
1252 	if (current_lane_count == 1)
1253 		return -1;
1254 
1255 	return current_lane_count >> 1;
1256 }
1257 
1258 static bool reduce_link_params_in_rate_lane_order(struct intel_dp *intel_dp,
1259 						  const struct intel_crtc_state *crtc_state,
1260 						  int *new_link_rate, int *new_lane_count)
1261 {
1262 	int link_rate;
1263 	int lane_count;
1264 
1265 	lane_count = crtc_state->lane_count;
1266 	link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock);
1267 	if (link_rate < 0) {
1268 		lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count);
1269 		link_rate = intel_dp_max_common_rate(intel_dp);
1270 	}
1271 
1272 	if (lane_count < 0)
1273 		return false;
1274 
1275 	*new_link_rate = link_rate;
1276 	*new_lane_count = lane_count;
1277 
1278 	return true;
1279 }
1280 
1281 static bool reduce_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state,
1282 			       int *new_link_rate, int *new_lane_count)
1283 {
1284 	/* TODO: Use the same fallback logic on SST as on MST. */
1285 	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
1286 		return reduce_link_params_in_bw_order(intel_dp, crtc_state,
1287 						      new_link_rate, new_lane_count);
1288 	else
1289 		return reduce_link_params_in_rate_lane_order(intel_dp, crtc_state,
1290 							     new_link_rate, new_lane_count);
1291 }
1292 
/*
 * Determine the fallback strategy after a failed link training.
 *
 * Returns 0 when a retry should be attempted (with max parameters on eDP
 * first, with reduced parameters otherwise), -1 when the link parameters
 * cannot be reduced any further.
 */
static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
						   const struct intel_crtc_state *crtc_state)
{
	int new_link_rate;
	int new_lane_count;

	/* First fallback on eDP: retry once with the maximum link parameters. */
	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	if (!reduce_link_params(intel_dp, crtc_state, &new_link_rate, &new_lane_count))
		return -1;

	/*
	 * Don't reduce below what the eDP panel's fixed mode requires;
	 * retry with unchanged parameters instead.
	 */
	if (intel_dp_is_edp(intel_dp) &&
	    !intel_dp_can_link_train_fallback_for_edp(intel_dp, new_link_rate, new_lane_count)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with same parameters\n");
		return 0;
	}

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "Reducing link parameters from %dx%d to %dx%d\n",
	       crtc_state->lane_count, crtc_state->port_clock,
	       new_lane_count, new_link_rate);

	/* Cap subsequent link training attempts to the reduced parameters. */
	intel_dp->link.max_rate = new_link_rate;
	intel_dp->link.max_lane_count = new_lane_count;

	return 0;
}
1326 
/*
 * Handle a link training failure by scheduling a modeset retry with
 * fallback parameters via a hotplug uevent.
 *
 * Returns true when the failure was handled (retry scheduled, sink
 * disconnected, or the failure attributed to HOBL), false when the link
 * parameters could not be reduced.
 */
static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state,
						     struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	/* A failure on a disconnected sink needs no retry. */
	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
		return true;
	}

	/*
	 * Blame HOBL first: retry with it disabled before reducing the
	 * link parameters.
	 */
	if (intel_dp->hobl_active) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Link Training failed with HOBL active, not enabling it from now on\n");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state)) {
		return false;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);

	return true;
}
1351 
1352 /* Perform the link training on all LTTPRs and the DPRX on a link. */
1353 static bool
1354 intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
1355 			     const struct intel_crtc_state *crtc_state,
1356 			     int lttpr_count)
1357 {
1358 	bool ret = true;
1359 	int i;
1360 
1361 	for (i = lttpr_count - 1; i >= 0; i--) {
1362 		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);
1363 
1364 		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
1365 		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);
1366 
1367 		if (!ret)
1368 			break;
1369 	}
1370 
1371 	if (ret)
1372 		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
1373 
1374 	if (intel_dp->set_idle_link_train)
1375 		intel_dp->set_idle_link_train(intel_dp, crtc_state);
1376 
1377 	return ret;
1378 }
1379 
/*
 * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
 *
 * Returns true once per-lane channel equalization and interlane alignment
 * are done, false on an AUX error, a downstream-reported failure, a timeout
 * or when the retry budget is exhausted.
 */
static bool
intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int delay_us;
	int try, max_tries = 20;
	unsigned long deadline;
	bool timeout = false;

	/*
	 * Reset signal levels. Start transmitting 128b/132b TPS1.
	 *
	 * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
	 * in DP_TRAINING_PATTERN_SET.
	 */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				       DP_TRAINING_PATTERN_1)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n");
		return false;
	}

	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

	/* Read the initial TX FFE settings. */
	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n");
		return false;
	}

	/* Update signal levels and training set as requested. */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n");
		return false;
	}

	/* Start transmitting 128b/132b TPS2. */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				     DP_TRAINING_PATTERN_2)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n");
		return false;
	}

	/* Time budget for the LANEx_EQ_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout(450);

	/* Poll until all lanes report channel equalization done. */
	for (try = 0; try < max_tries; try++) {
		fsleep(delay_us);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX,
			       "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n");
			break;
		}

		/* The extra post-deadline attempt (see below) also failed. */
		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n");
			return false;
		}

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		/*
		 * During LT, Tx shall read AUX_RD_INTERVAL just before writing the new FFE
		 * presets.
		 */
		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);

		/* Update signal levels and training set as requested. */
		if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
			return false;
		}
	}

	if (try == max_tries) {
		intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
		lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n");
		return false;
	}

	/* Then wait for interlane alignment, within the same time budget. */
	for (;;) {
		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n");
			return false;
		}

		usleep_range(2000, 3000);
	}

	return true;
}
1511 
/*
 * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
 *
 * Returns true once interlane alignment and per-lane symbol lock completed
 * in the CDS phase, false on an AUX error, a downstream-reported failure or
 * a timeout.
 */
static bool
intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   int lttpr_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	unsigned long deadline;

	/* Switch the link to the CDS phase by setting TPS2 CDS. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_2_CDS) != 1) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n");
		return false;
	}

	/* Time budget for the LANEx_CDS_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);

	for (;;) {
		bool timeout = false;

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		usleep_range(2000, 3000);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		/* All three conditions must hold for CDS completion. */
		if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
		    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
		    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
			break;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n");
			return false;
		}
	}

	return true;
}
1567 
/*
 * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
 *
 * Runs the LANEx_EQ_DONE and LANEx_CDS_DONE sequences in order; returns
 * true only when both passed.
 */
static bool
intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool passed = false;

	/* Any pending intra-hop AUX transaction must clear before training. */
	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
		goto out;
	}

	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
	    intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
		passed = true;

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "128b/132b Link Training %s at link rate = %d, lane count = %d\n",
	       passed ? "passed" : "failed",
	       crtc_state->port_clock, crtc_state->lane_count);

out:
	/*
	 * Ensure that the training pattern does get set to TPS2 even in case
	 * of a failure, as is the case at the end of a passing link training
	 * and what is expected by the transcoder. Leaving TPS1 set (and
	 * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
	 * would result in a stuck transcoder HW state and flip-done timeouts
	 * later in the modeset sequence.
	 */
	if (!passed)
		intel_dp_program_link_training_pattern(intel_dp, crtc_state,
						       DP_PHY_DPRX, DP_TRAINING_PATTERN_2);

	return passed;
}
1607 
/**
 * intel_dp_start_link_train - start link training
 * @state: Atomic state
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_atomic_state *state,
			       struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	bool passed;
	/*
	 * Reinit the LTTPRs here to ensure that they are switched to
	 * non-transparent mode. During an earlier LTTPR detection this
	 * could've been prevented by an active link.
	 */
	int lttpr_count;

	/* HPD stays blocked until intel_dp_stop_link_train() unblocks it. */
	intel_hpd_block(encoder);

	lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	/* 128b/132b (UHBR) and 8b/10b links use different training sequences. */
	if (intel_dp_is_uhbr(crtc_state))
		passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
	else
		passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);

	/* A requested forced failure is accounted like a real one. */
	if (intel_dp->link.force_train_failure) {
		intel_dp->link.force_train_failure--;
		lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
	} else if (passed) {
		intel_dp->link.seq_train_failures = 0;
		return;
	}

	intel_dp->link.seq_train_failures++;

	/*
	 * Ignore the link failure in CI
	 *
	 * In fixed environments like CI, sometimes unexpected long HPDs are
	 * generated by the displays. If ignore_long_hpd flag is set, such long
	 * HPDs are ignored. And probably as a consequence of these ignored
	 * long HPDs, subsequent link trainings are failed resulting into CI
	 * execution failures.
	 *
	 * For test cases which rely on the link training or processing of HPDs
	 * ignore_long_hpd flag can unset from the testcase.
	 */
	if (display->hotplug.ignore_long_hpd) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
		return;
	}

	/* Allow a few sequential failures before falling back. */
	if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES)
		return;

	if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
		return;

	/* No fallback possible: give up on retraining this link. */
	intel_dp->link.retrain_disabled = true;

	if (!passed)
		lt_err(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after failure\n");
	else
		lt_dbg(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after forced failure\n");
}
1689 
/*
 * Enable SDP CRC16 generation in the sink for 128b/132b (UHBR) links per
 * the DP v2.0 SCR. No-op on 8b/10b links.
 */
void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	/*
	 * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
	 * disable SDP CRC. This is applicable for Display version 13.
	 * Default value of bit 31 is '0' hence discarding the write
	 * TODO: Corrective actions on SDP corruption yet to be defined
	 */
	if (!intel_dp_is_uhbr(crtc_state))
		return;

	/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
	/* NOTE(review): the AUX write result is ignored — best-effort enablement. */
	drm_dp_dpcd_writeb(&intel_dp->aux,
			   DP_SDP_ERROR_DETECTION_CONFIGURATION,
			   DP_SDP_CRC16_128B132B_EN);

	lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
}
1709 
1710 static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
1711 {
1712 	struct intel_connector *connector = to_intel_connector(m->private);
1713 	struct intel_display *display = to_intel_display(connector);
1714 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1715 	int current_rate = -1;
1716 	int force_rate;
1717 	int err;
1718 	int i;
1719 
1720 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1721 	if (err)
1722 		return err;
1723 
1724 	if (intel_dp->link.active)
1725 		current_rate = intel_dp->link_rate;
1726 	force_rate = intel_dp->link.force_rate;
1727 
1728 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1729 
1730 	seq_printf(m, "%sauto%s",
1731 		   force_rate == 0 ? "[" : "",
1732 		   force_rate == 0 ? "]" : "");
1733 
1734 	for (i = 0; i < intel_dp->num_source_rates; i++)
1735 		seq_printf(m, " %s%d%s%s",
1736 			   intel_dp->source_rates[i] == force_rate ? "[" : "",
1737 			   intel_dp->source_rates[i],
1738 			   intel_dp->source_rates[i] == current_rate ? "*" : "",
1739 			   intel_dp->source_rates[i] == force_rate ? "]" : "");
1740 
1741 	seq_putc(m, '\n');
1742 
1743 	return 0;
1744 }
1745 
1746 static int parse_link_rate(struct intel_dp *intel_dp, const char __user *ubuf, size_t len)
1747 {
1748 	char *kbuf;
1749 	const char *p;
1750 	int rate;
1751 	int ret = 0;
1752 
1753 	kbuf = memdup_user_nul(ubuf, len);
1754 	if (IS_ERR(kbuf))
1755 		return PTR_ERR(kbuf);
1756 
1757 	p = strim(kbuf);
1758 
1759 	if (!strcmp(p, "auto")) {
1760 		rate = 0;
1761 	} else {
1762 		ret = kstrtoint(p, 0, &rate);
1763 		if (ret < 0)
1764 			goto out_free;
1765 
1766 		if (intel_dp_rate_index(intel_dp->source_rates,
1767 					intel_dp->num_source_rates,
1768 					rate) < 0)
1769 			ret = -EINVAL;
1770 	}
1771 
1772 out_free:
1773 	kfree(kbuf);
1774 
1775 	return ret < 0 ? ret : rate;
1776 }
1777 
1778 static ssize_t i915_dp_force_link_rate_write(struct file *file,
1779 					     const char __user *ubuf,
1780 					     size_t len, loff_t *offp)
1781 {
1782 	struct seq_file *m = file->private_data;
1783 	struct intel_connector *connector = to_intel_connector(m->private);
1784 	struct intel_display *display = to_intel_display(connector);
1785 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1786 	int rate;
1787 	int err;
1788 
1789 	rate = parse_link_rate(intel_dp, ubuf, len);
1790 	if (rate < 0)
1791 		return rate;
1792 
1793 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1794 	if (err)
1795 		return err;
1796 
1797 	intel_dp_reset_link_params(intel_dp);
1798 	intel_dp->link.force_rate = rate;
1799 
1800 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1801 
1802 	*offp += len;
1803 
1804 	return len;
1805 }
1806 DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);
1807 
1808 static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
1809 {
1810 	struct intel_connector *connector = to_intel_connector(m->private);
1811 	struct intel_display *display = to_intel_display(connector);
1812 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1813 	int current_lane_count = -1;
1814 	int force_lane_count;
1815 	int err;
1816 	int i;
1817 
1818 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1819 	if (err)
1820 		return err;
1821 
1822 	if (intel_dp->link.active)
1823 		current_lane_count = intel_dp->lane_count;
1824 	force_lane_count = intel_dp->link.force_lane_count;
1825 
1826 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1827 
1828 	seq_printf(m, "%sauto%s",
1829 		   force_lane_count == 0 ? "[" : "",
1830 		   force_lane_count == 0 ? "]" : "");
1831 
1832 	for (i = 1; i <= 4; i <<= 1)
1833 		seq_printf(m, " %s%d%s%s",
1834 			   i == force_lane_count ? "[" : "",
1835 			   i,
1836 			   i == current_lane_count ? "*" : "",
1837 			   i == force_lane_count ? "]" : "");
1838 
1839 	seq_putc(m, '\n');
1840 
1841 	return 0;
1842 }
1843 
1844 static int parse_lane_count(const char __user *ubuf, size_t len)
1845 {
1846 	char *kbuf;
1847 	const char *p;
1848 	int lane_count;
1849 	int ret = 0;
1850 
1851 	kbuf = memdup_user_nul(ubuf, len);
1852 	if (IS_ERR(kbuf))
1853 		return PTR_ERR(kbuf);
1854 
1855 	p = strim(kbuf);
1856 
1857 	if (!strcmp(p, "auto")) {
1858 		lane_count = 0;
1859 	} else {
1860 		ret = kstrtoint(p, 0, &lane_count);
1861 		if (ret < 0)
1862 			goto out_free;
1863 
1864 		switch (lane_count) {
1865 		case 1:
1866 		case 2:
1867 		case 4:
1868 			break;
1869 		default:
1870 			ret = -EINVAL;
1871 		}
1872 	}
1873 
1874 out_free:
1875 	kfree(kbuf);
1876 
1877 	return ret < 0 ? ret : lane_count;
1878 }
1879 
1880 static ssize_t i915_dp_force_lane_count_write(struct file *file,
1881 					      const char __user *ubuf,
1882 					      size_t len, loff_t *offp)
1883 {
1884 	struct seq_file *m = file->private_data;
1885 	struct intel_connector *connector = to_intel_connector(m->private);
1886 	struct intel_display *display = to_intel_display(connector);
1887 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1888 	int lane_count;
1889 	int err;
1890 
1891 	lane_count = parse_lane_count(ubuf, len);
1892 	if (lane_count < 0)
1893 		return lane_count;
1894 
1895 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1896 	if (err)
1897 		return err;
1898 
1899 	intel_dp_reset_link_params(intel_dp);
1900 	intel_dp->link.force_lane_count = lane_count;
1901 
1902 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1903 
1904 	*offp += len;
1905 
1906 	return len;
1907 }
1908 DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);
1909 
1910 static int i915_dp_max_link_rate_show(void *data, u64 *val)
1911 {
1912 	struct intel_connector *connector = to_intel_connector(data);
1913 	struct intel_display *display = to_intel_display(connector);
1914 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1915 	int err;
1916 
1917 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1918 	if (err)
1919 		return err;
1920 
1921 	*val = intel_dp->link.max_rate;
1922 
1923 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1924 
1925 	return 0;
1926 }
1927 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show, NULL, "%llu\n");
1928 
1929 static int i915_dp_max_lane_count_show(void *data, u64 *val)
1930 {
1931 	struct intel_connector *connector = to_intel_connector(data);
1932 	struct intel_display *display = to_intel_display(connector);
1933 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1934 	int err;
1935 
1936 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1937 	if (err)
1938 		return err;
1939 
1940 	*val = intel_dp->link.max_lane_count;
1941 
1942 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1943 
1944 	return 0;
1945 }
1946 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_show, NULL, "%llu\n");
1947 
1948 static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
1949 {
1950 	struct intel_connector *connector = to_intel_connector(data);
1951 	struct intel_display *display = to_intel_display(connector);
1952 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1953 	int err;
1954 
1955 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1956 	if (err)
1957 		return err;
1958 
1959 	*val = intel_dp->link.force_train_failure;
1960 
1961 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1962 
1963 	return 0;
1964 }
1965 
1966 static int i915_dp_force_link_training_failure_write(void *data, u64 val)
1967 {
1968 	struct intel_connector *connector = to_intel_connector(data);
1969 	struct intel_display *display = to_intel_display(connector);
1970 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1971 	int err;
1972 
1973 	if (val > 2)
1974 		return -EINVAL;
1975 
1976 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1977 	if (err)
1978 		return err;
1979 
1980 	intel_dp->link.force_train_failure = val;
1981 
1982 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
1983 
1984 	return 0;
1985 }
1986 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
1987 			 i915_dp_force_link_training_failure_show,
1988 			 i915_dp_force_link_training_failure_write, "%llu\n");
1989 
1990 static int i915_dp_force_link_retrain_show(void *data, u64 *val)
1991 {
1992 	struct intel_connector *connector = to_intel_connector(data);
1993 	struct intel_display *display = to_intel_display(connector);
1994 	struct intel_dp *intel_dp = intel_attached_dp(connector);
1995 	int err;
1996 
1997 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
1998 	if (err)
1999 		return err;
2000 
2001 	*val = intel_dp->link.force_retrain;
2002 
2003 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2004 
2005 	return 0;
2006 }
2007 
2008 static int i915_dp_force_link_retrain_write(void *data, u64 val)
2009 {
2010 	struct intel_connector *connector = to_intel_connector(data);
2011 	struct intel_display *display = to_intel_display(connector);
2012 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2013 	int err;
2014 
2015 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2016 	if (err)
2017 		return err;
2018 
2019 	intel_dp->link.force_retrain = val;
2020 
2021 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2022 
2023 	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
2024 
2025 	return 0;
2026 }
2027 DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
2028 			 i915_dp_force_link_retrain_show,
2029 			 i915_dp_force_link_retrain_write, "%llu\n");
2030 
2031 static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
2032 {
2033 	struct intel_connector *connector = to_intel_connector(m->private);
2034 	struct intel_display *display = to_intel_display(connector);
2035 	struct intel_dp *intel_dp = intel_attached_dp(connector);
2036 	int err;
2037 
2038 	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
2039 	if (err)
2040 		return err;
2041 
2042 	seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));
2043 
2044 	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
2045 
2046 	return 0;
2047 }
2048 DEFINE_SHOW_ATTRIBUTE(i915_dp_link_retrain_disabled);
2049 
2050 void intel_dp_link_training_debugfs_add(struct intel_connector *connector)
2051 {
2052 	struct dentry *root = connector->base.debugfs_entry;
2053 
2054 	if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort &&
2055 	    connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2056 		return;
2057 
2058 	debugfs_create_file("i915_dp_force_link_rate", 0644, root,
2059 			    connector, &i915_dp_force_link_rate_fops);
2060 
2061 	debugfs_create_file("i915_dp_force_lane_count", 0644, root,
2062 			    connector, &i915_dp_force_lane_count_fops);
2063 
2064 	debugfs_create_file("i915_dp_max_link_rate", 0444, root,
2065 			    connector, &i915_dp_max_link_rate_fops);
2066 
2067 	debugfs_create_file("i915_dp_max_lane_count", 0444, root,
2068 			    connector, &i915_dp_max_lane_count_fops);
2069 
2070 	debugfs_create_file("i915_dp_force_link_training_failure", 0644, root,
2071 			    connector, &i915_dp_force_link_training_failure_fops);
2072 
2073 	debugfs_create_file("i915_dp_force_link_retrain", 0644, root,
2074 			    connector, &i915_dp_force_link_retrain_fops);
2075 
2076 	debugfs_create_file("i915_dp_link_retrain_disabled", 0444, root,
2077 			    connector, &i915_dp_link_retrain_disabled_fops);
2078 }
2079