/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/display/drm_dp_helper.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_encoder.h"
#include "intel_hotplug.h"
#include "intel_panel.h"

#define LT_MSG_PREFIX			"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy)	(_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(to_intel_display(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)
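
/*
 * Every lt_dbg()/lt_err() message is prefixed with the connector, encoder and
 * the PHY being trained, e.g. (with hypothetical IDs and names):
 *   [CONNECTOR:236:eDP-1][ENCODER:235:DDI A][DPRX] Clock recovery OK
 */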

#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(to_intel_display(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)

static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}

static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	/* The common caps cache is indexed relative to the first LTTPR DPCD register. */
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
		lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n");
		return;
	}

	lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n",
	       (int)sizeof(intel_dp->lttpr_phy_caps[0]),
	       phy_caps);
}

static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int ret;

	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
					    intel_dp->lttpr_common_caps);
	if (ret < 0)
		goto reset_caps;

	lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n",
	       (int)sizeof(intel_dp->lttpr_common_caps),
	       intel_dp->lttpr_common_caps);

	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
	if (intel_dp->lttpr_common_caps[0] < 0x14)
		goto reset_caps;

	return true;

reset_caps:
	intel_dp_reset_lttpr_common_caps(intel_dp);
	return false;
}

static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
			  DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	if (drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) != 1)
		return false;

	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = val;

	return true;
}

static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
{
	return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
					   DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
		DP_PHY_REPEATER_MODE_TRANSPARENT;
}

/*
 * Read the LTTPR common capabilities and switch the LTTPR PHYs to
 * non-transparent mode if this is supported. Preserve the
 * transparent/non-transparent mode on an active link.
 *
 * Return the number of detected LTTPRs in non-transparent mode or 0 if the
 * LTTPRs are in transparent mode or the detection failed.
 */
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * Don't change the mode on an active link, to prevent a loss of link
	 * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
	 * resetting its internal state when the mode is changed from
	 * non-transparent to transparent.
	 */
	if (intel_dp->link_trained) {
		if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
			goto out_reset_lttpr_count;

		return lttpr_count;
	}

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of an unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall back to transparent link training mode,
	 * still taking into account any LTTPR common lane count/rate limits.
	 */
	if (lttpr_count < 0)
		goto out_reset_lttpr_count;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);

		goto out_reset_lttpr_count;
	}

	return lttpr_count;

out_reset_lttpr_count:
	intel_dp_reset_lttpr_count(intel_dp);

	return 0;
}

static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;
	int i;

	lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));

	return lttpr_count;
}

int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);

	if (intel_dp_is_edp(intel_dp))
		return 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
		if (drm_dp_dpcd_probe(&intel_dp->aux,
				      DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
			return -EIO;

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
		return -EIO;

	return 0;
}

/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
		u8 dpcd[DP_RECEIVER_CAP_SIZE];
		int err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err != 0)
			return err;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}

/*
 * 8b/10b: the maximum vswing level allowed for a given pre-emphasis level;
 * the combined vswing + pre-emphasis level is capped at 3.
 */
static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	else
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	drm_WARN_ON_ONCE(display->drm,
			 lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(display->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(display->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
				       enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);

	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
		DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
}

/* 128b/132b */
static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
						 const struct intel_crtc_state *crtc_state,
						 enum drm_dp_phy dp_phy,
						 const u8 link_status[DP_LINK_STATUS_SIZE],
						 int lane)
{
	u8 tx_ffe = 0;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);
		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++)
			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
	}

	return tx_ffe;
}

/* 8b/10b */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}

static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state,
					 enum drm_dp_phy dp_phy,
					 const u8 link_status[DP_LINK_STATUS_SIZE],
					 int lane)
{
	if (intel_dp_is_uhbr(crtc_state))
		return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
							      dp_phy, link_status, lane);
	else
		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
							       dp_phy, link_status, lane);
}

#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)

void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
		       "vswing request: " TRAIN_REQ_FMT ", "
		       "pre-emphasis request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_VSWING_ARGS(link_status),
		       TRAIN_REQ_PREEMPH_ARGS(link_status));
	}

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] =
			intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
						       dp_phy, link_status, lane);
}

static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
					     enum drm_dp_phy dp_phy)
{
	return dp_phy == DP_PHY_DPRX ?
		DP_TRAINING_PATTERN_SET :
		DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy);
}

static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy,
			u8 dp_train_pat)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 buf[sizeof(intel_dp->train_set) + 1];
	int len;

	intel_dp_program_link_training_pattern(intel_dp, crtc_state,
					       dp_phy, dp_train_pat);

	buf[0] = dp_train_pat;
	/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
	memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count);
	len = crtc_state->lane_count + 1;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
}

static char dp_training_pattern_name(u8 train_pat)
{
	switch (train_pat) {
	case DP_TRAINING_PATTERN_1:
	case DP_TRAINING_PATTERN_2:
	case DP_TRAINING_PATTERN_3:
		return '0' + train_pat;
	case DP_TRAINING_PATTERN_4:
		return '4';
	default:
		MISSING_CASE(train_pat);
		return '?';
	}
}

void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       const struct intel_crtc_state *crtc_state,
				       enum drm_dp_phy dp_phy,
				       u8 dp_train_pat)
{
	u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);

	if (train_pat != DP_TRAINING_PATTERN_DISABLE)
		lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n",
		       dp_training_pattern_name(train_pat));

	intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}

#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
#define _TRAIN_SET_VSWING_ARGS(train_set) \
	((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
	(train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
"(max)" : "" 583 #define TRAIN_SET_VSWING_ARGS(train_set) \ 584 _TRAIN_SET_VSWING_ARGS((train_set)[0]), \ 585 _TRAIN_SET_VSWING_ARGS((train_set)[1]), \ 586 _TRAIN_SET_VSWING_ARGS((train_set)[2]), \ 587 _TRAIN_SET_VSWING_ARGS((train_set)[3]) 588 #define _TRAIN_SET_PREEMPH_ARGS(train_set) \ 589 ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \ 590 (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : "" 591 #define TRAIN_SET_PREEMPH_ARGS(train_set) \ 592 _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \ 593 _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \ 594 _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \ 595 _TRAIN_SET_PREEMPH_ARGS((train_set)[3]) 596 #define _TRAIN_SET_TX_FFE_ARGS(train_set) \ 597 ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), "" 598 #define TRAIN_SET_TX_FFE_ARGS(train_set) \ 599 _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \ 600 _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \ 601 _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \ 602 _TRAIN_SET_TX_FFE_ARGS((train_set)[3]) 603 604 void intel_dp_set_signal_levels(struct intel_dp *intel_dp, 605 const struct intel_crtc_state *crtc_state, 606 enum drm_dp_phy dp_phy) 607 { 608 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 609 610 if (intel_dp_is_uhbr(crtc_state)) { 611 lt_dbg(intel_dp, dp_phy, 612 "128b/132b, lanes: %d, " 613 "TX FFE presets: " TRAIN_SET_FMT "\n", 614 crtc_state->lane_count, 615 TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); 616 } else { 617 lt_dbg(intel_dp, dp_phy, 618 "8b/10b, lanes: %d, " 619 "vswing levels: " TRAIN_SET_FMT ", " 620 "pre-emphasis levels: " TRAIN_SET_FMT "\n", 621 crtc_state->lane_count, 622 TRAIN_SET_VSWING_ARGS(intel_dp->train_set), 623 TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); 624 } 625 626 if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) 627 encoder->set_signal_levels(encoder, crtc_state); 628 } 629 630 static bool 631 intel_dp_reset_link_train(struct intel_dp *intel_dp, 632 const struct intel_crtc_state *crtc_state, 633 enum drm_dp_phy dp_phy, 634 u8 dp_train_pat) 635 { 636 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); 637 intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); 638 return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat); 639 } 640 641 static bool 642 intel_dp_update_link_train(struct intel_dp *intel_dp, 643 const struct intel_crtc_state *crtc_state, 644 enum drm_dp_phy dp_phy) 645 { 646 int reg = dp_phy == DP_PHY_DPRX ? 647 DP_TRAINING_LANE0_SET : 648 DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy); 649 int ret; 650 651 intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); 652 653 ret = drm_dp_dpcd_write(&intel_dp->aux, reg, 654 intel_dp->train_set, crtc_state->lane_count); 655 656 return ret == crtc_state->lane_count; 657 } 658 659 /* 128b/132b */ 660 static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane) 661 { 662 return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) == 663 DP_TX_FFE_PRESET_VALUE_MASK; 664 } 665 666 /* 667 * 8b/10b 668 * 669 * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to 670 * have self contradicting tests around this area. 671 * 672 * In lieu of better ideas let's just stop when we've reached the max supported 673 * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on 674 * whether vswing level 3 is supported or not. 
 */
static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
{
	u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
		DP_TRAIN_VOLTAGE_SWING_SHIFT;
	u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		DP_TRAIN_PRE_EMPHASIS_SHIFT;

	if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
		return false;

	if (v + p != 3)
		return false;

	return true;
}

static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		u8 train_set_lane = intel_dp->train_set[lane];

		if (intel_dp_is_uhbr(crtc_state)) {
			if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
				return false;
		} else {
			if (!intel_dp_lane_max_vswing_reached(train_set_lane))
				return false;
		}
	}

	return true;
}

void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, bool is_vrr)
{
	u8 link_config[2];

	link_config[0] = is_vrr ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = drm_dp_is_uhbr_rate(link_rate) ?
			 DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
}

static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state)
{
	intel_dp_link_training_set_mode(intel_dp,
					crtc_state->port_clock, crtc_state->vrr.flipline);
}
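
/*
 * Note: for the 8b/10b rates DP_LINK_BW_SET is in units of 0.27 Gbps per lane
 * (e.g. 0x06 = RBR/1.62, 0x0a = HBR/2.7, 0x14 = HBR2/5.4, 0x1e = HBR3/8.1),
 * while rate_select on eDP v1.4+ is an index into the sink's
 * DP_SUPPORTED_LINK_RATES table rather than a bandwidth code.
 */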
void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
				   int link_bw, int rate_select, int lane_count,
				   bool enhanced_framing)
{
	if (enhanced_framing)
		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	if (link_bw) {
		/* DP and eDP v1.3 and earlier link bw set method. */
		u8 link_config[] = { link_bw, lane_count };

		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
				  ARRAY_SIZE(link_config));
	} else {
		/*
		 * eDP v1.4 and later link rate set method.
		 *
		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
		 *
		 * eDP v1.5 sinks allow choosing either, and the last choice
		 * shall be active.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
	}
}

static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
					const struct intel_crtc_state *crtc_state,
					u8 link_bw, u8 rate_select)
{
	intel_dp_link_training_set_bw(intel_dp, link_bw, rate_select, crtc_state->lane_count,
				      crtc_state->enhanced_framing);
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this, high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}

static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		u8 old, new;

		if (intel_dp_is_uhbr(crtc_state)) {
			old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
			new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
		} else {
			old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
				drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
			new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
				drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
		}

		if (old != new)
			return true;
	}

	return false;
}

void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
						    intel_dp->dpcd, dp_phy,
						    intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sink from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		usleep_range(delay_us, 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}

/*
 * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
 * 1.2 devices that support it, TPS2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/* UHBR+ use separate 128b/132b TPS2 */
	if (intel_dp_is_uhbr(crtc_state))
		return DP_TRAINING_PATTERN_2;

	/*
	 * TPS4 support is mandatory for all downstream devices that
	 * support HBR3. There are no known eDP panels that support
	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_tps4(i915);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without source TPS4 support\n");
		if (!sink_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without sink TPS4 support\n");
	}

	/*
	 * TPS3 support is mandatory for downstream devices that
	 * support HBR2. However, not all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_tps3(i915);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
		if (!sink_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;
	int delay_us;

	delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
						intel_dp->dpcd, dp_phy,
						intel_dp_is_uhbr(crtc_state));

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		usleep_range(delay_us, 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy,
			       "Clock recovery check failed, cannot continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n");
			break;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			break;
		}
	}

	/* Try 5 times, else fail and try at lower BW */
	if (tries == 5) {
		intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
		lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n");
	}

	return channel_eq;
}

static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp,
						   enum drm_dp_phy dp_phy)
{
	int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy);
	u8 val = DP_TRAINING_PATTERN_DISABLE;

	return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1;
}

static int
intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 sink_status;
	int ret;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status);
	if (ret != 1) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n");
		return ret < 0 ? ret : -EIO;
	}

	return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0;
}

/**
 * intel_dp_stop_link_train - stop link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Stop the link training of the @intel_dp port, disabling the training
 * pattern in the sink's DPCD, and disabling the test pattern symbol
 * generation on the port.
 *
 * What symbols are output on the port after this point is
 * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern
 * with the pipe being disabled, on older platforms it's HW specific if/how an
 * idle pattern is generated, as the pipe is already enabled here for those.
 *
 * This function must be called after intel_dp_start_link_train().
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	intel_dp->link_trained = true;

	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
					       DP_TRAINING_PATTERN_DISABLE);

	if (intel_dp_is_uhbr(crtc_state) &&
	    wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
	}
}

static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy)
{
	bool ret = false;

	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
		goto out;

	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
		goto out;

	ret = true;

out:
	lt_dbg(intel_dp, dp_phy,
	       "Link Training %s at link rate = %d, lane count = %d\n",
"passed" : "failed", 1156 crtc_state->port_clock, crtc_state->lane_count); 1157 1158 return ret; 1159 } 1160 1161 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 1162 int link_rate, 1163 u8 lane_count) 1164 { 1165 /* FIXME figure out what we actually want here */ 1166 const struct drm_display_mode *fixed_mode = 1167 intel_panel_preferred_fixed_mode(intel_dp->attached_connector); 1168 int mode_rate, max_rate; 1169 1170 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 1171 max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count); 1172 if (mode_rate > max_rate) 1173 return false; 1174 1175 return true; 1176 } 1177 1178 static bool reduce_link_params_in_bw_order(struct intel_dp *intel_dp, 1179 const struct intel_crtc_state *crtc_state, 1180 int *new_link_rate, int *new_lane_count) 1181 { 1182 int link_rate; 1183 int lane_count; 1184 int i; 1185 1186 i = intel_dp_link_config_index(intel_dp, crtc_state->port_clock, crtc_state->lane_count); 1187 for (i--; i >= 0; i--) { 1188 intel_dp_link_config_get(intel_dp, i, &link_rate, &lane_count); 1189 1190 if ((intel_dp->link.force_rate && 1191 intel_dp->link.force_rate != link_rate) || 1192 (intel_dp->link.force_lane_count && 1193 intel_dp->link.force_lane_count != lane_count)) 1194 continue; 1195 1196 break; 1197 } 1198 1199 if (i < 0) 1200 return false; 1201 1202 *new_link_rate = link_rate; 1203 *new_lane_count = lane_count; 1204 1205 return true; 1206 } 1207 1208 static int reduce_link_rate(struct intel_dp *intel_dp, int current_rate) 1209 { 1210 int rate_index; 1211 int new_rate; 1212 1213 if (intel_dp->link.force_rate) 1214 return -1; 1215 1216 rate_index = intel_dp_rate_index(intel_dp->common_rates, 1217 intel_dp->num_common_rates, 1218 current_rate); 1219 1220 if (rate_index <= 0) 1221 return -1; 1222 1223 new_rate = intel_dp_common_rate(intel_dp, rate_index - 1); 1224 1225 /* TODO: Make switching from UHBR to non-UHBR rates work. */ 1226 if (drm_dp_is_uhbr_rate(current_rate) != drm_dp_is_uhbr_rate(new_rate)) 1227 return -1; 1228 1229 return new_rate; 1230 } 1231 1232 static int reduce_lane_count(struct intel_dp *intel_dp, int current_lane_count) 1233 { 1234 if (intel_dp->link.force_lane_count) 1235 return -1; 1236 1237 if (current_lane_count == 1) 1238 return -1; 1239 1240 return current_lane_count >> 1; 1241 } 1242 1243 static bool reduce_link_params_in_rate_lane_order(struct intel_dp *intel_dp, 1244 const struct intel_crtc_state *crtc_state, 1245 int *new_link_rate, int *new_lane_count) 1246 { 1247 int link_rate; 1248 int lane_count; 1249 1250 lane_count = crtc_state->lane_count; 1251 link_rate = reduce_link_rate(intel_dp, crtc_state->port_clock); 1252 if (link_rate < 0) { 1253 lane_count = reduce_lane_count(intel_dp, crtc_state->lane_count); 1254 link_rate = intel_dp_max_common_rate(intel_dp); 1255 } 1256 1257 if (lane_count < 0) 1258 return false; 1259 1260 *new_link_rate = link_rate; 1261 *new_lane_count = lane_count; 1262 1263 return true; 1264 } 1265 1266 static bool reduce_link_params(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, 1267 int *new_link_rate, int *new_lane_count) 1268 { 1269 /* TODO: Use the same fallback logic on SST as on MST. 
static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
						   const struct intel_crtc_state *crtc_state)
{
	int new_link_rate;
	int new_lane_count;

	if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with max parameters\n");
		intel_dp->use_max_params = true;
		return 0;
	}

	if (!reduce_link_params(intel_dp, crtc_state, &new_link_rate, &new_lane_count))
		return -1;

	if (intel_dp_is_edp(intel_dp) &&
	    !intel_dp_can_link_train_fallback_for_edp(intel_dp, new_link_rate, new_lane_count)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Retrying Link training for eDP with same parameters\n");
		return 0;
	}

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "Reducing link parameters from %dx%d to %dx%d\n",
	       crtc_state->lane_count, crtc_state->port_clock,
	       new_lane_count, new_link_rate);

	intel_dp->link.max_rate = new_link_rate;
	intel_dp->link.max_lane_count = new_lane_count;

	return 0;
}

static bool intel_dp_schedule_fallback_link_training(struct intel_atomic_state *state,
						     struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
		return true;
	}

	if (intel_dp->hobl_active) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Link Training failed with HOBL active, not enabling it from now on\n");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state)) {
		return false;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);

	return true;
}

/* Perform the link training on all LTTPRs and the DPRX on a link. */
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	if (ret)
		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}

/*
 * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
 */
static bool
intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int delay_us;
	int try, max_tries = 20;
	unsigned long deadline;
	bool timeout = false;

	/*
	 * Reset signal levels. Start transmitting 128b/132b TPS1.
	 *
	 * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
	 * in DP_TRAINING_PATTERN_SET.
	 */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				       DP_TRAINING_PATTERN_1)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n");
		return false;
	}

	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

	/* Read the initial TX FFE settings. */
	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n");
		return false;
	}

	/* Update signal levels and training set as requested. */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n");
		return false;
	}

	/* Start transmitting 128b/132b TPS2. */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				     DP_TRAINING_PATTERN_2)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n");
		return false;
	}

	/* Time budget for the LANEx_EQ_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout(400);

	for (try = 0; try < max_tries; try++) {
		usleep_range(delay_us, 2 * delay_us);

		/*
		 * The delay may get updated. The transmitter shall read the
		 * delay before link status during link training.
		 */
		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX,
			       "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n");
			return false;
		}

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		/* Update signal levels and training set as requested. */
		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
			return false;
		}
	}

	if (try == max_tries) {
		intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
		lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n");
		return false;
	}

	for (;;) {
		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n");
			return false;
		}

		usleep_range(2000, 3000);
	}

	return true;
}

/*
 * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
 */
static bool
intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   int lttpr_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	unsigned long deadline;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_2_CDS) != 1) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n");
		return false;
	}

	/* Time budget for the LANEx_CDS_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);

	for (;;) {
		bool timeout = false;

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		usleep_range(2000, 3000);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
		    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
		    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "CDS interlane align done\n");
			break;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "CDS timeout\n");
			return false;
		}
	}

	return true;
}
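
/*
 * Note that unlike the 8b/10b training above, the 128b/132b sequence has no
 * separate clock recovery phase: it consists of the intra-hop check, the
 * LANEx_EQ_DONE phase (TPS1, then TPS2 with TX FFE adjustments) and the
 * LANEx_CDS_DONE phase implemented above.
 */
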
/*
 * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
 */
static bool
intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool passed = false;

	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
		return false;
	}

	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
	    intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
		passed = true;

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "128b/132b Link Training %s at link rate = %d, lane count = %d\n",
	       passed ? "passed" : "failed",
	       crtc_state->port_clock, crtc_state->lane_count);

	return passed;
}

/**
 * intel_dp_start_link_train - start link training
 * @state: Atomic state
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_atomic_state *state,
			       struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	bool passed;
	/*
	 * Reinit the LTTPRs here to ensure that they are switched to
	 * non-transparent mode. During an earlier LTTPR detection this
	 * could've been prevented by an active link.
	 */
	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	if (intel_dp_is_uhbr(crtc_state))
		passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
	else
		passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);

	if (intel_dp->link.force_train_failure) {
		intel_dp->link.force_train_failure--;
		lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n");
	} else if (passed) {
		intel_dp->link.seq_train_failures = 0;
		intel_encoder_link_check_queue_work(encoder, 2000);
		return;
	}

	intel_dp->link.seq_train_failures++;

	/*
	 * Ignore the link failure in CI
	 *
	 * In fixed environments like CI, sometimes unexpected long HPDs are
	 * generated by the displays. If the ignore_long_hpd flag is set, such
	 * long HPDs are ignored. And probably as a consequence of these
	 * ignored long HPDs, subsequent link trainings fail, resulting in CI
	 * execution failures.
	 *
	 * For test cases which rely on the link training or processing of
	 * HPDs, the ignore_long_hpd flag can be unset from the testcase.
	 */
	if (display->hotplug.ignore_long_hpd) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
		return;
	}

	if (intel_dp->link.seq_train_failures < 2) {
		intel_encoder_link_check_queue_work(encoder, 0);
		return;
	}

	if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state))
		return;

	intel_dp->link.retrain_disabled = true;

	if (!passed)
		lt_err(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after failure\n");
	else
		lt_dbg(intel_dp, DP_PHY_DPRX, "Can't reduce link training parameters after forced failure\n");
}

void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	/*
	 * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
	 * disable SDP CRC. This is applicable for Display version 13.
	 * Default value of bit 31 is '0' hence discarding the write
	 * TODO: Corrective actions on SDP corruption yet to be defined
	 */
	if (!intel_dp_is_uhbr(crtc_state))
		return;

	/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
	drm_dp_dpcd_writeb(&intel_dp->aux,
			   DP_SDP_ERROR_DETECTION_CONFIGURATION,
			   DP_SDP_CRC16_128B132B_EN);

	lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
}

static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *connector)
{
	if (connector->mst_port)
		return connector->mst_port;
	else
		return enc_to_intel_dp(intel_attached_encoder(connector));
}

static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = to_intel_connector(m->private);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int current_rate = -1;
	int force_rate;
	int err;
	int i;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	if (intel_dp->link_trained)
		current_rate = intel_dp->link_rate;
	force_rate = intel_dp->link.force_rate;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	/* Print the forced rate in brackets, mark the current rate with '*'. */
	seq_printf(m, "%sauto%s",
		   force_rate == 0 ? "[" : "",
		   force_rate == 0 ? "]" : "");

	for (i = 0; i < intel_dp->num_source_rates; i++)
		seq_printf(m, " %s%d%s%s",
			   intel_dp->source_rates[i] == force_rate ? "[" : "",
			   intel_dp->source_rates[i],
			   intel_dp->source_rates[i] == current_rate ? "*" : "",
			   intel_dp->source_rates[i] == force_rate ? "]" : "");

	seq_putc(m, '\n');

	return 0;
}

static int parse_link_rate(struct intel_dp *intel_dp, const char __user *ubuf, size_t len)
{
	char *kbuf;
	const char *p;
	int rate;
	int ret = 0;

	kbuf = memdup_user_nul(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	p = strim(kbuf);

	if (!strcmp(p, "auto")) {
		rate = 0;
	} else {
		ret = kstrtoint(p, 0, &rate);
		if (ret < 0)
			goto out_free;

		if (intel_dp_rate_index(intel_dp->source_rates,
					intel_dp->num_source_rates,
					rate) < 0)
			ret = -EINVAL;
	}

out_free:
	kfree(kbuf);

	return ret < 0 ? ret : rate;
}
static ssize_t i915_dp_force_link_rate_write(struct file *file,
					     const char __user *ubuf,
					     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_connector *connector = to_intel_connector(m->private);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int rate;
	int err;

	rate = parse_link_rate(intel_dp, ubuf, len);
	if (rate < 0)
		return rate;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	intel_dp_reset_link_params(intel_dp);
	intel_dp->link.force_rate = rate;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	*offp += len;

	return len;
}
DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);

static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = to_intel_connector(m->private);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int current_lane_count = -1;
	int force_lane_count;
	int err;
	int i;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	if (intel_dp->link_trained)
		current_lane_count = intel_dp->lane_count;
	force_lane_count = intel_dp->link.force_lane_count;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	seq_printf(m, "%sauto%s",
		   force_lane_count == 0 ? "[" : "",
		   force_lane_count == 0 ? "]" : "");

	for (i = 1; i <= 4; i <<= 1)
		seq_printf(m, " %s%d%s%s",
			   i == force_lane_count ? "[" : "",
			   i,
			   i == current_lane_count ? "*" : "",
			   i == force_lane_count ? "]" : "");

	seq_putc(m, '\n');

	return 0;
}

static int parse_lane_count(const char __user *ubuf, size_t len)
{
	char *kbuf;
	const char *p;
	int lane_count;
	int ret = 0;

	kbuf = memdup_user_nul(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	p = strim(kbuf);

	if (!strcmp(p, "auto")) {
		lane_count = 0;
	} else {
		ret = kstrtoint(p, 0, &lane_count);
		if (ret < 0)
			goto out_free;

		switch (lane_count) {
		case 1:
		case 2:
		case 4:
			break;
		default:
			ret = -EINVAL;
		}
	}

out_free:
	kfree(kbuf);

	return ret < 0 ? ret : lane_count;
}
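/*
 * Illustrative usage sketch, not part of the driver: parse_lane_count()
 * accepts "auto" or one of 1, 2 and 4, so under the same assumed debugfs
 * path as above:
 *
 *	# cat /sys/kernel/debug/dri/0/DP-1/i915_dp_force_lane_count
 *	[auto] 1 2 4*
 *	# echo 2 > /sys/kernel/debug/dri/0/DP-1/i915_dp_force_lane_count
 *
 * Any other lane count is rejected with -EINVAL by the switch statement
 * above, mirroring the force-link-rate interface.
 */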
static ssize_t i915_dp_force_lane_count_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_connector *connector = to_intel_connector(m->private);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int lane_count;
	int err;

	lane_count = parse_lane_count(ubuf, len);
	if (lane_count < 0)
		return lane_count;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	intel_dp_reset_link_params(intel_dp);
	intel_dp->link.force_lane_count = lane_count;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	*offp += len;

	return len;
}
DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);

static int i915_dp_max_link_rate_show(void *data, u64 *val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	*val = intel_dp->link.max_rate;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show, NULL, "%llu\n");

static int i915_dp_max_lane_count_show(void *data, u64 *val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	*val = intel_dp->link.max_lane_count;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_show, NULL, "%llu\n");

static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	*val = intel_dp->link.force_train_failure;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return 0;
}

static int i915_dp_force_link_training_failure_write(void *data, u64 val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	if (val > 2)
		return -EINVAL;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	intel_dp->link.force_train_failure = val;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
			 i915_dp_force_link_training_failure_show,
			 i915_dp_force_link_training_failure_write, "%llu\n");
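/*
 * Illustrative note, not part of the driver: the write handler above accepts
 * 0, 1 or 2, and intel_dp_start_link_train() decrements the counter once per
 * forced failure, so e.g. (path an assumption, as before):
 *
 *	# echo 2 > /sys/kernel/debug/dri/0/DP-1/i915_dp_force_link_training_failure
 *
 * makes the next two link training attempts be treated as failures,
 * exercising the sequential-failure and fallback paths in
 * intel_dp_start_link_train().
 */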
static int i915_dp_force_link_retrain_show(void *data, u64 *val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	*val = intel_dp->link.force_retrain;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return 0;
}

static int i915_dp_force_link_retrain_write(void *data, u64 val)
{
	struct intel_connector *connector = to_intel_connector(data);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	intel_dp->link.force_retrain = val;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
			 i915_dp_force_link_retrain_show,
			 i915_dp_force_link_retrain_write, "%llu\n");

static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = to_intel_connector(m->private);
	struct intel_display *display = to_intel_display(connector);
	struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
	int err;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dp_link_retrain_disabled);

void intel_dp_link_training_debugfs_add(struct intel_connector *connector)
{
	struct dentry *root = connector->base.debugfs_entry;

	if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort &&
	    connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
		return;

	debugfs_create_file("i915_dp_force_link_rate", 0644, root,
			    connector, &i915_dp_force_link_rate_fops);

	debugfs_create_file("i915_dp_force_lane_count", 0644, root,
			    connector, &i915_dp_force_lane_count_fops);

	debugfs_create_file("i915_dp_max_link_rate", 0444, root,
			    connector, &i915_dp_max_link_rate_fops);

	debugfs_create_file("i915_dp_max_lane_count", 0444, root,
			    connector, &i915_dp_max_lane_count_fops);

	debugfs_create_file("i915_dp_force_link_training_failure", 0644, root,
			    connector, &i915_dp_force_link_training_failure_fops);

	debugfs_create_file("i915_dp_force_link_retrain", 0644, root,
			    connector, &i915_dp_force_link_retrain_fops);

	debugfs_create_file("i915_dp_link_retrain_disabled", 0444, root,
			    connector, &i915_dp_link_retrain_disabled_fops);
}
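/*
 * Illustrative usage sketch, not part of the driver: writing to the
 * force-retrain file kicks the hotplug machinery via intel_hpd_trigger_irq(),
 * so a retrain of an otherwise working link can be requested from userspace
 * (path again an assumption):
 *
 *	# echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dp_force_link_retrain
 *	# cat /sys/kernel/debug/dri/0/DP-1/i915_dp_link_retrain_disabled
 *	no
 *
 * The files created with mode 0444 above (max link rate, max lane count and
 * the retrain-disabled flag) are read-only views of the current link state
 * for test scripts.
 */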