/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "bxt_dpio_phy_regs.h"
#include "i915_reg.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "vlv_dpio_phy_regs.h"
#include "vlv_sideband.h"

/**
 * DOC: DPIO
 *
 * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
 * ports. DPIO is the name given to such a display PHY. These PHYs
 * don't follow the standard programming model using direct MMIO
 * registers, and instead their registers must be accessed through IOSF
 * sideband. VLV has one such PHY for driving ports B and C, and CHV
 * adds another PHY for driving port D. Each PHY responds to a specific
 * IOSF-SB port.
 *
 * Each display PHY is made up of one or two channels. Each channel
 * houses a common lane part which contains the PLL and other common
 * logic. CH0 common lane also contains the IOSF-SB logic for the
 * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
 * must be running when any DPIO registers are accessed.
 *
 * In addition to having their own registers, the PHYs are also
 * controlled through some dedicated signals from the display
 * controller. These include PLL reference clock enable, PLL enable,
 * and CRI clock selection, for example.
 *
 * Each channel also has two splines (also called data lanes), and
 * each spline is made up of one Physical Access Coding Sub-Layer
 * (PCS) block and two TX lanes. So each channel has two PCS blocks
 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
 * data/clock pairs depending on the output type.
 *
 * Additionally the PHY also contains an AUX lane with AUX blocks
 * for each channel. This is used for DP AUX communication, but
 * this fact isn't really relevant for the driver since AUX is
 * controlled from the display controller side. No DPIO registers
 * need to be accessed during AUX communication.
 *
 * Generally on VLV/CHV the common lane corresponds to the pipe and
 * the spline (PCS/TX) corresponds to the port.
 *
 * For dual channel PHY (VLV/CHV):
 *
 *  pipe A == CMN/PLL/REF CH0
 *
 *  pipe B == CMN/PLL/REF CH1
 *
 *  port B == PCS/TX CH0
 *
 *  port C == PCS/TX CH1
 *
 * This is especially important when we cross the streams
 * ie. drive port B with pipe B, or port C with pipe A.
 *
 * For single channel PHY (CHV):
 *
 *  pipe C == CMN/PLL/REF CH0
 *
 *  port D == PCS/TX CH0
 *
 * On BXT the entire PHY channel corresponds to the port. That means
 * the PLL is also now associated with the port rather than the pipe,
 * and so the clock needs to be routed to the appropriate transcoder.
 * Port A PLL is directly connected to transcoder EDP and port B/C
 * PLLs can be routed to any transcoder A/B/C.
 *
 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
 * digital port D (CHV) or port A (BXT). ::
 *
 *
 *     Dual channel PHY (VLV/CHV/BXT)
 *     ---------------------------------
 *     |      CH0      |      CH1      |
 *     |  CMN/PLL/REF  |  CMN/PLL/REF  |
 *     |---------------|---------------| Display PHY
 *     | PCS01 | PCS23 | PCS01 | PCS23 |
 *     |-------|-------|-------|-------|
 *     |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
 *     ---------------------------------
 *     |     DDI0      |     DDI1      | DP/HDMI ports
 *     ---------------------------------
 *
 *     Single channel PHY (CHV/BXT)
 *     -----------------
 *     |      CH0      |
 *     |  CMN/PLL/REF  |
 *     |---------------| Display PHY
 *     | PCS01 | PCS23 |
 *     |-------|-------|
 *     |TX0|TX1|TX2|TX3|
 *     -----------------
 *     |     DDI2      | DP/HDMI port
 *     -----------------
 */

/**
 * struct bxt_dpio_phy_info - Hold info for a broxton DDI phy
 */
struct bxt_dpio_phy_info {
	/**
	 * @dual_channel: true if this phy has a second channel.
	 */
	bool dual_channel;

	/**
	 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
	 * Otherwise the GRC value will be copied from the phy indicated by
	 * this field.
	 */
	enum dpio_phy rcomp_phy;

	/**
	 * @reset_delay: delay in us to wait before setting the common reset
	 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
	 */
	int reset_delay;

	/**
	 * @pwron_mask: Mask with the appropriate bit set that would cause the
	 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
	 */
	u32 pwron_mask;

	/**
	 * @channel: struct containing per channel information.
	 */
	struct {
		/**
		 * @channel.port: which port maps to this channel.
		 */
		enum port port;
	} channel[2];
};

static const struct bxt_dpio_phy_info bxt_dpio_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = true,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(0),

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
			[DPIO_CH1] = { .port = PORT_C },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		.rcomp_phy = -1,
		.pwron_mask = BIT(1),

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
};

static const struct bxt_dpio_phy_info glk_dpio_phy_info[] = {
	[DPIO_PHY0] = {
		.dual_channel = false,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(0),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_B },
		}
	},
	[DPIO_PHY1] = {
		.dual_channel = false,
		.rcomp_phy = -1,
		.pwron_mask = BIT(3),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_A },
		}
	},
	[DPIO_PHY2] = {
		.dual_channel = false,
		.rcomp_phy = DPIO_PHY1,
		.pwron_mask = BIT(1),
		.reset_delay = 20,

		.channel = {
			[DPIO_CH0] = { .port = PORT_C },
		}
	},
};

static const struct bxt_dpio_phy_info *
bxt_get_phy_list(struct intel_display *display, int *count)
{
	struct drm_i915_private *dev_priv = to_i915(display->drm);

	if (IS_GEMINILAKE(dev_priv)) {
		*count = ARRAY_SIZE(glk_dpio_phy_info);
		return glk_dpio_phy_info;
	} else {
		*count = ARRAY_SIZE(bxt_dpio_phy_info);
		return bxt_dpio_phy_info;
	}
}

static const struct bxt_dpio_phy_info *
bxt_get_phy_info(struct intel_display *display, enum dpio_phy phy)
{
	int count;
	const struct bxt_dpio_phy_info *phy_list =
		bxt_get_phy_list(display, &count);

	return &phy_list[phy];
}

void bxt_port_to_phy_channel(struct intel_display *display, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch)
{
	const struct bxt_dpio_phy_info *phy_info, *phys;
	int i, count;

	phys = bxt_get_phy_list(display, &count);

	for (i = 0; i < count; i++) {
		phy_info = &phys[i];

		if (port == phy_info->channel[DPIO_CH0].port) {
			*phy = i;
			*ch = DPIO_CH0;
			return;
		}

		if (phy_info->dual_channel &&
		    port == phy_info->channel[DPIO_CH1].port) {
			*phy = i;
			*ch = DPIO_CH1;
			return;
		}
	}

	drm_WARN(display->drm, 1, "PHY not found for PORT %c",
		 port_name(port));
	*phy = DPIO_PHY0;
	*ch = DPIO_CH0;
}

/*
 * Like intel_de_rmw() but reads from a single per-lane register and
 * writes to the group register to write the same value to all the lanes.
 */
static u32 bxt_dpio_phy_rmw_grp(struct intel_display *display,
				i915_reg_t reg_single,
				i915_reg_t reg_group,
				u32 clear, u32 set)
{
	u32 old, val;

	old = intel_de_read(display, reg_single);
	val = (old & ~clear) | set;
	intel_de_write(display, reg_group, val);

	return old;
}

void bxt_dpio_phy_set_signal_levels(struct intel_encoder *encoder,
				    const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(encoder);
	const struct intel_ddi_buf_trans *trans;
	enum dpio_channel ch;
	enum dpio_phy phy;
	int lane, n_entries;

	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (drm_WARN_ON_ONCE(display->drm, !trans))
		return;

	bxt_port_to_phy_channel(display, encoder->port, &phy, &ch);

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
			     BXT_PORT_PCS_DW10_GRP(phy, ch),
			     TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT, 0);

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		int level = intel_ddi_level(encoder, crtc_state, lane);

		intel_de_rmw(display, BXT_PORT_TX_DW2_LN(phy, ch, lane),
			     MARGIN_000_MASK | UNIQ_TRANS_SCALE_MASK,
			     MARGIN_000(trans->entries[level].bxt.margin) |
			     UNIQ_TRANS_SCALE(trans->entries[level].bxt.scale));
	}

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		int level = intel_ddi_level(encoder, crtc_state, lane);
		u32 val;

		intel_de_rmw(display, BXT_PORT_TX_DW3_LN(phy, ch, lane),
			     SCALE_DCOMP_METHOD,
			     trans->entries[level].bxt.enable ?
			     SCALE_DCOMP_METHOD : 0);

		val = intel_de_read(display, BXT_PORT_TX_DW3_LN(phy, ch, lane));
		if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
			drm_err(display->drm,
				"Disabled scaling while UNIQUE_TRANGE_EN_METHOD was set");
	}

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		int level = intel_ddi_level(encoder, crtc_state, lane);

		intel_de_rmw(display, BXT_PORT_TX_DW4_LN(phy, ch, lane),
			     DE_EMPHASIS_MASK,
			     DE_EMPHASIS(trans->entries[level].bxt.deemphasis));
	}

	bxt_dpio_phy_rmw_grp(display, BXT_PORT_PCS_DW10_LN01(phy, ch),
			     BXT_PORT_PCS_DW10_GRP(phy, ch),
			     0, TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
}

bool bxt_dpio_phy_is_enabled(struct intel_display *display,
			     enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info;

	phy_info = bxt_get_phy_info(display, phy);

	if (!(intel_de_read(display, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
		return false;

	if ((intel_de_read(display, BXT_PORT_CL1CM_DW0(phy)) &
	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
		drm_dbg(display->drm,
			"DDI PHY %d powered, but power hasn't settled\n", phy);

		return false;
	}

	if (!(intel_de_read(display, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
		drm_dbg(display->drm,
			"DDI PHY %d powered, but still in reset\n", phy);

		return false;
	}

	return true;
}

static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy)
{
	u32 val = intel_de_read(display, BXT_PORT_REF_DW6(phy));

	return REG_FIELD_GET(GRC_CODE_MASK, val);
}

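/*
 * Wait for the PHY's resistor calibration (GRC) to complete, so that the
 * resulting GRC code can be copied to a PHY that lacks its own rcomp
 * resistor.
 */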
static void bxt_phy_wait_grc_done(struct intel_display *display,
				  enum dpio_phy phy)
{
	if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
		drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy);
}

static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info;
	u32 val;

	phy_info = bxt_get_phy_info(display, phy);

	if (bxt_dpio_phy_is_enabled(display, phy)) {
		/* Still read out the GRC value for state verification */
		if (phy_info->rcomp_phy != -1)
			display->state.bxt_phy_grc = bxt_get_grc(display, phy);

		if (bxt_dpio_phy_verify_state(display, phy)) {
			drm_dbg(display->drm, "DDI PHY %d already enabled, "
				"won't reprogram it\n", phy);
			return;
		}

		drm_dbg(display->drm,
			"DDI PHY %d enabled with invalid state, "
			"force reprogramming it\n", phy);
	}

	intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask);

	/*
	 * The PHY registers start out inaccessible and respond to reads with
	 * all 1s. Eventually they become accessible as they power up, then
	 * the reserved bit will give the default 0. Poll on the reserved bit
	 * becoming 0 to find when the PHY is accessible.
	 * The flag should get set in 100us according to the HW team, but
	 * use 1ms due to occasional timeouts observed with that.
	 */
	if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
			     PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1))
		drm_err(display->drm, "timeout during PHY%d power on\n",
			phy);

	/* Program PLL Rcomp code offset */
	intel_de_rmw(display, BXT_PORT_CL1CM_DW9(phy),
		     IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xE4));

	intel_de_rmw(display, BXT_PORT_CL1CM_DW10(phy),
		     IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xE4));

	/* Program power gating */
	intel_de_rmw(display, BXT_PORT_CL1CM_DW28(phy), 0,
		     OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG);

	if (phy_info->dual_channel)
		intel_de_rmw(display, BXT_PORT_CL2CM_DW6(phy), 0,
			     DW6_OLDO_DYN_PWR_DOWN_EN);

	if (phy_info->rcomp_phy != -1) {
		u32 grc_code;

		bxt_phy_wait_grc_done(display, phy_info->rcomp_phy);

		/*
		 * PHY0 isn't connected to an RCOMP resistor so copy over
		 * the corresponding calibrated value from PHY1, and disable
		 * the automatic calibration on PHY0.
		 */
		val = bxt_get_grc(display, phy_info->rcomp_phy);
		display->state.bxt_phy_grc = val;

		grc_code = GRC_CODE_FAST(val) |
			GRC_CODE_SLOW(val) |
			GRC_CODE_NOM(val);
		intel_de_write(display, BXT_PORT_REF_DW6(phy), grc_code);
		intel_de_rmw(display, BXT_PORT_REF_DW8(phy),
			     0, GRC_DIS | GRC_RDY_OVRD);
	}

	if (phy_info->reset_delay)
		udelay(phy_info->reset_delay);

	intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS);
}

void bxt_dpio_phy_uninit(struct intel_display *display, enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info;

	phy_info = bxt_get_phy_info(display, phy);

	intel_de_rmw(display, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0);

	intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0);
}

void bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info = bxt_get_phy_info(display, phy);
	enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
	bool was_enabled;

	lockdep_assert_held(&display->power.domains.lock);

	was_enabled = true;
	if (rcomp_phy != -1)
		was_enabled = bxt_dpio_phy_is_enabled(display, rcomp_phy);

	/*
	 * We need to copy the GRC calibration value from rcomp_phy,
	 * so make sure it's powered up.
	 */
	if (!was_enabled)
		_bxt_dpio_phy_init(display, rcomp_phy);

	_bxt_dpio_phy_init(display, phy);

	if (!was_enabled)
		bxt_dpio_phy_uninit(display, rcomp_phy);
}

static bool __printf(6, 7)
__phy_reg_verify_state(struct intel_display *display, enum dpio_phy phy,
		       i915_reg_t reg, u32 mask, u32 expected,
		       const char *reg_fmt, ...)
{
	struct va_format vaf;
	va_list args;
	u32 val;

	val = intel_de_read(display, reg);
	if ((val & mask) == expected)
		return true;

	va_start(args, reg_fmt);
	vaf.fmt = reg_fmt;
	vaf.va = &args;

	drm_dbg(display->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
		"current %08x, expected %08x (mask %08x)\n",
		phy, &vaf, reg.reg, val, (val & ~mask) | expected,
		mask);

	va_end(args);

	return false;
}

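/*
 * Check that the PHY's current register state matches what
 * _bxt_dpio_phy_init() would have programmed, without reprogramming anything.
 */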
bool bxt_dpio_phy_verify_state(struct intel_display *display,
			       enum dpio_phy phy)
{
	const struct bxt_dpio_phy_info *phy_info;
	u32 mask;
	bool ok;

	phy_info = bxt_get_phy_info(display, phy);

#define _CHK(reg, mask, exp, fmt, ...)					\
	__phy_reg_verify_state(display, phy, reg, mask, exp, fmt,	\
			       ## __VA_ARGS__)

	if (!bxt_dpio_phy_is_enabled(display, phy))
		return false;

	ok = true;

	/* PLL Rcomp code offset */
	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
		   IREF0RC_OFFSET_MASK, IREF0RC_OFFSET(0xe4),
		   "BXT_PORT_CL1CM_DW9(%d)", phy);
	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
		   IREF1RC_OFFSET_MASK, IREF1RC_OFFSET(0xe4),
		   "BXT_PORT_CL1CM_DW10(%d)", phy);

	/* Power gating */
	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
		   "BXT_PORT_CL1CM_DW28(%d)", phy);

	if (phy_info->dual_channel)
		ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
			   "BXT_PORT_CL2CM_DW6(%d)", phy);

	if (phy_info->rcomp_phy != -1) {
		u32 grc_code = display->state.bxt_phy_grc;

		grc_code = GRC_CODE_FAST(grc_code) |
			GRC_CODE_SLOW(grc_code) |
			GRC_CODE_NOM(grc_code);
		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
			GRC_CODE_NOM_MASK;
		ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
			   "BXT_PORT_REF_DW6(%d)", phy);

		mask = GRC_DIS | GRC_RDY_OVRD;
		ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
			   "BXT_PORT_REF_DW8(%d)", phy);
	}

	return ok;
#undef _CHK
}

u8
bxt_dpio_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
	switch (lane_count) {
	case 1:
		return 0;
	case 2:
		return BIT(2) | BIT(0);
	case 4:
		return BIT(3) | BIT(2) | BIT(0);
	default:
		MISSING_CASE(lane_count);

		return 0;
	}
}

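/*
 * Apply the lane latency optimization mask computed by
 * bxt_dpio_phy_calc_lane_lat_optim_mask() to the per-lane TX DW14 registers.
 */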
void bxt_dpio_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				      u8 lane_lat_optim_mask)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum dpio_phy phy;
	enum dpio_channel ch;
	int lane;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	for (lane = 0; lane < 4; lane++) {
		/*
		 * Note that on CHV this flag is called UPAR, but has
		 * the same function.
		 */
		intel_de_rmw(display, BXT_PORT_TX_DW14_LN(phy, ch, lane),
			     LATENCY_OPTIM,
			     lane_lat_optim_mask & BIT(lane) ?
			     LATENCY_OPTIM : 0);
	}
}

u8
bxt_dpio_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum dpio_phy phy;
	enum dpio_channel ch;
	int lane;
	u8 mask;

	bxt_port_to_phy_channel(display, port, &phy, &ch);

	mask = 0;
	for (lane = 0; lane < 4; lane++) {
		u32 val = intel_de_read(display,
					BXT_PORT_TX_DW14_LN(phy, ch, lane));

		if (val & LATENCY_OPTIM)
			mask |= BIT(lane);
	}

	return mask;
}

enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port)
{
	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
	case PORT_D:
		return DPIO_CH0;
	case PORT_C:
		return DPIO_CH1;
	}
}

enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
{
	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
	case PORT_C:
		return DPIO_PHY0;
	case PORT_D:
		return DPIO_PHY1;
	}
}

enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
{
	switch (pipe) {
	default:
		MISSING_CASE(pipe);
		fallthrough;
	case PIPE_A:
	case PIPE_B:
		return DPIO_PHY0;
	case PIPE_C:
		return DPIO_PHY1;
	}
}

enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
	switch (pipe) {
	default:
		MISSING_CASE(pipe);
		fallthrough;
	case PIPE_A:
	case PIPE_C:
		return DPIO_CH0;
	case PIPE_B:
		return DPIO_CH1;
	}
}

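/*
 * Program CHV voltage swing and de-emphasis: clear the swing calculation
 * initiators, update the per-lane de-emphasis, margin and transition scale
 * values, then kick off a new swing calculation.
 */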
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	u32 val;
	int i;

	vlv_dpio_get(dev_priv);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < crtc_state->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= DPIO_SWING_DEEMPH9P5(deemph_reg_value);
		vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < crtc_state->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= DPIO_SWING_MARGIN000(margin_reg_value);

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~DPIO_UNIQ_TRANS_SCALE_MASK;
		val |= DPIO_UNIQ_TRANS_SCALE(0x9a);

		vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < crtc_state->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
		if (uniq_trans_scale)
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
	}

	vlv_dpio_put(dev_priv);
}

void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	u32 val;

	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
	}

	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
	}
}

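/*
 * Power up the common lanes, assert the data lane reset and route the clock
 * for this port before the PLL is enabled.
 */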
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	enum pipe pipe = crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(crtc_state->lane_count);
	u32 val;

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dig_port->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, crtc_state, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
	val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe == PIPE_B)
		val |= DPIO_PCS_USEDCLKCHANNEL;
	else
		val &= ~DPIO_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
		val |= DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe == PIPE_B)
			val |= DPIO_PCS_USEDCLKCHANNEL;
		else
			val &= ~DPIO_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
	if (pipe == PIPE_B)
		val |= CHV_CMN_USEDCLKCHANNEL;
	else
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);

	vlv_dpio_put(dev_priv);
}

void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	int data, i, stagger;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < crtc_state->lane_count; i++) {
		/* Set the upar bit */
		if (crtc_state->lane_count == 1)
			data = 0;
		else
			data = (i == 1) ? 0 : DPIO_UPAR;
		vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), data);
	}

	/* Data lane stagger programming */
	if (crtc_state->port_clock > 270000)
		stagger = 0x18;
	else if (crtc_state->port_clock > 135000)
		stagger = 0xd;
	else if (crtc_state->port_clock > 67500)
		stagger = 0x7;
	else if (crtc_state->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);

	if (crtc_state->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (crtc_state->lane_count > 2) {
		vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, crtc_state, false);

	vlv_dpio_put(dev_priv);
}

void chv_phy_release_cl2_override(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	if (dig_port->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dig_port->release_cl2_override = false;
	}
}

void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, phy, CHV_CMN_DW1_CH1, val);
	}

	vlv_dpio_put(dev_priv);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);

	vlv_dpio_get(dev_priv);

	vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), 0x00000000);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW4_GRP(ch), demph_reg_value);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW2_GRP(ch),
		       uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW3_GRP(ch), 0x0C782040);

	if (tx3_demph)
		vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(ch, 3), tx3_demph);

	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11_GRP(ch), 0x00030000);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9_GRP(ch), preemph_reg_value);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW5_GRP(ch), DPIO_TX_OCALINIT_EN);

	vlv_dpio_put(dev_priv);
}

void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);

	/* Program Tx lane resets to default */
	vlv_dpio_get(dev_priv);

	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch),
		       DPIO_PCS_TX_LANE2_RESET |
		       DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch),
		       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
		       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
		       DPIO_PCS_CLK_DATAWIDTH_8_10 |
		       DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12_GRP(ch), 0x00750f00);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW11_GRP(ch), 0x00001500);
	vlv_dpio_write(dev_priv, phy, VLV_TX_DW14_GRP(ch), 0x40400000);

	vlv_dpio_put(dev_priv);
}

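/*
 * Select the clock channel for this port and program the lane clock before
 * the encoder is enabled.
 */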
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);
	enum pipe pipe = crtc->pipe;
	u32 val;

	vlv_dpio_get(dev_priv);

	/* Enable clock channels for this port */
	val = DPIO_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe == PIPE_B)
		val |= DPIO_PCS_USEDCLKCHANNEL;
	val |= 0xc4;
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8_GRP(ch), val);

	/* Program lane clock */
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14_GRP(ch), 0x00760018);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23_GRP(ch), 0x00400888);

	vlv_dpio_put(dev_priv);
}

void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
	enum dpio_phy phy = vlv_dig_port_to_phy(dig_port);

	vlv_dpio_get(dev_priv);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0_GRP(ch), 0x00000000);
	vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1_GRP(ch), 0x00e00060);
	vlv_dpio_put(dev_priv);
}