// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
#include "intel_tc.h"

#define AUX_CH_NAME_BUFSIZE	6

static const char *aux_ch_name(struct drm_i915_private *i915,
			       char *buf, int size, enum aux_ch aux_ch)
{
	if (DISPLAY_VER(i915) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
	else if (DISPLAY_VER(i915) >= 12 && aux_ch >= AUX_CH_USBC1)
		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
	else
		snprintf(buf, size, "%c", 'A' + aux_ch);

	return buf;
}

u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = __intel_de_wait_for_register(i915, ch_ctl,
					   DP_AUX_CH_CTL_SEND_BUSY, 0,
					   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = i915->display.cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(i915)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static int intel_dp_aux_sync_len(void)
{
	int precharge = 16; /* 10-16 */
	int preamble = 16;

	return precharge + preamble;
}

static int intel_dp_aux_fw_sync_len(void)
{
	int precharge = 10; /* 10-16 */
	int preamble = 8;

	return precharge + preamble;
}

static int g4x_dp_aux_precharge_len(void)
{
	int precharge_min = 10;
	int preamble = 16;

	/* HW wants the length of the extra precharge in 2us units */
	return (intel_dp_aux_sync_len() -
		precharge_min - preamble) / 2;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (IS_BROADWELL(i915))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	       DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
	       DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ret |= DP_AUX_CH_CTL_TBT_IO;

	/*
	 * Power request bit is already set during aux power well enable.
	 * Preserve the bit across aux transactions.
	 */
	if (DISPLAY_VER(i915) >= 14)
		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;

	return ret;
}

static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port) {
		intel_tc_port_lock(dig_port);
		/*
		 * Abort transfers on a disconnected port as required by
		 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
		 * timeouts that would otherwise happen.
		 * TODO: abort the transfer on non-TC ports as well.
		 */
		if (!intel_tc_port_connected_locked(&dig_port->base)) {
			ret = -ENXIO;
			goto out_unlock;
		}
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent
	 * it using the same AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(i915, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_de_read(i915, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(i915, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(i915, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(i915, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:  /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:  /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
	}
}

static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
	}
}

void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	if (DISPLAY_VER(i915) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(i915) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(i915) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(i915)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(i915) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(i915))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(i915) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = &i915->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(i915, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	/* SKL has DDI E but no AUX E */
	if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
		return AUX_CH_A;

	return (enum aux_ch)encoder->port;
}

static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
		      enum aux_ch aux_ch)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_encoder *other;

	for_each_intel_encoder(&i915->drm, other) {
		if (other == encoder)
			continue;

		if (!intel_encoder_is_dig_port(other))
			continue;

		if (enc_to_dig_port(other)->aux_ch == aux_ch)
			return other;
	}

	return NULL;
}

enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_encoder *other;
	const char *source;
	enum aux_ch aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
	source = "VBT";

	if (aux_ch == AUX_CH_NONE) {
		aux_ch = default_aux_ch(encoder);
		source = "platform default";
	}

	if (aux_ch == AUX_CH_NONE)
		return AUX_CH_NONE;

	/* FIXME validate aux_ch against platform caps */

	other = get_encoder_by_aux_ch(encoder, aux_ch);
	if (other) {
		drm_dbg_kms(&i915->drm,
			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name,
			    aux_ch_name(i915, buf, sizeof(buf), aux_ch),
			    other->base.base.id, other->base.name);
		return AUX_CH_NONE;
	}

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
		    encoder->base.base.id, encoder->base.name,
		    aux_ch_name(i915, buf, sizeof(buf), aux_ch), source);

	return aux_ch;
}

void intel_dp_aux_irq_handler(struct drm_i915_private *i915)
{
	wake_up_all(&i915->display.gmbus.wait_queue);
}