// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "intel_bios.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"

#define AUX_CH_NAME_BUFSIZE	6

static const char *aux_ch_name(struct intel_display *display,
			       char *buf, int size, enum aux_ch aux_ch)
{
	if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
	else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
	else
		snprintf(buf, size, "%c", 'A' + aux_ch);

	return buf;
}
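
/*
 * The AUX data registers hold the payload MSB first: the first payload byte
 * lands in the most significant byte of the 32-bit register. As an
 * illustrative example, intel_dp_aux_pack((const u8 []){ 0x11, 0x22, 0x33 }, 3)
 * returns 0x11223300, and intel_dp_aux_unpack() undoes that layout on the
 * receive side.
 */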
u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
				   0,
				   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = display->cdclk.hw.cdclk;
	else
		freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static int intel_dp_aux_sync_len(void)
{
	int precharge = 16; /* 10-16 */
	int preamble = 16;

	return precharge + preamble;
}

int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
{
	int precharge = 10; /* 10-16 */
	int preamble = 8;

	/*
	 * We faced some glitches on the Dell Precision 5490 MTL laptop with the
	 * panel "Manufacturer: AUO, Model: 63898" when using the HW default of
	 * 18. Using 20 fixes these problems with that panel, and it is still
	 * within the range mentioned in the eDP specification. Increasing the
	 * Fast Wake sync length caused problems with other panels, so increase
	 * the length only as a quirk for this specific laptop.
	 */
	if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
		precharge += 2;

	return precharge + preamble;
}
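
/*
 * For reference, with the values above: intel_dp_aux_sync_len() yields
 * 16 + 16 = 32 pulses, intel_dp_aux_fw_sync_len() yields the HW default of
 * 10 + 8 = 18 (or 20 with QUIRK_FW_SYNC_LEN), and g4x_dp_aux_precharge_len()
 * below works out to (32 - 10 - 16) / 2 = 3.
 */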
static int g4x_dp_aux_precharge_len(void)
{
	int precharge_min = 10;
	int preamble = 16;

	/* HW wants the length of the extra precharge in 2us units */
	return (intel_dp_aux_sync_len() -
		precharge_min - preamble) / 2;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (IS_BROADWELL(i915))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	       DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
	       DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ret |= DP_AUX_CH_CTL_TBT_IO;

	/*
	 * Power request bit is already set during aux power well enable.
	 * Preserve the bit across aux transactions.
	 */
	if (DISPLAY_VER(display) >= 14)
		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;

	return ret;
}
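
/*
 * A single AUX transaction carries at most 16 data bytes plus a 4 byte
 * header, i.e. 20 bytes, which is exactly what the five 32-bit data
 * registers can hold; hence the send_bytes/recv_size checks against 20
 * below.
 */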
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	intel_digital_port_lock(encoder);
	/*
	 * Abort transfers on a disconnected port as required by
	 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
	 * timeouts that would otherwise happen.
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !intel_digital_port_connected_locked(&dig_port->base)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to the upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent
	 * it using the same AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(display, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_de_read(display, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(display->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(display, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(display, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(display, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(display->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(display->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);
out_unlock:
	intel_digital_port_unlock(encoder);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
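
/*
 * The four header bytes follow the DP AUX framing: request nibble plus
 * address bits 19:16, then address bits 15:8, address bits 7:0, and the
 * payload length minus one. For example, a native read of 16 bytes at DPCD
 * address 0x00000 produces the header { 0x90, 0x00, 0x00, 0x0f }.
 */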
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct intel_display *display = to_intel_display(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(display->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(display->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(display->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
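
/*
 * Illustrative usage (not part of this file): once intel_dp_aux_init() has
 * registered intel_dp_aux_transfer() as intel_dp->aux.transfer, the DRM DP
 * helpers funnel DPCD accesses through it, e.g.:
 *
 *	u8 rev;
 *
 *	drm_dp_dpcd_readb(&intel_dp->aux, DP_DPCD_REV, &rev);
 */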

static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:	/* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:	/* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:	/* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:	/* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
	}
}

static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
	}
}

void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}
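
/*
 * Pick the register accessors, AUX clock divider and send-ctl hooks for the
 * platform, then register intel_dp_aux_transfer() with the DRM DP AUX core.
 */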
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	if (DISPLAY_VER(display) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(i915)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(i915))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = display->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(display, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/* SKL has DDI E but no AUX E */
	if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
		return AUX_CH_A;

	return (enum aux_ch)encoder->port;
}
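
/*
 * Return any other digital port encoder that has already claimed the given
 * AUX CH, so intel_dp_aux_ch() below can reject duplicate assignments (e.g.
 * from a bogus VBT).
 */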
static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
		      enum aux_ch aux_ch)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;

	for_each_intel_encoder(display->drm, other) {
		if (other == encoder)
			continue;

		if (!intel_encoder_is_dig_port(other))
			continue;

		if (enc_to_dig_port(other)->aux_ch == aux_ch)
			return other;
	}

	return NULL;
}

enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;
	const char *source;
	enum aux_ch aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
	source = "VBT";

	if (aux_ch == AUX_CH_NONE) {
		aux_ch = default_aux_ch(encoder);
		source = "platform default";
	}

	if (aux_ch == AUX_CH_NONE)
		return AUX_CH_NONE;

	/* FIXME validate aux_ch against platform caps */

	other = get_encoder_by_aux_ch(encoder, aux_ch);
	if (other) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name,
			    aux_ch_name(display, buf, sizeof(buf), aux_ch),
			    other->base.base.id, other->base.name);
		return AUX_CH_NONE;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
		    encoder->base.base.id, encoder->base.name,
		    aux_ch_name(display, buf, sizeof(buf), aux_ch), source);

	return aux_ch;
}

void intel_dp_aux_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->gmbus.wait_queue);
}