// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include <drm/drm_print.h>

#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_uncore_trace.h"

#define AUX_CH_NAME_BUFSIZE	6

static const char *aux_ch_name(struct intel_display *display,
			       char *buf, int size, enum aux_ch aux_ch)
{
	if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
	else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
	else
		snprintf(buf, size, "%c", 'A' + aux_ch);

	return buf;
}

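/*
 * The AUX channel data registers hold the message payload MSB first:
 * byte 0 of the message lands in bits 31:24 of the register and byte 3
 * in bits 7:0. The pack/unpack helpers below convert between that
 * register layout and the byte buffers used by the DRM AUX core.
 */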
u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
				   0,
				   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that.
	 */
	return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = display->cdclk.hw.cdclk;
	else
		freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(display)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static int intel_dp_aux_sync_len(void)
{
	int precharge = 16; /* 10-16 */
	int preamble = 16;

	return precharge + preamble;
}

int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
{
	int precharge = 10; /* 10-16 */
	int preamble = 8;

	/*
	 * We faced some glitches on a Dell Precision 5490 MTL laptop with the
	 * panel "Manufacturer: AUO, Model: 63898" when using the HW default of
	 * 18. Using 20 fixes these problems with that panel and is still within
	 * the range mentioned in the eDP specification. Since increasing the
	 * Fast Wake sync length causes problems with other panels, increase the
	 * length as a quirk for this specific laptop only.
	 */
	if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
		precharge += 2;

	return precharge + preamble;
}

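/*
 * With the current values intel_dp_aux_sync_len() returns 16 + 16 = 32
 * and intel_dp_aux_fw_sync_len() returns 10 + 8 = 18 (20 with the
 * QUIRK_FW_SYNC_LEN quirk), so g4x_dp_aux_precharge_len() below comes
 * out to (32 - 10 - 16) / 2 = 3 units of extra precharge.
 */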
static int g4x_dp_aux_precharge_len(void)
{
	int precharge_min = 10;
	int preamble = 16;

	/* HW wants the length of the extra precharge in 2us units */
	return (intel_dp_aux_sync_len() -
		precharge_min - preamble) / 2;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_display *display = to_intel_display(intel_dp);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (display->platform.broadwell)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	       DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
	       DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ret |= DP_AUX_CH_CTL_TBT_IO;

	/*
	 * Power request bit is already set during aux power well enable.
	 * Preserve the bit across aux transactions.
	 */
	if (DISPLAY_VER(display) >= 14)
		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;

	return ret;
}

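/*
 * Low level AUX transfer: loads the (up to 20 byte) request into the
 * AUX channel data registers, kicks off the transaction and waits for
 * completion, retrying on timeouts and receive errors as required by
 * the DP CTS. Returns the number of bytes received, or a negative
 * error code.
 */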
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref = NULL;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	intel_digital_port_lock(encoder);
	/*
	 * Abort transfers on a disconnected port as required by
	 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
	 * timeouts that would otherwise happen.
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !intel_digital_port_connected_locked(&dig_port->base)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(display, aux_domain);

	/*
	 * The PPS state needs to be locked for:
	 * - eDP on all platforms, since AUX transfers on eDP need VDD power
	 *   (either forced or via panel power) which depends on the PPS
	 *   state.
	 * - non-eDP on platforms where the PPS is a pipe instance (VLV/CHV),
	 *   since changing the PPS state (via a parallel modeset for
	 *   instance) may interfere with the AUX transfers on a non-eDP
	 *   output as well.
	 */
	if (intel_dp_is_edp(intel_dp) ||
	    display->platform.valleyview || display->platform.cherryview)
		pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent
	 * it using the same AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(display, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_de_read(display, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(display->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

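	/*
	 * Walk the platform's list of AUX clock dividers (most platforms
	 * only have one), retrying the transfer with each divider until
	 * the transaction completes or we run out of dividers.
	 */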
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(display, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(display, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(display, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(display->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(display->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	if (pps_wakeref)
		intel_pps_unlock(intel_dp, pps_wakeref);

	intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
	intel_digital_port_unlock(encoder);

	return ret;
}

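/*
 * Native/i2c AUX request header, as assembled by intel_dp_aux_header():
 * byte 0 = request type in the high nibble plus address bits 19:16,
 * byte 1 = address bits 15:8, byte 2 = address bits 7:0,
 * byte 3 = (message size - 1). A "bare address" request omits byte 3.
 */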
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

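/*
 * drm_dp_aux.transfer() hook: builds the AUX header for the message,
 * appends any write payload, hands the request to intel_dp_aux_xfer()
 * and translates the raw reply back into the drm_dp_aux_msg.
 */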
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct intel_display *display = to_intel_display(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(display->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(display->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(display->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

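/*
 * Per-platform helpers returning the AUX channel control and data
 * registers for the encoder's aux_ch. Unknown channels are flagged
 * with MISSING_CASE() and fall back to a sane default register.
 */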
static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
	}
}

static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
	}
}

void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

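/*
 * Set up the AUX channel for this DP port: select the register lookup,
 * clock divider and send-ctl vfuncs for the platform, register the
 * drm_dp_aux transfer hook and add the cpu latency QoS request used
 * during transfers.
 */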
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	if (DISPLAY_VER(display) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(display)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (display->platform.valleyview || display->platform.cherryview) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (display->platform.broadwell || display->platform.haswell)
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(display))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = display->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(display, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	intel_dp_dpcd_set_probe(intel_dp, true);
}

static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/* SKL has DDI E but no AUX E */
	if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
		return AUX_CH_A;

	return (enum aux_ch)encoder->port;
}

static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
		      enum aux_ch aux_ch)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;

	for_each_intel_encoder(display->drm, other) {
		if (other == encoder)
			continue;

		if (!intel_encoder_is_dig_port(other))
			continue;

		if (enc_to_dig_port(other)->aux_ch == aux_ch)
			return other;
	}

	return NULL;
}

enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;
	const char *source;
	enum aux_ch aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
	source = "VBT";

	if (aux_ch == AUX_CH_NONE) {
		aux_ch = default_aux_ch(encoder);
		source = "platform default";
	}

	if (aux_ch == AUX_CH_NONE)
		return AUX_CH_NONE;

	/* FIXME validate aux_ch against platform caps */

	other = get_encoder_by_aux_ch(encoder, aux_ch);
	if (other) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name,
			    aux_ch_name(display, buf, sizeof(buf), aux_ch),
			    other->base.base.id, other->base.name);
		return AUX_CH_NONE;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
		    encoder->base.base.id, encoder->base.name,
		    aux_ch_name(display, buf, sizeof(buf), aux_ch), source);

	return aux_ch;
}

void intel_dp_aux_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->gmbus.wait_queue);
}