// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_pps.h"
#include "intel_tc.h"

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
	 */
	return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
	 * divide by 2000 and use that
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = dev_priv->cdclk.hw.cdclk;
	else
		freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}
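/*
 * Worked example (illustrative; assumes rawclk_freq is in kHz, as it is
 * elsewhere in i915): with a 24 MHz rawclk, g4x_get_aux_clock_divider()
 * above returns DIV_ROUND_CLOSEST(24000, 2000) == 12, the divider for the
 * ~2 MHz AUX clock its comment asks for. Similarly, intel_dp_pack_aux()
 * packs MSB-first, so { 0x90, 0x01, 0x23, 0x03 } becomes the register
 * word 0x90012303, and intel_dp_unpack_aux() reverses that layout.
 */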
static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(dig_port->base.base.dev);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (IS_BROADWELL(dev_priv))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ret |= DP_AUX_CH_CTL_TBT_IO;

	return ret;
}
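/*
 * Editor's summary (not kernel-doc): intel_dp_aux_xfer() below performs one
 * raw AUX transaction. It takes the TC port lock plus AUX power and PPS
 * references, pins CPU wakeup latency via pm_qos, waits for the channel to
 * go idle, then retries the send (per the DP spec, and across the available
 * clock dividers) until the hardware reports DONE, and finally unpacks any
 * reply bytes. It returns the number of bytes received or a negative errno.
 */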
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so return -EBUSY and let the
	 * drm layer take care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}
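/*
 * Worked example (illustrative): for a native AUX read (request 0x9) of
 * DPCD address 0x000 (DP_DPCD_REV) with msg->size == 1, intel_dp_aux_header()
 * below produces the header bytes { 0x90, 0x00, 0x00, 0x00 }, which
 * intel_dp_pack_aux() loads into the first data register as 0x90000000.
 */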
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
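/*
 * Usage sketch (illustrative): intel_dp_aux_transfer() above is not called
 * directly; the drm core dispatches to it through intel_dp->aux.transfer.
 * For example, a drm_dp_dpcd_readb(&intel_dp->aux, DP_DPCD_REV, &rev) from
 * common code becomes a DP_AUX_NATIVE_READ message handled here.
 */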
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
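/*
 * Note (editor's): like the g4x/ilk/skl variants above, the TGL+ lookups
 * below fall back to a known-good channel with a MISSING_CASE() warning
 * rather than returning an invalid register for an unexpected aux_ch.
 */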
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:	/* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:	/* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5:	/* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6:	/* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;

	if (DISPLAY_VER(dev_priv) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = &dev_priv->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	if (DISPLAY_VER(dev_priv) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch - AUX_CH_D_XELPD + AUX_CH_D),
					       encoder->base.name);
	else if (DISPLAY_VER(dev_priv) >= 12 && aux_ch >= AUX_CH_USBC1)
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX USBC%c/%s",
					       aux_ch - AUX_CH_USBC1 + '1',
					       encoder->base.name);
	else
		intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/%s",
					       aux_ch_name(aux_ch),
					       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
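/*
 * Lifecycle note (editor's): intel_dp_aux_init() wires the per-platform
 * vfuncs, registers the pm_qos request and names the channel (the kasprintf()
 * formats above yield names of the form "AUX B/<encoder name>" or
 * "AUX USBC1/<encoder name>"), while intel_dp_aux_fini() releases the qos
 * request and frees the name on teardown.
 */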