// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/log2.h>
#include <linux/math64.h>

#include <drm/drm_print.h>

#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_display_regs.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"

/* Iterate over the (at most two) PHY lanes selected in __lane_mask. */
#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
	for ((__lane) = 0; (__lane) < 2; (__lane)++) \
		for_each_if((__lane_mask) & BIT(__lane))

#define INTEL_CX0_LANE0		BIT(0)
#define INTEL_CX0_LANE1		BIT(1)
#define INTEL_CX0_BOTH_LANES	(INTEL_CX0_LANE1 | INTEL_CX0_LANE0)

/*
 * Tell whether @encoder sits on a C10 (as opposed to C20) PHY.
 *
 * On Pantherlake only PHY A is C10 (PHY A..B on the Wildcat Lake
 * variant); on Lunarlake/Meteorlake the PHYs below PHY C are C10.
 * Everything else is not a C10 PHY.
 */
bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	enum phy phy = intel_encoder_to_phy(encoder);

	if (display->platform.pantherlake) {
		if (display->platform.pantherlake_wildcatlake)
			return phy <= PHY_B;
		else
			return phy == PHY_A;
	}

	if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
		return true;

	return false;
}

/*
 * Convert a single-lane mask to a lane index (0 or 1).  WARNs and
 * falls back to lane 0 if more than one lane (or an invalid bit) is set.
 */
static int lane_mask_to_lane(u8 lane_mask)
{
	if (WARN_ON((lane_mask & ~INTEL_CX0_BOTH_LANES) ||
		    hweight8(lane_mask) != 1))
		return 0;

	return ilog2(lane_mask);
}

/* Mask of the PHY lanes owned by display (vs. USB) on this port. */
static u8 intel_cx0_get_owned_lane_mask(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (!intel_tc_port_in_dp_alt_mode(dig_port))
		return INTEL_CX0_BOTH_LANES;

	/*
	 * In DP-alt with pin assignment D, only PHY lane 0 is owned
	 * by display and lane 1 is owned by USB.
	 */
	return intel_tc_port_max_lane_count(dig_port) > 2
		? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
}

/* Message bus access requires the DC-off power domain to be held. */
static void
assert_dc_off(struct intel_display *display)
{
	bool enabled;

	enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
	drm_WARN_ON(display->drm, !enabled);
}

/* Program the msgbus timeout timer value on both lanes of this port. */
static void intel_cx0_program_msgbus_timer(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	int lane;

	for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
		intel_de_rmw(display,
			     XELPDP_PORT_MSGBUS_TIMER(display, encoder->port, lane),
			     XELPDP_PORT_MSGBUS_TIMER_VAL_MASK,
			     XELPDP_PORT_MSGBUS_TIMER_VAL);
}

/*
 * Prepare HW for CX0 phy transactions.
 *
 * It is required that PSR and DC5/6 are disabled before any CX0 message
 * bus transaction is executed.
 *
 * We also do the msgbus timer programming here to ensure that the timer
 * is already programmed before any access to the msgbus.
 */
static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	intel_wakeref_t wakeref;

	intel_psr_pause(intel_dp);
	wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
	intel_cx0_program_msgbus_timer(encoder);

	return wakeref;
}

/* Counterpart of intel_cx0_phy_transaction_begin(): resume PSR, drop DC-off. */
static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	intel_psr_resume(intel_dp);
	intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}

/* Clear the PHY-to-display response-ready and error status bits for @lane. */
void intel_clear_response_ready_flag(struct intel_encoder *encoder,
				     int lane)
{
	struct intel_display *display = to_intel_display(encoder);

	intel_de_rmw(display,
		     XELPDP_PORT_P2M_MSGBUS_STATUS(display, encoder->port, lane),
		     0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}

/*
 * Reset the message bus of @lane back to idle and clear any stale
 * response status.  Logs (once) and bails if the reset itself times out.
 */
void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);

	intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
		       XELPDP_PORT_M2P_TRANSACTION_RESET);

	if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
				       XELPDP_PORT_M2P_TRANSACTION_RESET,
				       XELPDP_MSGBUS_TIMEOUT_MS)) {
		drm_err_once(display->drm,
			     "Failed to bring PHY %c to idle.\n",
			     phy_name(phy));
		return;
	}

	intel_clear_response_ready_flag(encoder, lane);
}

/*
 * Wait for a response of type @command on @lane, returning the raw
 * status in @val.  Returns 0 on success, -ETIMEDOUT if no response
 * arrives, -EINVAL on an error response or a response of the wrong
 * type.  The bus is reset on every failure path.
 */
int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
			   int command, int lane, u32 *val)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);

	if (intel_de_wait_ms(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
			     XELPDP_PORT_P2M_RESPONSE_READY,
			     XELPDP_PORT_P2M_RESPONSE_READY,
			     XELPDP_MSGBUS_TIMEOUT_MS, val)) {
		drm_dbg_kms(display->drm,
			    "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
			    phy_name(phy), *val);

		/* Cross-check against the HW timeout detection we armed earlier. */
		if (!(intel_de_read(display, XELPDP_PORT_MSGBUS_TIMER(display, port, lane)) &
		      XELPDP_PORT_MSGBUS_TIMER_TIMED_OUT))
			drm_dbg_kms(display->drm,
				    "PHY %c Hardware did not detect a timeout\n",
				    phy_name(phy));

		intel_cx0_bus_reset(encoder, lane);
		return -ETIMEDOUT;
	}

	if (*val & XELPDP_PORT_P2M_ERROR_SET) {
		drm_dbg_kms(display->drm,
			    "PHY %c Error occurred during %s command. Status: 0x%x\n",
			    phy_name(phy),
			    command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
		intel_cx0_bus_reset(encoder, lane);
		return -EINVAL;
	}

	if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
		drm_dbg_kms(display->drm,
			    "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n",
			    phy_name(phy),
			    command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
		intel_cx0_bus_reset(encoder, lane);
		return -EINVAL;
	}

	return 0;
}

/* One read attempt on @lane; returns the 8-bit data or a negative errno. */
static int __intel_cx0_read_once(struct intel_encoder *encoder,
				 int lane, u16 addr)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);
	int ack;
	u32 val;

	if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
				       XELPDP_PORT_M2P_TRANSACTION_PENDING,
				       XELPDP_MSGBUS_TIMEOUT_MS)) {
		drm_dbg_kms(display->drm,
			    "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
		intel_cx0_bus_reset(encoder, lane);
		return -ETIMEDOUT;
	}

	intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
		       XELPDP_PORT_M2P_COMMAND_READ |
		       XELPDP_PORT_M2P_ADDRESS(addr));

	ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
	if (ack < 0)
		return ack;

	intel_clear_response_ready_flag(encoder, lane);

	/*
	 * FIXME: Workaround to let HW to settle
	 * down and let the message bus to end up
	 * in a known state
	 */
	if (DISPLAY_VER(display) < 30)
		intel_cx0_bus_reset(encoder, lane);

	return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}

/*
 * Read one PHY register on @lane, retrying up to three times.
 * Returns 0 (and logs once) if every attempt fails.
 */
static u8 __intel_cx0_read(struct intel_encoder *encoder,
			   int lane, u16 addr)
{
	struct intel_display *display = to_intel_display(encoder);
	enum phy phy = intel_encoder_to_phy(encoder);
	int i, status;

	assert_dc_off(display);

	/* 3 tries is assumed to be enough to read successfully */
	for (i = 0; i < 3; i++) {
		status = __intel_cx0_read_once(encoder, lane, addr);

		if (status >= 0)
			return status;
	}

	drm_err_once(display->drm,
		     "PHY %c Read %04x failed after %d retries.\n",
		     phy_name(phy), addr, i);

	return 0;
}

/* Read one PHY register; @lane_mask must select exactly one lane. */
u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
	int lane = lane_mask_to_lane(lane_mask);

	return __intel_cx0_read(encoder, lane, addr);
}

/*
 * One write attempt on @lane; returns 0 or a negative errno.  A
 * committed write additionally waits for the PHY's write ACK.
 */
static int __intel_cx0_write_once(struct intel_encoder *encoder,
				  int lane, u16 addr, u8 data, bool committed)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);
	int ack;
	u32 val;

	if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
				       XELPDP_PORT_M2P_TRANSACTION_PENDING,
				       XELPDP_MSGBUS_TIMEOUT_MS)) {
		drm_dbg_kms(display->drm,
			    "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
		intel_cx0_bus_reset(encoder, lane);
		return -ETIMEDOUT;
	}

	intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
		       XELPDP_PORT_M2P_TRANSACTION_PENDING |
		       (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
				    XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
		       XELPDP_PORT_M2P_DATA(data) |
		       XELPDP_PORT_M2P_ADDRESS(addr));

	if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
				       XELPDP_PORT_M2P_TRANSACTION_PENDING,
				       XELPDP_MSGBUS_TIMEOUT_MS)) {
		drm_dbg_kms(display->drm,
			    "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
		intel_cx0_bus_reset(encoder, lane);
		return -ETIMEDOUT;
	}

	if (committed) {
		ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
		if (ack < 0)
			return ack;
	} else if ((intel_de_read(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane)) &
		    XELPDP_PORT_P2M_ERROR_SET)) {
		drm_dbg_kms(display->drm,
			    "PHY %c Error occurred during write command.\n", phy_name(phy));
		intel_cx0_bus_reset(encoder, lane);
		return -EINVAL;
	}

	intel_clear_response_ready_flag(encoder, lane);

	/*
	 * FIXME: Workaround to let HW to settle
	 * down and let the message bus to end up
	 * in a known state
	 */
	if (DISPLAY_VER(display) < 30)
		intel_cx0_bus_reset(encoder, lane);

	return 0;
}

/* Write one PHY register on @lane, retrying up to three times. */
static void __intel_cx0_write(struct intel_encoder *encoder,
			      int lane, u16 addr, u8 data, bool committed)
{
	struct intel_display *display = to_intel_display(encoder);
	enum phy phy = intel_encoder_to_phy(encoder);
	int i, status;

	assert_dc_off(display);

	/* 3 tries is assumed to be enough to write successfully */
	for (i = 0; i < 3; i++) {
		status = __intel_cx0_write_once(encoder, lane, addr, data, committed);

		if (status == 0)
			return;
	}

	drm_err_once(display->drm,
		     "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}

/* Write one PHY register on every lane selected in @lane_mask. */
void intel_cx0_write(struct intel_encoder *encoder,
		     u8 lane_mask, u16 addr, u8 data, bool committed)
{
	int lane;

	for_each_cx0_lane_in_mask(lane_mask, lane)
		__intel_cx0_write(encoder, lane, addr, data, committed);
}

/*
 * Indirect write into the C20 SRAM: latch the 16-bit address, then the
 * 16-bit data; only the final (data-low) write is committed.
 */
static void intel_c20_sram_write(struct intel_encoder *encoder,
				 int lane, u16 addr, u16 data)
{
	struct intel_display *display = to_intel_display(encoder);

	assert_dc_off(display);

	intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
	intel_cx0_write(encoder, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);

	intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
	intel_cx0_write(encoder, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
}

/* Indirect read from the C20 SRAM; counterpart of intel_c20_sram_write(). */
static u16 intel_c20_sram_read(struct intel_encoder *encoder,
			       int lane, u16 addr)
{
	struct intel_display *display = to_intel_display(encoder);
	u16 val;

	assert_dc_off(display);

	intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
	intel_cx0_write(encoder, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);

	val = intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_H);
	val <<= 8;
	val |= intel_cx0_read(encoder, lane, PHY_C20_RD_DATA_L);

	return val;
}

/* Read-modify-write one PHY register; skips the write if nothing changed. */
static void __intel_cx0_rmw(struct intel_encoder *encoder,
			    int lane, u16 addr, u8 clear, u8 set, bool committed)
{
	u8 old, val;

	old = __intel_cx0_read(encoder, lane, addr);
	val = (old & ~clear) | set;

	if (val != old)
		__intel_cx0_write(encoder, lane, addr, val, committed);
}

/* Read-modify-write on every lane selected in @lane_mask. */
void intel_cx0_rmw(struct intel_encoder *encoder,
		   u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
	u8 lane;

	for_each_cx0_lane_in_mask(lane_mask, lane)
		__intel_cx0_rmw(encoder, lane, addr, clear, set, committed);
}

/*
 * TX voltage boost level: 5 for non-eDP DP at 5.4/8.1 GHz (HBR2/HBR3)
 * link rates, 4 for other DP, 5 for non-DP outputs.
 */
static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_dp_encoder(crtc_state)) {
		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
		    (crtc_state->port_clock == 540000 ||
		     crtc_state->port_clock == 810000))
			return 5;
		else
			return 4;
	} else {
		return 5;
	}
}

/*
 * TX termination control: 5 for non-eDP DP at HBR2/HBR3 link rates,
 * 2 for other DP, 6 for non-DP outputs.
 */
static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state)
{
	if (intel_crtc_has_dp_encoder(crtc_state)) {
		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
		    (crtc_state->port_clock == 540000 ||
		     crtc_state->port_clock == 810000))
			return 5;
		else
			return 2;
	} else {
		return 6;
	}
}

/* Enable VDR register access via the message bus (C10 PHY only). */
static void intel_c10_msgbus_access_begin(struct intel_encoder *encoder,
					  u8 lane_mask)
{
	if (!intel_encoder_is_c10phy(encoder))
		return;

	intel_cx0_rmw(encoder, lane_mask, PHY_C10_VDR_CONTROL(1),
		      0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
}

/*
 * Latch the staged VDR configuration (C10 PHY only), optionally marking
 * the lane(s) as master lane.
 */
static void intel_c10_msgbus_access_commit(struct intel_encoder *encoder,
					   u8 lane_mask, bool master_lane)
{
	u8 val = C10_VDR_CTRL_UPDATE_CFG;

	if (!intel_encoder_is_c10phy(encoder))
		return;

	if (master_lane)
		val |= C10_VDR_CTRL_MASTER_LANE;

	intel_cx0_rmw(encoder, lane_mask, PHY_C10_VDR_CONTROL(1),
		      0, val, MB_WRITE_COMMITTED);
}

/*
 * Program vswing/pre-emphasis (and, on C10, vboost/termination) for all
 * display-owned lanes from the encoder's buf-trans table.  No-op in
 * TBT-alt mode, where the PHY is not owned by display.
 */
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
				     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(encoder);
	const struct intel_ddi_buf_trans *trans;
	u8 owned_lane_mask;
	intel_wakeref_t wakeref;
	int n_entries, ln;
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		return;

	owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);

	wakeref = intel_cx0_phy_transaction_begin(encoder);

	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (drm_WARN_ON_ONCE(display->drm, !trans)) {
		intel_cx0_phy_transaction_end(encoder, wakeref);
		return;
	}

	intel_c10_msgbus_access_begin(encoder, owned_lane_mask);

	if (intel_encoder_is_c10phy(encoder)) {
		intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CMN(3),
			      C10_CMN3_TXVBOOST_MASK,
			      C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
			      MB_WRITE_UNCOMMITTED);
		intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_TX(1),
			      C10_TX1_TERMCTL_MASK,
			      C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
			      MB_WRITE_COMMITTED);
	}

	for (ln = 0; ln < crtc_state->lane_count; ln++) {
		int level = intel_ddi_level(encoder, crtc_state, ln);
		/* Two TXes per PHY lane: map logical lane to (lane, tx) pair. */
		int lane = ln / 2;
		int tx = ln % 2;
		u8 lane_mask = lane == 0 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;

		if (!(lane_mask & owned_lane_mask))
			continue;

		intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 0),
			      C10_PHY_OVRD_LEVEL_MASK,
			      C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
			      MB_WRITE_COMMITTED);
		intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 1),
			      C10_PHY_OVRD_LEVEL_MASK,
			      C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
			      MB_WRITE_COMMITTED);
		intel_cx0_rmw(encoder, lane_mask, PHY_CX0_VDROVRD_CTL(lane, tx, 2),
			      C10_PHY_OVRD_LEVEL_MASK,
			      C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
			      MB_WRITE_COMMITTED);
	}

	/* Write Override enables in 0xD71 */
	intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_OVRD,
		      0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
		      MB_WRITE_COMMITTED);

	intel_c10_msgbus_access_commit(encoder, owned_lane_mask, false);

	intel_cx0_phy_transaction_end(encoder, wakeref);
}

/*
 * Basic DP link rates with 38.4 MHz
 * reference clock.
 * Note: The tables below are with SSC. In non-ssc
 * registers 0xC04 to 0xC08(pll[4] to pll[8]) will be
 * programmed 0.
 */

/* DP RBR (1.62 Gbps per lane) */
static const struct intel_c10pll_state mtl_c10_dp_rbr = {
	.clock = 162000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0xB4,
	.pll[1] = 0,
	.pll[2] = 0x30,
	.pll[3] = 0x1,
	.pll[4] = 0x26,
	.pll[5] = 0x0C,
	.pll[6] = 0x98,
	.pll[7] = 0x46,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0xC0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x2,
	.pll[16] = 0x84,
	.pll[17] = 0x4F,
	.pll[18] = 0xE5,
	.pll[19] = 0x23,
};

/* eDP intermediate rate 2.16 Gbps per lane */
static const struct intel_c10pll_state mtl_c10_edp_r216 = {
	.clock = 216000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0x4,
	.pll[1] = 0,
	.pll[2] = 0xA2,
	.pll[3] = 0x1,
	.pll[4] = 0x33,
	.pll[5] = 0x10,
	.pll[6] = 0x75,
	.pll[7] = 0xB3,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x2,
	.pll[16] = 0x85,
	.pll[17] = 0x0F,
	.pll[18] = 0xE6,
	.pll[19] = 0x23,
};

/* eDP intermediate rate 2.43 Gbps per lane */
static const struct intel_c10pll_state mtl_c10_edp_r243 = {
	.clock = 243000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0x34,
	.pll[1] = 0,
	.pll[2] = 0xDA,
	.pll[3] = 0x1,
	.pll[4] = 0x39,
	.pll[5] = 0x12,
	.pll[6] = 0xE3,
	.pll[7] = 0xE9,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0x20,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x2,
	.pll[16] = 0x85,
	.pll[17] = 0x8F,
	.pll[18] = 0xE6,
	.pll[19] = 0x23,
};

/* DP HBR1 (2.7 Gbps per lane) */
static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
	.clock = 270000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0xF4,
	.pll[1] = 0,
	.pll[2] = 0xF8,
	.pll[3] = 0x0,
	.pll[4] = 0x20,
	.pll[5] = 0x0A,
	.pll[6] = 0x29,
	.pll[7] = 0x10,
	.pll[8] = 0x1, /* Verify */
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0xA0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x1,
	.pll[16] = 0x84,
	.pll[17] = 0x4F,
	.pll[18] = 0xE5,
	.pll[19] = 0x23,
};

/* eDP intermediate rate 3.24 Gbps per lane */
static const struct intel_c10pll_state mtl_c10_edp_r324 = {
	.clock = 324000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0xB4,
	.pll[1] = 0,
	.pll[2] = 0x30,
	.pll[3] = 0x1,
	.pll[4] = 0x26,
	.pll[5] = 0x0C,
	.pll[6] = 0x98,
	.pll[7] = 0x46,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0xC0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x1,
	.pll[16] = 0x85,
	.pll[17] = 0x4F,
	.pll[18] = 0xE6,
	.pll[19] = 0x23,
};

/* eDP intermediate rate 4.32 Gbps per lane */
static const struct intel_c10pll_state mtl_c10_edp_r432 = {
	.clock = 432000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0x4,
	.pll[1] = 0,
	.pll[2] = 0xA2,
	.pll[3] = 0x1,
	.pll[4] = 0x33,
	.pll[5] = 0x10,
	.pll[6] = 0x75,
	.pll[7] = 0xB3,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x1,
	.pll[16] = 0x85,
	.pll[17] = 0x0F,
	.pll[18] = 0xE6,
	.pll[19] = 0x23,
};

/* DP HBR2 (5.4 Gbps per lane) */
static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
	.clock = 540000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0xF4,
	.pll[1] = 0,
	.pll[2] = 0xF8,
	.pll[3] = 0,
	.pll[4] = 0x20,
	.pll[5] = 0x0A,
	.pll[6] = 0x29,
	.pll[7] = 0x10,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0xA0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0,
	.pll[16] = 0x84,
	.pll[17] = 0x4F,
	.pll[18] = 0xE5,
	.pll[19] = 0x23,
};

/* eDP intermediate rate 6.75 Gbps per lane */
static const struct intel_c10pll_state mtl_c10_edp_r675 = {
	.clock = 675000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0xB4,
	.pll[1] = 0,
	.pll[2] = 0x3E,
	.pll[3] = 0x1,
	.pll[4] = 0xA8,
	.pll[5] = 0x0C,
	.pll[6] = 0x33,
	.pll[7] = 0x54,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0xC8,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0,
	.pll[16] = 0x85,
	.pll[17] = 0x8F,
	.pll[18] = 0xE6,
	.pll[19] = 0x23,
};

/* DP HBR3 (8.1 Gbps per lane) */
static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
	.clock = 810000,
	.tx = 0x10,
	.cmn = 0x21,
	.pll[0] = 0x34,
	.pll[1] = 0,
	.pll[2] = 0x84,
	.pll[3] = 0x1,
	.pll[4] = 0x30,
	.pll[5] = 0x0F,
	.pll[6] = 0x3D,
	.pll[7] = 0x98,
	.pll[8] = 0x1,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0xF0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0,
	.pll[16] = 0x84,
	.pll[17] = 0x0F,
	.pll[18] = 0xE5,
	.pll[19] = 0x23,
};

/* NULL-terminated, sorted by ascending .clock */
static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = {
	&mtl_c10_dp_rbr,
	&mtl_c10_dp_hbr1,
	&mtl_c10_dp_hbr2,
	&mtl_c10_dp_hbr3,
	NULL,
};

/* DP tables plus the eDP intermediate rates, sorted by ascending .clock */
static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
	&mtl_c10_dp_rbr,
	&mtl_c10_edp_r216,
	&mtl_c10_edp_r243,
	&mtl_c10_dp_hbr1,
	&mtl_c10_edp_r324,
	&mtl_c10_edp_r432,
	&mtl_c10_dp_hbr2,
	&mtl_c10_edp_r675,
	&mtl_c10_dp_hbr3,
	NULL,
};

/* C20 basic DP 1.4 tables */
static const struct intel_c20pll_state mtl_c20_dp_rbr = {
	.clock = 162000,
	.tx = {	0xbe88, /* tx cfg0 */
		0x5800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mpllb = { 0x50a8,	/* mpllb cfg0 */
		0x2120,		/* mpllb cfg1 */
		0xcd9a,		/* mpllb cfg2 */
		0xbfc1,		/* mpllb cfg3 */
		0x5ab8,         /* mpllb cfg4 */
		0x4c34,         /* mpllb cfg5 */
		0x2000,		/* mpllb cfg6 */
		0x0001,		/* mpllb cfg7 */
		0x6000,		/* mpllb cfg8 */
		0x0000,		/* mpllb cfg9 */
		0x0000,		/* mpllb cfg10 */
		},
};
/* DP HBR1 (2.7 Gbps per lane) */
static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
	.clock = 270000,
	.tx = {	0xbe88, /* tx cfg0 */
		0x4800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mpllb = { 0x308c,	/* mpllb cfg0 */
		0x2110,		/* mpllb cfg1 */
		0xcc9c,		/* mpllb cfg2 */
		0xbfc1,		/* mpllb cfg3 */
		0x4b9a,		/* mpllb cfg4 */
		0x3f81,		/* mpllb cfg5 */
		0x2000,		/* mpllb cfg6 */
		0x0001,		/* mpllb cfg7 */
		0x5000,		/* mpllb cfg8 */
		0x0000,		/* mpllb cfg9 */
		0x0000,		/* mpllb cfg10 */
		},
};

/* DP HBR2 (5.4 Gbps per lane) */
static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
	.clock = 540000,
	.tx = {	0xbe88, /* tx cfg0 */
		0x4800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mpllb = { 0x108c,	/* mpllb cfg0 */
		0x2108,		/* mpllb cfg1 */
		0xcc9c,		/* mpllb cfg2 */
		0xbfc1,		/* mpllb cfg3 */
		0x4b9a,		/* mpllb cfg4 */
		0x3f81,		/* mpllb cfg5 */
		0x2000,		/* mpllb cfg6 */
		0x0001,		/* mpllb cfg7 */
		0x5000,		/* mpllb cfg8 */
		0x0000,		/* mpllb cfg9 */
		0x0000,		/* mpllb cfg10 */
		},
};

/* DP HBR3 (8.1 Gbps per lane) */
static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
	.clock = 810000,
	.tx = {	0xbe88, /* tx cfg0 */
		0x4800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mpllb = { 0x10d2,	/* mpllb cfg0 */
		0x2108,		/* mpllb cfg1 */
		0x8d98,		/* mpllb cfg2 */
		0xbfc1,		/* mpllb cfg3 */
		0x7166,		/* mpllb cfg4 */
		0x5f42,		/* mpllb cfg5 */
		0x2000,		/* mpllb cfg6 */
		0x0001,		/* mpllb cfg7 */
		0x7800,		/* mpllb cfg8 */
		0x0000,		/* mpllb cfg9 */
		0x0000,		/* mpllb cfg10 */
		},
};

/* C20 basic DP 2.0 tables */
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
	.clock = 1000000, /* 10 Gbps */
	.tx = {	0xbe21, /* tx cfg0 */
		0xe800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0700, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	/* UHBR rates use MPLLA rather than MPLLB */
	.mplla = { 0x3104,	/* mplla cfg0 */
		0xd105,		/* mplla cfg1 */
		0xc025,		/* mplla cfg2 */
		0xc025,		/* mplla cfg3 */
		0x8c00,		/* mplla cfg4 */
		0x759a,		/* mplla cfg5 */
		0x4000,		/* mplla cfg6 */
		0x0003,		/* mplla cfg7 */
		0x3555,		/* mplla cfg8 */
		0x0001,		/* mplla cfg9 */
		},
};

static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
	.clock = 1350000, /* 13.5 Gbps */
	.tx = {	0xbea0, /* tx cfg0 */
		0x4800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mpllb = { 0x015f,	/* mpllb cfg0 */
		0x2205,		/* mpllb cfg1 */
		0x1b17,		/* mpllb cfg2 */
		0xffc1,		/* mpllb cfg3 */
		0xe100,		/* mpllb cfg4 */
		0xbd00,		/* mpllb cfg5 */
		0x2000,		/* mpllb cfg6 */
		0x0001,		/* mpllb cfg7 */
		0x4800,		/* mpllb cfg8 */
		0x0000,		/* mpllb cfg9 */
		0x0000,		/* mpllb cfg10 */
		},
};

static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
	.clock = 2000000, /* 20 Gbps */
	.tx = {	0xbe20, /* tx cfg0 */
		0x4800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mplla = { 0x3104,	/* mplla cfg0 */
		0xd105,		/* mplla cfg1 */
		0x9217,		/* mplla cfg2 */
		0x9217,		/* mplla cfg3 */
		0x8c00,		/* mplla cfg4 */
		0x759a,		/* mplla cfg5 */
		0x4000,		/* mplla cfg6 */
		0x0003,		/* mplla cfg7 */
		0x3555,		/* mplla cfg8 */
		0x0001,		/* mplla cfg9 */
		},
};

/* NULL-terminated, sorted by ascending .clock */
static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
	&mtl_c20_dp_rbr,
	&mtl_c20_dp_hbr1,
	&mtl_c20_dp_hbr2,
	&mtl_c20_dp_hbr3,
	&mtl_c20_dp_uhbr10,
	&mtl_c20_dp_uhbr13_5,
	&mtl_c20_dp_uhbr20,
	NULL,
};

/*
 * eDP link rates with 38.4 MHz reference clock.
 */

static const struct intel_c20pll_state xe2hpd_c20_edp_r216 = {
	.clock = 216000,
	.tx = { 0xbe88,
		0x4800,
		0x0000,
		},
	.cmn = { 0x0500,
		 0x0005,
		 0x0000,
		 0x0000,
		},
	.mpllb = { 0x50e1,
		   0x2120,
		   0x8e18,
		   0xbfc1,
		   0x9000,
		   0x78f6,
		   0x0000,
		   0x0000,
		   0x0000,
		   0x0000,
		   0x0000,
		},
};

static const struct intel_c20pll_state xe2hpd_c20_edp_r243 = {
	.clock = 243000,
	.tx = { 0xbe88,
		0x4800,
		0x0000,
		},
	.cmn = { 0x0500,
		 0x0005,
		 0x0000,
		 0x0000,
		},
	.mpllb = { 0x50fd,
		   0x2120,
		   0x8f18,
		   0xbfc1,
		   0xa200,
		   0x8814,
		   0x2000,
		   0x0001,
		   0x1000,
		   0x0000,
		   0x0000,
		},
};

static const struct intel_c20pll_state xe2hpd_c20_edp_r324 = {
	.clock = 324000,
	.tx = { 0xbe88,
		0x4800,
		0x0000,
		},
	.cmn = { 0x0500,
		 0x0005,
		 0x0000,
		 0x0000,
		},
	.mpllb = { 0x30a8,
		   0x2110,
		   0xcd9a,
		   0xbfc1,
		   0x6c00,
		   0x5ab8,
		   0x2000,
		   0x0001,
		   0x6000,
		   0x0000,
		   0x0000,
		},
};

static const struct intel_c20pll_state xe2hpd_c20_edp_r432 = {
	.clock = 432000,
	.tx = { 0xbe88,
		0x4800,
		0x0000,
		},
	.cmn = { 0x0500,
		 0x0005,
		 0x0000,
		 0x0000,
		},
	.mpllb = { 0x30e1,
		   0x2110,
		   0x8e18,
		   0xbfc1,
		   0x9000,
		   0x78f6,
		   0x0000,
		   0x0000,
		   0x0000,
		   0x0000,
		   0x0000,
		},
};

static const struct intel_c20pll_state xe2hpd_c20_edp_r675 = {
	.clock = 675000,
	.tx = { 0xbe88,
		0x4800,
		0x0000,
		},
	.cmn = { 0x0500,
		 0x0005,
		 0x0000,
		 0x0000,
		},
	.mpllb = { 0x10af,
		   0x2108,
		   0xce1a,
		   0xbfc1,
		   0x7080,
		   0x5e80,
		   0x2000,
		   0x0001,
		   0x6400,
		   0x0000,
		   0x0000,
		},
};

/* DP tables plus the eDP intermediate rates, sorted by ascending .clock */
static const struct intel_c20pll_state * const xe2hpd_c20_edp_tables[] = {
	&mtl_c20_dp_rbr,
	&xe2hpd_c20_edp_r216,
	&xe2hpd_c20_edp_r243,
	&mtl_c20_dp_hbr1,
	&xe2hpd_c20_edp_r324,
	&xe2hpd_c20_edp_r432,
	&mtl_c20_dp_hbr2,
	&xe2hpd_c20_edp_r675,
	&mtl_c20_dp_hbr3,
	NULL,
};

static const struct intel_c20pll_state xe2hpd_c20_dp_uhbr13_5 = {
	.clock = 1350000, /* 13.5 Gbps */
	.tx = {	0xbea0, /* tx cfg0 */
		0x4800, /* tx cfg1 */
		0x0000, /* tx cfg2 */
		},
	.cmn = {0x0500, /* cmn cfg0*/
		0x0005, /* cmn cfg1 */
		0x0000, /* cmn cfg2 */
		0x0000, /* cmn cfg3 */
		},
	.mpllb = { 0x015f,	/* mpllb cfg0 */
		0x2205,		/* mpllb cfg1 */
		0x1b17,		/* mpllb cfg2 */
		0xffc1,		/* mpllb cfg3 */
		0xbd00,		/* mpllb cfg4 */
		0x9ec3,		/* mpllb cfg5 */
		0x2000,		/* mpllb cfg6 */
		0x0001,		/* mpllb cfg7 */
		0x4800,		/* mpllb cfg8 */
		0x0000,		/* mpllb cfg9 */
		0x0000,		/* mpllb cfg10 */
		},
};

/* xe2hpd: no UHBR20; uses its own 13.5 Gbps table */
static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = {
	&mtl_c20_dp_rbr,
	&mtl_c20_dp_hbr1,
	&mtl_c20_dp_hbr2,
	&mtl_c20_dp_hbr3,
	&mtl_c20_dp_uhbr10,
	&xe2hpd_c20_dp_uhbr13_5,
	NULL,
};

/* xe3lpd: combined DP + eDP table, all rates up to UHBR20 */
static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = {
	&mtl_c20_dp_rbr,
	&xe2hpd_c20_edp_r216,
	&xe2hpd_c20_edp_r243,
	&mtl_c20_dp_hbr1,
	&xe2hpd_c20_edp_r324,
	&xe2hpd_c20_edp_r432,
	&mtl_c20_dp_hbr2,
	&xe2hpd_c20_edp_r675,
	&mtl_c20_dp_hbr3,
	&mtl_c20_dp_uhbr10,
	&xe2hpd_c20_dp_uhbr13_5,
	&mtl_c20_dp_uhbr20,
	NULL,
};

/*
 * HDMI link rates with 38.4 MHz reference clock.
 */

/* HDMI 25.2 MHz pixel clock */
static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
	.clock = 25200,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0x4,
	.pll[1] = 0,
	.pll[2] = 0xB2,
	.pll[3] = 0,
	.pll[4] = 0,
	.pll[5] = 0,
	.pll[6] = 0,
	.pll[7] = 0,
	.pll[8] = 0x20,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0xD,
	.pll[16] = 0x6,
	.pll[17] = 0x8F,
	.pll[18] = 0x84,
	.pll[19] = 0x23,
};

/* HDMI 27.0 MHz pixel clock */
static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
	.clock = 27000,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0x34,
	.pll[1] = 0,
	.pll[2] = 0xC0,
	.pll[3] = 0,
	.pll[4] = 0,
	.pll[5] = 0,
	.pll[6] = 0,
	.pll[7] = 0,
	.pll[8] = 0x20,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0x80,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0xD,
	.pll[16] = 0x6,
	.pll[17] = 0xCF,
	.pll[18] = 0x84,
	.pll[19] = 0x23,
};

/* HDMI 74.25 MHz pixel clock */
static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
	.clock = 74250,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0xF4,
	.pll[1] = 0,
	.pll[2] = 0x7A,
	.pll[3] = 0,
	.pll[4] = 0,
	.pll[5] = 0,
	.pll[6] = 0,
	.pll[7] = 0,
	.pll[8] = 0x20,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0x58,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0xB,
	.pll[16] = 0x6,
	.pll[17] = 0xF,
	.pll[18] = 0x85,
	.pll[19] = 0x23,
};

/* HDMI 148.5 MHz pixel clock; same as 74.25 except the divider in pll[15] */
static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
	.clock = 148500,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0xF4,
	.pll[1] = 0,
	.pll[2] = 0x7A,
	.pll[3] = 0,
	.pll[4] = 0,
	.pll[5] = 0,
	.pll[6] = 0,
	.pll[7] = 0,
	.pll[8] = 0x20,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0x58,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0xA,
	.pll[16] = 0x6,
	.pll[17] = 0xF,
	.pll[18] = 0x85,
	.pll[19] = 0x23,
};

/* HDMI 594 MHz pixel clock; same as 74.25 except the divider in pll[15] */
static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
	.clock = 594000,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0xF4,
	.pll[1] = 0,
	.pll[2] = 0x7A,
	.pll[3] = 0,
	.pll[4] = 0,
	.pll[5] = 0,
	.pll[6] = 0,
	.pll[7] = 0,
	.pll[8] = 0x20,
	.pll[9] = 0x1,
	.pll[10] = 0,
	.pll[11] = 0,
	.pll[12] = 0x58,
	.pll[13] = 0,
	.pll[14] = 0,
	.pll[15] = 0x8,
	.pll[16] = 0x6,
	.pll[17] = 0xF,
	.pll[18] = 0x85,
	.pll[19] = 0x23,
};

/* Precomputed C10 HDMI PLL tables */
static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
	.clock = 27027,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
	.pll[10] = 0xFF, .pll[11] = 0xCC, .pll[12] = 0x9C, .pll[13] = 0xCB, .pll[14] = 0xCC,
	.pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
};

static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
	.clock = 28320,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00,
	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
	.pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
	.pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
};

static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
	.clock = 30240,
	.tx = 0x10,
	.cmn = 0x1,
	.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00,
	.pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
	.pll[10] = 0xFF, .pll[11]
= 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, 1345 .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1346 }; 1347 1348 static const struct intel_c10pll_state mtl_c10_hdmi_31500 = { 1349 .clock = 31500, 1350 .tx = 0x10, 1351 .cmn = 0x1, 1352 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00, 1353 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1354 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xA0, .pll[13] = 0x00, .pll[14] = 0x00, 1355 .pll[15] = 0x0C, .pll[16] = 0x09, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1356 }; 1357 1358 static const struct intel_c10pll_state mtl_c10_hdmi_36000 = { 1359 .clock = 36000, 1360 .tx = 0x10, 1361 .cmn = 0x1, 1362 .pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00, 1363 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1364 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00, 1365 .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1366 }; 1367 1368 static const struct intel_c10pll_state mtl_c10_hdmi_40000 = { 1369 .clock = 40000, 1370 .tx = 0x10, 1371 .cmn = 0x1, 1372 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00, 1373 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1374 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x55, .pll[13] = 0x55, .pll[14] = 0x55, 1375 .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1376 }; 1377 1378 static const struct intel_c10pll_state mtl_c10_hdmi_49500 = { 1379 .clock = 49500, 1380 .tx = 0x10, 1381 .cmn = 0x1, 1382 .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, 1383 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1384 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, 
1385 .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1386 }; 1387 1388 static const struct intel_c10pll_state mtl_c10_hdmi_50000 = { 1389 .clock = 50000, 1390 .tx = 0x10, 1391 .cmn = 0x1, 1392 .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00, 1393 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1394 .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x2A, .pll[13] = 0xA9, .pll[14] = 0xAA, 1395 .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1396 }; 1397 1398 static const struct intel_c10pll_state mtl_c10_hdmi_57284 = { 1399 .clock = 57284, 1400 .tx = 0x10, 1401 .cmn = 0x1, 1402 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00, 1403 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1404 .pll[10] = 0xFF, .pll[11] = 0x77, .pll[12] = 0x57, .pll[13] = 0x77, .pll[14] = 0x77, 1405 .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1406 }; 1407 1408 static const struct intel_c10pll_state mtl_c10_hdmi_58000 = { 1409 .clock = 58000, 1410 .tx = 0x10, 1411 .cmn = 0x1, 1412 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00, 1413 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1414 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xD5, .pll[13] = 0x55, .pll[14] = 0x55, 1415 .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1416 }; 1417 1418 static const struct intel_c10pll_state mtl_c10_hdmi_65000 = { 1419 .clock = 65000, 1420 .tx = 0x10, 1421 .cmn = 0x1, 1422 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00, 1423 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1424 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xB5, .pll[13] = 0x55, .pll[14] = 0x55, 1425 .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, 
.pll[18] = 0x84, .pll[19] = 0x23, 1426 }; 1427 1428 static const struct intel_c10pll_state mtl_c10_hdmi_71000 = { 1429 .clock = 71000, 1430 .tx = 0x10, 1431 .cmn = 0x1, 1432 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00, 1433 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1434 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55, 1435 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1436 }; 1437 1438 static const struct intel_c10pll_state mtl_c10_hdmi_74176 = { 1439 .clock = 74176, 1440 .tx = 0x10, 1441 .cmn = 0x1, 1442 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, 1443 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1444 .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44, 1445 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1446 }; 1447 1448 static const struct intel_c10pll_state mtl_c10_hdmi_75000 = { 1449 .clock = 75000, 1450 .tx = 0x10, 1451 .cmn = 0x1, 1452 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00, 1453 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1454 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, 1455 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1456 }; 1457 1458 static const struct intel_c10pll_state mtl_c10_hdmi_78750 = { 1459 .clock = 78750, 1460 .tx = 0x10, 1461 .cmn = 0x1, 1462 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00, 1463 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1464 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x08, .pll[13] = 0x00, .pll[14] = 0x00, 1465 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1466 }; 1467 1468 static 
const struct intel_c10pll_state mtl_c10_hdmi_85500 = { 1469 .clock = 85500, 1470 .tx = 0x10, 1471 .cmn = 0x1, 1472 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00, 1473 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1474 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x10, .pll[13] = 0x00, .pll[14] = 0x00, 1475 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1476 }; 1477 1478 static const struct intel_c10pll_state mtl_c10_hdmi_88750 = { 1479 .clock = 88750, 1480 .tx = 0x10, 1481 .cmn = 0x1, 1482 .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00, 1483 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1484 .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x72, .pll[13] = 0xA9, .pll[14] = 0xAA, 1485 .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1486 }; 1487 1488 static const struct intel_c10pll_state mtl_c10_hdmi_106500 = { 1489 .clock = 106500, 1490 .tx = 0x10, 1491 .cmn = 0x1, 1492 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00, 1493 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1494 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xF0, .pll[13] = 0x00, .pll[14] = 0x00, 1495 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1496 }; 1497 1498 static const struct intel_c10pll_state mtl_c10_hdmi_108000 = { 1499 .clock = 108000, 1500 .tx = 0x10, 1501 .cmn = 0x1, 1502 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00, 1503 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1504 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x80, .pll[13] = 0x00, .pll[14] = 0x00, 1505 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1506 }; 1507 1508 static const struct intel_c10pll_state mtl_c10_hdmi_115500 = { 
1509 .clock = 115500, 1510 .tx = 0x10, 1511 .cmn = 0x1, 1512 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00, 1513 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1514 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00, 1515 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1516 }; 1517 1518 static const struct intel_c10pll_state mtl_c10_hdmi_119000 = { 1519 .clock = 119000, 1520 .tx = 0x10, 1521 .cmn = 0x1, 1522 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00, 1523 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1524 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55, 1525 .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1526 }; 1527 1528 static const struct intel_c10pll_state mtl_c10_hdmi_135000 = { 1529 .clock = 135000, 1530 .tx = 0x10, 1531 .cmn = 0x1, 1532 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00, 1533 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1534 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00, 1535 .pll[15] = 0x0A, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1536 }; 1537 1538 static const struct intel_c10pll_state mtl_c10_hdmi_138500 = { 1539 .clock = 138500, 1540 .tx = 0x10, 1541 .cmn = 0x1, 1542 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00, 1543 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1544 .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x22, .pll[13] = 0xA9, .pll[14] = 0xAA, 1545 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1546 }; 1547 1548 static const struct intel_c10pll_state mtl_c10_hdmi_147160 = { 1549 .clock = 147160, 1550 .tx = 0x10, 1551 .cmn = 
0x1, 1552 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00, 1553 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1554 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xA5, .pll[13] = 0x55, .pll[14] = 0x55, 1555 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1556 }; 1557 1558 static const struct intel_c10pll_state mtl_c10_hdmi_148352 = { 1559 .clock = 148352, 1560 .tx = 0x10, 1561 .cmn = 0x1, 1562 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, 1563 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1564 .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44, 1565 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1566 }; 1567 1568 static const struct intel_c10pll_state mtl_c10_hdmi_154000 = { 1569 .clock = 154000, 1570 .tx = 0x10, 1571 .cmn = 0x1, 1572 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00, 1573 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1574 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x35, .pll[13] = 0x55, .pll[14] = 0x55, 1575 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1576 }; 1577 1578 static const struct intel_c10pll_state mtl_c10_hdmi_162000 = { 1579 .clock = 162000, 1580 .tx = 0x10, 1581 .cmn = 0x1, 1582 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00, 1583 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1584 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x60, .pll[13] = 0x00, .pll[14] = 0x00, 1585 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1586 }; 1587 1588 static const struct intel_c10pll_state mtl_c10_hdmi_167000 = { 1589 .clock = 167000, 1590 .tx = 0x10, 1591 .cmn = 0x1, 1592 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 
0x8C, .pll[3] = 0x00, .pll[4] = 0x00, 1593 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1594 .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0xFA, .pll[13] = 0xA9, .pll[14] = 0xAA, 1595 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1596 }; 1597 1598 static const struct intel_c10pll_state mtl_c10_hdmi_197802 = { 1599 .clock = 197802, 1600 .tx = 0x10, 1601 .cmn = 0x1, 1602 .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, 1603 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1604 .pll[10] = 0xFF, .pll[11] = 0x99, .pll[12] = 0x05, .pll[13] = 0x98, .pll[14] = 0x99, 1605 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1606 }; 1607 1608 static const struct intel_c10pll_state mtl_c10_hdmi_198000 = { 1609 .clock = 198000, 1610 .tx = 0x10, 1611 .cmn = 0x1, 1612 .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00, 1613 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1614 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00, 1615 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1616 }; 1617 1618 static const struct intel_c10pll_state mtl_c10_hdmi_209800 = { 1619 .clock = 209800, 1620 .tx = 0x10, 1621 .cmn = 0x1, 1622 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00, 1623 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1624 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x45, .pll[13] = 0x55, .pll[14] = 0x55, 1625 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1626 }; 1627 1628 static const struct intel_c10pll_state mtl_c10_hdmi_241500 = { 1629 .clock = 241500, 1630 .tx = 0x10, 1631 .cmn = 0x1, 1632 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00, 1633 .pll[5] 
= 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1634 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xC8, .pll[13] = 0x00, .pll[14] = 0x00, 1635 .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1636 }; 1637 1638 static const struct intel_c10pll_state mtl_c10_hdmi_262750 = { 1639 .clock = 262750, 1640 .tx = 0x10, 1641 .cmn = 0x1, 1642 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00, 1643 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1644 .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x6C, .pll[13] = 0xA9, .pll[14] = 0xAA, 1645 .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1646 }; 1647 1648 static const struct intel_c10pll_state mtl_c10_hdmi_268500 = { 1649 .clock = 268500, 1650 .tx = 0x10, 1651 .cmn = 0x1, 1652 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00, 1653 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1654 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xEC, .pll[13] = 0x00, .pll[14] = 0x00, 1655 .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1656 }; 1657 1658 static const struct intel_c10pll_state mtl_c10_hdmi_296703 = { 1659 .clock = 296703, 1660 .tx = 0x10, 1661 .cmn = 0x1, 1662 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, 1663 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1664 .pll[10] = 0xFF, .pll[11] = 0x33, .pll[12] = 0x44, .pll[13] = 0x33, .pll[14] = 0x33, 1665 .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1666 }; 1667 1668 static const struct intel_c10pll_state mtl_c10_hdmi_297000 = { 1669 .clock = 297000, 1670 .tx = 0x10, 1671 .cmn = 0x1, 1672 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, 1673 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 
0x20, .pll[9] = 0xFF, 1674 .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x58, .pll[13] = 0x00, .pll[14] = 0x00, 1675 .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1676 }; 1677 1678 static const struct intel_c10pll_state mtl_c10_hdmi_319750 = { 1679 .clock = 319750, 1680 .tx = 0x10, 1681 .cmn = 0x1, 1682 .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00, 1683 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1684 .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x44, .pll[13] = 0xA9, .pll[14] = 0xAA, 1685 .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1686 }; 1687 1688 static const struct intel_c10pll_state mtl_c10_hdmi_497750 = { 1689 .clock = 497750, 1690 .tx = 0x10, 1691 .cmn = 0x1, 1692 .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00, 1693 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1694 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x9F, .pll[13] = 0x55, .pll[14] = 0x55, 1695 .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23, 1696 }; 1697 1698 static const struct intel_c10pll_state mtl_c10_hdmi_592000 = { 1699 .clock = 592000, 1700 .tx = 0x10, 1701 .cmn = 0x1, 1702 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, 1703 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1704 .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x15, .pll[13] = 0x55, .pll[14] = 0x55, 1705 .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1706 }; 1707 1708 static const struct intel_c10pll_state mtl_c10_hdmi_593407 = { 1709 .clock = 593407, 1710 .tx = 0x10, 1711 .cmn = 0x1, 1712 .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00, 1713 .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF, 1714 .pll[10] = 0xFF, 
.pll[11] = 0x3B, .pll[12] = 0x44, .pll[13] = 0xBA, .pll[14] = 0xBB, 1715 .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23, 1716 }; 1717 1718 static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = { 1719 &mtl_c10_hdmi_25_2, /* Consolidated Table */ 1720 &mtl_c10_hdmi_27_0, /* Consolidated Table */ 1721 &mtl_c10_hdmi_27027, 1722 &mtl_c10_hdmi_28320, 1723 &mtl_c10_hdmi_30240, 1724 &mtl_c10_hdmi_31500, 1725 &mtl_c10_hdmi_36000, 1726 &mtl_c10_hdmi_40000, 1727 &mtl_c10_hdmi_49500, 1728 &mtl_c10_hdmi_50000, 1729 &mtl_c10_hdmi_57284, 1730 &mtl_c10_hdmi_58000, 1731 &mtl_c10_hdmi_65000, 1732 &mtl_c10_hdmi_71000, 1733 &mtl_c10_hdmi_74176, 1734 &mtl_c10_hdmi_74_25, /* Consolidated Table */ 1735 &mtl_c10_hdmi_75000, 1736 &mtl_c10_hdmi_78750, 1737 &mtl_c10_hdmi_85500, 1738 &mtl_c10_hdmi_88750, 1739 &mtl_c10_hdmi_106500, 1740 &mtl_c10_hdmi_108000, 1741 &mtl_c10_hdmi_115500, 1742 &mtl_c10_hdmi_119000, 1743 &mtl_c10_hdmi_135000, 1744 &mtl_c10_hdmi_138500, 1745 &mtl_c10_hdmi_147160, 1746 &mtl_c10_hdmi_148352, 1747 &mtl_c10_hdmi_148_5, /* Consolidated Table */ 1748 &mtl_c10_hdmi_154000, 1749 &mtl_c10_hdmi_162000, 1750 &mtl_c10_hdmi_167000, 1751 &mtl_c10_hdmi_197802, 1752 &mtl_c10_hdmi_198000, 1753 &mtl_c10_hdmi_209800, 1754 &mtl_c10_hdmi_241500, 1755 &mtl_c10_hdmi_262750, 1756 &mtl_c10_hdmi_268500, 1757 &mtl_c10_hdmi_296703, 1758 &mtl_c10_hdmi_297000, 1759 &mtl_c10_hdmi_319750, 1760 &mtl_c10_hdmi_497750, 1761 &mtl_c10_hdmi_592000, 1762 &mtl_c10_hdmi_593407, 1763 &mtl_c10_hdmi_594, /* Consolidated Table */ 1764 NULL, 1765 }; 1766 1767 static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = { 1768 .clock = 25175, 1769 .tx = { 0xbe88, /* tx cfg0 */ 1770 0x9800, /* tx cfg1 */ 1771 0x0000, /* tx cfg2 */ 1772 }, 1773 .cmn = { 0x0500, /* cmn cfg0*/ 1774 0x0005, /* cmn cfg1 */ 1775 0x0000, /* cmn cfg2 */ 1776 0x0000, /* cmn cfg3 */ 1777 }, 1778 .mpllb = { 0xa0d2, /* mpllb cfg0 */ 1779 0x7d80, /* mpllb cfg1 */ 1780 0x0906, /* mpllb cfg2 
*/ 1781 0xbe40, /* mpllb cfg3 */ 1782 0x0000, /* mpllb cfg4 */ 1783 0x0000, /* mpllb cfg5 */ 1784 0x0200, /* mpllb cfg6 */ 1785 0x0001, /* mpllb cfg7 */ 1786 0x0000, /* mpllb cfg8 */ 1787 0x0000, /* mpllb cfg9 */ 1788 0x0001, /* mpllb cfg10 */ 1789 }, 1790 }; 1791 1792 static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = { 1793 .clock = 27000, 1794 .tx = { 0xbe88, /* tx cfg0 */ 1795 0x9800, /* tx cfg1 */ 1796 0x0000, /* tx cfg2 */ 1797 }, 1798 .cmn = { 0x0500, /* cmn cfg0*/ 1799 0x0005, /* cmn cfg1 */ 1800 0x0000, /* cmn cfg2 */ 1801 0x0000, /* cmn cfg3 */ 1802 }, 1803 .mpllb = { 0xa0e0, /* mpllb cfg0 */ 1804 0x7d80, /* mpllb cfg1 */ 1805 0x0906, /* mpllb cfg2 */ 1806 0xbe40, /* mpllb cfg3 */ 1807 0x0000, /* mpllb cfg4 */ 1808 0x0000, /* mpllb cfg5 */ 1809 0x2200, /* mpllb cfg6 */ 1810 0x0001, /* mpllb cfg7 */ 1811 0x8000, /* mpllb cfg8 */ 1812 0x0000, /* mpllb cfg9 */ 1813 0x0001, /* mpllb cfg10 */ 1814 }, 1815 }; 1816 1817 static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = { 1818 .clock = 74250, 1819 .tx = { 0xbe88, /* tx cfg0 */ 1820 0x9800, /* tx cfg1 */ 1821 0x0000, /* tx cfg2 */ 1822 }, 1823 .cmn = { 0x0500, /* cmn cfg0*/ 1824 0x0005, /* cmn cfg1 */ 1825 0x0000, /* cmn cfg2 */ 1826 0x0000, /* cmn cfg3 */ 1827 }, 1828 .mpllb = { 0x609a, /* mpllb cfg0 */ 1829 0x7d40, /* mpllb cfg1 */ 1830 0xca06, /* mpllb cfg2 */ 1831 0xbe40, /* mpllb cfg3 */ 1832 0x0000, /* mpllb cfg4 */ 1833 0x0000, /* mpllb cfg5 */ 1834 0x2200, /* mpllb cfg6 */ 1835 0x0001, /* mpllb cfg7 */ 1836 0x5800, /* mpllb cfg8 */ 1837 0x0000, /* mpllb cfg9 */ 1838 0x0001, /* mpllb cfg10 */ 1839 }, 1840 }; 1841 1842 static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = { 1843 .clock = 148500, 1844 .tx = { 0xbe88, /* tx cfg0 */ 1845 0x9800, /* tx cfg1 */ 1846 0x0000, /* tx cfg2 */ 1847 }, 1848 .cmn = { 0x0500, /* cmn cfg0*/ 1849 0x0005, /* cmn cfg1 */ 1850 0x0000, /* cmn cfg2 */ 1851 0x0000, /* cmn cfg3 */ 1852 }, 1853 .mpllb = { 0x409a, /* mpllb cfg0 */ 1854 0x7d20, /* mpllb cfg1 
*/ 1855 0xca06, /* mpllb cfg2 */ 1856 0xbe40, /* mpllb cfg3 */ 1857 0x0000, /* mpllb cfg4 */ 1858 0x0000, /* mpllb cfg5 */ 1859 0x2200, /* mpllb cfg6 */ 1860 0x0001, /* mpllb cfg7 */ 1861 0x5800, /* mpllb cfg8 */ 1862 0x0000, /* mpllb cfg9 */ 1863 0x0001, /* mpllb cfg10 */ 1864 }, 1865 }; 1866 1867 static const struct intel_c20pll_state mtl_c20_hdmi_594 = { 1868 .clock = 594000, 1869 .tx = { 0xbe88, /* tx cfg0 */ 1870 0x9800, /* tx cfg1 */ 1871 0x0000, /* tx cfg2 */ 1872 }, 1873 .cmn = { 0x0500, /* cmn cfg0*/ 1874 0x0005, /* cmn cfg1 */ 1875 0x0000, /* cmn cfg2 */ 1876 0x0000, /* cmn cfg3 */ 1877 }, 1878 .mpllb = { 0x009a, /* mpllb cfg0 */ 1879 0x7d08, /* mpllb cfg1 */ 1880 0xca06, /* mpllb cfg2 */ 1881 0xbe40, /* mpllb cfg3 */ 1882 0x0000, /* mpllb cfg4 */ 1883 0x0000, /* mpllb cfg5 */ 1884 0x2200, /* mpllb cfg6 */ 1885 0x0001, /* mpllb cfg7 */ 1886 0x5800, /* mpllb cfg8 */ 1887 0x0000, /* mpllb cfg9 */ 1888 0x0001, /* mpllb cfg10 */ 1889 }, 1890 }; 1891 1892 static const struct intel_c20pll_state mtl_c20_hdmi_300 = { 1893 .clock = 3000000, 1894 .tx = { 0xbe98, /* tx cfg0 */ 1895 0x8800, /* tx cfg1 */ 1896 0x0000, /* tx cfg2 */ 1897 }, 1898 .cmn = { 0x0500, /* cmn cfg0*/ 1899 0x0005, /* cmn cfg1 */ 1900 0x0000, /* cmn cfg2 */ 1901 0x0000, /* cmn cfg3 */ 1902 }, 1903 .mpllb = { 0x309c, /* mpllb cfg0 */ 1904 0x2110, /* mpllb cfg1 */ 1905 0xca06, /* mpllb cfg2 */ 1906 0xbe40, /* mpllb cfg3 */ 1907 0x0000, /* mpllb cfg4 */ 1908 0x0000, /* mpllb cfg5 */ 1909 0x2200, /* mpllb cfg6 */ 1910 0x0001, /* mpllb cfg7 */ 1911 0x2000, /* mpllb cfg8 */ 1912 0x0000, /* mpllb cfg9 */ 1913 0x0004, /* mpllb cfg10 */ 1914 }, 1915 }; 1916 1917 static const struct intel_c20pll_state mtl_c20_hdmi_600 = { 1918 .clock = 6000000, 1919 .tx = { 0xbe98, /* tx cfg0 */ 1920 0x8800, /* tx cfg1 */ 1921 0x0000, /* tx cfg2 */ 1922 }, 1923 .cmn = { 0x0500, /* cmn cfg0*/ 1924 0x0005, /* cmn cfg1 */ 1925 0x0000, /* cmn cfg2 */ 1926 0x0000, /* cmn cfg3 */ 1927 }, 1928 .mpllb = { 0x109c, /* mpllb cfg0 */ 
1929 0x2108, /* mpllb cfg1 */ 1930 0xca06, /* mpllb cfg2 */ 1931 0xbe40, /* mpllb cfg3 */ 1932 0x0000, /* mpllb cfg4 */ 1933 0x0000, /* mpllb cfg5 */ 1934 0x2200, /* mpllb cfg6 */ 1935 0x0001, /* mpllb cfg7 */ 1936 0x2000, /* mpllb cfg8 */ 1937 0x0000, /* mpllb cfg9 */ 1938 0x0004, /* mpllb cfg10 */ 1939 }, 1940 }; 1941 1942 static const struct intel_c20pll_state mtl_c20_hdmi_800 = { 1943 .clock = 8000000, 1944 .tx = { 0xbe98, /* tx cfg0 */ 1945 0x8800, /* tx cfg1 */ 1946 0x0000, /* tx cfg2 */ 1947 }, 1948 .cmn = { 0x0500, /* cmn cfg0*/ 1949 0x0005, /* cmn cfg1 */ 1950 0x0000, /* cmn cfg2 */ 1951 0x0000, /* cmn cfg3 */ 1952 }, 1953 .mpllb = { 0x10d0, /* mpllb cfg0 */ 1954 0x2108, /* mpllb cfg1 */ 1955 0x4a06, /* mpllb cfg2 */ 1956 0xbe40, /* mpllb cfg3 */ 1957 0x0000, /* mpllb cfg4 */ 1958 0x0000, /* mpllb cfg5 */ 1959 0x2200, /* mpllb cfg6 */ 1960 0x0003, /* mpllb cfg7 */ 1961 0x2aaa, /* mpllb cfg8 */ 1962 0x0002, /* mpllb cfg9 */ 1963 0x0004, /* mpllb cfg10 */ 1964 }, 1965 }; 1966 1967 static const struct intel_c20pll_state mtl_c20_hdmi_1000 = { 1968 .clock = 10000000, 1969 .tx = { 0xbe98, /* tx cfg0 */ 1970 0x8800, /* tx cfg1 */ 1971 0x0000, /* tx cfg2 */ 1972 }, 1973 .cmn = { 0x0500, /* cmn cfg0*/ 1974 0x0005, /* cmn cfg1 */ 1975 0x0000, /* cmn cfg2 */ 1976 0x0000, /* cmn cfg3 */ 1977 }, 1978 .mpllb = { 0x1104, /* mpllb cfg0 */ 1979 0x2108, /* mpllb cfg1 */ 1980 0x0a06, /* mpllb cfg2 */ 1981 0xbe40, /* mpllb cfg3 */ 1982 0x0000, /* mpllb cfg4 */ 1983 0x0000, /* mpllb cfg5 */ 1984 0x2200, /* mpllb cfg6 */ 1985 0x0003, /* mpllb cfg7 */ 1986 0x3555, /* mpllb cfg8 */ 1987 0x0001, /* mpllb cfg9 */ 1988 0x0004, /* mpllb cfg10 */ 1989 }, 1990 }; 1991 1992 static const struct intel_c20pll_state mtl_c20_hdmi_1200 = { 1993 .clock = 12000000, 1994 .tx = { 0xbe98, /* tx cfg0 */ 1995 0x8800, /* tx cfg1 */ 1996 0x0000, /* tx cfg2 */ 1997 }, 1998 .cmn = { 0x0500, /* cmn cfg0*/ 1999 0x0005, /* cmn cfg1 */ 2000 0x0000, /* cmn cfg2 */ 2001 0x0000, /* cmn cfg3 */ 2002 }, 2003 
.mpllb = { 0x1138, /* mpllb cfg0 */ 2004 0x2108, /* mpllb cfg1 */ 2005 0x5486, /* mpllb cfg2 */ 2006 0xfe40, /* mpllb cfg3 */ 2007 0x0000, /* mpllb cfg4 */ 2008 0x0000, /* mpllb cfg5 */ 2009 0x2200, /* mpllb cfg6 */ 2010 0x0001, /* mpllb cfg7 */ 2011 0x4000, /* mpllb cfg8 */ 2012 0x0000, /* mpllb cfg9 */ 2013 0x0004, /* mpllb cfg10 */ 2014 }, 2015 }; 2016 2017 static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = { 2018 &mtl_c20_hdmi_25_175, 2019 &mtl_c20_hdmi_27_0, 2020 &mtl_c20_hdmi_74_25, 2021 &mtl_c20_hdmi_148_5, 2022 &mtl_c20_hdmi_594, 2023 &mtl_c20_hdmi_300, 2024 &mtl_c20_hdmi_600, 2025 &mtl_c20_hdmi_800, 2026 &mtl_c20_hdmi_1000, 2027 &mtl_c20_hdmi_1200, 2028 NULL, 2029 }; 2030 2031 static const struct intel_c10pll_state * const * 2032 intel_c10pll_tables_get(const struct intel_crtc_state *crtc_state, 2033 struct intel_encoder *encoder) 2034 { 2035 if (intel_crtc_has_dp_encoder(crtc_state)) { 2036 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 2037 return mtl_c10_edp_tables; 2038 else 2039 return mtl_c10_dp_tables; 2040 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 2041 return mtl_c10_hdmi_tables; 2042 } 2043 2044 MISSING_CASE(encoder->type); 2045 return NULL; 2046 } 2047 2048 static void intel_cx0pll_update_ssc(struct intel_encoder *encoder, 2049 struct intel_cx0pll_state *pll_state, bool is_dp) 2050 { 2051 struct intel_display *display = to_intel_display(encoder); 2052 2053 if (is_dp) { 2054 if (intel_panel_use_ssc(display)) { 2055 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2056 pll_state->ssc_enabled = 2057 (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); 2058 } 2059 } 2060 } 2061 2062 #define C10_PLL_SSC_REG_START_IDX 4 2063 #define C10_PLL_SSC_REG_COUNT 5 2064 2065 static bool intel_c10pll_ssc_enabled(const struct intel_c10pll_state *pll_state) 2066 { 2067 return memchr_inv(&pll_state->pll[C10_PLL_SSC_REG_START_IDX], 2068 0, sizeof(pll_state->pll[0]) * C10_PLL_SSC_REG_COUNT); 2069 } 2070 
/*
 * Zero out the SSC registers (pll[4]..pll[8]) of the computed C10 PLL
 * state when SSC is not enabled, so the hardware is not programmed with
 * leftover SSC values from the source table.
 */
static void intel_c10pll_update_pll(struct intel_encoder *encoder,
				    struct intel_cx0pll_state *pll_state)
{
	struct intel_display *display = to_intel_display(encoder);
	int i;

	if (pll_state->ssc_enabled)
		return;

	drm_WARN_ON(display->drm, ARRAY_SIZE(pll_state->c10.pll) <
		    C10_PLL_SSC_REG_START_IDX + C10_PLL_SSC_REG_COUNT);
	for (i = C10_PLL_SSC_REG_START_IDX;
	     i < C10_PLL_SSC_REG_START_IDX + C10_PLL_SSC_REG_COUNT;
	     i++)
		pll_state->c10.pll[i] = 0;
}

/* A C10 PLL state is DP if the HDMI divider field in pll[15] is zero. */
static bool c10pll_state_is_dp(const struct intel_c10pll_state *pll_state)
{
	return !REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
}

/* A C20 PLL state is DP if the DP flag is set in the VDR serdes rate. */
static bool c20pll_state_is_dp(const struct intel_c20pll_state *pll_state)
{
	return pll_state->vdr.serdes_rate & PHY_C20_IS_DP;
}

/* Dispatch the DP check to the C10 or C20 variant of the PLL state. */
static bool cx0pll_state_is_dp(const struct intel_cx0pll_state *pll_state)
{
	if (pll_state->use_c10)
		return c10pll_state_is_dp(&pll_state->c10);

	return c20pll_state_is_dp(&pll_state->c20);
}

/*
 * TODO: Convert the following to align with intel_c20pll_find_table() and
 * intel_c20pll_calc_state_from_table().
 */
/*
 * Look up the precomputed table whose .clock matches port_clock exactly and
 * fill in the C10 PLL state from it (applying the SSC adjustments above).
 *
 * Returns 0 on a match, -EINVAL if no table entry matches.
 */
static int intel_c10pll_calc_state_from_table(struct intel_encoder *encoder,
					      const struct intel_c10pll_state * const *tables,
					      bool is_dp, int port_clock, int lane_count,
					      struct intel_cx0pll_state *pll_state)
{
	struct intel_display *display = to_intel_display(encoder);
	int i;

	for (i = 0; tables[i]; i++) {
		if (port_clock == tables[i]->clock) {
			pll_state->c10 = *tables[i];
			intel_cx0pll_update_ssc(encoder, pll_state, is_dp);
			intel_c10pll_update_pll(encoder, pll_state);

			pll_state->use_c10 = true;
			pll_state->lane_count = lane_count;

			drm_WARN_ON(display->drm, is_dp != c10pll_state_is_dp(&pll_state->c10));

			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Compute the C10 PLL state for the given CRTC state: first try the
 * precomputed tables; for HDMI clocks not covered by a table, fall back to
 * the SNPS PHY HDMI PLL algorithm.
 */
static int intel_c10pll_calc_state(const struct intel_crtc_state *crtc_state,
				   struct intel_encoder *encoder,
				   struct intel_dpll_hw_state *hw_state)
{
	struct intel_display *display = to_intel_display(encoder);
	bool is_dp = intel_crtc_has_dp_encoder(crtc_state);
	const struct intel_c10pll_state * const *tables;
	int err;

	tables = intel_c10pll_tables_get(crtc_state, encoder);
	if (!tables)
		return -EINVAL;

	err = intel_c10pll_calc_state_from_table(encoder, tables, is_dp,
						 crtc_state->port_clock, crtc_state->lane_count,
						 &hw_state->cx0pll);

	if (err == 0 || !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		return err;

	/* For HDMI PLLs try SNPS PHY algorithm, if there are no precomputed tables */
	intel_snps_hdmi_pll_compute_c10pll(&hw_state->cx0pll.c10,
					   crtc_state->port_clock);
	intel_c10pll_update_pll(encoder, &hw_state->cx0pll);

	hw_state->cx0pll.use_c10 = true;
	hw_state->cx0pll.lane_count = crtc_state->lane_count;

	drm_WARN_ON(display->drm, is_dp != c10pll_state_is_dp(&hw_state->cx0pll.c10));

	return 0;
}

/*
 * Derive the port clock (kHz) back from a C10 PLL register state:
 * multiplier and optional fractional divider from pll[2..3]/pll[9..14]
 * against the 38.4 MHz reference, scaled by the TX clock divider and
 * doubled when the HDMI divider field is set.
 */
static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
					const struct intel_c10pll_state *pll_state)
{
	unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
	unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
	int tmpclk = 0;

	/* Fractional divider fields are only valid when FRACEN is set. */
	if (pll_state->pll[0] & C10_PLL0_FRACEN) {
		frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
		frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13];
		frac_den = pll_state->pll[10] << 8 | pll_state->pll[9];
	}

	multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
		      pll_state->pll[2]) / 2 + 16;

	tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
	hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);

	/* multiplier is in 16.16 fixed point here; the divisor folds in the 2^16 */
	tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
				       DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
				       10 << (tx_clk_div + 16));
	tmpclk *= (hdmi_div ? 2 : 1);

	return tmpclk;
}

/*
 * Count the TX lanes that are not individually disabled, probing only the
 * PHY lane(s) implied by the port width programmed in DDI_BUF_CTL.
 */
static int readout_enabled_lane_count(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	u8 enabled_tx_lane_count = 0;
	int max_tx_lane_count;
	int tx_lane;

	/*
	 * TODO: also check inactive TX lanes in all PHY lanes owned by the
	 * display. For now checking only those PHY lane(s) which are owned
	 * based on the active TX lane count (i.e.
	 * 1,2 active TX lanes -> PHY lane#0
	 * 3,4 active TX lanes -> PHY lane#0 and PHY lane#1).
	 */
	max_tx_lane_count = DDI_PORT_WIDTH_GET(intel_de_read(display, DDI_BUF_CTL(encoder->port)));
	if (!drm_WARN_ON(display->drm, max_tx_lane_count == 0))
		max_tx_lane_count = roundup_pow_of_two(max_tx_lane_count);

	for (tx_lane = 0; tx_lane < max_tx_lane_count; tx_lane++) {
		/* TX lanes 0,1 live in PHY lane 0; TX lanes 2,3 in PHY lane 1 */
		u8 phy_lane_mask = tx_lane < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
		int tx = tx_lane % 2 + 1;
		u8 val;

		val = intel_cx0_read(encoder, phy_lane_mask, PHY_CX0_TX_CONTROL(tx, 2));
		if (!(val & CONTROL2_DISABLE_SINGLE_TX))
			enabled_tx_lane_count++;
	}

	return enabled_tx_lane_count;
}

/* Read the SSC enable bit for PLL A or B from XELPDP_PORT_CLOCK_CTL. */
static bool readout_ssc_state(struct intel_encoder *encoder, bool is_mpll_b)
{
	struct intel_display *display = to_intel_display(encoder);

	return intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)) &
		(is_mpll_b ? XELPDP_SSC_ENABLE_PLLB : XELPDP_SSC_ENABLE_PLLA);
}

static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
					  struct intel_cx0pll_state *cx0pll_state)
{
	struct intel_c10pll_state *pll_state = &cx0pll_state->c10;
	struct intel_display *display = to_intel_display(encoder);
	enum phy phy = intel_encoder_to_phy(encoder);
	u8 lane = INTEL_CX0_LANE0;
	intel_wakeref_t wakeref;
	int i;

	cx0pll_state->use_c10 = true;

	wakeref = intel_cx0_phy_transaction_begin(encoder);

	/*
	 * According to C10 VDR Register programming Sequence we need
	 * to do this to read PHY internal registers from MsgBus.
2252 */ 2253 intel_c10_msgbus_access_begin(encoder, lane); 2254 2255 cx0pll_state->lane_count = readout_enabled_lane_count(encoder); 2256 2257 for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) 2258 pll_state->pll[i] = intel_cx0_read(encoder, lane, PHY_C10_VDR_PLL(i)); 2259 2260 pll_state->cmn = intel_cx0_read(encoder, lane, PHY_C10_VDR_CMN(0)); 2261 pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0)); 2262 2263 intel_cx0_phy_transaction_end(encoder, wakeref); 2264 2265 pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state); 2266 2267 cx0pll_state->ssc_enabled = readout_ssc_state(encoder, true); 2268 drm_WARN(display->drm, 2269 cx0pll_state->ssc_enabled != intel_c10pll_ssc_enabled(pll_state), 2270 "PHY %c: SSC enabled state (%s), doesn't match PLL configuration (%s)\n", 2271 phy_name(phy), str_yes_no(cx0pll_state->ssc_enabled), 2272 intel_c10pll_ssc_enabled(pll_state) ? "SSC-enabled" : "SSC-disabled"); 2273 } 2274 2275 static void intel_c10_pll_program(struct intel_display *display, 2276 struct intel_encoder *encoder, 2277 const struct intel_c10pll_state *pll_state) 2278 { 2279 int i; 2280 2281 intel_c10_msgbus_access_begin(encoder, INTEL_CX0_BOTH_LANES); 2282 2283 /* Program the pll values only for the master lane */ 2284 for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++) 2285 intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i), 2286 pll_state->pll[i], 2287 (i % 4) ? 
MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED); 2288 2289 intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED); 2290 intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED); 2291 2292 /* Custom width needs to be programmed to 0 for both the phy lanes */ 2293 intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH, 2294 C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10, 2295 MB_WRITE_COMMITTED); 2296 2297 intel_c10_msgbus_access_commit(encoder, INTEL_CX0_LANE0, true); 2298 } 2299 2300 static void intel_c10pll_dump_hw_state(struct drm_printer *p, 2301 const struct intel_c10pll_state *hw_state) 2302 { 2303 bool fracen; 2304 int i; 2305 unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; 2306 unsigned int multiplier, tx_clk_div; 2307 2308 fracen = hw_state->pll[0] & C10_PLL0_FRACEN; 2309 drm_printf(p, "c10pll_hw_state: clock: %d, fracen: %s, ", 2310 hw_state->clock, str_yes_no(fracen)); 2311 2312 if (fracen) { 2313 frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11]; 2314 frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13]; 2315 frac_den = hw_state->pll[10] << 8 | hw_state->pll[9]; 2316 drm_printf(p, "quot: %u, rem: %u, den: %u,\n", 2317 frac_quot, frac_rem, frac_den); 2318 } 2319 2320 multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 | 2321 hw_state->pll[2]) / 2 + 16; 2322 tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]); 2323 drm_printf(p, 2324 "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div); 2325 2326 drm_printf(p, "c10pll_rawhw_state:"); 2327 drm_printf(p, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn); 2328 2329 BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4); 2330 for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4) 2331 drm_printf(p, 2332 "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n", 2333 i, hw_state->pll[i], i + 1, hw_state->pll[i + 1], 2334 i + 2, hw_state->pll[i + 
2], i + 3, hw_state->pll[i + 3]); 2335 } 2336 2337 /* 2338 * Some ARLs SoCs have the same drm PCI IDs, so need a helper to differentiate based 2339 * on the host bridge device ID to get the correct txx_mics value. 2340 */ 2341 static bool is_arrowlake_s_by_host_bridge(void) 2342 { 2343 struct pci_dev *pdev = NULL; 2344 u16 host_bridge_pci_dev_id; 2345 2346 while ((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, pdev))) 2347 host_bridge_pci_dev_id = pdev->device; 2348 2349 return pdev && IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id); 2350 } 2351 2352 static u16 intel_c20_hdmi_tmds_tx_cgf_1(const struct intel_crtc_state *crtc_state) 2353 { 2354 struct intel_display *display = to_intel_display(crtc_state); 2355 u16 tx_misc; 2356 u16 tx_dcc_cal_dac_ctrl_range = 8; 2357 u16 tx_term_ctrl = 2; 2358 2359 if (DISPLAY_VER(display) >= 20) { 2360 tx_misc = 5; 2361 tx_term_ctrl = 4; 2362 } else if (display->platform.battlemage) { 2363 tx_misc = 0; 2364 } else if (display->platform.meteorlake_u || 2365 is_arrowlake_s_by_host_bridge()) { 2366 tx_misc = 3; 2367 } else { 2368 tx_misc = 7; 2369 } 2370 2371 return (C20_PHY_TX_MISC(tx_misc) | 2372 C20_PHY_TX_DCC_CAL_RANGE(tx_dcc_cal_dac_ctrl_range) | 2373 C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl)); 2374 } 2375 2376 static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_state, 2377 struct intel_c20pll_state *pll_state) 2378 { 2379 u64 datarate; 2380 u64 mpll_tx_clk_div; 2381 u64 vco_freq_shift; 2382 u64 vco_freq; 2383 u64 multiplier; 2384 u64 mpll_multiplier; 2385 u64 mpll_fracn_quot; 2386 u64 mpll_fracn_rem; 2387 u8 mpllb_ana_freq_vco; 2388 u8 mpll_div_multiplier; 2389 2390 if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000) 2391 return -EINVAL; 2392 2393 datarate = ((u64)crtc_state->port_clock * 1000) * 10; 2394 mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate)); 2395 vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, 
(u64)datarate)); 2396 vco_freq = (datarate << vco_freq_shift) >> 8; 2397 multiplier = div64_u64((vco_freq << 28), (REFCLK_38_4_MHZ >> 4)); 2398 mpll_multiplier = 2 * (multiplier >> 32); 2399 2400 mpll_fracn_quot = (multiplier >> 16) & 0xFFFF; 2401 mpll_fracn_rem = multiplier & 0xFFFF; 2402 2403 mpll_div_multiplier = min_t(u8, div64_u64((vco_freq * 16 + (datarate >> 1)), 2404 datarate), 255); 2405 2406 if (vco_freq <= DATARATE_3000000000) 2407 mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_3; 2408 else if (vco_freq <= DATARATE_3500000000) 2409 mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_2; 2410 else if (vco_freq <= DATARATE_4000000000) 2411 mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_1; 2412 else 2413 mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0; 2414 2415 pll_state->clock = crtc_state->port_clock; 2416 pll_state->tx[0] = 0xbe88; 2417 pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state); 2418 pll_state->tx[2] = 0x0000; 2419 pll_state->cmn[0] = 0x0500; 2420 pll_state->cmn[1] = 0x0005; 2421 pll_state->cmn[2] = 0x0000; 2422 pll_state->cmn[3] = 0x0000; 2423 pll_state->mpllb[0] = (MPLL_TX_CLK_DIV(mpll_tx_clk_div) | 2424 MPLL_MULTIPLIER(mpll_multiplier)); 2425 pll_state->mpllb[1] = (CAL_DAC_CODE(CAL_DAC_CODE_31) | 2426 WORD_CLK_DIV | 2427 MPLL_DIV_MULTIPLIER(mpll_div_multiplier)); 2428 pll_state->mpllb[2] = (MPLLB_ANA_FREQ_VCO(mpllb_ana_freq_vco) | 2429 CP_PROP(CP_PROP_20) | 2430 CP_INT(CP_INT_6)); 2431 pll_state->mpllb[3] = (V2I(V2I_2) | 2432 CP_PROP_GS(CP_PROP_GS_30) | 2433 CP_INT_GS(CP_INT_GS_28)); 2434 pll_state->mpllb[4] = 0x0000; 2435 pll_state->mpllb[5] = 0x0000; 2436 pll_state->mpllb[6] = (C20_MPLLB_FRACEN | SSC_UP_SPREAD); 2437 pll_state->mpllb[7] = MPLL_FRACN_DEN; 2438 pll_state->mpllb[8] = mpll_fracn_quot; 2439 pll_state->mpllb[9] = mpll_fracn_rem; 2440 pll_state->mpllb[10] = HDMI_DIV(HDMI_DIV_1); 2441 2442 return 0; 2443 } 2444 2445 static const struct intel_c20pll_state * const * 2446 intel_c20_pll_tables_get(const struct intel_crtc_state *crtc_state, 2447 struct 
intel_encoder *encoder) 2448 { 2449 struct intel_display *display = to_intel_display(crtc_state); 2450 2451 if (intel_crtc_has_dp_encoder(crtc_state)) { 2452 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { 2453 if (DISPLAY_RUNTIME_INFO(display)->edp_typec_support) 2454 return xe3lpd_c20_dp_edp_tables; 2455 if (DISPLAY_VERx100(display) == 1401) 2456 return xe2hpd_c20_edp_tables; 2457 } 2458 2459 if (DISPLAY_VER(display) >= 30) 2460 return xe3lpd_c20_dp_edp_tables; 2461 else if (DISPLAY_VERx100(display) == 1401) 2462 return xe2hpd_c20_dp_tables; 2463 else 2464 return mtl_c20_dp_tables; 2465 2466 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 2467 return mtl_c20_hdmi_tables; 2468 } 2469 2470 MISSING_CASE(encoder->type); 2471 return NULL; 2472 } 2473 2474 static u8 intel_c20_get_dp_rate(u32 clock) 2475 { 2476 switch (clock) { 2477 case 162000: /* 1.62 Gbps DP1.4 */ 2478 return 0; 2479 case 270000: /* 2.7 Gbps DP1.4 */ 2480 return 1; 2481 case 540000: /* 5.4 Gbps DP 1.4 */ 2482 return 2; 2483 case 810000: /* 8.1 Gbps DP1.4 */ 2484 return 3; 2485 case 216000: /* 2.16 Gbps eDP */ 2486 return 4; 2487 case 243000: /* 2.43 Gbps eDP */ 2488 return 5; 2489 case 324000: /* 3.24 Gbps eDP */ 2490 return 6; 2491 case 432000: /* 4.32 Gbps eDP */ 2492 return 7; 2493 case 1000000: /* 10 Gbps DP2.0 */ 2494 return 8; 2495 case 1350000: /* 13.5 Gbps DP2.0 */ 2496 return 9; 2497 case 2000000: /* 20 Gbps DP2.0 */ 2498 return 10; 2499 case 648000: /* 6.48 Gbps eDP*/ 2500 return 11; 2501 case 675000: /* 6.75 Gbps eDP*/ 2502 return 12; 2503 default: 2504 MISSING_CASE(clock); 2505 return 0; 2506 } 2507 } 2508 2509 static u8 intel_c20_get_hdmi_rate(u32 clock) 2510 { 2511 if (clock >= 25175 && clock <= 600000) 2512 return 0; 2513 2514 switch (clock) { 2515 case 300000: /* 3 Gbps */ 2516 case 600000: /* 6 Gbps */ 2517 case 1200000: /* 12 Gbps */ 2518 return 1; 2519 case 800000: /* 8 Gbps */ 2520 return 2; 2521 case 1000000: /* 10 Gbps */ 2522 return 3; 2523 default: 
2524 MISSING_CASE(clock); 2525 return 0; 2526 } 2527 } 2528 2529 static bool is_dp2(u32 clock) 2530 { 2531 /* DP2.0 clock rates */ 2532 if (clock == 1000000 || clock == 1350000 || clock == 2000000) 2533 return true; 2534 2535 return false; 2536 } 2537 2538 static int intel_get_c20_custom_width(u32 clock, bool dp) 2539 { 2540 if (dp && is_dp2(clock)) 2541 return 2; 2542 else if (intel_hdmi_is_frl(clock)) 2543 return 1; 2544 else 2545 return 0; 2546 } 2547 2548 static void intel_c20_calc_vdr_params(struct intel_c20pll_vdr_state *vdr, bool is_dp, 2549 int port_clock) 2550 { 2551 vdr->custom_width = intel_get_c20_custom_width(port_clock, is_dp); 2552 2553 vdr->serdes_rate = 0; 2554 vdr->hdmi_rate = 0; 2555 2556 if (is_dp) { 2557 vdr->serdes_rate = PHY_C20_IS_DP | 2558 PHY_C20_DP_RATE(intel_c20_get_dp_rate(port_clock)); 2559 } else { 2560 if (intel_hdmi_is_frl(port_clock)) 2561 vdr->serdes_rate = PHY_C20_IS_HDMI_FRL; 2562 2563 vdr->hdmi_rate = intel_c20_get_hdmi_rate(port_clock); 2564 } 2565 } 2566 2567 #define PHY_C20_SERDES_RATE_MASK (PHY_C20_IS_DP | PHY_C20_DP_RATE_MASK | PHY_C20_IS_HDMI_FRL) 2568 2569 static void intel_c20_readout_vdr_params(struct intel_encoder *encoder, 2570 struct intel_c20pll_vdr_state *vdr, bool *cntx) 2571 { 2572 u8 serdes; 2573 2574 serdes = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE); 2575 *cntx = serdes & PHY_C20_CONTEXT_TOGGLE; 2576 2577 vdr->custom_width = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_WIDTH) & 2578 PHY_C20_CUSTOM_WIDTH_MASK; 2579 2580 vdr->serdes_rate = serdes & PHY_C20_SERDES_RATE_MASK; 2581 if (!(vdr->serdes_rate & PHY_C20_IS_DP)) 2582 vdr->hdmi_rate = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_HDMI_RATE) & 2583 PHY_C20_HDMI_RATE_MASK; 2584 else 2585 vdr->hdmi_rate = 0; 2586 } 2587 2588 static void intel_c20_program_vdr_params(struct intel_encoder *encoder, 2589 const struct intel_c20pll_vdr_state *vdr, 2590 u8 owned_lane_mask) 2591 { 2592 struct intel_display 
*display = to_intel_display(encoder); 2593 2594 drm_WARN_ON(display->drm, vdr->custom_width & ~PHY_C20_CUSTOM_WIDTH_MASK); 2595 intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_WIDTH, 2596 PHY_C20_CUSTOM_WIDTH_MASK, vdr->custom_width, 2597 MB_WRITE_COMMITTED); 2598 2599 drm_WARN_ON(display->drm, vdr->serdes_rate & ~PHY_C20_SERDES_RATE_MASK); 2600 intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, 2601 PHY_C20_SERDES_RATE_MASK, vdr->serdes_rate, 2602 MB_WRITE_COMMITTED); 2603 2604 if (vdr->serdes_rate & PHY_C20_IS_DP) 2605 return; 2606 2607 drm_WARN_ON(display->drm, vdr->hdmi_rate & ~PHY_C20_HDMI_RATE_MASK); 2608 intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE, 2609 PHY_C20_HDMI_RATE_MASK, vdr->hdmi_rate, 2610 MB_WRITE_COMMITTED); 2611 } 2612 2613 static const struct intel_c20pll_state * 2614 intel_c20_pll_find_table(const struct intel_crtc_state *crtc_state, 2615 struct intel_encoder *encoder) 2616 { 2617 const struct intel_c20pll_state * const *tables; 2618 int i; 2619 2620 tables = intel_c20_pll_tables_get(crtc_state, encoder); 2621 if (!tables) 2622 return NULL; 2623 2624 for (i = 0; tables[i]; i++) 2625 if (crtc_state->port_clock == tables[i]->clock) 2626 return tables[i]; 2627 2628 return NULL; 2629 } 2630 2631 static int intel_c20pll_calc_state_from_table(const struct intel_crtc_state *crtc_state, 2632 struct intel_encoder *encoder, 2633 struct intel_cx0pll_state *pll_state) 2634 { 2635 const struct intel_c20pll_state *table; 2636 2637 table = intel_c20_pll_find_table(crtc_state, encoder); 2638 if (!table) 2639 return -EINVAL; 2640 2641 pll_state->c20 = *table; 2642 2643 intel_cx0pll_update_ssc(encoder, pll_state, intel_crtc_has_dp_encoder(crtc_state)); 2644 2645 return 0; 2646 } 2647 2648 static int intel_c20pll_calc_state(const struct intel_crtc_state *crtc_state, 2649 struct intel_encoder *encoder, 2650 struct intel_dpll_hw_state *hw_state) 2651 { 2652 struct intel_display *display = 
to_intel_display(encoder); 2653 bool is_dp = intel_crtc_has_dp_encoder(crtc_state); 2654 int err = -ENOENT; 2655 2656 hw_state->cx0pll.use_c10 = false; 2657 hw_state->cx0pll.lane_count = crtc_state->lane_count; 2658 2659 /* try computed C20 HDMI tables before using consolidated tables */ 2660 if (!is_dp) 2661 /* TODO: Update SSC state for HDMI as well */ 2662 err = intel_c20_compute_hdmi_tmds_pll(crtc_state, &hw_state->cx0pll.c20); 2663 2664 if (err) 2665 err = intel_c20pll_calc_state_from_table(crtc_state, encoder, 2666 &hw_state->cx0pll); 2667 2668 if (err) 2669 return err; 2670 2671 intel_c20_calc_vdr_params(&hw_state->cx0pll.c20.vdr, 2672 is_dp, crtc_state->port_clock); 2673 2674 drm_WARN_ON(display->drm, is_dp != c20pll_state_is_dp(&hw_state->cx0pll.c20)); 2675 2676 return 0; 2677 } 2678 2679 int intel_cx0pll_calc_state(const struct intel_crtc_state *crtc_state, 2680 struct intel_encoder *encoder, 2681 struct intel_dpll_hw_state *hw_state) 2682 { 2683 memset(hw_state, 0, sizeof(*hw_state)); 2684 2685 if (intel_encoder_is_c10phy(encoder)) 2686 return intel_c10pll_calc_state(crtc_state, encoder, hw_state); 2687 return intel_c20pll_calc_state(crtc_state, encoder, hw_state); 2688 } 2689 2690 static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state) 2691 { 2692 return state->tx[0] & C20_PHY_USE_MPLLB; 2693 } 2694 2695 static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, 2696 const struct intel_c20pll_state *pll_state) 2697 { 2698 unsigned int frac, frac_en, frac_quot, frac_rem, frac_den; 2699 unsigned int multiplier, refclk = 38400; 2700 unsigned int tx_clk_div; 2701 unsigned int ref_clk_mpllb_div; 2702 unsigned int fb_clk_div4_en; 2703 unsigned int ref, vco; 2704 unsigned int tx_rate_mult; 2705 unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]); 2706 2707 if (intel_c20phy_use_mpllb(pll_state)) { 2708 tx_rate_mult = 1; 2709 frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]); 2710 frac_quot = 
pll_state->mpllb[8]; 2711 frac_rem = pll_state->mpllb[9]; 2712 frac_den = pll_state->mpllb[7]; 2713 multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]); 2714 tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]); 2715 ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]); 2716 fb_clk_div4_en = 0; 2717 } else { 2718 tx_rate_mult = 2; 2719 frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]); 2720 frac_quot = pll_state->mplla[8]; 2721 frac_rem = pll_state->mplla[9]; 2722 frac_den = pll_state->mplla[7]; 2723 multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]); 2724 tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]); 2725 ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]); 2726 fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]); 2727 } 2728 2729 if (frac_en) 2730 frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den); 2731 else 2732 frac = 0; 2733 2734 ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div); 2735 vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10); 2736 2737 return vco << tx_rate_mult >> tx_clk_div >> tx_rate; 2738 } 2739 2740 static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, 2741 struct intel_cx0pll_state *cx0pll_state) 2742 { 2743 struct intel_c20pll_state *pll_state = &cx0pll_state->c20; 2744 struct intel_display *display = to_intel_display(encoder); 2745 bool cntx; 2746 intel_wakeref_t wakeref; 2747 int i; 2748 2749 cx0pll_state->use_c10 = false; 2750 2751 wakeref = intel_cx0_phy_transaction_begin(encoder); 2752 2753 cx0pll_state->lane_count = readout_enabled_lane_count(encoder); 2754 2755 /* 1. 
Read VDR params and current context selection */ 2756 intel_c20_readout_vdr_params(encoder, &pll_state->vdr, &cntx); 2757 2758 /* Read Tx configuration */ 2759 for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { 2760 if (cntx) 2761 pll_state->tx[i] = intel_c20_sram_read(encoder, 2762 INTEL_CX0_LANE0, 2763 PHY_C20_B_TX_CNTX_CFG(display, i)); 2764 else 2765 pll_state->tx[i] = intel_c20_sram_read(encoder, 2766 INTEL_CX0_LANE0, 2767 PHY_C20_A_TX_CNTX_CFG(display, i)); 2768 } 2769 2770 /* Read common configuration */ 2771 for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { 2772 if (cntx) 2773 pll_state->cmn[i] = intel_c20_sram_read(encoder, 2774 INTEL_CX0_LANE0, 2775 PHY_C20_B_CMN_CNTX_CFG(display, i)); 2776 else 2777 pll_state->cmn[i] = intel_c20_sram_read(encoder, 2778 INTEL_CX0_LANE0, 2779 PHY_C20_A_CMN_CNTX_CFG(display, i)); 2780 } 2781 2782 if (intel_c20phy_use_mpllb(pll_state)) { 2783 /* MPLLB configuration */ 2784 for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { 2785 if (cntx) 2786 pll_state->mpllb[i] = intel_c20_sram_read(encoder, 2787 INTEL_CX0_LANE0, 2788 PHY_C20_B_MPLLB_CNTX_CFG(display, i)); 2789 else 2790 pll_state->mpllb[i] = intel_c20_sram_read(encoder, 2791 INTEL_CX0_LANE0, 2792 PHY_C20_A_MPLLB_CNTX_CFG(display, i)); 2793 } 2794 } else { 2795 /* MPLLA configuration */ 2796 for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { 2797 if (cntx) 2798 pll_state->mplla[i] = intel_c20_sram_read(encoder, 2799 INTEL_CX0_LANE0, 2800 PHY_C20_B_MPLLA_CNTX_CFG(display, i)); 2801 else 2802 pll_state->mplla[i] = intel_c20_sram_read(encoder, 2803 INTEL_CX0_LANE0, 2804 PHY_C20_A_MPLLA_CNTX_CFG(display, i)); 2805 } 2806 } 2807 2808 pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state); 2809 2810 intel_cx0_phy_transaction_end(encoder, wakeref); 2811 2812 cx0pll_state->ssc_enabled = readout_ssc_state(encoder, intel_c20phy_use_mpllb(pll_state)); 2813 } 2814 2815 static void intel_c20pll_dump_hw_state(struct drm_printer *p, 2816 const struct intel_c20pll_state 
*hw_state) 2817 { 2818 int i; 2819 2820 drm_printf(p, "c20pll_hw_state: clock: %d\n", hw_state->clock); 2821 drm_printf(p, 2822 "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n", 2823 hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]); 2824 drm_printf(p, 2825 "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n", 2826 hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]); 2827 2828 if (intel_c20phy_use_mpllb(hw_state)) { 2829 for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++) 2830 drm_printf(p, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); 2831 } else { 2832 for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++) 2833 drm_printf(p, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]); 2834 2835 /* For full coverage, also print the additional PLL B entry. */ 2836 BUILD_BUG_ON(ARRAY_SIZE(hw_state->mplla) + 1 != ARRAY_SIZE(hw_state->mpllb)); 2837 drm_printf(p, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]); 2838 } 2839 2840 drm_printf(p, 2841 "vdr: custom width: 0x%02x, serdes rate: 0x%02x, hdmi rate: 0x%02x\n", 2842 hw_state->vdr.custom_width, hw_state->vdr.serdes_rate, hw_state->vdr.hdmi_rate); 2843 } 2844 2845 void intel_cx0pll_dump_hw_state(struct drm_printer *p, 2846 const struct intel_cx0pll_state *hw_state) 2847 { 2848 drm_printf(p, 2849 "cx0pll_hw_state: lane_count: %d, ssc_enabled: %s, use_c10: %s, tbt_mode: %s\n", 2850 hw_state->lane_count, str_yes_no(hw_state->ssc_enabled), 2851 str_yes_no(hw_state->use_c10), str_yes_no(hw_state->tbt_mode)); 2852 2853 if (hw_state->use_c10) 2854 intel_c10pll_dump_hw_state(p, &hw_state->c10); 2855 else 2856 intel_c20pll_dump_hw_state(p, &hw_state->c20); 2857 } 2858 2859 static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder) 2860 { 2861 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 2862 2863 /* banks should not be cleared for DPALT/USB4/TBT modes */ 2864 /* TODO: optimize re-calibration in legacy mode */ 2865 return 
intel_tc_port_in_legacy_mode(intel_dig_port); 2866 } 2867 2868 static void intel_c20_pll_program(struct intel_display *display, 2869 struct intel_encoder *encoder, 2870 const struct intel_c20pll_state *pll_state) 2871 { 2872 u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); 2873 bool cntx; 2874 int i; 2875 2876 /* 1. Read current context selection */ 2877 cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & 2878 PHY_C20_CONTEXT_TOGGLE; 2879 2880 /* 2881 * 2. If there is a protocol switch from HDMI to DP or vice versa, clear 2882 * the lane #0 MPLLB CAL_DONE_BANK DP2.0 10G and 20G rates enable MPLLA. 2883 * Protocol switch is only applicable for MPLLA 2884 */ 2885 if (intel_c20_protocol_switch_valid(encoder)) { 2886 for (i = 0; i < 4; i++) 2887 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0); 2888 usleep_range(4000, 4100); 2889 } 2890 2891 /* 3. Write SRAM configuration context. If A in use, write configuration to B context */ 2892 /* 3.1 Tx configuration */ 2893 for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) { 2894 if (cntx) 2895 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2896 PHY_C20_A_TX_CNTX_CFG(display, i), 2897 pll_state->tx[i]); 2898 else 2899 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2900 PHY_C20_B_TX_CNTX_CFG(display, i), 2901 pll_state->tx[i]); 2902 } 2903 2904 /* 3.2 common configuration */ 2905 for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) { 2906 if (cntx) 2907 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2908 PHY_C20_A_CMN_CNTX_CFG(display, i), 2909 pll_state->cmn[i]); 2910 else 2911 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2912 PHY_C20_B_CMN_CNTX_CFG(display, i), 2913 pll_state->cmn[i]); 2914 } 2915 2916 /* 3.3 mpllb or mplla configuration */ 2917 if (intel_c20phy_use_mpllb(pll_state)) { 2918 for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) { 2919 if (cntx) 2920 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2921 PHY_C20_A_MPLLB_CNTX_CFG(display, 
i), 2922 pll_state->mpllb[i]); 2923 else 2924 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2925 PHY_C20_B_MPLLB_CNTX_CFG(display, i), 2926 pll_state->mpllb[i]); 2927 } 2928 } else { 2929 for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) { 2930 if (cntx) 2931 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2932 PHY_C20_A_MPLLA_CNTX_CFG(display, i), 2933 pll_state->mplla[i]); 2934 else 2935 intel_c20_sram_write(encoder, INTEL_CX0_LANE0, 2936 PHY_C20_B_MPLLA_CNTX_CFG(display, i), 2937 pll_state->mplla[i]); 2938 } 2939 } 2940 2941 /* 2942 * 4. Program custom width to match the link protocol. 2943 * 5. For DP or 6. For HDMI 2944 */ 2945 intel_c20_program_vdr_params(encoder, &pll_state->vdr, owned_lane_mask); 2946 2947 /* 2948 * 7. Write Vendor specific registers to toggle context setting to load 2949 * the updated programming toggle context bit 2950 */ 2951 intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE, 2952 PHY_C20_CONTEXT_TOGGLE, cntx ? 0 : PHY_C20_CONTEXT_TOGGLE, 2953 MB_WRITE_COMMITTED); 2954 } 2955 2956 static void intel_program_port_clock_ctl(struct intel_encoder *encoder, 2957 const struct intel_cx0pll_state *pll_state, 2958 int port_clock, 2959 bool lane_reversal) 2960 { 2961 struct intel_display *display = to_intel_display(encoder); 2962 bool is_dp = cx0pll_state_is_dp(pll_state); 2963 u32 val = 0; 2964 2965 intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port), 2966 XELPDP_PORT_REVERSAL, 2967 lane_reversal ? 
XELPDP_PORT_REVERSAL : 0); 2968 2969 if (lane_reversal) 2970 val |= XELPDP_LANE1_PHY_CLOCK_SELECT; 2971 2972 val |= XELPDP_FORWARD_CLOCK_UNGATE; 2973 2974 if (!is_dp && intel_hdmi_is_frl(port_clock)) 2975 val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK); 2976 else 2977 val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK); 2978 2979 /* TODO: HDMI FRL */ 2980 /* DP2.0 10G and 20G rates enable MPLLA*/ 2981 if (port_clock == 1000000 || port_clock == 2000000) 2982 val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0; 2983 else 2984 val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0; 2985 2986 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 2987 XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | 2988 XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA | 2989 XELPDP_SSC_ENABLE_PLLB, val); 2990 } 2991 2992 static u32 intel_cx0_get_powerdown_update(u8 lane_mask) 2993 { 2994 u32 val = 0; 2995 int lane = 0; 2996 2997 for_each_cx0_lane_in_mask(lane_mask, lane) 2998 val |= XELPDP_LANE_POWERDOWN_UPDATE(lane); 2999 3000 return val; 3001 } 3002 3003 static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state) 3004 { 3005 u32 val = 0; 3006 int lane = 0; 3007 3008 for_each_cx0_lane_in_mask(lane_mask, lane) 3009 val |= XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state); 3010 3011 return val; 3012 } 3013 3014 void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder, 3015 u8 lane_mask, u8 state) 3016 { 3017 struct intel_display *display = to_intel_display(encoder); 3018 enum port port = encoder->port; 3019 enum phy phy = intel_encoder_to_phy(encoder); 3020 i915_reg_t buf_ctl2_reg = XELPDP_PORT_BUF_CTL2(display, port); 3021 int lane; 3022 3023 intel_de_rmw(display, buf_ctl2_reg, 3024 intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK), 3025 intel_cx0_get_powerdown_state(lane_mask, state)); 3026 3027 /* Wait for pending 
transactions.*/ 3028 for_each_cx0_lane_in_mask(lane_mask, lane) 3029 if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane), 3030 XELPDP_PORT_M2P_TRANSACTION_PENDING, 3031 XELPDP_MSGBUS_TIMEOUT_MS)) { 3032 drm_dbg_kms(display->drm, 3033 "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n", 3034 phy_name(phy)); 3035 intel_cx0_bus_reset(encoder, lane); 3036 } 3037 3038 intel_de_rmw(display, buf_ctl2_reg, 3039 intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES), 3040 intel_cx0_get_powerdown_update(lane_mask)); 3041 3042 /* Update Timeout Value */ 3043 if (intel_de_wait_for_clear_ms(display, buf_ctl2_reg, 3044 intel_cx0_get_powerdown_update(lane_mask), 3045 XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS)) 3046 drm_warn(display->drm, 3047 "PHY %c failed to bring out of lane reset\n", 3048 phy_name(phy)); 3049 } 3050 3051 void intel_cx0_setup_powerdown(struct intel_encoder *encoder) 3052 { 3053 struct intel_display *display = to_intel_display(encoder); 3054 enum port port = encoder->port; 3055 3056 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), 3057 XELPDP_POWER_STATE_READY_MASK, 3058 XELPDP_POWER_STATE_READY(XELPDP_P2_STATE_READY)); 3059 intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port), 3060 XELPDP_POWER_STATE_ACTIVE_MASK | 3061 XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, 3062 XELPDP_POWER_STATE_ACTIVE(XELPDP_P0_STATE_ACTIVE) | 3063 XELPDP_PLL_LANE_STAGGERING_DELAY(0)); 3064 } 3065 3066 static u32 intel_cx0_get_pclk_refclk_request(u8 lane_mask) 3067 { 3068 u32 val = 0; 3069 int lane = 0; 3070 3071 for_each_cx0_lane_in_mask(lane_mask, lane) 3072 val |= XELPDP_LANE_PCLK_REFCLK_REQUEST(lane); 3073 3074 return val; 3075 } 3076 3077 static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask) 3078 { 3079 u32 val = 0; 3080 int lane = 0; 3081 3082 for_each_cx0_lane_in_mask(lane_mask, lane) 3083 val |= XELPDP_LANE_PCLK_REFCLK_ACK(lane); 3084 3085 return val; 3086 } 3087 3088 static void 
intel_cx0_phy_lane_reset(struct intel_encoder *encoder, 3089 bool lane_reversal) 3090 { 3091 struct intel_display *display = to_intel_display(encoder); 3092 enum port port = encoder->port; 3093 enum phy phy = intel_encoder_to_phy(encoder); 3094 u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); 3095 u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 : INTEL_CX0_LANE0; 3096 u32 lane_pipe_reset = owned_lane_mask == INTEL_CX0_BOTH_LANES 3097 ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1) 3098 : XELPDP_LANE_PIPE_RESET(0); 3099 u32 lane_phy_current_status = owned_lane_mask == INTEL_CX0_BOTH_LANES 3100 ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) | 3101 XELPDP_LANE_PHY_CURRENT_STATUS(1)) 3102 : XELPDP_LANE_PHY_CURRENT_STATUS(0); 3103 3104 if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL1(display, port), 3105 XELPDP_PORT_BUF_SOC_PHY_READY, 3106 XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US)) 3107 drm_warn(display->drm, 3108 "PHY %c failed to bring out of SOC reset\n", 3109 phy_name(phy)); 3110 3111 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 3112 lane_pipe_reset); 3113 3114 if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port), 3115 lane_phy_current_status, 3116 XELPDP_PORT_RESET_START_TIMEOUT_US)) 3117 drm_warn(display->drm, 3118 "PHY %c failed to bring out of lane reset\n", 3119 phy_name(phy)); 3120 3121 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), 3122 intel_cx0_get_pclk_refclk_request(owned_lane_mask), 3123 intel_cx0_get_pclk_refclk_request(lane_mask)); 3124 3125 if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, port), 3126 intel_cx0_get_pclk_refclk_ack(owned_lane_mask), 3127 intel_cx0_get_pclk_refclk_ack(lane_mask), 3128 XELPDP_REFCLK_ENABLE_TIMEOUT_US, NULL)) 3129 drm_warn(display->drm, 3130 "PHY %c failed to request refclk\n", 3131 phy_name(phy)); 3132 3133 intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, 3134 XELPDP_P2_STATE_RESET); 3135 
intel_cx0_setup_powerdown(encoder); 3136 3137 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0); 3138 3139 if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port), 3140 lane_phy_current_status, 3141 XELPDP_PORT_RESET_END_TIMEOUT_MS)) 3142 drm_warn(display->drm, 3143 "PHY %c failed to bring out of lane reset\n", 3144 phy_name(phy)); 3145 } 3146 3147 static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count, 3148 bool lane_reversal) 3149 { 3150 int i; 3151 u8 disables; 3152 bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder)); 3153 u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); 3154 3155 intel_c10_msgbus_access_begin(encoder, owned_lane_mask); 3156 3157 if (lane_reversal) 3158 disables = REG_GENMASK8(3, 0) >> lane_count; 3159 else 3160 disables = REG_GENMASK8(3, 0) << lane_count; 3161 3162 if (dp_alt_mode && lane_count == 1) { 3163 disables &= ~REG_GENMASK8(1, 0); 3164 disables |= REG_FIELD_PREP8(REG_GENMASK8(1, 0), 0x1); 3165 } 3166 3167 for (i = 0; i < 4; i++) { 3168 int tx = i % 2 + 1; 3169 u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1; 3170 3171 if (!(owned_lane_mask & lane_mask)) 3172 continue; 3173 3174 intel_cx0_rmw(encoder, lane_mask, PHY_CX0_TX_CONTROL(tx, 2), 3175 CONTROL2_DISABLE_SINGLE_TX, 3176 disables & BIT(i) ? 
CONTROL2_DISABLE_SINGLE_TX : 0, 3177 MB_WRITE_COMMITTED); 3178 } 3179 3180 intel_c10_msgbus_access_commit(encoder, owned_lane_mask, false); 3181 } 3182 3183 static u32 intel_cx0_get_pclk_pll_request(u8 lane_mask) 3184 { 3185 u32 val = 0; 3186 int lane = 0; 3187 3188 for_each_cx0_lane_in_mask(lane_mask, lane) 3189 val |= XELPDP_LANE_PCLK_PLL_REQUEST(lane); 3190 3191 return val; 3192 } 3193 3194 static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask) 3195 { 3196 u32 val = 0; 3197 int lane = 0; 3198 3199 for_each_cx0_lane_in_mask(lane_mask, lane) 3200 val |= XELPDP_LANE_PCLK_PLL_ACK(lane); 3201 3202 return val; 3203 } 3204 3205 static void intel_cx0pll_enable(struct intel_encoder *encoder, 3206 const struct intel_cx0pll_state *pll_state) 3207 { 3208 int port_clock = pll_state->use_c10 ? pll_state->c10.clock : pll_state->c20.clock; 3209 struct intel_display *display = to_intel_display(encoder); 3210 enum phy phy = intel_encoder_to_phy(encoder); 3211 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3212 bool lane_reversal = dig_port->lane_reversal; 3213 u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 : 3214 INTEL_CX0_LANE0; 3215 intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); 3216 3217 /* 3218 * 1. Program PORT_CLOCK_CTL REGISTER to configure 3219 * clock muxes, gating and SSC 3220 */ 3221 intel_program_port_clock_ctl(encoder, pll_state, port_clock, lane_reversal); 3222 3223 /* 2. Bring PHY out of reset. */ 3224 intel_cx0_phy_lane_reset(encoder, lane_reversal); 3225 3226 /* 3227 * 3. Change Phy power state to Ready. 3228 * TODO: For DP alt mode use only one lane. 3229 */ 3230 intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, 3231 XELPDP_P2_STATE_READY); 3232 3233 /* 3234 * 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000. 3235 * (This is done inside intel_cx0_phy_transaction_begin(), since we would need 3236 * the right timer thresholds for readouts too.) 3237 */ 3238 3239 /* 5. 
Program PHY internal PLL internal registers. */ 3240 if (intel_encoder_is_c10phy(encoder)) 3241 intel_c10_pll_program(display, encoder, &pll_state->c10); 3242 else 3243 intel_c20_pll_program(display, encoder, &pll_state->c20); 3244 3245 /* 3246 * 6. Program the enabled and disabled owned PHY lane 3247 * transmitters over message bus 3248 */ 3249 intel_cx0_program_phy_lane(encoder, pll_state->lane_count, lane_reversal); 3250 3251 /* 3252 * 7. Follow the Display Voltage Frequency Switching - Sequence 3253 * Before Frequency Change. We handle this step in bxt_set_cdclk(). 3254 */ 3255 3256 /* 3257 * 8. Program DDI_CLK_VALFREQ to match intended DDI 3258 * clock frequency. 3259 */ 3260 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), port_clock); 3261 3262 /* 3263 * 9. Set PORT_CLOCK_CTL register PCLK PLL Request 3264 * LN<Lane for maxPCLK> to "1" to enable PLL. 3265 */ 3266 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3267 intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES), 3268 intel_cx0_get_pclk_pll_request(maxpclk_lane)); 3269 3270 /* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */ 3271 if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3272 intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES), 3273 intel_cx0_get_pclk_pll_ack(maxpclk_lane), 3274 XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, NULL)) 3275 drm_warn(display->drm, "Port %c PLL not locked\n", 3276 phy_name(phy)); 3277 3278 /* 3279 * 11. Follow the Display Voltage Frequency Switching Sequence After 3280 * Frequency Change. We handle this step in bxt_set_cdclk(). 
3281 */ 3282 3283 intel_cx0_phy_transaction_end(encoder, wakeref); 3284 } 3285 3286 void intel_mtl_tbt_pll_calc_state(struct intel_dpll_hw_state *hw_state) 3287 { 3288 memset(hw_state, 0, sizeof(*hw_state)); 3289 3290 hw_state->cx0pll.tbt_mode = true; 3291 } 3292 3293 bool intel_mtl_tbt_pll_readout_hw_state(struct intel_display *display, 3294 struct intel_dpll *pll, 3295 struct intel_dpll_hw_state *hw_state) 3296 { 3297 memset(hw_state, 0, sizeof(*hw_state)); 3298 3299 hw_state->cx0pll.tbt_mode = true; 3300 3301 return true; 3302 } 3303 3304 int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) 3305 { 3306 struct intel_display *display = to_intel_display(encoder); 3307 u32 clock, val; 3308 3309 val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); 3310 3311 clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val); 3312 3313 drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); 3314 drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); 3315 drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_ACK)); 3316 3317 switch (clock) { 3318 case XELPDP_DDI_CLOCK_SELECT_TBT_162: 3319 return 162000; 3320 case XELPDP_DDI_CLOCK_SELECT_TBT_270: 3321 return 270000; 3322 case XELPDP_DDI_CLOCK_SELECT_TBT_540: 3323 return 540000; 3324 case XELPDP_DDI_CLOCK_SELECT_TBT_810: 3325 return 810000; 3326 case XELPDP_DDI_CLOCK_SELECT_TBT_312_5: 3327 return 1000000; 3328 case XELPDP_DDI_CLOCK_SELECT_TBT_625: 3329 return 2000000; 3330 default: 3331 MISSING_CASE(clock); 3332 return 162000; 3333 } 3334 } 3335 3336 static int intel_mtl_tbt_clock_select(struct intel_display *display, 3337 int clock) 3338 { 3339 switch (clock) { 3340 case 162000: 3341 return XELPDP_DDI_CLOCK_SELECT_TBT_162; 3342 case 270000: 3343 return XELPDP_DDI_CLOCK_SELECT_TBT_270; 3344 case 540000: 3345 return XELPDP_DDI_CLOCK_SELECT_TBT_540; 3346 case 810000: 3347 return XELPDP_DDI_CLOCK_SELECT_TBT_810; 3348 case 1000000: 3349 if (DISPLAY_VER(display) < 30) { 3350 
drm_WARN_ON(display->drm, "UHBR10 not supported for the platform\n"); 3351 return XELPDP_DDI_CLOCK_SELECT_TBT_162; 3352 } 3353 return XELPDP_DDI_CLOCK_SELECT_TBT_312_5; 3354 case 2000000: 3355 if (DISPLAY_VER(display) < 30) { 3356 drm_WARN_ON(display->drm, "UHBR20 not supported for the platform\n"); 3357 return XELPDP_DDI_CLOCK_SELECT_TBT_162; 3358 } 3359 return XELPDP_DDI_CLOCK_SELECT_TBT_625; 3360 default: 3361 MISSING_CASE(clock); 3362 return XELPDP_DDI_CLOCK_SELECT_TBT_162; 3363 } 3364 } 3365 3366 void intel_mtl_tbt_pll_enable_clock(struct intel_encoder *encoder, int port_clock) 3367 { 3368 struct intel_display *display = to_intel_display(encoder); 3369 enum phy phy = intel_encoder_to_phy(encoder); 3370 u32 val = 0; 3371 u32 mask; 3372 3373 /* 3374 * 1. Program PORT_CLOCK_CTL REGISTER to configure 3375 * clock muxes, gating and SSC 3376 */ 3377 3378 mask = XELPDP_DDI_CLOCK_SELECT_MASK(display); 3379 val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, 3380 intel_mtl_tbt_clock_select(display, port_clock)); 3381 3382 mask |= XELPDP_FORWARD_CLOCK_UNGATE; 3383 val |= XELPDP_FORWARD_CLOCK_UNGATE; 3384 3385 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3386 mask, val); 3387 3388 /* 2. Read back PORT_CLOCK_CTL REGISTER */ 3389 val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); 3390 3391 /* 3392 * 3. Follow the Display Voltage Frequency Switching - Sequence 3393 * Before Frequency Change. We handle this step in bxt_set_cdclk(). 3394 */ 3395 3396 /* 3397 * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL. 3398 */ 3399 val |= XELPDP_TBT_CLOCK_REQUEST; 3400 intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val); 3401 3402 /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". 
*/ 3403 if (intel_de_wait_for_set_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3404 XELPDP_TBT_CLOCK_ACK, 100)) 3405 drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked\n", 3406 encoder->base.base.id, encoder->base.name, phy_name(phy)); 3407 3408 /* 3409 * 6. Follow the Display Voltage Frequency Switching Sequence After 3410 * Frequency Change. We handle this step in bxt_set_cdclk(). 3411 */ 3412 3413 /* 3414 * 7. Program DDI_CLK_VALFREQ to match intended DDI 3415 * clock frequency. 3416 */ 3417 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 3418 port_clock); 3419 } 3420 3421 void intel_mtl_pll_enable(struct intel_encoder *encoder, 3422 struct intel_dpll *pll, 3423 const struct intel_dpll_hw_state *dpll_hw_state) 3424 { 3425 intel_cx0pll_enable(encoder, &dpll_hw_state->cx0pll); 3426 } 3427 3428 void intel_mtl_pll_enable_clock(struct intel_encoder *encoder, 3429 const struct intel_crtc_state *crtc_state) 3430 { 3431 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3432 3433 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 3434 intel_mtl_tbt_pll_enable_clock(encoder, crtc_state->port_clock); 3435 } 3436 3437 /* 3438 * According to HAS we need to enable MAC Transmitting LFPS in the "PHY Common 3439 * Control 0" PIPE register in case of AUX Less ALPM is going to be used. This 3440 * function is doing that and is called by link retrain sequence. 
3441 */ 3442 void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder, 3443 const struct intel_crtc_state *crtc_state) 3444 { 3445 struct intel_display *display = to_intel_display(encoder); 3446 intel_wakeref_t wakeref; 3447 int i; 3448 u8 owned_lane_mask; 3449 3450 if (DISPLAY_VER(display) < 20 || 3451 !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state)) 3452 return; 3453 3454 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); 3455 3456 wakeref = intel_cx0_phy_transaction_begin(encoder); 3457 3458 intel_c10_msgbus_access_begin(encoder, owned_lane_mask); 3459 3460 for (i = 0; i < 4; i++) { 3461 int tx = i % 2 + 1; 3462 u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1; 3463 3464 if (!(owned_lane_mask & lane_mask)) 3465 continue; 3466 3467 intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0), 3468 CONTROL0_MAC_TRANSMIT_LFPS, 3469 CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED); 3470 } 3471 3472 intel_cx0_phy_transaction_end(encoder, wakeref); 3473 } 3474 3475 static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) 3476 { 3477 struct intel_display *display = to_intel_display(encoder); 3478 3479 if (intel_encoder_is_c10phy(encoder)) 3480 return XELPDP_P2PG_STATE_DISABLE; 3481 3482 if ((display->platform.battlemage && encoder->port == PORT_A) || 3483 (DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP)) 3484 return XELPDP_P2PG_STATE_DISABLE; 3485 3486 return XELPDP_P4PG_STATE_DISABLE; 3487 } 3488 3489 static void intel_cx0pll_disable(struct intel_encoder *encoder) 3490 { 3491 struct intel_display *display = to_intel_display(encoder); 3492 enum phy phy = intel_encoder_to_phy(encoder); 3493 intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder); 3494 3495 /* 1. Change owned PHY lane power to Disable state. */ 3496 intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES, 3497 cx0_power_control_disable_val(encoder)); 3498 3499 /* 3500 * 2. 
Follow the Display Voltage Frequency Switching Sequence Before 3501 * Frequency Change. We handle this step in bxt_set_cdclk(). 3502 */ 3503 3504 /* 3505 * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK> 3506 * to "0" to disable PLL. 3507 */ 3508 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3509 intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) | 3510 intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0); 3511 3512 /* 4. Program DDI_CLK_VALFREQ to 0. */ 3513 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); 3514 3515 /* 3516 * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0". 3517 */ 3518 if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3519 intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) | 3520 intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 3521 XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US)) 3522 drm_warn(display->drm, "Port %c PLL not unlocked\n", 3523 phy_name(phy)); 3524 3525 /* 3526 * 6. Follow the Display Voltage Frequency Switching Sequence After 3527 * Frequency Change. We handle this step in bxt_set_cdclk(). 3528 */ 3529 3530 /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */ 3531 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3532 XELPDP_DDI_CLOCK_SELECT_MASK(display), 0); 3533 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3534 XELPDP_FORWARD_CLOCK_UNGATE, 0); 3535 3536 intel_cx0_phy_transaction_end(encoder, wakeref); 3537 } 3538 3539 static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder) 3540 { 3541 struct intel_display *display = to_intel_display(encoder); 3542 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3543 u8 lane = dig_port->lane_reversal ? 
INTEL_CX0_LANE1 : INTEL_CX0_LANE0; 3544 3545 return intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)) & 3546 intel_cx0_get_pclk_pll_request(lane); 3547 } 3548 3549 void intel_mtl_tbt_pll_disable_clock(struct intel_encoder *encoder) 3550 { 3551 struct intel_display *display = to_intel_display(encoder); 3552 enum phy phy = intel_encoder_to_phy(encoder); 3553 3554 /* 3555 * 1. Follow the Display Voltage Frequency Switching Sequence Before 3556 * Frequency Change. We handle this step in bxt_set_cdclk(). 3557 */ 3558 3559 /* 3560 * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL. 3561 */ 3562 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3563 XELPDP_TBT_CLOCK_REQUEST, 0); 3564 3565 /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */ 3566 if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3567 XELPDP_TBT_CLOCK_ACK, 10)) 3568 drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked\n", 3569 encoder->base.base.id, encoder->base.name, phy_name(phy)); 3570 3571 /* 3572 * 4. Follow the Display Voltage Frequency Switching Sequence After 3573 * Frequency Change. We handle this step in bxt_set_cdclk(). 3574 */ 3575 3576 /* 3577 * 5. Program PORT CLOCK CTRL register to disable and gate clocks 3578 */ 3579 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 3580 XELPDP_DDI_CLOCK_SELECT_MASK(display) | 3581 XELPDP_FORWARD_CLOCK_UNGATE, 0); 3582 3583 /* 6. Program DDI_CLK_VALFREQ to 0. 
*/ 3584 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); 3585 } 3586 3587 void intel_mtl_pll_disable(struct intel_encoder *encoder) 3588 { 3589 intel_cx0pll_disable(encoder); 3590 } 3591 3592 void intel_mtl_pll_disable_clock(struct intel_encoder *encoder) 3593 { 3594 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3595 3596 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 3597 intel_mtl_tbt_pll_disable_clock(encoder); 3598 } 3599 3600 enum icl_port_dpll_id 3601 intel_mtl_port_pll_type(struct intel_encoder *encoder, 3602 const struct intel_crtc_state *crtc_state) 3603 { 3604 struct intel_display *display = to_intel_display(encoder); 3605 u32 val, clock; 3606 3607 /* 3608 * TODO: Determine the PLL type from the SW state, once MTL PLL 3609 * handling is done via the standard shared DPLL framework. 3610 */ 3611 val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); 3612 clock = XELPDP_DDI_CLOCK_SELECT_GET(display, val); 3613 3614 if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK || 3615 clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK) 3616 return ICL_PORT_DPLL_MG_PHY; 3617 else 3618 return ICL_PORT_DPLL_DEFAULT; 3619 } 3620 3621 bool intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, 3622 struct intel_cx0pll_state *pll_state) 3623 { 3624 memset(pll_state, 0, sizeof(*pll_state)); 3625 3626 if (!intel_cx0_pll_is_enabled(encoder)) 3627 return false; 3628 3629 if (intel_encoder_is_c10phy(encoder)) 3630 intel_c10pll_readout_hw_state(encoder, pll_state); 3631 else 3632 intel_c20pll_readout_hw_state(encoder, pll_state); 3633 3634 return true; 3635 } 3636 3637 static bool mtl_compare_hw_state_c10(const struct intel_c10pll_state *a, 3638 const struct intel_c10pll_state *b) 3639 { 3640 if (a->tx != b->tx) 3641 return false; 3642 3643 if (a->cmn != b->cmn) 3644 return false; 3645 3646 if (memcmp(&a->pll, &b->pll, sizeof(a->pll)) != 0) 3647 return false; 3648 3649 return true; 3650 } 3651 3652 static bool mtl_compare_hw_state_c20(const 
struct intel_c20pll_state *a, 3653 const struct intel_c20pll_state *b) 3654 { 3655 if (memcmp(&a->tx, &b->tx, sizeof(a->tx)) != 0) 3656 return false; 3657 3658 if (memcmp(&a->cmn, &b->cmn, sizeof(a->cmn)) != 0) 3659 return false; 3660 3661 if (a->tx[0] & C20_PHY_USE_MPLLB) { 3662 if (memcmp(&a->mpllb, &b->mpllb, sizeof(a->mpllb)) != 0) 3663 return false; 3664 } else { 3665 if (memcmp(&a->mplla, &b->mplla, sizeof(a->mplla)) != 0) 3666 return false; 3667 } 3668 3669 return true; 3670 } 3671 3672 bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a, 3673 const struct intel_cx0pll_state *b) 3674 { 3675 if (a->tbt_mode || b->tbt_mode) 3676 return true; 3677 3678 if (a->use_c10 != b->use_c10) 3679 return false; 3680 3681 if (a->use_c10) 3682 return mtl_compare_hw_state_c10(&a->c10, 3683 &b->c10); 3684 else 3685 return mtl_compare_hw_state_c20(&a->c20, 3686 &b->c20); 3687 } 3688 3689 int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, 3690 const struct intel_cx0pll_state *pll_state) 3691 { 3692 if (intel_encoder_is_c10phy(encoder)) 3693 return intel_c10pll_calc_port_clock(encoder, &pll_state->c10); 3694 3695 return intel_c20pll_calc_port_clock(encoder, &pll_state->c20); 3696 } 3697 3698 /* 3699 * WA 14022081154 3700 * The dedicated display PHYs reset to a power state that blocks S0ix, increasing idle 3701 * system power. After a system reset (cold boot, S3/4/5, warm reset) if a dedicated 3702 * PHY is not being brought up shortly, use these steps to move the PHY to the lowest 3703 * power state to save power. For PTL the workaround is needed only for port A. Port B 3704 * is not connected. 3705 * 3706 * 1. Follow the PLL Enable Sequence, using any valid frequency such as DP 1.62 GHz. 3707 * This brings lanes out of reset and enables the PLL to allow powerdown to be moved 3708 * to the Disable state. 3709 * 2. Follow PLL Disable Sequence. This moves powerdown to the Disable state and disables the PLL. 
3710 */ 3711 void intel_cx0_pll_power_save_wa(struct intel_display *display) 3712 { 3713 struct intel_encoder *encoder; 3714 3715 if (DISPLAY_VER(display) != 30) 3716 return; 3717 3718 for_each_intel_encoder(display->drm, encoder) { 3719 struct intel_cx0pll_state pll_state = {}; 3720 int port_clock = 162000; 3721 int lane_count = 4; 3722 3723 if (!intel_encoder_is_dig_port(encoder)) 3724 continue; 3725 3726 if (!intel_encoder_is_c10phy(encoder)) 3727 continue; 3728 3729 if (intel_cx0_pll_is_enabled(encoder)) 3730 continue; 3731 3732 if (intel_c10pll_calc_state_from_table(encoder, 3733 mtl_c10_edp_tables, 3734 true, port_clock, lane_count, 3735 &pll_state) < 0) { 3736 drm_WARN_ON(display->drm, 3737 "Unable to calc C10 state from the tables\n"); 3738 continue; 3739 } 3740 3741 drm_dbg_kms(display->drm, 3742 "[ENCODER:%d:%s] Applying power saving workaround on disabled PLL\n", 3743 encoder->base.base.id, encoder->base.name); 3744 3745 intel_cx0pll_enable(encoder, &pll_state); 3746 intel_cx0pll_disable(encoder); 3747 } 3748 } 3749