// SPDX-License-Identifier: MIT
/*
 * Copyright © 2025 Intel Corporation
 */

#include <drm/drm_print.h>

#include "i915_reg.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dpll_mgr.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
#include "intel_lt_phy_regs.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_tc.h"

/* Iterate over every lane set in __lane_mask; an LT PHY exposes two lanes. */
#define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \
	for ((__lane) = 0; (__lane) < 2; (__lane)++) \
		for_each_if((__lane_mask) & BIT(__lane))

#define INTEL_LT_PHY_LANE0		BIT(0)
#define INTEL_LT_PHY_LANE1		BIT(1)
#define INTEL_LT_PHY_BOTH_LANES		(INTEL_LT_PHY_LANE1 |\
					 INTEL_LT_PHY_LANE0)
/* PHY mode selector values written into the VDR config */
#define MODE_DP 3
#define MODE_HDMI_20 4
/* Helpers for unsigned Q32.32 fixed-point values (integer part / fraction) */
#define Q32_TO_INT(x) ((x) >> 32)
#define Q32_TO_FRAC(x) ((x) & 0xFFFFFFFF)
/* Default (high-range) DCO minimum, in MHz */
#define DCO_MIN_FREQ_MHZ 11850
/* PLL reference clock, in kHz */
#define REF_CLK_KHZ 38400
#define TDC_RES_MULTIPLIER 10000000ULL

/* One PHY PLL register: 32-bit value plus the 16-bit VDR address it goes to. */
struct phy_param_t {
	u32 val;
	u32 addr;
};

/*
 * Scratch set of computed PLL registers used when deriving an HDMI PLL
 * state at runtime (one entry per register in the fixed program order).
 */
struct lt_phy_params {
	struct phy_param_t pll_reg4;
	struct phy_param_t pll_reg3;
	struct phy_param_t pll_reg5;
	struct phy_param_t pll_reg57;
	struct phy_param_t lf;
	struct phy_param_t tdc;
	struct phy_param_t ssc;
	struct phy_param_t bias2;
	struct phy_param_t bias_trim;
	struct phy_param_t dco_med;
	struct phy_param_t dco_fine;
	struct phy_param_t ssc_inj;
	struct phy_param_t surv_bonus;
};

/*
 * Pre-baked PLL programming tables, one per link rate.  Each table carries
 * 13 register writes: addr_msb[i]/addr_lsb[i] form the VDR register address
 * and data[i][0..3] are its four bytes (MSB first, matching the DATA_ASSIGN
 * packing used for computed HDMI states).  The raw values are opaque
 * hardware tuning constants — presumably lifted from Bspec; do not edit by
 * hand.  .clock is the link clock in kHz, .config the VDR config bytes.
 */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
	.clock = 162000,
	.config = { 0x83, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x5, 0xa, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0x4, 0x4, 0x82, 0x28 },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
	.clock = 270000,
	.config = { 0x8b, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x3, 0xca, 0x34, 0xa0 },
		{ 0xe0, 0x0, 0x0, 0x0 },
		{ 0x5, 0x4, 0x81, 0xad },
		{ 0xfa, 0x11, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x7, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x43, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0d },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
	.clock = 540000,
	.config = { 0x93, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4d, 0x34, 0xa0 },
		{ 0xe0, 0x0, 0x0, 0x0 },
		{ 0xa, 0x4, 0x81, 0xda },
		{ 0xfa, 0x11, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x7, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x43, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0d },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
	.clock = 810000,
	.config = { 0x9b, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4a, 0x34, 0xa0 },
		{ 0xe0, 0x0, 0x0, 0x0 },
		{ 0x5, 0x4, 0x80, 0xa8 },
		{ 0xfa, 0x11, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x7, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x43, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0d },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

/* UHBR rates use the MPLLA register bank (0x85xx/0x86xx vs 0x87xx/0x88xx). */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
	.clock = 1000000,
	.config = { 0x43, 0x2d, 0x0, },
	.addr_msb = {
		0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86,
		0x86, 0x86, 0x86, 0x86, 0x86, 0x86,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0xa, 0x20, 0x80 },
		{ 0x6a, 0xaa, 0xaa, 0xab },
		{ 0x0, 0x3, 0x4, 0x94 },
		{ 0xfa, 0x1c, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x4, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x45, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x5b, 0xe0, 0x8 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
	.clock = 1350000,
	.config = { 0xcb, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x2, 0x9, 0x2b, 0xe0 },
		{ 0x90, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x80, 0xe0 },
		{ 0xfa, 0x15, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x6, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x49, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x57, 0xe0, 0x0c },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

/* Same register payload as UHBR10 (both run on the MPLLA bank). */
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
	.clock = 2000000,
	.config = { 0x53, 0x2d, 0x0, },
	.addr_msb = {
		0x85, 0x85, 0x85, 0x85, 0x86, 0x86, 0x86,
		0x86, 0x86, 0x86, 0x86, 0x86, 0x86,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0xa, 0x20, 0x80 },
		{ 0x6a, 0xaa, 0xaa, 0xab },
		{ 0x0, 0x3, 0x4, 0x94 },
		{ 0xfa, 0x1c, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x4, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x45, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x5b, 0xe0, 0x8 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

/* NULL-terminated DP rate list, ordered by ascending link clock. */
static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
	&xe3plpd_lt_dp_rbr,
	&xe3plpd_lt_dp_hbr1,
	&xe3plpd_lt_dp_hbr2,
	&xe3plpd_lt_dp_hbr3,
	&xe3plpd_lt_dp_uhbr10,
	&xe3plpd_lt_dp_uhbr13_5,
	&xe3plpd_lt_dp_uhbr20,
	NULL,
};

/* eDP intermediate rates; .config[2] == 0x1 marks the eDP variants. */
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
	.clock = 216000,
	.config = { 0xa3, 0x2d, 0x1, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x3, 0xca, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x81, 0xbc },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
	.clock = 243000,
	.config = { 0xab, 0x2d, 0x1, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x3, 0xca, 0x2f, 0x60 },
		{ 0xb0, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x81, 0xbc },
		{ 0xfa, 0x13, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x6, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x47, 0x48, 0x0, 0x0 },
		/*
		 * NOTE(review): every other DP/eDP table has { 0x27, 0x8, .. }
		 * in this slot (bias trim register); all-zero looks like a
		 * transcription gap — verify against the Bspec table.
		 */
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0c },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
	.clock = 324000,
	.config = { 0xb3, 0x2d, 0x1, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x2, 0x8a, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x81, 0x28 },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
	.clock = 432000,
	.config = { 0xbb, 0x2d, 0x1, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4d, 0x2a, 0x20 },
		{ 0x80, 0x0, 0x0, 0x0 },
		{ 0xc, 0x4, 0x81, 0xbc },
		{ 0xfa, 0x16, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x5, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x4b, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x5b, 0xe0, 0x0a },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
	.clock = 675000,
	.config = { 0xdb, 0x2d, 0x1, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x1, 0x4a, 0x2b, 0xe0 },
		{ 0x90, 0x0, 0x0, 0x0 },
		{ 0x6, 0x4, 0x80, 0xa8 },
		{ 0xfa, 0x15, 0x83, 0x11 },
		{ 0x80, 0x0f, 0xf9, 0x53 },
		{ 0x84, 0x26, 0x6, 0x4 },
		{ 0x0, 0xe0, 0x1, 0x0 },
		{ 0x49, 0x48, 0x0, 0x0 },
		{ 0x27, 0x8, 0x0, 0x0 },
		{ 0x5a, 0x13, 0x29, 0x13 },
		{ 0x0, 0x57, 0xe0, 0x0c },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

/* eDP list: standard DP rates interleaved with eDP intermediate rates. */
static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
	&xe3plpd_lt_dp_rbr,
	&xe3plpd_lt_edp_2_16,
	&xe3plpd_lt_edp_2_43,
	&xe3plpd_lt_dp_hbr1,
	&xe3plpd_lt_edp_3_24,
	&xe3plpd_lt_edp_4_32,
	&xe3plpd_lt_dp_hbr2,
	&xe3plpd_lt_edp_6_75,
	&xe3plpd_lt_dp_hbr3,
	NULL,
};

/* HDMI TMDS tables; .clock is the character clock in kHz (e.g. 25.2 MHz). */
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
	.clock = 25200,
	.config = { 0x84, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x0c, 0x15, 0x27, 0x60 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x98, 0x28 },
		{ 0x42, 0x0, 0x84, 0x10 },
		{ 0x80, 0x0f, 0xd9, 0xb5 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
	.clock = 27200,
	.config = { 0x84, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x0b, 0x15, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x96, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
	.clock = 74250,
	.config = { 0x84, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x4, 0x15, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x88, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
	.clock = 148500,
	.config = { 0x84, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x2, 0x15, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x84, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
	.clock = 594000,
	.config = { 0x84, 0x2d, 0x0, },
	.addr_msb = {
		0x87, 0x87, 0x87, 0x87, 0x88, 0x88, 0x88,
		0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	},
	.addr_lsb = {
		0x10, 0x0c, 0x14, 0xe4, 0x0c, 0x10, 0x14,
		0x18, 0x48, 0x40, 0x4c, 0x24, 0x44,
	},
	.data = {
		{ 0x0, 0x4c, 0x2, 0x0 },
		{ 0x0, 0x95, 0x26, 0xa0 },
		{ 0x60, 0x0, 0x0, 0x0 },
		{ 0x8, 0x4, 0x81, 0x28 },
		{ 0xfa, 0x0c, 0x84, 0x11 },
		{ 0x80, 0x0f, 0xd9, 0x53 },
		{ 0x86, 0x0, 0x0, 0x0 },
		{ 0x1, 0xa0, 0x1, 0x0 },
		{ 0x4b, 0x0, 0x0, 0x0 },
		{ 0x28, 0x0, 0x0, 0x0 },
		{ 0x0, 0x14, 0x2a, 0x14 },
		{ 0x0, 0x0, 0x0, 0x0 },
		{ 0x0, 0x0, 0x0, 0x0 },
	},
};

/* Fixed HDMI TMDS clocks; other clocks are computed at runtime. */
static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
	&xe3plpd_lt_hdmi_252,
	&xe3plpd_lt_hdmi_272,
	&xe3plpd_lt_hdmi_742p5,
	&xe3plpd_lt_hdmi_1p485,
	&xe3plpd_lt_hdmi_5p94,
	NULL,
};

/*
 * Return the mask of lanes this port owns: both lanes unless we are in
 * TC DP-alt mode with only 2 lanes assigned, in which case only lane 0.
 */
static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (!intel_tc_port_in_dp_alt_mode(dig_port))
		return INTEL_LT_PHY_BOTH_LANES;

	return intel_tc_port_max_lane_count(dig_port) > 2
		?
		INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
}

/* LT PHY message-bus accessors reuse the CX0 PHY transport helpers. */
static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
	return intel_cx0_read(encoder, lane_mask, addr);
}

static void intel_lt_phy_write(struct intel_encoder *encoder,
			       u8 lane_mask, u16 addr, u8 data, bool committed)
{
	intel_cx0_write(encoder, lane_mask, addr, data, committed);
}

static void intel_lt_phy_rmw(struct intel_encoder *encoder,
			     u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
	intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
}

/* Ack a pending PHY-to-PHY response by clearing the READY bit for @lane. */
static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
					  int lane)
{
	struct intel_display *display = to_intel_display(encoder);

	intel_de_rmw(display,
		     XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
		     XELPDP_PORT_P2M_RESPONSE_READY, 0);
}

/* Message-bus traffic requires DC-off; warn if the power domain is not held. */
static void
assert_dc_off(struct intel_display *display)
{
	bool enabled;

	enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
	drm_WARN_ON(display->drm, !enabled);
}

/*
 * One attempt at a committed P2P message-bus write to @addr on @lane.
 * Returns 0 on success, -ETIMEDOUT if the bus stayed busy, -EINVAL on a
 * PHY-reported error; on failure the bus is reset so a retry can follow.
 * NOTE(review): @mac_reg_addr/@expected_mac_val are currently unused —
 * presumably reserved for a follow-up MAC readback check; confirm.
 */
static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
					 int lane, u16 addr, u8 data,
					 i915_reg_t mac_reg_addr,
					 u8 expected_mac_val)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);
	int ack;
	u32 val;

	if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
				       XELPDP_PORT_P2P_TRANSACTION_PENDING,
				       XELPDP_MSGBUS_TIMEOUT_MS)) {
		drm_dbg_kms(display->drm,
			    "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
			    phy_name(phy));
		intel_cx0_bus_reset(encoder, lane);
		return -ETIMEDOUT;
	}

	/* clear=0/set=0 RMW: write back the status register unchanged (posting) */
	intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), 0, 0);

	intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
		       XELPDP_PORT_P2P_TRANSACTION_PENDING |
		       XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
		       XELPDP_PORT_M2P_DATA(data) |
		       XELPDP_PORT_M2P_ADDRESS(addr));

	ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
	if (ack < 0)
		return ack;

	if (val & XELPDP_PORT_P2M_ERROR_SET) {
		drm_dbg_kms(display->drm,
			    "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
			    phy_name(phy), val);
		intel_lt_phy_clear_status_p2p(encoder, lane);
		intel_cx0_bus_reset(encoder, lane);
		return -EINVAL;
	}

	/*
	 * RE-VISIT:
	 * This needs to be added to give PHY time to set everything up this was a requirement
	 * to get the display up and running
	 * This is the time PHY takes to settle down after programming the PHY.
	 */
	udelay(150);
	intel_clear_response_ready_flag(encoder, lane);
	intel_lt_phy_clear_status_p2p(encoder, lane);

	return 0;
}

/* Retry wrapper for a single-lane P2P write; logs (once) after 3 failures. */
static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder,
				     int lane, u16 addr, u8 data,
				     i915_reg_t mac_reg_addr,
				     u8 expected_mac_val)
{
	struct intel_display *display = to_intel_display(encoder);
	enum phy phy = intel_encoder_to_phy(encoder);
	int i, status;

	assert_dc_off(display);

	/* 3 tries is assumed to be enough to write successfully */
	for (i = 0; i < 3; i++) {
		status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr,
						       expected_mac_val);

		if (status == 0)
			return;
	}

	drm_err_once(display->drm,
		     "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}

/* Broadcast a P2P write to every lane in @lane_mask. */
static void intel_lt_phy_p2p_write(struct intel_encoder *encoder,
				   u8 lane_mask, u16 addr, u8 data,
				   i915_reg_t mac_reg_addr,
				   u8 expected_mac_val)
{
	int lane;

	for_each_lt_phy_lane_in_mask(lane_mask, lane)
		__intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val);
}

static void
intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count)
{
	/*
	 * The new PORT_BUF_CTL6 stuff for dc5 entry and exit needs to be handled
	 * by dmc firmware not explicitly mentioned in Bspec. This leaves this
	 * function as a wrapper only but keeping it expecting future changes.
	 */
	intel_cx0_setup_powerdown(encoder);
}

/* Drive the CX0-style powerdown state machine for the given lanes. */
static void
intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder,
				       u8 lane_mask, u8 state)
{
	intel_cx0_powerdown_change_sequence(encoder, lane_mask, state);
}

/*
 * Bring the owned lanes out of reset: set the MACCLK rate and PHY mode,
 * run the powerdown sequence to P2 reset, release MACCLK reset, request and
 * wait for the lane 0 PLL MacCLK ack, ungate the forward clock, then release
 * the per-lane pipe resets and wait for reset-exit and rate-calibration
 * status (warning, not failing, on each timeout).
 */
static void
intel_lt_phy_lane_reset(struct intel_encoder *encoder,
			u8 lane_count)
{
	struct intel_display *display = to_intel_display(encoder);
	enum port port = encoder->port;
	enum phy phy = intel_encoder_to_phy(encoder);
	u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
	u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
			      ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
			      : XELPDP_LANE_PIPE_RESET(0);
	u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
				      ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
					 XELPDP_LANE_PHY_CURRENT_STATUS(1))
				      : XELPDP_LANE_PHY_CURRENT_STATUS(0);
	u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
				    ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
				       XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
				    : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);

	intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
		     XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF);

	intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
		     XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP);

	intel_lt_phy_setup_powerdown(encoder, lane_count);
	intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
					       XELPDP_P2_STATE_RESET);

	intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
		     XE3PLPD_MACCLK_RESET_0, 0);

	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
		     XELPDP_LANE_PCLK_PLL_REQUEST(0),
		     XELPDP_LANE_PCLK_PLL_REQUEST(0));

	if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
				     XELPDP_LANE_PCLK_PLL_ACK(0),
				     XE3PLPD_MACCLK_TURNON_LATENCY_MS))
		drm_warn(display->drm, "PHY %c PLL MacCLK assertion ack not done\n",
			 phy_name(phy));

	intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
		     XELPDP_FORWARD_CLOCK_UNGATE,
		     XELPDP_FORWARD_CLOCK_UNGATE);

	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
		     lane_pipe_reset | lane_phy_pulse_status, 0);

	if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
				       lane_phy_current_status,
				       XE3PLPD_RESET_END_LATENCY_MS))
		drm_warn(display->drm, "PHY %c failed to bring out of lane reset\n",
			 phy_name(phy));

	if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
				     lane_phy_pulse_status,
				     XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
		drm_warn(display->drm, "PHY %c PLL rate not changed\n",
			 phy_name(phy));

	/* Pulse status is write-1-to-clear via RMW clear mask here */
	intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
}

/*
 * Program PORT_CLOCK_CTL for this port: lane reversal, forward clock
 * ungate, DDI clock select (DIV18CLK for HDMI FRL, otherwise MAXPCLK —
 * which on LT PHY actually carries MACCLK) and SSC enables.
 */
static void
intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
				    const struct intel_crtc_state *crtc_state,
				    bool lane_reversal)
{
	struct intel_display *display = to_intel_display(encoder);
	u32 val = 0;

	intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
		     XELPDP_PORT_REVERSAL,
		     lane_reversal ? XELPDP_PORT_REVERSAL : 0);

	val |= XELPDP_FORWARD_CLOCK_UNGATE;

	/*
	 * We actually mean MACCLK here and not MAXPCLK when using LT Phy
	 * but since the register bits still remain the same we use
	 * the same definition
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    intel_hdmi_is_frl(crtc_state->port_clock))
		val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
	else
		val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);

	/* DP2.0 10G and 20G rates enable MPLLA*/
	if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
		val |= XELPDP_SSC_ENABLE_PLLA;
	else
		val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ?
XELPDP_SSC_ENABLE_PLLB : 0; 1263 1264 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), 1265 XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE | 1266 XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA | 1267 XELPDP_SSC_ENABLE_PLLB, val); 1268 } 1269 1270 static u32 intel_lt_phy_get_dp_clock(u8 rate) 1271 { 1272 switch (rate) { 1273 case 0: 1274 return 162000; 1275 case 1: 1276 return 270000; 1277 case 2: 1278 return 540000; 1279 case 3: 1280 return 810000; 1281 case 4: 1282 return 216000; 1283 case 5: 1284 return 243000; 1285 case 6: 1286 return 324000; 1287 case 7: 1288 return 432000; 1289 case 8: 1290 return 1000000; 1291 case 9: 1292 return 1350000; 1293 case 10: 1294 return 2000000; 1295 case 11: 1296 return 675000; 1297 default: 1298 MISSING_CASE(rate); 1299 return 0; 1300 } 1301 } 1302 1303 static bool 1304 intel_lt_phy_config_changed(struct intel_encoder *encoder, 1305 const struct intel_crtc_state *crtc_state) 1306 { 1307 u8 val, rate; 1308 u32 clock; 1309 1310 val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, 1311 LT_PHY_VDR_0_CONFIG); 1312 rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val); 1313 1314 /* 1315 * The only time we do not reconfigure the PLL is when we are 1316 * using 1.62 Gbps clock since PHY PLL defaults to that 1317 * otherwise we always need to reconfigure it. 
1318 */ 1319 if (intel_crtc_has_dp_encoder(crtc_state)) { 1320 clock = intel_lt_phy_get_dp_clock(rate); 1321 if (crtc_state->port_clock == 1620000 && crtc_state->port_clock == clock) 1322 return false; 1323 } 1324 1325 return true; 1326 } 1327 1328 static struct ref_tracker *intel_lt_phy_transaction_begin(struct intel_encoder *encoder) 1329 { 1330 struct intel_display *display = to_intel_display(encoder); 1331 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1332 struct ref_tracker *wakeref; 1333 1334 intel_psr_pause(intel_dp); 1335 wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF); 1336 1337 return wakeref; 1338 } 1339 1340 static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, struct ref_tracker *wakeref) 1341 { 1342 struct intel_display *display = to_intel_display(encoder); 1343 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1344 1345 intel_psr_resume(intel_dp); 1346 intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref); 1347 } 1348 1349 static const struct intel_lt_phy_pll_state * const * 1350 intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state, 1351 struct intel_encoder *encoder) 1352 { 1353 if (intel_crtc_has_dp_encoder(crtc_state)) { 1354 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 1355 return xe3plpd_lt_edp_tables; 1356 1357 return xe3plpd_lt_dp_tables; 1358 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 1359 return xe3plpd_lt_hdmi_tables; 1360 } 1361 1362 MISSING_CASE(encoder->type); 1363 return NULL; 1364 } 1365 1366 static bool 1367 intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state, 1368 struct intel_encoder *encoder) 1369 { 1370 struct intel_display *display = to_intel_display(encoder); 1371 1372 if (intel_crtc_has_dp_encoder(crtc_state)) { 1373 if (intel_panel_use_ssc(display)) { 1374 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1375 1376 return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); 1377 } 1378 } 1379 1380 
return false; 1381 } 1382 1383 static u64 mul_q32_u32(u64 a_q32, u32 b) 1384 { 1385 u64 p0, p1, carry, result; 1386 u64 x_hi = a_q32 >> 32; 1387 u64 x_lo = a_q32 & 0xFFFFFFFFULL; 1388 1389 p0 = x_lo * (u64)b; 1390 p1 = x_hi * (u64)b; 1391 carry = p0 >> 32; 1392 result = (p1 << 32) + (carry << 32) + (p0 & 0xFFFFFFFFULL); 1393 1394 return result; 1395 } 1396 1397 static bool 1398 calculate_target_dco_and_loop_cnt(u32 frequency_khz, u64 *target_dco_mhz, u32 *loop_cnt) 1399 { 1400 u32 ppm_value = 1; 1401 u32 dco_min_freq = DCO_MIN_FREQ_MHZ; 1402 u32 dco_max_freq = 16200; 1403 u32 dco_min_freq_low = 10000; 1404 u32 dco_max_freq_low = 12000; 1405 u64 val = 0; 1406 u64 refclk_khz = REF_CLK_KHZ; 1407 u64 m2div = 0; 1408 u64 val_with_frac = 0; 1409 u64 ppm = 0; 1410 u64 temp0 = 0, temp1, scale; 1411 int ppm_cnt, dco_count, y; 1412 1413 for (ppm_cnt = 0; ppm_cnt < 5; ppm_cnt++) { 1414 ppm_value = ppm_cnt == 2 ? 2 : 1; 1415 for (dco_count = 0; dco_count < 2; dco_count++) { 1416 if (dco_count == 1) { 1417 dco_min_freq = dco_min_freq_low; 1418 dco_max_freq = dco_max_freq_low; 1419 } 1420 for (y = 2; y <= 255; y += 2) { 1421 val = div64_u64((u64)y * frequency_khz, 200); 1422 m2div = div64_u64(((u64)(val) << 32), refclk_khz); 1423 m2div = mul_q32_u32(m2div, 500); 1424 val_with_frac = mul_q32_u32(m2div, refclk_khz); 1425 val_with_frac = div64_u64(val_with_frac, 500); 1426 temp1 = Q32_TO_INT(val_with_frac); 1427 temp0 = (temp1 > val) ? 
(temp1 - val) : 1428 (val - temp1); 1429 ppm = div64_u64(temp0, val); 1430 if (temp1 >= dco_min_freq && 1431 temp1 <= dco_max_freq && 1432 ppm < ppm_value) { 1433 /* Round to two places */ 1434 scale = (1ULL << 32) / 100; 1435 temp0 = DIV_ROUND_UP_ULL(val_with_frac, 1436 scale); 1437 *target_dco_mhz = temp0 * scale; 1438 *loop_cnt = y; 1439 return true; 1440 } 1441 } 1442 } 1443 } 1444 1445 return false; 1446 } 1447 1448 static void set_phy_vdr_addresses(struct lt_phy_params *p, int pll_type) 1449 { 1450 p->pll_reg4.addr = PLL_REG_ADDR(PLL_REG4_ADDR, pll_type); 1451 p->pll_reg3.addr = PLL_REG_ADDR(PLL_REG3_ADDR, pll_type); 1452 p->pll_reg5.addr = PLL_REG_ADDR(PLL_REG5_ADDR, pll_type); 1453 p->pll_reg57.addr = PLL_REG_ADDR(PLL_REG57_ADDR, pll_type); 1454 p->lf.addr = PLL_REG_ADDR(PLL_LF_ADDR, pll_type); 1455 p->tdc.addr = PLL_REG_ADDR(PLL_TDC_ADDR, pll_type); 1456 p->ssc.addr = PLL_REG_ADDR(PLL_SSC_ADDR, pll_type); 1457 p->bias2.addr = PLL_REG_ADDR(PLL_BIAS2_ADDR, pll_type); 1458 p->bias_trim.addr = PLL_REG_ADDR(PLL_BIAS_TRIM_ADDR, pll_type); 1459 p->dco_med.addr = PLL_REG_ADDR(PLL_DCO_MED_ADDR, pll_type); 1460 p->dco_fine.addr = PLL_REG_ADDR(PLL_DCO_FINE_ADDR, pll_type); 1461 p->ssc_inj.addr = PLL_REG_ADDR(PLL_SSC_INJ_ADDR, pll_type); 1462 p->surv_bonus.addr = PLL_REG_ADDR(PLL_SURV_BONUS_ADDR, pll_type); 1463 } 1464 1465 static void compute_ssc(struct lt_phy_params *p, u32 ana_cfg) 1466 { 1467 int ssc_stepsize = 0; 1468 int ssc_steplen = 0; 1469 int ssc_steplog = 0; 1470 1471 p->ssc.val = (1 << 31) | (ana_cfg << 24) | (ssc_steplog << 16) | 1472 (ssc_stepsize << 8) | ssc_steplen; 1473 } 1474 1475 static void compute_bias2(struct lt_phy_params *p) 1476 { 1477 u32 ssc_en_local = 0; 1478 u64 dynctrl_ovrd_en = 0; 1479 1480 p->bias2.val = (dynctrl_ovrd_en << 31) | (ssc_en_local << 30) | 1481 (1 << 23) | (1 << 24) | (32 << 16) | (1 << 8); 1482 } 1483 1484 static void compute_tdc(struct lt_phy_params *p, u64 tdc_fine) 1485 { 1486 u32 settling_time = 15; 1487 u32 
bias_ovr_en = 1; 1488 u32 coldstart = 1; 1489 u32 true_lock = 2; 1490 u32 early_lock = 1; 1491 u32 lock_ovr_en = 1; 1492 u32 lock_thr = tdc_fine ? 3 : 5; 1493 u32 unlock_thr = tdc_fine ? 5 : 11; 1494 1495 p->tdc.val = (u32)((2 << 30) + (settling_time << 16) + (bias_ovr_en << 15) + 1496 (lock_ovr_en << 14) + (coldstart << 12) + (true_lock << 10) + 1497 (early_lock << 8) + (unlock_thr << 4) + lock_thr); 1498 } 1499 1500 static void compute_dco_med(struct lt_phy_params *p) 1501 { 1502 u32 cselmed_en = 0; 1503 u32 cselmed_dyn_adj = 0; 1504 u32 cselmed_ratio = 39; 1505 u32 cselmed_thr = 8; 1506 1507 p->dco_med.val = (cselmed_en << 31) + (cselmed_dyn_adj << 30) + 1508 (cselmed_ratio << 24) + (cselmed_thr << 21); 1509 } 1510 1511 static void compute_dco_fine(struct lt_phy_params *p, u32 dco_12g) 1512 { 1513 u32 dco_fine0_tune_2_0 = 0; 1514 u32 dco_fine1_tune_2_0 = 0; 1515 u32 dco_fine2_tune_2_0 = 0; 1516 u32 dco_fine3_tune_2_0 = 0; 1517 u32 dco_dith0_tune_2_0 = 0; 1518 u32 dco_dith1_tune_2_0 = 0; 1519 1520 dco_fine0_tune_2_0 = dco_12g ? 4 : 3; 1521 dco_fine1_tune_2_0 = 2; 1522 dco_fine2_tune_2_0 = dco_12g ? 2 : 1; 1523 dco_fine3_tune_2_0 = 5; 1524 dco_dith0_tune_2_0 = dco_12g ? 
4 : 3; 1525 dco_dith1_tune_2_0 = 2; 1526 1527 p->dco_fine.val = (dco_dith1_tune_2_0 << 19) + 1528 (dco_dith0_tune_2_0 << 16) + 1529 (dco_fine3_tune_2_0 << 11) + 1530 (dco_fine2_tune_2_0 << 8) + 1531 (dco_fine1_tune_2_0 << 3) + 1532 dco_fine0_tune_2_0; 1533 } 1534 1535 int 1536 intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state, 1537 u32 frequency_khz) 1538 { 1539 #define DATA_ASSIGN(i, pll_reg) \ 1540 do { \ 1541 lt_state->data[i][0] = (u8)((((pll_reg).val) & 0xFF000000) >> 24); \ 1542 lt_state->data[i][1] = (u8)((((pll_reg).val) & 0x00FF0000) >> 16); \ 1543 lt_state->data[i][2] = (u8)((((pll_reg).val) & 0x0000FF00) >> 8); \ 1544 lt_state->data[i][3] = (u8)((((pll_reg).val) & 0x000000FF)); \ 1545 } while (0) 1546 #define ADDR_ASSIGN(i, pll_reg) \ 1547 do { \ 1548 lt_state->addr_msb[i] = ((pll_reg).addr >> 8) & 0xFF; \ 1549 lt_state->addr_lsb[i] = (pll_reg).addr & 0xFF; \ 1550 } while (0) 1551 1552 bool found = false; 1553 struct lt_phy_params p; 1554 u32 dco_fmin = DCO_MIN_FREQ_MHZ; 1555 u64 refclk_khz = REF_CLK_KHZ; 1556 u32 refclk_mhz_int = REF_CLK_KHZ / 1000; 1557 u64 m2div = 0; 1558 u64 target_dco_mhz = 0; 1559 u64 tdc_fine, tdc_targetcnt; 1560 u64 feedfwd_gain ,feedfwd_cal_en; 1561 u64 tdc_res = 30; 1562 u32 prop_coeff; 1563 u32 int_coeff; 1564 u32 ndiv = 1; 1565 u32 m1div = 1, m2div_int, m2div_frac; 1566 u32 frac_en; 1567 u32 ana_cfg; 1568 u32 loop_cnt = 0; 1569 u32 gain_ctrl = 2; 1570 u32 postdiv = 0; 1571 u32 dco_12g = 0; 1572 u32 pll_type = 0; 1573 u32 d1 = 2, d3 = 5, d4 = 0, d5 = 0; 1574 u32 d6 = 0, d6_new = 0; 1575 u32 d7, d8 = 0; 1576 u32 bonus_7_0 = 0; 1577 u32 csel2fo = 11; 1578 u32 csel2fo_ovrd_en = 1; 1579 u64 temp0, temp1, temp2, temp3; 1580 1581 p.surv_bonus.val = (bonus_7_0 << 16); 1582 p.pll_reg4.val = (refclk_mhz_int << 17) + 1583 (ndiv << 9) + (1 << 4); 1584 p.bias_trim.val = (csel2fo_ovrd_en << 30) + (csel2fo << 24); 1585 p.ssc_inj.val = 0; 1586 found = calculate_target_dco_and_loop_cnt(frequency_khz, &target_dco_mhz, 
&loop_cnt); 1587 if (!found) 1588 return -EINVAL; 1589 1590 m2div = div64_u64(target_dco_mhz, (refclk_khz * ndiv * m1div)); 1591 m2div = mul_q32_u32(m2div, 1000); 1592 if (Q32_TO_INT(m2div) > 511) 1593 return -EINVAL; 1594 1595 m2div_int = (u32)Q32_TO_INT(m2div); 1596 m2div_frac = (u32)(Q32_TO_FRAC(m2div)); 1597 frac_en = (m2div_frac > 0) ? 1 : 0; 1598 1599 if (frac_en > 0) 1600 tdc_res = 70; 1601 else 1602 tdc_res = 36; 1603 tdc_fine = tdc_res > 50 ? 1 : 0; 1604 temp0 = tdc_res * 40 * 11; 1605 temp1 = div64_u64(((4 * TDC_RES_MULTIPLIER) + temp0) * 500, temp0 * refclk_khz); 1606 temp2 = div64_u64(temp0 * refclk_khz, 1000); 1607 temp3 = div64_u64(((8 * TDC_RES_MULTIPLIER) + temp2), temp2); 1608 tdc_targetcnt = tdc_res < 50 ? (int)(temp1) : (int)(temp3); 1609 tdc_targetcnt = (int)(tdc_targetcnt / 2); 1610 temp0 = mul_q32_u32(target_dco_mhz, tdc_res); 1611 temp0 >>= 32; 1612 feedfwd_gain = (m2div_frac > 0) ? div64_u64(m1div * TDC_RES_MULTIPLIER, temp0) : 0; 1613 feedfwd_cal_en = frac_en; 1614 1615 temp0 = (u32)Q32_TO_INT(target_dco_mhz); 1616 prop_coeff = (temp0 >= dco_fmin) ? 3 : 4; 1617 int_coeff = (temp0 >= dco_fmin) ? 7 : 8; 1618 ana_cfg = (temp0 >= dco_fmin) ? 8 : 6; 1619 dco_12g = (temp0 >= dco_fmin) ? 0 : 1; 1620 1621 if (temp0 > 12960) 1622 d7 = 10; 1623 else 1624 d7 = 8; 1625 1626 d8 = loop_cnt / 2; 1627 d4 = d8 * 2; 1628 1629 /* Compute pll_reg3,5,57 & lf */ 1630 p.pll_reg3.val = (u32)((d4 << 21) + (d3 << 18) + (d1 << 15) + (m2div_int << 5)); 1631 p.pll_reg5.val = m2div_frac; 1632 postdiv = (d5 == 0) ? 9 : d5; 1633 d6_new = (d6 == 0) ? 
40 : d6; 1634 p.pll_reg57.val = (d7 << 24) + (postdiv << 15) + (d8 << 7) + d6_new; 1635 p.lf.val = (u32)((frac_en << 31) + (1 << 30) + (frac_en << 29) + 1636 (feedfwd_cal_en << 28) + (tdc_fine << 27) + 1637 (gain_ctrl << 24) + (feedfwd_gain << 16) + 1638 (int_coeff << 12) + (prop_coeff << 8) + tdc_targetcnt); 1639 1640 compute_ssc(&p, ana_cfg); 1641 compute_bias2(&p); 1642 compute_tdc(&p, tdc_fine); 1643 compute_dco_med(&p); 1644 compute_dco_fine(&p, dco_12g); 1645 1646 pll_type = ((frequency_khz == 10000) || (frequency_khz == 20000) || 1647 (frequency_khz == 2500) || (dco_12g == 1)) ? 0 : 1; 1648 set_phy_vdr_addresses(&p, pll_type); 1649 1650 lt_state->config[0] = 0x84; 1651 lt_state->config[1] = 0x2d; 1652 ADDR_ASSIGN(0, p.pll_reg4); 1653 ADDR_ASSIGN(1, p.pll_reg3); 1654 ADDR_ASSIGN(2, p.pll_reg5); 1655 ADDR_ASSIGN(3, p.pll_reg57); 1656 ADDR_ASSIGN(4, p.lf); 1657 ADDR_ASSIGN(5, p.tdc); 1658 ADDR_ASSIGN(6, p.ssc); 1659 ADDR_ASSIGN(7, p.bias2); 1660 ADDR_ASSIGN(8, p.bias_trim); 1661 ADDR_ASSIGN(9, p.dco_med); 1662 ADDR_ASSIGN(10, p.dco_fine); 1663 ADDR_ASSIGN(11, p.ssc_inj); 1664 ADDR_ASSIGN(12, p.surv_bonus); 1665 DATA_ASSIGN(0, p.pll_reg4); 1666 DATA_ASSIGN(1, p.pll_reg3); 1667 DATA_ASSIGN(2, p.pll_reg5); 1668 DATA_ASSIGN(3, p.pll_reg57); 1669 DATA_ASSIGN(4, p.lf); 1670 DATA_ASSIGN(5, p.tdc); 1671 DATA_ASSIGN(6, p.ssc); 1672 DATA_ASSIGN(7, p.bias2); 1673 DATA_ASSIGN(8, p.bias_trim); 1674 DATA_ASSIGN(9, p.dco_med); 1675 DATA_ASSIGN(10, p.dco_fine); 1676 DATA_ASSIGN(11, p.ssc_inj); 1677 DATA_ASSIGN(12, p.surv_bonus); 1678 1679 return 0; 1680 } 1681 1682 static int 1683 intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state) 1684 { 1685 #define REGVAL(i) ( \ 1686 (lt_state->data[i][3]) | \ 1687 (lt_state->data[i][2] << 8) | \ 1688 (lt_state->data[i][1] << 16) | \ 1689 (lt_state->data[i][0] << 24) \ 1690 ) 1691 1692 struct intel_display *display = to_intel_display(crtc_state); 1693 const struct intel_lt_phy_pll_state *lt_state = 1694 
&crtc_state->dpll_hw_state.ltpll; 1695 int clk = 0; 1696 u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int; 1697 u64 temp0, temp1; 1698 /* 1699 * The algorithm uses '+' to combine bitfields when 1700 * constructing PLL_reg3 and PLL_reg57: 1701 * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new; 1702 * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5); 1703 * 1704 * However, this is likely intended to be a bitwise OR operation, 1705 * as each field occupies distinct, non-overlapping bits in the register. 1706 * 1707 * PLL_reg57 is composed of following fields packed into a 32-bit value: 1708 * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27 1709 * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18 1710 * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits 1711 * (though 8 bits are given to it) -> placed at bits 7-14 1712 * - D6_new: fits in lower 7 bits -> placed at bits 0-6 1713 * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new; 1714 * 1715 * Similarly, PLL_reg3 is packed as: 1716 * - D4: max value 256 -> fits in 9 bits -> placed at bits 21-29 1717 * - D3: max value 9 -> fits in 4 bits -> placed at bits 18-21 1718 * - D1: max value 2 -> fits in 2 bits -> placed at bits 15-16 1719 * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated) 1720 * -> placed at bits 5-14 1721 * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5); 1722 */ 1723 pll_reg_5 = REGVAL(2); 1724 pll_reg_3 = REGVAL(1); 1725 pll_reg_57 = REGVAL(3); 1726 m2div_frac = pll_reg_5; 1727 1728 /* 1729 * From forward algorithm we know 1730 * m2div = 2 * m2 1731 * val = y * frequency * 5 1732 * So now, 1733 * frequency = (m2 * 2 * refclk_khz / (d8 * 10)) 1734 * frequency = (m2div * refclk_khz / (d8 * 10)) 1735 */ 1736 d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7; 1737 if (d8 == 0) { 1738 drm_WARN_ON(display->drm, 1739 "Invalid port clock using lowest HDMI portclock\n"); 1740 return 
xe3plpd_lt_hdmi_252.clock; 1741 } 1742 m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5; 1743 temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32; 1744 temp1 = (u64)m2div_int * REF_CLK_KHZ; 1745 1746 clk = div_u64((temp1 + temp0), d8 * 10); 1747 1748 return clk; 1749 } 1750 1751 int 1752 intel_lt_phy_calc_port_clock(struct intel_encoder *encoder, 1753 const struct intel_crtc_state *crtc_state) 1754 { 1755 struct intel_display *display = to_intel_display(encoder); 1756 int clk; 1757 const struct intel_lt_phy_pll_state *lt_state = 1758 &crtc_state->dpll_hw_state.ltpll; 1759 u8 mode, rate; 1760 1761 mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK, 1762 lt_state->config[0]); 1763 /* 1764 * For edp/dp read the clock value from the tables 1765 * and return the clock as the algorithm used for 1766 * calculating the port clock does not exactly matches 1767 * with edp/dp clock. 1768 */ 1769 if (mode == MODE_DP) { 1770 rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, 1771 lt_state->config[0]); 1772 clk = intel_lt_phy_get_dp_clock(rate); 1773 } else if (mode == MODE_HDMI_20) { 1774 clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state); 1775 } else { 1776 drm_WARN_ON(display->drm, "Unsupported LT PHY Mode!\n"); 1777 clk = xe3plpd_lt_hdmi_252.clock; 1778 } 1779 1780 return clk; 1781 } 1782 1783 int 1784 intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state, 1785 struct intel_encoder *encoder) 1786 { 1787 const struct intel_lt_phy_pll_state * const *tables; 1788 int i; 1789 1790 tables = intel_lt_phy_pll_tables_get(crtc_state, encoder); 1791 if (!tables) 1792 return -EINVAL; 1793 1794 for (i = 0; tables[i]; i++) { 1795 if (crtc_state->port_clock == tables[i]->clock) { 1796 crtc_state->dpll_hw_state.ltpll = *tables[i]; 1797 if (intel_crtc_has_dp_encoder(crtc_state)) { 1798 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 1799 crtc_state->dpll_hw_state.ltpll.config[2] = 1; 1800 } 1801 crtc_state->dpll_hw_state.ltpll.ssc_enabled = 1802 
intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder); 1803 return 0; 1804 } 1805 } 1806 1807 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 1808 return intel_lt_phy_calculate_hdmi_state(&crtc_state->dpll_hw_state.ltpll, 1809 crtc_state->port_clock); 1810 } 1811 1812 return -EINVAL; 1813 } 1814 1815 static void 1816 intel_lt_phy_program_pll(struct intel_encoder *encoder, 1817 const struct intel_crtc_state *crtc_state) 1818 { 1819 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); 1820 int i, j, k; 1821 1822 intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG, 1823 crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED); 1824 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG, 1825 crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED); 1826 intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG, 1827 crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED); 1828 1829 for (i = 0; i <= 12; i++) { 1830 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i), 1831 crtc_state->dpll_hw_state.ltpll.addr_msb[i], 1832 MB_WRITE_COMMITTED); 1833 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i), 1834 crtc_state->dpll_hw_state.ltpll.addr_lsb[i], 1835 MB_WRITE_COMMITTED); 1836 1837 for (j = 3, k = 0; j >= 0; j--, k++) 1838 intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, 1839 LT_PHY_VDR_X_DATAY(i, j), 1840 crtc_state->dpll_hw_state.ltpll.data[i][k], 1841 MB_WRITE_COMMITTED); 1842 } 1843 } 1844 1845 static void 1846 intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder, 1847 const struct intel_crtc_state *crtc_state) 1848 { 1849 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1850 bool lane_reversal = dig_port->lane_reversal; 1851 u8 lane_count = crtc_state->lane_count; 1852 bool is_dp_alt = 1853 intel_tc_port_in_dp_alt_mode(dig_port); 1854 enum intel_tc_pin_assignment tc_pin = 1855 intel_tc_port_get_pin_assignment(dig_port); 1856 u8 
transmitter_mask = 0; 1857 1858 /* 1859 * We have a two transmitters per lane and total of 2 PHY lanes so a total 1860 * of 4 transmitters. We prepare a mask of the lanes that need to be activated 1861 * and the transmitter which need to be activated for each lane. TX 0,1 correspond 1862 * to LANE0 and TX 2, 3 correspond to LANE1. 1863 */ 1864 1865 switch (lane_count) { 1866 case 1: 1867 transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0); 1868 if (is_dp_alt) { 1869 if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D) 1870 transmitter_mask = REG_BIT8(0); 1871 else 1872 transmitter_mask = REG_BIT8(1); 1873 } 1874 break; 1875 case 2: 1876 transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0); 1877 if (is_dp_alt) 1878 transmitter_mask = REG_GENMASK8(1, 0); 1879 break; 1880 case 3: 1881 transmitter_mask = lane_reversal ? REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0); 1882 if (is_dp_alt) 1883 transmitter_mask = REG_GENMASK8(2, 0); 1884 break; 1885 case 4: 1886 transmitter_mask = REG_GENMASK8(3, 0); 1887 break; 1888 default: 1889 MISSING_CASE(lane_count); 1890 transmitter_mask = REG_GENMASK8(3, 0); 1891 break; 1892 } 1893 1894 if (transmitter_mask & BIT(0)) { 1895 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0), 1896 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0), 1897 LT_PHY_TX_LANE_ENABLE); 1898 } else { 1899 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0), 1900 0, LT_PHY_TXY_CTL10_MAC(0), 0); 1901 } 1902 1903 if (transmitter_mask & BIT(1)) { 1904 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1), 1905 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1), 1906 LT_PHY_TX_LANE_ENABLE); 1907 } else { 1908 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1), 1909 0, LT_PHY_TXY_CTL10_MAC(1), 0); 1910 } 1911 1912 if (transmitter_mask & BIT(2)) { 1913 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0), 1914 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0), 1915 
LT_PHY_TX_LANE_ENABLE); 1916 } else { 1917 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0), 1918 0, LT_PHY_TXY_CTL10_MAC(0), 0); 1919 } 1920 1921 if (transmitter_mask & BIT(3)) { 1922 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1), 1923 LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1), 1924 LT_PHY_TX_LANE_ENABLE); 1925 } else { 1926 intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1), 1927 0, LT_PHY_TXY_CTL10_MAC(1), 0); 1928 } 1929 } 1930 1931 void intel_lt_phy_pll_enable(struct intel_encoder *encoder, 1932 const struct intel_crtc_state *crtc_state) 1933 { 1934 struct intel_display *display = to_intel_display(encoder); 1935 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1936 bool lane_reversal = dig_port->lane_reversal; 1937 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); 1938 enum phy phy = intel_encoder_to_phy(encoder); 1939 enum port port = encoder->port; 1940 struct ref_tracker *wakeref = 0; 1941 u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES 1942 ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) | 1943 XE3PLPDP_LANE_PHY_PULSE_STATUS(1)) 1944 : XE3PLPDP_LANE_PHY_PULSE_STATUS(0); 1945 u8 rate_update; 1946 1947 wakeref = intel_lt_phy_transaction_begin(encoder); 1948 1949 /* 1. Enable MacCLK at default 162 MHz frequency. */ 1950 intel_lt_phy_lane_reset(encoder, crtc_state->lane_count); 1951 1952 /* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */ 1953 intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal); 1954 1955 /* 3. Change owned PHY lanes power to Ready state. */ 1956 intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask, 1957 XELPDP_P2_STATE_READY); 1958 1959 /* 1960 * 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type, 1961 * encoded rate and encoded mode. 1962 */ 1963 if (intel_lt_phy_config_changed(encoder, crtc_state)) { 1964 /* 1965 * 5. 
Program the PHY internal PLL registers over PHY message bus for the desired 1966 * frequency and protocol type 1967 */ 1968 intel_lt_phy_program_pll(encoder, crtc_state); 1969 1970 /* 6. Use the P2P transaction flow */ 1971 /* 1972 * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message 1973 * bus for Owned PHY Lanes. 1974 */ 1975 /* 1976 * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR 1977 * register at offset 0xC00 for Owned PHY Lanes*. 1978 */ 1979 /* 6.3. Clear P2P transaction Ready bit. */ 1980 intel_lt_phy_p2p_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE, 1981 LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR, 1982 LT_PHY_PCLKIN_GATE); 1983 1984 /* 7. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */ 1985 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), 1986 XELPDP_LANE_PCLK_PLL_REQUEST(0), 0); 1987 1988 /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */ 1989 if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port), 1990 XELPDP_LANE_PCLK_PLL_ACK(0), 1991 XE3PLPD_MACCLK_TURNOFF_LATENCY_US)) 1992 drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n", 1993 phy_name(phy)); 1994 1995 /* 1996 * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency 1997 * Change. We handle this step in bxt_set_cdclk(). 1998 */ 1999 /* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */ 2000 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 2001 crtc_state->port_clock); 2002 2003 /* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */ 2004 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), 2005 XELPDP_LANE_PCLK_PLL_REQUEST(0), 2006 XELPDP_LANE_PCLK_PLL_REQUEST(0)); 2007 2008 /* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. 
*/ 2009 if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port), 2010 XELPDP_LANE_PCLK_PLL_ACK(0), 2011 XE3PLPD_MACCLK_TURNON_LATENCY_MS)) 2012 drm_warn(display->drm, "PHY %c PLL MacCLK ack assertion timeout\n", 2013 phy_name(phy)); 2014 2015 /* 2016 * 13. Ungate the forward clock by setting 2017 * PORT_CLOCK_CTL[Forward Clock Ungate] = 1. 2018 */ 2019 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), 2020 XELPDP_FORWARD_CLOCK_UNGATE, 2021 XELPDP_FORWARD_CLOCK_UNGATE); 2022 2023 /* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */ 2024 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), 2025 lane_phy_pulse_status, 2026 lane_phy_pulse_status); 2027 /* 2028 * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over 2029 * PHY message bus for Owned PHY Lanes. 2030 */ 2031 rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE); 2032 rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE; 2033 intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE, 2034 rate_update, MB_WRITE_COMMITTED); 2035 2036 /* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */ 2037 if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port), 2038 lane_phy_pulse_status, 2039 XE3PLPD_RATE_CALIB_DONE_LATENCY_MS)) 2040 drm_warn(display->drm, "PHY %c PLL rate not changed\n", 2041 phy_name(phy)); 2042 2043 /* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */ 2044 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), 2045 lane_phy_pulse_status, 2046 lane_phy_pulse_status); 2047 } else { 2048 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock); 2049 } 2050 2051 /* 2052 * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change. 2053 * We handle this step in bxt_set_cdclk() 2054 */ 2055 /* 19. 
Move the PHY powerdown state to Active and program to enable/disable transmitters */ 2056 intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask, 2057 XELPDP_P0_STATE_ACTIVE); 2058 2059 intel_lt_phy_enable_disable_tx(encoder, crtc_state); 2060 intel_lt_phy_transaction_end(encoder, wakeref); 2061 } 2062 2063 void intel_lt_phy_pll_disable(struct intel_encoder *encoder) 2064 { 2065 struct intel_display *display = to_intel_display(encoder); 2066 enum phy phy = intel_encoder_to_phy(encoder); 2067 enum port port = encoder->port; 2068 struct ref_tracker *wakeref; 2069 u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); 2070 u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES 2071 ? (XELPDP_LANE_PIPE_RESET(0) | 2072 XELPDP_LANE_PIPE_RESET(1)) 2073 : XELPDP_LANE_PIPE_RESET(0); 2074 u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES 2075 ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) | 2076 XELPDP_LANE_PHY_CURRENT_STATUS(1)) 2077 : XELPDP_LANE_PHY_CURRENT_STATUS(0); 2078 u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES 2079 ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) | 2080 XE3PLPDP_LANE_PHY_PULSE_STATUS(1)) 2081 : XE3PLPDP_LANE_PHY_PULSE_STATUS(0); 2082 2083 wakeref = intel_lt_phy_transaction_begin(encoder); 2084 2085 /* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */ 2086 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), 2087 lane_phy_pulse_status, 2088 lane_phy_pulse_status); 2089 2090 /* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */ 2091 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 2092 lane_pipe_reset); 2093 2094 /* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */ 2095 if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port), 2096 lane_phy_current_status, 2097 XE3PLPD_RESET_START_LATENCY_US)) 2098 drm_warn(display->drm, "PHY %c failed to reset lane\n", 2099 phy_name(phy)); 2100 2101 /* 4. 
Clear for PHY pulse status on owned PHY lanes. */ 2102 intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), 2103 lane_phy_pulse_status, 2104 lane_phy_pulse_status); 2105 2106 /* 2107 * 5. Follow the Display Voltage Frequency Switching - 2108 * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk(). 2109 */ 2110 /* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */ 2111 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), 2112 XELPDP_LANE_PCLK_PLL_REQUEST(0), 0); 2113 2114 /* 7. Program DDI_CLK_VALFREQ to 0. */ 2115 intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0); 2116 2117 /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */ 2118 if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port), 2119 XELPDP_LANE_PCLK_PLL_ACK(0), 2120 XE3PLPD_MACCLK_TURNOFF_LATENCY_US)) 2121 drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n", 2122 phy_name(phy)); 2123 2124 /* 2125 * 9. Follow the Display Voltage Frequency Switching - 2126 * Sequence After Frequency Change. We handle this step in bxt_set_cdclk(). 2127 */ 2128 /* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */ 2129 intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port), 2130 XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0); 2131 2132 /* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. 
*/ 2133 intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port), 2134 XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0); 2135 2136 intel_lt_phy_transaction_end(encoder, wakeref); 2137 } 2138 2139 void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder, 2140 const struct intel_crtc_state *crtc_state) 2141 { 2142 struct intel_display *display = to_intel_display(encoder); 2143 const struct intel_ddi_buf_trans *trans; 2144 u8 owned_lane_mask; 2145 struct ref_tracker *wakeref; 2146 int n_entries, ln; 2147 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2148 2149 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2150 return; 2151 2152 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); 2153 2154 wakeref = intel_lt_phy_transaction_begin(encoder); 2155 2156 trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); 2157 if (drm_WARN_ON_ONCE(display->drm, !trans)) { 2158 intel_lt_phy_transaction_end(encoder, wakeref); 2159 return; 2160 } 2161 2162 for (ln = 0; ln < crtc_state->lane_count; ln++) { 2163 int level = intel_ddi_level(encoder, crtc_state, ln); 2164 int lane = ln / 2; 2165 int tx = ln % 2; 2166 u8 lane_mask = lane == 0 ? 
INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1; 2167 2168 if (!(lane_mask & owned_lane_mask)) 2169 continue; 2170 2171 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx), 2172 LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK, 2173 LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) | 2174 LT_PHY_TX_SWING(trans->entries[level].lt.txswing), 2175 MB_WRITE_COMMITTED); 2176 2177 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx), 2178 LT_PHY_TX_CURSOR_MASK, 2179 LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor), 2180 MB_WRITE_COMMITTED); 2181 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx), 2182 LT_PHY_TX_CURSOR_MASK, 2183 LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor), 2184 MB_WRITE_COMMITTED); 2185 intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx), 2186 LT_PHY_TX_CURSOR_MASK, 2187 LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor), 2188 MB_WRITE_COMMITTED); 2189 } 2190 2191 intel_lt_phy_transaction_end(encoder, wakeref); 2192 } 2193 2194 void intel_lt_phy_dump_hw_state(struct intel_display *display, 2195 const struct intel_lt_phy_pll_state *hw_state) 2196 { 2197 int i, j; 2198 2199 drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n"); 2200 for (i = 0; i < 3; i++) { 2201 drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n", 2202 i, hw_state->config[i]); 2203 } 2204 2205 for (i = 0; i <= 12; i++) 2206 for (j = 3; j >= 0; j--) 2207 drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n", 2208 i, j, hw_state->data[i][j]); 2209 } 2210 2211 bool 2212 intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a, 2213 const struct intel_lt_phy_pll_state *b) 2214 { 2215 /* 2216 * With LT PHY values other than VDR0_CONFIG and VDR2_CONFIG are 2217 * unreliable. They cannot always be read back since internally 2218 * after power gating values are not restored back to the 2219 * shadow VDR registers. Thus we do not compare the whole state 2220 * just the two VDR registers. 
2221 */ 2222 if (a->config[0] == b->config[0] && 2223 a->config[2] == b->config[2]) 2224 return true; 2225 2226 return false; 2227 } 2228 2229 void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder, 2230 const struct intel_crtc_state *crtc_state, 2231 struct intel_lt_phy_pll_state *pll_state) 2232 { 2233 u8 owned_lane_mask; 2234 u8 lane; 2235 struct ref_tracker *wakeref; 2236 int i, j, k; 2237 2238 pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)); 2239 if (pll_state->tbt_mode) 2240 return; 2241 2242 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder); 2243 lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1; 2244 wakeref = intel_lt_phy_transaction_begin(encoder); 2245 2246 pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG); 2247 pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG); 2248 pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG); 2249 2250 for (i = 0; i <= 12; i++) { 2251 for (j = 3, k = 0; j >= 0; j--, k++) 2252 pll_state->data[i][k] = 2253 intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, 2254 LT_PHY_VDR_X_DATAY(i, j)); 2255 } 2256 2257 pll_state->clock = 2258 intel_lt_phy_calc_port_clock(encoder, crtc_state); 2259 intel_lt_phy_transaction_end(encoder, wakeref); 2260 } 2261 2262 void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state, 2263 struct intel_crtc *crtc) 2264 { 2265 struct intel_display *display = to_intel_display(state); 2266 struct intel_digital_port *dig_port; 2267 const struct intel_crtc_state *new_crtc_state = 2268 intel_atomic_get_new_crtc_state(state, crtc); 2269 struct intel_encoder *encoder; 2270 struct intel_lt_phy_pll_state pll_hw_state = {}; 2271 const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll; 2272 2273 if (DISPLAY_VER(display) < 35) 2274 return; 2275 2276 if (!new_crtc_state->hw.active) 2277 return; 2278 2279 /* 
intel_get_crtc_new_encoder() only works for modeset/fastset commits */ 2280 if (!intel_crtc_needs_modeset(new_crtc_state) && 2281 !intel_crtc_needs_fastset(new_crtc_state)) 2282 return; 2283 2284 encoder = intel_get_crtc_new_encoder(state, new_crtc_state); 2285 intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state); 2286 2287 dig_port = enc_to_dig_port(encoder); 2288 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2289 return; 2290 2291 INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[0] != pll_sw_state->config[0], 2292 "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG 0: (expected 0x%04x, found 0x%04x)", 2293 crtc->base.base.id, crtc->base.name, 2294 pll_sw_state->config[0], pll_hw_state.config[0]); 2295 INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[2] != pll_sw_state->config[2], 2296 "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG 2: (expected 0x%04x, found 0x%04x)", 2297 crtc->base.base.id, crtc->base.name, 2298 pll_sw_state->config[2], pll_hw_state.config[2]); 2299 } 2300 2301 void intel_xe3plpd_pll_enable(struct intel_encoder *encoder, 2302 const struct intel_crtc_state *crtc_state) 2303 { 2304 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2305 2306 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2307 intel_mtl_tbt_pll_enable_clock(encoder, crtc_state->port_clock); 2308 else 2309 intel_lt_phy_pll_enable(encoder, crtc_state); 2310 } 2311 2312 void intel_xe3plpd_pll_disable(struct intel_encoder *encoder) 2313 { 2314 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2315 2316 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 2317 intel_mtl_tbt_pll_disable_clock(encoder); 2318 else 2319 intel_lt_phy_pll_disable(encoder); 2320 2321 } 2322