/*
 * Copyright © 2006-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/time.h>

#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_psr.h"
#include "vlv_sideband.h"

/**
 * DOC: CDCLK / RAWCLK
 *
 * The display engine uses several different clocks to do its work. There
 * are two main clocks involved that aren't directly related to the actual
 * pixel clock or any symbol/bit clock of the actual output port. These
 * are the core display clock (CDCLK) and RAWCLK.
 *
 * CDCLK clocks most of the display pipe logic, and thus its frequency
 * must be high enough to support the rate at which pixels are flowing
 * through the pipes. Downscaling must also be accounted for as that
 * increases the effective pixel rate.
 *
 * On several platforms the CDCLK frequency can be changed dynamically
 * to minimize power consumption for a given display configuration.
 * Typically changes to the CDCLK frequency require all the display pipes
 * to be shut down while the frequency is being changed.
 *
 * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
 * DMC will not change the active CDCLK frequency however, so that part
 * will still be performed by the driver directly.
 *
 * RAWCLK is a fixed frequency clock, often used by various auxiliary
 * blocks such as AUX CH or backlight PWM. Hence the only thing we
 * really need to know about RAWCLK is its frequency so that various
 * dividers can be programmed correctly.
 */

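/*
 * Rough illustration of the above (not from bspec): a mode with a 300 MHz
 * dot clock needs the CDCLK-driven pipe logic to keep up with roughly that
 * pixel rate, and downscaling the image by 2x horizontally roughly doubles
 * the rate the pipe has to sustain, raising the minimum acceptable CDCLK
 * accordingly. The exact per-platform factors are applied when the minimum
 * CDCLK for an atomic state is computed.
 */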
struct intel_cdclk_funcs {
	void (*get_cdclk)(struct drm_i915_private *i915,
			  struct intel_cdclk_config *cdclk_config);
	void (*set_cdclk)(struct drm_i915_private *i915,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe);
	int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
	int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
	u8 (*calc_voltage_level)(int cdclk);
};

void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
}

static int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state);
}

static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
				  const struct intel_cdclk_config *cdclk_config,
				  enum pipe pipe)
{
	dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
}

static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
					   struct intel_cdclk_state *cdclk_config)
{
	return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
}

static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
					 int cdclk)
{
	return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
}

static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 133333;
}

static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 200000;
}

static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 266667;
}

static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 333333;
}

static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 400000;
}

static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
				   struct intel_cdclk_config *cdclk_config)
{
	cdclk_config->cdclk = 450000;
}

static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (pdev->revision == 0x1) {
		cdclk_config->cdclk = 133333;
		return;
	}

	pci_bus_read_config_word(pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);

	/* Assume that the hardware is in the high speed state. This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		cdclk_config->cdclk = 200000;
		break;
	case GC_CLOCK_166_250:
		cdclk_config->cdclk = 250000;
		break;
	case GC_CLOCK_100_133:
		cdclk_config->cdclk = 133333;
		break;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		cdclk_config->cdclk = 266667;
		break;
	}
}

static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
		cdclk_config->cdclk = 133333;
		return;
	}

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_320_MHZ:
		cdclk_config->cdclk = 333333;
		break;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		cdclk_config->cdclk = 190000;
		break;
	}
}

static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
		cdclk_config->cdclk = 133333;
		return;
	}

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_320_MHZ:
		cdclk_config->cdclk = 320000;
		break;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		cdclk_config->cdclk = 200000;
		break;
	}
}

static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
{
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	u8 tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev_priv))
		vco_table = ctg_vco;
	else if (IS_G45(dev_priv))
		vco_table = elk_vco;
	else if (IS_I965GM(dev_priv))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev_priv))
		vco_table = pnv_vco;
	else if (IS_G33(dev_priv))
		vco_table = blb_vco;
	else
		return 0;

	tmp = intel_de_read(dev_priv,
			    IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
			    HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
			tmp);
	else
		drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);

	return vco;
}

static void g33_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
	static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
	static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
	static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
	const u8 *div_table;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 4) & 0x7;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (cdclk_config->vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 4800000:
		div_table = div_4800;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
						div_table[cdclk_sel]);
	return;

fail:
	drm_err(&dev_priv->drm,
		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
		cdclk_config->vco, tmp);
	cdclk_config->cdclk = 190476;
}

static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	u16 gcfgc = 0;

	pci_read_config_word(pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		cdclk_config->cdclk = 266667;
		break;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		cdclk_config->cdclk = 333333;
		break;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		cdclk_config->cdclk = 444444;
		break;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		cdclk_config->cdclk = 200000;
		break;
	default:
		drm_err(&dev_priv->drm,
			"Unknown pnv display core clock 0x%04x\n", gcfgc);
		fallthrough;
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		cdclk_config->cdclk = 133333;
		break;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		cdclk_config->cdclk = 166667;
		break;
	}
}

static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	static const u8 div_3200[] = { 16, 10, 8 };
	static const u8 div_4000[] = { 20, 12, 10 };
	static const u8 div_5333[] = { 24, 16, 14 };
	const u8 *div_table;
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (cdclk_config->vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
						div_table[cdclk_sel]);
	return;

fail:
	drm_err(&dev_priv->drm,
		"Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
		cdclk_config->vco, tmp);
	cdclk_config->cdclk = 200000;
}

static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
			   struct intel_cdclk_config *cdclk_config)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	unsigned int cdclk_sel;
	u16 tmp = 0;

	cdclk_config->vco = intel_hpll_vco(dev_priv);

	pci_read_config_word(pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (cdclk_config->vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
		break;
	case 3200000:
		cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
		break;
	default:
		drm_err(&dev_priv->drm,
			"Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
			cdclk_config->vco, tmp);
		cdclk_config->cdclk = 222222;
		break;
	}
}

static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		cdclk_config->cdclk = 800000;
	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		cdclk_config->cdclk = 450000;
	else if (IS_HSW_ULT(dev_priv))
		cdclk_config->cdclk = 337500;
	else
		cdclk_config->cdclk = 540000;
}

static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
		333333 : 320000;

	/*
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
		return 400000;
	else if (min_cdclk > 266667)
		return freq_320;
	else if (min_cdclk > 0)
		return 266667;
	else
		return 200000;
}

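/*
 * Worked example for the freq_320 selection above (HPLL values purely
 * illustrative): with a 1600 MHz HPLL, 2 * 1600000 kHz divides evenly by
 * 320000 so the 320 MHz step is used; with a 2000 MHz HPLL the remainder
 * is non-zero and the 333.33 MHz step is used instead. The CCK divider
 * that CHV hands to the Punit below is derived the same way:
 * DIV_ROUND_CLOSEST(2 * hpll, cdclk) - 1, e.g. 2 * 1600000 / 320000 - 1 = 9.
 */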
static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
	if (IS_VALLEYVIEW(dev_priv)) {
		if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
			return 2;
		else if (cdclk >= 266667)
			return 1;
		else
			return 0;
	} else {
		/*
		 * Specs are full of misinformation, but testing on actual
		 * hardware has shown that we just need to write the desired
		 * CCK divider into the Punit register.
		 */
		return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
	}
}

static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 val;

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
	cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
						CCK_DISPLAY_CLOCK_CONTROL,
						cdclk_config->vco);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));

	if (IS_VALLEYVIEW(dev_priv))
		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
			DSPFREQGUAR_SHIFT;
	else
		cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
			DSPFREQGUAR_SHIFT_CHV;
}

static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	intel_de_write(dev_priv, GCI_CONTROL,
		       VGA_FAST_MODE_DISABLE | default_credits);

	intel_de_write(dev_priv, GCI_CONTROL,
		       VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}

static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val, cmd = cdclk_config->voltage_level;
	intel_wakeref_t wakeref;

	switch (cdclk) {
	case 400000:
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* There are cases where we can end up here with power domains
	 * off and a CDCLK frequency other than the minimum, like when
	 * issuing a modeset without actually changing any display after
	 * a system suspend. So grab the display core domain, which covers
	 * the HW blocks needed for the following programming.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	vlv_iosf_sb_get(dev_priv,
			BIT(VLV_IOSF_SB_CCK) |
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		drm_err(&dev_priv->drm,
			"timed out waiting for CDclk change\n");
	}

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
					    cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			drm_err(&dev_priv->drm,
				"timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	vlv_iosf_sb_put(dev_priv,
			BIT(VLV_IOSF_SB_CCK) |
			BIT(VLV_IOSF_SB_BUNIT) |
			BIT(VLV_IOSF_SB_PUNIT));

	intel_update_cdclk(dev_priv);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}

static void chv_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	u32 val, cmd = cdclk_config->voltage_level;
	intel_wakeref_t wakeref;

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/* There are cases where we can end up here with power domains
	 * off and a CDCLK frequency other than the minimum, like when
	 * issuing a modeset without actually changing any display after
	 * a system suspend. So grab the display core domain, which covers
	 * the HW blocks needed for the following programming.
	 */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);

	vlv_punit_get(dev_priv);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		drm_err(&dev_priv->drm,
			"timed out waiting for CDclk change\n");
	}

	vlv_punit_put(dev_priv);

	intel_update_cdclk(dev_priv);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}

static int bdw_calc_cdclk(int min_cdclk)
{
	if (min_cdclk > 540000)
		return 675000;
	else if (min_cdclk > 450000)
		return 540000;
	else if (min_cdclk > 337500)
		return 450000;
	else
		return 337500;
}

static u8 bdw_calc_voltage_level(int cdclk)
{
	switch (cdclk) {
	default:
	case 337500:
		return 2;
	case 450000:
		return 0;
	case 540000:
		return 1;
	case 675000:
		return 3;
	}
}

static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
	u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		cdclk_config->cdclk = 800000;
	else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		cdclk_config->cdclk = 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		cdclk_config->cdclk = 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		cdclk_config->cdclk = 337500;
	else
		cdclk_config->cdclk = 675000;

	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		bdw_calc_voltage_level(cdclk_config->cdclk);
}

static u32 bdw_cdclk_freq_sel(int cdclk)
{
	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		fallthrough;
	case 337500:
		return LCPLL_CLK_FREQ_337_5_BDW;
	case 450000:
		return LCPLL_CLK_FREQ_450;
	case 540000:
		return LCPLL_CLK_FREQ_54O_BDW;
	case 675000:
		return LCPLL_CLK_FREQ_675_BDW;
	}
}

static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int ret;

	if (drm_WARN(&dev_priv->drm,
		     (intel_de_read(dev_priv, LCPLL_CTL) &
		      (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		       LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		       LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		       LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		     "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to inform pcode about cdclk change\n");
		return;
	}

	intel_de_rmw(dev_priv, LCPLL_CTL,
		     0, LCPLL_CD_SOURCE_FCLK);

	/*
	 * According to the spec, it should be enough to poll for this 1 us.
	 * However, extensive testing shows that this can take longer.
	 */
	if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 100))
		drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

	intel_de_rmw(dev_priv, LCPLL_CTL,
		     LCPLL_CLK_FREQ_MASK, bdw_cdclk_freq_sel(cdclk));

	intel_de_rmw(dev_priv, LCPLL_CTL,
		     LCPLL_CD_SOURCE_FCLK, 0);

	if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");

	snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
			cdclk_config->voltage_level);

	intel_de_write(dev_priv, CDCLK_FREQ,
		       DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev_priv);
}

static int skl_calc_cdclk(int min_cdclk, int vco)
{
	if (vco == 8640000) {
		if (min_cdclk > 540000)
			return 617143;
		else if (min_cdclk > 432000)
			return 540000;
		else if (min_cdclk > 308571)
			return 432000;
		else
			return 308571;
	} else {
		if (min_cdclk > 540000)
			return 675000;
		else if (min_cdclk > 450000)
			return 540000;
		else if (min_cdclk > 337500)
			return 450000;
		else
			return 337500;
	}
}

static u8 skl_calc_voltage_level(int cdclk)
{
	if (cdclk > 540000)
		return 3;
	else if (cdclk > 450000)
		return 2;
	else if (cdclk > 337500)
		return 1;
	else
		return 0;
}

static void skl_dpll0_update(struct drm_i915_private *dev_priv,
			     struct intel_cdclk_config *cdclk_config)
{
	u32 val;

	cdclk_config->ref = 24000;
	cdclk_config->vco = 0;

	val = intel_de_read(dev_priv, LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
		return;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	if (drm_WARN_ON(&dev_priv->drm,
			(val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
				DPLL_CTRL1_SSC(SKL_DPLL0) |
				DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
			DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		cdclk_config->vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		cdclk_config->vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}

static void skl_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 cdctl;

	skl_dpll0_update(dev_priv, cdclk_config);

	cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;

	if (cdclk_config->vco == 0)
		goto out;

	cdctl = intel_de_read(dev_priv, CDCLK_CTL);

	if (cdclk_config->vco == 8640000) {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			cdclk_config->cdclk = 432000;
			break;
		case CDCLK_FREQ_337_308:
			cdclk_config->cdclk = 308571;
			break;
		case CDCLK_FREQ_540:
			cdclk_config->cdclk = 540000;
			break;
		case CDCLK_FREQ_675_617:
			cdclk_config->cdclk = 617143;
			break;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
			break;
		}
	} else {
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			cdclk_config->cdclk = 450000;
			break;
		case CDCLK_FREQ_337_308:
			cdclk_config->cdclk = 337500;
			break;
		case CDCLK_FREQ_540:
			cdclk_config->cdclk = 540000;
			break;
		case CDCLK_FREQ_675_617:
			cdclk_config->cdclk = 675000;
			break;
		default:
			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
			break;
		}
	}

out:
	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		skl_calc_voltage_level(cdclk_config->cdclk);
}

/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
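/*
 * e.g. 337500 kHz -> DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673,
 * i.e. 336.5 MHz encoded in 10.1 fixed point.
 */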
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}

static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
					int vco)
{
	bool changed = dev_priv->skl_preferred_vco_freq != vco;

	dev_priv->skl_preferred_vco_freq = vco;

	if (changed)
		intel_update_max_cdclk(dev_priv);
}

static u32 skl_dpll0_link_rate(struct drm_i915_private *dev_priv, int vco)
{
	drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	if (vco == 8640000)
		return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0);
	else
		return DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0);
}

static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	intel_de_rmw(dev_priv, DPLL_CTRL1,
		     DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
		     DPLL_CTRL1_SSC(SKL_DPLL0) |
		     DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0),
		     DPLL_CTRL1_OVERRIDE(SKL_DPLL0) |
		     skl_dpll0_link_rate(dev_priv, vco));
	intel_de_posting_read(dev_priv, DPLL_CTRL1);

	intel_de_rmw(dev_priv, LCPLL1_CTL,
		     0, LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "DPLL0 not locked\n");

	dev_priv->cdclk.hw.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}

static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, LCPLL1_CTL,
		     LCPLL_PLL_ENABLE, 0);

	if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");

	dev_priv->cdclk.hw.vco = 0;
}

static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
			      int cdclk, int vco)
{
	switch (cdclk) {
	default:
		drm_WARN_ON(&dev_priv->drm,
			    cdclk != dev_priv->cdclk.hw.bypass);
		drm_WARN_ON(&dev_priv->drm, vco != 0);
		fallthrough;
	case 308571:
	case 337500:
		return CDCLK_FREQ_337_308;
	case 450000:
	case 432000:
		return CDCLK_FREQ_450_432;
	case 540000:
		return CDCLK_FREQ_540;
	case 617143:
	case 675000:
		return CDCLK_FREQ_675_617;
	}
}

static void skl_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int vco = cdclk_config->vco;
	u32 freq_select, cdclk_ctl;
	int ret;

	/*
	 * Based on WA#1183 the 308 and 617 MHz CDCLK rates are
	 * unsupported on SKL. In theory this should never happen since only
	 * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
	 * supported on SKL either, see the above WA. WARN whenever trying to
	 * use the corresponding VCO freq as that always leads to using the
	 * minimum 308MHz CDCLK.
	 */
	drm_WARN_ON_ONCE(&dev_priv->drm,
			 IS_SKYLAKE(dev_priv) && vco == 8640000);

	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Failed to inform PCU about cdclk change (%d)\n", ret);
		return;
	}

	freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);

	if (dev_priv->cdclk.hw.vco != 0 &&
	    dev_priv->cdclk.hw.vco != vco)
		skl_dpll0_disable(dev_priv);

	cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);

	if (dev_priv->cdclk.hw.vco != vco) {
		/* Wa Display #1183: skl,kbl,cfl */
		cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
		cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
		intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	}

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	if (dev_priv->cdclk.hw.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);

	cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);

	/* Wa Display #1183: skl,kbl,cfl */
	cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
	intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	/* inform PCU of the change */
	snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
			cdclk_config->voltage_level);

	intel_update_cdclk(dev_priv);
}

static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	/*
	 * check if the pre-os initialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");

	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk.hw.vco == 0 ||
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk.hw.cdclk = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk.hw.vco = -1;
}

static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk.hw.cdclk != 0 &&
	    dev_priv->cdclk.hw.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk.hw.vco);
		return;
	}

	cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
	if (cdclk_config.vco == 0)
		cdclk_config.vco = 8100000;
	cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);

	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;

	cdclk_config.cdclk = cdclk_config.bypass;
	cdclk_config.vco = 0;
	cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);

	skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}

static bool has_cdclk_squasher(struct drm_i915_private *i915)
{
	return IS_DG2(i915);
}

struct intel_cdclk_vals {
	u32 cdclk;
	u16 refclk;
	u16 waveform;
	u8 divider;	/* CD2X divider * 2 */
	u8 ratio;
};

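/*
 * How to read these tables: vco = refclk * ratio, and cdclk = vco / divider
 * (with divider being twice the actual CD2X divider). For instance the first
 * bxt entry gives vco = 19200 * 60 = 1152000 kHz and cdclk = 1152000 / 8 =
 * 144000 kHz, i.e. a CD2X divider of 4.
 */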
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 },
	{ .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 },
	{}
};

static const struct intel_cdclk_vals glk_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 },
	{ .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 },
	{ .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 },
	{}
};

static const struct intel_cdclk_vals icl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

static const struct intel_cdclk_vals rkl_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 4, .ratio = 36 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 4, .ratio = 40 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 4, .ratio = 64 },
	{ .refclk = 19200, .cdclk = 326400, .divider = 8, .ratio = 136 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 4, .ratio = 116 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 4, .ratio = 136 },

	{ .refclk = 24000, .cdclk = 180000, .divider = 4, .ratio = 30 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 4, .ratio = 32 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 4, .ratio = 52 },
	{ .refclk = 24000, .cdclk = 324000, .divider = 8, .ratio = 108 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 4, .ratio = 92 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 4, .ratio = 108 },

	{ .refclk = 38400, .cdclk = 172800, .divider = 4, .ratio = 18 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 4, .ratio = 20 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 4, .ratio = 32 },
	{ .refclk = 38400, .cdclk = 326400, .divider = 8, .ratio = 68 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 4, .ratio = 58 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 4, .ratio = 68 },
	{}
};

static const struct intel_cdclk_vals adlp_a_step_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

static const struct intel_cdclk_vals adlp_cdclk_table[] = {
	{ .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 },
	{ .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
	{ .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 },
	{ .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 },
	{ .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 },

	{ .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 },
	{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
	{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
	{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
	{ .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },

	{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
	{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
	{ .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
	{ .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
	{}
};

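/*
 * On DG2 the squasher scales the CD2X output: the effective cdclk is
 * vco / 2 multiplied by the fraction of bits set in the 16 bit waveform.
 * E.g. for the 204000 kHz entry below: vco = 38400 * 34 = 1305600 kHz,
 * waveform 0x9248 has 5 of 16 bits set, so 1305600 / 2 * 5 / 16 = 204000.
 */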
static const struct intel_cdclk_vals dg2_cdclk_table[] = {
	{ .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 },
	{ .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 },
	{ .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 },
	{ .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a },
	{ .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa },
	{ .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a },
	{ .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 },
	{ .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 },
	{ .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee },
	{ .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de },
	{ .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe },
	{ .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe },
	{ .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff },
	{}
};

static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk >= min_cdclk)
			return table[i].cdclk;

	drm_WARN(&dev_priv->drm, 1,
		 "Cannot satisfy minimum cdclk %d with refclk %u\n",
		 min_cdclk, dev_priv->cdclk.hw.ref);
	return 0;
}

static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk == cdclk)
			return dev_priv->cdclk.hw.ref * table[i].ratio;

	drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
		 cdclk, dev_priv->cdclk.hw.ref);
	return 0;
}

static u8 bxt_calc_voltage_level(int cdclk)
{
	return DIV_ROUND_UP(cdclk, 25000);
}

static u8 icl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static u8 ehl_calc_voltage_level(int cdclk)
{
	if (cdclk > 326400)
		return 3;
	else if (cdclk > 312000)
		return 2;
	else if (cdclk > 180000)
		return 1;
	else
		return 0;
}

static u8 tgl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 3;
	else if (cdclk > 326400)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static void icl_readout_refclk(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;

	switch (dssm) {
	default:
		MISSING_CASE(dssm);
		fallthrough;
	case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
		cdclk_config->ref = 24000;
		break;
	case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
		cdclk_config->ref = 19200;
		break;
	case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
		cdclk_config->ref = 38400;
		break;
	}
}

static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
			       struct intel_cdclk_config *cdclk_config)
{
	u32 val, ratio;

	if (IS_DG2(dev_priv))
		cdclk_config->ref = 38400;
	else if (DISPLAY_VER(dev_priv) >= 11)
		icl_readout_refclk(dev_priv, cdclk_config);
	else
		cdclk_config->ref = 19200;

	val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
	    (val & BXT_DE_PLL_LOCK) == 0) {
		/*
		 * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
		 * setting it to zero is a way to signal that.
		 */
		cdclk_config->vco = 0;
		return;
	}

	/*
	 * DISPLAY_VER >= 11 have the ratio directly in the PLL enable register,
	 * gen9lp had it in a separate PLL control register.
	 */
	if (DISPLAY_VER(dev_priv) >= 11)
		ratio = val & ICL_CDCLK_PLL_RATIO_MASK;
	else
		ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;

	cdclk_config->vco = ratio * cdclk_config->ref;
}

static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_config *cdclk_config)
{
	u32 squash_ctl = 0;
	u32 divider;
	int div;

	bxt_de_pll_readout(dev_priv, cdclk_config);

	if (DISPLAY_VER(dev_priv) >= 12)
		cdclk_config->bypass = cdclk_config->ref / 2;
	else if (DISPLAY_VER(dev_priv) >= 11)
		cdclk_config->bypass = 50000;
	else
		cdclk_config->bypass = cdclk_config->ref;

	if (cdclk_config->vco == 0) {
		cdclk_config->cdclk = cdclk_config->bypass;
		goto out;
	}

	divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;

	switch (divider) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		div = 2;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		div = 3;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		div = 4;
		break;
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		div = 8;
		break;
	default:
		MISSING_CASE(divider);
		return;
	}

	if (has_cdclk_squasher(dev_priv))
		squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL);

	if (squash_ctl & CDCLK_SQUASH_ENABLE) {
		u16 waveform;
		int size;

		size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1;
		waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size);

		cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) *
							cdclk_config->vco, size * div);
	} else {
		cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
	}

out:
	/*
	 * Can't read this out :( Let's assume it's
	 * at least what the CDCLK frequency requires.
	 */
	cdclk_config->voltage_level =
		intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk);
}

static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_de_wait_for_clear(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);

	intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
		     BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));

	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv,
				  BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;
}

static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
	intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE,
		     BXT_DE_PLL_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;
}

static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
	u32 val;

	val = ICL_CDCLK_PLL_RATIO(ratio);
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	val |= BXT_DE_PLL_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;
}

static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
	u32 val;

	/* Write PLL ratio without disabling */
	val = ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Submit freq change request */
	val |= BXT_DE_PLL_FREQ_REQ;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	/* Timeout 200us */
	if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE,
				  BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
		drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n");

	val &= ~BXT_DE_PLL_FREQ_REQ;
	intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);

	dev_priv->cdclk.hw.vco = vco;
}

static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (DISPLAY_VER(dev_priv) >= 12) {
		if (pipe == INVALID_PIPE)
			return TGL_CDCLK_CD2X_PIPE_NONE;
		else
			return TGL_CDCLK_CD2X_PIPE(pipe);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		if (pipe == INVALID_PIPE)
			return ICL_CDCLK_CD2X_PIPE_NONE;
		else
			return ICL_CDCLK_CD2X_PIPE(pipe);
	} else {
		if (pipe == INVALID_PIPE)
			return BXT_CDCLK_CD2X_PIPE_NONE;
		else
			return BXT_CDCLK_CD2X_PIPE(pipe);
	}
}

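/*
 * Pick the CD2X divider select for a given vco/cdclk pair. Since
 * cdclk = vco / 2 / div, a vco/cdclk ratio of 3 means the 1.5 divider,
 * e.g. the bxt table entry with vco = 1152000 and cdclk = 384000 above.
 */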
static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
				  int cdclk, int vco)
{
	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	default:
		drm_WARN_ON(&dev_priv->drm,
			    cdclk != dev_priv->cdclk.hw.bypass);
		drm_WARN_ON(&dev_priv->drm, vco != 0);
		fallthrough;
	case 2:
		return BXT_CDCLK_CD2X_DIV_SEL_1;
	case 3:
		return BXT_CDCLK_CD2X_DIV_SEL_1_5;
	case 4:
		return BXT_CDCLK_CD2X_DIV_SEL_2;
	case 8:
		return BXT_CDCLK_CD2X_DIV_SEL_4;
	}
}

static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
				 int cdclk)
{
	const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
	int i;

	if (cdclk == dev_priv->cdclk.hw.bypass)
		return 0;

	for (i = 0; table[i].refclk; i++)
		if (table[i].refclk == dev_priv->cdclk.hw.ref &&
		    table[i].cdclk == cdclk)
			return table[i].waveform;

	drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
		 cdclk, dev_priv->cdclk.hw.ref);

	return 0xffff;
}

static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_config *cdclk_config,
			  enum pipe pipe)
{
	int cdclk = cdclk_config->cdclk;
	int vco = cdclk_config->vco;
	u32 val;
	u16 waveform;
	int clock;
	int ret;

	/* Inform power controller of upcoming frequency change. */
	if (DISPLAY_VER(dev_priv) >= 11)
		ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
					SKL_CDCLK_PREPARE_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE, 3);
	else
		/*
		 * BSpec requires us to wait up to 150usec, but that leads to
		 * timeouts; the 2ms used here is based on experiment.
		 */
		ret = snb_pcode_write_timeout(dev_priv,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
					      0x80000000, 150, 2);
	if (ret) {
		drm_err(&dev_priv->drm,
			"Failed to inform PCU about cdclk change (err %d, freq %d)\n",
			ret, cdclk);
		return;
	}

	if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
		if (dev_priv->cdclk.hw.vco != vco)
			adlp_cdclk_pll_crawl(dev_priv, vco);
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		if (dev_priv->cdclk.hw.vco != 0 &&
		    dev_priv->cdclk.hw.vco != vco)
			icl_cdclk_pll_disable(dev_priv);

		if (dev_priv->cdclk.hw.vco != vco)
			icl_cdclk_pll_enable(dev_priv, vco);
	} else {
		if (dev_priv->cdclk.hw.vco != 0 &&
		    dev_priv->cdclk.hw.vco != vco)
			bxt_de_pll_disable(dev_priv);

		if (dev_priv->cdclk.hw.vco != vco)
			bxt_de_pll_enable(dev_priv, vco);
	}

	waveform = cdclk_squash_waveform(dev_priv, cdclk);

	if (waveform)
		clock = vco / 2;
	else
		clock = cdclk;

	if (has_cdclk_squasher(dev_priv)) {
		u32 squash_ctl = 0;

		if (waveform)
			squash_ctl = CDCLK_SQUASH_ENABLE |
				     CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform;

		intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl);
	}

	val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
		bxt_cdclk_cd2x_pipe(dev_priv, pipe) |
		skl_cdclk_decimal(cdclk);

	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
	    cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	intel_de_write(dev_priv, CDCLK_CTL, val);

	if (pipe != INVALID_PIPE)
		intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));

	if (DISPLAY_VER(dev_priv) >= 11) {
		ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				      cdclk_config->voltage_level);
	} else {
		/*
		 * The timeout isn't specified, the 2ms used here is based on
		 * experiment.
		 * FIXME: Waiting for the request completion could be delayed
		 * until the next PCODE request based on BSpec.
		 */
		ret = snb_pcode_write_timeout(dev_priv,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
					      cdclk_config->voltage_level,
					      150, 2);
	}

	if (ret) {
		drm_err(&dev_priv->drm,
			"PCode CDCLK freq set failed, (err %d, freq %d)\n",
			ret, cdclk);
		return;
	}

	intel_update_cdclk(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 11)
		/*
		 * Can't read out the voltage level :(
		 * Let's just assume everything is as expected.
		 */
		dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}

static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;
	int cdclk, clock, vco;

	intel_update_cdclk(dev_priv);
	intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");

	if (dev_priv->cdclk.hw.vco == 0 ||
	    dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = intel_de_read(dev_priv, CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);

	/* Make sure this is a legal cdclk value for the platform */
	cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
	if (cdclk != dev_priv->cdclk.hw.cdclk)
		goto sanitize;

	/* Make sure the VCO is correct for the cdclk */
	vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
	if (vco != dev_priv->cdclk.hw.vco)
		goto sanitize;

	expected = skl_cdclk_decimal(cdclk);

	/* Figure out what CD2X divider we should be using for this cdclk */
	if (has_cdclk_squasher(dev_priv))
		clock = dev_priv->cdclk.hw.vco / 2;
	else
		clock = dev_priv->cdclk.hw.cdclk;

	expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
					   dev_priv->cdclk.hw.vco);

	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
1864 */
1865 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
1866 dev_priv->cdclk.hw.cdclk >= 500000)
1867 expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
1868
1869 if (cdctl == expected)
1870 /* All well; nothing to sanitize */
1871 return;
1872
1873 sanitize:
1874 drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
1875
1876 /* force cdclk programming */
1877 dev_priv->cdclk.hw.cdclk = 0;
1878
1879 /* force full PLL disable + enable */
1880 dev_priv->cdclk.hw.vco = -1;
1881 }
1882
1883 static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
1884 {
1885 struct intel_cdclk_config cdclk_config;
1886
1887 bxt_sanitize_cdclk(dev_priv);
1888
1889 if (dev_priv->cdclk.hw.cdclk != 0 &&
1890 dev_priv->cdclk.hw.vco != 0)
1891 return;
1892
1893 cdclk_config = dev_priv->cdclk.hw;
1894
1895 /*
1896 * FIXME:
1897 * - The initial CDCLK needs to be read from VBT.
1898 * Need to make this change after VBT has changes for BXT.
1899 */
1900 cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
1901 cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
1902 cdclk_config.voltage_level =
1903 intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
1904
1905 bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
1906 }
1907
1908 static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
1909 {
1910 struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
1911
1912 cdclk_config.cdclk = cdclk_config.bypass;
1913 cdclk_config.vco = 0;
1914 cdclk_config.voltage_level =
1915 intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
1916
1917 bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
1918 }
1919
1920 /**
1921 * intel_cdclk_init_hw - Initialize CDCLK hardware
1922 * @i915: i915 device
1923 *
1924 * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
1925 * sanitizing the state of the hardware if needed. This is generally done only
1926 * during the display core initialization sequence, after which the DMC will
1927 * take care of turning CDCLK off/on as needed.
1928 */
1929 void intel_cdclk_init_hw(struct drm_i915_private *i915)
1930 {
1931 if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
1932 bxt_cdclk_init_hw(i915);
1933 else if (DISPLAY_VER(i915) == 9)
1934 skl_cdclk_init_hw(i915);
1935 }
1936
1937 /**
1938 * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
1939 * @i915: i915 device
1940 *
1941 * Uninitialize CDCLK. This is done only during the display core
1942 * uninitialization sequence.
1943 */
1944 void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
1945 {
1946 if (DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915))
1947 bxt_cdclk_uninit_hw(i915);
1948 else if (DISPLAY_VER(i915) == 9)
1949 skl_cdclk_uninit_hw(i915);
1950 }
1951
1952 static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
1953 const struct intel_cdclk_config *a,
1954 const struct intel_cdclk_config *b)
1955 {
1956 int a_div, b_div;
1957
1958 if (!HAS_CDCLK_CRAWL(dev_priv))
1959 return false;
1960
1961 /*
1962 * The vco and cd2x divider will change independently
1963 * from each other, so we disallow cd2x change when crawling.
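 * E.g. two configs that both map to a cd2x divider of 2 (vco == 2 * cdclk)
 * but use different vco frequencies can be reached by crawling the PLL;
 * if the divider would also have to change, crawling is not attempted.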
1964 */
1965 a_div = DIV_ROUND_CLOSEST(a->vco, a->cdclk);
1966 b_div = DIV_ROUND_CLOSEST(b->vco, b->cdclk);
1967
1968 return a->vco != 0 && b->vco != 0 &&
1969 a->vco != b->vco &&
1970 a_div == b_div &&
1971 a->ref == b->ref;
1972 }
1973
1974 static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv,
1975 const struct intel_cdclk_config *a,
1976 const struct intel_cdclk_config *b)
1977 {
1978 /*
1979 * FIXME should store a bit more state in intel_cdclk_config
1980 * to differentiate squasher vs. cd2x divider properly. For
1981 * the moment all platforms with squasher use a fixed cd2x
1982 * divider.
1983 */
1984 if (!has_cdclk_squasher(dev_priv))
1985 return false;
1986
1987 return a->cdclk != b->cdclk &&
1988 a->vco != 0 &&
1989 a->vco == b->vco &&
1990 a->ref == b->ref;
1991 }
1992
1993 /**
1994 * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
1995 * configurations requires a modeset on all pipes
1996 * @a: first CDCLK configuration
1997 * @b: second CDCLK configuration
1998 *
1999 * Returns:
2000 * True if changing between the two CDCLK configurations
2001 * requires all pipes to be off, false if not.
2002 */
2003 bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
2004 const struct intel_cdclk_config *b)
2005 {
2006 return a->cdclk != b->cdclk ||
2007 a->vco != b->vco ||
2008 a->ref != b->ref;
2009 }
2010
2011 /**
2012 * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
2013 * configurations requires only a cd2x divider update
2014 * @dev_priv: i915 device
2015 * @a: first CDCLK configuration
2016 * @b: second CDCLK configuration
2017 *
2018 * Returns:
2019 * True if changing between the two CDCLK configurations
2020 * can be done with just a cd2x divider update, false if not.
2021 */
2022 static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
2023 const struct intel_cdclk_config *a,
2024 const struct intel_cdclk_config *b)
2025 {
2026 /* Older hw doesn't have the capability */
2027 if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv))
2028 return false;
2029
2030 /*
2031 * FIXME should store a bit more state in intel_cdclk_config
2032 * to differentiate squasher vs. cd2x divider properly. For
2033 * the moment all platforms with squasher use a fixed cd2x
2034 * divider.
2035 */
2036 if (has_cdclk_squasher(dev_priv))
2037 return false;
2038
2039 return a->cdclk != b->cdclk &&
2040 a->vco != 0 &&
2041 a->vco == b->vco &&
2042 a->ref == b->ref;
2043 }
2044
2045 /**
2046 * intel_cdclk_changed - Determine if two CDCLK configurations are different
2047 * @a: first CDCLK configuration
2048 * @b: second CDCLK configuration
2049 *
2050 * Returns:
2051 * True if the CDCLK configurations don't match, false if they do.
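 * Unlike intel_cdclk_needs_modeset(), a difference in voltage_level
 * alone also counts as a change here.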
2052 */ 2053 static bool intel_cdclk_changed(const struct intel_cdclk_config *a, 2054 const struct intel_cdclk_config *b) 2055 { 2056 return intel_cdclk_needs_modeset(a, b) || 2057 a->voltage_level != b->voltage_level; 2058 } 2059 2060 void intel_cdclk_dump_config(struct drm_i915_private *i915, 2061 const struct intel_cdclk_config *cdclk_config, 2062 const char *context) 2063 { 2064 drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", 2065 context, cdclk_config->cdclk, cdclk_config->vco, 2066 cdclk_config->ref, cdclk_config->bypass, 2067 cdclk_config->voltage_level); 2068 } 2069 2070 /** 2071 * intel_set_cdclk - Push the CDCLK configuration to the hardware 2072 * @dev_priv: i915 device 2073 * @cdclk_config: new CDCLK configuration 2074 * @pipe: pipe with which to synchronize the update 2075 * 2076 * Program the hardware based on the passed in CDCLK state, 2077 * if necessary. 2078 */ 2079 static void intel_set_cdclk(struct drm_i915_private *dev_priv, 2080 const struct intel_cdclk_config *cdclk_config, 2081 enum pipe pipe) 2082 { 2083 struct intel_encoder *encoder; 2084 2085 if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) 2086 return; 2087 2088 if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk)) 2089 return; 2090 2091 intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); 2092 2093 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 2094 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2095 2096 intel_psr_pause(intel_dp); 2097 } 2098 2099 intel_audio_cdclk_change_pre(dev_priv); 2100 2101 /* 2102 * Lock aux/gmbus while we change cdclk in case those 2103 * functions use cdclk. Not all platforms/ports do, 2104 * but we'll lock them all for simplicity. 2105 */ 2106 mutex_lock(&dev_priv->gmbus_mutex); 2107 for_each_intel_dp(&dev_priv->drm, encoder) { 2108 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2109 2110 mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, 2111 &dev_priv->gmbus_mutex); 2112 } 2113 2114 intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); 2115 2116 for_each_intel_dp(&dev_priv->drm, encoder) { 2117 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2118 2119 mutex_unlock(&intel_dp->aux.hw_mutex); 2120 } 2121 mutex_unlock(&dev_priv->gmbus_mutex); 2122 2123 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 2124 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2125 2126 intel_psr_resume(intel_dp); 2127 } 2128 2129 intel_audio_cdclk_change_post(dev_priv); 2130 2131 if (drm_WARN(&dev_priv->drm, 2132 intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), 2133 "cdclk state doesn't match!\n")) { 2134 intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]"); 2135 intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); 2136 } 2137 } 2138 2139 /** 2140 * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware 2141 * @state: intel atomic state 2142 * 2143 * Program the hardware before updating the HW plane state based on the 2144 * new CDCLK state, if necessary. 
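 * Only updates where the CDCLK frequency is not decreasing, or which
 * aren't synchronized to a specific pipe, are applied here; decreases
 * are deferred to intel_set_cdclk_post_plane_update().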
2145 */ 2146 void 2147 intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) 2148 { 2149 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2150 const struct intel_cdclk_state *old_cdclk_state = 2151 intel_atomic_get_old_cdclk_state(state); 2152 const struct intel_cdclk_state *new_cdclk_state = 2153 intel_atomic_get_new_cdclk_state(state); 2154 enum pipe pipe = new_cdclk_state->pipe; 2155 2156 if (!intel_cdclk_changed(&old_cdclk_state->actual, 2157 &new_cdclk_state->actual)) 2158 return; 2159 2160 if (pipe == INVALID_PIPE || 2161 old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) { 2162 drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); 2163 2164 intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); 2165 } 2166 } 2167 2168 /** 2169 * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware 2170 * @state: intel atomic state 2171 * 2172 * Program the hardware after updating the HW plane state based on the 2173 * new CDCLK state, if necessary. 2174 */ 2175 void 2176 intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) 2177 { 2178 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2179 const struct intel_cdclk_state *old_cdclk_state = 2180 intel_atomic_get_old_cdclk_state(state); 2181 const struct intel_cdclk_state *new_cdclk_state = 2182 intel_atomic_get_new_cdclk_state(state); 2183 enum pipe pipe = new_cdclk_state->pipe; 2184 2185 if (!intel_cdclk_changed(&old_cdclk_state->actual, 2186 &new_cdclk_state->actual)) 2187 return; 2188 2189 if (pipe != INVALID_PIPE && 2190 old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) { 2191 drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); 2192 2193 intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); 2194 } 2195 } 2196 2197 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) 2198 { 2199 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 2200 int pixel_rate = crtc_state->pixel_rate; 2201 2202 if (DISPLAY_VER(dev_priv) >= 10) 2203 return DIV_ROUND_UP(pixel_rate, 2); 2204 else if (DISPLAY_VER(dev_priv) == 9 || 2205 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2206 return pixel_rate; 2207 else if (IS_CHERRYVIEW(dev_priv)) 2208 return DIV_ROUND_UP(pixel_rate * 100, 95); 2209 else if (crtc_state->double_wide) 2210 return DIV_ROUND_UP(pixel_rate * 100, 90 * 2); 2211 else 2212 return DIV_ROUND_UP(pixel_rate * 100, 90); 2213 } 2214 2215 static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) 2216 { 2217 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2218 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2219 struct intel_plane *plane; 2220 int min_cdclk = 0; 2221 2222 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) 2223 min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); 2224 2225 return min_cdclk; 2226 } 2227 2228 int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) 2229 { 2230 struct drm_i915_private *dev_priv = 2231 to_i915(crtc_state->uapi.crtc->dev); 2232 int min_cdclk; 2233 2234 if (!crtc_state->hw.enable) 2235 return 0; 2236 2237 min_cdclk = intel_pixel_rate_to_cdclk(crtc_state); 2238 2239 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 2240 if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state)) 2241 min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95); 2242 2243 /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz, 2244 * audio enabled, port width x4, and link 
rate HBR2 (5.4 GHz), or else
2245 * there may be audio corruption or screen corruption." This cdclk
2246 * restriction for GLK is 316.8 MHz.
2247 */
2248 if (intel_crtc_has_dp_encoder(crtc_state) &&
2249 crtc_state->has_audio &&
2250 crtc_state->port_clock >= 540000 &&
2251 crtc_state->lane_count == 4) {
2252 if (DISPLAY_VER(dev_priv) == 10) {
2253 /* Display WA #1145: glk */
2254 min_cdclk = max(316800, min_cdclk);
2255 } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) {
2256 /* Display WA #1144: skl,bxt */
2257 min_cdclk = max(432000, min_cdclk);
2258 }
2259 }
2260
2261 /*
2262 * According to BSpec, "The CD clock frequency must be at least twice
2263 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
2264 */
2265 if (crtc_state->has_audio && DISPLAY_VER(dev_priv) >= 9)
2266 min_cdclk = max(2 * 96000, min_cdclk);
2267
2268 /*
2269 * "For DP audio configuration, cdclk frequency shall be set to
2270 * meet the following requirements:
2271 * DP Link Frequency(MHz) | Cdclk frequency(MHz)
2272 * 270 | 320 or higher
2273 * 162 | 200 or higher"
2274 */
2275 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
2276 intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
2277 min_cdclk = max(crtc_state->port_clock, min_cdclk);
2278
2279 /*
2280 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
2281 * than 320000 kHz.
2282 */
2283 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
2284 IS_VALLEYVIEW(dev_priv))
2285 min_cdclk = max(320000, min_cdclk);
2286
2287 /*
2288 * On Geminilake, once the CDCLK gets as low as 79200 kHz the
2289 * picture becomes unstable, even though the values are
2290 * correct for the DSI PLL and DE PLL.
2291 */
2292 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
2293 IS_GEMINILAKE(dev_priv))
2294 min_cdclk = max(158400, min_cdclk);
2295
2296 /* Account for additional needs from the planes */
2297 min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
2298
2299 /*
2300 * When only one VDSC engine is used, the pixel clock cannot be
2301 * higher than the VDSC clock (cdclk), since each VDSC engine
2302 * operates with a throughput of 1 pixel per clock.
2303 */
2304 if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
2305 min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
2306
2307 /*
2308 * HACK. Currently for TGL platforms we calculate
2309 * min_cdclk initially based on pixel_rate divided
2310 * by 2, also accounting for plane requirements;
2311 * however, in some cases the lowest possible CDCLK
2312 * doesn't work and causes underruns.
2313 * Explicitly stating here that this is currently
2314 * a hack rather than a final solution.
2315 */
2316 if (IS_TIGERLAKE(dev_priv)) {
2317 /*
2318 * Clamp to max_cdclk_freq in case the pixel rate is higher,
2319 * in order not to break 8K modes while still leaving the W/A in place.
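 * (Without the clamp, an 8K mode whose pixel_rate exceeds
 * max_cdclk_freq would trip the error check below instead of being
 * allowed through.)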
2320 */
2321 min_cdclk = max_t(int, min_cdclk,
2322 min_t(int, crtc_state->pixel_rate,
2323 dev_priv->max_cdclk_freq));
2324 }
2325
2326 if (min_cdclk > dev_priv->max_cdclk_freq) {
2327 drm_dbg_kms(&dev_priv->drm,
2328 "required cdclk (%d kHz) exceeds max (%d kHz)\n",
2329 min_cdclk, dev_priv->max_cdclk_freq);
2330 return -EINVAL;
2331 }
2332
2333 return min_cdclk;
2334 }
2335
2336 static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
2337 {
2338 struct intel_atomic_state *state = cdclk_state->base.state;
2339 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2340 struct intel_bw_state *bw_state = NULL;
2341 struct intel_crtc *crtc;
2342 struct intel_crtc_state *crtc_state;
2343 int min_cdclk, i;
2344 enum pipe pipe;
2345
2346 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
2347 int ret;
2348
2349 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
2350 if (min_cdclk < 0)
2351 return min_cdclk;
2352
2353 bw_state = intel_atomic_get_bw_state(state);
2354 if (IS_ERR(bw_state))
2355 return PTR_ERR(bw_state);
2356
2357 if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
2358 continue;
2359
2360 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
2361
2362 ret = intel_atomic_lock_global_state(&cdclk_state->base);
2363 if (ret)
2364 return ret;
2365 }
2366
2367 min_cdclk = cdclk_state->force_min_cdclk;
2368 for_each_pipe(dev_priv, pipe) {
2369 min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
2370
2371 if (!bw_state)
2372 continue;
2373
2374 min_cdclk = max(bw_state->min_cdclk, min_cdclk);
2375 }
2376
2377 return min_cdclk;
2378 }
2379
2380 /*
2381 * Account for port clock min voltage level requirements.
2382 * This only really does something on DISPLAY_VER >= 11 but can be
2383 * called on earlier platforms as well.
2384 *
2385 * Note that this function assumes that 0 is
2386 * the lowest voltage value, and higher values
2387 * correspond to increasingly higher voltages.
2388 *
2389 * Should that relationship no longer hold on
2390 * future platforms this code will need to be
2391 * adjusted.
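 * (The per-pipe max() accumulation below relies on that ordering.)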
2392 */ 2393 static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state) 2394 { 2395 struct intel_atomic_state *state = cdclk_state->base.state; 2396 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2397 struct intel_crtc *crtc; 2398 struct intel_crtc_state *crtc_state; 2399 u8 min_voltage_level; 2400 int i; 2401 enum pipe pipe; 2402 2403 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 2404 int ret; 2405 2406 if (crtc_state->hw.enable) 2407 min_voltage_level = crtc_state->min_voltage_level; 2408 else 2409 min_voltage_level = 0; 2410 2411 if (cdclk_state->min_voltage_level[crtc->pipe] == min_voltage_level) 2412 continue; 2413 2414 cdclk_state->min_voltage_level[crtc->pipe] = min_voltage_level; 2415 2416 ret = intel_atomic_lock_global_state(&cdclk_state->base); 2417 if (ret) 2418 return ret; 2419 } 2420 2421 min_voltage_level = 0; 2422 for_each_pipe(dev_priv, pipe) 2423 min_voltage_level = max(cdclk_state->min_voltage_level[pipe], 2424 min_voltage_level); 2425 2426 return min_voltage_level; 2427 } 2428 2429 static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2430 { 2431 struct intel_atomic_state *state = cdclk_state->base.state; 2432 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2433 int min_cdclk, cdclk; 2434 2435 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2436 if (min_cdclk < 0) 2437 return min_cdclk; 2438 2439 cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); 2440 2441 cdclk_state->logical.cdclk = cdclk; 2442 cdclk_state->logical.voltage_level = 2443 vlv_calc_voltage_level(dev_priv, cdclk); 2444 2445 if (!cdclk_state->active_pipes) { 2446 cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); 2447 2448 cdclk_state->actual.cdclk = cdclk; 2449 cdclk_state->actual.voltage_level = 2450 vlv_calc_voltage_level(dev_priv, cdclk); 2451 } else { 2452 cdclk_state->actual = cdclk_state->logical; 2453 } 2454 2455 return 0; 2456 } 2457 2458 static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2459 { 2460 int min_cdclk, cdclk; 2461 2462 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2463 if (min_cdclk < 0) 2464 return min_cdclk; 2465 2466 /* 2467 * FIXME should also account for plane ratio 2468 * once 64bpp pixel formats are supported. 2469 */ 2470 cdclk = bdw_calc_cdclk(min_cdclk); 2471 2472 cdclk_state->logical.cdclk = cdclk; 2473 cdclk_state->logical.voltage_level = 2474 bdw_calc_voltage_level(cdclk); 2475 2476 if (!cdclk_state->active_pipes) { 2477 cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk); 2478 2479 cdclk_state->actual.cdclk = cdclk; 2480 cdclk_state->actual.voltage_level = 2481 bdw_calc_voltage_level(cdclk); 2482 } else { 2483 cdclk_state->actual = cdclk_state->logical; 2484 } 2485 2486 return 0; 2487 } 2488 2489 static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state) 2490 { 2491 struct intel_atomic_state *state = cdclk_state->base.state; 2492 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2493 struct intel_crtc *crtc; 2494 struct intel_crtc_state *crtc_state; 2495 int vco, i; 2496 2497 vco = cdclk_state->logical.vco; 2498 if (!vco) 2499 vco = dev_priv->skl_preferred_vco_freq; 2500 2501 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 2502 if (!crtc_state->hw.enable) 2503 continue; 2504 2505 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) 2506 continue; 2507 2508 /* 2509 * DPLL0 VCO may need to be adjusted to get the correct 2510 * clock for eDP. This will affect cdclk as well. 
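 * E.g. link rates whose port_clock / 2 is 108000 or 216000 kHz (the
 * eDP 1.4 intermediate rates) need the 8640 MHz VCO; everything else
 * uses 8100 MHz.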
2511 */ 2512 switch (crtc_state->port_clock / 2) { 2513 case 108000: 2514 case 216000: 2515 vco = 8640000; 2516 break; 2517 default: 2518 vco = 8100000; 2519 break; 2520 } 2521 } 2522 2523 return vco; 2524 } 2525 2526 static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2527 { 2528 int min_cdclk, cdclk, vco; 2529 2530 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2531 if (min_cdclk < 0) 2532 return min_cdclk; 2533 2534 vco = skl_dpll0_vco(cdclk_state); 2535 2536 /* 2537 * FIXME should also account for plane ratio 2538 * once 64bpp pixel formats are supported. 2539 */ 2540 cdclk = skl_calc_cdclk(min_cdclk, vco); 2541 2542 cdclk_state->logical.vco = vco; 2543 cdclk_state->logical.cdclk = cdclk; 2544 cdclk_state->logical.voltage_level = 2545 skl_calc_voltage_level(cdclk); 2546 2547 if (!cdclk_state->active_pipes) { 2548 cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco); 2549 2550 cdclk_state->actual.vco = vco; 2551 cdclk_state->actual.cdclk = cdclk; 2552 cdclk_state->actual.voltage_level = 2553 skl_calc_voltage_level(cdclk); 2554 } else { 2555 cdclk_state->actual = cdclk_state->logical; 2556 } 2557 2558 return 0; 2559 } 2560 2561 static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2562 { 2563 struct intel_atomic_state *state = cdclk_state->base.state; 2564 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2565 int min_cdclk, min_voltage_level, cdclk, vco; 2566 2567 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2568 if (min_cdclk < 0) 2569 return min_cdclk; 2570 2571 min_voltage_level = bxt_compute_min_voltage_level(cdclk_state); 2572 if (min_voltage_level < 0) 2573 return min_voltage_level; 2574 2575 cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); 2576 vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); 2577 2578 cdclk_state->logical.vco = vco; 2579 cdclk_state->logical.cdclk = cdclk; 2580 cdclk_state->logical.voltage_level = 2581 max_t(int, min_voltage_level, 2582 intel_cdclk_calc_voltage_level(dev_priv, cdclk)); 2583 2584 if (!cdclk_state->active_pipes) { 2585 cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); 2586 vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); 2587 2588 cdclk_state->actual.vco = vco; 2589 cdclk_state->actual.cdclk = cdclk; 2590 cdclk_state->actual.voltage_level = 2591 intel_cdclk_calc_voltage_level(dev_priv, cdclk); 2592 } else { 2593 cdclk_state->actual = cdclk_state->logical; 2594 } 2595 2596 return 0; 2597 } 2598 2599 static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) 2600 { 2601 int min_cdclk; 2602 2603 /* 2604 * We can't change the cdclk frequency, but we still want to 2605 * check that the required minimum frequency doesn't exceed 2606 * the actual cdclk frequency. 
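 * (intel_crtc_compute_min_cdclk() returns -EINVAL in that case, which
 * is simply propagated here.)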
2607 */ 2608 min_cdclk = intel_compute_min_cdclk(cdclk_state); 2609 if (min_cdclk < 0) 2610 return min_cdclk; 2611 2612 return 0; 2613 } 2614 2615 static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj) 2616 { 2617 struct intel_cdclk_state *cdclk_state; 2618 2619 cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL); 2620 if (!cdclk_state) 2621 return NULL; 2622 2623 cdclk_state->pipe = INVALID_PIPE; 2624 2625 return &cdclk_state->base; 2626 } 2627 2628 static void intel_cdclk_destroy_state(struct intel_global_obj *obj, 2629 struct intel_global_state *state) 2630 { 2631 kfree(state); 2632 } 2633 2634 static const struct intel_global_state_funcs intel_cdclk_funcs = { 2635 .atomic_duplicate_state = intel_cdclk_duplicate_state, 2636 .atomic_destroy_state = intel_cdclk_destroy_state, 2637 }; 2638 2639 struct intel_cdclk_state * 2640 intel_atomic_get_cdclk_state(struct intel_atomic_state *state) 2641 { 2642 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2643 struct intel_global_state *cdclk_state; 2644 2645 cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj); 2646 if (IS_ERR(cdclk_state)) 2647 return ERR_CAST(cdclk_state); 2648 2649 return to_intel_cdclk_state(cdclk_state); 2650 } 2651 2652 int intel_cdclk_atomic_check(struct intel_atomic_state *state, 2653 bool *need_cdclk_calc) 2654 { 2655 struct drm_i915_private *i915 = to_i915(state->base.dev); 2656 const struct intel_cdclk_state *old_cdclk_state; 2657 const struct intel_cdclk_state *new_cdclk_state; 2658 struct intel_plane_state *plane_state; 2659 struct intel_bw_state *new_bw_state; 2660 struct intel_plane *plane; 2661 int min_cdclk = 0; 2662 enum pipe pipe; 2663 int ret; 2664 int i; 2665 2666 /* 2667 * active_planes bitmask has been updated, and potentially affected 2668 * planes are part of the state. We can now compute the minimum cdclk 2669 * for each plane. 
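 * (intel_plane_calc_min_cdclk() also reports, via need_cdclk_calc,
 * whether a full cdclk recomputation is required.)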
2670 */ 2671 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 2672 ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); 2673 if (ret) 2674 return ret; 2675 } 2676 2677 old_cdclk_state = intel_atomic_get_old_cdclk_state(state); 2678 new_cdclk_state = intel_atomic_get_new_cdclk_state(state); 2679 2680 if (new_cdclk_state && 2681 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) 2682 *need_cdclk_calc = true; 2683 2684 ret = intel_cdclk_bw_calc_min_cdclk(state); 2685 if (ret) 2686 return ret; 2687 2688 new_bw_state = intel_atomic_get_new_bw_state(state); 2689 2690 if (!new_cdclk_state || !new_bw_state) 2691 return 0; 2692 2693 for_each_pipe(i915, pipe) { 2694 min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); 2695 2696 /* Currently do this change only if we need to increase */ 2697 if (new_bw_state->min_cdclk > min_cdclk) 2698 *need_cdclk_calc = true; 2699 } 2700 2701 return 0; 2702 } 2703 2704 int intel_cdclk_init(struct drm_i915_private *dev_priv) 2705 { 2706 struct intel_cdclk_state *cdclk_state; 2707 2708 cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL); 2709 if (!cdclk_state) 2710 return -ENOMEM; 2711 2712 intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj, 2713 &cdclk_state->base, &intel_cdclk_funcs); 2714 2715 return 0; 2716 } 2717 2718 int intel_modeset_calc_cdclk(struct intel_atomic_state *state) 2719 { 2720 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 2721 const struct intel_cdclk_state *old_cdclk_state; 2722 struct intel_cdclk_state *new_cdclk_state; 2723 enum pipe pipe = INVALID_PIPE; 2724 int ret; 2725 2726 new_cdclk_state = intel_atomic_get_cdclk_state(state); 2727 if (IS_ERR(new_cdclk_state)) 2728 return PTR_ERR(new_cdclk_state); 2729 2730 old_cdclk_state = intel_atomic_get_old_cdclk_state(state); 2731 2732 new_cdclk_state->active_pipes = 2733 intel_calc_active_pipes(state, old_cdclk_state->active_pipes); 2734 2735 ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state); 2736 if (ret) 2737 return ret; 2738 2739 if (intel_cdclk_changed(&old_cdclk_state->actual, 2740 &new_cdclk_state->actual)) { 2741 /* 2742 * Also serialize commits across all crtcs 2743 * if the actual hw needs to be poked. 
2744 */
2745 ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
2746 if (ret)
2747 return ret;
2748 } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
2749 old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
2750 intel_cdclk_changed(&old_cdclk_state->logical,
2751 &new_cdclk_state->logical)) {
2752 ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
2753 if (ret)
2754 return ret;
2755 } else {
2756 return 0;
2757 }
2758
2759 if (is_power_of_2(new_cdclk_state->active_pipes) &&
2760 intel_cdclk_can_cd2x_update(dev_priv,
2761 &old_cdclk_state->actual,
2762 &new_cdclk_state->actual)) {
2763 struct intel_crtc *crtc;
2764 struct intel_crtc_state *crtc_state;
2765
2766 pipe = ilog2(new_cdclk_state->active_pipes);
2767 crtc = intel_crtc_for_pipe(dev_priv, pipe);
2768
2769 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
2770 if (IS_ERR(crtc_state))
2771 return PTR_ERR(crtc_state);
2772
2773 if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
2774 pipe = INVALID_PIPE;
2775 }
2776
2777 if (intel_cdclk_can_squash(dev_priv,
2778 &old_cdclk_state->actual,
2779 &new_cdclk_state->actual)) {
2780 drm_dbg_kms(&dev_priv->drm,
2781 "Can change cdclk via squasher\n");
2782 } else if (intel_cdclk_can_crawl(dev_priv,
2783 &old_cdclk_state->actual,
2784 &new_cdclk_state->actual)) {
2785 drm_dbg_kms(&dev_priv->drm,
2786 "Can change cdclk via crawl\n");
2787 } else if (pipe != INVALID_PIPE) {
2788 new_cdclk_state->pipe = pipe;
2789
2790 drm_dbg_kms(&dev_priv->drm,
2791 "Can change cdclk cd2x divider with pipe %c active\n",
2792 pipe_name(pipe));
2793 } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
2794 &new_cdclk_state->actual)) {
2795 /* All pipes must be switched off while we change the cdclk. */
2796 ret = intel_modeset_all_pipes(state);
2797 if (ret)
2798 return ret;
2799
2800 drm_dbg_kms(&dev_priv->drm,
2801 "Modeset required for cdclk change\n");
2802 }
2803
2804 drm_dbg_kms(&dev_priv->drm,
2805 "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
2806 new_cdclk_state->logical.cdclk,
2807 new_cdclk_state->actual.cdclk);
2808 drm_dbg_kms(&dev_priv->drm,
2809 "New voltage level calculated to be logical %u, actual %u\n",
2810 new_cdclk_state->logical.voltage_level,
2811 new_cdclk_state->actual.voltage_level);
2812
2813 return 0;
2814 }
2815
2816 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
2817 {
2818 int max_cdclk_freq = dev_priv->max_cdclk_freq;
2819
2820 if (DISPLAY_VER(dev_priv) >= 10)
2821 return 2 * max_cdclk_freq;
2822 else if (DISPLAY_VER(dev_priv) == 9 ||
2823 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2824 return max_cdclk_freq;
2825 else if (IS_CHERRYVIEW(dev_priv))
2826 return max_cdclk_freq * 95 / 100;
2827 else if (DISPLAY_VER(dev_priv) < 4)
2828 return 2 * max_cdclk_freq * 90 / 100;
2829 else
2830 return max_cdclk_freq * 90 / 100;
2831 }
2832
2833 /**
2834 * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency
2835 * @dev_priv: i915 device
2836 *
2837 * Determine the maximum CDCLK frequency the platform supports, and also
2838 * derive the maximum dot clock frequency that the maximum CDCLK frequency
2839 * allows.
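 * The dotclock derivation is done by intel_compute_max_dotclk(); e.g. on
 * DISPLAY_VER >= 10 two pixels are processed per CDCLK cycle, so the
 * maximum dotclock is twice the maximum CDCLK.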
2840 */ 2841 void intel_update_max_cdclk(struct drm_i915_private *dev_priv) 2842 { 2843 if (IS_JSL_EHL(dev_priv)) { 2844 if (dev_priv->cdclk.hw.ref == 24000) 2845 dev_priv->max_cdclk_freq = 552000; 2846 else 2847 dev_priv->max_cdclk_freq = 556800; 2848 } else if (DISPLAY_VER(dev_priv) >= 11) { 2849 if (dev_priv->cdclk.hw.ref == 24000) 2850 dev_priv->max_cdclk_freq = 648000; 2851 else 2852 dev_priv->max_cdclk_freq = 652800; 2853 } else if (IS_GEMINILAKE(dev_priv)) { 2854 dev_priv->max_cdclk_freq = 316800; 2855 } else if (IS_BROXTON(dev_priv)) { 2856 dev_priv->max_cdclk_freq = 624000; 2857 } else if (DISPLAY_VER(dev_priv) == 9) { 2858 u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 2859 int max_cdclk, vco; 2860 2861 vco = dev_priv->skl_preferred_vco_freq; 2862 drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); 2863 2864 /* 2865 * Use the lower (vco 8640) cdclk values as a 2866 * first guess. skl_calc_cdclk() will correct it 2867 * if the preferred vco is 8100 instead. 2868 */ 2869 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 2870 max_cdclk = 617143; 2871 else if (limit == SKL_DFSM_CDCLK_LIMIT_540) 2872 max_cdclk = 540000; 2873 else if (limit == SKL_DFSM_CDCLK_LIMIT_450) 2874 max_cdclk = 432000; 2875 else 2876 max_cdclk = 308571; 2877 2878 dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); 2879 } else if (IS_BROADWELL(dev_priv)) { 2880 /* 2881 * FIXME with extra cooling we can allow 2882 * 540 MHz for ULX and 675 Mhz for ULT. 2883 * How can we know if extra cooling is 2884 * available? PCI ID, VTB, something else? 2885 */ 2886 if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) 2887 dev_priv->max_cdclk_freq = 450000; 2888 else if (IS_BDW_ULX(dev_priv)) 2889 dev_priv->max_cdclk_freq = 450000; 2890 else if (IS_BDW_ULT(dev_priv)) 2891 dev_priv->max_cdclk_freq = 540000; 2892 else 2893 dev_priv->max_cdclk_freq = 675000; 2894 } else if (IS_CHERRYVIEW(dev_priv)) { 2895 dev_priv->max_cdclk_freq = 320000; 2896 } else if (IS_VALLEYVIEW(dev_priv)) { 2897 dev_priv->max_cdclk_freq = 400000; 2898 } else { 2899 /* otherwise assume cdclk is fixed */ 2900 dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk; 2901 } 2902 2903 dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); 2904 2905 drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", 2906 dev_priv->max_cdclk_freq); 2907 2908 drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", 2909 dev_priv->max_dotclk_freq); 2910 } 2911 2912 /** 2913 * intel_update_cdclk - Determine the current CDCLK frequency 2914 * @dev_priv: i915 device 2915 * 2916 * Determine the current CDCLK frequency. 2917 */ 2918 void intel_update_cdclk(struct drm_i915_private *dev_priv) 2919 { 2920 intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw); 2921 2922 /* 2923 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): 2924 * Programmng [sic] note: bit[9:2] should be programmed to the number 2925 * of cdclk that generates 4MHz reference clock freq which is used to 2926 * generate GMBus clock. This will vary with the cdclk freq. 2927 */ 2928 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2929 intel_de_write(dev_priv, GMBUSFREQ_VLV, 2930 DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); 2931 } 2932 2933 static int dg1_rawclk(struct drm_i915_private *dev_priv) 2934 { 2935 /* 2936 * DG1 always uses a 38.4 MHz rawclk. The bspec tells us 2937 * "Program Numerator=2, Denominator=4, Divider=37 decimal." 
2938 */ 2939 intel_de_write(dev_priv, PCH_RAWCLK_FREQ, 2940 CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); 2941 2942 return 38400; 2943 } 2944 2945 static int cnp_rawclk(struct drm_i915_private *dev_priv) 2946 { 2947 u32 rawclk; 2948 int divider, fraction; 2949 2950 if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { 2951 /* 24 MHz */ 2952 divider = 24000; 2953 fraction = 0; 2954 } else { 2955 /* 19.2 MHz */ 2956 divider = 19000; 2957 fraction = 200; 2958 } 2959 2960 rawclk = CNP_RAWCLK_DIV(divider / 1000); 2961 if (fraction) { 2962 int numerator = 1; 2963 2964 rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, 2965 fraction) - 1); 2966 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 2967 rawclk |= ICP_RAWCLK_NUM(numerator); 2968 } 2969 2970 intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk); 2971 return divider + fraction; 2972 } 2973 2974 static int pch_rawclk(struct drm_i915_private *dev_priv) 2975 { 2976 return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; 2977 } 2978 2979 static int vlv_hrawclk(struct drm_i915_private *dev_priv) 2980 { 2981 /* RAWCLK_FREQ_VLV register updated from power well code */ 2982 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", 2983 CCK_DISPLAY_REF_CLOCK_CONTROL); 2984 } 2985 2986 static int i9xx_hrawclk(struct drm_i915_private *dev_priv) 2987 { 2988 u32 clkcfg; 2989 2990 /* 2991 * hrawclock is 1/4 the FSB frequency 2992 * 2993 * Note that this only reads the state of the FSB 2994 * straps, not the actual FSB frequency. Some BIOSen 2995 * let you configure each independently. Ideally we'd 2996 * read out the actual FSB frequency but sadly we 2997 * don't know which registers have that information, 2998 * and all the relevant docs have gone to bit heaven :( 2999 */ 3000 clkcfg = intel_de_read(dev_priv, CLKCFG) & CLKCFG_FSB_MASK; 3001 3002 if (IS_MOBILE(dev_priv)) { 3003 switch (clkcfg) { 3004 case CLKCFG_FSB_400: 3005 return 100000; 3006 case CLKCFG_FSB_533: 3007 return 133333; 3008 case CLKCFG_FSB_667: 3009 return 166667; 3010 case CLKCFG_FSB_800: 3011 return 200000; 3012 case CLKCFG_FSB_1067: 3013 return 266667; 3014 case CLKCFG_FSB_1333: 3015 return 333333; 3016 default: 3017 MISSING_CASE(clkcfg); 3018 return 133333; 3019 } 3020 } else { 3021 switch (clkcfg) { 3022 case CLKCFG_FSB_400_ALT: 3023 return 100000; 3024 case CLKCFG_FSB_533: 3025 return 133333; 3026 case CLKCFG_FSB_667: 3027 return 166667; 3028 case CLKCFG_FSB_800: 3029 return 200000; 3030 case CLKCFG_FSB_1067_ALT: 3031 return 266667; 3032 case CLKCFG_FSB_1333_ALT: 3033 return 333333; 3034 case CLKCFG_FSB_1600_ALT: 3035 return 400000; 3036 default: 3037 return 133333; 3038 } 3039 } 3040 } 3041 3042 /** 3043 * intel_read_rawclk - Determine the current RAWCLK frequency 3044 * @dev_priv: i915 device 3045 * 3046 * Determine the current RAWCLK frequency. RAWCLK is a fixed 3047 * frequency clock so this needs to done only once. 
3048 */ 3049 u32 intel_read_rawclk(struct drm_i915_private *dev_priv) 3050 { 3051 u32 freq; 3052 3053 if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) 3054 freq = dg1_rawclk(dev_priv); 3055 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3056 freq = cnp_rawclk(dev_priv); 3057 else if (HAS_PCH_SPLIT(dev_priv)) 3058 freq = pch_rawclk(dev_priv); 3059 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 3060 freq = vlv_hrawclk(dev_priv); 3061 else if (DISPLAY_VER(dev_priv) >= 3) 3062 freq = i9xx_hrawclk(dev_priv); 3063 else 3064 /* no rawclk on other platforms, or no need to know it */ 3065 return 0; 3066 3067 return freq; 3068 } 3069 3070 static const struct intel_cdclk_funcs tgl_cdclk_funcs = { 3071 .get_cdclk = bxt_get_cdclk, 3072 .set_cdclk = bxt_set_cdclk, 3073 .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, 3074 .modeset_calc_cdclk = bxt_modeset_calc_cdclk, 3075 .calc_voltage_level = tgl_calc_voltage_level, 3076 }; 3077 3078 static const struct intel_cdclk_funcs ehl_cdclk_funcs = { 3079 .get_cdclk = bxt_get_cdclk, 3080 .set_cdclk = bxt_set_cdclk, 3081 .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, 3082 .modeset_calc_cdclk = bxt_modeset_calc_cdclk, 3083 .calc_voltage_level = ehl_calc_voltage_level, 3084 }; 3085 3086 static const struct intel_cdclk_funcs icl_cdclk_funcs = { 3087 .get_cdclk = bxt_get_cdclk, 3088 .set_cdclk = bxt_set_cdclk, 3089 .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, 3090 .modeset_calc_cdclk = bxt_modeset_calc_cdclk, 3091 .calc_voltage_level = icl_calc_voltage_level, 3092 }; 3093 3094 static const struct intel_cdclk_funcs bxt_cdclk_funcs = { 3095 .get_cdclk = bxt_get_cdclk, 3096 .set_cdclk = bxt_set_cdclk, 3097 .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, 3098 .modeset_calc_cdclk = bxt_modeset_calc_cdclk, 3099 .calc_voltage_level = bxt_calc_voltage_level, 3100 }; 3101 3102 static const struct intel_cdclk_funcs skl_cdclk_funcs = { 3103 .get_cdclk = skl_get_cdclk, 3104 .set_cdclk = skl_set_cdclk, 3105 .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, 3106 .modeset_calc_cdclk = skl_modeset_calc_cdclk, 3107 }; 3108 3109 static const struct intel_cdclk_funcs bdw_cdclk_funcs = { 3110 .get_cdclk = bdw_get_cdclk, 3111 .set_cdclk = bdw_set_cdclk, 3112 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3113 .modeset_calc_cdclk = bdw_modeset_calc_cdclk, 3114 }; 3115 3116 static const struct intel_cdclk_funcs chv_cdclk_funcs = { 3117 .get_cdclk = vlv_get_cdclk, 3118 .set_cdclk = chv_set_cdclk, 3119 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3120 .modeset_calc_cdclk = vlv_modeset_calc_cdclk, 3121 }; 3122 3123 static const struct intel_cdclk_funcs vlv_cdclk_funcs = { 3124 .get_cdclk = vlv_get_cdclk, 3125 .set_cdclk = vlv_set_cdclk, 3126 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3127 .modeset_calc_cdclk = vlv_modeset_calc_cdclk, 3128 }; 3129 3130 static const struct intel_cdclk_funcs hsw_cdclk_funcs = { 3131 .get_cdclk = hsw_get_cdclk, 3132 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3133 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3134 }; 3135 3136 /* SNB, IVB, 965G, 945G */ 3137 static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { 3138 .get_cdclk = fixed_400mhz_get_cdclk, 3139 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3140 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3141 }; 3142 3143 static const struct intel_cdclk_funcs ilk_cdclk_funcs = { 3144 .get_cdclk = fixed_450mhz_get_cdclk, 3145 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3146 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3147 }; 3148 3149 static const struct intel_cdclk_funcs gm45_cdclk_funcs = { 3150 
.get_cdclk = gm45_get_cdclk, 3151 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3152 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3153 }; 3154 3155 /* G45 uses G33 */ 3156 3157 static const struct intel_cdclk_funcs i965gm_cdclk_funcs = { 3158 .get_cdclk = i965gm_get_cdclk, 3159 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3160 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3161 }; 3162 3163 /* i965G uses fixed 400 */ 3164 3165 static const struct intel_cdclk_funcs pnv_cdclk_funcs = { 3166 .get_cdclk = pnv_get_cdclk, 3167 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3168 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3169 }; 3170 3171 static const struct intel_cdclk_funcs g33_cdclk_funcs = { 3172 .get_cdclk = g33_get_cdclk, 3173 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3174 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3175 }; 3176 3177 static const struct intel_cdclk_funcs i945gm_cdclk_funcs = { 3178 .get_cdclk = i945gm_get_cdclk, 3179 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3180 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3181 }; 3182 3183 /* i945G uses fixed 400 */ 3184 3185 static const struct intel_cdclk_funcs i915gm_cdclk_funcs = { 3186 .get_cdclk = i915gm_get_cdclk, 3187 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3188 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3189 }; 3190 3191 static const struct intel_cdclk_funcs i915g_cdclk_funcs = { 3192 .get_cdclk = fixed_333mhz_get_cdclk, 3193 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3194 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3195 }; 3196 3197 static const struct intel_cdclk_funcs i865g_cdclk_funcs = { 3198 .get_cdclk = fixed_266mhz_get_cdclk, 3199 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3200 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3201 }; 3202 3203 static const struct intel_cdclk_funcs i85x_cdclk_funcs = { 3204 .get_cdclk = i85x_get_cdclk, 3205 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3206 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3207 }; 3208 3209 static const struct intel_cdclk_funcs i845g_cdclk_funcs = { 3210 .get_cdclk = fixed_200mhz_get_cdclk, 3211 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3212 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3213 }; 3214 3215 static const struct intel_cdclk_funcs i830_cdclk_funcs = { 3216 .get_cdclk = fixed_133mhz_get_cdclk, 3217 .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, 3218 .modeset_calc_cdclk = fixed_modeset_calc_cdclk, 3219 }; 3220 3221 /** 3222 * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks 3223 * @dev_priv: i915 device 3224 */ 3225 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) 3226 { 3227 if (IS_DG2(dev_priv)) { 3228 dev_priv->cdclk_funcs = &tgl_cdclk_funcs; 3229 dev_priv->cdclk.table = dg2_cdclk_table; 3230 } else if (IS_ALDERLAKE_P(dev_priv)) { 3231 dev_priv->cdclk_funcs = &tgl_cdclk_funcs; 3232 /* Wa_22011320316:adl-p[a0] */ 3233 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 3234 dev_priv->cdclk.table = adlp_a_step_cdclk_table; 3235 else 3236 dev_priv->cdclk.table = adlp_cdclk_table; 3237 } else if (IS_ROCKETLAKE(dev_priv)) { 3238 dev_priv->cdclk_funcs = &tgl_cdclk_funcs; 3239 dev_priv->cdclk.table = rkl_cdclk_table; 3240 } else if (DISPLAY_VER(dev_priv) >= 12) { 3241 dev_priv->cdclk_funcs = &tgl_cdclk_funcs; 3242 dev_priv->cdclk.table = icl_cdclk_table; 3243 } else if (IS_JSL_EHL(dev_priv)) { 3244 dev_priv->cdclk_funcs = &ehl_cdclk_funcs; 3245 dev_priv->cdclk.table = icl_cdclk_table; 3246 } else if (DISPLAY_VER(dev_priv) >= 11) { 3247 dev_priv->cdclk_funcs = 
&icl_cdclk_funcs; 3248 dev_priv->cdclk.table = icl_cdclk_table; 3249 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 3250 dev_priv->cdclk_funcs = &bxt_cdclk_funcs; 3251 if (IS_GEMINILAKE(dev_priv)) 3252 dev_priv->cdclk.table = glk_cdclk_table; 3253 else 3254 dev_priv->cdclk.table = bxt_cdclk_table; 3255 } else if (DISPLAY_VER(dev_priv) == 9) { 3256 dev_priv->cdclk_funcs = &skl_cdclk_funcs; 3257 } else if (IS_BROADWELL(dev_priv)) { 3258 dev_priv->cdclk_funcs = &bdw_cdclk_funcs; 3259 } else if (IS_HASWELL(dev_priv)) { 3260 dev_priv->cdclk_funcs = &hsw_cdclk_funcs; 3261 } else if (IS_CHERRYVIEW(dev_priv)) { 3262 dev_priv->cdclk_funcs = &chv_cdclk_funcs; 3263 } else if (IS_VALLEYVIEW(dev_priv)) { 3264 dev_priv->cdclk_funcs = &vlv_cdclk_funcs; 3265 } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { 3266 dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; 3267 } else if (IS_IRONLAKE(dev_priv)) { 3268 dev_priv->cdclk_funcs = &ilk_cdclk_funcs; 3269 } else if (IS_GM45(dev_priv)) { 3270 dev_priv->cdclk_funcs = &gm45_cdclk_funcs; 3271 } else if (IS_G45(dev_priv)) { 3272 dev_priv->cdclk_funcs = &g33_cdclk_funcs; 3273 } else if (IS_I965GM(dev_priv)) { 3274 dev_priv->cdclk_funcs = &i965gm_cdclk_funcs; 3275 } else if (IS_I965G(dev_priv)) { 3276 dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; 3277 } else if (IS_PINEVIEW(dev_priv)) { 3278 dev_priv->cdclk_funcs = &pnv_cdclk_funcs; 3279 } else if (IS_G33(dev_priv)) { 3280 dev_priv->cdclk_funcs = &g33_cdclk_funcs; 3281 } else if (IS_I945GM(dev_priv)) { 3282 dev_priv->cdclk_funcs = &i945gm_cdclk_funcs; 3283 } else if (IS_I945G(dev_priv)) { 3284 dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; 3285 } else if (IS_I915GM(dev_priv)) { 3286 dev_priv->cdclk_funcs = &i915gm_cdclk_funcs; 3287 } else if (IS_I915G(dev_priv)) { 3288 dev_priv->cdclk_funcs = &i915g_cdclk_funcs; 3289 } else if (IS_I865G(dev_priv)) { 3290 dev_priv->cdclk_funcs = &i865g_cdclk_funcs; 3291 } else if (IS_I85X(dev_priv)) { 3292 dev_priv->cdclk_funcs = &i85x_cdclk_funcs; 3293 } else if (IS_I845G(dev_priv)) { 3294 dev_priv->cdclk_funcs = &i845g_cdclk_funcs; 3295 } else if (IS_I830(dev_priv)) { 3296 dev_priv->cdclk_funcs = &i830_cdclk_funcs; 3297 } 3298 3299 if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs, 3300 "Unknown platform. Assuming i830\n")) 3301 dev_priv->cdclk_funcs = &i830_cdclk_funcs; 3302 } 3303