1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25 #include <linux/debugfs.h> 26 #include <linux/firmware.h> 27 28 #include "i915_drv.h" 29 #include "i915_reg.h" 30 #include "intel_de.h" 31 #include "intel_dmc.h" 32 #include "intel_dmc_regs.h" 33 #include "intel_step.h" 34 35 /** 36 * DOC: DMC Firmware Support 37 * 38 * From gen9 onwards we have newly added DMC (Display microcontroller) in display 39 * engine to save and restore the state of display engine when it enter into 40 * low-power state and comes back to normal. 41 */ 42 43 #define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git" 44 45 enum intel_dmc_id { 46 DMC_FW_MAIN = 0, 47 DMC_FW_PIPEA, 48 DMC_FW_PIPEB, 49 DMC_FW_PIPEC, 50 DMC_FW_PIPED, 51 DMC_FW_MAX 52 }; 53 54 struct intel_dmc { 55 struct drm_i915_private *i915; 56 struct work_struct work; 57 const char *fw_path; 58 u32 max_fw_size; /* bytes */ 59 u32 version; 60 struct dmc_fw_info { 61 u32 mmio_count; 62 i915_reg_t mmioaddr[20]; 63 u32 mmiodata[20]; 64 u32 dmc_offset; 65 u32 start_mmioaddr; 66 u32 dmc_fw_size; /*dwords */ 67 u32 *payload; 68 bool present; 69 } dmc_info[DMC_FW_MAX]; 70 }; 71 72 /* Note: This may be NULL. */ 73 static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) 74 { 75 return i915->display.dmc.dmc; 76 } 77 78 static const char *dmc_firmware_param(struct drm_i915_private *i915) 79 { 80 const char *p = i915->display.params.dmc_firmware_path; 81 82 return p && *p ? p : NULL; 83 } 84 85 static bool dmc_firmware_param_disabled(struct drm_i915_private *i915) 86 { 87 const char *p = dmc_firmware_param(i915); 88 89 /* Magic path to indicate disabled */ 90 return p && !strcmp(p, "/dev/null"); 91 } 92 93 #define DMC_VERSION(major, minor) ((major) << 16 | (minor)) 94 #define DMC_VERSION_MAJOR(version) ((version) >> 16) 95 #define DMC_VERSION_MINOR(version) ((version) & 0xffff) 96 97 #define DMC_PATH(platform) \ 98 "i915/" __stringify(platform) "_dmc.bin" 99 100 /* 101 * New DMC additions should not use this. This is used solely to remain 102 * compatible with systems that have not yet updated DMC blobs to use 103 * unversioned file names. 104 */ 105 #define DMC_LEGACY_PATH(platform, major, minor) \ 106 "i915/" \ 107 __stringify(platform) "_dmc_ver" \ 108 __stringify(major) "_" \ 109 __stringify(minor) ".bin" 110 111 #define XE2LPD_DMC_MAX_FW_SIZE 0x8000 112 #define XELPDP_DMC_MAX_FW_SIZE 0x7000 113 #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 114 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE 115 116 #define XE2LPD_DMC_PATH DMC_PATH(xe2lpd) 117 MODULE_FIRMWARE(XE2LPD_DMC_PATH); 118 119 #define BMG_DMC_PATH DMC_PATH(bmg) 120 MODULE_FIRMWARE(BMG_DMC_PATH); 121 122 #define MTL_DMC_PATH DMC_PATH(mtl) 123 MODULE_FIRMWARE(MTL_DMC_PATH); 124 125 #define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08) 126 MODULE_FIRMWARE(DG2_DMC_PATH); 127 128 #define ADLP_DMC_PATH DMC_PATH(adlp) 129 #define ADLP_DMC_FALLBACK_PATH DMC_LEGACY_PATH(adlp, 2, 16) 130 MODULE_FIRMWARE(ADLP_DMC_PATH); 131 MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH); 132 133 #define ADLS_DMC_PATH DMC_LEGACY_PATH(adls, 2, 01) 134 MODULE_FIRMWARE(ADLS_DMC_PATH); 135 136 #define DG1_DMC_PATH DMC_LEGACY_PATH(dg1, 2, 02) 137 MODULE_FIRMWARE(DG1_DMC_PATH); 138 139 #define RKL_DMC_PATH DMC_LEGACY_PATH(rkl, 2, 03) 140 MODULE_FIRMWARE(RKL_DMC_PATH); 141 142 #define TGL_DMC_PATH DMC_LEGACY_PATH(tgl, 2, 12) 143 MODULE_FIRMWARE(TGL_DMC_PATH); 144 145 #define ICL_DMC_PATH DMC_LEGACY_PATH(icl, 1, 09) 146 #define ICL_DMC_MAX_FW_SIZE 0x6000 147 MODULE_FIRMWARE(ICL_DMC_PATH); 148 149 #define GLK_DMC_PATH DMC_LEGACY_PATH(glk, 1, 04) 150 #define GLK_DMC_MAX_FW_SIZE 0x4000 151 MODULE_FIRMWARE(GLK_DMC_PATH); 152 153 #define KBL_DMC_PATH DMC_LEGACY_PATH(kbl, 1, 04) 154 #define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE 155 MODULE_FIRMWARE(KBL_DMC_PATH); 156 157 #define SKL_DMC_PATH DMC_LEGACY_PATH(skl, 1, 27) 158 #define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE 159 MODULE_FIRMWARE(SKL_DMC_PATH); 160 161 #define BXT_DMC_PATH DMC_LEGACY_PATH(bxt, 1, 07) 162 #define BXT_DMC_MAX_FW_SIZE 0x3000 163 MODULE_FIRMWARE(BXT_DMC_PATH); 164 165 static const char *dmc_firmware_default(struct drm_i915_private *i915, u32 *size) 166 { 167 const char *fw_path = NULL; 168 u32 max_fw_size = 0; 169 170 if (DISPLAY_VER_FULL(i915) == IP_VER(20, 0)) { 171 fw_path = XE2LPD_DMC_PATH; 172 max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; 173 } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 1)) { 174 fw_path = BMG_DMC_PATH; 175 max_fw_size = XELPDP_DMC_MAX_FW_SIZE; 176 } else if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0)) { 177 fw_path = MTL_DMC_PATH; 178 max_fw_size = XELPDP_DMC_MAX_FW_SIZE; 179 } else if (IS_DG2(i915)) { 180 fw_path = DG2_DMC_PATH; 181 max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 182 } else if (IS_ALDERLAKE_P(i915)) { 183 fw_path = ADLP_DMC_PATH; 184 max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 185 } else if (IS_ALDERLAKE_S(i915)) { 186 fw_path = ADLS_DMC_PATH; 187 max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 188 } else if (IS_DG1(i915)) { 189 fw_path = DG1_DMC_PATH; 190 max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 191 } else if (IS_ROCKETLAKE(i915)) { 192 fw_path = RKL_DMC_PATH; 193 max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 194 } else if (IS_TIGERLAKE(i915)) { 195 fw_path = TGL_DMC_PATH; 196 max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 197 } else if (DISPLAY_VER(i915) == 11) { 198 fw_path = ICL_DMC_PATH; 199 max_fw_size = ICL_DMC_MAX_FW_SIZE; 200 } else if (IS_GEMINILAKE(i915)) { 201 fw_path = GLK_DMC_PATH; 202 max_fw_size = GLK_DMC_MAX_FW_SIZE; 203 } else if (IS_KABYLAKE(i915) || 204 IS_COFFEELAKE(i915) || 205 IS_COMETLAKE(i915)) { 206 fw_path = KBL_DMC_PATH; 207 max_fw_size = KBL_DMC_MAX_FW_SIZE; 208 } else if (IS_SKYLAKE(i915)) { 209 fw_path = SKL_DMC_PATH; 210 max_fw_size = SKL_DMC_MAX_FW_SIZE; 211 } else if (IS_BROXTON(i915)) { 212 fw_path = BXT_DMC_PATH; 213 max_fw_size = BXT_DMC_MAX_FW_SIZE; 214 } 215 216 *size = max_fw_size; 217 218 return fw_path; 219 } 220 221 #define DMC_DEFAULT_FW_OFFSET 0xFFFFFFFF 222 #define PACKAGE_MAX_FW_INFO_ENTRIES 20 223 #define PACKAGE_V2_MAX_FW_INFO_ENTRIES 32 224 #define DMC_V1_MAX_MMIO_COUNT 8 225 #define DMC_V3_MAX_MMIO_COUNT 20 226 #define DMC_V1_MMIO_START_RANGE 0x80000 227 228 #define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A)) 229 230 struct intel_css_header { 231 /* 0x09 for DMC */ 232 u32 module_type; 233 234 /* Includes the DMC specific header in dwords */ 235 u32 header_len; 236 237 /* always value would be 0x10000 */ 238 u32 header_ver; 239 240 /* Not used */ 241 u32 module_id; 242 243 /* Not used */ 244 u32 module_vendor; 245 246 /* in YYYYMMDD format */ 247 u32 date; 248 249 /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */ 250 u32 size; 251 252 /* Not used */ 253 u32 key_size; 254 255 /* Not used */ 256 u32 modulus_size; 257 258 /* Not used */ 259 u32 exponent_size; 260 261 /* Not used */ 262 u32 reserved1[12]; 263 264 /* Major Minor */ 265 u32 version; 266 267 /* Not used */ 268 u32 reserved2[8]; 269 270 /* Not used */ 271 u32 kernel_header_info; 272 } __packed; 273 274 struct intel_fw_info { 275 u8 reserved1; 276 277 /* reserved on package_header version 1, must be 0 on version 2 */ 278 u8 dmc_id; 279 280 /* Stepping (A, B, C, ..., *). * is a wildcard */ 281 char stepping; 282 283 /* Sub-stepping (0, 1, ..., *). * is a wildcard */ 284 char substepping; 285 286 u32 offset; 287 u32 reserved2; 288 } __packed; 289 290 struct intel_package_header { 291 /* DMC container header length in dwords */ 292 u8 header_len; 293 294 /* 0x01, 0x02 */ 295 u8 header_ver; 296 297 u8 reserved[10]; 298 299 /* Number of valid entries in the FWInfo array below */ 300 u32 num_entries; 301 } __packed; 302 303 struct intel_dmc_header_base { 304 /* always value would be 0x40403E3E */ 305 u32 signature; 306 307 /* DMC binary header length */ 308 u8 header_len; 309 310 /* 0x01 */ 311 u8 header_ver; 312 313 /* Reserved */ 314 u16 dmcc_ver; 315 316 /* Major, Minor */ 317 u32 project; 318 319 /* Firmware program size (excluding header) in dwords */ 320 u32 fw_size; 321 322 /* Major Minor version */ 323 u32 fw_version; 324 } __packed; 325 326 struct intel_dmc_header_v1 { 327 struct intel_dmc_header_base base; 328 329 /* Number of valid MMIO cycles present. */ 330 u32 mmio_count; 331 332 /* MMIO address */ 333 u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT]; 334 335 /* MMIO data */ 336 u32 mmiodata[DMC_V1_MAX_MMIO_COUNT]; 337 338 /* FW filename */ 339 char dfile[32]; 340 341 u32 reserved1[2]; 342 } __packed; 343 344 struct intel_dmc_header_v3 { 345 struct intel_dmc_header_base base; 346 347 /* DMC RAM start MMIO address */ 348 u32 start_mmioaddr; 349 350 u32 reserved[9]; 351 352 /* FW filename */ 353 char dfile[32]; 354 355 /* Number of valid MMIO cycles present. */ 356 u32 mmio_count; 357 358 /* MMIO address */ 359 u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT]; 360 361 /* MMIO data */ 362 u32 mmiodata[DMC_V3_MAX_MMIO_COUNT]; 363 } __packed; 364 365 struct stepping_info { 366 char stepping; 367 char substepping; 368 }; 369 370 #define for_each_dmc_id(__dmc_id) \ 371 for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++) 372 373 static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) 374 { 375 return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; 376 } 377 378 static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) 379 { 380 struct intel_dmc *dmc = i915_to_dmc(i915); 381 382 return dmc && dmc->dmc_info[dmc_id].payload; 383 } 384 385 bool intel_dmc_has_payload(struct drm_i915_private *i915) 386 { 387 return has_dmc_id_fw(i915, DMC_FW_MAIN); 388 } 389 390 static const struct stepping_info * 391 intel_get_stepping_info(struct drm_i915_private *i915, 392 struct stepping_info *si) 393 { 394 const char *step_name = intel_display_step_name(i915); 395 396 si->stepping = step_name[0]; 397 si->substepping = step_name[1]; 398 return si; 399 } 400 401 static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) 402 { 403 /* The below bit doesn't need to be cleared ever afterwards */ 404 intel_de_rmw(i915, DC_STATE_DEBUG, 0, 405 DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); 406 intel_de_posting_read(i915, DC_STATE_DEBUG); 407 } 408 409 static void disable_event_handler(struct drm_i915_private *i915, 410 i915_reg_t ctl_reg, i915_reg_t htp_reg) 411 { 412 intel_de_write(i915, ctl_reg, 413 REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, 414 DMC_EVT_CTL_TYPE_EDGE_0_1) | 415 REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, 416 DMC_EVT_CTL_EVENT_ID_FALSE)); 417 intel_de_write(i915, htp_reg, 0); 418 } 419 420 static void disable_all_event_handlers(struct drm_i915_private *i915) 421 { 422 enum intel_dmc_id dmc_id; 423 424 /* TODO: disable the event handlers on pre-GEN12 platforms as well */ 425 if (DISPLAY_VER(i915) < 12) 426 return; 427 428 for_each_dmc_id(dmc_id) { 429 int handler; 430 431 if (!has_dmc_id_fw(i915, dmc_id)) 432 continue; 433 434 for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) 435 disable_event_handler(i915, 436 DMC_EVT_CTL(i915, dmc_id, handler), 437 DMC_EVT_HTP(i915, dmc_id, handler)); 438 } 439 } 440 441 static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) 442 { 443 enum pipe pipe; 444 445 /* 446 * Wa_16015201720:adl-p,dg2 447 * The WA requires clock gating to be disabled all the time 448 * for pipe A and B. 449 * For pipe C and D clock gating needs to be disabled only 450 * during initializing the firmware. 451 */ 452 if (enable) 453 for (pipe = PIPE_A; pipe <= PIPE_D; pipe++) 454 intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), 455 0, PIPEDMC_GATING_DIS); 456 else 457 for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) 458 intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), 459 PIPEDMC_GATING_DIS, 0); 460 } 461 462 static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915) 463 { 464 /* 465 * Wa_16015201720 466 * The WA requires clock gating to be disabled all the time 467 * for pipe A and B. 468 */ 469 intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, 470 MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); 471 } 472 473 static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) 474 { 475 if (DISPLAY_VER(i915) >= 14 && enable) 476 mtl_pipedmc_clock_gating_wa(i915); 477 else if (DISPLAY_VER(i915) == 13) 478 adlp_pipedmc_clock_gating_wa(i915, enable); 479 } 480 481 void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) 482 { 483 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); 484 485 if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) 486 return; 487 488 if (DISPLAY_VER(i915) >= 14) 489 intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); 490 else 491 intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); 492 } 493 494 void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) 495 { 496 enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); 497 498 if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) 499 return; 500 501 if (DISPLAY_VER(i915) >= 14) 502 intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); 503 else 504 intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); 505 } 506 507 static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915, 508 enum intel_dmc_id dmc_id, i915_reg_t reg) 509 { 510 u32 offset = i915_mmio_reg_offset(reg); 511 u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0)); 512 u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); 513 514 return offset >= start && offset < end; 515 } 516 517 static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915, 518 enum intel_dmc_id dmc_id, i915_reg_t reg) 519 { 520 u32 offset = i915_mmio_reg_offset(reg); 521 u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0)); 522 u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12)); 523 524 return offset >= start && offset < end; 525 } 526 527 static bool disable_dmc_evt(struct drm_i915_private *i915, 528 enum intel_dmc_id dmc_id, 529 i915_reg_t reg, u32 data) 530 { 531 if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg)) 532 return false; 533 534 /* keep all pipe DMC events disabled by default */ 535 if (dmc_id != DMC_FW_MAIN) 536 return true; 537 538 /* also disable the flip queue event on the main DMC on TGL */ 539 if (IS_TIGERLAKE(i915) && 540 REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC) 541 return true; 542 543 /* also disable the HRR event on the main DMC on TGL/ADLS */ 544 if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) && 545 REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A) 546 return true; 547 548 return false; 549 } 550 551 static u32 dmc_mmiodata(struct drm_i915_private *i915, 552 struct intel_dmc *dmc, 553 enum intel_dmc_id dmc_id, int i) 554 { 555 if (disable_dmc_evt(i915, dmc_id, 556 dmc->dmc_info[dmc_id].mmioaddr[i], 557 dmc->dmc_info[dmc_id].mmiodata[i])) 558 return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, 559 DMC_EVT_CTL_TYPE_EDGE_0_1) | 560 REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, 561 DMC_EVT_CTL_EVENT_ID_FALSE); 562 else 563 return dmc->dmc_info[dmc_id].mmiodata[i]; 564 } 565 566 /** 567 * intel_dmc_load_program() - write the firmware from memory to register. 568 * @i915: i915 drm device. 569 * 570 * DMC firmware is read from a .bin file and kept in internal memory one time. 571 * Everytime display comes back from low power state this function is called to 572 * copy the firmware from internal memory to registers. 573 */ 574 void intel_dmc_load_program(struct drm_i915_private *i915) 575 { 576 struct i915_power_domains *power_domains = &i915->display.power.domains; 577 struct intel_dmc *dmc = i915_to_dmc(i915); 578 enum intel_dmc_id dmc_id; 579 u32 i; 580 581 if (!intel_dmc_has_payload(i915)) 582 return; 583 584 pipedmc_clock_gating_wa(i915, true); 585 586 disable_all_event_handlers(i915); 587 588 assert_rpm_wakelock_held(&i915->runtime_pm); 589 590 preempt_disable(); 591 592 for_each_dmc_id(dmc_id) { 593 for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { 594 intel_de_write_fw(i915, 595 DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), 596 dmc->dmc_info[dmc_id].payload[i]); 597 } 598 } 599 600 preempt_enable(); 601 602 for_each_dmc_id(dmc_id) { 603 for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { 604 intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], 605 dmc_mmiodata(i915, dmc, dmc_id, i)); 606 } 607 } 608 609 power_domains->dc_state = 0; 610 611 gen9_set_dc_state_debugmask(i915); 612 613 pipedmc_clock_gating_wa(i915, false); 614 } 615 616 /** 617 * intel_dmc_disable_program() - disable the firmware 618 * @i915: i915 drm device 619 * 620 * Disable all event handlers in the firmware, making sure the firmware is 621 * inactive after the display is uninitialized. 622 */ 623 void intel_dmc_disable_program(struct drm_i915_private *i915) 624 { 625 if (!intel_dmc_has_payload(i915)) 626 return; 627 628 pipedmc_clock_gating_wa(i915, true); 629 disable_all_event_handlers(i915); 630 pipedmc_clock_gating_wa(i915, false); 631 632 intel_dmc_wl_disable(&i915->display); 633 } 634 635 void assert_dmc_loaded(struct drm_i915_private *i915) 636 { 637 struct intel_dmc *dmc = i915_to_dmc(i915); 638 639 drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); 640 drm_WARN_ONCE(&i915->drm, dmc && 641 !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), 642 "DMC program storage start is NULL\n"); 643 drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), 644 "DMC SSP Base Not fine\n"); 645 drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL), 646 "DMC HTP Not fine\n"); 647 } 648 649 static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info, 650 const struct stepping_info *si) 651 { 652 if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) || 653 (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) || 654 /* 655 * If we don't find a more specific one from above two checks, we 656 * then check for the generic one to be sure to work even with 657 * "broken firmware" 658 */ 659 (si->stepping == '*' && si->substepping == fw_info->substepping) || 660 (fw_info->stepping == '*' && fw_info->substepping == '*')) 661 return true; 662 663 return false; 664 } 665 666 /* 667 * Search fw_info table for dmc_offset to find firmware binary: num_entries is 668 * already sanitized. 669 */ 670 static void dmc_set_fw_offset(struct intel_dmc *dmc, 671 const struct intel_fw_info *fw_info, 672 unsigned int num_entries, 673 const struct stepping_info *si, 674 u8 package_ver) 675 { 676 struct drm_i915_private *i915 = dmc->i915; 677 enum intel_dmc_id dmc_id; 678 unsigned int i; 679 680 for (i = 0; i < num_entries; i++) { 681 dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; 682 683 if (!is_valid_dmc_id(dmc_id)) { 684 drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); 685 continue; 686 } 687 688 /* More specific versions come first, so we don't even have to 689 * check for the stepping since we already found a previous FW 690 * for this id. 691 */ 692 if (dmc->dmc_info[dmc_id].present) 693 continue; 694 695 if (fw_info_matches_stepping(&fw_info[i], si)) { 696 dmc->dmc_info[dmc_id].present = true; 697 dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset; 698 } 699 } 700 } 701 702 static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, 703 const u32 *mmioaddr, u32 mmio_count, 704 int header_ver, enum intel_dmc_id dmc_id) 705 { 706 struct drm_i915_private *i915 = dmc->i915; 707 u32 start_range, end_range; 708 int i; 709 710 if (header_ver == 1) { 711 start_range = DMC_MMIO_START_RANGE; 712 end_range = DMC_MMIO_END_RANGE; 713 } else if (dmc_id == DMC_FW_MAIN) { 714 start_range = TGL_MAIN_MMIO_START; 715 end_range = TGL_MAIN_MMIO_END; 716 } else if (DISPLAY_VER(i915) >= 13) { 717 start_range = ADLP_PIPE_MMIO_START; 718 end_range = ADLP_PIPE_MMIO_END; 719 } else if (DISPLAY_VER(i915) >= 12) { 720 start_range = TGL_PIPE_MMIO_START(dmc_id); 721 end_range = TGL_PIPE_MMIO_END(dmc_id); 722 } else { 723 drm_warn(&i915->drm, "Unknown mmio range for sanity check"); 724 return false; 725 } 726 727 for (i = 0; i < mmio_count; i++) { 728 if (mmioaddr[i] < start_range || mmioaddr[i] > end_range) 729 return false; 730 } 731 732 return true; 733 } 734 735 static u32 parse_dmc_fw_header(struct intel_dmc *dmc, 736 const struct intel_dmc_header_base *dmc_header, 737 size_t rem_size, enum intel_dmc_id dmc_id) 738 { 739 struct drm_i915_private *i915 = dmc->i915; 740 struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; 741 unsigned int header_len_bytes, dmc_header_size, payload_size, i; 742 const u32 *mmioaddr, *mmiodata; 743 u32 mmio_count, mmio_count_max, start_mmioaddr; 744 u8 *payload; 745 746 BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT || 747 ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT); 748 749 /* 750 * Check if we can access common fields, we will checkc again below 751 * after we have read the version 752 */ 753 if (rem_size < sizeof(struct intel_dmc_header_base)) 754 goto error_truncated; 755 756 /* Cope with small differences between v1 and v3 */ 757 if (dmc_header->header_ver == 3) { 758 const struct intel_dmc_header_v3 *v3 = 759 (const struct intel_dmc_header_v3 *)dmc_header; 760 761 if (rem_size < sizeof(struct intel_dmc_header_v3)) 762 goto error_truncated; 763 764 mmioaddr = v3->mmioaddr; 765 mmiodata = v3->mmiodata; 766 mmio_count = v3->mmio_count; 767 mmio_count_max = DMC_V3_MAX_MMIO_COUNT; 768 /* header_len is in dwords */ 769 header_len_bytes = dmc_header->header_len * 4; 770 start_mmioaddr = v3->start_mmioaddr; 771 dmc_header_size = sizeof(*v3); 772 } else if (dmc_header->header_ver == 1) { 773 const struct intel_dmc_header_v1 *v1 = 774 (const struct intel_dmc_header_v1 *)dmc_header; 775 776 if (rem_size < sizeof(struct intel_dmc_header_v1)) 777 goto error_truncated; 778 779 mmioaddr = v1->mmioaddr; 780 mmiodata = v1->mmiodata; 781 mmio_count = v1->mmio_count; 782 mmio_count_max = DMC_V1_MAX_MMIO_COUNT; 783 header_len_bytes = dmc_header->header_len; 784 start_mmioaddr = DMC_V1_MMIO_START_RANGE; 785 dmc_header_size = sizeof(*v1); 786 } else { 787 drm_err(&i915->drm, "Unknown DMC fw header version: %u\n", 788 dmc_header->header_ver); 789 return 0; 790 } 791 792 if (header_len_bytes != dmc_header_size) { 793 drm_err(&i915->drm, "DMC firmware has wrong dmc header length " 794 "(%u bytes)\n", header_len_bytes); 795 return 0; 796 } 797 798 /* Cache the dmc header info. */ 799 if (mmio_count > mmio_count_max) { 800 drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count); 801 return 0; 802 } 803 804 if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count, 805 dmc_header->header_ver, dmc_id)) { 806 drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n"); 807 return 0; 808 } 809 810 drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id); 811 for (i = 0; i < mmio_count; i++) { 812 dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]); 813 dmc_info->mmiodata[i] = mmiodata[i]; 814 815 drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n", 816 i, mmioaddr[i], mmiodata[i], 817 is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" : 818 is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "", 819 disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i], 820 dmc_info->mmiodata[i]) ? " (disabling)" : ""); 821 } 822 dmc_info->mmio_count = mmio_count; 823 dmc_info->start_mmioaddr = start_mmioaddr; 824 825 rem_size -= header_len_bytes; 826 827 /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ 828 payload_size = dmc_header->fw_size * 4; 829 if (rem_size < payload_size) 830 goto error_truncated; 831 832 if (payload_size > dmc->max_fw_size) { 833 drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size); 834 return 0; 835 } 836 dmc_info->dmc_fw_size = dmc_header->fw_size; 837 838 dmc_info->payload = kmalloc(payload_size, GFP_KERNEL); 839 if (!dmc_info->payload) 840 return 0; 841 842 payload = (u8 *)(dmc_header) + header_len_bytes; 843 memcpy(dmc_info->payload, payload, payload_size); 844 845 return header_len_bytes + payload_size; 846 847 error_truncated: 848 drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); 849 return 0; 850 } 851 852 static u32 853 parse_dmc_fw_package(struct intel_dmc *dmc, 854 const struct intel_package_header *package_header, 855 const struct stepping_info *si, 856 size_t rem_size) 857 { 858 struct drm_i915_private *i915 = dmc->i915; 859 u32 package_size = sizeof(struct intel_package_header); 860 u32 num_entries, max_entries; 861 const struct intel_fw_info *fw_info; 862 863 if (rem_size < package_size) 864 goto error_truncated; 865 866 if (package_header->header_ver == 1) { 867 max_entries = PACKAGE_MAX_FW_INFO_ENTRIES; 868 } else if (package_header->header_ver == 2) { 869 max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES; 870 } else { 871 drm_err(&i915->drm, "DMC firmware has unknown header version %u\n", 872 package_header->header_ver); 873 return 0; 874 } 875 876 /* 877 * We should always have space for max_entries, 878 * even if not all are used 879 */ 880 package_size += max_entries * sizeof(struct intel_fw_info); 881 if (rem_size < package_size) 882 goto error_truncated; 883 884 if (package_header->header_len * 4 != package_size) { 885 drm_err(&i915->drm, "DMC firmware has wrong package header length " 886 "(%u bytes)\n", package_size); 887 return 0; 888 } 889 890 num_entries = package_header->num_entries; 891 if (WARN_ON(package_header->num_entries > max_entries)) 892 num_entries = max_entries; 893 894 fw_info = (const struct intel_fw_info *) 895 ((u8 *)package_header + sizeof(*package_header)); 896 dmc_set_fw_offset(dmc, fw_info, num_entries, si, 897 package_header->header_ver); 898 899 /* dmc_offset is in dwords */ 900 return package_size; 901 902 error_truncated: 903 drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); 904 return 0; 905 } 906 907 /* Return number of bytes parsed or 0 on error */ 908 static u32 parse_dmc_fw_css(struct intel_dmc *dmc, 909 struct intel_css_header *css_header, 910 size_t rem_size) 911 { 912 struct drm_i915_private *i915 = dmc->i915; 913 914 if (rem_size < sizeof(struct intel_css_header)) { 915 drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); 916 return 0; 917 } 918 919 if (sizeof(struct intel_css_header) != 920 (css_header->header_len * 4)) { 921 drm_err(&i915->drm, "DMC firmware has wrong CSS header length " 922 "(%u bytes)\n", 923 (css_header->header_len * 4)); 924 return 0; 925 } 926 927 dmc->version = css_header->version; 928 929 return sizeof(struct intel_css_header); 930 } 931 932 static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) 933 { 934 struct drm_i915_private *i915 = dmc->i915; 935 struct intel_css_header *css_header; 936 struct intel_package_header *package_header; 937 struct intel_dmc_header_base *dmc_header; 938 struct stepping_info display_info = { '*', '*'}; 939 const struct stepping_info *si = intel_get_stepping_info(i915, &display_info); 940 enum intel_dmc_id dmc_id; 941 u32 readcount = 0; 942 u32 r, offset; 943 944 if (!fw) 945 return -EINVAL; 946 947 /* Extract CSS Header information */ 948 css_header = (struct intel_css_header *)fw->data; 949 r = parse_dmc_fw_css(dmc, css_header, fw->size); 950 if (!r) 951 return -EINVAL; 952 953 readcount += r; 954 955 /* Extract Package Header information */ 956 package_header = (struct intel_package_header *)&fw->data[readcount]; 957 r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount); 958 if (!r) 959 return -EINVAL; 960 961 readcount += r; 962 963 for_each_dmc_id(dmc_id) { 964 if (!dmc->dmc_info[dmc_id].present) 965 continue; 966 967 offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; 968 if (offset > fw->size) { 969 drm_err(&i915->drm, "Reading beyond the fw_size\n"); 970 continue; 971 } 972 973 dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; 974 parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); 975 } 976 977 if (!intel_dmc_has_payload(i915)) { 978 drm_err(&i915->drm, "DMC firmware main program not found\n"); 979 return -ENOENT; 980 } 981 982 return 0; 983 } 984 985 static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) 986 { 987 drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); 988 i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); 989 } 990 991 static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) 992 { 993 intel_wakeref_t wakeref __maybe_unused = 994 fetch_and_zero(&i915->display.dmc.wakeref); 995 996 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 997 } 998 999 static const char *dmc_fallback_path(struct drm_i915_private *i915) 1000 { 1001 if (IS_ALDERLAKE_P(i915)) 1002 return ADLP_DMC_FALLBACK_PATH; 1003 1004 return NULL; 1005 } 1006 1007 static void dmc_load_work_fn(struct work_struct *work) 1008 { 1009 struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); 1010 struct drm_i915_private *i915 = dmc->i915; 1011 const struct firmware *fw = NULL; 1012 const char *fallback_path; 1013 int err; 1014 1015 err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); 1016 1017 if (err == -ENOENT && !dmc_firmware_param(i915)) { 1018 fallback_path = dmc_fallback_path(i915); 1019 if (fallback_path) { 1020 drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", 1021 dmc->fw_path, fallback_path); 1022 err = request_firmware(&fw, fallback_path, i915->drm.dev); 1023 if (err == 0) 1024 dmc->fw_path = fallback_path; 1025 } 1026 } 1027 1028 if (err) { 1029 drm_notice(&i915->drm, 1030 "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n", 1031 dmc->fw_path, ERR_PTR(err)); 1032 drm_notice(&i915->drm, "DMC firmware homepage: %s", 1033 INTEL_DMC_FIRMWARE_URL); 1034 return; 1035 } 1036 1037 err = parse_dmc_fw(dmc, fw); 1038 if (err) { 1039 drm_notice(&i915->drm, 1040 "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n", 1041 dmc->fw_path, ERR_PTR(err)); 1042 goto out; 1043 } 1044 1045 intel_dmc_load_program(i915); 1046 intel_dmc_runtime_pm_put(i915); 1047 1048 drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", 1049 dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), 1050 DMC_VERSION_MINOR(dmc->version)); 1051 1052 out: 1053 release_firmware(fw); 1054 } 1055 1056 /** 1057 * intel_dmc_init() - initialize the firmware loading. 1058 * @i915: i915 drm device. 1059 * 1060 * This function is called at the time of loading the display driver to read 1061 * firmware from a .bin file and copied into a internal memory. 1062 */ 1063 void intel_dmc_init(struct drm_i915_private *i915) 1064 { 1065 struct intel_dmc *dmc; 1066 1067 if (!HAS_DMC(i915)) 1068 return; 1069 1070 /* 1071 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering 1072 * runtime-suspend. 1073 * 1074 * On error, we return with the rpm wakeref held to prevent runtime 1075 * suspend as runtime suspend *requires* a working DMC for whatever 1076 * reason. 1077 */ 1078 intel_dmc_runtime_pm_get(i915); 1079 1080 dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); 1081 if (!dmc) 1082 return; 1083 1084 dmc->i915 = i915; 1085 1086 INIT_WORK(&dmc->work, dmc_load_work_fn); 1087 1088 dmc->fw_path = dmc_firmware_default(i915, &dmc->max_fw_size); 1089 1090 if (dmc_firmware_param_disabled(i915)) { 1091 drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n"); 1092 goto out; 1093 } 1094 1095 if (dmc_firmware_param(i915)) 1096 dmc->fw_path = dmc_firmware_param(i915); 1097 1098 if (!dmc->fw_path) { 1099 drm_dbg_kms(&i915->drm, 1100 "No known DMC firmware for platform, disabling runtime PM\n"); 1101 goto out; 1102 } 1103 1104 i915->display.dmc.dmc = dmc; 1105 1106 drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); 1107 queue_work(i915->unordered_wq, &dmc->work); 1108 1109 return; 1110 1111 out: 1112 kfree(dmc); 1113 } 1114 1115 /** 1116 * intel_dmc_suspend() - prepare DMC firmware before system suspend 1117 * @i915: i915 drm device 1118 * 1119 * Prepare the DMC firmware before entering system suspend. This includes 1120 * flushing pending work items and releasing any resources acquired during 1121 * init. 1122 */ 1123 void intel_dmc_suspend(struct drm_i915_private *i915) 1124 { 1125 struct intel_dmc *dmc = i915_to_dmc(i915); 1126 1127 if (!HAS_DMC(i915)) 1128 return; 1129 1130 if (dmc) 1131 flush_work(&dmc->work); 1132 1133 intel_dmc_wl_disable(&i915->display); 1134 1135 /* Drop the reference held in case DMC isn't loaded. */ 1136 if (!intel_dmc_has_payload(i915)) 1137 intel_dmc_runtime_pm_put(i915); 1138 } 1139 1140 /** 1141 * intel_dmc_resume() - init DMC firmware during system resume 1142 * @i915: i915 drm device 1143 * 1144 * Reinitialize the DMC firmware during system resume, reacquiring any 1145 * resources released in intel_dmc_suspend(). 1146 */ 1147 void intel_dmc_resume(struct drm_i915_private *i915) 1148 { 1149 if (!HAS_DMC(i915)) 1150 return; 1151 1152 /* 1153 * Reacquire the reference to keep RPM disabled in case DMC isn't 1154 * loaded. 1155 */ 1156 if (!intel_dmc_has_payload(i915)) 1157 intel_dmc_runtime_pm_get(i915); 1158 } 1159 1160 /** 1161 * intel_dmc_fini() - unload the DMC firmware. 1162 * @i915: i915 drm device. 1163 * 1164 * Firmmware unloading includes freeing the internal memory and reset the 1165 * firmware loading status. 1166 */ 1167 void intel_dmc_fini(struct drm_i915_private *i915) 1168 { 1169 struct intel_dmc *dmc = i915_to_dmc(i915); 1170 enum intel_dmc_id dmc_id; 1171 1172 if (!HAS_DMC(i915)) 1173 return; 1174 1175 intel_dmc_suspend(i915); 1176 drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); 1177 1178 if (dmc) { 1179 for_each_dmc_id(dmc_id) 1180 kfree(dmc->dmc_info[dmc_id].payload); 1181 1182 kfree(dmc); 1183 i915->display.dmc.dmc = NULL; 1184 } 1185 } 1186 1187 void intel_dmc_print_error_state(struct drm_printer *p, 1188 struct drm_i915_private *i915) 1189 { 1190 struct intel_dmc *dmc = i915_to_dmc(i915); 1191 1192 if (!HAS_DMC(i915)) 1193 return; 1194 1195 drm_printf(p, "DMC initialized: %s\n", str_yes_no(dmc)); 1196 drm_printf(p, "DMC loaded: %s\n", 1197 str_yes_no(intel_dmc_has_payload(i915))); 1198 if (dmc) 1199 drm_printf(p, "DMC fw version: %d.%d\n", 1200 DMC_VERSION_MAJOR(dmc->version), 1201 DMC_VERSION_MINOR(dmc->version)); 1202 } 1203 1204 static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) 1205 { 1206 struct drm_i915_private *i915 = m->private; 1207 struct intel_dmc *dmc = i915_to_dmc(i915); 1208 intel_wakeref_t wakeref; 1209 i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; 1210 1211 if (!HAS_DMC(i915)) 1212 return -ENODEV; 1213 1214 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1215 1216 seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); 1217 seq_printf(m, "fw loaded: %s\n", 1218 str_yes_no(intel_dmc_has_payload(i915))); 1219 seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A"); 1220 seq_printf(m, "Pipe A fw needed: %s\n", 1221 str_yes_no(DISPLAY_VER(i915) >= 12)); 1222 seq_printf(m, "Pipe A fw loaded: %s\n", 1223 str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); 1224 seq_printf(m, "Pipe B fw needed: %s\n", 1225 str_yes_no(IS_ALDERLAKE_P(i915) || 1226 DISPLAY_VER(i915) >= 14)); 1227 seq_printf(m, "Pipe B fw loaded: %s\n", 1228 str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); 1229 1230 if (!intel_dmc_has_payload(i915)) 1231 goto out; 1232 1233 seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version), 1234 DMC_VERSION_MINOR(dmc->version)); 1235 1236 if (DISPLAY_VER(i915) >= 12) { 1237 i915_reg_t dc3co_reg; 1238 1239 if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) { 1240 dc3co_reg = DG1_DMC_DEBUG3; 1241 dc5_reg = DG1_DMC_DEBUG_DC5_COUNT; 1242 } else { 1243 dc3co_reg = TGL_DMC_DEBUG3; 1244 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; 1245 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; 1246 } 1247 1248 seq_printf(m, "DC3CO count: %d\n", 1249 intel_de_read(i915, dc3co_reg)); 1250 } else { 1251 dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT : 1252 SKL_DMC_DC3_DC5_COUNT; 1253 if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)) 1254 dc6_reg = SKL_DMC_DC5_DC6_COUNT; 1255 } 1256 1257 seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg)); 1258 if (i915_mmio_reg_valid(dc6_reg)) 1259 seq_printf(m, "DC5 -> DC6 count: %d\n", 1260 intel_de_read(i915, dc6_reg)); 1261 1262 seq_printf(m, "program base: 0x%08x\n", 1263 intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); 1264 1265 out: 1266 seq_printf(m, "ssp base: 0x%08x\n", 1267 intel_de_read(i915, DMC_SSP_BASE)); 1268 seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL)); 1269 1270 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1271 1272 return 0; 1273 } 1274 1275 DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status); 1276 1277 void intel_dmc_debugfs_register(struct drm_i915_private *i915) 1278 { 1279 struct drm_minor *minor = i915->drm.primary; 1280 1281 debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root, 1282 i915, &intel_dmc_debugfs_status_fops); 1283 } 1284