// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "i915_debugfs.h"
#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(fbc)) {
		seq_puts(m, "FBC enabled\n");
		seq_printf(m, "Compressing: %s\n",
			   yesno(intel_fbc_is_compressing(fbc)));
	} else {
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	return intel_fbc_set_false_color(&dev_priv->fbc, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(dev_priv->params.enable_ips));

	if (DISPLAY_VER(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (DISPLAY_VER(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));
	int ret;

	if (!CAN_PSR(intel_dp)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
[%s]\n", val, str); 244 } else { 245 return ret; 246 } 247 248 return 0; 249 } 250 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); 251 252 static void 253 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) 254 { 255 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 256 const char *status = "unknown"; 257 u32 val, status_val; 258 259 if (intel_dp->psr.psr2_enabled) { 260 static const char * const live_status[] = { 261 "IDLE", 262 "CAPTURE", 263 "CAPTURE_FS", 264 "SLEEP", 265 "BUFON_FW", 266 "ML_UP", 267 "SU_STANDBY", 268 "FAST_SLEEP", 269 "DEEP_SLEEP", 270 "BUF_ON", 271 "TG_ON" 272 }; 273 val = intel_de_read(dev_priv, 274 EDP_PSR2_STATUS(intel_dp->psr.transcoder)); 275 status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); 276 if (status_val < ARRAY_SIZE(live_status)) 277 status = live_status[status_val]; 278 } else { 279 static const char * const live_status[] = { 280 "IDLE", 281 "SRDONACK", 282 "SRDENT", 283 "BUFOFF", 284 "BUFON", 285 "AUXACK", 286 "SRDOFFACK", 287 "SRDENT_ON", 288 }; 289 val = intel_de_read(dev_priv, 290 EDP_PSR_STATUS(intel_dp->psr.transcoder)); 291 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> 292 EDP_PSR_STATUS_STATE_SHIFT; 293 if (status_val < ARRAY_SIZE(live_status)) 294 status = live_status[status_val]; 295 } 296 297 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); 298 } 299 300 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) 301 { 302 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 303 struct intel_psr *psr = &intel_dp->psr; 304 intel_wakeref_t wakeref; 305 const char *status; 306 bool enabled; 307 u32 val; 308 309 seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); 310 if (psr->sink_support) 311 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); 312 seq_puts(m, "\n"); 313 314 if (!psr->sink_support) 315 return 0; 316 317 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 318 mutex_lock(&psr->lock); 319 320 if (psr->enabled) 321 status = psr->psr2_enabled ? 
"PSR2 enabled" : "PSR1 enabled"; 322 else 323 status = "disabled"; 324 seq_printf(m, "PSR mode: %s\n", status); 325 326 if (!psr->enabled) { 327 seq_printf(m, "PSR sink not reliable: %s\n", 328 yesno(psr->sink_not_reliable)); 329 330 goto unlock; 331 } 332 333 if (psr->psr2_enabled) { 334 val = intel_de_read(dev_priv, 335 EDP_PSR2_CTL(intel_dp->psr.transcoder)); 336 enabled = val & EDP_PSR2_ENABLE; 337 } else { 338 val = intel_de_read(dev_priv, 339 EDP_PSR_CTL(intel_dp->psr.transcoder)); 340 enabled = val & EDP_PSR_ENABLE; 341 } 342 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", 343 enableddisabled(enabled), val); 344 psr_source_status(intel_dp, m); 345 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", 346 psr->busy_frontbuffer_bits); 347 348 /* 349 * SKL+ Perf counter is reset to 0 everytime DC state is entered 350 */ 351 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 352 val = intel_de_read(dev_priv, 353 EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); 354 val &= EDP_PSR_PERF_CNT_MASK; 355 seq_printf(m, "Performance counter: %u\n", val); 356 } 357 358 if (psr->debug & I915_PSR_DEBUG_IRQ) { 359 seq_printf(m, "Last attempted entry at: %lld\n", 360 psr->last_entry_attempt); 361 seq_printf(m, "Last exit at: %lld\n", psr->last_exit); 362 } 363 364 if (psr->psr2_enabled) { 365 u32 su_frames_val[3]; 366 int frame; 367 368 /* 369 * Reading all 3 registers before hand to minimize crossing a 370 * frame boundary between register reads 371 */ 372 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { 373 val = intel_de_read(dev_priv, 374 PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); 375 su_frames_val[frame / 3] = val; 376 } 377 378 seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); 379 380 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { 381 u32 su_blocks; 382 383 su_blocks = su_frames_val[frame / 3] & 384 PSR2_SU_STATUS_MASK(frame); 385 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); 386 seq_printf(m, "%d\t%d\n", frame, su_blocks); 387 } 388 389 seq_printf(m, "PSR2 selective fetch: %s\n", 390 enableddisabled(psr->psr2_sel_fetch_enabled)); 391 } 392 393 unlock: 394 mutex_unlock(&psr->lock); 395 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 396 397 return 0; 398 } 399 400 static int i915_edp_psr_status(struct seq_file *m, void *data) 401 { 402 struct drm_i915_private *dev_priv = node_to_i915(m->private); 403 struct intel_dp *intel_dp = NULL; 404 struct intel_encoder *encoder; 405 406 if (!HAS_PSR(dev_priv)) 407 return -ENODEV; 408 409 /* Find the first EDP which supports PSR */ 410 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 411 intel_dp = enc_to_intel_dp(encoder); 412 break; 413 } 414 415 if (!intel_dp) 416 return -ENODEV; 417 418 return intel_psr_status(m, intel_dp); 419 } 420 421 static int 422 i915_edp_psr_debug_set(void *data, u64 val) 423 { 424 struct drm_i915_private *dev_priv = data; 425 struct intel_encoder *encoder; 426 intel_wakeref_t wakeref; 427 int ret = -ENODEV; 428 429 if (!HAS_PSR(dev_priv)) 430 return ret; 431 432 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 433 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 434 435 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); 436 437 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 438 439 // TODO: split to each transcoder's PSR debug state 440 ret = intel_psr_debug_set(intel_dp, val); 441 442 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 443 } 444 445 return ret; 446 } 447 448 static int 449 i915_edp_psr_debug_get(void *data, u64 *val) 450 { 451 struct 
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	intel_display_power_debug(i915, m);

	return 0;
}

static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_dmc *dmc;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_DMC(dev_priv))
		return -ENODEV;

	dmc = &dev_priv->dmc;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
	seq_printf(m, "path: %s\n", dmc->fw_path);
	seq_printf(m, "Pipe A fw support: %s\n",
		   yesno(GRAPHICS_VER(dev_priv) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
	seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
	seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));

	if (!intel_dmc_has_payload(dev_priv))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (IS_DGFX(dev_priv)) {
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(dev_priv, DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
						 SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   intel_de_read(dev_priv, dc5_reg));
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(dev_priv, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(dev_priv, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static void intel_seq_print_mode(struct seq_file *m, int tabs,
				 const struct drm_display_mode *mode)
{
	int i;

	for (i = 0; i < tabs; i++)
		seq_putc(m, '\t');

	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

static void intel_encoder_info(struct seq_file *m,
			       struct intel_crtc *crtc,
			       struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
		   encoder->base.base.id, encoder->base.name);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		const struct drm_connector_state *conn_state =
			connector->state;

		if (conn_state->best_encoder != &encoder->base)
			continue;

		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
			   connector->base.id, connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);
}

static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
{
	const struct drm_display_mode *mode = panel->fixed_mode;

	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}

static void intel_hdcp_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	bool hdcp_cap, hdcp2_cap;

	if (!intel_connector->hdcp.shim) {
		seq_puts(m, "No Connector Support");
		goto out;
	}

	hdcp_cap = intel_hdcp_capable(intel_connector);
	hdcp2_cap = intel_hdcp2_capable(intel_connector);

	if (hdcp_cap)
		seq_puts(m, "HDCP1.4 ");
	if (hdcp2_cap)
		seq_puts(m, "HDCP2.2 ");

	if (!hdcp_cap && !hdcp2_cap)
		seq_puts(m, "None");

out:
	seq_puts(m, "\n");
}

static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				edid ? edid->data : NULL, &intel_dp->aux);
}

static void intel_dp_mst_info(struct seq_file *m,
			      struct intel_connector *intel_connector)
{
	bool has_audio = intel_connector->port->has_audio;

	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}

static void intel_hdmi_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);

	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
}

static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}

static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	const struct drm_connector_state *conn_state = connector->state;
	struct intel_encoder *encoder =
		to_intel_encoder(conn_state->best_encoder);
	const struct drm_display_mode *mode;

	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	if (!encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (encoder->type == INTEL_OUTPUT_HDMI ||
		    encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_puts(m, "\tHDCP version: ");
	intel_hdcp_info(m, intel_connector);

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}

static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}

static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
"FLIPX " : "", 743 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", 744 rotation); 745 } 746 747 static const char *plane_visibility(const struct intel_plane_state *plane_state) 748 { 749 if (plane_state->uapi.visible) 750 return "visible"; 751 752 if (plane_state->planar_slave) 753 return "planar-slave"; 754 755 return "hidden"; 756 } 757 758 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) 759 { 760 const struct intel_plane_state *plane_state = 761 to_intel_plane_state(plane->base.state); 762 const struct drm_framebuffer *fb = plane_state->uapi.fb; 763 struct drm_rect src, dst; 764 char rot_str[48]; 765 766 src = drm_plane_state_src(&plane_state->uapi); 767 dst = drm_plane_state_dest(&plane_state->uapi); 768 769 plane_rotation(rot_str, sizeof(rot_str), 770 plane_state->uapi.rotation); 771 772 seq_puts(m, "\t\tuapi: [FB:"); 773 if (fb) 774 seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id, 775 &fb->format->format, fb->modifier, fb->width, 776 fb->height); 777 else 778 seq_puts(m, "0] n/a,0x0,0x0,"); 779 seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT 780 ", rotation=%s\n", plane_visibility(plane_state), 781 DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str); 782 783 if (plane_state->planar_linked_plane) 784 seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n", 785 plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name, 786 plane_state->planar_slave ? "slave" : "master"); 787 } 788 789 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) 790 { 791 const struct intel_plane_state *plane_state = 792 to_intel_plane_state(plane->base.state); 793 const struct drm_framebuffer *fb = plane_state->hw.fb; 794 char rot_str[48]; 795 796 if (!fb) 797 return; 798 799 plane_rotation(rot_str, sizeof(rot_str), 800 plane_state->hw.rotation); 801 802 seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src=" 803 DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", 804 fb->base.id, &fb->format->format, 805 fb->modifier, fb->width, fb->height, 806 yesno(plane_state->uapi.visible), 807 DRM_RECT_FP_ARG(&plane_state->uapi.src), 808 DRM_RECT_ARG(&plane_state->uapi.dst), 809 rot_str); 810 } 811 812 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) 813 { 814 struct drm_i915_private *dev_priv = node_to_i915(m->private); 815 struct intel_plane *plane; 816 817 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 818 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", 819 plane->base.base.id, plane->base.name, 820 plane_type(plane->base.type)); 821 intel_plane_uapi_info(m, plane); 822 intel_plane_hw_info(m, plane); 823 } 824 } 825 826 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) 827 { 828 const struct intel_crtc_state *crtc_state = 829 to_intel_crtc_state(crtc->base.state); 830 int num_scalers = crtc->num_scalers; 831 int i; 832 833 /* Not all platformas have a scaler */ 834 if (num_scalers) { 835 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 836 num_scalers, 837 crtc_state->scaler_state.scaler_users, 838 crtc_state->scaler_state.scaler_id); 839 840 for (i = 0; i < num_scalers; i++) { 841 const struct intel_scaler *sc = 842 &crtc_state->scaler_state.scalers[i]; 843 844 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 845 i, yesno(sc->in_use), sc->mode); 846 } 847 seq_puts(m, "\n"); 848 } else { 849 seq_puts(m, "\tNo scalers available on this platform\n"); 850 } 851 } 852 853 #if 
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
	u64 count;
	int row;

	count = 0;
	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
		count += crtc->debug.vbl.times[row];
	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
	if (!count)
		return;

	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
		char columns[80] = "       |";
		unsigned int x;

		if (row & 1) {
			const char *units;

			if (row > 10) {
				x = 1000000;
				units = "ms";
			} else {
				x = 1000;
				units = "us";
			}

			snprintf(columns, sizeof(columns), "%4ld%s |",
				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
		}

		if (crtc->debug.vbl.times[row]) {
			x = ilog2(crtc->debug.vbl.times[row]);
			memset(columns + 8, '*', x);
			columns[8 + x] = '\0';
		}

		seq_printf(m, "%s%s\n", hdr, columns);
	}

	seq_printf(m, "%sMin update: %lluns\n",
		   hdr, crtc->debug.vbl.min);
	seq_printf(m, "%sMax update: %lluns\n",
		   hdr, crtc->debug.vbl.max);
	seq_printf(m, "%sAverage update: %lluns\n",
		   hdr, div64_u64(crtc->debug.vbl.sum, count));
	seq_printf(m, "%sOverruns > %uus: %u\n",
		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
}

static int crtc_updates_show(struct seq_file *m, void *data)
{
	crtc_updates_info(m, m->private, "");
	return 0;
}

static int crtc_updates_open(struct inode *inode, struct file *file)
{
	return single_open(file, crtc_updates_show, inode->i_private);
}

static ssize_t crtc_updates_write(struct file *file,
				  const char __user *ubuf,
				  size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_crtc *crtc = m->private;

	/* May race with an update. Meh. */
	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));

	return len;
}

static const struct file_operations crtc_updates_fops = {
	.owner = THIS_MODULE,
	.open = crtc_updates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = crtc_updates_write
};

static void crtc_updates_add(struct drm_crtc *crtc)
{
	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
			    to_intel_crtc(crtc), &crtc_updates_fops);
}

#else
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
}

static void crtc_updates_add(struct drm_crtc *crtc)
{
}
#endif

static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   yesno(crtc_state->uapi.enable),
		   yesno(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	if (crtc_state->hw.enable) {
		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
			   yesno(crtc_state->hw.active),
			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));

		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
			   yesno(crtc_state->dither), crtc_state->pipe_bpp);

		intel_scaler_info(m, crtc);
	}

	if (crtc_state->bigjoiner)
		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
			   crtc_state->bigjoiner_linked_crtc->base.base.id,
			   crtc_state->bigjoiner_linked_crtc->base.name,
			   crtc_state->bigjoiner_slave ? "slave" : "master");
"slave" : "master"); 989 990 for_each_intel_encoder_mask(&dev_priv->drm, encoder, 991 crtc_state->uapi.encoder_mask) 992 intel_encoder_info(m, crtc, encoder); 993 994 intel_plane_info(m, crtc); 995 996 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", 997 yesno(!crtc->cpu_fifo_underrun_disabled), 998 yesno(!crtc->pch_fifo_underrun_disabled)); 999 1000 crtc_updates_info(m, crtc, "\t"); 1001 } 1002 1003 static int i915_display_info(struct seq_file *m, void *unused) 1004 { 1005 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1006 struct drm_device *dev = &dev_priv->drm; 1007 struct intel_crtc *crtc; 1008 struct drm_connector *connector; 1009 struct drm_connector_list_iter conn_iter; 1010 intel_wakeref_t wakeref; 1011 1012 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1013 1014 drm_modeset_lock_all(dev); 1015 1016 seq_printf(m, "CRTC info\n"); 1017 seq_printf(m, "---------\n"); 1018 for_each_intel_crtc(dev, crtc) 1019 intel_crtc_info(m, crtc); 1020 1021 seq_printf(m, "\n"); 1022 seq_printf(m, "Connector info\n"); 1023 seq_printf(m, "--------------\n"); 1024 drm_connector_list_iter_begin(dev, &conn_iter); 1025 drm_for_each_connector_iter(connector, &conn_iter) 1026 intel_connector_info(m, connector); 1027 drm_connector_list_iter_end(&conn_iter); 1028 1029 drm_modeset_unlock_all(dev); 1030 1031 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1032 1033 return 0; 1034 } 1035 1036 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 1037 { 1038 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1039 struct drm_device *dev = &dev_priv->drm; 1040 int i; 1041 1042 drm_modeset_lock_all(dev); 1043 1044 seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", 1045 dev_priv->dpll.ref_clks.nssc, 1046 dev_priv->dpll.ref_clks.ssc); 1047 1048 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 1049 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; 1050 1051 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, 1052 pll->info->id); 1053 seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n", 1054 pll->state.pipe_mask, pll->active_mask, yesno(pll->on)); 1055 seq_printf(m, " tracked hardware state:\n"); 1056 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 1057 seq_printf(m, " dpll_md: 0x%08x\n", 1058 pll->state.hw_state.dpll_md); 1059 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 1060 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 1061 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 1062 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); 1063 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); 1064 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", 1065 pll->state.hw_state.mg_refclkin_ctl); 1066 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", 1067 pll->state.hw_state.mg_clktop2_coreclkctl1); 1068 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", 1069 pll->state.hw_state.mg_clktop2_hsclkctl); 1070 seq_printf(m, " mg_pll_div0: 0x%08x\n", 1071 pll->state.hw_state.mg_pll_div0); 1072 seq_printf(m, " mg_pll_div1: 0x%08x\n", 1073 pll->state.hw_state.mg_pll_div1); 1074 seq_printf(m, " mg_pll_lf: 0x%08x\n", 1075 pll->state.hw_state.mg_pll_lf); 1076 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", 1077 pll->state.hw_state.mg_pll_frac_lock); 1078 seq_printf(m, " mg_pll_ssc: 0x%08x\n", 1079 pll->state.hw_state.mg_pll_ssc); 1080 seq_printf(m, " mg_pll_bias: 0x%08x\n", 1081 pll->state.hw_state.mg_pll_bias); 1082 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", 1083 
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_ipc_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Isochronous Priority Control: %s\n",
		   yesno(dev_priv->ipc_enabled));
	return 0;
}

static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!HAS_IPC(dev_priv))
		return -ENODEV;

	return single_open(file, i915_ipc_status_show, dev_priv);
}

static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			drm_info(&dev_priv->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}

static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};

static int i915_ddb_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_entry *entry;
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) < 9)
		return -ENODEV;

	drm_modeset_lock_all(dev);

	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		seq_printf(m, "Pipe %c\n", pipe_name(pipe));

		for_each_plane_id_on_crtc(crtc, plane_id) {
			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
			seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
				   entry->start, entry->end,
				   skl_ddb_entry_size(entry));
		}

		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
		seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
			   entry->end, skl_ddb_entry_size(entry));
	}

	drm_modeset_unlock_all(dev);

	return 0;
}

static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		bool supported = false;

		if (connector->state->crtc != &crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
		    drrs->type == SEAMLESS_DRRS_SUPPORT)
			supported = true;

		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Enabled: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
			   drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
				   drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Enabled : No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;
	int active_crtc_cnt = 0;

	drm_modeset_lock_all(dev);
	for_each_intel_crtc(dev, crtc) {
		if (crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, crtc);
		}
	}
	drm_modeset_unlock_all(dev);

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
			      enum i915_power_well_id power_well_id)
{
	intel_wakeref_t wakeref;
	bool is_enabled;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	is_enabled = intel_display_power_well_is_enabled(i915,
							 power_well_id);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return is_enabled;
}

static int i915_lpsp_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	bool lpsp_enabled = false;

	if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
	} else if (IS_DISPLAY_VER(i915, 11, 12)) {
		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
	} else {
		seq_puts(m, "LPSP: not supported\n");
		return 0;
	}

	seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));

	return 0;
}

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(intel_encoder);
		if (!intel_dp_mst_source_support(&dig_port->dp))
			continue;

		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
			   dig_port->base.base.base.id,
			   dig_port->base.base.name);
		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	drm_dbg(&to_i915(dev)->drm,
		"Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			drm_dbg(&to_i915(dev)->drm,
				"Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = true;
			else
				intel_dp->compliance.test_active = false;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			} else if (intel_dp->compliance.test_type ==
				   DP_TEST_LINK_PHY_TEST_PATTERN) {
				seq_printf(m, "pattern: %d\n",
					   intel_dp->compliance.test_data.phytest.phy_pattern);
				seq_printf(m, "Number of lanes: %d\n",
					   intel_dp->compliance.test_data.phytest.num_lanes);
				seq_printf(m, "Link Rate: %d\n",
					   intel_dp->compliance.test_data.phytest.link_rate);
				seq_printf(m, "level: %02x\n",
					   intel_dp->train_set[0]);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

/*
 * Watermark latencies are written as space-separated decimal values;
 * exactly num_levels values must be supplied or the write is rejected
 * with -EINVAL.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

/*
 * Accepts either a decimal threshold or the string "reset" (which restores
 * HPD_STORM_DEFAULT_THRESHOLD); writing 0 disables HPD storm detection.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");
"En" : "Dis"); 1907 1908 spin_lock_irq(&dev_priv->irq_lock); 1909 hotplug->hpd_short_storm_enabled = new_state; 1910 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 1911 for_each_hpd_pin(i) 1912 hotplug->stats[i].count = 0; 1913 spin_unlock_irq(&dev_priv->irq_lock); 1914 1915 /* Re-enable hpd immediately if we were in an irq storm */ 1916 flush_delayed_work(&dev_priv->hotplug.reenable_work); 1917 1918 return len; 1919 } 1920 1921 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 1922 .owner = THIS_MODULE, 1923 .open = i915_hpd_short_storm_ctl_open, 1924 .read = seq_read, 1925 .llseek = seq_lseek, 1926 .release = single_release, 1927 .write = i915_hpd_short_storm_ctl_write, 1928 }; 1929 1930 static int i915_drrs_ctl_set(void *data, u64 val) 1931 { 1932 struct drm_i915_private *dev_priv = data; 1933 struct drm_device *dev = &dev_priv->drm; 1934 struct intel_crtc *crtc; 1935 1936 if (DISPLAY_VER(dev_priv) < 7) 1937 return -ENODEV; 1938 1939 for_each_intel_crtc(dev, crtc) { 1940 struct drm_connector_list_iter conn_iter; 1941 struct intel_crtc_state *crtc_state; 1942 struct drm_connector *connector; 1943 struct drm_crtc_commit *commit; 1944 int ret; 1945 1946 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); 1947 if (ret) 1948 return ret; 1949 1950 crtc_state = to_intel_crtc_state(crtc->base.state); 1951 1952 if (!crtc_state->hw.active || 1953 !crtc_state->has_drrs) 1954 goto out; 1955 1956 commit = crtc_state->uapi.commit; 1957 if (commit) { 1958 ret = wait_for_completion_interruptible(&commit->hw_done); 1959 if (ret) 1960 goto out; 1961 } 1962 1963 drm_connector_list_iter_begin(dev, &conn_iter); 1964 drm_for_each_connector_iter(connector, &conn_iter) { 1965 struct intel_encoder *encoder; 1966 struct intel_dp *intel_dp; 1967 1968 if (!(crtc_state->uapi.connector_mask & 1969 drm_connector_mask(connector))) 1970 continue; 1971 1972 encoder = intel_attached_encoder(to_intel_connector(connector)); 1973 if (encoder->type != INTEL_OUTPUT_EDP) 1974 continue; 1975 1976 drm_dbg(&dev_priv->drm, 1977 "Manually %sabling DRRS. %llu\n", 1978 val ? 
"en" : "dis", val); 1979 1980 intel_dp = enc_to_intel_dp(encoder); 1981 if (val) 1982 intel_drrs_enable(intel_dp, crtc_state); 1983 else 1984 intel_drrs_disable(intel_dp, crtc_state); 1985 } 1986 drm_connector_list_iter_end(&conn_iter); 1987 1988 out: 1989 drm_modeset_unlock(&crtc->base.mutex); 1990 if (ret) 1991 return ret; 1992 } 1993 1994 return 0; 1995 } 1996 1997 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); 1998 1999 static ssize_t 2000 i915_fifo_underrun_reset_write(struct file *filp, 2001 const char __user *ubuf, 2002 size_t cnt, loff_t *ppos) 2003 { 2004 struct drm_i915_private *dev_priv = filp->private_data; 2005 struct intel_crtc *crtc; 2006 struct drm_device *dev = &dev_priv->drm; 2007 int ret; 2008 bool reset; 2009 2010 ret = kstrtobool_from_user(ubuf, cnt, &reset); 2011 if (ret) 2012 return ret; 2013 2014 if (!reset) 2015 return cnt; 2016 2017 for_each_intel_crtc(dev, crtc) { 2018 struct drm_crtc_commit *commit; 2019 struct intel_crtc_state *crtc_state; 2020 2021 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); 2022 if (ret) 2023 return ret; 2024 2025 crtc_state = to_intel_crtc_state(crtc->base.state); 2026 commit = crtc_state->uapi.commit; 2027 if (commit) { 2028 ret = wait_for_completion_interruptible(&commit->hw_done); 2029 if (!ret) 2030 ret = wait_for_completion_interruptible(&commit->flip_done); 2031 } 2032 2033 if (!ret && crtc_state->hw.active) { 2034 drm_dbg_kms(&dev_priv->drm, 2035 "Re-arming FIFO underruns on pipe %c\n", 2036 pipe_name(crtc->pipe)); 2037 2038 intel_crtc_arm_fifo_underrun(crtc, crtc_state); 2039 } 2040 2041 drm_modeset_unlock(&crtc->base.mutex); 2042 2043 if (ret) 2044 return ret; 2045 } 2046 2047 ret = intel_fbc_reset_underrun(&dev_priv->fbc); 2048 if (ret) 2049 return ret; 2050 2051 return cnt; 2052 } 2053 2054 static const struct file_operations i915_fifo_underrun_reset_ops = { 2055 .owner = THIS_MODULE, 2056 .open = simple_open, 2057 .write = i915_fifo_underrun_reset_write, 2058 .llseek = default_llseek, 2059 }; 2060 2061 static const struct drm_info_list intel_display_debugfs_list[] = { 2062 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 2063 {"i915_fbc_status", i915_fbc_status, 0}, 2064 {"i915_ips_status", i915_ips_status, 0}, 2065 {"i915_sr_status", i915_sr_status, 0}, 2066 {"i915_opregion", i915_opregion, 0}, 2067 {"i915_vbt", i915_vbt, 0}, 2068 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2069 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 2070 {"i915_power_domain_info", i915_power_domain_info, 0}, 2071 {"i915_dmc_info", i915_dmc_info, 0}, 2072 {"i915_display_info", i915_display_info, 0}, 2073 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 2074 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 2075 {"i915_ddb_info", i915_ddb_info, 0}, 2076 {"i915_drrs_status", i915_drrs_status, 0}, 2077 {"i915_lpsp_status", i915_lpsp_status, 0}, 2078 }; 2079 2080 static const struct { 2081 const char *name; 2082 const struct file_operations *fops; 2083 } intel_display_debugfs_files[] = { 2084 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, 2085 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 2086 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 2087 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 2088 {"i915_fbc_false_color", &i915_fbc_false_color_fops}, 2089 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 2090 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 2091 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 2092 
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, 2093 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, 2094 {"i915_ipc_status", &i915_ipc_status_fops}, 2095 {"i915_drrs_ctl", &i915_drrs_ctl_fops}, 2096 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, 2097 }; 2098 2099 void intel_display_debugfs_register(struct drm_i915_private *i915) 2100 { 2101 struct drm_minor *minor = i915->drm.primary; 2102 int i; 2103 2104 for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { 2105 debugfs_create_file(intel_display_debugfs_files[i].name, 2106 S_IRUGO | S_IWUSR, 2107 minor->debugfs_root, 2108 to_i915(minor->dev), 2109 intel_display_debugfs_files[i].fops); 2110 } 2111 2112 drm_debugfs_create_files(intel_display_debugfs_list, 2113 ARRAY_SIZE(intel_display_debugfs_list), 2114 minor->debugfs_root, minor); 2115 } 2116 2117 static int i915_panel_show(struct seq_file *m, void *data) 2118 { 2119 struct drm_connector *connector = m->private; 2120 struct intel_dp *intel_dp = 2121 intel_attached_dp(to_intel_connector(connector)); 2122 2123 if (connector->status != connector_status_connected) 2124 return -ENODEV; 2125 2126 seq_printf(m, "Panel power up delay: %d\n", 2127 intel_dp->pps.panel_power_up_delay); 2128 seq_printf(m, "Panel power down delay: %d\n", 2129 intel_dp->pps.panel_power_down_delay); 2130 seq_printf(m, "Backlight on delay: %d\n", 2131 intel_dp->pps.backlight_on_delay); 2132 seq_printf(m, "Backlight off delay: %d\n", 2133 intel_dp->pps.backlight_off_delay); 2134 2135 return 0; 2136 } 2137 DEFINE_SHOW_ATTRIBUTE(i915_panel); 2138 2139 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) 2140 { 2141 struct drm_connector *connector = m->private; 2142 struct drm_i915_private *i915 = to_i915(connector->dev); 2143 struct intel_connector *intel_connector = to_intel_connector(connector); 2144 int ret; 2145 2146 ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex); 2147 if (ret) 2148 return ret; 2149 2150 if (!connector->encoder || connector->status != connector_status_connected) { 2151 ret = -ENODEV; 2152 goto out; 2153 } 2154 2155 seq_printf(m, "%s:%d HDCP version: ", connector->name, 2156 connector->base.id); 2157 intel_hdcp_info(m, intel_connector); 2158 2159 out: 2160 drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); 2161 2162 return ret; 2163 } 2164 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); 2165 2166 static int i915_psr_status_show(struct seq_file *m, void *data) 2167 { 2168 struct drm_connector *connector = m->private; 2169 struct intel_dp *intel_dp = 2170 intel_attached_dp(to_intel_connector(connector)); 2171 2172 return intel_psr_status(m, intel_dp); 2173 } 2174 DEFINE_SHOW_ATTRIBUTE(i915_psr_status); 2175 2176 static int i915_lpsp_capability_show(struct seq_file *m, void *data) 2177 { 2178 struct drm_connector *connector = m->private; 2179 struct drm_i915_private *i915 = to_i915(connector->dev); 2180 struct intel_encoder *encoder; 2181 bool lpsp_capable = false; 2182 2183 encoder = intel_attached_encoder(to_intel_connector(connector)); 2184 if (!encoder) 2185 return -ENODEV; 2186 2187 if (connector->status != connector_status_connected) 2188 return -ENODEV; 2189 2190 if (DISPLAY_VER(i915) >= 13) 2191 lpsp_capable = encoder->port <= PORT_B; 2192 else if (DISPLAY_VER(i915) >= 12) 2193 /* 2194 * Actually TGL can drive LPSP on port till DDI_C 2195 * but there is no physical connected DDI_C on TGL sku's, 2196 * even driver is not initilizing DDI_C port for gen12. 
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;
	bool lpsp_capable = false;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	if (DISPLAY_VER(i915) >= 13)
		lpsp_capable = encoder->port <= PORT_B;
	else if (DISPLAY_VER(i915) >= 12)
		/*
		 * TGL can actually drive LPSP on ports up to DDI_C, but no
		 * TGL SKU has a physical DDI_C connected, and the driver
		 * does not initialize DDI_C for gen12 either.
		 */
		lpsp_capable = encoder->port <= PORT_B;
	else if (DISPLAY_VER(i915) == 11)
		lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
				connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	else if (IS_DISPLAY_VER(i915, 9, 10))
		lpsp_capable = (encoder->port == PORT_A &&
				(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
				 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
				 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;

	seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

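/*
 * Writing a boolean to i915_dsc_fec_support (handler below) simply latches
 * intel_dp->force_dsc_en; the write itself does not trigger a modeset.
 * Example invocation (the connector directory name is illustrative):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */
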
"true" : "false"); 2299 intel_dp->force_dsc_en = dsc_enable; 2300 2301 *offp += len; 2302 return len; 2303 } 2304 2305 static int i915_dsc_fec_support_open(struct inode *inode, 2306 struct file *file) 2307 { 2308 return single_open(file, i915_dsc_fec_support_show, 2309 inode->i_private); 2310 } 2311 2312 static const struct file_operations i915_dsc_fec_support_fops = { 2313 .owner = THIS_MODULE, 2314 .open = i915_dsc_fec_support_open, 2315 .read = seq_read, 2316 .llseek = seq_lseek, 2317 .release = single_release, 2318 .write = i915_dsc_fec_support_write 2319 }; 2320 2321 static int i915_dsc_bpp_show(struct seq_file *m, void *data) 2322 { 2323 struct drm_connector *connector = m->private; 2324 struct drm_device *dev = connector->dev; 2325 struct drm_crtc *crtc; 2326 struct intel_crtc_state *crtc_state; 2327 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 2328 int ret; 2329 2330 if (!encoder) 2331 return -ENODEV; 2332 2333 ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); 2334 if (ret) 2335 return ret; 2336 2337 crtc = connector->state->crtc; 2338 if (connector->status != connector_status_connected || !crtc) { 2339 ret = -ENODEV; 2340 goto out; 2341 } 2342 2343 crtc_state = to_intel_crtc_state(crtc->state); 2344 seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp); 2345 2346 out: drm_modeset_unlock(&dev->mode_config.connection_mutex); 2347 2348 return ret; 2349 } 2350 2351 static ssize_t i915_dsc_bpp_write(struct file *file, 2352 const char __user *ubuf, 2353 size_t len, loff_t *offp) 2354 { 2355 struct drm_connector *connector = 2356 ((struct seq_file *)file->private_data)->private; 2357 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 2358 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2359 int dsc_bpp = 0; 2360 int ret; 2361 2362 ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp); 2363 if (ret < 0) 2364 return ret; 2365 2366 intel_dp->force_dsc_bpp = dsc_bpp; 2367 *offp += len; 2368 2369 return len; 2370 } 2371 2372 static int i915_dsc_bpp_open(struct inode *inode, 2373 struct file *file) 2374 { 2375 return single_open(file, i915_dsc_bpp_show, 2376 inode->i_private); 2377 } 2378 2379 static const struct file_operations i915_dsc_bpp_fops = { 2380 .owner = THIS_MODULE, 2381 .open = i915_dsc_bpp_open, 2382 .read = seq_read, 2383 .llseek = seq_lseek, 2384 .release = single_release, 2385 .write = i915_dsc_bpp_write 2386 }; 2387 2388 /** 2389 * intel_connector_debugfs_add - add i915 specific connector debugfs files 2390 * @connector: pointer to a registered drm_connector 2391 * 2392 * Cleanup will be done by drm_connector_unregister() through a call to 2393 * drm_debugfs_connector_remove(). 2394 */ 2395 void intel_connector_debugfs_add(struct intel_connector *intel_connector) 2396 { 2397 struct drm_connector *connector = &intel_connector->base; 2398 struct dentry *root = connector->debugfs_entry; 2399 struct drm_i915_private *dev_priv = to_i915(connector->dev); 2400 2401 /* The connector must have been registered beforehands. 
/**
 * intel_connector_debugfs_add - add i915 specific connector debugfs files
 * @intel_connector: pointer to a registered intel_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 */
void intel_connector_debugfs_add(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return;

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (HAS_PSR(dev_priv) &&
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (DISPLAY_VER(dev_priv) >= 11 &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		debugfs_create_file("i915_dsc_fec_support", 0644, root,
				    connector, &i915_dsc_fec_support_fops);

		debugfs_create_file("i915_dsc_bpp", 0644, root,
				    connector, &i915_dsc_bpp_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
	    connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
		debugfs_create_file("i915_lpsp_capability", 0444, root,
				    connector, &i915_lpsp_capability_fops);
}

/**
 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
 * @crtc: pointer to a drm_crtc
 *
 * Failure to add debugfs entries should generally be ignored.
 */
void intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
	if (crtc->debugfs_entry)
		crtc_updates_add(crtc);
}