1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/debugfs.h> 30 #include <linux/sort.h> 31 #include "intel_drv.h" 32 33 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 34 { 35 return to_i915(node->minor->dev); 36 } 37 38 static __always_inline void seq_print_param(struct seq_file *m, 39 const char *name, 40 const char *type, 41 const void *x) 42 { 43 if (!__builtin_strcmp(type, "bool")) 44 seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x)); 45 else if (!__builtin_strcmp(type, "int")) 46 seq_printf(m, "i915.%s=%d\n", name, *(const int *)x); 47 else if (!__builtin_strcmp(type, "unsigned int")) 48 seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x); 49 else if (!__builtin_strcmp(type, "char *")) 50 seq_printf(m, "i915.%s=%s\n", name, *(const char **)x); 51 else 52 BUILD_BUG(); 53 } 54 55 static int i915_capabilities(struct seq_file *m, void *data) 56 { 57 struct drm_i915_private *dev_priv = node_to_i915(m->private); 58 const struct intel_device_info *info = INTEL_INFO(dev_priv); 59 60 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); 61 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); 62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); 63 64 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); 66 #undef PRINT_FLAG 67 68 kernel_param_lock(THIS_MODULE); 69 #define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x); 70 I915_PARAMS_FOR_EACH(PRINT_PARAM); 71 #undef PRINT_PARAM 72 kernel_param_unlock(THIS_MODULE); 73 74 return 0; 75 } 76 77 static char get_active_flag(struct drm_i915_gem_object *obj) 78 { 79 return i915_gem_object_is_active(obj) ? '*' : ' '; 80 } 81 82 static char get_pin_flag(struct drm_i915_gem_object *obj) 83 { 84 return obj->pin_display ? 'p' : ' '; 85 } 86 87 static char get_tiling_flag(struct drm_i915_gem_object *obj) 88 { 89 switch (i915_gem_object_get_tiling(obj)) { 90 default: 91 case I915_TILING_NONE: return ' '; 92 case I915_TILING_X: return 'X'; 93 case I915_TILING_Y: return 'Y'; 94 } 95 } 96 97 static char get_global_flag(struct drm_i915_gem_object *obj) 98 { 99 return !list_empty(&obj->userfault_link) ? 'g' : ' '; 100 } 101 102 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 103 { 104 return obj->mm.mapping ? 'M' : ' '; 105 } 106 107 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 108 { 109 u64 size = 0; 110 struct i915_vma *vma; 111 112 list_for_each_entry(vma, &obj->vma_list, obj_link) { 113 if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node)) 114 size += vma->node.size; 115 } 116 117 return size; 118 } 119 120 static void 121 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 122 { 123 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 124 struct intel_engine_cs *engine; 125 struct i915_vma *vma; 126 unsigned int frontbuffer_bits; 127 int pin_count = 0; 128 129 lockdep_assert_held(&obj->base.dev->struct_mutex); 130 131 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s", 132 &obj->base, 133 get_active_flag(obj), 134 get_pin_flag(obj), 135 get_tiling_flag(obj), 136 get_global_flag(obj), 137 get_pin_mapped_flag(obj), 138 obj->base.size / 1024, 139 obj->base.read_domains, 140 obj->base.write_domain, 141 i915_cache_level_str(dev_priv, obj->cache_level), 142 obj->mm.dirty ? " dirty" : "", 143 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); 144 if (obj->base.name) 145 seq_printf(m, " (name: %d)", obj->base.name); 146 list_for_each_entry(vma, &obj->vma_list, obj_link) { 147 if (i915_vma_is_pinned(vma)) 148 pin_count++; 149 } 150 seq_printf(m, " (pinned x %d)", pin_count); 151 if (obj->pin_display) 152 seq_printf(m, " (display)"); 153 list_for_each_entry(vma, &obj->vma_list, obj_link) { 154 if (!drm_mm_node_allocated(&vma->node)) 155 continue; 156 157 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 158 i915_vma_is_ggtt(vma) ? "g" : "pp", 159 vma->node.start, vma->node.size); 160 if (i915_vma_is_ggtt(vma)) { 161 switch (vma->ggtt_view.type) { 162 case I915_GGTT_VIEW_NORMAL: 163 seq_puts(m, ", normal"); 164 break; 165 166 case I915_GGTT_VIEW_PARTIAL: 167 seq_printf(m, ", partial [%08llx+%x]", 168 vma->ggtt_view.partial.offset << PAGE_SHIFT, 169 vma->ggtt_view.partial.size << PAGE_SHIFT); 170 break; 171 172 case I915_GGTT_VIEW_ROTATED: 173 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 174 vma->ggtt_view.rotated.plane[0].width, 175 vma->ggtt_view.rotated.plane[0].height, 176 vma->ggtt_view.rotated.plane[0].stride, 177 vma->ggtt_view.rotated.plane[0].offset, 178 vma->ggtt_view.rotated.plane[1].width, 179 vma->ggtt_view.rotated.plane[1].height, 180 vma->ggtt_view.rotated.plane[1].stride, 181 vma->ggtt_view.rotated.plane[1].offset); 182 break; 183 184 default: 185 MISSING_CASE(vma->ggtt_view.type); 186 break; 187 } 188 } 189 if (vma->fence) 190 seq_printf(m, " , fence: %d%s", 191 vma->fence->id, 192 i915_gem_active_isset(&vma->last_fence) ? "*" : ""); 193 seq_puts(m, ")"); 194 } 195 if (obj->stolen) 196 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 197 198 engine = i915_gem_object_last_write_engine(obj); 199 if (engine) 200 seq_printf(m, " (%s)", engine->name); 201 202 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); 203 if (frontbuffer_bits) 204 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); 205 } 206 207 static int obj_rank_by_stolen(const void *A, const void *B) 208 { 209 const struct drm_i915_gem_object *a = 210 *(const struct drm_i915_gem_object **)A; 211 const struct drm_i915_gem_object *b = 212 *(const struct drm_i915_gem_object **)B; 213 214 if (a->stolen->start < b->stolen->start) 215 return -1; 216 if (a->stolen->start > b->stolen->start) 217 return 1; 218 return 0; 219 } 220 221 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 222 { 223 struct drm_i915_private *dev_priv = node_to_i915(m->private); 224 struct drm_device *dev = &dev_priv->drm; 225 struct drm_i915_gem_object **objects; 226 struct drm_i915_gem_object *obj; 227 u64 total_obj_size, total_gtt_size; 228 unsigned long total, count, n; 229 int ret; 230 231 total = READ_ONCE(dev_priv->mm.object_count); 232 objects = drm_malloc_ab(total, sizeof(*objects)); 233 if (!objects) 234 return -ENOMEM; 235 236 ret = mutex_lock_interruptible(&dev->struct_mutex); 237 if (ret) 238 goto out; 239 240 total_obj_size = total_gtt_size = count = 0; 241 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { 242 if (count == total) 243 break; 244 245 if (obj->stolen == NULL) 246 continue; 247 248 objects[count++] = obj; 249 total_obj_size += obj->base.size; 250 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 251 252 } 253 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) { 254 if (count == total) 255 break; 256 257 if (obj->stolen == NULL) 258 continue; 259 260 objects[count++] = obj; 261 total_obj_size += obj->base.size; 262 } 263 264 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); 265 266 seq_puts(m, "Stolen:\n"); 267 for (n = 0; n < count; n++) { 268 seq_puts(m, " "); 269 describe_obj(m, objects[n]); 270 seq_putc(m, '\n'); 271 } 272 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n", 273 count, total_obj_size, total_gtt_size); 274 275 mutex_unlock(&dev->struct_mutex); 276 out: 277 drm_free_large(objects); 278 return ret; 279 } 280 281 struct file_stats { 282 struct drm_i915_file_private *file_priv; 283 unsigned long count; 284 u64 total, unbound; 285 u64 global, shared; 286 u64 active, inactive; 287 }; 288 289 static int per_file_stats(int id, void *ptr, void *data) 290 { 291 struct drm_i915_gem_object *obj = ptr; 292 struct file_stats *stats = data; 293 struct i915_vma *vma; 294 295 stats->count++; 296 stats->total += obj->base.size; 297 if (!obj->bind_count) 298 stats->unbound += obj->base.size; 299 if (obj->base.name || obj->base.dma_buf) 300 stats->shared += obj->base.size; 301 302 list_for_each_entry(vma, &obj->vma_list, obj_link) { 303 if (!drm_mm_node_allocated(&vma->node)) 304 continue; 305 306 if (i915_vma_is_ggtt(vma)) { 307 stats->global += vma->node.size; 308 } else { 309 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); 310 311 if (ppgtt->base.file != stats->file_priv) 312 continue; 313 } 314 315 if (i915_vma_is_active(vma)) 316 stats->active += vma->node.size; 317 else 318 stats->inactive += vma->node.size; 319 } 320 321 return 0; 322 } 323 324 #define print_file_stats(m, name, stats) do { \ 325 if (stats.count) \ 326 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ 327 name, \ 328 stats.count, \ 329 stats.total, \ 330 stats.active, \ 331 stats.inactive, \ 332 stats.global, \ 333 stats.shared, \ 334 stats.unbound); \ 335 } while (0) 336 337 static void print_batch_pool_stats(struct seq_file *m, 338 struct drm_i915_private *dev_priv) 339 { 340 struct drm_i915_gem_object *obj; 341 struct file_stats stats; 342 struct intel_engine_cs *engine; 343 enum intel_engine_id id; 344 int j; 345 346 memset(&stats, 0, sizeof(stats)); 347 348 for_each_engine(engine, dev_priv, id) { 349 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 350 list_for_each_entry(obj, 351 &engine->batch_pool.cache_list[j], 352 batch_pool_link) 353 per_file_stats(0, obj, &stats); 354 } 355 } 356 357 print_file_stats(m, "[k]batch pool", stats); 358 } 359 360 static int per_file_ctx_stats(int id, void *ptr, void *data) 361 { 362 struct i915_gem_context *ctx = ptr; 363 int n; 364 365 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { 366 if (ctx->engine[n].state) 367 per_file_stats(0, ctx->engine[n].state->obj, data); 368 if (ctx->engine[n].ring) 369 per_file_stats(0, ctx->engine[n].ring->vma->obj, data); 370 } 371 372 return 0; 373 } 374 375 static void print_context_stats(struct seq_file *m, 376 struct drm_i915_private *dev_priv) 377 { 378 struct drm_device *dev = &dev_priv->drm; 379 struct file_stats stats; 380 struct drm_file *file; 381 382 memset(&stats, 0, sizeof(stats)); 383 384 mutex_lock(&dev->struct_mutex); 385 if (dev_priv->kernel_context) 386 per_file_ctx_stats(0, dev_priv->kernel_context, &stats); 387 388 list_for_each_entry(file, &dev->filelist, lhead) { 389 struct drm_i915_file_private *fpriv = file->driver_priv; 390 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); 391 } 392 mutex_unlock(&dev->struct_mutex); 393 394 print_file_stats(m, "[k]contexts", stats); 395 } 396 397 static int i915_gem_object_info(struct seq_file *m, void *data) 398 { 399 struct drm_i915_private *dev_priv = node_to_i915(m->private); 400 struct drm_device *dev = &dev_priv->drm; 401 struct i915_ggtt *ggtt = &dev_priv->ggtt; 402 u32 count, mapped_count, purgeable_count, dpy_count; 403 u64 size, mapped_size, purgeable_size, dpy_size; 404 struct drm_i915_gem_object *obj; 405 struct drm_file *file; 406 int ret; 407 408 ret = mutex_lock_interruptible(&dev->struct_mutex); 409 if (ret) 410 return ret; 411 412 seq_printf(m, "%u objects, %llu bytes\n", 413 dev_priv->mm.object_count, 414 dev_priv->mm.object_memory); 415 416 size = count = 0; 417 mapped_size = mapped_count = 0; 418 purgeable_size = purgeable_count = 0; 419 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) { 420 size += obj->base.size; 421 ++count; 422 423 if (obj->mm.madv == I915_MADV_DONTNEED) { 424 purgeable_size += obj->base.size; 425 ++purgeable_count; 426 } 427 428 if (obj->mm.mapping) { 429 mapped_count++; 430 mapped_size += obj->base.size; 431 } 432 } 433 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); 434 435 size = count = dpy_size = dpy_count = 0; 436 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { 437 size += obj->base.size; 438 ++count; 439 440 if (obj->pin_display) { 441 dpy_size += obj->base.size; 442 ++dpy_count; 443 } 444 445 if (obj->mm.madv == I915_MADV_DONTNEED) { 446 purgeable_size += obj->base.size; 447 ++purgeable_count; 448 } 449 450 if (obj->mm.mapping) { 451 mapped_count++; 452 mapped_size += obj->base.size; 453 } 454 } 455 seq_printf(m, "%u bound objects, %llu bytes\n", 456 count, size); 457 seq_printf(m, "%u purgeable objects, %llu bytes\n", 458 purgeable_count, purgeable_size); 459 seq_printf(m, "%u mapped objects, %llu bytes\n", 460 mapped_count, mapped_size); 461 seq_printf(m, "%u display objects (pinned), %llu bytes\n", 462 dpy_count, dpy_size); 463 464 seq_printf(m, "%llu [%llu] gtt total\n", 465 ggtt->base.total, ggtt->mappable_end); 466 467 seq_putc(m, '\n'); 468 print_batch_pool_stats(m, dev_priv); 469 mutex_unlock(&dev->struct_mutex); 470 471 mutex_lock(&dev->filelist_mutex); 472 print_context_stats(m, dev_priv); 473 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 474 struct file_stats stats; 475 struct drm_i915_file_private *file_priv = file->driver_priv; 476 struct drm_i915_gem_request *request; 477 struct task_struct *task; 478 479 memset(&stats, 0, sizeof(stats)); 480 stats.file_priv = file->driver_priv; 481 spin_lock(&file->table_lock); 482 idr_for_each(&file->object_idr, per_file_stats, &stats); 483 spin_unlock(&file->table_lock); 484 /* 485 * Although we have a valid reference on file->pid, that does 486 * not guarantee that the task_struct who called get_pid() is 487 * still alive (e.g. get_pid(current) => fork() => exit()). 488 * Therefore, we need to protect this ->comm access using RCU. 489 */ 490 mutex_lock(&dev->struct_mutex); 491 request = list_first_entry_or_null(&file_priv->mm.request_list, 492 struct drm_i915_gem_request, 493 client_link); 494 rcu_read_lock(); 495 task = pid_task(request && request->ctx->pid ? 496 request->ctx->pid : file->pid, 497 PIDTYPE_PID); 498 print_file_stats(m, task ? task->comm : "<unknown>", stats); 499 rcu_read_unlock(); 500 mutex_unlock(&dev->struct_mutex); 501 } 502 mutex_unlock(&dev->filelist_mutex); 503 504 return 0; 505 } 506 507 static int i915_gem_gtt_info(struct seq_file *m, void *data) 508 { 509 struct drm_info_node *node = m->private; 510 struct drm_i915_private *dev_priv = node_to_i915(node); 511 struct drm_device *dev = &dev_priv->drm; 512 bool show_pin_display_only = !!node->info_ent->data; 513 struct drm_i915_gem_object *obj; 514 u64 total_obj_size, total_gtt_size; 515 int count, ret; 516 517 ret = mutex_lock_interruptible(&dev->struct_mutex); 518 if (ret) 519 return ret; 520 521 total_obj_size = total_gtt_size = count = 0; 522 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { 523 if (show_pin_display_only && !obj->pin_display) 524 continue; 525 526 seq_puts(m, " "); 527 describe_obj(m, obj); 528 seq_putc(m, '\n'); 529 total_obj_size += obj->base.size; 530 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 531 count++; 532 } 533 534 mutex_unlock(&dev->struct_mutex); 535 536 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 537 count, total_obj_size, total_gtt_size); 538 539 return 0; 540 } 541 542 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 543 { 544 struct drm_i915_private *dev_priv = node_to_i915(m->private); 545 struct drm_device *dev = &dev_priv->drm; 546 struct intel_crtc *crtc; 547 int ret; 548 549 ret = mutex_lock_interruptible(&dev->struct_mutex); 550 if (ret) 551 return ret; 552 553 for_each_intel_crtc(dev, crtc) { 554 const char pipe = pipe_name(crtc->pipe); 555 const char plane = plane_name(crtc->plane); 556 struct intel_flip_work *work; 557 558 spin_lock_irq(&dev->event_lock); 559 work = crtc->flip_work; 560 if (work == NULL) { 561 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 562 pipe, plane); 563 } else { 564 u32 pending; 565 u32 addr; 566 567 pending = atomic_read(&work->pending); 568 if (pending) { 569 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n", 570 pipe, plane); 571 } else { 572 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 573 pipe, plane); 574 } 575 if (work->flip_queued_req) { 576 struct intel_engine_cs *engine = work->flip_queued_req->engine; 577 578 seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n", 579 engine->name, 580 work->flip_queued_req->global_seqno, 581 intel_engine_last_submit(engine), 582 intel_engine_get_seqno(engine), 583 i915_gem_request_completed(work->flip_queued_req)); 584 } else 585 seq_printf(m, "Flip not associated with any ring\n"); 586 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 587 work->flip_queued_vblank, 588 work->flip_ready_vblank, 589 intel_crtc_get_vblank_counter(crtc)); 590 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 591 592 if (INTEL_GEN(dev_priv) >= 4) 593 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane))); 594 else 595 addr = I915_READ(DSPADDR(crtc->plane)); 596 seq_printf(m, "Current scanout address 0x%08x\n", addr); 597 598 if (work->pending_flip_obj) { 599 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset); 600 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset); 601 } 602 } 603 spin_unlock_irq(&dev->event_lock); 604 } 605 606 mutex_unlock(&dev->struct_mutex); 607 608 return 0; 609 } 610 611 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 612 { 613 struct drm_i915_private *dev_priv = node_to_i915(m->private); 614 struct drm_device *dev = &dev_priv->drm; 615 struct drm_i915_gem_object *obj; 616 struct intel_engine_cs *engine; 617 enum intel_engine_id id; 618 int total = 0; 619 int ret, j; 620 621 ret = mutex_lock_interruptible(&dev->struct_mutex); 622 if (ret) 623 return ret; 624 625 for_each_engine(engine, dev_priv, id) { 626 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 627 int count; 628 629 count = 0; 630 list_for_each_entry(obj, 631 &engine->batch_pool.cache_list[j], 632 batch_pool_link) 633 count++; 634 seq_printf(m, "%s cache[%d]: %d objects\n", 635 engine->name, j, count); 636 637 list_for_each_entry(obj, 638 &engine->batch_pool.cache_list[j], 639 batch_pool_link) { 640 seq_puts(m, " "); 641 describe_obj(m, obj); 642 seq_putc(m, '\n'); 643 } 644 645 total += count; 646 } 647 } 648 649 seq_printf(m, "total: %d\n", total); 650 651 mutex_unlock(&dev->struct_mutex); 652 653 return 0; 654 } 655 656 static void print_request(struct seq_file *m, 657 struct drm_i915_gem_request *rq, 658 const char *prefix) 659 { 660 seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix, 661 rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno, 662 rq->priotree.priority, 663 jiffies_to_msecs(jiffies - rq->emitted_jiffies), 664 rq->timeline->common->name); 665 } 666 667 static int i915_gem_request_info(struct seq_file *m, void *data) 668 { 669 struct drm_i915_private *dev_priv = node_to_i915(m->private); 670 struct drm_device *dev = &dev_priv->drm; 671 struct drm_i915_gem_request *req; 672 struct intel_engine_cs *engine; 673 enum intel_engine_id id; 674 int ret, any; 675 676 ret = mutex_lock_interruptible(&dev->struct_mutex); 677 if (ret) 678 return ret; 679 680 any = 0; 681 for_each_engine(engine, dev_priv, id) { 682 int count; 683 684 count = 0; 685 list_for_each_entry(req, &engine->timeline->requests, link) 686 count++; 687 if (count == 0) 688 continue; 689 690 seq_printf(m, "%s requests: %d\n", engine->name, count); 691 list_for_each_entry(req, &engine->timeline->requests, link) 692 print_request(m, req, " "); 693 694 any++; 695 } 696 mutex_unlock(&dev->struct_mutex); 697 698 if (any == 0) 699 seq_puts(m, "No requests\n"); 700 701 return 0; 702 } 703 704 static void i915_ring_seqno_info(struct seq_file *m, 705 struct intel_engine_cs *engine) 706 { 707 struct intel_breadcrumbs *b = &engine->breadcrumbs; 708 struct rb_node *rb; 709 710 seq_printf(m, "Current sequence (%s): %x\n", 711 engine->name, intel_engine_get_seqno(engine)); 712 713 spin_lock_irq(&b->rb_lock); 714 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 715 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 716 717 seq_printf(m, "Waiting (%s): %s [%d] on %x\n", 718 engine->name, w->tsk->comm, w->tsk->pid, w->seqno); 719 } 720 spin_unlock_irq(&b->rb_lock); 721 } 722 723 static int i915_gem_seqno_info(struct seq_file *m, void *data) 724 { 725 struct drm_i915_private *dev_priv = node_to_i915(m->private); 726 struct intel_engine_cs *engine; 727 enum intel_engine_id id; 728 729 for_each_engine(engine, dev_priv, id) 730 i915_ring_seqno_info(m, engine); 731 732 return 0; 733 } 734 735 736 static int i915_interrupt_info(struct seq_file *m, void *data) 737 { 738 struct drm_i915_private *dev_priv = node_to_i915(m->private); 739 struct intel_engine_cs *engine; 740 enum intel_engine_id id; 741 int i, pipe; 742 743 intel_runtime_pm_get(dev_priv); 744 745 if (IS_CHERRYVIEW(dev_priv)) { 746 seq_printf(m, "Master Interrupt Control:\t%08x\n", 747 I915_READ(GEN8_MASTER_IRQ)); 748 749 seq_printf(m, "Display IER:\t%08x\n", 750 I915_READ(VLV_IER)); 751 seq_printf(m, "Display IIR:\t%08x\n", 752 I915_READ(VLV_IIR)); 753 seq_printf(m, "Display IIR_RW:\t%08x\n", 754 I915_READ(VLV_IIR_RW)); 755 seq_printf(m, "Display IMR:\t%08x\n", 756 I915_READ(VLV_IMR)); 757 for_each_pipe(dev_priv, pipe) { 758 enum intel_display_power_domain power_domain; 759 760 power_domain = POWER_DOMAIN_PIPE(pipe); 761 if (!intel_display_power_get_if_enabled(dev_priv, 762 power_domain)) { 763 seq_printf(m, "Pipe %c power disabled\n", 764 pipe_name(pipe)); 765 continue; 766 } 767 768 seq_printf(m, "Pipe %c stat:\t%08x\n", 769 pipe_name(pipe), 770 I915_READ(PIPESTAT(pipe))); 771 772 intel_display_power_put(dev_priv, power_domain); 773 } 774 775 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 776 seq_printf(m, "Port hotplug:\t%08x\n", 777 I915_READ(PORT_HOTPLUG_EN)); 778 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 779 I915_READ(VLV_DPFLIPSTAT)); 780 seq_printf(m, "DPINVGTT:\t%08x\n", 781 I915_READ(DPINVGTT)); 782 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 783 784 for (i = 0; i < 4; i++) { 785 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 786 i, I915_READ(GEN8_GT_IMR(i))); 787 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 788 i, I915_READ(GEN8_GT_IIR(i))); 789 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 790 i, I915_READ(GEN8_GT_IER(i))); 791 } 792 793 seq_printf(m, "PCU interrupt mask:\t%08x\n", 794 I915_READ(GEN8_PCU_IMR)); 795 seq_printf(m, "PCU interrupt identity:\t%08x\n", 796 I915_READ(GEN8_PCU_IIR)); 797 seq_printf(m, "PCU interrupt enable:\t%08x\n", 798 I915_READ(GEN8_PCU_IER)); 799 } else if (INTEL_GEN(dev_priv) >= 8) { 800 seq_printf(m, "Master Interrupt Control:\t%08x\n", 801 I915_READ(GEN8_MASTER_IRQ)); 802 803 for (i = 0; i < 4; i++) { 804 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 805 i, I915_READ(GEN8_GT_IMR(i))); 806 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 807 i, I915_READ(GEN8_GT_IIR(i))); 808 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 809 i, I915_READ(GEN8_GT_IER(i))); 810 } 811 812 for_each_pipe(dev_priv, pipe) { 813 enum intel_display_power_domain power_domain; 814 815 power_domain = POWER_DOMAIN_PIPE(pipe); 816 if (!intel_display_power_get_if_enabled(dev_priv, 817 power_domain)) { 818 seq_printf(m, "Pipe %c power disabled\n", 819 pipe_name(pipe)); 820 continue; 821 } 822 seq_printf(m, "Pipe %c IMR:\t%08x\n", 823 pipe_name(pipe), 824 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 825 seq_printf(m, "Pipe %c IIR:\t%08x\n", 826 pipe_name(pipe), 827 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 828 seq_printf(m, "Pipe %c IER:\t%08x\n", 829 pipe_name(pipe), 830 I915_READ(GEN8_DE_PIPE_IER(pipe))); 831 832 intel_display_power_put(dev_priv, power_domain); 833 } 834 835 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 836 I915_READ(GEN8_DE_PORT_IMR)); 837 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 838 I915_READ(GEN8_DE_PORT_IIR)); 839 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 840 I915_READ(GEN8_DE_PORT_IER)); 841 842 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 843 I915_READ(GEN8_DE_MISC_IMR)); 844 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 845 I915_READ(GEN8_DE_MISC_IIR)); 846 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 847 I915_READ(GEN8_DE_MISC_IER)); 848 849 seq_printf(m, "PCU interrupt mask:\t%08x\n", 850 I915_READ(GEN8_PCU_IMR)); 851 seq_printf(m, "PCU interrupt identity:\t%08x\n", 852 I915_READ(GEN8_PCU_IIR)); 853 seq_printf(m, "PCU interrupt enable:\t%08x\n", 854 I915_READ(GEN8_PCU_IER)); 855 } else if (IS_VALLEYVIEW(dev_priv)) { 856 seq_printf(m, "Display IER:\t%08x\n", 857 I915_READ(VLV_IER)); 858 seq_printf(m, "Display IIR:\t%08x\n", 859 I915_READ(VLV_IIR)); 860 seq_printf(m, "Display IIR_RW:\t%08x\n", 861 I915_READ(VLV_IIR_RW)); 862 seq_printf(m, "Display IMR:\t%08x\n", 863 I915_READ(VLV_IMR)); 864 for_each_pipe(dev_priv, pipe) { 865 enum intel_display_power_domain power_domain; 866 867 power_domain = POWER_DOMAIN_PIPE(pipe); 868 if (!intel_display_power_get_if_enabled(dev_priv, 869 power_domain)) { 870 seq_printf(m, "Pipe %c power disabled\n", 871 pipe_name(pipe)); 872 continue; 873 } 874 875 seq_printf(m, "Pipe %c stat:\t%08x\n", 876 pipe_name(pipe), 877 I915_READ(PIPESTAT(pipe))); 878 intel_display_power_put(dev_priv, power_domain); 879 } 880 881 seq_printf(m, "Master IER:\t%08x\n", 882 I915_READ(VLV_MASTER_IER)); 883 884 seq_printf(m, "Render IER:\t%08x\n", 885 I915_READ(GTIER)); 886 seq_printf(m, "Render IIR:\t%08x\n", 887 I915_READ(GTIIR)); 888 seq_printf(m, "Render IMR:\t%08x\n", 889 I915_READ(GTIMR)); 890 891 seq_printf(m, "PM IER:\t\t%08x\n", 892 I915_READ(GEN6_PMIER)); 893 seq_printf(m, "PM IIR:\t\t%08x\n", 894 I915_READ(GEN6_PMIIR)); 895 seq_printf(m, "PM IMR:\t\t%08x\n", 896 I915_READ(GEN6_PMIMR)); 897 898 seq_printf(m, "Port hotplug:\t%08x\n", 899 I915_READ(PORT_HOTPLUG_EN)); 900 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 901 I915_READ(VLV_DPFLIPSTAT)); 902 seq_printf(m, "DPINVGTT:\t%08x\n", 903 I915_READ(DPINVGTT)); 904 905 } else if (!HAS_PCH_SPLIT(dev_priv)) { 906 seq_printf(m, "Interrupt enable: %08x\n", 907 I915_READ(IER)); 908 seq_printf(m, "Interrupt identity: %08x\n", 909 I915_READ(IIR)); 910 seq_printf(m, "Interrupt mask: %08x\n", 911 I915_READ(IMR)); 912 for_each_pipe(dev_priv, pipe) 913 seq_printf(m, "Pipe %c stat: %08x\n", 914 pipe_name(pipe), 915 I915_READ(PIPESTAT(pipe))); 916 } else { 917 seq_printf(m, "North Display Interrupt enable: %08x\n", 918 I915_READ(DEIER)); 919 seq_printf(m, "North Display Interrupt identity: %08x\n", 920 I915_READ(DEIIR)); 921 seq_printf(m, "North Display Interrupt mask: %08x\n", 922 I915_READ(DEIMR)); 923 seq_printf(m, "South Display Interrupt enable: %08x\n", 924 I915_READ(SDEIER)); 925 seq_printf(m, "South Display Interrupt identity: %08x\n", 926 I915_READ(SDEIIR)); 927 seq_printf(m, "South Display Interrupt mask: %08x\n", 928 I915_READ(SDEIMR)); 929 seq_printf(m, "Graphics Interrupt enable: %08x\n", 930 I915_READ(GTIER)); 931 seq_printf(m, "Graphics Interrupt identity: %08x\n", 932 I915_READ(GTIIR)); 933 seq_printf(m, "Graphics Interrupt mask: %08x\n", 934 I915_READ(GTIMR)); 935 } 936 for_each_engine(engine, dev_priv, id) { 937 if (INTEL_GEN(dev_priv) >= 6) { 938 seq_printf(m, 939 "Graphics Interrupt mask (%s): %08x\n", 940 engine->name, I915_READ_IMR(engine)); 941 } 942 i915_ring_seqno_info(m, engine); 943 } 944 intel_runtime_pm_put(dev_priv); 945 946 return 0; 947 } 948 949 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 950 { 951 struct drm_i915_private *dev_priv = node_to_i915(m->private); 952 struct drm_device *dev = &dev_priv->drm; 953 int i, ret; 954 955 ret = mutex_lock_interruptible(&dev->struct_mutex); 956 if (ret) 957 return ret; 958 959 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 960 for (i = 0; i < dev_priv->num_fence_regs; i++) { 961 struct i915_vma *vma = dev_priv->fence_regs[i].vma; 962 963 seq_printf(m, "Fence %d, pin count = %d, object = ", 964 i, dev_priv->fence_regs[i].pin_count); 965 if (!vma) 966 seq_puts(m, "unused"); 967 else 968 describe_obj(m, vma->obj); 969 seq_putc(m, '\n'); 970 } 971 972 mutex_unlock(&dev->struct_mutex); 973 return 0; 974 } 975 976 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 977 static ssize_t gpu_state_read(struct file *file, char __user *ubuf, 978 size_t count, loff_t *pos) 979 { 980 struct i915_gpu_state *error = file->private_data; 981 struct drm_i915_error_state_buf str; 982 ssize_t ret; 983 loff_t tmp; 984 985 if (!error) 986 return 0; 987 988 ret = i915_error_state_buf_init(&str, error->i915, count, *pos); 989 if (ret) 990 return ret; 991 992 ret = i915_error_state_to_str(&str, error); 993 if (ret) 994 goto out; 995 996 tmp = 0; 997 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes); 998 if (ret < 0) 999 goto out; 1000 1001 *pos = str.start + ret; 1002 out: 1003 i915_error_state_buf_release(&str); 1004 return ret; 1005 } 1006 1007 static int gpu_state_release(struct inode *inode, struct file *file) 1008 { 1009 i915_gpu_state_put(file->private_data); 1010 return 0; 1011 } 1012 1013 static int i915_gpu_info_open(struct inode *inode, struct file *file) 1014 { 1015 struct drm_i915_private *i915 = inode->i_private; 1016 struct i915_gpu_state *gpu; 1017 1018 intel_runtime_pm_get(i915); 1019 gpu = i915_capture_gpu_state(i915); 1020 intel_runtime_pm_put(i915); 1021 if (!gpu) 1022 return -ENOMEM; 1023 1024 file->private_data = gpu; 1025 return 0; 1026 } 1027 1028 static const struct file_operations i915_gpu_info_fops = { 1029 .owner = THIS_MODULE, 1030 .open = i915_gpu_info_open, 1031 .read = gpu_state_read, 1032 .llseek = default_llseek, 1033 .release = gpu_state_release, 1034 }; 1035 1036 static ssize_t 1037 i915_error_state_write(struct file *filp, 1038 const char __user *ubuf, 1039 size_t cnt, 1040 loff_t *ppos) 1041 { 1042 struct i915_gpu_state *error = filp->private_data; 1043 1044 if (!error) 1045 return 0; 1046 1047 DRM_DEBUG_DRIVER("Resetting error state\n"); 1048 i915_reset_error_state(error->i915); 1049 1050 return cnt; 1051 } 1052 1053 static int i915_error_state_open(struct inode *inode, struct file *file) 1054 { 1055 file->private_data = i915_first_error_state(inode->i_private); 1056 return 0; 1057 } 1058 1059 static const struct file_operations i915_error_state_fops = { 1060 .owner = THIS_MODULE, 1061 .open = i915_error_state_open, 1062 .read = gpu_state_read, 1063 .write = i915_error_state_write, 1064 .llseek = default_llseek, 1065 .release = gpu_state_release, 1066 }; 1067 #endif 1068 1069 static int 1070 i915_next_seqno_set(void *data, u64 val) 1071 { 1072 struct drm_i915_private *dev_priv = data; 1073 struct drm_device *dev = &dev_priv->drm; 1074 int ret; 1075 1076 ret = mutex_lock_interruptible(&dev->struct_mutex); 1077 if (ret) 1078 return ret; 1079 1080 ret = i915_gem_set_global_seqno(dev, val); 1081 mutex_unlock(&dev->struct_mutex); 1082 1083 return ret; 1084 } 1085 1086 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1087 NULL, i915_next_seqno_set, 1088 "0x%llx\n"); 1089 1090 static int i915_frequency_info(struct seq_file *m, void *unused) 1091 { 1092 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1093 int ret = 0; 1094 1095 intel_runtime_pm_get(dev_priv); 1096 1097 if (IS_GEN5(dev_priv)) { 1098 u16 rgvswctl = I915_READ16(MEMSWCTL); 1099 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1100 1101 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1102 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1103 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1104 MEMSTAT_VID_SHIFT); 1105 seq_printf(m, "Current P-state: %d\n", 1106 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1107 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1108 u32 freq_sts; 1109 1110 mutex_lock(&dev_priv->rps.hw_lock); 1111 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1112 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1113 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1114 1115 seq_printf(m, "actual GPU freq: %d MHz\n", 1116 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1117 1118 seq_printf(m, "current GPU freq: %d MHz\n", 1119 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1120 1121 seq_printf(m, "max GPU freq: %d MHz\n", 1122 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1123 1124 seq_printf(m, "min GPU freq: %d MHz\n", 1125 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1126 1127 seq_printf(m, "idle GPU freq: %d MHz\n", 1128 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1129 1130 seq_printf(m, 1131 "efficient (RPe) frequency: %d MHz\n", 1132 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1133 mutex_unlock(&dev_priv->rps.hw_lock); 1134 } else if (INTEL_GEN(dev_priv) >= 6) { 1135 u32 rp_state_limits; 1136 u32 gt_perf_status; 1137 u32 rp_state_cap; 1138 u32 rpmodectl, rpinclimit, rpdeclimit; 1139 u32 rpstat, cagf, reqf; 1140 u32 rpupei, rpcurup, rpprevup; 1141 u32 rpdownei, rpcurdown, rpprevdown; 1142 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1143 int max_freq; 1144 1145 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1146 if (IS_GEN9_LP(dev_priv)) { 1147 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 1148 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 1149 } else { 1150 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1151 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1152 } 1153 1154 /* RPSTAT1 is in the GT power well */ 1155 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1156 1157 reqf = I915_READ(GEN6_RPNSWREQ); 1158 if (IS_GEN9(dev_priv)) 1159 reqf >>= 23; 1160 else { 1161 reqf &= ~GEN6_TURBO_DISABLE; 1162 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1163 reqf >>= 24; 1164 else 1165 reqf >>= 25; 1166 } 1167 reqf = intel_gpu_freq(dev_priv, reqf); 1168 1169 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1170 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1171 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1172 1173 rpstat = I915_READ(GEN6_RPSTAT1); 1174 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 1175 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 1176 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 1177 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 1178 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 1179 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 1180 if (IS_GEN9(dev_priv)) 1181 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1182 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1183 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1184 else 1185 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1186 cagf = intel_gpu_freq(dev_priv, cagf); 1187 1188 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1189 1190 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 1191 pm_ier = I915_READ(GEN6_PMIER); 1192 pm_imr = I915_READ(GEN6_PMIMR); 1193 pm_isr = I915_READ(GEN6_PMISR); 1194 pm_iir = I915_READ(GEN6_PMIIR); 1195 pm_mask = I915_READ(GEN6_PMINTRMSK); 1196 } else { 1197 pm_ier = I915_READ(GEN8_GT_IER(2)); 1198 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1199 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1200 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1201 pm_mask = I915_READ(GEN6_PMINTRMSK); 1202 } 1203 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1204 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1205 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", 1206 dev_priv->rps.pm_intrmsk_mbz); 1207 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1208 seq_printf(m, "Render p-state ratio: %d\n", 1209 (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8); 1210 seq_printf(m, "Render p-state VID: %d\n", 1211 gt_perf_status & 0xff); 1212 seq_printf(m, "Render p-state limit: %d\n", 1213 rp_state_limits & 0xff); 1214 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1215 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1216 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1217 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1218 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1219 seq_printf(m, "CAGF: %dMHz\n", cagf); 1220 seq_printf(m, "RP CUR UP EI: %d (%dus)\n", 1221 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei)); 1222 seq_printf(m, "RP CUR UP: %d (%dus)\n", 1223 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); 1224 seq_printf(m, "RP PREV UP: %d (%dus)\n", 1225 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); 1226 seq_printf(m, "Up threshold: %d%%\n", 1227 dev_priv->rps.up_threshold); 1228 1229 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", 1230 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); 1231 seq_printf(m, "RP CUR DOWN: %d (%dus)\n", 1232 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); 1233 seq_printf(m, "RP PREV DOWN: %d (%dus)\n", 1234 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); 1235 seq_printf(m, "Down threshold: %d%%\n", 1236 dev_priv->rps.down_threshold); 1237 1238 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : 1239 rp_state_cap >> 16) & 0xff; 1240 max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1); 1241 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1242 intel_gpu_freq(dev_priv, max_freq)); 1243 1244 max_freq = (rp_state_cap & 0xff00) >> 8; 1245 max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1); 1246 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1247 intel_gpu_freq(dev_priv, max_freq)); 1248 1249 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : 1250 rp_state_cap >> 0) & 0xff; 1251 max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1); 1252 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1253 intel_gpu_freq(dev_priv, max_freq)); 1254 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1255 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1256 1257 seq_printf(m, "Current freq: %d MHz\n", 1258 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1259 seq_printf(m, "Actual freq: %d MHz\n", cagf); 1260 seq_printf(m, "Idle freq: %d MHz\n", 1261 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1262 seq_printf(m, "Min freq: %d MHz\n", 1263 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1264 seq_printf(m, "Boost freq: %d MHz\n", 1265 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); 1266 seq_printf(m, "Max freq: %d MHz\n", 1267 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1268 seq_printf(m, 1269 "efficient (RPe) frequency: %d MHz\n", 1270 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1271 } else { 1272 seq_puts(m, "no P-state info available\n"); 1273 } 1274 1275 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk); 1276 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1277 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1278 1279 intel_runtime_pm_put(dev_priv); 1280 return ret; 1281 } 1282 1283 static void i915_instdone_info(struct drm_i915_private *dev_priv, 1284 struct seq_file *m, 1285 struct intel_instdone *instdone) 1286 { 1287 int slice; 1288 int subslice; 1289 1290 seq_printf(m, "\t\tINSTDONE: 0x%08x\n", 1291 instdone->instdone); 1292 1293 if (INTEL_GEN(dev_priv) <= 3) 1294 return; 1295 1296 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n", 1297 instdone->slice_common); 1298 1299 if (INTEL_GEN(dev_priv) <= 6) 1300 return; 1301 1302 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 1303 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 1304 slice, subslice, instdone->sampler[slice][subslice]); 1305 1306 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 1307 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", 1308 slice, subslice, instdone->row[slice][subslice]); 1309 } 1310 1311 static int i915_hangcheck_info(struct seq_file *m, void *unused) 1312 { 1313 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1314 struct intel_engine_cs *engine; 1315 u64 acthd[I915_NUM_ENGINES]; 1316 u32 seqno[I915_NUM_ENGINES]; 1317 struct intel_instdone instdone; 1318 enum intel_engine_id id; 1319 1320 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 1321 seq_puts(m, "Wedged\n"); 1322 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) 1323 seq_puts(m, "Reset in progress: struct_mutex backoff\n"); 1324 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags)) 1325 seq_puts(m, "Reset in progress: reset handoff to waiter\n"); 1326 if (waitqueue_active(&dev_priv->gpu_error.wait_queue)) 1327 seq_puts(m, "Waiter holding struct mutex\n"); 1328 if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) 1329 seq_puts(m, "struct_mutex blocked for reset\n"); 1330 1331 if (!i915.enable_hangcheck) { 1332 seq_puts(m, "Hangcheck disabled\n"); 1333 return 0; 1334 } 1335 1336 intel_runtime_pm_get(dev_priv); 1337 1338 for_each_engine(engine, dev_priv, id) { 1339 acthd[id] = intel_engine_get_active_head(engine); 1340 seqno[id] = intel_engine_get_seqno(engine); 1341 } 1342 1343 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); 1344 1345 intel_runtime_pm_put(dev_priv); 1346 1347 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) 1348 seq_printf(m, "Hangcheck active, timer fires in %dms\n", 1349 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - 1350 jiffies)); 1351 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) 1352 seq_puts(m, "Hangcheck active, work pending\n"); 1353 else 1354 seq_puts(m, "Hangcheck inactive\n"); 1355 1356 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); 1357 1358 for_each_engine(engine, dev_priv, id) { 1359 struct intel_breadcrumbs *b = &engine->breadcrumbs; 1360 struct rb_node *rb; 1361 1362 seq_printf(m, "%s:\n", engine->name); 1363 seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n", 1364 engine->hangcheck.seqno, seqno[id], 1365 intel_engine_last_submit(engine), 1366 engine->timeline->inflight_seqnos); 1367 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n", 1368 yesno(intel_engine_has_waiter(engine)), 1369 yesno(test_bit(engine->id, 1370 &dev_priv->gpu_error.missed_irq_rings)), 1371 yesno(engine->hangcheck.stalled)); 1372 1373 spin_lock_irq(&b->rb_lock); 1374 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 1375 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 1376 1377 seq_printf(m, "\t%s [%d] waiting for %x\n", 1378 w->tsk->comm, w->tsk->pid, w->seqno); 1379 } 1380 spin_unlock_irq(&b->rb_lock); 1381 1382 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1383 (long long)engine->hangcheck.acthd, 1384 (long long)acthd[id]); 1385 seq_printf(m, "\taction = %s(%d) %d ms ago\n", 1386 hangcheck_action_to_str(engine->hangcheck.action), 1387 engine->hangcheck.action, 1388 jiffies_to_msecs(jiffies - 1389 engine->hangcheck.action_timestamp)); 1390 1391 if (engine->id == RCS) { 1392 seq_puts(m, "\tinstdone read =\n"); 1393 1394 i915_instdone_info(dev_priv, m, &instdone); 1395 1396 seq_puts(m, "\tinstdone accu =\n"); 1397 1398 i915_instdone_info(dev_priv, m, 1399 &engine->hangcheck.instdone); 1400 } 1401 } 1402 1403 return 0; 1404 } 1405 1406 static int ironlake_drpc_info(struct seq_file *m) 1407 { 1408 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1409 u32 rgvmodectl, rstdbyctl; 1410 u16 crstandvid; 1411 1412 rgvmodectl = I915_READ(MEMMODECTL); 1413 rstdbyctl = I915_READ(RSTDBYCTL); 1414 crstandvid = I915_READ16(CRSTANDVID); 1415 1416 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); 1417 seq_printf(m, "Boost freq: %d\n", 1418 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1419 MEMMODE_BOOST_FREQ_SHIFT); 1420 seq_printf(m, "HW control enabled: %s\n", 1421 yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); 1422 seq_printf(m, "SW control enabled: %s\n", 1423 yesno(rgvmodectl & MEMMODE_SWMODE_EN)); 1424 seq_printf(m, "Gated voltage change: %s\n", 1425 yesno(rgvmodectl & MEMMODE_RCLK_GATE)); 1426 seq_printf(m, "Starting frequency: P%d\n", 1427 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1428 seq_printf(m, "Max P-state: P%d\n", 1429 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1430 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1431 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1432 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1433 seq_printf(m, "Render standby enabled: %s\n", 1434 yesno(!(rstdbyctl & RCX_SW_EXIT))); 1435 seq_puts(m, "Current RS state: "); 1436 switch (rstdbyctl & RSX_STATUS_MASK) { 1437 case RSX_STATUS_ON: 1438 seq_puts(m, "on\n"); 1439 break; 1440 case RSX_STATUS_RC1: 1441 seq_puts(m, "RC1\n"); 1442 break; 1443 case RSX_STATUS_RC1E: 1444 seq_puts(m, "RC1E\n"); 1445 break; 1446 case RSX_STATUS_RS1: 1447 seq_puts(m, "RS1\n"); 1448 break; 1449 case RSX_STATUS_RS2: 1450 seq_puts(m, "RS2 (RC6)\n"); 1451 break; 1452 case RSX_STATUS_RS3: 1453 seq_puts(m, "RC3 (RC6+)\n"); 1454 break; 1455 default: 1456 seq_puts(m, "unknown\n"); 1457 break; 1458 } 1459 1460 return 0; 1461 } 1462 1463 static int i915_forcewake_domains(struct seq_file *m, void *data) 1464 { 1465 struct drm_i915_private *i915 = node_to_i915(m->private); 1466 struct intel_uncore_forcewake_domain *fw_domain; 1467 unsigned int tmp; 1468 1469 for_each_fw_domain(fw_domain, i915, tmp) 1470 seq_printf(m, "%s.wake_count = %u\n", 1471 intel_uncore_forcewake_domain_to_str(fw_domain->id), 1472 READ_ONCE(fw_domain->wake_count)); 1473 1474 return 0; 1475 } 1476 1477 static void print_rc6_res(struct seq_file *m, 1478 const char *title, 1479 const i915_reg_t reg) 1480 { 1481 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1482 1483 seq_printf(m, "%s %u (%llu us)\n", 1484 title, I915_READ(reg), 1485 intel_rc6_residency_us(dev_priv, reg)); 1486 } 1487 1488 static int vlv_drpc_info(struct seq_file *m) 1489 { 1490 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1491 u32 rpmodectl1, rcctl1, pw_status; 1492 1493 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1494 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1495 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1496 1497 seq_printf(m, "Video Turbo Mode: %s\n", 1498 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1499 seq_printf(m, "Turbo enabled: %s\n", 1500 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1501 seq_printf(m, "HW control enabled: %s\n", 1502 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1503 seq_printf(m, "SW control enabled: %s\n", 1504 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1505 GEN6_RP_MEDIA_SW_MODE)); 1506 seq_printf(m, "RC6 Enabled: %s\n", 1507 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1508 GEN6_RC_CTL_EI_MODE(1)))); 1509 seq_printf(m, "Render Power Well: %s\n", 1510 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1511 seq_printf(m, "Media Power Well: %s\n", 1512 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1513 1514 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); 1515 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); 1516 1517 return i915_forcewake_domains(m, NULL); 1518 } 1519 1520 static int gen6_drpc_info(struct seq_file *m) 1521 { 1522 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1523 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1524 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 1525 unsigned forcewake_count; 1526 int count = 0; 1527 1528 forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count); 1529 if (forcewake_count) { 1530 seq_puts(m, "RC information inaccurate because somebody " 1531 "holds a forcewake reference \n"); 1532 } else { 1533 /* NB: we cannot use forcewake, else we read the wrong values */ 1534 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1535 udelay(10); 1536 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1537 } 1538 1539 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1540 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1541 1542 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1543 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1544 if (INTEL_GEN(dev_priv) >= 9) { 1545 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); 1546 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); 1547 } 1548 1549 mutex_lock(&dev_priv->rps.hw_lock); 1550 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1551 mutex_unlock(&dev_priv->rps.hw_lock); 1552 1553 seq_printf(m, "Video Turbo Mode: %s\n", 1554 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1555 seq_printf(m, "HW control enabled: %s\n", 1556 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1557 seq_printf(m, "SW control enabled: %s\n", 1558 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1559 GEN6_RP_MEDIA_SW_MODE)); 1560 seq_printf(m, "RC1e Enabled: %s\n", 1561 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1562 seq_printf(m, "RC6 Enabled: %s\n", 1563 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1564 if (INTEL_GEN(dev_priv) >= 9) { 1565 seq_printf(m, "Render Well Gating Enabled: %s\n", 1566 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); 1567 seq_printf(m, "Media Well Gating Enabled: %s\n", 1568 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); 1569 } 1570 seq_printf(m, "Deep RC6 Enabled: %s\n", 1571 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1572 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1573 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1574 seq_puts(m, "Current RC state: "); 1575 switch (gt_core_status & GEN6_RCn_MASK) { 1576 case GEN6_RC0: 1577 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1578 seq_puts(m, "Core Power Down\n"); 1579 else 1580 seq_puts(m, "on\n"); 1581 break; 1582 case GEN6_RC3: 1583 seq_puts(m, "RC3\n"); 1584 break; 1585 case GEN6_RC6: 1586 seq_puts(m, "RC6\n"); 1587 break; 1588 case GEN6_RC7: 1589 seq_puts(m, "RC7\n"); 1590 break; 1591 default: 1592 seq_puts(m, "Unknown\n"); 1593 break; 1594 } 1595 1596 seq_printf(m, "Core Power Down: %s\n", 1597 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1598 if (INTEL_GEN(dev_priv) >= 9) { 1599 seq_printf(m, "Render Power Well: %s\n", 1600 (gen9_powergate_status & 1601 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); 1602 seq_printf(m, "Media Power Well: %s\n", 1603 (gen9_powergate_status & 1604 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1605 } 1606 1607 /* Not exactly sure what this is */ 1608 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", 1609 GEN6_GT_GFX_RC6_LOCKED); 1610 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); 1611 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 1612 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 1613 1614 seq_printf(m, "RC6 voltage: %dmV\n", 1615 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1616 seq_printf(m, "RC6+ voltage: %dmV\n", 1617 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1618 seq_printf(m, "RC6++ voltage: %dmV\n", 1619 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1620 return i915_forcewake_domains(m, NULL); 1621 } 1622 1623 static int i915_drpc_info(struct seq_file *m, void *unused) 1624 { 1625 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1626 int err; 1627 1628 intel_runtime_pm_get(dev_priv); 1629 1630 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1631 err = vlv_drpc_info(m); 1632 else if (INTEL_GEN(dev_priv) >= 6) 1633 err = gen6_drpc_info(m); 1634 else 1635 err = ironlake_drpc_info(m); 1636 1637 intel_runtime_pm_put(dev_priv); 1638 1639 return err; 1640 } 1641 1642 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1643 { 1644 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1645 1646 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1647 dev_priv->fb_tracking.busy_bits); 1648 1649 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1650 dev_priv->fb_tracking.flip_bits); 1651 1652 return 0; 1653 } 1654 1655 static int i915_fbc_status(struct seq_file *m, void *unused) 1656 { 1657 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1658 1659 if (!HAS_FBC(dev_priv)) { 1660 seq_puts(m, "FBC unsupported on this chipset\n"); 1661 return 0; 1662 } 1663 1664 intel_runtime_pm_get(dev_priv); 1665 mutex_lock(&dev_priv->fbc.lock); 1666 1667 if (intel_fbc_is_active(dev_priv)) 1668 seq_puts(m, "FBC enabled\n"); 1669 else 1670 seq_printf(m, "FBC disabled: %s\n", 1671 dev_priv->fbc.no_fbc_reason); 1672 1673 if (intel_fbc_is_active(dev_priv) && INTEL_GEN(dev_priv) >= 7) { 1674 uint32_t mask = INTEL_GEN(dev_priv) >= 8 ? 1675 BDW_FBC_COMPRESSION_MASK : 1676 IVB_FBC_COMPRESSION_MASK; 1677 seq_printf(m, "Compressing: %s\n", 1678 yesno(I915_READ(FBC_STATUS2) & mask)); 1679 } 1680 1681 mutex_unlock(&dev_priv->fbc.lock); 1682 intel_runtime_pm_put(dev_priv); 1683 1684 return 0; 1685 } 1686 1687 static int i915_fbc_fc_get(void *data, u64 *val) 1688 { 1689 struct drm_i915_private *dev_priv = data; 1690 1691 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1692 return -ENODEV; 1693 1694 *val = dev_priv->fbc.false_color; 1695 1696 return 0; 1697 } 1698 1699 static int i915_fbc_fc_set(void *data, u64 val) 1700 { 1701 struct drm_i915_private *dev_priv = data; 1702 u32 reg; 1703 1704 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1705 return -ENODEV; 1706 1707 mutex_lock(&dev_priv->fbc.lock); 1708 1709 reg = I915_READ(ILK_DPFC_CONTROL); 1710 dev_priv->fbc.false_color = val; 1711 1712 I915_WRITE(ILK_DPFC_CONTROL, val ? 1713 (reg | FBC_CTL_FALSE_COLOR) : 1714 (reg & ~FBC_CTL_FALSE_COLOR)); 1715 1716 mutex_unlock(&dev_priv->fbc.lock); 1717 return 0; 1718 } 1719 1720 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, 1721 i915_fbc_fc_get, i915_fbc_fc_set, 1722 "%llu\n"); 1723 1724 static int i915_ips_status(struct seq_file *m, void *unused) 1725 { 1726 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1727 1728 if (!HAS_IPS(dev_priv)) { 1729 seq_puts(m, "not supported\n"); 1730 return 0; 1731 } 1732 1733 intel_runtime_pm_get(dev_priv); 1734 1735 seq_printf(m, "Enabled by kernel parameter: %s\n", 1736 yesno(i915.enable_ips)); 1737 1738 if (INTEL_GEN(dev_priv) >= 8) { 1739 seq_puts(m, "Currently: unknown\n"); 1740 } else { 1741 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1742 seq_puts(m, "Currently: enabled\n"); 1743 else 1744 seq_puts(m, "Currently: disabled\n"); 1745 } 1746 1747 intel_runtime_pm_put(dev_priv); 1748 1749 return 0; 1750 } 1751 1752 static int i915_sr_status(struct seq_file *m, void *unused) 1753 { 1754 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1755 bool sr_enabled = false; 1756 1757 intel_runtime_pm_get(dev_priv); 1758 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 1759 1760 if (INTEL_GEN(dev_priv) >= 9) 1761 /* no global SR status; inspect per-plane WM */; 1762 else if (HAS_PCH_SPLIT(dev_priv)) 1763 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1764 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || 1765 IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 1766 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1767 else if (IS_I915GM(dev_priv)) 1768 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1769 else if (IS_PINEVIEW(dev_priv)) 1770 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1771 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1772 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 1773 1774 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 1775 intel_runtime_pm_put(dev_priv); 1776 1777 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); 1778 1779 return 0; 1780 } 1781 1782 static int i915_emon_status(struct seq_file *m, void *unused) 1783 { 1784 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1785 struct drm_device *dev = &dev_priv->drm; 1786 unsigned long temp, chipset, gfx; 1787 int ret; 1788 1789 if (!IS_GEN5(dev_priv)) 1790 return -ENODEV; 1791 1792 ret = mutex_lock_interruptible(&dev->struct_mutex); 1793 if (ret) 1794 return ret; 1795 1796 temp = i915_mch_val(dev_priv); 1797 chipset = i915_chipset_val(dev_priv); 1798 gfx = i915_gfx_val(dev_priv); 1799 mutex_unlock(&dev->struct_mutex); 1800 1801 seq_printf(m, "GMCH temp: %ld\n", temp); 1802 seq_printf(m, "Chipset power: %ld\n", chipset); 1803 seq_printf(m, "GFX power: %ld\n", gfx); 1804 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1805 1806 return 0; 1807 } 1808 1809 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1810 { 1811 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1812 int ret = 0; 1813 int gpu_freq, ia_freq; 1814 unsigned int max_gpu_freq, min_gpu_freq; 1815 1816 if (!HAS_LLC(dev_priv)) { 1817 seq_puts(m, "unsupported on this chipset\n"); 1818 return 0; 1819 } 1820 1821 intel_runtime_pm_get(dev_priv); 1822 1823 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1824 if (ret) 1825 goto out; 1826 1827 if (IS_GEN9_BC(dev_priv)) { 1828 /* Convert GT frequency to 50 HZ units */ 1829 min_gpu_freq = 1830 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1831 max_gpu_freq = 1832 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; 1833 } else { 1834 min_gpu_freq = dev_priv->rps.min_freq_softlimit; 1835 max_gpu_freq = dev_priv->rps.max_freq_softlimit; 1836 } 1837 1838 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1839 1840 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1841 ia_freq = gpu_freq; 1842 sandybridge_pcode_read(dev_priv, 1843 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1844 &ia_freq); 1845 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1846 intel_gpu_freq(dev_priv, (gpu_freq * 1847 (IS_GEN9_BC(dev_priv) ? 1848 GEN9_FREQ_SCALER : 1))), 1849 ((ia_freq >> 0) & 0xff) * 100, 1850 ((ia_freq >> 8) & 0xff) * 100); 1851 } 1852 1853 mutex_unlock(&dev_priv->rps.hw_lock); 1854 1855 out: 1856 intel_runtime_pm_put(dev_priv); 1857 return ret; 1858 } 1859 1860 static int i915_opregion(struct seq_file *m, void *unused) 1861 { 1862 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1863 struct drm_device *dev = &dev_priv->drm; 1864 struct intel_opregion *opregion = &dev_priv->opregion; 1865 int ret; 1866 1867 ret = mutex_lock_interruptible(&dev->struct_mutex); 1868 if (ret) 1869 goto out; 1870 1871 if (opregion->header) 1872 seq_write(m, opregion->header, OPREGION_SIZE); 1873 1874 mutex_unlock(&dev->struct_mutex); 1875 1876 out: 1877 return 0; 1878 } 1879 1880 static int i915_vbt(struct seq_file *m, void *unused) 1881 { 1882 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 1883 1884 if (opregion->vbt) 1885 seq_write(m, opregion->vbt, opregion->vbt_size); 1886 1887 return 0; 1888 } 1889 1890 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1891 { 1892 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1893 struct drm_device *dev = &dev_priv->drm; 1894 struct intel_framebuffer *fbdev_fb = NULL; 1895 struct drm_framebuffer *drm_fb; 1896 int ret; 1897 1898 ret = mutex_lock_interruptible(&dev->struct_mutex); 1899 if (ret) 1900 return ret; 1901 1902 #ifdef CONFIG_DRM_FBDEV_EMULATION 1903 if (dev_priv->fbdev) { 1904 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); 1905 1906 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1907 fbdev_fb->base.width, 1908 fbdev_fb->base.height, 1909 fbdev_fb->base.format->depth, 1910 fbdev_fb->base.format->cpp[0] * 8, 1911 fbdev_fb->base.modifier, 1912 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1913 describe_obj(m, fbdev_fb->obj); 1914 seq_putc(m, '\n'); 1915 } 1916 #endif 1917 1918 mutex_lock(&dev->mode_config.fb_lock); 1919 drm_for_each_fb(drm_fb, dev) { 1920 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1921 if (fb == fbdev_fb) 1922 continue; 1923 1924 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1925 fb->base.width, 1926 fb->base.height, 1927 fb->base.format->depth, 1928 fb->base.format->cpp[0] * 8, 1929 fb->base.modifier, 1930 drm_framebuffer_read_refcount(&fb->base)); 1931 describe_obj(m, fb->obj); 1932 seq_putc(m, '\n'); 1933 } 1934 mutex_unlock(&dev->mode_config.fb_lock); 1935 mutex_unlock(&dev->struct_mutex); 1936 1937 return 0; 1938 } 1939 1940 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 1941 { 1942 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", 1943 ring->space, ring->head, ring->tail); 1944 } 1945 1946 static int i915_context_status(struct seq_file *m, void *unused) 1947 { 1948 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1949 struct drm_device *dev = &dev_priv->drm; 1950 struct intel_engine_cs *engine; 1951 struct i915_gem_context *ctx; 1952 enum intel_engine_id id; 1953 int ret; 1954 1955 ret = mutex_lock_interruptible(&dev->struct_mutex); 1956 if (ret) 1957 return ret; 1958 1959 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1960 seq_printf(m, "HW context %u ", ctx->hw_id); 1961 if (ctx->pid) { 1962 struct task_struct *task; 1963 1964 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1965 if (task) { 1966 seq_printf(m, "(%s [%d]) ", 1967 task->comm, task->pid); 1968 put_task_struct(task); 1969 } 1970 } else if (IS_ERR(ctx->file_priv)) { 1971 seq_puts(m, "(deleted) "); 1972 } else { 1973 seq_puts(m, "(kernel) "); 1974 } 1975 1976 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1977 seq_putc(m, '\n'); 1978 1979 for_each_engine(engine, dev_priv, id) { 1980 struct intel_context *ce = &ctx->engine[engine->id]; 1981 1982 seq_printf(m, "%s: ", engine->name); 1983 seq_putc(m, ce->initialised ? 'I' : 'i'); 1984 if (ce->state) 1985 describe_obj(m, ce->state->obj); 1986 if (ce->ring) 1987 describe_ctx_ring(m, ce->ring); 1988 seq_putc(m, '\n'); 1989 } 1990 1991 seq_putc(m, '\n'); 1992 } 1993 1994 mutex_unlock(&dev->struct_mutex); 1995 1996 return 0; 1997 } 1998 1999 static void i915_dump_lrc_obj(struct seq_file *m, 2000 struct i915_gem_context *ctx, 2001 struct intel_engine_cs *engine) 2002 { 2003 struct i915_vma *vma = ctx->engine[engine->id].state; 2004 struct page *page; 2005 int j; 2006 2007 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); 2008 2009 if (!vma) { 2010 seq_puts(m, "\tFake context\n"); 2011 return; 2012 } 2013 2014 if (vma->flags & I915_VMA_GLOBAL_BIND) 2015 seq_printf(m, "\tBound in GGTT at 0x%08x\n", 2016 i915_ggtt_offset(vma)); 2017 2018 if (i915_gem_object_pin_pages(vma->obj)) { 2019 seq_puts(m, "\tFailed to get pages for context object\n\n"); 2020 return; 2021 } 2022 2023 page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN); 2024 if (page) { 2025 u32 *reg_state = kmap_atomic(page); 2026 2027 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2028 seq_printf(m, 2029 "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2030 j * 4, 2031 reg_state[j], reg_state[j + 1], 2032 reg_state[j + 2], reg_state[j + 3]); 2033 } 2034 kunmap_atomic(reg_state); 2035 } 2036 2037 i915_gem_object_unpin_pages(vma->obj); 2038 seq_putc(m, '\n'); 2039 } 2040 2041 static int i915_dump_lrc(struct seq_file *m, void *unused) 2042 { 2043 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2044 struct drm_device *dev = &dev_priv->drm; 2045 struct intel_engine_cs *engine; 2046 struct i915_gem_context *ctx; 2047 enum intel_engine_id id; 2048 int ret; 2049 2050 if (!i915.enable_execlists) { 2051 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2052 return 0; 2053 } 2054 2055 ret = mutex_lock_interruptible(&dev->struct_mutex); 2056 if (ret) 2057 return ret; 2058 2059 list_for_each_entry(ctx, &dev_priv->context_list, link) 2060 for_each_engine(engine, dev_priv, id) 2061 i915_dump_lrc_obj(m, ctx, engine); 2062 2063 mutex_unlock(&dev->struct_mutex); 2064 2065 return 0; 2066 } 2067 2068 static const char *swizzle_string(unsigned swizzle) 2069 { 2070 switch (swizzle) { 2071 case I915_BIT_6_SWIZZLE_NONE: 2072 return "none"; 2073 case I915_BIT_6_SWIZZLE_9: 2074 return "bit9"; 2075 case I915_BIT_6_SWIZZLE_9_10: 2076 return "bit9/bit10"; 2077 case I915_BIT_6_SWIZZLE_9_11: 2078 return "bit9/bit11"; 2079 case I915_BIT_6_SWIZZLE_9_10_11: 2080 return "bit9/bit10/bit11"; 2081 case I915_BIT_6_SWIZZLE_9_17: 2082 return "bit9/bit17"; 2083 case I915_BIT_6_SWIZZLE_9_10_17: 2084 return "bit9/bit10/bit17"; 2085 case I915_BIT_6_SWIZZLE_UNKNOWN: 2086 return "unknown"; 2087 } 2088 2089 return "bug"; 2090 } 2091 2092 static int i915_swizzle_info(struct seq_file *m, void *data) 2093 { 2094 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2095 2096 intel_runtime_pm_get(dev_priv); 2097 2098 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2099 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2100 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2101 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2102 2103 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { 2104 seq_printf(m, "DDC = 0x%08x\n", 2105 I915_READ(DCC)); 2106 seq_printf(m, "DDC2 = 0x%08x\n", 2107 I915_READ(DCC2)); 2108 seq_printf(m, "C0DRB3 = 0x%04x\n", 2109 I915_READ16(C0DRB3)); 2110 seq_printf(m, "C1DRB3 = 0x%04x\n", 2111 I915_READ16(C1DRB3)); 2112 } else if (INTEL_GEN(dev_priv) >= 6) { 2113 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2114 I915_READ(MAD_DIMM_C0)); 2115 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2116 I915_READ(MAD_DIMM_C1)); 2117 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2118 I915_READ(MAD_DIMM_C2)); 2119 seq_printf(m, "TILECTL = 0x%08x\n", 2120 I915_READ(TILECTL)); 2121 if (INTEL_GEN(dev_priv) >= 8) 2122 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2123 I915_READ(GAMTARBMODE)); 2124 else 2125 seq_printf(m, "ARB_MODE = 0x%08x\n", 2126 I915_READ(ARB_MODE)); 2127 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2128 I915_READ(DISP_ARB_CTL)); 2129 } 2130 2131 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2132 seq_puts(m, "L-shaped memory detected\n"); 2133 2134 intel_runtime_pm_put(dev_priv); 2135 2136 return 0; 2137 } 2138 2139 static int per_file_ctx(int id, void *ptr, void *data) 2140 { 2141 struct i915_gem_context *ctx = ptr; 2142 struct seq_file *m = data; 2143 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2144 2145 if (!ppgtt) { 2146 seq_printf(m, " no ppgtt for context %d\n", 2147 ctx->user_handle); 2148 return 0; 2149 } 2150 2151 if (i915_gem_context_is_default(ctx)) 2152 seq_puts(m, " default context:\n"); 2153 else 2154 seq_printf(m, " context %d:\n", ctx->user_handle); 2155 ppgtt->debug_dump(ppgtt, m); 2156 2157 return 0; 2158 } 2159 2160 static void gen8_ppgtt_info(struct seq_file *m, 2161 struct drm_i915_private *dev_priv) 2162 { 2163 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2164 struct intel_engine_cs *engine; 2165 enum intel_engine_id id; 2166 int i; 2167 2168 if (!ppgtt) 2169 return; 2170 2171 for_each_engine(engine, dev_priv, id) { 2172 seq_printf(m, "%s\n", engine->name); 2173 for (i = 0; i < 4; i++) { 2174 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2175 pdp <<= 32; 2176 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2177 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2178 } 2179 } 2180 } 2181 2182 static void gen6_ppgtt_info(struct seq_file *m, 2183 struct drm_i915_private *dev_priv) 2184 { 2185 struct intel_engine_cs *engine; 2186 enum intel_engine_id id; 2187 2188 if (IS_GEN6(dev_priv)) 2189 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2190 2191 for_each_engine(engine, dev_priv, id) { 2192 seq_printf(m, "%s\n", engine->name); 2193 if (IS_GEN7(dev_priv)) 2194 seq_printf(m, "GFX_MODE: 0x%08x\n", 2195 I915_READ(RING_MODE_GEN7(engine))); 2196 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2197 I915_READ(RING_PP_DIR_BASE(engine))); 2198 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2199 I915_READ(RING_PP_DIR_BASE_READ(engine))); 2200 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", 2201 I915_READ(RING_PP_DIR_DCLV(engine))); 2202 } 2203 if (dev_priv->mm.aliasing_ppgtt) { 2204 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2205 2206 seq_puts(m, "aliasing PPGTT:\n"); 2207 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2208 2209 ppgtt->debug_dump(ppgtt, m); 2210 } 2211 2212 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2213 } 2214 2215 static int i915_ppgtt_info(struct seq_file *m, void *data) 2216 { 2217 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2218 struct drm_device *dev = &dev_priv->drm; 2219 struct drm_file *file; 2220 int ret; 2221 2222 mutex_lock(&dev->filelist_mutex); 2223 ret = mutex_lock_interruptible(&dev->struct_mutex); 2224 if (ret) 2225 goto out_unlock; 2226 2227 intel_runtime_pm_get(dev_priv); 2228 2229 if (INTEL_GEN(dev_priv) >= 8) 2230 gen8_ppgtt_info(m, dev_priv); 2231 else if (INTEL_GEN(dev_priv) >= 6) 2232 gen6_ppgtt_info(m, dev_priv); 2233 2234 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2235 struct drm_i915_file_private *file_priv = file->driver_priv; 2236 struct task_struct *task; 2237 2238 task = get_pid_task(file->pid, PIDTYPE_PID); 2239 if (!task) { 2240 ret = -ESRCH; 2241 goto out_rpm; 2242 } 2243 seq_printf(m, "\nproc: %s\n", task->comm); 2244 put_task_struct(task); 2245 idr_for_each(&file_priv->context_idr, per_file_ctx, 2246 (void *)(unsigned long)m); 2247 } 2248 2249 out_rpm: 2250 intel_runtime_pm_put(dev_priv); 2251 mutex_unlock(&dev->struct_mutex); 2252 out_unlock: 2253 mutex_unlock(&dev->filelist_mutex); 2254 return ret; 2255 } 2256 2257 static int count_irq_waiters(struct drm_i915_private *i915) 2258 { 2259 struct intel_engine_cs *engine; 2260 enum intel_engine_id id; 2261 int count = 0; 2262 2263 for_each_engine(engine, i915, id) 2264 count += intel_engine_has_waiter(engine); 2265 2266 return count; 2267 } 2268 2269 static const char *rps_power_to_str(unsigned int power) 2270 { 2271 static const char * const strings[] = { 2272 [LOW_POWER] = "low power", 2273 [BETWEEN] = "mixed", 2274 [HIGH_POWER] = "high power", 2275 }; 2276 2277 if (power >= ARRAY_SIZE(strings) || !strings[power]) 2278 return "unknown"; 2279 2280 return strings[power]; 2281 } 2282 2283 static int i915_rps_boost_info(struct seq_file *m, void *data) 2284 { 2285 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2286 struct drm_device *dev = &dev_priv->drm; 2287 struct drm_file *file; 2288 2289 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2290 seq_printf(m, "GPU busy? %s [%d requests]\n", 2291 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); 2292 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2293 seq_printf(m, "Frequency requested %d\n", 2294 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 2295 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2296 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2297 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2298 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2299 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2300 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 2301 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 2302 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 2303 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); 2304 2305 mutex_lock(&dev->filelist_mutex); 2306 spin_lock(&dev_priv->rps.client_lock); 2307 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2308 struct drm_i915_file_private *file_priv = file->driver_priv; 2309 struct task_struct *task; 2310 2311 rcu_read_lock(); 2312 task = pid_task(file->pid, PIDTYPE_PID); 2313 seq_printf(m, "%s [%d]: %d boosts%s\n", 2314 task ? task->comm : "<unknown>", 2315 task ? task->pid : -1, 2316 file_priv->rps.boosts, 2317 list_empty(&file_priv->rps.link) ? "" : ", active"); 2318 rcu_read_unlock(); 2319 } 2320 seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts); 2321 spin_unlock(&dev_priv->rps.client_lock); 2322 mutex_unlock(&dev->filelist_mutex); 2323 2324 if (INTEL_GEN(dev_priv) >= 6 && 2325 dev_priv->rps.enabled && 2326 dev_priv->gt.active_requests) { 2327 u32 rpup, rpupei; 2328 u32 rpdown, rpdownei; 2329 2330 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2331 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 2332 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 2333 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 2334 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 2335 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2336 2337 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 2338 rps_power_to_str(dev_priv->rps.power)); 2339 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 2340 rpup && rpupei ? 100 * rpup / rpupei : 0, 2341 dev_priv->rps.up_threshold); 2342 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 2343 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 2344 dev_priv->rps.down_threshold); 2345 } else { 2346 seq_puts(m, "\nRPS Autotuning inactive\n"); 2347 } 2348 2349 return 0; 2350 } 2351 2352 static int i915_llc(struct seq_file *m, void *data) 2353 { 2354 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2355 const bool edram = INTEL_GEN(dev_priv) > 8; 2356 2357 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 2358 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC", 2359 intel_uncore_edram_size(dev_priv)/1024/1024); 2360 2361 return 0; 2362 } 2363 2364 static int i915_huc_load_status_info(struct seq_file *m, void *data) 2365 { 2366 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2367 struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; 2368 2369 if (!HAS_HUC_UCODE(dev_priv)) 2370 return 0; 2371 2372 seq_puts(m, "HuC firmware status:\n"); 2373 seq_printf(m, "\tpath: %s\n", huc_fw->path); 2374 seq_printf(m, "\tfetch: %s\n", 2375 intel_uc_fw_status_repr(huc_fw->fetch_status)); 2376 seq_printf(m, "\tload: %s\n", 2377 intel_uc_fw_status_repr(huc_fw->load_status)); 2378 seq_printf(m, "\tversion wanted: %d.%d\n", 2379 huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted); 2380 seq_printf(m, "\tversion found: %d.%d\n", 2381 huc_fw->major_ver_found, huc_fw->minor_ver_found); 2382 seq_printf(m, "\theader: offset is %d; size = %d\n", 2383 huc_fw->header_offset, huc_fw->header_size); 2384 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2385 huc_fw->ucode_offset, huc_fw->ucode_size); 2386 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2387 huc_fw->rsa_offset, huc_fw->rsa_size); 2388 2389 intel_runtime_pm_get(dev_priv); 2390 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); 2391 intel_runtime_pm_put(dev_priv); 2392 2393 return 0; 2394 } 2395 2396 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2397 { 2398 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2399 struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; 2400 u32 tmp, i; 2401 2402 if (!HAS_GUC_UCODE(dev_priv)) 2403 return 0; 2404 2405 seq_printf(m, "GuC firmware status:\n"); 2406 seq_printf(m, "\tpath: %s\n", 2407 guc_fw->path); 2408 seq_printf(m, "\tfetch: %s\n", 2409 intel_uc_fw_status_repr(guc_fw->fetch_status)); 2410 seq_printf(m, "\tload: %s\n", 2411 intel_uc_fw_status_repr(guc_fw->load_status)); 2412 seq_printf(m, "\tversion wanted: %d.%d\n", 2413 guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted); 2414 seq_printf(m, "\tversion found: %d.%d\n", 2415 guc_fw->major_ver_found, guc_fw->minor_ver_found); 2416 seq_printf(m, "\theader: offset is %d; size = %d\n", 2417 guc_fw->header_offset, guc_fw->header_size); 2418 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2419 guc_fw->ucode_offset, guc_fw->ucode_size); 2420 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2421 guc_fw->rsa_offset, guc_fw->rsa_size); 2422 2423 intel_runtime_pm_get(dev_priv); 2424 2425 tmp = I915_READ(GUC_STATUS); 2426 2427 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2428 seq_printf(m, "\tBootrom status = 0x%x\n", 2429 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2430 seq_printf(m, "\tuKernel status = 0x%x\n", 2431 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2432 seq_printf(m, "\tMIA Core status = 0x%x\n", 2433 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2434 seq_puts(m, "\nScratch registers:\n"); 2435 for (i = 0; i < 16; i++) 2436 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2437 2438 intel_runtime_pm_put(dev_priv); 2439 2440 return 0; 2441 } 2442 2443 static void i915_guc_log_info(struct seq_file *m, 2444 struct drm_i915_private *dev_priv) 2445 { 2446 struct intel_guc *guc = &dev_priv->guc; 2447 2448 seq_puts(m, "\nGuC logging stats:\n"); 2449 2450 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", 2451 guc->log.flush_count[GUC_ISR_LOG_BUFFER], 2452 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); 2453 2454 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", 2455 guc->log.flush_count[GUC_DPC_LOG_BUFFER], 2456 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); 2457 2458 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", 2459 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], 2460 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); 2461 2462 seq_printf(m, "\tTotal flush interrupt count: %u\n", 2463 guc->log.flush_interrupt_count); 2464 2465 seq_printf(m, "\tCapture miss count: %u\n", 2466 guc->log.capture_miss_count); 2467 } 2468 2469 static void i915_guc_client_info(struct seq_file *m, 2470 struct drm_i915_private *dev_priv, 2471 struct i915_guc_client *client) 2472 { 2473 struct intel_engine_cs *engine; 2474 enum intel_engine_id id; 2475 uint64_t tot = 0; 2476 2477 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2478 client->priority, client->stage_id, client->proc_desc_offset); 2479 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n", 2480 client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); 2481 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2482 client->wq_size, client->wq_offset, client->wq_tail); 2483 2484 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); 2485 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2486 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2487 2488 for_each_engine(engine, dev_priv, id) { 2489 u64 submissions = client->submissions[id]; 2490 tot += submissions; 2491 seq_printf(m, "\tSubmissions: %llu %s\n", 2492 submissions, engine->name); 2493 } 2494 seq_printf(m, "\tTotal: %llu\n", tot); 2495 } 2496 2497 static int i915_guc_info(struct seq_file *m, void *data) 2498 { 2499 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2500 const struct intel_guc *guc = &dev_priv->guc; 2501 struct intel_engine_cs *engine; 2502 enum intel_engine_id id; 2503 u64 total; 2504 2505 if (!guc->execbuf_client) { 2506 seq_printf(m, "GuC submission %s\n", 2507 HAS_GUC_SCHED(dev_priv) ? 2508 "disabled" : 2509 "not supported"); 2510 return 0; 2511 } 2512 2513 seq_printf(m, "Doorbell map:\n"); 2514 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); 2515 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); 2516 2517 seq_printf(m, "GuC total action count: %llu\n", guc->action_count); 2518 seq_printf(m, "GuC action failure count: %u\n", guc->action_fail); 2519 seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd); 2520 seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status); 2521 seq_printf(m, "GuC last action error code: %d\n", guc->action_err); 2522 2523 total = 0; 2524 seq_printf(m, "\nGuC submissions:\n"); 2525 for_each_engine(engine, dev_priv, id) { 2526 u64 submissions = guc->submissions[id]; 2527 total += submissions; 2528 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", 2529 engine->name, submissions, guc->last_seqno[id]); 2530 } 2531 seq_printf(m, "\t%s: %llu\n", "Total", total); 2532 2533 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); 2534 i915_guc_client_info(m, dev_priv, guc->execbuf_client); 2535 2536 i915_guc_log_info(m, dev_priv); 2537 2538 /* Add more as required ... */ 2539 2540 return 0; 2541 } 2542 2543 static int i915_guc_log_dump(struct seq_file *m, void *data) 2544 { 2545 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2546 struct drm_i915_gem_object *obj; 2547 int i = 0, pg; 2548 2549 if (!dev_priv->guc.log.vma) 2550 return 0; 2551 2552 obj = dev_priv->guc.log.vma->obj; 2553 for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) { 2554 u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg)); 2555 2556 for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) 2557 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2558 *(log + i), *(log + i + 1), 2559 *(log + i + 2), *(log + i + 3)); 2560 2561 kunmap_atomic(log); 2562 } 2563 2564 seq_putc(m, '\n'); 2565 2566 return 0; 2567 } 2568 2569 static int i915_guc_log_control_get(void *data, u64 *val) 2570 { 2571 struct drm_device *dev = data; 2572 struct drm_i915_private *dev_priv = to_i915(dev); 2573 2574 if (!dev_priv->guc.log.vma) 2575 return -EINVAL; 2576 2577 *val = i915.guc_log_level; 2578 2579 return 0; 2580 } 2581 2582 static int i915_guc_log_control_set(void *data, u64 val) 2583 { 2584 struct drm_device *dev = data; 2585 struct drm_i915_private *dev_priv = to_i915(dev); 2586 int ret; 2587 2588 if (!dev_priv->guc.log.vma) 2589 return -EINVAL; 2590 2591 ret = mutex_lock_interruptible(&dev->struct_mutex); 2592 if (ret) 2593 return ret; 2594 2595 intel_runtime_pm_get(dev_priv); 2596 ret = i915_guc_log_control(dev_priv, val); 2597 intel_runtime_pm_put(dev_priv); 2598 2599 mutex_unlock(&dev->struct_mutex); 2600 return ret; 2601 } 2602 2603 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, 2604 i915_guc_log_control_get, i915_guc_log_control_set, 2605 "%lld\n"); 2606 2607 static const char *psr2_live_status(u32 val) 2608 { 2609 static const char * const live_status[] = { 2610 "IDLE", 2611 "CAPTURE", 2612 "CAPTURE_FS", 2613 "SLEEP", 2614 "BUFON_FW", 2615 "ML_UP", 2616 "SU_STANDBY", 2617 "FAST_SLEEP", 2618 "DEEP_SLEEP", 2619 "BUF_ON", 2620 "TG_ON" 2621 }; 2622 2623 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; 2624 if (val < ARRAY_SIZE(live_status)) 2625 return live_status[val]; 2626 2627 return "unknown"; 2628 } 2629 2630 static int i915_edp_psr_status(struct seq_file *m, void *data) 2631 { 2632 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2633 u32 psrperf = 0; 2634 u32 stat[3]; 2635 enum pipe pipe; 2636 bool enabled = false; 2637 2638 if (!HAS_PSR(dev_priv)) { 2639 seq_puts(m, "PSR not supported\n"); 2640 return 0; 2641 } 2642 2643 intel_runtime_pm_get(dev_priv); 2644 2645 mutex_lock(&dev_priv->psr.lock); 2646 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2647 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2648 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2649 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2650 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2651 dev_priv->psr.busy_frontbuffer_bits); 2652 seq_printf(m, "Re-enable work scheduled: %s\n", 2653 yesno(work_busy(&dev_priv->psr.work.work))); 2654 2655 if (HAS_DDI(dev_priv)) { 2656 if (dev_priv->psr.psr2_support) 2657 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2658 else 2659 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2660 } else { 2661 for_each_pipe(dev_priv, pipe) { 2662 enum transcoder cpu_transcoder = 2663 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2664 enum intel_display_power_domain power_domain; 2665 2666 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2667 if (!intel_display_power_get_if_enabled(dev_priv, 2668 power_domain)) 2669 continue; 2670 2671 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2672 VLV_EDP_PSR_CURR_STATE_MASK; 2673 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2674 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2675 enabled = true; 2676 2677 intel_display_power_put(dev_priv, power_domain); 2678 } 2679 } 2680 2681 seq_printf(m, "Main link in standby mode: %s\n", 2682 yesno(dev_priv->psr.link_standby)); 2683 2684 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2685 2686 if (!HAS_DDI(dev_priv)) 2687 for_each_pipe(dev_priv, pipe) { 2688 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2689 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2690 seq_printf(m, " pipe %c", pipe_name(pipe)); 2691 } 2692 seq_puts(m, "\n"); 2693 2694 /* 2695 * VLV/CHV PSR has no kind of performance counter 2696 * SKL+ Perf counter is reset to 0 everytime DC state is entered 2697 */ 2698 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2699 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2700 EDP_PSR_PERF_CNT_MASK; 2701 2702 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2703 } 2704 if (dev_priv->psr.psr2_support) { 2705 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL); 2706 2707 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n", 2708 psr2, psr2_live_status(psr2)); 2709 } 2710 mutex_unlock(&dev_priv->psr.lock); 2711 2712 intel_runtime_pm_put(dev_priv); 2713 return 0; 2714 } 2715 2716 static int i915_sink_crc(struct seq_file *m, void *data) 2717 { 2718 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2719 struct drm_device *dev = &dev_priv->drm; 2720 struct intel_connector *connector; 2721 struct drm_connector_list_iter conn_iter; 2722 struct intel_dp *intel_dp = NULL; 2723 int ret; 2724 u8 crc[6]; 2725 2726 drm_modeset_lock_all(dev); 2727 drm_connector_list_iter_begin(dev, &conn_iter); 2728 for_each_intel_connector_iter(connector, &conn_iter) { 2729 struct drm_crtc *crtc; 2730 2731 if (!connector->base.state->best_encoder) 2732 continue; 2733 2734 crtc = connector->base.state->crtc; 2735 if (!crtc->state->active) 2736 continue; 2737 2738 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 2739 continue; 2740 2741 intel_dp = enc_to_intel_dp(connector->base.state->best_encoder); 2742 2743 ret = intel_dp_sink_crc(intel_dp, crc); 2744 if (ret) 2745 goto out; 2746 2747 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2748 crc[0], crc[1], crc[2], 2749 crc[3], crc[4], crc[5]); 2750 goto out; 2751 } 2752 ret = -ENODEV; 2753 out: 2754 drm_connector_list_iter_end(&conn_iter); 2755 drm_modeset_unlock_all(dev); 2756 return ret; 2757 } 2758 2759 static int i915_energy_uJ(struct seq_file *m, void *data) 2760 { 2761 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2762 u64 power; 2763 u32 units; 2764 2765 if (INTEL_GEN(dev_priv) < 6) 2766 return -ENODEV; 2767 2768 intel_runtime_pm_get(dev_priv); 2769 2770 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2771 power = (power & 0x1f00) >> 8; 2772 units = 1000000 / (1 << power); /* convert to uJ */ 2773 power = I915_READ(MCH_SECP_NRG_STTS); 2774 power *= units; 2775 2776 intel_runtime_pm_put(dev_priv); 2777 2778 seq_printf(m, "%llu", (long long unsigned)power); 2779 2780 return 0; 2781 } 2782 2783 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2784 { 2785 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2786 struct pci_dev *pdev = dev_priv->drm.pdev; 2787 2788 if (!HAS_RUNTIME_PM(dev_priv)) 2789 seq_puts(m, "Runtime power management not supported\n"); 2790 2791 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 2792 seq_printf(m, "IRQs disabled: %s\n", 2793 yesno(!intel_irqs_enabled(dev_priv))); 2794 #ifdef CONFIG_PM 2795 seq_printf(m, "Usage count: %d\n", 2796 atomic_read(&dev_priv->drm.dev->power.usage_count)); 2797 #else 2798 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2799 #endif 2800 seq_printf(m, "PCI device power state: %s [%d]\n", 2801 pci_power_name(pdev->current_state), 2802 pdev->current_state); 2803 2804 return 0; 2805 } 2806 2807 static int i915_power_domain_info(struct seq_file *m, void *unused) 2808 { 2809 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2810 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2811 int i; 2812 2813 mutex_lock(&power_domains->lock); 2814 2815 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2816 for (i = 0; i < power_domains->power_well_count; i++) { 2817 struct i915_power_well *power_well; 2818 enum intel_display_power_domain power_domain; 2819 2820 power_well = &power_domains->power_wells[i]; 2821 seq_printf(m, "%-25s %d\n", power_well->name, 2822 power_well->count); 2823 2824 for_each_power_domain(power_domain, power_well->domains) 2825 seq_printf(m, " %-23s %d\n", 2826 intel_display_power_domain_str(power_domain), 2827 power_domains->domain_use_count[power_domain]); 2828 } 2829 2830 mutex_unlock(&power_domains->lock); 2831 2832 return 0; 2833 } 2834 2835 static int i915_dmc_info(struct seq_file *m, void *unused) 2836 { 2837 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2838 struct intel_csr *csr; 2839 2840 if (!HAS_CSR(dev_priv)) { 2841 seq_puts(m, "not supported\n"); 2842 return 0; 2843 } 2844 2845 csr = &dev_priv->csr; 2846 2847 intel_runtime_pm_get(dev_priv); 2848 2849 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2850 seq_printf(m, "path: %s\n", csr->fw_path); 2851 2852 if (!csr->dmc_payload) 2853 goto out; 2854 2855 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2856 CSR_VERSION_MINOR(csr->version)); 2857 2858 if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) { 2859 seq_printf(m, "DC3 -> DC5 count: %d\n", 2860 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2861 seq_printf(m, "DC5 -> DC6 count: %d\n", 2862 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2863 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { 2864 seq_printf(m, "DC3 -> DC5 count: %d\n", 2865 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2866 } 2867 2868 out: 2869 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2870 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2871 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2872 2873 intel_runtime_pm_put(dev_priv); 2874 2875 return 0; 2876 } 2877 2878 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2879 struct drm_display_mode *mode) 2880 { 2881 int i; 2882 2883 for (i = 0; i < tabs; i++) 2884 seq_putc(m, '\t'); 2885 2886 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2887 mode->base.id, mode->name, 2888 mode->vrefresh, mode->clock, 2889 mode->hdisplay, mode->hsync_start, 2890 mode->hsync_end, mode->htotal, 2891 mode->vdisplay, mode->vsync_start, 2892 mode->vsync_end, mode->vtotal, 2893 mode->type, mode->flags); 2894 } 2895 2896 static void intel_encoder_info(struct seq_file *m, 2897 struct intel_crtc *intel_crtc, 2898 struct intel_encoder *intel_encoder) 2899 { 2900 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2901 struct drm_device *dev = &dev_priv->drm; 2902 struct drm_crtc *crtc = &intel_crtc->base; 2903 struct intel_connector *intel_connector; 2904 struct drm_encoder *encoder; 2905 2906 encoder = &intel_encoder->base; 2907 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2908 encoder->base.id, encoder->name); 2909 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2910 struct drm_connector *connector = &intel_connector->base; 2911 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2912 connector->base.id, 2913 connector->name, 2914 drm_get_connector_status_name(connector->status)); 2915 if (connector->status == connector_status_connected) { 2916 struct drm_display_mode *mode = &crtc->mode; 2917 seq_printf(m, ", mode:\n"); 2918 intel_seq_print_mode(m, 2, mode); 2919 } else { 2920 seq_putc(m, '\n'); 2921 } 2922 } 2923 } 2924 2925 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2926 { 2927 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2928 struct drm_device *dev = &dev_priv->drm; 2929 struct drm_crtc *crtc = &intel_crtc->base; 2930 struct intel_encoder *intel_encoder; 2931 struct drm_plane_state *plane_state = crtc->primary->state; 2932 struct drm_framebuffer *fb = plane_state->fb; 2933 2934 if (fb) 2935 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2936 fb->base.id, plane_state->src_x >> 16, 2937 plane_state->src_y >> 16, fb->width, fb->height); 2938 else 2939 seq_puts(m, "\tprimary plane disabled\n"); 2940 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2941 intel_encoder_info(m, intel_crtc, intel_encoder); 2942 } 2943 2944 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2945 { 2946 struct drm_display_mode *mode = panel->fixed_mode; 2947 2948 seq_printf(m, "\tfixed mode:\n"); 2949 intel_seq_print_mode(m, 2, mode); 2950 } 2951 2952 static void intel_dp_info(struct seq_file *m, 2953 struct intel_connector *intel_connector) 2954 { 2955 struct intel_encoder *intel_encoder = intel_connector->encoder; 2956 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2957 2958 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2959 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2960 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 2961 intel_panel_info(m, &intel_connector->panel); 2962 2963 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 2964 &intel_dp->aux); 2965 } 2966 2967 static void intel_dp_mst_info(struct seq_file *m, 2968 struct intel_connector *intel_connector) 2969 { 2970 struct intel_encoder *intel_encoder = intel_connector->encoder; 2971 struct intel_dp_mst_encoder *intel_mst = 2972 enc_to_mst(&intel_encoder->base); 2973 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2974 struct intel_dp *intel_dp = &intel_dig_port->dp; 2975 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2976 intel_connector->port); 2977 2978 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2979 } 2980 2981 static void intel_hdmi_info(struct seq_file *m, 2982 struct intel_connector *intel_connector) 2983 { 2984 struct intel_encoder *intel_encoder = intel_connector->encoder; 2985 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2986 2987 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2988 } 2989 2990 static void intel_lvds_info(struct seq_file *m, 2991 struct intel_connector *intel_connector) 2992 { 2993 intel_panel_info(m, &intel_connector->panel); 2994 } 2995 2996 static void intel_connector_info(struct seq_file *m, 2997 struct drm_connector *connector) 2998 { 2999 struct intel_connector *intel_connector = to_intel_connector(connector); 3000 struct intel_encoder *intel_encoder = intel_connector->encoder; 3001 struct drm_display_mode *mode; 3002 3003 seq_printf(m, "connector %d: type %s, status: %s\n", 3004 connector->base.id, connector->name, 3005 drm_get_connector_status_name(connector->status)); 3006 if (connector->status == connector_status_connected) { 3007 seq_printf(m, "\tname: %s\n", connector->display_info.name); 3008 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 3009 connector->display_info.width_mm, 3010 connector->display_info.height_mm); 3011 seq_printf(m, "\tsubpixel order: %s\n", 3012 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 3013 seq_printf(m, "\tCEA rev: %d\n", 3014 connector->display_info.cea_rev); 3015 } 3016 3017 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 3018 return; 3019 3020 switch (connector->connector_type) { 3021 case DRM_MODE_CONNECTOR_DisplayPort: 3022 case DRM_MODE_CONNECTOR_eDP: 3023 if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 3024 intel_dp_mst_info(m, intel_connector); 3025 else 3026 intel_dp_info(m, intel_connector); 3027 break; 3028 case DRM_MODE_CONNECTOR_LVDS: 3029 if (intel_encoder->type == INTEL_OUTPUT_LVDS) 3030 intel_lvds_info(m, intel_connector); 3031 break; 3032 case DRM_MODE_CONNECTOR_HDMIA: 3033 if (intel_encoder->type == INTEL_OUTPUT_HDMI || 3034 intel_encoder->type == INTEL_OUTPUT_UNKNOWN) 3035 intel_hdmi_info(m, intel_connector); 3036 break; 3037 default: 3038 break; 3039 } 3040 3041 seq_printf(m, "\tmodes:\n"); 3042 list_for_each_entry(mode, &connector->modes, head) 3043 intel_seq_print_mode(m, 2, mode); 3044 } 3045 3046 static bool cursor_active(struct drm_i915_private *dev_priv, int pipe) 3047 { 3048 u32 state; 3049 3050 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 3051 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 3052 else 3053 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 3054 3055 return state; 3056 } 3057 3058 static bool cursor_position(struct drm_i915_private *dev_priv, 3059 int pipe, int *x, int *y) 3060 { 3061 u32 pos; 3062 3063 pos = I915_READ(CURPOS(pipe)); 3064 3065 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 3066 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 3067 *x = -*x; 3068 3069 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 3070 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 3071 *y = -*y; 3072 3073 return cursor_active(dev_priv, pipe); 3074 } 3075 3076 static const char *plane_type(enum drm_plane_type type) 3077 { 3078 switch (type) { 3079 case DRM_PLANE_TYPE_OVERLAY: 3080 return "OVL"; 3081 case DRM_PLANE_TYPE_PRIMARY: 3082 return "PRI"; 3083 case DRM_PLANE_TYPE_CURSOR: 3084 return "CUR"; 3085 /* 3086 * Deliberately omitting default: to generate compiler warnings 3087 * when a new drm_plane_type gets added. 3088 */ 3089 } 3090 3091 return "unknown"; 3092 } 3093 3094 static const char *plane_rotation(unsigned int rotation) 3095 { 3096 static char buf[48]; 3097 /* 3098 * According to doc only one DRM_ROTATE_ is allowed but this 3099 * will print them all to visualize if the values are misused 3100 */ 3101 snprintf(buf, sizeof(buf), 3102 "%s%s%s%s%s%s(0x%08x)", 3103 (rotation & DRM_ROTATE_0) ? "0 " : "", 3104 (rotation & DRM_ROTATE_90) ? "90 " : "", 3105 (rotation & DRM_ROTATE_180) ? "180 " : "", 3106 (rotation & DRM_ROTATE_270) ? "270 " : "", 3107 (rotation & DRM_REFLECT_X) ? "FLIPX " : "", 3108 (rotation & DRM_REFLECT_Y) ? "FLIPY " : "", 3109 rotation); 3110 3111 return buf; 3112 } 3113 3114 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3115 { 3116 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3117 struct drm_device *dev = &dev_priv->drm; 3118 struct intel_plane *intel_plane; 3119 3120 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3121 struct drm_plane_state *state; 3122 struct drm_plane *plane = &intel_plane->base; 3123 struct drm_format_name_buf format_name; 3124 3125 if (!plane->state) { 3126 seq_puts(m, "plane->state is NULL!\n"); 3127 continue; 3128 } 3129 3130 state = plane->state; 3131 3132 if (state->fb) { 3133 drm_get_format_name(state->fb->format->format, 3134 &format_name); 3135 } else { 3136 sprintf(format_name.str, "N/A"); 3137 } 3138 3139 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3140 plane->base.id, 3141 plane_type(intel_plane->base.type), 3142 state->crtc_x, state->crtc_y, 3143 state->crtc_w, state->crtc_h, 3144 (state->src_x >> 16), 3145 ((state->src_x & 0xffff) * 15625) >> 10, 3146 (state->src_y >> 16), 3147 ((state->src_y & 0xffff) * 15625) >> 10, 3148 (state->src_w >> 16), 3149 ((state->src_w & 0xffff) * 15625) >> 10, 3150 (state->src_h >> 16), 3151 ((state->src_h & 0xffff) * 15625) >> 10, 3152 format_name.str, 3153 plane_rotation(state->rotation)); 3154 } 3155 } 3156 3157 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3158 { 3159 struct intel_crtc_state *pipe_config; 3160 int num_scalers = intel_crtc->num_scalers; 3161 int i; 3162 3163 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3164 3165 /* Not all platformas have a scaler */ 3166 if (num_scalers) { 3167 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3168 num_scalers, 3169 pipe_config->scaler_state.scaler_users, 3170 pipe_config->scaler_state.scaler_id); 3171 3172 for (i = 0; i < num_scalers; i++) { 3173 struct intel_scaler *sc = 3174 &pipe_config->scaler_state.scalers[i]; 3175 3176 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3177 i, yesno(sc->in_use), sc->mode); 3178 } 3179 seq_puts(m, "\n"); 3180 } else { 3181 seq_puts(m, "\tNo scalers available on this platform\n"); 3182 } 3183 } 3184 3185 static int i915_display_info(struct seq_file *m, void *unused) 3186 { 3187 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3188 struct drm_device *dev = &dev_priv->drm; 3189 struct intel_crtc *crtc; 3190 struct drm_connector *connector; 3191 struct drm_connector_list_iter conn_iter; 3192 3193 intel_runtime_pm_get(dev_priv); 3194 seq_printf(m, "CRTC info\n"); 3195 seq_printf(m, "---------\n"); 3196 for_each_intel_crtc(dev, crtc) { 3197 bool active; 3198 struct intel_crtc_state *pipe_config; 3199 int x, y; 3200 3201 drm_modeset_lock(&crtc->base.mutex, NULL); 3202 pipe_config = to_intel_crtc_state(crtc->base.state); 3203 3204 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3205 crtc->base.base.id, pipe_name(crtc->pipe), 3206 yesno(pipe_config->base.active), 3207 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3208 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3209 3210 if (pipe_config->base.active) { 3211 intel_crtc_info(m, crtc); 3212 3213 active = cursor_position(dev_priv, crtc->pipe, &x, &y); 3214 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", 3215 yesno(crtc->cursor_base), 3216 x, y, crtc->base.cursor->state->crtc_w, 3217 crtc->base.cursor->state->crtc_h, 3218 crtc->cursor_addr, yesno(active)); 3219 intel_scaler_info(m, crtc); 3220 intel_plane_info(m, crtc); 3221 } 3222 3223 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3224 yesno(!crtc->cpu_fifo_underrun_disabled), 3225 yesno(!crtc->pch_fifo_underrun_disabled)); 3226 drm_modeset_unlock(&crtc->base.mutex); 3227 } 3228 3229 seq_printf(m, "\n"); 3230 seq_printf(m, "Connector info\n"); 3231 seq_printf(m, "--------------\n"); 3232 mutex_lock(&dev->mode_config.mutex); 3233 drm_connector_list_iter_begin(dev, &conn_iter); 3234 drm_for_each_connector_iter(connector, &conn_iter) 3235 intel_connector_info(m, connector); 3236 drm_connector_list_iter_end(&conn_iter); 3237 mutex_unlock(&dev->mode_config.mutex); 3238 3239 intel_runtime_pm_put(dev_priv); 3240 3241 return 0; 3242 } 3243 3244 static int i915_engine_info(struct seq_file *m, void *unused) 3245 { 3246 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3247 struct intel_engine_cs *engine; 3248 enum intel_engine_id id; 3249 3250 intel_runtime_pm_get(dev_priv); 3251 3252 seq_printf(m, "GT awake? %s\n", 3253 yesno(dev_priv->gt.awake)); 3254 seq_printf(m, "Global active requests: %d\n", 3255 dev_priv->gt.active_requests); 3256 3257 for_each_engine(engine, dev_priv, id) { 3258 struct intel_breadcrumbs *b = &engine->breadcrumbs; 3259 struct drm_i915_gem_request *rq; 3260 struct rb_node *rb; 3261 u64 addr; 3262 3263 seq_printf(m, "%s\n", engine->name); 3264 seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", 3265 intel_engine_get_seqno(engine), 3266 intel_engine_last_submit(engine), 3267 engine->hangcheck.seqno, 3268 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), 3269 engine->timeline->inflight_seqnos); 3270 3271 rcu_read_lock(); 3272 3273 seq_printf(m, "\tRequests:\n"); 3274 3275 rq = list_first_entry(&engine->timeline->requests, 3276 struct drm_i915_gem_request, link); 3277 if (&rq->link != &engine->timeline->requests) 3278 print_request(m, rq, "\t\tfirst "); 3279 3280 rq = list_last_entry(&engine->timeline->requests, 3281 struct drm_i915_gem_request, link); 3282 if (&rq->link != &engine->timeline->requests) 3283 print_request(m, rq, "\t\tlast "); 3284 3285 rq = i915_gem_find_active_request(engine); 3286 if (rq) { 3287 print_request(m, rq, "\t\tactive "); 3288 seq_printf(m, 3289 "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", 3290 rq->head, rq->postfix, rq->tail, 3291 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, 3292 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); 3293 } 3294 3295 seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n", 3296 I915_READ(RING_START(engine->mmio_base)), 3297 rq ? i915_ggtt_offset(rq->ring->vma) : 0); 3298 seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n", 3299 I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR, 3300 rq ? rq->ring->head : 0); 3301 seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n", 3302 I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR, 3303 rq ? rq->ring->tail : 0); 3304 seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n", 3305 I915_READ(RING_CTL(engine->mmio_base)), 3306 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : ""); 3307 3308 rcu_read_unlock(); 3309 3310 addr = intel_engine_get_active_head(engine); 3311 seq_printf(m, "\tACTHD: 0x%08x_%08x\n", 3312 upper_32_bits(addr), lower_32_bits(addr)); 3313 addr = intel_engine_get_last_batch_head(engine); 3314 seq_printf(m, "\tBBADDR: 0x%08x_%08x\n", 3315 upper_32_bits(addr), lower_32_bits(addr)); 3316 3317 if (i915.enable_execlists) { 3318 u32 ptr, read, write; 3319 struct rb_node *rb; 3320 3321 seq_printf(m, "\tExeclist status: 0x%08x %08x\n", 3322 I915_READ(RING_EXECLIST_STATUS_LO(engine)), 3323 I915_READ(RING_EXECLIST_STATUS_HI(engine))); 3324 3325 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); 3326 read = GEN8_CSB_READ_PTR(ptr); 3327 write = GEN8_CSB_WRITE_PTR(ptr); 3328 seq_printf(m, "\tExeclist CSB read %d, write %d\n", 3329 read, write); 3330 if (read >= GEN8_CSB_ENTRIES) 3331 read = 0; 3332 if (write >= GEN8_CSB_ENTRIES) 3333 write = 0; 3334 if (read > write) 3335 write += GEN8_CSB_ENTRIES; 3336 while (read < write) { 3337 unsigned int idx = ++read % GEN8_CSB_ENTRIES; 3338 3339 seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", 3340 idx, 3341 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), 3342 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); 3343 } 3344 3345 rcu_read_lock(); 3346 rq = READ_ONCE(engine->execlist_port[0].request); 3347 if (rq) { 3348 seq_printf(m, "\t\tELSP[0] count=%d, ", 3349 engine->execlist_port[0].count); 3350 print_request(m, rq, "rq: "); 3351 } else { 3352 seq_printf(m, "\t\tELSP[0] idle\n"); 3353 } 3354 rq = READ_ONCE(engine->execlist_port[1].request); 3355 if (rq) { 3356 seq_printf(m, "\t\tELSP[1] count=%d, ", 3357 engine->execlist_port[1].count); 3358 print_request(m, rq, "rq: "); 3359 } else { 3360 seq_printf(m, "\t\tELSP[1] idle\n"); 3361 } 3362 rcu_read_unlock(); 3363 3364 spin_lock_irq(&engine->timeline->lock); 3365 for (rb = engine->execlist_first; rb; rb = rb_next(rb)) { 3366 rq = rb_entry(rb, typeof(*rq), priotree.node); 3367 print_request(m, rq, "\t\tQ "); 3368 } 3369 spin_unlock_irq(&engine->timeline->lock); 3370 } else if (INTEL_GEN(dev_priv) > 6) { 3371 seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 3372 I915_READ(RING_PP_DIR_BASE(engine))); 3373 seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", 3374 I915_READ(RING_PP_DIR_BASE_READ(engine))); 3375 seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 3376 I915_READ(RING_PP_DIR_DCLV(engine))); 3377 } 3378 3379 spin_lock_irq(&b->rb_lock); 3380 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 3381 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 3382 3383 seq_printf(m, "\t%s [%d] waiting for %x\n", 3384 w->tsk->comm, w->tsk->pid, w->seqno); 3385 } 3386 spin_unlock_irq(&b->rb_lock); 3387 3388 seq_puts(m, "\n"); 3389 } 3390 3391 intel_runtime_pm_put(dev_priv); 3392 3393 return 0; 3394 } 3395 3396 static int i915_semaphore_status(struct seq_file *m, void *unused) 3397 { 3398 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3399 struct drm_device *dev = &dev_priv->drm; 3400 struct intel_engine_cs *engine; 3401 int num_rings = INTEL_INFO(dev_priv)->num_rings; 3402 enum intel_engine_id id; 3403 int j, ret; 3404 3405 if (!i915.semaphores) { 3406 seq_puts(m, "Semaphores are disabled\n"); 3407 return 0; 3408 } 3409 3410 ret = mutex_lock_interruptible(&dev->struct_mutex); 3411 if (ret) 3412 return ret; 3413 intel_runtime_pm_get(dev_priv); 3414 3415 if (IS_BROADWELL(dev_priv)) { 3416 struct page *page; 3417 uint64_t *seqno; 3418 3419 page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); 3420 3421 seqno = (uint64_t *)kmap_atomic(page); 3422 for_each_engine(engine, dev_priv, id) { 3423 uint64_t offset; 3424 3425 seq_printf(m, "%s\n", engine->name); 3426 3427 seq_puts(m, " Last signal:"); 3428 for (j = 0; j < num_rings; j++) { 3429 offset = id * I915_NUM_ENGINES + j; 3430 seq_printf(m, "0x%08llx (0x%02llx) ", 3431 seqno[offset], offset * 8); 3432 } 3433 seq_putc(m, '\n'); 3434 3435 seq_puts(m, " Last wait: "); 3436 for (j = 0; j < num_rings; j++) { 3437 offset = id + (j * I915_NUM_ENGINES); 3438 seq_printf(m, "0x%08llx (0x%02llx) ", 3439 seqno[offset], offset * 8); 3440 } 3441 seq_putc(m, '\n'); 3442 3443 } 3444 kunmap_atomic(seqno); 3445 } else { 3446 seq_puts(m, " Last signal:"); 3447 for_each_engine(engine, dev_priv, id) 3448 for (j = 0; j < num_rings; j++) 3449 seq_printf(m, "0x%08x\n", 3450 I915_READ(engine->semaphore.mbox.signal[j])); 3451 seq_putc(m, '\n'); 3452 } 3453 3454 intel_runtime_pm_put(dev_priv); 3455 mutex_unlock(&dev->struct_mutex); 3456 return 0; 3457 } 3458 3459 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3460 { 3461 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3462 struct drm_device *dev = &dev_priv->drm; 3463 int i; 3464 3465 drm_modeset_lock_all(dev); 3466 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3467 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3468 3469 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3470 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3471 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 3472 seq_printf(m, " tracked hardware state:\n"); 3473 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 3474 seq_printf(m, " dpll_md: 0x%08x\n", 3475 pll->state.hw_state.dpll_md); 3476 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 3477 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 3478 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 3479 } 3480 drm_modeset_unlock_all(dev); 3481 3482 return 0; 3483 } 3484 3485 static int i915_wa_registers(struct seq_file *m, void *unused) 3486 { 3487 int i; 3488 int ret; 3489 struct intel_engine_cs *engine; 3490 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3491 struct drm_device *dev = &dev_priv->drm; 3492 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3493 enum intel_engine_id id; 3494 3495 ret = mutex_lock_interruptible(&dev->struct_mutex); 3496 if (ret) 3497 return ret; 3498 3499 intel_runtime_pm_get(dev_priv); 3500 3501 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3502 for_each_engine(engine, dev_priv, id) 3503 seq_printf(m, "HW whitelist count for %s: %d\n", 3504 engine->name, workarounds->hw_whitelist_count[id]); 3505 for (i = 0; i < workarounds->count; ++i) { 3506 i915_reg_t addr; 3507 u32 mask, value, read; 3508 bool ok; 3509 3510 addr = workarounds->reg[i].addr; 3511 mask = workarounds->reg[i].mask; 3512 value = workarounds->reg[i].value; 3513 read = I915_READ(addr); 3514 ok = (value & mask) == (read & mask); 3515 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3516 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3517 } 3518 3519 intel_runtime_pm_put(dev_priv); 3520 mutex_unlock(&dev->struct_mutex); 3521 3522 return 0; 3523 } 3524 3525 static int i915_ddb_info(struct seq_file *m, void *unused) 3526 { 3527 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3528 struct drm_device *dev = &dev_priv->drm; 3529 struct skl_ddb_allocation *ddb; 3530 struct skl_ddb_entry *entry; 3531 enum pipe pipe; 3532 int plane; 3533 3534 if (INTEL_GEN(dev_priv) < 9) 3535 return 0; 3536 3537 drm_modeset_lock_all(dev); 3538 3539 ddb = &dev_priv->wm.skl_hw.ddb; 3540 3541 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3542 3543 for_each_pipe(dev_priv, pipe) { 3544 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3545 3546 for_each_universal_plane(dev_priv, pipe, plane) { 3547 entry = &ddb->plane[pipe][plane]; 3548 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3549 entry->start, entry->end, 3550 skl_ddb_entry_size(entry)); 3551 } 3552 3553 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3554 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3555 entry->end, skl_ddb_entry_size(entry)); 3556 } 3557 3558 drm_modeset_unlock_all(dev); 3559 3560 return 0; 3561 } 3562 3563 static void drrs_status_per_crtc(struct seq_file *m, 3564 struct drm_device *dev, 3565 struct intel_crtc *intel_crtc) 3566 { 3567 struct drm_i915_private *dev_priv = to_i915(dev); 3568 struct i915_drrs *drrs = &dev_priv->drrs; 3569 int vrefresh = 0; 3570 struct drm_connector *connector; 3571 struct drm_connector_list_iter conn_iter; 3572 3573 drm_connector_list_iter_begin(dev, &conn_iter); 3574 drm_for_each_connector_iter(connector, &conn_iter) { 3575 if (connector->state->crtc != &intel_crtc->base) 3576 continue; 3577 3578 seq_printf(m, "%s:\n", connector->name); 3579 } 3580 drm_connector_list_iter_end(&conn_iter); 3581 3582 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3583 seq_puts(m, "\tVBT: DRRS_type: Static"); 3584 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3585 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3586 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3587 seq_puts(m, "\tVBT: DRRS_type: None"); 3588 else 3589 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3590 3591 seq_puts(m, "\n\n"); 3592 3593 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3594 struct intel_panel *panel; 3595 3596 mutex_lock(&drrs->mutex); 3597 /* DRRS Supported */ 3598 seq_puts(m, "\tDRRS Supported: Yes\n"); 3599 3600 /* disable_drrs() will make drrs->dp NULL */ 3601 if (!drrs->dp) { 3602 seq_puts(m, "Idleness DRRS: Disabled"); 3603 mutex_unlock(&drrs->mutex); 3604 return; 3605 } 3606 3607 panel = &drrs->dp->attached_connector->panel; 3608 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3609 drrs->busy_frontbuffer_bits); 3610 3611 seq_puts(m, "\n\t\t"); 3612 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3613 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3614 vrefresh = panel->fixed_mode->vrefresh; 3615 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3616 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3617 vrefresh = panel->downclock_mode->vrefresh; 3618 } else { 3619 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3620 drrs->refresh_rate_type); 3621 mutex_unlock(&drrs->mutex); 3622 return; 3623 } 3624 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3625 3626 seq_puts(m, "\n\t\t"); 3627 mutex_unlock(&drrs->mutex); 3628 } else { 3629 /* DRRS not supported. Print the VBT parameter*/ 3630 seq_puts(m, "\tDRRS Supported : No"); 3631 } 3632 seq_puts(m, "\n"); 3633 } 3634 3635 static int i915_drrs_status(struct seq_file *m, void *unused) 3636 { 3637 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3638 struct drm_device *dev = &dev_priv->drm; 3639 struct intel_crtc *intel_crtc; 3640 int active_crtc_cnt = 0; 3641 3642 drm_modeset_lock_all(dev); 3643 for_each_intel_crtc(dev, intel_crtc) { 3644 if (intel_crtc->base.state->active) { 3645 active_crtc_cnt++; 3646 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3647 3648 drrs_status_per_crtc(m, dev, intel_crtc); 3649 } 3650 } 3651 drm_modeset_unlock_all(dev); 3652 3653 if (!active_crtc_cnt) 3654 seq_puts(m, "No active crtc found\n"); 3655 3656 return 0; 3657 } 3658 3659 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3660 { 3661 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3662 struct drm_device *dev = &dev_priv->drm; 3663 struct intel_encoder *intel_encoder; 3664 struct intel_digital_port *intel_dig_port; 3665 struct drm_connector *connector; 3666 struct drm_connector_list_iter conn_iter; 3667 3668 drm_connector_list_iter_begin(dev, &conn_iter); 3669 drm_for_each_connector_iter(connector, &conn_iter) { 3670 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 3671 continue; 3672 3673 intel_encoder = intel_attached_encoder(connector); 3674 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 3675 continue; 3676 3677 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 3678 if (!intel_dig_port->dp.can_mst) 3679 continue; 3680 3681 seq_printf(m, "MST Source Port %c\n", 3682 port_name(intel_dig_port->port)); 3683 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3684 } 3685 drm_connector_list_iter_end(&conn_iter); 3686 3687 return 0; 3688 } 3689 3690 static ssize_t i915_displayport_test_active_write(struct file *file, 3691 const char __user *ubuf, 3692 size_t len, loff_t *offp) 3693 { 3694 char *input_buffer; 3695 int status = 0; 3696 struct drm_device *dev; 3697 struct drm_connector *connector; 3698 struct drm_connector_list_iter conn_iter; 3699 struct intel_dp *intel_dp; 3700 int val = 0; 3701 3702 dev = ((struct seq_file *)file->private_data)->private; 3703 3704 if (len == 0) 3705 return 0; 3706 3707 input_buffer = kmalloc(len + 1, GFP_KERNEL); 3708 if (!input_buffer) 3709 return -ENOMEM; 3710 3711 if (copy_from_user(input_buffer, ubuf, len)) { 3712 status = -EFAULT; 3713 goto out; 3714 } 3715 3716 input_buffer[len] = '\0'; 3717 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 3718 3719 drm_connector_list_iter_begin(dev, &conn_iter); 3720 drm_for_each_connector_iter(connector, &conn_iter) { 3721 if (connector->connector_type != 3722 DRM_MODE_CONNECTOR_DisplayPort) 3723 continue; 3724 3725 if (connector->status == connector_status_connected && 3726 connector->encoder != NULL) { 3727 intel_dp = enc_to_intel_dp(connector->encoder); 3728 status = kstrtoint(input_buffer, 10, &val); 3729 if (status < 0) 3730 break; 3731 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 3732 /* To prevent erroneous activation of the compliance 3733 * testing code, only accept an actual value of 1 here 3734 */ 3735 if (val == 1) 3736 intel_dp->compliance.test_active = 1; 3737 else 3738 intel_dp->compliance.test_active = 0; 3739 } 3740 } 3741 drm_connector_list_iter_end(&conn_iter); 3742 out: 3743 kfree(input_buffer); 3744 if (status < 0) 3745 return status; 3746 3747 *offp += len; 3748 return len; 3749 } 3750 3751 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 3752 { 3753 struct drm_device *dev = m->private; 3754 struct drm_connector *connector; 3755 struct drm_connector_list_iter conn_iter; 3756 struct intel_dp *intel_dp; 3757 3758 drm_connector_list_iter_begin(dev, &conn_iter); 3759 drm_for_each_connector_iter(connector, &conn_iter) { 3760 if (connector->connector_type != 3761 DRM_MODE_CONNECTOR_DisplayPort) 3762 continue; 3763 3764 if (connector->status == connector_status_connected && 3765 connector->encoder != NULL) { 3766 intel_dp = enc_to_intel_dp(connector->encoder); 3767 if (intel_dp->compliance.test_active) 3768 seq_puts(m, "1"); 3769 else 3770 seq_puts(m, "0"); 3771 } else 3772 seq_puts(m, "0"); 3773 } 3774 drm_connector_list_iter_end(&conn_iter); 3775 3776 return 0; 3777 } 3778 3779 static int i915_displayport_test_active_open(struct inode *inode, 3780 struct file *file) 3781 { 3782 struct drm_i915_private *dev_priv = inode->i_private; 3783 3784 return single_open(file, i915_displayport_test_active_show, 3785 &dev_priv->drm); 3786 } 3787 3788 static const struct file_operations i915_displayport_test_active_fops = { 3789 .owner = THIS_MODULE, 3790 .open = i915_displayport_test_active_open, 3791 .read = seq_read, 3792 .llseek = seq_lseek, 3793 .release = single_release, 3794 .write = i915_displayport_test_active_write 3795 }; 3796 3797 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 3798 { 3799 struct drm_device *dev = m->private; 3800 struct drm_connector *connector; 3801 struct drm_connector_list_iter conn_iter; 3802 struct intel_dp *intel_dp; 3803 3804 drm_connector_list_iter_begin(dev, &conn_iter); 3805 drm_for_each_connector_iter(connector, &conn_iter) { 3806 if (connector->connector_type != 3807 DRM_MODE_CONNECTOR_DisplayPort) 3808 continue; 3809 3810 if (connector->status == connector_status_connected && 3811 connector->encoder != NULL) { 3812 intel_dp = enc_to_intel_dp(connector->encoder); 3813 if (intel_dp->compliance.test_type == 3814 DP_TEST_LINK_EDID_READ) 3815 seq_printf(m, "%lx", 3816 intel_dp->compliance.test_data.edid); 3817 else if (intel_dp->compliance.test_type == 3818 DP_TEST_LINK_VIDEO_PATTERN) { 3819 seq_printf(m, "hdisplay: %d\n", 3820 intel_dp->compliance.test_data.hdisplay); 3821 seq_printf(m, "vdisplay: %d\n", 3822 intel_dp->compliance.test_data.vdisplay); 3823 seq_printf(m, "bpc: %u\n", 3824 intel_dp->compliance.test_data.bpc); 3825 } 3826 } else 3827 seq_puts(m, "0"); 3828 } 3829 drm_connector_list_iter_end(&conn_iter); 3830 3831 return 0; 3832 } 3833 static int i915_displayport_test_data_open(struct inode *inode, 3834 struct file *file) 3835 { 3836 struct drm_i915_private *dev_priv = inode->i_private; 3837 3838 return single_open(file, i915_displayport_test_data_show, 3839 &dev_priv->drm); 3840 } 3841 3842 static const struct file_operations i915_displayport_test_data_fops = { 3843 .owner = THIS_MODULE, 3844 .open = i915_displayport_test_data_open, 3845 .read = seq_read, 3846 .llseek = seq_lseek, 3847 .release = single_release 3848 }; 3849 3850 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 3851 { 3852 struct drm_device *dev = m->private; 3853 struct drm_connector *connector; 3854 struct drm_connector_list_iter conn_iter; 3855 struct intel_dp *intel_dp; 3856 3857 drm_connector_list_iter_begin(dev, &conn_iter); 3858 drm_for_each_connector_iter(connector, &conn_iter) { 3859 if (connector->connector_type != 3860 DRM_MODE_CONNECTOR_DisplayPort) 3861 continue; 3862 3863 if (connector->status == connector_status_connected && 3864 connector->encoder != NULL) { 3865 intel_dp = enc_to_intel_dp(connector->encoder); 3866 seq_printf(m, "%02lx", intel_dp->compliance.test_type); 3867 } else 3868 seq_puts(m, "0"); 3869 } 3870 drm_connector_list_iter_end(&conn_iter); 3871 3872 return 0; 3873 } 3874 3875 static int i915_displayport_test_type_open(struct inode *inode, 3876 struct file *file) 3877 { 3878 struct drm_i915_private *dev_priv = inode->i_private; 3879 3880 return single_open(file, i915_displayport_test_type_show, 3881 &dev_priv->drm); 3882 } 3883 3884 static const struct file_operations i915_displayport_test_type_fops = { 3885 .owner = THIS_MODULE, 3886 .open = i915_displayport_test_type_open, 3887 .read = seq_read, 3888 .llseek = seq_lseek, 3889 .release = single_release 3890 }; 3891 3892 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 3893 { 3894 struct drm_i915_private *dev_priv = m->private; 3895 struct drm_device *dev = &dev_priv->drm; 3896 int level; 3897 int num_levels; 3898 3899 if (IS_CHERRYVIEW(dev_priv)) 3900 num_levels = 3; 3901 else if (IS_VALLEYVIEW(dev_priv)) 3902 num_levels = 1; 3903 else 3904 num_levels = ilk_wm_max_level(dev_priv) + 1; 3905 3906 drm_modeset_lock_all(dev); 3907 3908 for (level = 0; level < num_levels; level++) { 3909 unsigned int latency = wm[level]; 3910 3911 /* 3912 * - WM1+ latency values in 0.5us units 3913 * - latencies are in us on gen9/vlv/chv 3914 */ 3915 if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) || 3916 IS_CHERRYVIEW(dev_priv)) 3917 latency *= 10; 3918 else if (level > 0) 3919 latency *= 5; 3920 3921 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3922 level, wm[level], latency / 10, latency % 10); 3923 } 3924 3925 drm_modeset_unlock_all(dev); 3926 } 3927 3928 static int pri_wm_latency_show(struct seq_file *m, void *data) 3929 { 3930 struct drm_i915_private *dev_priv = m->private; 3931 const uint16_t *latencies; 3932 3933 if (INTEL_GEN(dev_priv) >= 9) 3934 latencies = dev_priv->wm.skl_latency; 3935 else 3936 latencies = dev_priv->wm.pri_latency; 3937 3938 wm_latency_show(m, latencies); 3939 3940 return 0; 3941 } 3942 3943 static int spr_wm_latency_show(struct seq_file *m, void *data) 3944 { 3945 struct drm_i915_private *dev_priv = m->private; 3946 const uint16_t *latencies; 3947 3948 if (INTEL_GEN(dev_priv) >= 9) 3949 latencies = dev_priv->wm.skl_latency; 3950 else 3951 latencies = dev_priv->wm.spr_latency; 3952 3953 wm_latency_show(m, latencies); 3954 3955 return 0; 3956 } 3957 3958 static int cur_wm_latency_show(struct seq_file *m, void *data) 3959 { 3960 struct drm_i915_private *dev_priv = m->private; 3961 const uint16_t *latencies; 3962 3963 if (INTEL_GEN(dev_priv) >= 9) 3964 latencies = dev_priv->wm.skl_latency; 3965 else 3966 latencies = dev_priv->wm.cur_latency; 3967 3968 wm_latency_show(m, latencies); 3969 3970 return 0; 3971 } 3972 3973 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3974 { 3975 struct drm_i915_private *dev_priv = inode->i_private; 3976 3977 if (INTEL_GEN(dev_priv) < 5) 3978 return -ENODEV; 3979 3980 return single_open(file, pri_wm_latency_show, dev_priv); 3981 } 3982 3983 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3984 { 3985 struct drm_i915_private *dev_priv = inode->i_private; 3986 3987 if (HAS_GMCH_DISPLAY(dev_priv)) 3988 return -ENODEV; 3989 3990 return single_open(file, spr_wm_latency_show, dev_priv); 3991 } 3992 3993 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3994 { 3995 struct drm_i915_private *dev_priv = inode->i_private; 3996 3997 if (HAS_GMCH_DISPLAY(dev_priv)) 3998 return -ENODEV; 3999 4000 return single_open(file, cur_wm_latency_show, dev_priv); 4001 } 4002 4003 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 4004 size_t len, loff_t *offp, uint16_t wm[8]) 4005 { 4006 struct seq_file *m = file->private_data; 4007 struct drm_i915_private *dev_priv = m->private; 4008 struct drm_device *dev = &dev_priv->drm; 4009 uint16_t new[8] = { 0 }; 4010 int num_levels; 4011 int level; 4012 int ret; 4013 char tmp[32]; 4014 4015 if (IS_CHERRYVIEW(dev_priv)) 4016 num_levels = 3; 4017 else if (IS_VALLEYVIEW(dev_priv)) 4018 num_levels = 1; 4019 else 4020 num_levels = ilk_wm_max_level(dev_priv) + 1; 4021 4022 if (len >= sizeof(tmp)) 4023 return -EINVAL; 4024 4025 if (copy_from_user(tmp, ubuf, len)) 4026 return -EFAULT; 4027 4028 tmp[len] = '\0'; 4029 4030 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4031 &new[0], &new[1], &new[2], &new[3], 4032 &new[4], &new[5], &new[6], &new[7]); 4033 if (ret != num_levels) 4034 return -EINVAL; 4035 4036 drm_modeset_lock_all(dev); 4037 4038 for (level = 0; level < num_levels; level++) 4039 wm[level] = new[level]; 4040 4041 drm_modeset_unlock_all(dev); 4042 4043 return len; 4044 } 4045 4046 4047 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4048 size_t len, loff_t *offp) 4049 { 4050 struct seq_file *m = file->private_data; 4051 struct drm_i915_private *dev_priv = m->private; 4052 uint16_t *latencies; 4053 4054 if (INTEL_GEN(dev_priv) >= 9) 4055 latencies = dev_priv->wm.skl_latency; 4056 else 4057 latencies = dev_priv->wm.pri_latency; 4058 4059 return wm_latency_write(file, ubuf, len, offp, latencies); 4060 } 4061 4062 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4063 size_t len, loff_t *offp) 4064 { 4065 struct seq_file *m = file->private_data; 4066 struct drm_i915_private *dev_priv = m->private; 4067 uint16_t *latencies; 4068 4069 if (INTEL_GEN(dev_priv) >= 9) 4070 latencies = dev_priv->wm.skl_latency; 4071 else 4072 latencies = dev_priv->wm.spr_latency; 4073 4074 return wm_latency_write(file, ubuf, len, offp, latencies); 4075 } 4076 4077 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4078 size_t len, loff_t *offp) 4079 { 4080 struct seq_file *m = file->private_data; 4081 struct drm_i915_private *dev_priv = m->private; 4082 uint16_t *latencies; 4083 4084 if (INTEL_GEN(dev_priv) >= 9) 4085 latencies = dev_priv->wm.skl_latency; 4086 else 4087 latencies = dev_priv->wm.cur_latency; 4088 4089 return wm_latency_write(file, ubuf, len, offp, latencies); 4090 } 4091 4092 static const struct file_operations i915_pri_wm_latency_fops = { 4093 .owner = THIS_MODULE, 4094 .open = pri_wm_latency_open, 4095 .read = seq_read, 4096 .llseek = seq_lseek, 4097 .release = single_release, 4098 .write = pri_wm_latency_write 4099 }; 4100 4101 static const struct file_operations i915_spr_wm_latency_fops = { 4102 .owner = THIS_MODULE, 4103 .open = spr_wm_latency_open, 4104 .read = seq_read, 4105 .llseek = seq_lseek, 4106 .release = single_release, 4107 .write = spr_wm_latency_write 4108 }; 4109 4110 static const struct file_operations i915_cur_wm_latency_fops = { 4111 .owner = THIS_MODULE, 4112 .open = cur_wm_latency_open, 4113 .read = seq_read, 4114 .llseek = seq_lseek, 4115 .release = single_release, 4116 .write = cur_wm_latency_write 4117 }; 4118 4119 static int 4120 i915_wedged_get(void *data, u64 *val) 4121 { 4122 struct drm_i915_private *dev_priv = data; 4123 4124 *val = i915_terminally_wedged(&dev_priv->gpu_error); 4125 4126 return 0; 4127 } 4128 4129 static int 4130 i915_wedged_set(void *data, u64 val) 4131 { 4132 struct drm_i915_private *i915 = data; 4133 struct intel_engine_cs *engine; 4134 unsigned int tmp; 4135 4136 /* 4137 * There is no safeguard against this debugfs entry colliding 4138 * with the hangcheck calling same i915_handle_error() in 4139 * parallel, causing an explosion. For now we assume that the 4140 * test harness is responsible enough not to inject gpu hangs 4141 * while it is writing to 'i915_wedged' 4142 */ 4143 4144 if (i915_reset_backoff(&i915->gpu_error)) 4145 return -EAGAIN; 4146 4147 for_each_engine_masked(engine, i915, val, tmp) { 4148 engine->hangcheck.seqno = intel_engine_get_seqno(engine); 4149 engine->hangcheck.stalled = true; 4150 } 4151 4152 i915_handle_error(i915, val, "Manually setting wedged to %llu", val); 4153 4154 wait_on_bit(&i915->gpu_error.flags, 4155 I915_RESET_HANDOFF, 4156 TASK_UNINTERRUPTIBLE); 4157 4158 return 0; 4159 } 4160 4161 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4162 i915_wedged_get, i915_wedged_set, 4163 "%llu\n"); 4164 4165 static int 4166 fault_irq_set(struct drm_i915_private *i915, 4167 unsigned long *irq, 4168 unsigned long val) 4169 { 4170 int err; 4171 4172 err = mutex_lock_interruptible(&i915->drm.struct_mutex); 4173 if (err) 4174 return err; 4175 4176 err = i915_gem_wait_for_idle(i915, 4177 I915_WAIT_LOCKED | 4178 I915_WAIT_INTERRUPTIBLE); 4179 if (err) 4180 goto err_unlock; 4181 4182 *irq = val; 4183 mutex_unlock(&i915->drm.struct_mutex); 4184 4185 /* Flush idle worker to disarm irq */ 4186 while (flush_delayed_work(&i915->gt.idle_work)) 4187 ; 4188 4189 return 0; 4190 4191 err_unlock: 4192 mutex_unlock(&i915->drm.struct_mutex); 4193 return err; 4194 } 4195 4196 static int 4197 i915_ring_missed_irq_get(void *data, u64 *val) 4198 { 4199 struct drm_i915_private *dev_priv = data; 4200 4201 *val = dev_priv->gpu_error.missed_irq_rings; 4202 return 0; 4203 } 4204 4205 static int 4206 i915_ring_missed_irq_set(void *data, u64 val) 4207 { 4208 struct drm_i915_private *i915 = data; 4209 4210 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val); 4211 } 4212 4213 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4214 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4215 "0x%08llx\n"); 4216 4217 static int 4218 i915_ring_test_irq_get(void *data, u64 *val) 4219 { 4220 struct drm_i915_private *dev_priv = data; 4221 4222 *val = dev_priv->gpu_error.test_irq_rings; 4223 4224 return 0; 4225 } 4226 4227 static int 4228 i915_ring_test_irq_set(void *data, u64 val) 4229 { 4230 struct drm_i915_private *i915 = data; 4231 4232 val &= INTEL_INFO(i915)->ring_mask; 4233 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4234 4235 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val); 4236 } 4237 4238 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4239 i915_ring_test_irq_get, i915_ring_test_irq_set, 4240 "0x%08llx\n"); 4241 4242 #define DROP_UNBOUND 0x1 4243 #define DROP_BOUND 0x2 4244 #define DROP_RETIRE 0x4 4245 #define DROP_ACTIVE 0x8 4246 #define DROP_FREED 0x10 4247 #define DROP_SHRINK_ALL 0x20 4248 #define DROP_ALL (DROP_UNBOUND | \ 4249 DROP_BOUND | \ 4250 DROP_RETIRE | \ 4251 DROP_ACTIVE | \ 4252 DROP_FREED | \ 4253 DROP_SHRINK_ALL) 4254 static int 4255 i915_drop_caches_get(void *data, u64 *val) 4256 { 4257 *val = DROP_ALL; 4258 4259 return 0; 4260 } 4261 4262 static int 4263 i915_drop_caches_set(void *data, u64 val) 4264 { 4265 struct drm_i915_private *dev_priv = data; 4266 struct drm_device *dev = &dev_priv->drm; 4267 int ret; 4268 4269 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4270 4271 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4272 * on ioctls on -EAGAIN. */ 4273 ret = mutex_lock_interruptible(&dev->struct_mutex); 4274 if (ret) 4275 return ret; 4276 4277 if (val & DROP_ACTIVE) { 4278 ret = i915_gem_wait_for_idle(dev_priv, 4279 I915_WAIT_INTERRUPTIBLE | 4280 I915_WAIT_LOCKED); 4281 if (ret) 4282 goto unlock; 4283 } 4284 4285 if (val & DROP_RETIRE) 4286 i915_gem_retire_requests(dev_priv); 4287 4288 lockdep_set_current_reclaim_state(GFP_KERNEL); 4289 if (val & DROP_BOUND) 4290 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4291 4292 if (val & DROP_UNBOUND) 4293 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 4294 4295 if (val & DROP_SHRINK_ALL) 4296 i915_gem_shrink_all(dev_priv); 4297 lockdep_clear_current_reclaim_state(); 4298 4299 unlock: 4300 mutex_unlock(&dev->struct_mutex); 4301 4302 if (val & DROP_FREED) { 4303 synchronize_rcu(); 4304 i915_gem_drain_freed_objects(dev_priv); 4305 } 4306 4307 return ret; 4308 } 4309 4310 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4311 i915_drop_caches_get, i915_drop_caches_set, 4312 "0x%08llx\n"); 4313 4314 static int 4315 i915_max_freq_get(void *data, u64 *val) 4316 { 4317 struct drm_i915_private *dev_priv = data; 4318 4319 if (INTEL_GEN(dev_priv) < 6) 4320 return -ENODEV; 4321 4322 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4323 return 0; 4324 } 4325 4326 static int 4327 i915_max_freq_set(void *data, u64 val) 4328 { 4329 struct drm_i915_private *dev_priv = data; 4330 u32 hw_max, hw_min; 4331 int ret; 4332 4333 if (INTEL_GEN(dev_priv) < 6) 4334 return -ENODEV; 4335 4336 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4337 4338 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4339 if (ret) 4340 return ret; 4341 4342 /* 4343 * Turbo will still be enabled, but won't go above the set value. 4344 */ 4345 val = intel_freq_opcode(dev_priv, val); 4346 4347 hw_max = dev_priv->rps.max_freq; 4348 hw_min = dev_priv->rps.min_freq; 4349 4350 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4351 mutex_unlock(&dev_priv->rps.hw_lock); 4352 return -EINVAL; 4353 } 4354 4355 dev_priv->rps.max_freq_softlimit = val; 4356 4357 if (intel_set_rps(dev_priv, val)) 4358 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4359 4360 mutex_unlock(&dev_priv->rps.hw_lock); 4361 4362 return 0; 4363 } 4364 4365 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4366 i915_max_freq_get, i915_max_freq_set, 4367 "%llu\n"); 4368 4369 static int 4370 i915_min_freq_get(void *data, u64 *val) 4371 { 4372 struct drm_i915_private *dev_priv = data; 4373 4374 if (INTEL_GEN(dev_priv) < 6) 4375 return -ENODEV; 4376 4377 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 4378 return 0; 4379 } 4380 4381 static int 4382 i915_min_freq_set(void *data, u64 val) 4383 { 4384 struct drm_i915_private *dev_priv = data; 4385 u32 hw_max, hw_min; 4386 int ret; 4387 4388 if (INTEL_GEN(dev_priv) < 6) 4389 return -ENODEV; 4390 4391 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4392 4393 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4394 if (ret) 4395 return ret; 4396 4397 /* 4398 * Turbo will still be enabled, but won't go below the set value. 4399 */ 4400 val = intel_freq_opcode(dev_priv, val); 4401 4402 hw_max = dev_priv->rps.max_freq; 4403 hw_min = dev_priv->rps.min_freq; 4404 4405 if (val < hw_min || 4406 val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 4407 mutex_unlock(&dev_priv->rps.hw_lock); 4408 return -EINVAL; 4409 } 4410 4411 dev_priv->rps.min_freq_softlimit = val; 4412 4413 if (intel_set_rps(dev_priv, val)) 4414 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4415 4416 mutex_unlock(&dev_priv->rps.hw_lock); 4417 4418 return 0; 4419 } 4420 4421 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 4422 i915_min_freq_get, i915_min_freq_set, 4423 "%llu\n"); 4424 4425 static int 4426 i915_cache_sharing_get(void *data, u64 *val) 4427 { 4428 struct drm_i915_private *dev_priv = data; 4429 u32 snpcr; 4430 4431 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4432 return -ENODEV; 4433 4434 intel_runtime_pm_get(dev_priv); 4435 4436 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4437 4438 intel_runtime_pm_put(dev_priv); 4439 4440 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 4441 4442 return 0; 4443 } 4444 4445 static int 4446 i915_cache_sharing_set(void *data, u64 val) 4447 { 4448 struct drm_i915_private *dev_priv = data; 4449 u32 snpcr; 4450 4451 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4452 return -ENODEV; 4453 4454 if (val > 3) 4455 return -EINVAL; 4456 4457 intel_runtime_pm_get(dev_priv); 4458 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 4459 4460 /* Update the cache sharing policy here as well */ 4461 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4462 snpcr &= ~GEN6_MBC_SNPCR_MASK; 4463 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 4464 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 4465 4466 intel_runtime_pm_put(dev_priv); 4467 return 0; 4468 } 4469 4470 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 4471 i915_cache_sharing_get, i915_cache_sharing_set, 4472 "%llu\n"); 4473 4474 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, 4475 struct sseu_dev_info *sseu) 4476 { 4477 int ss_max = 2; 4478 int ss; 4479 u32 sig1[ss_max], sig2[ss_max]; 4480 4481 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 4482 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 4483 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 4484 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 4485 4486 for (ss = 0; ss < ss_max; ss++) { 4487 unsigned int eu_cnt; 4488 4489 if (sig1[ss] & CHV_SS_PG_ENABLE) 4490 /* skip disabled subslice */ 4491 continue; 4492 4493 sseu->slice_mask = BIT(0); 4494 sseu->subslice_mask |= BIT(ss); 4495 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 4496 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 4497 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 4498 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 4499 sseu->eu_total += eu_cnt; 4500 sseu->eu_per_subslice = max_t(unsigned int, 4501 sseu->eu_per_subslice, eu_cnt); 4502 } 4503 } 4504 4505 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, 4506 struct sseu_dev_info *sseu) 4507 { 4508 int s_max = 3, ss_max = 4; 4509 int s, ss; 4510 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 4511 4512 /* BXT has a single slice and at most 3 subslices. */ 4513 if (IS_GEN9_LP(dev_priv)) { 4514 s_max = 1; 4515 ss_max = 3; 4516 } 4517 4518 for (s = 0; s < s_max; s++) { 4519 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 4520 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 4521 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 4522 } 4523 4524 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4525 GEN9_PGCTL_SSA_EU19_ACK | 4526 GEN9_PGCTL_SSA_EU210_ACK | 4527 GEN9_PGCTL_SSA_EU311_ACK; 4528 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4529 GEN9_PGCTL_SSB_EU19_ACK | 4530 GEN9_PGCTL_SSB_EU210_ACK | 4531 GEN9_PGCTL_SSB_EU311_ACK; 4532 4533 for (s = 0; s < s_max; s++) { 4534 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4535 /* skip disabled slice */ 4536 continue; 4537 4538 sseu->slice_mask |= BIT(s); 4539 4540 if (IS_GEN9_BC(dev_priv)) 4541 sseu->subslice_mask = 4542 INTEL_INFO(dev_priv)->sseu.subslice_mask; 4543 4544 for (ss = 0; ss < ss_max; ss++) { 4545 unsigned int eu_cnt; 4546 4547 if (IS_GEN9_LP(dev_priv)) { 4548 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4549 /* skip disabled subslice */ 4550 continue; 4551 4552 sseu->subslice_mask |= BIT(ss); 4553 } 4554 4555 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 4556 eu_mask[ss%2]); 4557 sseu->eu_total += eu_cnt; 4558 sseu->eu_per_subslice = max_t(unsigned int, 4559 sseu->eu_per_subslice, 4560 eu_cnt); 4561 } 4562 } 4563 } 4564 4565 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, 4566 struct sseu_dev_info *sseu) 4567 { 4568 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 4569 int s; 4570 4571 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; 4572 4573 if (sseu->slice_mask) { 4574 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; 4575 sseu->eu_per_subslice = 4576 INTEL_INFO(dev_priv)->sseu.eu_per_subslice; 4577 sseu->eu_total = sseu->eu_per_subslice * 4578 sseu_subslice_total(sseu); 4579 4580 /* subtract fused off EU(s) from enabled slice(s) */ 4581 for (s = 0; s < fls(sseu->slice_mask); s++) { 4582 u8 subslice_7eu = 4583 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s]; 4584 4585 sseu->eu_total -= hweight8(subslice_7eu); 4586 } 4587 } 4588 } 4589 4590 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, 4591 const struct sseu_dev_info *sseu) 4592 { 4593 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4594 const char *type = is_available_info ? "Available" : "Enabled"; 4595 4596 seq_printf(m, " %s Slice Mask: %04x\n", type, 4597 sseu->slice_mask); 4598 seq_printf(m, " %s Slice Total: %u\n", type, 4599 hweight8(sseu->slice_mask)); 4600 seq_printf(m, " %s Subslice Total: %u\n", type, 4601 sseu_subslice_total(sseu)); 4602 seq_printf(m, " %s Subslice Mask: %04x\n", type, 4603 sseu->subslice_mask); 4604 seq_printf(m, " %s Subslice Per Slice: %u\n", type, 4605 hweight8(sseu->subslice_mask)); 4606 seq_printf(m, " %s EU Total: %u\n", type, 4607 sseu->eu_total); 4608 seq_printf(m, " %s EU Per Subslice: %u\n", type, 4609 sseu->eu_per_subslice); 4610 4611 if (!is_available_info) 4612 return; 4613 4614 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv))); 4615 if (HAS_POOLED_EU(dev_priv)) 4616 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); 4617 4618 seq_printf(m, " Has Slice Power Gating: %s\n", 4619 yesno(sseu->has_slice_pg)); 4620 seq_printf(m, " Has Subslice Power Gating: %s\n", 4621 yesno(sseu->has_subslice_pg)); 4622 seq_printf(m, " Has EU Power Gating: %s\n", 4623 yesno(sseu->has_eu_pg)); 4624 } 4625 4626 static int i915_sseu_status(struct seq_file *m, void *unused) 4627 { 4628 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4629 struct sseu_dev_info sseu; 4630 4631 if (INTEL_GEN(dev_priv) < 8) 4632 return -ENODEV; 4633 4634 seq_puts(m, "SSEU Device Info\n"); 4635 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu); 4636 4637 seq_puts(m, "SSEU Device Status\n"); 4638 memset(&sseu, 0, sizeof(sseu)); 4639 4640 intel_runtime_pm_get(dev_priv); 4641 4642 if (IS_CHERRYVIEW(dev_priv)) { 4643 cherryview_sseu_device_status(dev_priv, &sseu); 4644 } else if (IS_BROADWELL(dev_priv)) { 4645 broadwell_sseu_device_status(dev_priv, &sseu); 4646 } else if (INTEL_GEN(dev_priv) >= 9) { 4647 gen9_sseu_device_status(dev_priv, &sseu); 4648 } 4649 4650 intel_runtime_pm_put(dev_priv); 4651 4652 i915_print_sseu_info(m, false, &sseu); 4653 4654 return 0; 4655 } 4656 4657 static int i915_forcewake_open(struct inode *inode, struct file *file) 4658 { 4659 struct drm_i915_private *dev_priv = inode->i_private; 4660 4661 if (INTEL_GEN(dev_priv) < 6) 4662 return 0; 4663 4664 intel_runtime_pm_get(dev_priv); 4665 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4666 4667 return 0; 4668 } 4669 4670 static int i915_forcewake_release(struct inode *inode, struct file *file) 4671 { 4672 struct drm_i915_private *dev_priv = inode->i_private; 4673 4674 if (INTEL_GEN(dev_priv) < 6) 4675 return 0; 4676 4677 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4678 intel_runtime_pm_put(dev_priv); 4679 4680 return 0; 4681 } 4682 4683 static const struct file_operations i915_forcewake_fops = { 4684 .owner = THIS_MODULE, 4685 .open = i915_forcewake_open, 4686 .release = i915_forcewake_release, 4687 }; 4688 4689 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 4690 { 4691 struct drm_i915_private *dev_priv = m->private; 4692 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4693 4694 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 4695 seq_printf(m, "Detected: %s\n", 4696 yesno(delayed_work_pending(&hotplug->reenable_work))); 4697 4698 return 0; 4699 } 4700 4701 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 4702 const char __user *ubuf, size_t len, 4703 loff_t *offp) 4704 { 4705 struct seq_file *m = file->private_data; 4706 struct drm_i915_private *dev_priv = m->private; 4707 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4708 unsigned int new_threshold; 4709 int i; 4710 char *newline; 4711 char tmp[16]; 4712 4713 if (len >= sizeof(tmp)) 4714 return -EINVAL; 4715 4716 if (copy_from_user(tmp, ubuf, len)) 4717 return -EFAULT; 4718 4719 tmp[len] = '\0'; 4720 4721 /* Strip newline, if any */ 4722 newline = strchr(tmp, '\n'); 4723 if (newline) 4724 *newline = '\0'; 4725 4726 if (strcmp(tmp, "reset") == 0) 4727 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4728 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 4729 return -EINVAL; 4730 4731 if (new_threshold > 0) 4732 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n", 4733 new_threshold); 4734 else 4735 DRM_DEBUG_KMS("Disabling HPD storm detection\n"); 4736 4737 spin_lock_irq(&dev_priv->irq_lock); 4738 hotplug->hpd_storm_threshold = new_threshold; 4739 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 4740 for_each_hpd_pin(i) 4741 hotplug->stats[i].count = 0; 4742 spin_unlock_irq(&dev_priv->irq_lock); 4743 4744 /* Re-enable hpd immediately if we were in an irq storm */ 4745 flush_delayed_work(&dev_priv->hotplug.reenable_work); 4746 4747 return len; 4748 } 4749 4750 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 4751 { 4752 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 4753 } 4754 4755 static const struct file_operations i915_hpd_storm_ctl_fops = { 4756 .owner = THIS_MODULE, 4757 .open = i915_hpd_storm_ctl_open, 4758 .read = seq_read, 4759 .llseek = seq_lseek, 4760 .release = single_release, 4761 .write = i915_hpd_storm_ctl_write 4762 }; 4763 4764 static const struct drm_info_list i915_debugfs_list[] = { 4765 {"i915_capabilities", i915_capabilities, 0}, 4766 {"i915_gem_objects", i915_gem_object_info, 0}, 4767 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 4768 {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1}, 4769 {"i915_gem_stolen", i915_gem_stolen_list_info }, 4770 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 4771 {"i915_gem_request", i915_gem_request_info, 0}, 4772 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 4773 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 4774 {"i915_gem_interrupt", i915_interrupt_info, 0}, 4775 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 4776 {"i915_guc_info", i915_guc_info, 0}, 4777 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 4778 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 4779 {"i915_huc_load_status", i915_huc_load_status_info, 0}, 4780 {"i915_frequency_info", i915_frequency_info, 0}, 4781 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 4782 {"i915_drpc_info", i915_drpc_info, 0}, 4783 {"i915_emon_status", i915_emon_status, 0}, 4784 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 4785 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 4786 {"i915_fbc_status", i915_fbc_status, 0}, 4787 {"i915_ips_status", i915_ips_status, 0}, 4788 {"i915_sr_status", i915_sr_status, 0}, 4789 {"i915_opregion", i915_opregion, 0}, 4790 {"i915_vbt", i915_vbt, 0}, 4791 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4792 {"i915_context_status", i915_context_status, 0}, 4793 {"i915_dump_lrc", i915_dump_lrc, 0}, 4794 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 4795 {"i915_swizzle_info", i915_swizzle_info, 0}, 4796 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4797 {"i915_llc", i915_llc, 0}, 4798 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 4799 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 4800 {"i915_energy_uJ", i915_energy_uJ, 0}, 4801 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 4802 {"i915_power_domain_info", i915_power_domain_info, 0}, 4803 {"i915_dmc_info", i915_dmc_info, 0}, 4804 {"i915_display_info", i915_display_info, 0}, 4805 {"i915_engine_info", i915_engine_info, 0}, 4806 {"i915_semaphore_status", i915_semaphore_status, 0}, 4807 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4808 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4809 {"i915_wa_registers", i915_wa_registers, 0}, 4810 {"i915_ddb_info", i915_ddb_info, 0}, 4811 {"i915_sseu_status", i915_sseu_status, 0}, 4812 {"i915_drrs_status", i915_drrs_status, 0}, 4813 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 4814 }; 4815 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4816 4817 static const struct i915_debugfs_files { 4818 const char *name; 4819 const struct file_operations *fops; 4820 } i915_debugfs_files[] = { 4821 {"i915_wedged", &i915_wedged_fops}, 4822 {"i915_max_freq", &i915_max_freq_fops}, 4823 {"i915_min_freq", &i915_min_freq_fops}, 4824 {"i915_cache_sharing", &i915_cache_sharing_fops}, 4825 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 4826 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 4827 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 4828 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 4829 {"i915_error_state", &i915_error_state_fops}, 4830 {"i915_gpu_info", &i915_gpu_info_fops}, 4831 #endif 4832 {"i915_next_seqno", &i915_next_seqno_fops}, 4833 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 4834 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4835 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4836 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4837 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 4838 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 4839 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 4840 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 4841 {"i915_guc_log_control", &i915_guc_log_control_fops}, 4842 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops} 4843 }; 4844 4845 int i915_debugfs_register(struct drm_i915_private *dev_priv) 4846 { 4847 struct drm_minor *minor = dev_priv->drm.primary; 4848 struct dentry *ent; 4849 int ret, i; 4850 4851 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, 4852 minor->debugfs_root, to_i915(minor->dev), 4853 &i915_forcewake_fops); 4854 if (!ent) 4855 return -ENOMEM; 4856 4857 ret = intel_pipe_crc_create(minor); 4858 if (ret) 4859 return ret; 4860 4861 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4862 ent = debugfs_create_file(i915_debugfs_files[i].name, 4863 S_IRUGO | S_IWUSR, 4864 minor->debugfs_root, 4865 to_i915(minor->dev), 4866 i915_debugfs_files[i].fops); 4867 if (!ent) 4868 return -ENOMEM; 4869 } 4870 4871 return drm_debugfs_create_files(i915_debugfs_list, 4872 I915_DEBUGFS_ENTRIES, 4873 minor->debugfs_root, minor); 4874 } 4875 4876 struct dpcd_block { 4877 /* DPCD dump start address. */ 4878 unsigned int offset; 4879 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 4880 unsigned int end; 4881 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 4882 size_t size; 4883 /* Only valid for eDP. */ 4884 bool edp; 4885 }; 4886 4887 static const struct dpcd_block i915_dpcd_debug[] = { 4888 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 4889 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 4890 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 4891 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 4892 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 4893 { .offset = DP_SET_POWER }, 4894 { .offset = DP_EDP_DPCD_REV }, 4895 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 4896 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 4897 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 4898 }; 4899 4900 static int i915_dpcd_show(struct seq_file *m, void *data) 4901 { 4902 struct drm_connector *connector = m->private; 4903 struct intel_dp *intel_dp = 4904 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4905 uint8_t buf[16]; 4906 ssize_t err; 4907 int i; 4908 4909 if (connector->status != connector_status_connected) 4910 return -ENODEV; 4911 4912 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 4913 const struct dpcd_block *b = &i915_dpcd_debug[i]; 4914 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 4915 4916 if (b->edp && 4917 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 4918 continue; 4919 4920 /* low tech for now */ 4921 if (WARN_ON(size > sizeof(buf))) 4922 continue; 4923 4924 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 4925 if (err <= 0) { 4926 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 4927 size, b->offset, err); 4928 continue; 4929 } 4930 4931 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 4932 } 4933 4934 return 0; 4935 } 4936 4937 static int i915_dpcd_open(struct inode *inode, struct file *file) 4938 { 4939 return single_open(file, i915_dpcd_show, inode->i_private); 4940 } 4941 4942 static const struct file_operations i915_dpcd_fops = { 4943 .owner = THIS_MODULE, 4944 .open = i915_dpcd_open, 4945 .read = seq_read, 4946 .llseek = seq_lseek, 4947 .release = single_release, 4948 }; 4949 4950 static int i915_panel_show(struct seq_file *m, void *data) 4951 { 4952 struct drm_connector *connector = m->private; 4953 struct intel_dp *intel_dp = 4954 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4955 4956 if (connector->status != connector_status_connected) 4957 return -ENODEV; 4958 4959 seq_printf(m, "Panel power up delay: %d\n", 4960 intel_dp->panel_power_up_delay); 4961 seq_printf(m, "Panel power down delay: %d\n", 4962 intel_dp->panel_power_down_delay); 4963 seq_printf(m, "Backlight on delay: %d\n", 4964 intel_dp->backlight_on_delay); 4965 seq_printf(m, "Backlight off delay: %d\n", 4966 intel_dp->backlight_off_delay); 4967 4968 return 0; 4969 } 4970 4971 static int i915_panel_open(struct inode *inode, struct file *file) 4972 { 4973 return single_open(file, i915_panel_show, inode->i_private); 4974 } 4975 4976 static const struct file_operations i915_panel_fops = { 4977 .owner = THIS_MODULE, 4978 .open = i915_panel_open, 4979 .read = seq_read, 4980 .llseek = seq_lseek, 4981 .release = single_release, 4982 }; 4983 4984 /** 4985 * i915_debugfs_connector_add - add i915 specific connector debugfs files 4986 * @connector: pointer to a registered drm_connector 4987 * 4988 * Cleanup will be done by drm_connector_unregister() through a call to 4989 * drm_debugfs_connector_remove(). 4990 * 4991 * Returns 0 on success, negative error codes on error. 4992 */ 4993 int i915_debugfs_connector_add(struct drm_connector *connector) 4994 { 4995 struct dentry *root = connector->debugfs_entry; 4996 4997 /* The connector must have been registered beforehands. */ 4998 if (!root) 4999 return -ENODEV; 5000 5001 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5002 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5003 debugfs_create_file("i915_dpcd", S_IRUGO, root, 5004 connector, &i915_dpcd_fops); 5005 5006 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5007 debugfs_create_file("i915_panel_timings", S_IRUGO, root, 5008 connector, &i915_panel_fops); 5009 5010 return 0; 5011 } 5012