1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/circ_buf.h> 31 #include <linux/ctype.h> 32 #include <linux/debugfs.h> 33 #include <linux/slab.h> 34 #include <linux/export.h> 35 #include <linux/list_sort.h> 36 #include <asm/msr-index.h> 37 #include <drm/drmP.h> 38 #include "intel_drv.h" 39 #include "intel_ringbuffer.h" 40 #include <drm/i915_drm.h> 41 #include "i915_drv.h" 42 43 enum { 44 ACTIVE_LIST, 45 INACTIVE_LIST, 46 PINNED_LIST, 47 }; 48 49 /* As the drm_debugfs_init() routines are called before dev->dev_private is 50 * allocated we need to hook into the minor for release. */ 51 static int 52 drm_add_fake_info_node(struct drm_minor *minor, 53 struct dentry *ent, 54 const void *key) 55 { 56 struct drm_info_node *node; 57 58 node = kmalloc(sizeof(*node), GFP_KERNEL); 59 if (node == NULL) { 60 debugfs_remove(ent); 61 return -ENOMEM; 62 } 63 64 node->minor = minor; 65 node->dent = ent; 66 node->info_ent = (void *) key; 67 68 mutex_lock(&minor->debugfs_lock); 69 list_add(&node->list, &minor->debugfs_list); 70 mutex_unlock(&minor->debugfs_lock); 71 72 return 0; 73 } 74 75 static int i915_capabilities(struct seq_file *m, void *data) 76 { 77 struct drm_info_node *node = m->private; 78 struct drm_device *dev = node->minor->dev; 79 const struct intel_device_info *info = INTEL_INFO(dev); 80 81 seq_printf(m, "gen: %d\n", info->gen); 82 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 83 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 84 #define SEP_SEMICOLON ; 85 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 86 #undef PRINT_FLAG 87 #undef SEP_SEMICOLON 88 89 return 0; 90 } 91 92 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 93 { 94 if (obj->pin_display) 95 return "p"; 96 else 97 return " "; 98 } 99 100 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 101 { 102 switch (obj->tiling_mode) { 103 default: 104 case I915_TILING_NONE: return " "; 105 case I915_TILING_X: return "X"; 106 case I915_TILING_Y: return "Y"; 107 } 108 } 109 110 static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 111 { 112 return i915_gem_obj_to_ggtt(obj) ? 
"g" : " "; 113 } 114 115 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 116 { 117 u64 size = 0; 118 struct i915_vma *vma; 119 120 list_for_each_entry(vma, &obj->vma_list, obj_link) { 121 if (vma->is_ggtt && drm_mm_node_allocated(&vma->node)) 122 size += vma->node.size; 123 } 124 125 return size; 126 } 127 128 static void 129 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 130 { 131 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 132 struct intel_engine_cs *ring; 133 struct i915_vma *vma; 134 int pin_count = 0; 135 int i; 136 137 seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", 138 &obj->base, 139 obj->active ? "*" : " ", 140 get_pin_flag(obj), 141 get_tiling_flag(obj), 142 get_global_flag(obj), 143 obj->base.size / 1024, 144 obj->base.read_domains, 145 obj->base.write_domain); 146 for_each_ring(ring, dev_priv, i) 147 seq_printf(m, "%x ", 148 i915_gem_request_get_seqno(obj->last_read_req[i])); 149 seq_printf(m, "] %x %x%s%s%s", 150 i915_gem_request_get_seqno(obj->last_write_req), 151 i915_gem_request_get_seqno(obj->last_fenced_req), 152 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), 153 obj->dirty ? " dirty" : "", 154 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 155 if (obj->base.name) 156 seq_printf(m, " (name: %d)", obj->base.name); 157 list_for_each_entry(vma, &obj->vma_list, obj_link) { 158 if (vma->pin_count > 0) 159 pin_count++; 160 } 161 seq_printf(m, " (pinned x %d)", pin_count); 162 if (obj->pin_display) 163 seq_printf(m, " (display)"); 164 if (obj->fence_reg != I915_FENCE_REG_NONE) 165 seq_printf(m, " (fence: %d)", obj->fence_reg); 166 list_for_each_entry(vma, &obj->vma_list, obj_link) { 167 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 168 vma->is_ggtt ? "g" : "pp", 169 vma->node.start, vma->node.size); 170 if (vma->is_ggtt) 171 seq_printf(m, ", type: %u", vma->ggtt_view.type); 172 seq_puts(m, ")"); 173 } 174 if (obj->stolen) 175 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 176 if (obj->pin_display || obj->fault_mappable) { 177 char s[3], *t = s; 178 if (obj->pin_display) 179 *t++ = 'p'; 180 if (obj->fault_mappable) 181 *t++ = 'f'; 182 *t = '\0'; 183 seq_printf(m, " (%s mappable)", s); 184 } 185 if (obj->last_write_req != NULL) 186 seq_printf(m, " (%s)", 187 i915_gem_request_get_ring(obj->last_write_req)->name); 188 if (obj->frontbuffer_bits) 189 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 190 } 191 192 static void describe_ctx(struct seq_file *m, struct intel_context *ctx) 193 { 194 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); 195 seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); 196 seq_putc(m, ' '); 197 } 198 199 static int i915_gem_object_list_info(struct seq_file *m, void *data) 200 { 201 struct drm_info_node *node = m->private; 202 uintptr_t list = (uintptr_t) node->info_ent->data; 203 struct list_head *head; 204 struct drm_device *dev = node->minor->dev; 205 struct drm_i915_private *dev_priv = dev->dev_private; 206 struct i915_address_space *vm = &dev_priv->gtt.base; 207 struct i915_vma *vma; 208 u64 total_obj_size, total_gtt_size; 209 int count, ret; 210 211 ret = mutex_lock_interruptible(&dev->struct_mutex); 212 if (ret) 213 return ret; 214 215 /* FIXME: the user of this interface might want more than just GGTT */ 216 switch (list) { 217 case ACTIVE_LIST: 218 seq_puts(m, "Active:\n"); 219 head = &vm->active_list; 220 break; 221 case INACTIVE_LIST: 222 seq_puts(m, "Inactive:\n"); 223 head = &vm->inactive_list; 224 break; 225 default: 226 mutex_unlock(&dev->struct_mutex); 227 return -EINVAL; 228 } 229 230 total_obj_size = total_gtt_size = count = 0; 231 list_for_each_entry(vma, head, vm_link) { 232 seq_printf(m, " "); 233 describe_obj(m, vma->obj); 234 seq_printf(m, "\n"); 235 total_obj_size += vma->obj->base.size; 236 total_gtt_size += vma->node.size; 237 count++; 238 } 239 mutex_unlock(&dev->struct_mutex); 240 241 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 242 count, total_obj_size, total_gtt_size); 243 return 0; 244 } 245 246 static int obj_rank_by_stolen(void *priv, 247 struct list_head *A, struct list_head *B) 248 { 249 struct drm_i915_gem_object *a = 250 container_of(A, struct drm_i915_gem_object, obj_exec_link); 251 struct drm_i915_gem_object *b = 252 container_of(B, struct drm_i915_gem_object, obj_exec_link); 253 254 if (a->stolen->start < b->stolen->start) 255 return -1; 256 if (a->stolen->start > b->stolen->start) 257 return 1; 258 return 0; 259 } 260 261 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 262 { 263 struct drm_info_node *node = m->private; 264 struct drm_device *dev = node->minor->dev; 265 struct drm_i915_private *dev_priv = dev->dev_private; 266 struct drm_i915_gem_object *obj; 267 u64 total_obj_size, total_gtt_size; 268 LIST_HEAD(stolen); 269 int count, ret; 270 271 ret = mutex_lock_interruptible(&dev->struct_mutex); 272 if (ret) 273 return ret; 274 275 total_obj_size = total_gtt_size = count = 0; 276 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 277 if (obj->stolen == NULL) 278 continue; 279 280 list_add(&obj->obj_exec_link, &stolen); 281 282 total_obj_size += obj->base.size; 283 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 284 count++; 285 } 286 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 287 if (obj->stolen == NULL) 288 continue; 289 290 list_add(&obj->obj_exec_link, &stolen); 291 292 total_obj_size += obj->base.size; 293 count++; 294 } 295 list_sort(NULL, &stolen, obj_rank_by_stolen); 296 seq_puts(m, "Stolen:\n"); 297 while (!list_empty(&stolen)) { 298 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); 299 seq_puts(m, " "); 300 describe_obj(m, obj); 301 seq_putc(m, '\n'); 302 list_del_init(&obj->obj_exec_link); 303 } 304 mutex_unlock(&dev->struct_mutex); 305 306 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 307 count, total_obj_size, total_gtt_size); 308 return 0; 309 } 310 311 #define count_objects(list, member) do { \ 312 list_for_each_entry(obj, list, member) { \ 313 size += i915_gem_obj_total_ggtt_size(obj); \ 314 ++count; \ 315 if (obj->map_and_fenceable) { \ 316 mappable_size += 
i915_gem_obj_ggtt_size(obj); \ 317 ++mappable_count; \ 318 } \ 319 } \ 320 } while (0) 321 322 struct file_stats { 323 struct drm_i915_file_private *file_priv; 324 unsigned long count; 325 u64 total, unbound; 326 u64 global, shared; 327 u64 active, inactive; 328 }; 329 330 static int per_file_stats(int id, void *ptr, void *data) 331 { 332 struct drm_i915_gem_object *obj = ptr; 333 struct file_stats *stats = data; 334 struct i915_vma *vma; 335 336 stats->count++; 337 stats->total += obj->base.size; 338 339 if (obj->base.name || obj->base.dma_buf) 340 stats->shared += obj->base.size; 341 342 if (USES_FULL_PPGTT(obj->base.dev)) { 343 list_for_each_entry(vma, &obj->vma_list, obj_link) { 344 struct i915_hw_ppgtt *ppgtt; 345 346 if (!drm_mm_node_allocated(&vma->node)) 347 continue; 348 349 if (vma->is_ggtt) { 350 stats->global += obj->base.size; 351 continue; 352 } 353 354 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 355 if (ppgtt->file_priv != stats->file_priv) 356 continue; 357 358 if (obj->active) /* XXX per-vma statistic */ 359 stats->active += obj->base.size; 360 else 361 stats->inactive += obj->base.size; 362 363 return 0; 364 } 365 } else { 366 if (i915_gem_obj_ggtt_bound(obj)) { 367 stats->global += obj->base.size; 368 if (obj->active) 369 stats->active += obj->base.size; 370 else 371 stats->inactive += obj->base.size; 372 return 0; 373 } 374 } 375 376 if (!list_empty(&obj->global_list)) 377 stats->unbound += obj->base.size; 378 379 return 0; 380 } 381 382 #define print_file_stats(m, name, stats) do { \ 383 if (stats.count) \ 384 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ 385 name, \ 386 stats.count, \ 387 stats.total, \ 388 stats.active, \ 389 stats.inactive, \ 390 stats.global, \ 391 stats.shared, \ 392 stats.unbound); \ 393 } while (0) 394 395 static void print_batch_pool_stats(struct seq_file *m, 396 struct drm_i915_private *dev_priv) 397 { 398 struct drm_i915_gem_object *obj; 399 struct file_stats stats; 400 struct intel_engine_cs *ring; 401 int i, j; 402 403 memset(&stats, 0, sizeof(stats)); 404 405 for_each_ring(ring, dev_priv, i) { 406 for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { 407 list_for_each_entry(obj, 408 &ring->batch_pool.cache_list[j], 409 batch_pool_link) 410 per_file_stats(0, obj, &stats); 411 } 412 } 413 414 print_file_stats(m, "[k]batch pool", stats); 415 } 416 417 #define count_vmas(list, member) do { \ 418 list_for_each_entry(vma, list, member) { \ 419 size += i915_gem_obj_total_ggtt_size(vma->obj); \ 420 ++count; \ 421 if (vma->obj->map_and_fenceable) { \ 422 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ 423 ++mappable_count; \ 424 } \ 425 } \ 426 } while (0) 427 428 static int i915_gem_object_info(struct seq_file *m, void* data) 429 { 430 struct drm_info_node *node = m->private; 431 struct drm_device *dev = node->minor->dev; 432 struct drm_i915_private *dev_priv = dev->dev_private; 433 u32 count, mappable_count, purgeable_count; 434 u64 size, mappable_size, purgeable_size; 435 struct drm_i915_gem_object *obj; 436 struct i915_address_space *vm = &dev_priv->gtt.base; 437 struct drm_file *file; 438 struct i915_vma *vma; 439 int ret; 440 441 ret = mutex_lock_interruptible(&dev->struct_mutex); 442 if (ret) 443 return ret; 444 445 seq_printf(m, "%u objects, %zu bytes\n", 446 dev_priv->mm.object_count, 447 dev_priv->mm.object_memory); 448 449 size = count = mappable_size = mappable_count = 0; 450 count_objects(&dev_priv->mm.bound_list, global_list); 451 
seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n", 452 count, mappable_count, size, mappable_size); 453 454 size = count = mappable_size = mappable_count = 0; 455 count_vmas(&vm->active_list, vm_link); 456 seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", 457 count, mappable_count, size, mappable_size); 458 459 size = count = mappable_size = mappable_count = 0; 460 count_vmas(&vm->inactive_list, vm_link); 461 seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", 462 count, mappable_count, size, mappable_size); 463 464 size = count = purgeable_size = purgeable_count = 0; 465 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 466 size += obj->base.size, ++count; 467 if (obj->madv == I915_MADV_DONTNEED) 468 purgeable_size += obj->base.size, ++purgeable_count; 469 } 470 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); 471 472 size = count = mappable_size = mappable_count = 0; 473 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 474 if (obj->fault_mappable) { 475 size += i915_gem_obj_ggtt_size(obj); 476 ++count; 477 } 478 if (obj->pin_display) { 479 mappable_size += i915_gem_obj_ggtt_size(obj); 480 ++mappable_count; 481 } 482 if (obj->madv == I915_MADV_DONTNEED) { 483 purgeable_size += obj->base.size; 484 ++purgeable_count; 485 } 486 } 487 seq_printf(m, "%u purgeable objects, %llu bytes\n", 488 purgeable_count, purgeable_size); 489 seq_printf(m, "%u pinned mappable objects, %llu bytes\n", 490 mappable_count, mappable_size); 491 seq_printf(m, "%u fault mappable objects, %llu bytes\n", 492 count, size); 493 494 seq_printf(m, "%llu [%llu] gtt total\n", 495 dev_priv->gtt.base.total, 496 (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); 497 498 seq_putc(m, '\n'); 499 print_batch_pool_stats(m, dev_priv); 500 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 501 struct file_stats stats; 502 struct task_struct *task; 503 504 memset(&stats, 0, sizeof(stats)); 505 stats.file_priv = file->driver_priv; 506 spin_lock(&file->table_lock); 507 idr_for_each(&file->object_idr, per_file_stats, &stats); 508 spin_unlock(&file->table_lock); 509 /* 510 * Although we have a valid reference on file->pid, that does 511 * not guarantee that the task_struct who called get_pid() is 512 * still alive (e.g. get_pid(current) => fork() => exit()). 513 * Therefore, we need to protect this ->comm access using RCU. 514 */ 515 rcu_read_lock(); 516 task = pid_task(file->pid, PIDTYPE_PID); 517 print_file_stats(m, task ? 
task->comm : "<unknown>", stats); 518 rcu_read_unlock(); 519 } 520 521 mutex_unlock(&dev->struct_mutex); 522 523 return 0; 524 } 525 526 static int i915_gem_gtt_info(struct seq_file *m, void *data) 527 { 528 struct drm_info_node *node = m->private; 529 struct drm_device *dev = node->minor->dev; 530 uintptr_t list = (uintptr_t) node->info_ent->data; 531 struct drm_i915_private *dev_priv = dev->dev_private; 532 struct drm_i915_gem_object *obj; 533 u64 total_obj_size, total_gtt_size; 534 int count, ret; 535 536 ret = mutex_lock_interruptible(&dev->struct_mutex); 537 if (ret) 538 return ret; 539 540 total_obj_size = total_gtt_size = count = 0; 541 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 542 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) 543 continue; 544 545 seq_puts(m, " "); 546 describe_obj(m, obj); 547 seq_putc(m, '\n'); 548 total_obj_size += obj->base.size; 549 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 550 count++; 551 } 552 553 mutex_unlock(&dev->struct_mutex); 554 555 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 556 count, total_obj_size, total_gtt_size); 557 558 return 0; 559 } 560 561 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 562 { 563 struct drm_info_node *node = m->private; 564 struct drm_device *dev = node->minor->dev; 565 struct drm_i915_private *dev_priv = dev->dev_private; 566 struct intel_crtc *crtc; 567 int ret; 568 569 ret = mutex_lock_interruptible(&dev->struct_mutex); 570 if (ret) 571 return ret; 572 573 for_each_intel_crtc(dev, crtc) { 574 const char pipe = pipe_name(crtc->pipe); 575 const char plane = plane_name(crtc->plane); 576 struct intel_unpin_work *work; 577 578 spin_lock_irq(&dev->event_lock); 579 work = crtc->unpin_work; 580 if (work == NULL) { 581 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 582 pipe, plane); 583 } else { 584 u32 addr; 585 586 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 587 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 588 pipe, plane); 589 } else { 590 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 591 pipe, plane); 592 } 593 if (work->flip_queued_req) { 594 struct intel_engine_cs *ring = 595 i915_gem_request_get_ring(work->flip_queued_req); 596 597 seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n", 598 ring->name, 599 i915_gem_request_get_seqno(work->flip_queued_req), 600 dev_priv->next_seqno, 601 ring->get_seqno(ring, true), 602 i915_gem_request_completed(work->flip_queued_req, true)); 603 } else 604 seq_printf(m, "Flip not associated with any ring\n"); 605 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 606 work->flip_queued_vblank, 607 work->flip_ready_vblank, 608 drm_crtc_vblank_count(&crtc->base)); 609 if (work->enable_stall_check) 610 seq_puts(m, "Stall check enabled, "); 611 else 612 seq_puts(m, "Stall check waiting for page flip ioctl, "); 613 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 614 615 if (INTEL_INFO(dev)->gen >= 4) 616 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane))); 617 else 618 addr = I915_READ(DSPADDR(crtc->plane)); 619 seq_printf(m, "Current scanout address 0x%08x\n", addr); 620 621 if (work->pending_flip_obj) { 622 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset); 623 seq_printf(m, "MMIO update completed? 
%d\n", addr == work->gtt_offset); 624 } 625 } 626 spin_unlock_irq(&dev->event_lock); 627 } 628 629 mutex_unlock(&dev->struct_mutex); 630 631 return 0; 632 } 633 634 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 635 { 636 struct drm_info_node *node = m->private; 637 struct drm_device *dev = node->minor->dev; 638 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_gem_object *obj; 640 struct intel_engine_cs *ring; 641 int total = 0; 642 int ret, i, j; 643 644 ret = mutex_lock_interruptible(&dev->struct_mutex); 645 if (ret) 646 return ret; 647 648 for_each_ring(ring, dev_priv, i) { 649 for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { 650 int count; 651 652 count = 0; 653 list_for_each_entry(obj, 654 &ring->batch_pool.cache_list[j], 655 batch_pool_link) 656 count++; 657 seq_printf(m, "%s cache[%d]: %d objects\n", 658 ring->name, j, count); 659 660 list_for_each_entry(obj, 661 &ring->batch_pool.cache_list[j], 662 batch_pool_link) { 663 seq_puts(m, " "); 664 describe_obj(m, obj); 665 seq_putc(m, '\n'); 666 } 667 668 total += count; 669 } 670 } 671 672 seq_printf(m, "total: %d\n", total); 673 674 mutex_unlock(&dev->struct_mutex); 675 676 return 0; 677 } 678 679 static int i915_gem_request_info(struct seq_file *m, void *data) 680 { 681 struct drm_info_node *node = m->private; 682 struct drm_device *dev = node->minor->dev; 683 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct intel_engine_cs *ring; 685 struct drm_i915_gem_request *req; 686 int ret, any, i; 687 688 ret = mutex_lock_interruptible(&dev->struct_mutex); 689 if (ret) 690 return ret; 691 692 any = 0; 693 for_each_ring(ring, dev_priv, i) { 694 int count; 695 696 count = 0; 697 list_for_each_entry(req, &ring->request_list, list) 698 count++; 699 if (count == 0) 700 continue; 701 702 seq_printf(m, "%s requests: %d\n", ring->name, count); 703 list_for_each_entry(req, &ring->request_list, list) { 704 struct task_struct *task; 705 706 rcu_read_lock(); 707 task = NULL; 708 if (req->pid) 709 task = pid_task(req->pid, PIDTYPE_PID); 710 seq_printf(m, " %x @ %d: %s [%d]\n", 711 req->seqno, 712 (int) (jiffies - req->emitted_jiffies), 713 task ? task->comm : "<unknown>", 714 task ? 
task->pid : -1); 715 rcu_read_unlock(); 716 } 717 718 any++; 719 } 720 mutex_unlock(&dev->struct_mutex); 721 722 if (any == 0) 723 seq_puts(m, "No requests\n"); 724 725 return 0; 726 } 727 728 static void i915_ring_seqno_info(struct seq_file *m, 729 struct intel_engine_cs *ring) 730 { 731 if (ring->get_seqno) { 732 seq_printf(m, "Current sequence (%s): %x\n", 733 ring->name, ring->get_seqno(ring, false)); 734 } 735 } 736 737 static int i915_gem_seqno_info(struct seq_file *m, void *data) 738 { 739 struct drm_info_node *node = m->private; 740 struct drm_device *dev = node->minor->dev; 741 struct drm_i915_private *dev_priv = dev->dev_private; 742 struct intel_engine_cs *ring; 743 int ret, i; 744 745 ret = mutex_lock_interruptible(&dev->struct_mutex); 746 if (ret) 747 return ret; 748 intel_runtime_pm_get(dev_priv); 749 750 for_each_ring(ring, dev_priv, i) 751 i915_ring_seqno_info(m, ring); 752 753 intel_runtime_pm_put(dev_priv); 754 mutex_unlock(&dev->struct_mutex); 755 756 return 0; 757 } 758 759 760 static int i915_interrupt_info(struct seq_file *m, void *data) 761 { 762 struct drm_info_node *node = m->private; 763 struct drm_device *dev = node->minor->dev; 764 struct drm_i915_private *dev_priv = dev->dev_private; 765 struct intel_engine_cs *ring; 766 int ret, i, pipe; 767 768 ret = mutex_lock_interruptible(&dev->struct_mutex); 769 if (ret) 770 return ret; 771 intel_runtime_pm_get(dev_priv); 772 773 if (IS_CHERRYVIEW(dev)) { 774 seq_printf(m, "Master Interrupt Control:\t%08x\n", 775 I915_READ(GEN8_MASTER_IRQ)); 776 777 seq_printf(m, "Display IER:\t%08x\n", 778 I915_READ(VLV_IER)); 779 seq_printf(m, "Display IIR:\t%08x\n", 780 I915_READ(VLV_IIR)); 781 seq_printf(m, "Display IIR_RW:\t%08x\n", 782 I915_READ(VLV_IIR_RW)); 783 seq_printf(m, "Display IMR:\t%08x\n", 784 I915_READ(VLV_IMR)); 785 for_each_pipe(dev_priv, pipe) 786 seq_printf(m, "Pipe %c stat:\t%08x\n", 787 pipe_name(pipe), 788 I915_READ(PIPESTAT(pipe))); 789 790 seq_printf(m, "Port hotplug:\t%08x\n", 791 I915_READ(PORT_HOTPLUG_EN)); 792 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 793 I915_READ(VLV_DPFLIPSTAT)); 794 seq_printf(m, "DPINVGTT:\t%08x\n", 795 I915_READ(DPINVGTT)); 796 797 for (i = 0; i < 4; i++) { 798 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 799 i, I915_READ(GEN8_GT_IMR(i))); 800 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 801 i, I915_READ(GEN8_GT_IIR(i))); 802 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 803 i, I915_READ(GEN8_GT_IER(i))); 804 } 805 806 seq_printf(m, "PCU interrupt mask:\t%08x\n", 807 I915_READ(GEN8_PCU_IMR)); 808 seq_printf(m, "PCU interrupt identity:\t%08x\n", 809 I915_READ(GEN8_PCU_IIR)); 810 seq_printf(m, "PCU interrupt enable:\t%08x\n", 811 I915_READ(GEN8_PCU_IER)); 812 } else if (INTEL_INFO(dev)->gen >= 8) { 813 seq_printf(m, "Master Interrupt Control:\t%08x\n", 814 I915_READ(GEN8_MASTER_IRQ)); 815 816 for (i = 0; i < 4; i++) { 817 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 818 i, I915_READ(GEN8_GT_IMR(i))); 819 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 820 i, I915_READ(GEN8_GT_IIR(i))); 821 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 822 i, I915_READ(GEN8_GT_IER(i))); 823 } 824 825 for_each_pipe(dev_priv, pipe) { 826 enum intel_display_power_domain power_domain; 827 828 power_domain = POWER_DOMAIN_PIPE(pipe); 829 if (!intel_display_power_get_if_enabled(dev_priv, 830 power_domain)) { 831 seq_printf(m, "Pipe %c power disabled\n", 832 pipe_name(pipe)); 833 continue; 834 } 835 seq_printf(m, "Pipe %c IMR:\t%08x\n", 836 pipe_name(pipe), 837 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 838 seq_printf(m, 
"Pipe %c IIR:\t%08x\n", 839 pipe_name(pipe), 840 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 841 seq_printf(m, "Pipe %c IER:\t%08x\n", 842 pipe_name(pipe), 843 I915_READ(GEN8_DE_PIPE_IER(pipe))); 844 845 intel_display_power_put(dev_priv, power_domain); 846 } 847 848 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 849 I915_READ(GEN8_DE_PORT_IMR)); 850 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 851 I915_READ(GEN8_DE_PORT_IIR)); 852 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 853 I915_READ(GEN8_DE_PORT_IER)); 854 855 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 856 I915_READ(GEN8_DE_MISC_IMR)); 857 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 858 I915_READ(GEN8_DE_MISC_IIR)); 859 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 860 I915_READ(GEN8_DE_MISC_IER)); 861 862 seq_printf(m, "PCU interrupt mask:\t%08x\n", 863 I915_READ(GEN8_PCU_IMR)); 864 seq_printf(m, "PCU interrupt identity:\t%08x\n", 865 I915_READ(GEN8_PCU_IIR)); 866 seq_printf(m, "PCU interrupt enable:\t%08x\n", 867 I915_READ(GEN8_PCU_IER)); 868 } else if (IS_VALLEYVIEW(dev)) { 869 seq_printf(m, "Display IER:\t%08x\n", 870 I915_READ(VLV_IER)); 871 seq_printf(m, "Display IIR:\t%08x\n", 872 I915_READ(VLV_IIR)); 873 seq_printf(m, "Display IIR_RW:\t%08x\n", 874 I915_READ(VLV_IIR_RW)); 875 seq_printf(m, "Display IMR:\t%08x\n", 876 I915_READ(VLV_IMR)); 877 for_each_pipe(dev_priv, pipe) 878 seq_printf(m, "Pipe %c stat:\t%08x\n", 879 pipe_name(pipe), 880 I915_READ(PIPESTAT(pipe))); 881 882 seq_printf(m, "Master IER:\t%08x\n", 883 I915_READ(VLV_MASTER_IER)); 884 885 seq_printf(m, "Render IER:\t%08x\n", 886 I915_READ(GTIER)); 887 seq_printf(m, "Render IIR:\t%08x\n", 888 I915_READ(GTIIR)); 889 seq_printf(m, "Render IMR:\t%08x\n", 890 I915_READ(GTIMR)); 891 892 seq_printf(m, "PM IER:\t\t%08x\n", 893 I915_READ(GEN6_PMIER)); 894 seq_printf(m, "PM IIR:\t\t%08x\n", 895 I915_READ(GEN6_PMIIR)); 896 seq_printf(m, "PM IMR:\t\t%08x\n", 897 I915_READ(GEN6_PMIMR)); 898 899 seq_printf(m, "Port hotplug:\t%08x\n", 900 I915_READ(PORT_HOTPLUG_EN)); 901 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 902 I915_READ(VLV_DPFLIPSTAT)); 903 seq_printf(m, "DPINVGTT:\t%08x\n", 904 I915_READ(DPINVGTT)); 905 906 } else if (!HAS_PCH_SPLIT(dev)) { 907 seq_printf(m, "Interrupt enable: %08x\n", 908 I915_READ(IER)); 909 seq_printf(m, "Interrupt identity: %08x\n", 910 I915_READ(IIR)); 911 seq_printf(m, "Interrupt mask: %08x\n", 912 I915_READ(IMR)); 913 for_each_pipe(dev_priv, pipe) 914 seq_printf(m, "Pipe %c stat: %08x\n", 915 pipe_name(pipe), 916 I915_READ(PIPESTAT(pipe))); 917 } else { 918 seq_printf(m, "North Display Interrupt enable: %08x\n", 919 I915_READ(DEIER)); 920 seq_printf(m, "North Display Interrupt identity: %08x\n", 921 I915_READ(DEIIR)); 922 seq_printf(m, "North Display Interrupt mask: %08x\n", 923 I915_READ(DEIMR)); 924 seq_printf(m, "South Display Interrupt enable: %08x\n", 925 I915_READ(SDEIER)); 926 seq_printf(m, "South Display Interrupt identity: %08x\n", 927 I915_READ(SDEIIR)); 928 seq_printf(m, "South Display Interrupt mask: %08x\n", 929 I915_READ(SDEIMR)); 930 seq_printf(m, "Graphics Interrupt enable: %08x\n", 931 I915_READ(GTIER)); 932 seq_printf(m, "Graphics Interrupt identity: %08x\n", 933 I915_READ(GTIIR)); 934 seq_printf(m, "Graphics Interrupt mask: %08x\n", 935 I915_READ(GTIMR)); 936 } 937 for_each_ring(ring, dev_priv, i) { 938 if (INTEL_INFO(dev)->gen >= 6) { 939 seq_printf(m, 940 "Graphics Interrupt mask (%s): %08x\n", 941 ring->name, I915_READ_IMR(ring)); 942 
} 943 i915_ring_seqno_info(m, ring); 944 } 945 intel_runtime_pm_put(dev_priv); 946 mutex_unlock(&dev->struct_mutex); 947 948 return 0; 949 } 950 951 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 952 { 953 struct drm_info_node *node = m->private; 954 struct drm_device *dev = node->minor->dev; 955 struct drm_i915_private *dev_priv = dev->dev_private; 956 int i, ret; 957 958 ret = mutex_lock_interruptible(&dev->struct_mutex); 959 if (ret) 960 return ret; 961 962 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 963 for (i = 0; i < dev_priv->num_fence_regs; i++) { 964 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 965 966 seq_printf(m, "Fence %d, pin count = %d, object = ", 967 i, dev_priv->fence_regs[i].pin_count); 968 if (obj == NULL) 969 seq_puts(m, "unused"); 970 else 971 describe_obj(m, obj); 972 seq_putc(m, '\n'); 973 } 974 975 mutex_unlock(&dev->struct_mutex); 976 return 0; 977 } 978 979 static int i915_hws_info(struct seq_file *m, void *data) 980 { 981 struct drm_info_node *node = m->private; 982 struct drm_device *dev = node->minor->dev; 983 struct drm_i915_private *dev_priv = dev->dev_private; 984 struct intel_engine_cs *ring; 985 const u32 *hws; 986 int i; 987 988 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 989 hws = ring->status_page.page_addr; 990 if (hws == NULL) 991 return 0; 992 993 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 994 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 995 i * 4, 996 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 997 } 998 return 0; 999 } 1000 1001 static ssize_t 1002 i915_error_state_write(struct file *filp, 1003 const char __user *ubuf, 1004 size_t cnt, 1005 loff_t *ppos) 1006 { 1007 struct i915_error_state_file_priv *error_priv = filp->private_data; 1008 struct drm_device *dev = error_priv->dev; 1009 int ret; 1010 1011 DRM_DEBUG_DRIVER("Resetting error state\n"); 1012 1013 ret = mutex_lock_interruptible(&dev->struct_mutex); 1014 if (ret) 1015 return ret; 1016 1017 i915_destroy_error_state(dev); 1018 mutex_unlock(&dev->struct_mutex); 1019 1020 return cnt; 1021 } 1022 1023 static int i915_error_state_open(struct inode *inode, struct file *file) 1024 { 1025 struct drm_device *dev = inode->i_private; 1026 struct i915_error_state_file_priv *error_priv; 1027 1028 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 1029 if (!error_priv) 1030 return -ENOMEM; 1031 1032 error_priv->dev = dev; 1033 1034 i915_error_state_get(dev, error_priv); 1035 1036 file->private_data = error_priv; 1037 1038 return 0; 1039 } 1040 1041 static int i915_error_state_release(struct inode *inode, struct file *file) 1042 { 1043 struct i915_error_state_file_priv *error_priv = file->private_data; 1044 1045 i915_error_state_put(error_priv); 1046 kfree(error_priv); 1047 1048 return 0; 1049 } 1050 1051 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 1052 size_t count, loff_t *pos) 1053 { 1054 struct i915_error_state_file_priv *error_priv = file->private_data; 1055 struct drm_i915_error_state_buf error_str; 1056 loff_t tmp_pos = 0; 1057 ssize_t ret_count = 0; 1058 int ret; 1059 1060 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos); 1061 if (ret) 1062 return ret; 1063 1064 ret = i915_error_state_to_str(&error_str, error_priv); 1065 if (ret) 1066 goto out; 1067 1068 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 1069 error_str.buf, 1070 error_str.bytes); 1071 1072 if (ret_count < 0) 1073 ret = ret_count; 1074 else 1075 *pos = 
error_str.start + ret_count; 1076 out: 1077 i915_error_state_buf_release(&error_str); 1078 return ret ?: ret_count; 1079 } 1080 1081 static const struct file_operations i915_error_state_fops = { 1082 .owner = THIS_MODULE, 1083 .open = i915_error_state_open, 1084 .read = i915_error_state_read, 1085 .write = i915_error_state_write, 1086 .llseek = default_llseek, 1087 .release = i915_error_state_release, 1088 }; 1089 1090 static int 1091 i915_next_seqno_get(void *data, u64 *val) 1092 { 1093 struct drm_device *dev = data; 1094 struct drm_i915_private *dev_priv = dev->dev_private; 1095 int ret; 1096 1097 ret = mutex_lock_interruptible(&dev->struct_mutex); 1098 if (ret) 1099 return ret; 1100 1101 *val = dev_priv->next_seqno; 1102 mutex_unlock(&dev->struct_mutex); 1103 1104 return 0; 1105 } 1106 1107 static int 1108 i915_next_seqno_set(void *data, u64 val) 1109 { 1110 struct drm_device *dev = data; 1111 int ret; 1112 1113 ret = mutex_lock_interruptible(&dev->struct_mutex); 1114 if (ret) 1115 return ret; 1116 1117 ret = i915_gem_set_seqno(dev, val); 1118 mutex_unlock(&dev->struct_mutex); 1119 1120 return ret; 1121 } 1122 1123 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1124 i915_next_seqno_get, i915_next_seqno_set, 1125 "0x%llx\n"); 1126 1127 static int i915_frequency_info(struct seq_file *m, void *unused) 1128 { 1129 struct drm_info_node *node = m->private; 1130 struct drm_device *dev = node->minor->dev; 1131 struct drm_i915_private *dev_priv = dev->dev_private; 1132 int ret = 0; 1133 1134 intel_runtime_pm_get(dev_priv); 1135 1136 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1137 1138 if (IS_GEN5(dev)) { 1139 u16 rgvswctl = I915_READ16(MEMSWCTL); 1140 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1141 1142 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1143 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1144 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1145 MEMSTAT_VID_SHIFT); 1146 seq_printf(m, "Current P-state: %d\n", 1147 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1148 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1149 u32 freq_sts; 1150 1151 mutex_lock(&dev_priv->rps.hw_lock); 1152 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1153 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1154 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1155 1156 seq_printf(m, "actual GPU freq: %d MHz\n", 1157 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1158 1159 seq_printf(m, "current GPU freq: %d MHz\n", 1160 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1161 1162 seq_printf(m, "max GPU freq: %d MHz\n", 1163 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1164 1165 seq_printf(m, "min GPU freq: %d MHz\n", 1166 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1167 1168 seq_printf(m, "idle GPU freq: %d MHz\n", 1169 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1170 1171 seq_printf(m, 1172 "efficient (RPe) frequency: %d MHz\n", 1173 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1174 mutex_unlock(&dev_priv->rps.hw_lock); 1175 } else if (INTEL_INFO(dev)->gen >= 6) { 1176 u32 rp_state_limits; 1177 u32 gt_perf_status; 1178 u32 rp_state_cap; 1179 u32 rpmodectl, rpinclimit, rpdeclimit; 1180 u32 rpstat, cagf, reqf; 1181 u32 rpupei, rpcurup, rpprevup; 1182 u32 rpdownei, rpcurdown, rpprevdown; 1183 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1184 int max_freq; 1185 1186 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1187 if (IS_BROXTON(dev)) { 1188 rp_state_cap = 
I915_READ(BXT_RP_STATE_CAP); 1189 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 1190 } else { 1191 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1192 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1193 } 1194 1195 /* RPSTAT1 is in the GT power well */ 1196 ret = mutex_lock_interruptible(&dev->struct_mutex); 1197 if (ret) 1198 goto out; 1199 1200 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1201 1202 reqf = I915_READ(GEN6_RPNSWREQ); 1203 if (IS_GEN9(dev)) 1204 reqf >>= 23; 1205 else { 1206 reqf &= ~GEN6_TURBO_DISABLE; 1207 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1208 reqf >>= 24; 1209 else 1210 reqf >>= 25; 1211 } 1212 reqf = intel_gpu_freq(dev_priv, reqf); 1213 1214 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1215 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1216 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1217 1218 rpstat = I915_READ(GEN6_RPSTAT1); 1219 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 1220 rpcurup = I915_READ(GEN6_RP_CUR_UP); 1221 rpprevup = I915_READ(GEN6_RP_PREV_UP); 1222 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 1223 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 1224 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 1225 if (IS_GEN9(dev)) 1226 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1227 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1228 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1229 else 1230 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1231 cagf = intel_gpu_freq(dev_priv, cagf); 1232 1233 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1234 mutex_unlock(&dev->struct_mutex); 1235 1236 if (IS_GEN6(dev) || IS_GEN7(dev)) { 1237 pm_ier = I915_READ(GEN6_PMIER); 1238 pm_imr = I915_READ(GEN6_PMIMR); 1239 pm_isr = I915_READ(GEN6_PMISR); 1240 pm_iir = I915_READ(GEN6_PMIIR); 1241 pm_mask = I915_READ(GEN6_PMINTRMSK); 1242 } else { 1243 pm_ier = I915_READ(GEN8_GT_IER(2)); 1244 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1245 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1246 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1247 pm_mask = I915_READ(GEN6_PMINTRMSK); 1248 } 1249 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1250 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1251 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1252 seq_printf(m, "Render p-state ratio: %d\n", 1253 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); 1254 seq_printf(m, "Render p-state VID: %d\n", 1255 gt_perf_status & 0xff); 1256 seq_printf(m, "Render p-state limit: %d\n", 1257 rp_state_limits & 0xff); 1258 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1259 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1260 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1261 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1262 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1263 seq_printf(m, "CAGF: %dMHz\n", cagf); 1264 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1265 GEN6_CURICONT_MASK); 1266 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1267 GEN6_CURBSYTAVG_MASK); 1268 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1269 GEN6_CURBSYTAVG_MASK); 1270 seq_printf(m, "Up threshold: %d%%\n", 1271 dev_priv->rps.up_threshold); 1272 1273 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1274 GEN6_CURIAVG_MASK); 1275 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1276 GEN6_CURBSYTAVG_MASK); 1277 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1278 GEN6_CURBSYTAVG_MASK); 1279 seq_printf(m, "Down threshold: %d%%\n", 1280 dev_priv->rps.down_threshold); 1281 1282 max_freq = (IS_BROXTON(dev) ? 
rp_state_cap >> 0 : 1283 rp_state_cap >> 16) & 0xff; 1284 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1285 GEN9_FREQ_SCALER : 1); 1286 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1287 intel_gpu_freq(dev_priv, max_freq)); 1288 1289 max_freq = (rp_state_cap & 0xff00) >> 8; 1290 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1291 GEN9_FREQ_SCALER : 1); 1292 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1293 intel_gpu_freq(dev_priv, max_freq)); 1294 1295 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : 1296 rp_state_cap >> 0) & 0xff; 1297 max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 1298 GEN9_FREQ_SCALER : 1); 1299 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1300 intel_gpu_freq(dev_priv, max_freq)); 1301 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1302 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1303 1304 seq_printf(m, "Current freq: %d MHz\n", 1305 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1306 seq_printf(m, "Actual freq: %d MHz\n", cagf); 1307 seq_printf(m, "Idle freq: %d MHz\n", 1308 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1309 seq_printf(m, "Min freq: %d MHz\n", 1310 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1311 seq_printf(m, "Max freq: %d MHz\n", 1312 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1313 seq_printf(m, 1314 "efficient (RPe) frequency: %d MHz\n", 1315 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1316 } else { 1317 seq_puts(m, "no P-state info available\n"); 1318 } 1319 1320 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq); 1321 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1322 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1323 1324 out: 1325 intel_runtime_pm_put(dev_priv); 1326 return ret; 1327 } 1328 1329 static int i915_hangcheck_info(struct seq_file *m, void *unused) 1330 { 1331 struct drm_info_node *node = m->private; 1332 struct drm_device *dev = node->minor->dev; 1333 struct drm_i915_private *dev_priv = dev->dev_private; 1334 struct intel_engine_cs *ring; 1335 u64 acthd[I915_NUM_RINGS]; 1336 u32 seqno[I915_NUM_RINGS]; 1337 u32 instdone[I915_NUM_INSTDONE_REG]; 1338 int i, j; 1339 1340 if (!i915.enable_hangcheck) { 1341 seq_printf(m, "Hangcheck disabled\n"); 1342 return 0; 1343 } 1344 1345 intel_runtime_pm_get(dev_priv); 1346 1347 for_each_ring(ring, dev_priv, i) { 1348 seqno[i] = ring->get_seqno(ring, false); 1349 acthd[i] = intel_ring_get_active_head(ring); 1350 } 1351 1352 i915_get_extra_instdone(dev, instdone); 1353 1354 intel_runtime_pm_put(dev_priv); 1355 1356 if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) { 1357 seq_printf(m, "Hangcheck active, fires in %dms\n", 1358 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - 1359 jiffies)); 1360 } else 1361 seq_printf(m, "Hangcheck inactive\n"); 1362 1363 for_each_ring(ring, dev_priv, i) { 1364 seq_printf(m, "%s:\n", ring->name); 1365 seq_printf(m, "\tseqno = %x [current %x]\n", 1366 ring->hangcheck.seqno, seqno[i]); 1367 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1368 (long long)ring->hangcheck.acthd, 1369 (long long)acthd[i]); 1370 seq_printf(m, "\tmax ACTHD = 0x%08llx\n", 1371 (long long)ring->hangcheck.max_acthd); 1372 seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); 1373 seq_printf(m, "\taction = %d\n", ring->hangcheck.action); 1374 1375 if (ring->id == RCS) { 1376 seq_puts(m, "\tinstdone read ="); 1377 1378 for (j = 0; j < I915_NUM_INSTDONE_REG; j++) 1379 
seq_printf(m, " 0x%08x", instdone[j]); 1380 1381 seq_puts(m, "\n\tinstdone accu ="); 1382 1383 for (j = 0; j < I915_NUM_INSTDONE_REG; j++) 1384 seq_printf(m, " 0x%08x", 1385 ring->hangcheck.instdone[j]); 1386 1387 seq_puts(m, "\n"); 1388 } 1389 } 1390 1391 return 0; 1392 } 1393 1394 static int ironlake_drpc_info(struct seq_file *m) 1395 { 1396 struct drm_info_node *node = m->private; 1397 struct drm_device *dev = node->minor->dev; 1398 struct drm_i915_private *dev_priv = dev->dev_private; 1399 u32 rgvmodectl, rstdbyctl; 1400 u16 crstandvid; 1401 int ret; 1402 1403 ret = mutex_lock_interruptible(&dev->struct_mutex); 1404 if (ret) 1405 return ret; 1406 intel_runtime_pm_get(dev_priv); 1407 1408 rgvmodectl = I915_READ(MEMMODECTL); 1409 rstdbyctl = I915_READ(RSTDBYCTL); 1410 crstandvid = I915_READ16(CRSTANDVID); 1411 1412 intel_runtime_pm_put(dev_priv); 1413 mutex_unlock(&dev->struct_mutex); 1414 1415 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); 1416 seq_printf(m, "Boost freq: %d\n", 1417 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1418 MEMMODE_BOOST_FREQ_SHIFT); 1419 seq_printf(m, "HW control enabled: %s\n", 1420 yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); 1421 seq_printf(m, "SW control enabled: %s\n", 1422 yesno(rgvmodectl & MEMMODE_SWMODE_EN)); 1423 seq_printf(m, "Gated voltage change: %s\n", 1424 yesno(rgvmodectl & MEMMODE_RCLK_GATE)); 1425 seq_printf(m, "Starting frequency: P%d\n", 1426 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1427 seq_printf(m, "Max P-state: P%d\n", 1428 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1429 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1430 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1431 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1432 seq_printf(m, "Render standby enabled: %s\n", 1433 yesno(!(rstdbyctl & RCX_SW_EXIT))); 1434 seq_puts(m, "Current RS state: "); 1435 switch (rstdbyctl & RSX_STATUS_MASK) { 1436 case RSX_STATUS_ON: 1437 seq_puts(m, "on\n"); 1438 break; 1439 case RSX_STATUS_RC1: 1440 seq_puts(m, "RC1\n"); 1441 break; 1442 case RSX_STATUS_RC1E: 1443 seq_puts(m, "RC1E\n"); 1444 break; 1445 case RSX_STATUS_RS1: 1446 seq_puts(m, "RS1\n"); 1447 break; 1448 case RSX_STATUS_RS2: 1449 seq_puts(m, "RS2 (RC6)\n"); 1450 break; 1451 case RSX_STATUS_RS3: 1452 seq_puts(m, "RC3 (RC6+)\n"); 1453 break; 1454 default: 1455 seq_puts(m, "unknown\n"); 1456 break; 1457 } 1458 1459 return 0; 1460 } 1461 1462 static int i915_forcewake_domains(struct seq_file *m, void *data) 1463 { 1464 struct drm_info_node *node = m->private; 1465 struct drm_device *dev = node->minor->dev; 1466 struct drm_i915_private *dev_priv = dev->dev_private; 1467 struct intel_uncore_forcewake_domain *fw_domain; 1468 int i; 1469 1470 spin_lock_irq(&dev_priv->uncore.lock); 1471 for_each_fw_domain(fw_domain, dev_priv, i) { 1472 seq_printf(m, "%s.wake_count = %u\n", 1473 intel_uncore_forcewake_domain_to_str(i), 1474 fw_domain->wake_count); 1475 } 1476 spin_unlock_irq(&dev_priv->uncore.lock); 1477 1478 return 0; 1479 } 1480 1481 static int vlv_drpc_info(struct seq_file *m) 1482 { 1483 struct drm_info_node *node = m->private; 1484 struct drm_device *dev = node->minor->dev; 1485 struct drm_i915_private *dev_priv = dev->dev_private; 1486 u32 rpmodectl1, rcctl1, pw_status; 1487 1488 intel_runtime_pm_get(dev_priv); 1489 1490 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1491 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1492 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1493 1494 intel_runtime_pm_put(dev_priv); 1495 1496 
seq_printf(m, "Video Turbo Mode: %s\n", 1497 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1498 seq_printf(m, "Turbo enabled: %s\n", 1499 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1500 seq_printf(m, "HW control enabled: %s\n", 1501 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1502 seq_printf(m, "SW control enabled: %s\n", 1503 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1504 GEN6_RP_MEDIA_SW_MODE)); 1505 seq_printf(m, "RC6 Enabled: %s\n", 1506 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1507 GEN6_RC_CTL_EI_MODE(1)))); 1508 seq_printf(m, "Render Power Well: %s\n", 1509 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1510 seq_printf(m, "Media Power Well: %s\n", 1511 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1512 1513 seq_printf(m, "Render RC6 residency since boot: %u\n", 1514 I915_READ(VLV_GT_RENDER_RC6)); 1515 seq_printf(m, "Media RC6 residency since boot: %u\n", 1516 I915_READ(VLV_GT_MEDIA_RC6)); 1517 1518 return i915_forcewake_domains(m, NULL); 1519 } 1520 1521 static int gen6_drpc_info(struct seq_file *m) 1522 { 1523 struct drm_info_node *node = m->private; 1524 struct drm_device *dev = node->minor->dev; 1525 struct drm_i915_private *dev_priv = dev->dev_private; 1526 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1527 unsigned forcewake_count; 1528 int count = 0, ret; 1529 1530 ret = mutex_lock_interruptible(&dev->struct_mutex); 1531 if (ret) 1532 return ret; 1533 intel_runtime_pm_get(dev_priv); 1534 1535 spin_lock_irq(&dev_priv->uncore.lock); 1536 forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count; 1537 spin_unlock_irq(&dev_priv->uncore.lock); 1538 1539 if (forcewake_count) { 1540 seq_puts(m, "RC information inaccurate because somebody " 1541 "holds a forcewake reference \n"); 1542 } else { 1543 /* NB: we cannot use forcewake, else we read the wrong values */ 1544 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1545 udelay(10); 1546 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1547 } 1548 1549 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1550 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1551 1552 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1553 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1554 mutex_unlock(&dev->struct_mutex); 1555 mutex_lock(&dev_priv->rps.hw_lock); 1556 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1557 mutex_unlock(&dev_priv->rps.hw_lock); 1558 1559 intel_runtime_pm_put(dev_priv); 1560 1561 seq_printf(m, "Video Turbo Mode: %s\n", 1562 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1563 seq_printf(m, "HW control enabled: %s\n", 1564 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1565 seq_printf(m, "SW control enabled: %s\n", 1566 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1567 GEN6_RP_MEDIA_SW_MODE)); 1568 seq_printf(m, "RC1e Enabled: %s\n", 1569 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1570 seq_printf(m, "RC6 Enabled: %s\n", 1571 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1572 seq_printf(m, "Deep RC6 Enabled: %s\n", 1573 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1574 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1575 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1576 seq_puts(m, "Current RC state: "); 1577 switch (gt_core_status & GEN6_RCn_MASK) { 1578 case GEN6_RC0: 1579 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1580 seq_puts(m, "Core Power Down\n"); 1581 else 1582 seq_puts(m, "on\n"); 1583 break; 1584 case GEN6_RC3: 1585 seq_puts(m, "RC3\n"); 1586 break; 1587 case GEN6_RC6: 1588 seq_puts(m, "RC6\n"); 1589 break; 1590 case GEN6_RC7: 1591 seq_puts(m, 
"RC7\n"); 1592 break; 1593 default: 1594 seq_puts(m, "Unknown\n"); 1595 break; 1596 } 1597 1598 seq_printf(m, "Core Power Down: %s\n", 1599 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1600 1601 /* Not exactly sure what this is */ 1602 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1603 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1604 seq_printf(m, "RC6 residency since boot: %u\n", 1605 I915_READ(GEN6_GT_GFX_RC6)); 1606 seq_printf(m, "RC6+ residency since boot: %u\n", 1607 I915_READ(GEN6_GT_GFX_RC6p)); 1608 seq_printf(m, "RC6++ residency since boot: %u\n", 1609 I915_READ(GEN6_GT_GFX_RC6pp)); 1610 1611 seq_printf(m, "RC6 voltage: %dmV\n", 1612 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1613 seq_printf(m, "RC6+ voltage: %dmV\n", 1614 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1615 seq_printf(m, "RC6++ voltage: %dmV\n", 1616 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1617 return 0; 1618 } 1619 1620 static int i915_drpc_info(struct seq_file *m, void *unused) 1621 { 1622 struct drm_info_node *node = m->private; 1623 struct drm_device *dev = node->minor->dev; 1624 1625 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 1626 return vlv_drpc_info(m); 1627 else if (INTEL_INFO(dev)->gen >= 6) 1628 return gen6_drpc_info(m); 1629 else 1630 return ironlake_drpc_info(m); 1631 } 1632 1633 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1634 { 1635 struct drm_info_node *node = m->private; 1636 struct drm_device *dev = node->minor->dev; 1637 struct drm_i915_private *dev_priv = dev->dev_private; 1638 1639 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1640 dev_priv->fb_tracking.busy_bits); 1641 1642 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1643 dev_priv->fb_tracking.flip_bits); 1644 1645 return 0; 1646 } 1647 1648 static int i915_fbc_status(struct seq_file *m, void *unused) 1649 { 1650 struct drm_info_node *node = m->private; 1651 struct drm_device *dev = node->minor->dev; 1652 struct drm_i915_private *dev_priv = dev->dev_private; 1653 1654 if (!HAS_FBC(dev)) { 1655 seq_puts(m, "FBC unsupported on this chipset\n"); 1656 return 0; 1657 } 1658 1659 intel_runtime_pm_get(dev_priv); 1660 mutex_lock(&dev_priv->fbc.lock); 1661 1662 if (intel_fbc_is_active(dev_priv)) 1663 seq_puts(m, "FBC enabled\n"); 1664 else 1665 seq_printf(m, "FBC disabled: %s\n", 1666 dev_priv->fbc.no_fbc_reason); 1667 1668 if (INTEL_INFO(dev_priv)->gen >= 7) 1669 seq_printf(m, "Compressing: %s\n", 1670 yesno(I915_READ(FBC_STATUS2) & 1671 FBC_COMPRESSION_MASK)); 1672 1673 mutex_unlock(&dev_priv->fbc.lock); 1674 intel_runtime_pm_put(dev_priv); 1675 1676 return 0; 1677 } 1678 1679 static int i915_fbc_fc_get(void *data, u64 *val) 1680 { 1681 struct drm_device *dev = data; 1682 struct drm_i915_private *dev_priv = dev->dev_private; 1683 1684 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1685 return -ENODEV; 1686 1687 *val = dev_priv->fbc.false_color; 1688 1689 return 0; 1690 } 1691 1692 static int i915_fbc_fc_set(void *data, u64 val) 1693 { 1694 struct drm_device *dev = data; 1695 struct drm_i915_private *dev_priv = dev->dev_private; 1696 u32 reg; 1697 1698 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1699 return -ENODEV; 1700 1701 mutex_lock(&dev_priv->fbc.lock); 1702 1703 reg = I915_READ(ILK_DPFC_CONTROL); 1704 dev_priv->fbc.false_color = val; 1705 1706 I915_WRITE(ILK_DPFC_CONTROL, val ? 
1707 (reg | FBC_CTL_FALSE_COLOR) : 1708 (reg & ~FBC_CTL_FALSE_COLOR)); 1709 1710 mutex_unlock(&dev_priv->fbc.lock); 1711 return 0; 1712 } 1713 1714 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, 1715 i915_fbc_fc_get, i915_fbc_fc_set, 1716 "%llu\n"); 1717 1718 static int i915_ips_status(struct seq_file *m, void *unused) 1719 { 1720 struct drm_info_node *node = m->private; 1721 struct drm_device *dev = node->minor->dev; 1722 struct drm_i915_private *dev_priv = dev->dev_private; 1723 1724 if (!HAS_IPS(dev)) { 1725 seq_puts(m, "not supported\n"); 1726 return 0; 1727 } 1728 1729 intel_runtime_pm_get(dev_priv); 1730 1731 seq_printf(m, "Enabled by kernel parameter: %s\n", 1732 yesno(i915.enable_ips)); 1733 1734 if (INTEL_INFO(dev)->gen >= 8) { 1735 seq_puts(m, "Currently: unknown\n"); 1736 } else { 1737 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1738 seq_puts(m, "Currently: enabled\n"); 1739 else 1740 seq_puts(m, "Currently: disabled\n"); 1741 } 1742 1743 intel_runtime_pm_put(dev_priv); 1744 1745 return 0; 1746 } 1747 1748 static int i915_sr_status(struct seq_file *m, void *unused) 1749 { 1750 struct drm_info_node *node = m->private; 1751 struct drm_device *dev = node->minor->dev; 1752 struct drm_i915_private *dev_priv = dev->dev_private; 1753 bool sr_enabled = false; 1754 1755 intel_runtime_pm_get(dev_priv); 1756 1757 if (HAS_PCH_SPLIT(dev)) 1758 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1759 else if (IS_CRESTLINE(dev) || IS_G4X(dev) || 1760 IS_I945G(dev) || IS_I945GM(dev)) 1761 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1762 else if (IS_I915GM(dev)) 1763 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1764 else if (IS_PINEVIEW(dev)) 1765 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1766 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 1767 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 1768 1769 intel_runtime_pm_put(dev_priv); 1770 1771 seq_printf(m, "self-refresh: %s\n", 1772 sr_enabled ? 
"enabled" : "disabled"); 1773 1774 return 0; 1775 } 1776 1777 static int i915_emon_status(struct seq_file *m, void *unused) 1778 { 1779 struct drm_info_node *node = m->private; 1780 struct drm_device *dev = node->minor->dev; 1781 struct drm_i915_private *dev_priv = dev->dev_private; 1782 unsigned long temp, chipset, gfx; 1783 int ret; 1784 1785 if (!IS_GEN5(dev)) 1786 return -ENODEV; 1787 1788 ret = mutex_lock_interruptible(&dev->struct_mutex); 1789 if (ret) 1790 return ret; 1791 1792 temp = i915_mch_val(dev_priv); 1793 chipset = i915_chipset_val(dev_priv); 1794 gfx = i915_gfx_val(dev_priv); 1795 mutex_unlock(&dev->struct_mutex); 1796 1797 seq_printf(m, "GMCH temp: %ld\n", temp); 1798 seq_printf(m, "Chipset power: %ld\n", chipset); 1799 seq_printf(m, "GFX power: %ld\n", gfx); 1800 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1801 1802 return 0; 1803 } 1804 1805 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1806 { 1807 struct drm_info_node *node = m->private; 1808 struct drm_device *dev = node->minor->dev; 1809 struct drm_i915_private *dev_priv = dev->dev_private; 1810 int ret = 0; 1811 int gpu_freq, ia_freq; 1812 unsigned int max_gpu_freq, min_gpu_freq; 1813 1814 if (!HAS_CORE_RING_FREQ(dev)) { 1815 seq_puts(m, "unsupported on this chipset\n"); 1816 return 0; 1817 } 1818 1819 intel_runtime_pm_get(dev_priv); 1820 1821 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1822 1823 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1824 if (ret) 1825 goto out; 1826 1827 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 1828 /* Convert GT frequency to 50 HZ units */ 1829 min_gpu_freq = 1830 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1831 max_gpu_freq = 1832 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; 1833 } else { 1834 min_gpu_freq = dev_priv->rps.min_freq_softlimit; 1835 max_gpu_freq = dev_priv->rps.max_freq_softlimit; 1836 } 1837 1838 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1839 1840 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1841 ia_freq = gpu_freq; 1842 sandybridge_pcode_read(dev_priv, 1843 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1844 &ia_freq); 1845 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1846 intel_gpu_freq(dev_priv, (gpu_freq * 1847 (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ? 
1848 GEN9_FREQ_SCALER : 1))), 1849 ((ia_freq >> 0) & 0xff) * 100, 1850 ((ia_freq >> 8) & 0xff) * 100); 1851 } 1852 1853 mutex_unlock(&dev_priv->rps.hw_lock); 1854 1855 out: 1856 intel_runtime_pm_put(dev_priv); 1857 return ret; 1858 } 1859 1860 static int i915_opregion(struct seq_file *m, void *unused) 1861 { 1862 struct drm_info_node *node = m->private; 1863 struct drm_device *dev = node->minor->dev; 1864 struct drm_i915_private *dev_priv = dev->dev_private; 1865 struct intel_opregion *opregion = &dev_priv->opregion; 1866 int ret; 1867 1868 ret = mutex_lock_interruptible(&dev->struct_mutex); 1869 if (ret) 1870 goto out; 1871 1872 if (opregion->header) 1873 seq_write(m, opregion->header, OPREGION_SIZE); 1874 1875 mutex_unlock(&dev->struct_mutex); 1876 1877 out: 1878 return 0; 1879 } 1880 1881 static int i915_vbt(struct seq_file *m, void *unused) 1882 { 1883 struct drm_info_node *node = m->private; 1884 struct drm_device *dev = node->minor->dev; 1885 struct drm_i915_private *dev_priv = dev->dev_private; 1886 struct intel_opregion *opregion = &dev_priv->opregion; 1887 1888 if (opregion->vbt) 1889 seq_write(m, opregion->vbt, opregion->vbt_size); 1890 1891 return 0; 1892 } 1893 1894 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1895 { 1896 struct drm_info_node *node = m->private; 1897 struct drm_device *dev = node->minor->dev; 1898 struct intel_framebuffer *fbdev_fb = NULL; 1899 struct drm_framebuffer *drm_fb; 1900 1901 #ifdef CONFIG_DRM_FBDEV_EMULATION 1902 if (to_i915(dev)->fbdev) { 1903 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); 1904 1905 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1906 fbdev_fb->base.width, 1907 fbdev_fb->base.height, 1908 fbdev_fb->base.depth, 1909 fbdev_fb->base.bits_per_pixel, 1910 fbdev_fb->base.modifier[0], 1911 atomic_read(&fbdev_fb->base.refcount.refcount)); 1912 describe_obj(m, fbdev_fb->obj); 1913 seq_putc(m, '\n'); 1914 } 1915 #endif 1916 1917 mutex_lock(&dev->mode_config.fb_lock); 1918 drm_for_each_fb(drm_fb, dev) { 1919 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1920 if (fb == fbdev_fb) 1921 continue; 1922 1923 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1924 fb->base.width, 1925 fb->base.height, 1926 fb->base.depth, 1927 fb->base.bits_per_pixel, 1928 fb->base.modifier[0], 1929 atomic_read(&fb->base.refcount.refcount)); 1930 describe_obj(m, fb->obj); 1931 seq_putc(m, '\n'); 1932 } 1933 mutex_unlock(&dev->mode_config.fb_lock); 1934 1935 return 0; 1936 } 1937 1938 static void describe_ctx_ringbuf(struct seq_file *m, 1939 struct intel_ringbuffer *ringbuf) 1940 { 1941 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 1942 ringbuf->space, ringbuf->head, ringbuf->tail, 1943 ringbuf->last_retired_head); 1944 } 1945 1946 static int i915_context_status(struct seq_file *m, void *unused) 1947 { 1948 struct drm_info_node *node = m->private; 1949 struct drm_device *dev = node->minor->dev; 1950 struct drm_i915_private *dev_priv = dev->dev_private; 1951 struct intel_engine_cs *ring; 1952 struct intel_context *ctx; 1953 int ret, i; 1954 1955 ret = mutex_lock_interruptible(&dev->struct_mutex); 1956 if (ret) 1957 return ret; 1958 1959 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1960 if (!i915.enable_execlists && 1961 ctx->legacy_hw_ctx.rcs_state == NULL) 1962 continue; 1963 1964 seq_puts(m, "HW context "); 1965 describe_ctx(m, ctx); 1966 if (ctx == dev_priv->kernel_context) 1967 
seq_printf(m, "(kernel context) "); 1968 1969 if (i915.enable_execlists) { 1970 seq_putc(m, '\n'); 1971 for_each_ring(ring, dev_priv, i) { 1972 struct drm_i915_gem_object *ctx_obj = 1973 ctx->engine[i].state; 1974 struct intel_ringbuffer *ringbuf = 1975 ctx->engine[i].ringbuf; 1976 1977 seq_printf(m, "%s: ", ring->name); 1978 if (ctx_obj) 1979 describe_obj(m, ctx_obj); 1980 if (ringbuf) 1981 describe_ctx_ringbuf(m, ringbuf); 1982 seq_putc(m, '\n'); 1983 } 1984 } else { 1985 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 1986 } 1987 1988 seq_putc(m, '\n'); 1989 } 1990 1991 mutex_unlock(&dev->struct_mutex); 1992 1993 return 0; 1994 } 1995 1996 static void i915_dump_lrc_obj(struct seq_file *m, 1997 struct intel_context *ctx, 1998 struct intel_engine_cs *ring) 1999 { 2000 struct page *page; 2001 uint32_t *reg_state; 2002 int j; 2003 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; 2004 unsigned long ggtt_offset = 0; 2005 2006 if (ctx_obj == NULL) { 2007 seq_printf(m, "Context on %s with no gem object\n", 2008 ring->name); 2009 return; 2010 } 2011 2012 seq_printf(m, "CONTEXT: %s %u\n", ring->name, 2013 intel_execlists_ctx_id(ctx, ring)); 2014 2015 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2016 seq_puts(m, "\tNot bound in GGTT\n"); 2017 else 2018 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); 2019 2020 if (i915_gem_object_get_pages(ctx_obj)) { 2021 seq_puts(m, "\tFailed to get pages for context object\n"); 2022 return; 2023 } 2024 2025 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); 2026 if (!WARN_ON(page == NULL)) { 2027 reg_state = kmap_atomic(page); 2028 2029 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2030 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2031 ggtt_offset + 4096 + (j * 4), 2032 reg_state[j], reg_state[j + 1], 2033 reg_state[j + 2], reg_state[j + 3]); 2034 } 2035 kunmap_atomic(reg_state); 2036 } 2037 2038 seq_putc(m, '\n'); 2039 } 2040 2041 static int i915_dump_lrc(struct seq_file *m, void *unused) 2042 { 2043 struct drm_info_node *node = (struct drm_info_node *) m->private; 2044 struct drm_device *dev = node->minor->dev; 2045 struct drm_i915_private *dev_priv = dev->dev_private; 2046 struct intel_engine_cs *ring; 2047 struct intel_context *ctx; 2048 int ret, i; 2049 2050 if (!i915.enable_execlists) { 2051 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2052 return 0; 2053 } 2054 2055 ret = mutex_lock_interruptible(&dev->struct_mutex); 2056 if (ret) 2057 return ret; 2058 2059 list_for_each_entry(ctx, &dev_priv->context_list, link) 2060 if (ctx != dev_priv->kernel_context) 2061 for_each_ring(ring, dev_priv, i) 2062 i915_dump_lrc_obj(m, ctx, ring); 2063 2064 mutex_unlock(&dev->struct_mutex); 2065 2066 return 0; 2067 } 2068 2069 static int i915_execlists(struct seq_file *m, void *data) 2070 { 2071 struct drm_info_node *node = (struct drm_info_node *)m->private; 2072 struct drm_device *dev = node->minor->dev; 2073 struct drm_i915_private *dev_priv = dev->dev_private; 2074 struct intel_engine_cs *ring; 2075 u32 status_pointer; 2076 u8 read_pointer; 2077 u8 write_pointer; 2078 u32 status; 2079 u32 ctx_id; 2080 struct list_head *cursor; 2081 int ring_id, i; 2082 int ret; 2083 2084 if (!i915.enable_execlists) { 2085 seq_puts(m, "Logical Ring Contexts are disabled\n"); 2086 return 0; 2087 } 2088 2089 ret = mutex_lock_interruptible(&dev->struct_mutex); 2090 if (ret) 2091 return ret; 2092 2093 intel_runtime_pm_get(dev_priv); 2094 2095 for_each_ring(ring, dev_priv, ring_id) { 2096 struct drm_i915_gem_request *head_req = NULL; 2097 int count 
= 0;
		unsigned long flags;

		seq_printf(m, "%s\n", ring->name);

		status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
			   status, ctx_id);

		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);

		read_pointer = ring->next_context_status_buffer;
		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
		if (read_pointer > write_pointer)
			write_pointer += GEN8_CSB_ENTRIES;
		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
			   read_pointer, write_pointer);

		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));

			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
				   i, status, ctx_id);
		}

		spin_lock_irqsave(&ring->execlist_lock, flags);
		list_for_each(cursor, &ring->execlist_queue)
			count++;
		head_req = list_first_entry_or_null(&ring->execlist_queue,
				struct drm_i915_gem_request, execlist_link);
		spin_unlock_irqrestore(&ring->execlist_lock, flags);

		seq_printf(m, "\t%d requests in queue\n", count);
		if (head_req) {
			seq_printf(m, "\tHead request id: %u\n",
				   intel_execlists_ctx_id(head_req->ctx, ring));
			seq_printf(m, "\tHead request tail: %u\n",
				   head_req->tail);
		}

		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DCC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if
(INTEL_INFO(dev)->gen >= 8) 2209 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2210 I915_READ(GAMTARBMODE)); 2211 else 2212 seq_printf(m, "ARB_MODE = 0x%08x\n", 2213 I915_READ(ARB_MODE)); 2214 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2215 I915_READ(DISP_ARB_CTL)); 2216 } 2217 2218 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2219 seq_puts(m, "L-shaped memory detected\n"); 2220 2221 intel_runtime_pm_put(dev_priv); 2222 mutex_unlock(&dev->struct_mutex); 2223 2224 return 0; 2225 } 2226 2227 static int per_file_ctx(int id, void *ptr, void *data) 2228 { 2229 struct intel_context *ctx = ptr; 2230 struct seq_file *m = data; 2231 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2232 2233 if (!ppgtt) { 2234 seq_printf(m, " no ppgtt for context %d\n", 2235 ctx->user_handle); 2236 return 0; 2237 } 2238 2239 if (i915_gem_context_is_default(ctx)) 2240 seq_puts(m, " default context:\n"); 2241 else 2242 seq_printf(m, " context %d:\n", ctx->user_handle); 2243 ppgtt->debug_dump(ppgtt, m); 2244 2245 return 0; 2246 } 2247 2248 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2249 { 2250 struct drm_i915_private *dev_priv = dev->dev_private; 2251 struct intel_engine_cs *ring; 2252 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2253 int unused, i; 2254 2255 if (!ppgtt) 2256 return; 2257 2258 for_each_ring(ring, dev_priv, unused) { 2259 seq_printf(m, "%s\n", ring->name); 2260 for (i = 0; i < 4; i++) { 2261 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i)); 2262 pdp <<= 32; 2263 pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i)); 2264 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2265 } 2266 } 2267 } 2268 2269 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2270 { 2271 struct drm_i915_private *dev_priv = dev->dev_private; 2272 struct intel_engine_cs *ring; 2273 int i; 2274 2275 if (INTEL_INFO(dev)->gen == 6) 2276 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2277 2278 for_each_ring(ring, dev_priv, i) { 2279 seq_printf(m, "%s\n", ring->name); 2280 if (INTEL_INFO(dev)->gen == 7) 2281 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 2282 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 2283 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 2284 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 2285 } 2286 if (dev_priv->mm.aliasing_ppgtt) { 2287 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2288 2289 seq_puts(m, "aliasing PPGTT:\n"); 2290 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2291 2292 ppgtt->debug_dump(ppgtt, m); 2293 } 2294 2295 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2296 } 2297 2298 static int i915_ppgtt_info(struct seq_file *m, void *data) 2299 { 2300 struct drm_info_node *node = m->private; 2301 struct drm_device *dev = node->minor->dev; 2302 struct drm_i915_private *dev_priv = dev->dev_private; 2303 struct drm_file *file; 2304 2305 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2306 if (ret) 2307 return ret; 2308 intel_runtime_pm_get(dev_priv); 2309 2310 if (INTEL_INFO(dev)->gen >= 8) 2311 gen8_ppgtt_info(m, dev); 2312 else if (INTEL_INFO(dev)->gen >= 6) 2313 gen6_ppgtt_info(m, dev); 2314 2315 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2316 struct drm_i915_file_private *file_priv = file->driver_priv; 2317 struct task_struct *task; 2318 2319 task = get_pid_task(file->pid, PIDTYPE_PID); 2320 if (!task) { 2321 ret = -ESRCH; 2322 goto out_put; 2323 } 2324 seq_printf(m, 
"\nproc: %s\n", task->comm); 2325 put_task_struct(task); 2326 idr_for_each(&file_priv->context_idr, per_file_ctx, 2327 (void *)(unsigned long)m); 2328 } 2329 2330 out_put: 2331 intel_runtime_pm_put(dev_priv); 2332 mutex_unlock(&dev->struct_mutex); 2333 2334 return ret; 2335 } 2336 2337 static int count_irq_waiters(struct drm_i915_private *i915) 2338 { 2339 struct intel_engine_cs *ring; 2340 int count = 0; 2341 int i; 2342 2343 for_each_ring(ring, i915, i) 2344 count += ring->irq_refcount; 2345 2346 return count; 2347 } 2348 2349 static int i915_rps_boost_info(struct seq_file *m, void *data) 2350 { 2351 struct drm_info_node *node = m->private; 2352 struct drm_device *dev = node->minor->dev; 2353 struct drm_i915_private *dev_priv = dev->dev_private; 2354 struct drm_file *file; 2355 2356 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2357 seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); 2358 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2359 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2360 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 2361 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2362 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2363 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2364 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2365 spin_lock(&dev_priv->rps.client_lock); 2366 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2367 struct drm_i915_file_private *file_priv = file->driver_priv; 2368 struct task_struct *task; 2369 2370 rcu_read_lock(); 2371 task = pid_task(file->pid, PIDTYPE_PID); 2372 seq_printf(m, "%s [%d]: %d boosts%s\n", 2373 task ? task->comm : "<unknown>", 2374 task ? task->pid : -1, 2375 file_priv->rps.boosts, 2376 list_empty(&file_priv->rps.link) ? "" : ", active"); 2377 rcu_read_unlock(); 2378 } 2379 seq_printf(m, "Semaphore boosts: %d%s\n", 2380 dev_priv->rps.semaphores.boosts, 2381 list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active"); 2382 seq_printf(m, "MMIO flip boosts: %d%s\n", 2383 dev_priv->rps.mmioflips.boosts, 2384 list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); 2385 seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); 2386 spin_unlock(&dev_priv->rps.client_lock); 2387 2388 return 0; 2389 } 2390 2391 static int i915_llc(struct seq_file *m, void *data) 2392 { 2393 struct drm_info_node *node = m->private; 2394 struct drm_device *dev = node->minor->dev; 2395 struct drm_i915_private *dev_priv = dev->dev_private; 2396 2397 /* Size calculation for LLC is a bit of a pain. Ignore for now. 
*/ 2398 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2399 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 2400 2401 return 0; 2402 } 2403 2404 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2405 { 2406 struct drm_info_node *node = m->private; 2407 struct drm_i915_private *dev_priv = node->minor->dev->dev_private; 2408 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 2409 u32 tmp, i; 2410 2411 if (!HAS_GUC_UCODE(dev_priv->dev)) 2412 return 0; 2413 2414 seq_printf(m, "GuC firmware status:\n"); 2415 seq_printf(m, "\tpath: %s\n", 2416 guc_fw->guc_fw_path); 2417 seq_printf(m, "\tfetch: %s\n", 2418 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); 2419 seq_printf(m, "\tload: %s\n", 2420 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 2421 seq_printf(m, "\tversion wanted: %d.%d\n", 2422 guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); 2423 seq_printf(m, "\tversion found: %d.%d\n", 2424 guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); 2425 seq_printf(m, "\theader: offset is %d; size = %d\n", 2426 guc_fw->header_offset, guc_fw->header_size); 2427 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2428 guc_fw->ucode_offset, guc_fw->ucode_size); 2429 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2430 guc_fw->rsa_offset, guc_fw->rsa_size); 2431 2432 tmp = I915_READ(GUC_STATUS); 2433 2434 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2435 seq_printf(m, "\tBootrom status = 0x%x\n", 2436 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2437 seq_printf(m, "\tuKernel status = 0x%x\n", 2438 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2439 seq_printf(m, "\tMIA Core status = 0x%x\n", 2440 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2441 seq_puts(m, "\nScratch registers:\n"); 2442 for (i = 0; i < 16; i++) 2443 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2444 2445 return 0; 2446 } 2447 2448 static void i915_guc_client_info(struct seq_file *m, 2449 struct drm_i915_private *dev_priv, 2450 struct i915_guc_client *client) 2451 { 2452 struct intel_engine_cs *ring; 2453 uint64_t tot = 0; 2454 uint32_t i; 2455 2456 seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", 2457 client->priority, client->ctx_index, client->proc_desc_offset); 2458 seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n", 2459 client->doorbell_id, client->doorbell_offset, client->cookie); 2460 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2461 client->wq_size, client->wq_offset, client->wq_tail); 2462 2463 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2464 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2465 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2466 2467 for_each_ring(ring, dev_priv, i) { 2468 seq_printf(m, "\tSubmissions: %llu %s\n", 2469 client->submissions[ring->guc_id], 2470 ring->name); 2471 tot += client->submissions[ring->guc_id]; 2472 } 2473 seq_printf(m, "\tTotal: %llu\n", tot); 2474 } 2475 2476 static int i915_guc_info(struct seq_file *m, void *data) 2477 { 2478 struct drm_info_node *node = m->private; 2479 struct drm_device *dev = node->minor->dev; 2480 struct drm_i915_private *dev_priv = dev->dev_private; 2481 struct intel_guc guc; 2482 struct i915_guc_client client = {}; 2483 struct intel_engine_cs *ring; 2484 enum intel_ring_id i; 2485 u64 total = 0; 2486 2487 if (!HAS_GUC_SCHED(dev_priv->dev)) 2488 return 0; 2489 2490 if (mutex_lock_interruptible(&dev->struct_mutex)) 2491 return 0; 2492 2493 /* Take a local copy of the GuC data, so we can dump it at leisure */ 2494 
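	/* If there is no execbuf client yet, "client" stays zeroed below. */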
guc = dev_priv->guc; 2495 if (guc.execbuf_client) 2496 client = *guc.execbuf_client; 2497 2498 mutex_unlock(&dev->struct_mutex); 2499 2500 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2501 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2502 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); 2503 seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status); 2504 seq_printf(m, "GuC last action error code: %d\n", guc.action_err); 2505 2506 seq_printf(m, "\nGuC submissions:\n"); 2507 for_each_ring(ring, dev_priv, i) { 2508 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", 2509 ring->name, guc.submissions[ring->guc_id], 2510 guc.last_seqno[ring->guc_id]); 2511 total += guc.submissions[ring->guc_id]; 2512 } 2513 seq_printf(m, "\t%s: %llu\n", "Total", total); 2514 2515 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client); 2516 i915_guc_client_info(m, dev_priv, &client); 2517 2518 /* Add more as required ... */ 2519 2520 return 0; 2521 } 2522 2523 static int i915_guc_log_dump(struct seq_file *m, void *data) 2524 { 2525 struct drm_info_node *node = m->private; 2526 struct drm_device *dev = node->minor->dev; 2527 struct drm_i915_private *dev_priv = dev->dev_private; 2528 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; 2529 u32 *log; 2530 int i = 0, pg; 2531 2532 if (!log_obj) 2533 return 0; 2534 2535 for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) { 2536 log = kmap_atomic(i915_gem_object_get_page(log_obj, pg)); 2537 2538 for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) 2539 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2540 *(log + i), *(log + i + 1), 2541 *(log + i + 2), *(log + i + 3)); 2542 2543 kunmap_atomic(log); 2544 } 2545 2546 seq_putc(m, '\n'); 2547 2548 return 0; 2549 } 2550 2551 static int i915_edp_psr_status(struct seq_file *m, void *data) 2552 { 2553 struct drm_info_node *node = m->private; 2554 struct drm_device *dev = node->minor->dev; 2555 struct drm_i915_private *dev_priv = dev->dev_private; 2556 u32 psrperf = 0; 2557 u32 stat[3]; 2558 enum pipe pipe; 2559 bool enabled = false; 2560 2561 if (!HAS_PSR(dev)) { 2562 seq_puts(m, "PSR not supported\n"); 2563 return 0; 2564 } 2565 2566 intel_runtime_pm_get(dev_priv); 2567 2568 mutex_lock(&dev_priv->psr.lock); 2569 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2570 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2571 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2572 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2573 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2574 dev_priv->psr.busy_frontbuffer_bits); 2575 seq_printf(m, "Re-enable work scheduled: %s\n", 2576 yesno(work_busy(&dev_priv->psr.work.work))); 2577 2578 if (HAS_DDI(dev)) 2579 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2580 else { 2581 for_each_pipe(dev_priv, pipe) { 2582 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2583 VLV_EDP_PSR_CURR_STATE_MASK; 2584 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2585 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2586 enabled = true; 2587 } 2588 } 2589 2590 seq_printf(m, "Main link in standby mode: %s\n", 2591 yesno(dev_priv->psr.link_standby)); 2592 2593 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2594 2595 if (!HAS_DDI(dev)) 2596 for_each_pipe(dev_priv, pipe) { 2597 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2598 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2599 seq_printf(m, " pipe %c", pipe_name(pipe)); 2600 } 2601 
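	/* Terminate the "HW Enabled & Active bit" line printed above */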
	seq_puts(m, "\n");

	/*
	 * VLV/CHV PSR has no performance counter.
	 * On SKL+ the performance counter is reset to 0 every time a DC
	 * state is entered.
	 */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}
	mutex_unlock(&dev_priv->psr.lock);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

static int i915_sink_crc(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct intel_dp *intel_dp = NULL;
	int ret;
	u8 crc[6];

	drm_modeset_lock_all(dev);
	for_each_intel_connector(dev, connector) {
		if (connector->base.dpms != DRM_MODE_DPMS_ON)
			continue;

		if (!connector->base.encoder)
			continue;

		encoder = to_intel_encoder(connector->base.encoder);
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		ret = intel_dp_sink_crc(intel_dp, crc);
		if (ret)
			goto out;

		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
			   crc[0], crc[1], crc[2],
			   crc[3], crc[4], crc[5]);
		goto out;
	}
	ret = -ENODEV;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 power;
	u32 units;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	rdmsrl(MSR_RAPL_POWER_UNIT, power);
	power = (power & 0x1f00) >> 8;
	units = 1000000 / (1 << power); /* convert to uJ */
	power = I915_READ(MCH_SECP_NRG_STTS);
	power *= units;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "%llu", (unsigned long long)power);

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_RUNTIME_PM(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev->dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif

	return 0;
}

static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s
%d\n", power_well->name, 2726 power_well->count); 2727 2728 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2729 power_domain++) { 2730 if (!(BIT(power_domain) & power_well->domains)) 2731 continue; 2732 2733 seq_printf(m, " %-23s %d\n", 2734 intel_display_power_domain_str(power_domain), 2735 power_domains->domain_use_count[power_domain]); 2736 } 2737 } 2738 2739 mutex_unlock(&power_domains->lock); 2740 2741 return 0; 2742 } 2743 2744 static int i915_dmc_info(struct seq_file *m, void *unused) 2745 { 2746 struct drm_info_node *node = m->private; 2747 struct drm_device *dev = node->minor->dev; 2748 struct drm_i915_private *dev_priv = dev->dev_private; 2749 struct intel_csr *csr; 2750 2751 if (!HAS_CSR(dev)) { 2752 seq_puts(m, "not supported\n"); 2753 return 0; 2754 } 2755 2756 csr = &dev_priv->csr; 2757 2758 intel_runtime_pm_get(dev_priv); 2759 2760 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2761 seq_printf(m, "path: %s\n", csr->fw_path); 2762 2763 if (!csr->dmc_payload) 2764 goto out; 2765 2766 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2767 CSR_VERSION_MINOR(csr->version)); 2768 2769 if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) { 2770 seq_printf(m, "DC3 -> DC5 count: %d\n", 2771 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2772 seq_printf(m, "DC5 -> DC6 count: %d\n", 2773 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2774 } else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) { 2775 seq_printf(m, "DC3 -> DC5 count: %d\n", 2776 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2777 } 2778 2779 out: 2780 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2781 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2782 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2783 2784 intel_runtime_pm_put(dev_priv); 2785 2786 return 0; 2787 } 2788 2789 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2790 struct drm_display_mode *mode) 2791 { 2792 int i; 2793 2794 for (i = 0; i < tabs; i++) 2795 seq_putc(m, '\t'); 2796 2797 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2798 mode->base.id, mode->name, 2799 mode->vrefresh, mode->clock, 2800 mode->hdisplay, mode->hsync_start, 2801 mode->hsync_end, mode->htotal, 2802 mode->vdisplay, mode->vsync_start, 2803 mode->vsync_end, mode->vtotal, 2804 mode->type, mode->flags); 2805 } 2806 2807 static void intel_encoder_info(struct seq_file *m, 2808 struct intel_crtc *intel_crtc, 2809 struct intel_encoder *intel_encoder) 2810 { 2811 struct drm_info_node *node = m->private; 2812 struct drm_device *dev = node->minor->dev; 2813 struct drm_crtc *crtc = &intel_crtc->base; 2814 struct intel_connector *intel_connector; 2815 struct drm_encoder *encoder; 2816 2817 encoder = &intel_encoder->base; 2818 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2819 encoder->base.id, encoder->name); 2820 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2821 struct drm_connector *connector = &intel_connector->base; 2822 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2823 connector->base.id, 2824 connector->name, 2825 drm_get_connector_status_name(connector->status)); 2826 if (connector->status == connector_status_connected) { 2827 struct drm_display_mode *mode = &crtc->mode; 2828 seq_printf(m, ", mode:\n"); 2829 intel_seq_print_mode(m, 2, mode); 2830 } else { 2831 seq_putc(m, '\n'); 2832 } 2833 } 2834 } 2835 2836 static void intel_crtc_info(struct seq_file *m, struct intel_crtc 
*intel_crtc) 2837 { 2838 struct drm_info_node *node = m->private; 2839 struct drm_device *dev = node->minor->dev; 2840 struct drm_crtc *crtc = &intel_crtc->base; 2841 struct intel_encoder *intel_encoder; 2842 struct drm_plane_state *plane_state = crtc->primary->state; 2843 struct drm_framebuffer *fb = plane_state->fb; 2844 2845 if (fb) 2846 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2847 fb->base.id, plane_state->src_x >> 16, 2848 plane_state->src_y >> 16, fb->width, fb->height); 2849 else 2850 seq_puts(m, "\tprimary plane disabled\n"); 2851 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2852 intel_encoder_info(m, intel_crtc, intel_encoder); 2853 } 2854 2855 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2856 { 2857 struct drm_display_mode *mode = panel->fixed_mode; 2858 2859 seq_printf(m, "\tfixed mode:\n"); 2860 intel_seq_print_mode(m, 2, mode); 2861 } 2862 2863 static void intel_dp_info(struct seq_file *m, 2864 struct intel_connector *intel_connector) 2865 { 2866 struct intel_encoder *intel_encoder = intel_connector->encoder; 2867 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2868 2869 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2870 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2871 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2872 intel_panel_info(m, &intel_connector->panel); 2873 } 2874 2875 static void intel_dp_mst_info(struct seq_file *m, 2876 struct intel_connector *intel_connector) 2877 { 2878 struct intel_encoder *intel_encoder = intel_connector->encoder; 2879 struct intel_dp_mst_encoder *intel_mst = 2880 enc_to_mst(&intel_encoder->base); 2881 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2882 struct intel_dp *intel_dp = &intel_dig_port->dp; 2883 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2884 intel_connector->port); 2885 2886 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2887 } 2888 2889 static void intel_hdmi_info(struct seq_file *m, 2890 struct intel_connector *intel_connector) 2891 { 2892 struct intel_encoder *intel_encoder = intel_connector->encoder; 2893 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2894 2895 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2896 } 2897 2898 static void intel_lvds_info(struct seq_file *m, 2899 struct intel_connector *intel_connector) 2900 { 2901 intel_panel_info(m, &intel_connector->panel); 2902 } 2903 2904 static void intel_connector_info(struct seq_file *m, 2905 struct drm_connector *connector) 2906 { 2907 struct intel_connector *intel_connector = to_intel_connector(connector); 2908 struct intel_encoder *intel_encoder = intel_connector->encoder; 2909 struct drm_display_mode *mode; 2910 2911 seq_printf(m, "connector %d: type %s, status: %s\n", 2912 connector->base.id, connector->name, 2913 drm_get_connector_status_name(connector->status)); 2914 if (connector->status == connector_status_connected) { 2915 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2916 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2917 connector->display_info.width_mm, 2918 connector->display_info.height_mm); 2919 seq_printf(m, "\tsubpixel order: %s\n", 2920 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2921 seq_printf(m, "\tCEA rev: %d\n", 2922 connector->display_info.cea_rev); 2923 } 2924 if (intel_encoder) { 2925 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2926 intel_encoder->type == INTEL_OUTPUT_EDP) 2927 
intel_dp_info(m, intel_connector); 2928 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2929 intel_hdmi_info(m, intel_connector); 2930 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2931 intel_lvds_info(m, intel_connector); 2932 else if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 2933 intel_dp_mst_info(m, intel_connector); 2934 } 2935 2936 seq_printf(m, "\tmodes:\n"); 2937 list_for_each_entry(mode, &connector->modes, head) 2938 intel_seq_print_mode(m, 2, mode); 2939 } 2940 2941 static bool cursor_active(struct drm_device *dev, int pipe) 2942 { 2943 struct drm_i915_private *dev_priv = dev->dev_private; 2944 u32 state; 2945 2946 if (IS_845G(dev) || IS_I865G(dev)) 2947 state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 2948 else 2949 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2950 2951 return state; 2952 } 2953 2954 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2955 { 2956 struct drm_i915_private *dev_priv = dev->dev_private; 2957 u32 pos; 2958 2959 pos = I915_READ(CURPOS(pipe)); 2960 2961 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2962 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2963 *x = -*x; 2964 2965 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2966 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2967 *y = -*y; 2968 2969 return cursor_active(dev, pipe); 2970 } 2971 2972 static const char *plane_type(enum drm_plane_type type) 2973 { 2974 switch (type) { 2975 case DRM_PLANE_TYPE_OVERLAY: 2976 return "OVL"; 2977 case DRM_PLANE_TYPE_PRIMARY: 2978 return "PRI"; 2979 case DRM_PLANE_TYPE_CURSOR: 2980 return "CUR"; 2981 /* 2982 * Deliberately omitting default: to generate compiler warnings 2983 * when a new drm_plane_type gets added. 2984 */ 2985 } 2986 2987 return "unknown"; 2988 } 2989 2990 static const char *plane_rotation(unsigned int rotation) 2991 { 2992 static char buf[48]; 2993 /* 2994 * According to doc only one DRM_ROTATE_ is allowed but this 2995 * will print them all to visualize if the values are misused 2996 */ 2997 snprintf(buf, sizeof(buf), 2998 "%s%s%s%s%s%s(0x%08x)", 2999 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "", 3000 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "", 3001 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "", 3002 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "", 3003 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "", 3004 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "", 3005 rotation); 3006 3007 return buf; 3008 } 3009 3010 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3011 { 3012 struct drm_info_node *node = m->private; 3013 struct drm_device *dev = node->minor->dev; 3014 struct intel_plane *intel_plane; 3015 3016 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3017 struct drm_plane_state *state; 3018 struct drm_plane *plane = &intel_plane->base; 3019 3020 if (!plane->state) { 3021 seq_puts(m, "plane->state is NULL!\n"); 3022 continue; 3023 } 3024 3025 state = plane->state; 3026 3027 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3028 plane->base.id, 3029 plane_type(intel_plane->base.type), 3030 state->crtc_x, state->crtc_y, 3031 state->crtc_w, state->crtc_h, 3032 (state->src_x >> 16), 3033 ((state->src_x & 0xffff) * 15625) >> 10, 3034 (state->src_y >> 16), 3035 ((state->src_y & 0xffff) * 15625) >> 10, 3036 (state->src_w >> 16), 3037 ((state->src_w & 0xffff) * 15625) >> 10, 3038 (state->src_h >> 16), 3039 ((state->src_h & 0xffff) * 15625) >> 10, 3040 state->fb ? 
drm_get_format_name(state->fb->pixel_format) : "N/A",
			   plane_rotation(state->rotation));
	}
}

static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < SKL_NUM_SCALERS; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}

static int i915_display_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct drm_connector *connector;

	intel_runtime_pm_get(dev_priv);
	drm_modeset_lock_all(dev);
	seq_printf(m, "CRTC info\n");
	seq_printf(m, "---------\n");
	for_each_intel_crtc(dev, crtc) {
		bool active;
		struct intel_crtc_state *pipe_config;
		int x, y;

		pipe_config = to_intel_crtc_state(crtc->base.state);

		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
			   crtc->base.base.id, pipe_name(crtc->pipe),
			   yesno(pipe_config->base.active),
			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
			   yesno(pipe_config->dither), pipe_config->pipe_bpp);

		if (pipe_config->base.active) {
			intel_crtc_info(m, crtc);

			active = cursor_position(dev, crtc->pipe, &x, &y);
			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active?
%s\n", 3103 yesno(crtc->cursor_base), 3104 x, y, crtc->base.cursor->state->crtc_w, 3105 crtc->base.cursor->state->crtc_h, 3106 crtc->cursor_addr, yesno(active)); 3107 intel_scaler_info(m, crtc); 3108 intel_plane_info(m, crtc); 3109 } 3110 3111 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3112 yesno(!crtc->cpu_fifo_underrun_disabled), 3113 yesno(!crtc->pch_fifo_underrun_disabled)); 3114 } 3115 3116 seq_printf(m, "\n"); 3117 seq_printf(m, "Connector info\n"); 3118 seq_printf(m, "--------------\n"); 3119 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 3120 intel_connector_info(m, connector); 3121 } 3122 drm_modeset_unlock_all(dev); 3123 intel_runtime_pm_put(dev_priv); 3124 3125 return 0; 3126 } 3127 3128 static int i915_semaphore_status(struct seq_file *m, void *unused) 3129 { 3130 struct drm_info_node *node = (struct drm_info_node *) m->private; 3131 struct drm_device *dev = node->minor->dev; 3132 struct drm_i915_private *dev_priv = dev->dev_private; 3133 struct intel_engine_cs *ring; 3134 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 3135 int i, j, ret; 3136 3137 if (!i915_semaphore_is_enabled(dev)) { 3138 seq_puts(m, "Semaphores are disabled\n"); 3139 return 0; 3140 } 3141 3142 ret = mutex_lock_interruptible(&dev->struct_mutex); 3143 if (ret) 3144 return ret; 3145 intel_runtime_pm_get(dev_priv); 3146 3147 if (IS_BROADWELL(dev)) { 3148 struct page *page; 3149 uint64_t *seqno; 3150 3151 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 3152 3153 seqno = (uint64_t *)kmap_atomic(page); 3154 for_each_ring(ring, dev_priv, i) { 3155 uint64_t offset; 3156 3157 seq_printf(m, "%s\n", ring->name); 3158 3159 seq_puts(m, " Last signal:"); 3160 for (j = 0; j < num_rings; j++) { 3161 offset = i * I915_NUM_RINGS + j; 3162 seq_printf(m, "0x%08llx (0x%02llx) ", 3163 seqno[offset], offset * 8); 3164 } 3165 seq_putc(m, '\n'); 3166 3167 seq_puts(m, " Last wait: "); 3168 for (j = 0; j < num_rings; j++) { 3169 offset = i + (j * I915_NUM_RINGS); 3170 seq_printf(m, "0x%08llx (0x%02llx) ", 3171 seqno[offset], offset * 8); 3172 } 3173 seq_putc(m, '\n'); 3174 3175 } 3176 kunmap_atomic(seqno); 3177 } else { 3178 seq_puts(m, " Last signal:"); 3179 for_each_ring(ring, dev_priv, i) 3180 for (j = 0; j < num_rings; j++) 3181 seq_printf(m, "0x%08x\n", 3182 I915_READ(ring->semaphore.mbox.signal[j])); 3183 seq_putc(m, '\n'); 3184 } 3185 3186 seq_puts(m, "\nSync seqno:\n"); 3187 for_each_ring(ring, dev_priv, i) { 3188 for (j = 0; j < num_rings; j++) { 3189 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 3190 } 3191 seq_putc(m, '\n'); 3192 } 3193 seq_putc(m, '\n'); 3194 3195 intel_runtime_pm_put(dev_priv); 3196 mutex_unlock(&dev->struct_mutex); 3197 return 0; 3198 } 3199 3200 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3201 { 3202 struct drm_info_node *node = (struct drm_info_node *) m->private; 3203 struct drm_device *dev = node->minor->dev; 3204 struct drm_i915_private *dev_priv = dev->dev_private; 3205 int i; 3206 3207 drm_modeset_lock_all(dev); 3208 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3209 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3210 3211 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3212 seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", 3213 pll->config.crtc_mask, pll->active, yesno(pll->on)); 3214 seq_printf(m, " tracked hardware state:\n"); 3215 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); 3216 seq_printf(m, " dpll_md: 0x%08x\n", 3217 
pll->config.hw_state.dpll_md); 3218 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); 3219 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); 3220 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); 3221 } 3222 drm_modeset_unlock_all(dev); 3223 3224 return 0; 3225 } 3226 3227 static int i915_wa_registers(struct seq_file *m, void *unused) 3228 { 3229 int i; 3230 int ret; 3231 struct intel_engine_cs *ring; 3232 struct drm_info_node *node = (struct drm_info_node *) m->private; 3233 struct drm_device *dev = node->minor->dev; 3234 struct drm_i915_private *dev_priv = dev->dev_private; 3235 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3236 3237 ret = mutex_lock_interruptible(&dev->struct_mutex); 3238 if (ret) 3239 return ret; 3240 3241 intel_runtime_pm_get(dev_priv); 3242 3243 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3244 for_each_ring(ring, dev_priv, i) 3245 seq_printf(m, "HW whitelist count for %s: %d\n", 3246 ring->name, workarounds->hw_whitelist_count[i]); 3247 for (i = 0; i < workarounds->count; ++i) { 3248 i915_reg_t addr; 3249 u32 mask, value, read; 3250 bool ok; 3251 3252 addr = workarounds->reg[i].addr; 3253 mask = workarounds->reg[i].mask; 3254 value = workarounds->reg[i].value; 3255 read = I915_READ(addr); 3256 ok = (value & mask) == (read & mask); 3257 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3258 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3259 } 3260 3261 intel_runtime_pm_put(dev_priv); 3262 mutex_unlock(&dev->struct_mutex); 3263 3264 return 0; 3265 } 3266 3267 static int i915_ddb_info(struct seq_file *m, void *unused) 3268 { 3269 struct drm_info_node *node = m->private; 3270 struct drm_device *dev = node->minor->dev; 3271 struct drm_i915_private *dev_priv = dev->dev_private; 3272 struct skl_ddb_allocation *ddb; 3273 struct skl_ddb_entry *entry; 3274 enum pipe pipe; 3275 int plane; 3276 3277 if (INTEL_INFO(dev)->gen < 9) 3278 return 0; 3279 3280 drm_modeset_lock_all(dev); 3281 3282 ddb = &dev_priv->wm.skl_hw.ddb; 3283 3284 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3285 3286 for_each_pipe(dev_priv, pipe) { 3287 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3288 3289 for_each_plane(dev_priv, pipe, plane) { 3290 entry = &ddb->plane[pipe][plane]; 3291 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3292 entry->start, entry->end, 3293 skl_ddb_entry_size(entry)); 3294 } 3295 3296 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3297 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3298 entry->end, skl_ddb_entry_size(entry)); 3299 } 3300 3301 drm_modeset_unlock_all(dev); 3302 3303 return 0; 3304 } 3305 3306 static void drrs_status_per_crtc(struct seq_file *m, 3307 struct drm_device *dev, struct intel_crtc *intel_crtc) 3308 { 3309 struct intel_encoder *intel_encoder; 3310 struct drm_i915_private *dev_priv = dev->dev_private; 3311 struct i915_drrs *drrs = &dev_priv->drrs; 3312 int vrefresh = 0; 3313 3314 for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { 3315 /* Encoder connected on this CRTC */ 3316 switch (intel_encoder->type) { 3317 case INTEL_OUTPUT_EDP: 3318 seq_puts(m, "eDP:\n"); 3319 break; 3320 case INTEL_OUTPUT_DSI: 3321 seq_puts(m, "DSI:\n"); 3322 break; 3323 case INTEL_OUTPUT_HDMI: 3324 seq_puts(m, "HDMI:\n"); 3325 break; 3326 case INTEL_OUTPUT_DISPLAYPORT: 3327 seq_puts(m, "DP:\n"); 3328 break; 3329 default: 3330 seq_printf(m, "Other encoder (id=%d).\n", 3331 intel_encoder->type); 3332 return; 3333 } 3334 } 3335 3336 if 
(dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported: No");
	}
	seq_puts(m, "\n");
}

static int i915_drrs_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_crtc *intel_crtc;
	int active_crtc_cnt = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		drm_modeset_lock(&intel_crtc->base.mutex, NULL);

		if (intel_crtc->base.state->active) {
			active_crtc_cnt++;
			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);

			drrs_status_per_crtc(m, dev, intel_crtc);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);
	}

	if (!active_crtc_cnt)
		seq_puts(m, "No active crtc found\n");

	return 0;
}

struct pipe_crc_info {
	const char *name;
	struct drm_device *dev;
	enum pipe pipe;
};

static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_encoder *encoder;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;

	drm_modeset_lock_all(dev);
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		intel_encoder = to_intel_encoder(encoder);
		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
			continue;
		intel_dig_port = enc_to_dig_port(encoder);
		if (!intel_dig_port->dp.can_mst)
			continue;

		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
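
	/* Reject pipe indices the hardware does not actually have */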
	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
		return -ENODEV;

	spin_lock_irq(&pipe_crc->lock);

	if (pipe_crc->opened) {
		spin_unlock_irq(&pipe_crc->lock);
		return -EBUSY; /* already open */
	}

	pipe_crc->opened = true;
	filep->private_data = inode->i_private;

	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
	struct pipe_crc_info *info = inode->i_private;
	struct drm_i915_private *dev_priv = info->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];

	spin_lock_irq(&pipe_crc->lock);
	pipe_crc->opened = false;
	spin_unlock_irq(&pipe_crc->lock);

	return 0;
}

/* (6 fields, 8 chars each, space separated (5) + '\n') */
#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
/* account for '\0' */
#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)

static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
	assert_spin_locked(&pipe_crc->lock);
	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR);
}

static ssize_t
i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
		   loff_t *pos)
{
	struct pipe_crc_info *info = filep->private_data;
	struct drm_device *dev = info->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
	int n_entries;
	ssize_t bytes_read;

	/*
	 * Don't allow user space to provide buffers not big enough to hold
	 * a line of data.
	 */
	if (count < PIPE_CRC_LINE_LEN)
		return -EINVAL;

	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
		return 0;

	spin_lock_irq(&pipe_crc->lock);

	/* Wait while there is nothing to read */
	while (pipe_crc_data_count(pipe_crc) == 0) {
		int ret;

		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&pipe_crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
		if (ret) {
			spin_unlock_irq(&pipe_crc->lock);
			return ret;
		}
	}

	/* We now have one or more entries to read */
	n_entries = count / PIPE_CRC_LINE_LEN;

	bytes_read = 0;
	while (n_entries > 0) {
		struct intel_pipe_crc_entry *entry =
			&pipe_crc->entries[pipe_crc->tail];
		int ret;

		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
			break;

		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);

		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);

		spin_unlock_irq(&pipe_crc->lock);

		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;

		user_buf += PIPE_CRC_LINE_LEN;
		n_entries--;

		spin_lock_irq(&pipe_crc->lock);
	}

	spin_unlock_irq(&pipe_crc->lock);

	return bytes_read;
}

static const struct file_operations i915_pipe_crc_fops = {
	.owner = THIS_MODULE,
	.open = i915_pipe_crc_open,
.read = i915_pipe_crc_read, 3575 .release = i915_pipe_crc_release, 3576 }; 3577 3578 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 3579 { 3580 .name = "i915_pipe_A_crc", 3581 .pipe = PIPE_A, 3582 }, 3583 { 3584 .name = "i915_pipe_B_crc", 3585 .pipe = PIPE_B, 3586 }, 3587 { 3588 .name = "i915_pipe_C_crc", 3589 .pipe = PIPE_C, 3590 }, 3591 }; 3592 3593 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 3594 enum pipe pipe) 3595 { 3596 struct drm_device *dev = minor->dev; 3597 struct dentry *ent; 3598 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 3599 3600 info->dev = dev; 3601 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 3602 &i915_pipe_crc_fops); 3603 if (!ent) 3604 return -ENOMEM; 3605 3606 return drm_add_fake_info_node(minor, ent, info); 3607 } 3608 3609 static const char * const pipe_crc_sources[] = { 3610 "none", 3611 "plane1", 3612 "plane2", 3613 "pf", 3614 "pipe", 3615 "TV", 3616 "DP-B", 3617 "DP-C", 3618 "DP-D", 3619 "auto", 3620 }; 3621 3622 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 3623 { 3624 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 3625 return pipe_crc_sources[source]; 3626 } 3627 3628 static int display_crc_ctl_show(struct seq_file *m, void *data) 3629 { 3630 struct drm_device *dev = m->private; 3631 struct drm_i915_private *dev_priv = dev->dev_private; 3632 int i; 3633 3634 for (i = 0; i < I915_MAX_PIPES; i++) 3635 seq_printf(m, "%c %s\n", pipe_name(i), 3636 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 3637 3638 return 0; 3639 } 3640 3641 static int display_crc_ctl_open(struct inode *inode, struct file *file) 3642 { 3643 struct drm_device *dev = inode->i_private; 3644 3645 return single_open(file, display_crc_ctl_show, dev); 3646 } 3647 3648 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3649 uint32_t *val) 3650 { 3651 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3652 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3653 3654 switch (*source) { 3655 case INTEL_PIPE_CRC_SOURCE_PIPE: 3656 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 3657 break; 3658 case INTEL_PIPE_CRC_SOURCE_NONE: 3659 *val = 0; 3660 break; 3661 default: 3662 return -EINVAL; 3663 } 3664 3665 return 0; 3666 } 3667 3668 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 3669 enum intel_pipe_crc_source *source) 3670 { 3671 struct intel_encoder *encoder; 3672 struct intel_crtc *crtc; 3673 struct intel_digital_port *dig_port; 3674 int ret = 0; 3675 3676 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3677 3678 drm_modeset_lock_all(dev); 3679 for_each_intel_encoder(dev, encoder) { 3680 if (!encoder->base.crtc) 3681 continue; 3682 3683 crtc = to_intel_crtc(encoder->base.crtc); 3684 3685 if (crtc->pipe != pipe) 3686 continue; 3687 3688 switch (encoder->type) { 3689 case INTEL_OUTPUT_TVOUT: 3690 *source = INTEL_PIPE_CRC_SOURCE_TV; 3691 break; 3692 case INTEL_OUTPUT_DISPLAYPORT: 3693 case INTEL_OUTPUT_EDP: 3694 dig_port = enc_to_dig_port(&encoder->base); 3695 switch (dig_port->port) { 3696 case PORT_B: 3697 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 3698 break; 3699 case PORT_C: 3700 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 3701 break; 3702 case PORT_D: 3703 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 3704 break; 3705 default: 3706 WARN(1, "nonexisting DP port %c\n", 3707 port_name(dig_port->port)); 3708 break; 3709 } 3710 break; 3711 default: 3712 break; 3713 } 3714 } 3715 drm_modeset_unlock_all(dev); 3716 3717 return ret; 3718 } 3719 3720 static int 

static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
				 enum pipe pipe,
				 enum intel_pipe_crc_source *source,
				 uint32_t *val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
		break;
	case INTEL_PIPE_CRC_SOURCE_TV:
		if (!SUPPORTS_TV(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_G4X(dev))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		WARN_ON(!IS_G4X(dev));

		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);

		if (pipe == PIPE_A)
			tmp |= PIPE_A_SCRAMBLE_RESET;
		else
			tmp |= PIPE_B_SCRAMBLE_RESET;

		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}

static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	switch (pipe) {
	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
		break;
	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
		break;
	case PIPE_C:
		tmp &= ~PIPE_C_SCRAMBLE_RESET;
		break;
	default:
		return;
	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
}

static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
					 enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);

	if (pipe == PIPE_A)
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
	else
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
	I915_WRITE(PORT_DFT2_G4X, tmp);

	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
		I915_WRITE(PORT_DFT_I9XX,
			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
	}
}

static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PIPE;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	int ret = 0;

	drm_modeset_lock_all(dev);
	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	pipe_config->pch_pfit.force_thru = enable;
	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
	    pipe_config->pch_pfit.enabled != enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);
out:
	drm_modeset_unlock_all(dev);
	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
	if (ret)
		drm_atomic_state_free(state);
}
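
/*
 * Note: on HSW the pipe A panel fitter CRC source is only usable with the
 * force_thru workaround above; ivb_pipe_crc_ctl_reg() arms it when the PF
 * source is selected, and pipe_crc_set_source() disarms it again on the
 * transition back to 'none'.
 */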
static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	if (IS_GEN2(dev))
		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
	else if (INTEL_INFO(dev)->gen < 5)
		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ret = ilk_pipe_crc_ctl_reg(&source, &val);
	else
		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);

	if (ret != 0)
		goto out;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries) {
			ret = -ENOMEM;
			goto out;
		}
		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's
		 * just completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
		struct intel_pipe_crc_entry *entries;
		struct intel_crtc *crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev))
			g4x_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_undo_pipe_scramble_reset(dev, pipe);
		else if (IS_HASWELL(dev) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev, false);

		hsw_enable_ips(crtc);
	}

	ret = 0;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Parse pipe CRC command strings:
 *   command: wsp* object wsp+ name wsp+ source wsp*
 *   object: 'pipe'
 *   name: (A | B | C)
 *   source: (none | plane1 | plane2 | pf)
 *   wsp: (#0x20 | #0x9 | #0xA)+
 *
 * eg.:
 *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
 *  "pipe A none"    ->  Stop CRC
 */
static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
{
	int n_words = 0;

	while (*buf) {
		char *end;

		/* skip leading white space */
		buf = skip_spaces(buf);
		if (!*buf)
			break;	/* end of buffer */

		/* find end of word */
		for (end = buf; *end && !isspace(*end); end++)
			;

		if (n_words == max_words) {
			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
					 max_words);
			return -EINVAL;	/* ran out of words[] before bytes */
		}

		if (*end)
			*end++ = '\0';
		words[n_words++] = buf;
		buf = end;
	}

	return n_words;
}

enum intel_pipe_crc_object {
	PIPE_CRC_OBJECT_PIPE,
};

static const char * const pipe_crc_objects[] = {
	"pipe",
};

static int
display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
		if (!strcmp(buf, pipe_crc_objects[i])) {
			*o = i;
			return 0;
		}

	return -EINVAL;
}

static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
{
	const char name = buf[0];

	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
		return -EINVAL;

	*pipe = name - 'A';

	return 0;
}

static int
display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
		if (!strcmp(buf, pipe_crc_sources[i])) {
			*s = i;
			return 0;
		}

	return -EINVAL;
}
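
/*
 * A minimal end-to-end sketch of the user space side (paths assume debugfs
 * is mounted at /sys/kernel/debug and the device is DRM minor 0):
 *
 *   echo "pipe A pf" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */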
static int
display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
{
#define N_WORDS 3
	int n_words;
	char *words[N_WORDS];
	enum pipe pipe;
	enum intel_pipe_crc_object object;
	enum intel_pipe_crc_source source;

	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
	if (n_words != N_WORDS) {
		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
				 N_WORDS);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
		return -EINVAL;
	}

	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
		return -EINVAL;
	}

	return pipe_crc_set_source(dev, pipe, source);
}

static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	char *tmpbuf;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
				 PAGE_SIZE);
		return -E2BIG;
	}

	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
	if (!tmpbuf)
		return -ENOMEM;

	if (copy_from_user(tmpbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out;
	}
	tmpbuf[len] = '\0';

	ret = display_crc_ctl_parse(dev, tmpbuf, len);

out:
	kfree(tmpbuf);
	if (ret < 0)
		return ret;

	*offp += len;
	return len;
}

static const struct file_operations i915_display_crc_ctl_fops = {
	.owner = THIS_MODULE,
	.open = display_crc_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = display_crc_ctl_write
};

static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct list_head *connector_list;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	connector_list = &dev->mode_config.connector_list;

	if (len == 0)
		return 0;

	input_buffer = kmalloc(len + 1, GFP_KERNEL);
	if (!input_buffer)
		return -ENOMEM;

	if (copy_from_user(input_buffer, ubuf, len)) {
		status = -EFAULT;
		goto out;
	}

	input_buffer[len] = '\0';
	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				goto out;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here.
			 */
			if (val == 1)
				intel_dp->compliance_test_active = 1;
			else
				intel_dp->compliance_test_active = 0;
		}
	}
out:
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}

static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			if (intel_dp->compliance_test_active)
				seq_puts(m, "1");
			else
				seq_puts(m, "0");
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_active_show, dev);
}

static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};

static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%lx", intel_dp->compliance_test_data);
		} else
			seq_puts(m, "0");
	}

	return 0;
}

static int i915_displayport_test_data_open(struct inode *inode,
					   struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_data_show, dev);
}

static const struct file_operations i915_displayport_test_data_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_data_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_connector *connector;
	struct list_head *connector_list = &dev->mode_config.connector_list;
	struct intel_dp *intel_dp;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		if (connector->status == connector_status_connected &&
		    connector->encoder != NULL) {
			intel_dp = enc_to_intel_dp(connector->encoder);
			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
		} else
			seq_puts(m, "0");
	}

	return 0;
}
static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	struct drm_device *dev = inode->i_private;

	return single_open(file, i915_displayport_test_type_show, dev);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
	struct drm_device *dev = m->private;
	int level;
	int num_levels;

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
		    IS_CHERRYVIEW(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (INTEL_INFO(dev)->gen < 5)
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;

	if (HAS_GMCH_DISPLAY(dev))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev);
}
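
/*
 * Writes must supply exactly as many space-separated values as there are
 * watermark levels on the platform, in the same raw (unscaled) units that
 * the corresponding read interface prints in its second column, e.g. a
 * hypothetical "2 4 8 16 32" on a platform with five levels.
 */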
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, uint16_t wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	uint16_t new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev))
		num_levels = 1;
	else
		num_levels = ilk_wm_max_level(dev) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *dev = m->private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint16_t *latencies;

	if (INTEL_INFO(dev)->gen >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = to_i915(dev)->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling the same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'.
	 */

	if (i915_reset_in_progress(&dev_priv->gpu_error))
		return -EAGAIN;

	intel_runtime_pm_get(dev_priv);

	i915_handle_error(dev, val,
			  "Manually setting wedged to %llu", val);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");

static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.missed_irq_rings;
	return 0;
}

static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	dev_priv->gpu_error.missed_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
			"0x%08llx\n");

static int
i915_ring_test_irq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.test_irq_rings;

	return 0;
}

static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	/* Lock against concurrent debugfs callers */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.test_irq_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
			i915_ring_test_irq_get, i915_ring_test_irq_set,
			"0x%08llx\n");

#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
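
/*
 * Illustrative use (path assumes debugfs at /sys/kernel/debug, DRM minor 0):
 * "echo 0x7 > /sys/kernel/debug/dri/0/i915_gem_drop_caches" retires
 * requests and drops bound and unbound objects without forcing the GPU
 * idle first (0x7 == DROP_UNBOUND | DROP_BOUND | DROP_RETIRE).
 */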
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.max_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hw_max, hw_min;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return -ENODEV;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	val = intel_freq_opcode(dev_priv, val);

	hw_max = dev_priv->rps.max_freq;
	hw_min = dev_priv->rps.min_freq;

	if (val < hw_min || val > hw_max ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

struct sseu_dev_status {
	unsigned int slice_total;
	unsigned int subslice_total;
	unsigned int subslice_per_slice;
	unsigned int eu_total;
	unsigned int eu_per_subslice;
};

static void cherryview_sseu_device_status(struct drm_device *dev,
					  struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ss_max = 2;
	int ss;
	u32 sig1[ss_max], sig2[ss_max];

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		stat->slice_total = 1;
		stat->subslice_per_slice++;
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		stat->eu_total += eu_cnt;
		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
	}
	stat->subslice_total = stat->subslice_per_slice;
}

static void gen9_sseu_device_status(struct drm_device *dev,
				    struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s_max = 3, ss_max = 4;
	int s, ss;
	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];

	/* BXT has a single slice and at most 3 subslices. */
	if (IS_BROXTON(dev)) {
		s_max = 1;
		ss_max = 3;
	}

	for (s = 0; s < s_max; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < s_max; s++) {
		unsigned int ss_cnt = 0;

		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		stat->slice_total++;

		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;

		for (ss = 0; ss < ss_max; ss++) {
			unsigned int eu_cnt;

			if (IS_BROXTON(dev) &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			if (IS_BROXTON(dev))
				ss_cnt++;

			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			stat->eu_total += eu_cnt;
			stat->eu_per_subslice = max(stat->eu_per_subslice,
						    eu_cnt);
		}

		stat->subslice_total += ss_cnt;
		stat->subslice_per_slice = max(stat->subslice_per_slice,
					       ss_cnt);
	}
}

static void broadwell_sseu_device_status(struct drm_device *dev,
					 struct sseu_dev_status *stat)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int s;
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);

	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);

	if (stat->slice_total) {
		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
		stat->subslice_total = stat->slice_total *
				       stat->subslice_per_slice;
		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < stat->slice_total; s++) {
			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];

			stat->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct sseu_dev_status stat;

	if (INTEL_INFO(dev)->gen < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	seq_printf(m, "  Available Slice Total: %u\n",
		   INTEL_INFO(dev)->slice_total);
	seq_printf(m, "  Available Subslice Total: %u\n",
		   INTEL_INFO(dev)->subslice_total);
	seq_printf(m, "  Available Subslice Per Slice: %u\n",
		   INTEL_INFO(dev)->subslice_per_slice);
	seq_printf(m, "  Available EU Total: %u\n",
		   INTEL_INFO(dev)->eu_total);
	seq_printf(m, "  Available EU Per Subslice: %u\n",
		   INTEL_INFO(dev)->eu_per_subslice);
	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(INTEL_INFO(dev)->has_eu_pg));

	seq_puts(m, "SSEU Device Status\n");
	memset(&stat, 0, sizeof(stat));
	if (IS_CHERRYVIEW(dev)) {
		cherryview_sseu_device_status(dev, &stat);
	} else if (IS_BROADWELL(dev)) {
		broadwell_sseu_device_status(dev, &stat);
	} else if (INTEL_INFO(dev)->gen >= 9) {
		gen9_sseu_device_status(dev, &stat);
	}
	seq_printf(m, "  Enabled Slice Total: %u\n",
		   stat.slice_total);
	seq_printf(m, "  Enabled Subslice Total: %u\n",
		   stat.subslice_total);
	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
		   stat.subslice_per_slice);
	seq_printf(m, "  Enabled EU Total: %u\n",
		   stat.eu_total);
	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
		   stat.eu_per_subslice);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
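
/*
 * Unlike the read-only seq_file dumps in i915_debugfs_list above, the
 * entries below get dedicated (mostly writable) file_operations, so they
 * are registered through i915_debugfs_create() and need their own fake
 * info nodes for cleanup in i915_debugfs_cleanup().
 */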
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};

void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
				    &i915_dpcd_fops);

	return 0;
}