1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/circ_buf.h> 31 #include <linux/ctype.h> 32 #include <linux/debugfs.h> 33 #include <linux/slab.h> 34 #include <linux/export.h> 35 #include <linux/list_sort.h> 36 #include <asm/msr-index.h> 37 #include <drm/drmP.h> 38 #include "intel_drv.h" 39 #include "intel_ringbuffer.h" 40 #include <drm/i915_drm.h> 41 #include "i915_drv.h" 42 43 enum { 44 ACTIVE_LIST, 45 INACTIVE_LIST, 46 PINNED_LIST, 47 }; 48 49 static const char *yesno(int v) 50 { 51 return v ? "yes" : "no"; 52 } 53 54 /* As the drm_debugfs_init() routines are called before dev->dev_private is 55 * allocated we need to hook into the minor for release. */ 56 static int 57 drm_add_fake_info_node(struct drm_minor *minor, 58 struct dentry *ent, 59 const void *key) 60 { 61 struct drm_info_node *node; 62 63 node = kmalloc(sizeof(*node), GFP_KERNEL); 64 if (node == NULL) { 65 debugfs_remove(ent); 66 return -ENOMEM; 67 } 68 69 node->minor = minor; 70 node->dent = ent; 71 node->info_ent = (void *) key; 72 73 mutex_lock(&minor->debugfs_lock); 74 list_add(&node->list, &minor->debugfs_list); 75 mutex_unlock(&minor->debugfs_lock); 76 77 return 0; 78 } 79 80 static int i915_capabilities(struct seq_file *m, void *data) 81 { 82 struct drm_info_node *node = m->private; 83 struct drm_device *dev = node->minor->dev; 84 const struct intel_device_info *info = INTEL_INFO(dev); 85 86 seq_printf(m, "gen: %d\n", info->gen); 87 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 88 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 89 #define SEP_SEMICOLON ; 90 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); 91 #undef PRINT_FLAG 92 #undef SEP_SEMICOLON 93 94 return 0; 95 } 96 97 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 98 { 99 if (obj->pin_display) 100 return "p"; 101 else 102 return " "; 103 } 104 105 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 106 { 107 switch (obj->tiling_mode) { 108 default: 109 case I915_TILING_NONE: return " "; 110 case I915_TILING_X: return "X"; 111 case I915_TILING_Y: return "Y"; 112 } 113 } 114 115 static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 116 { 117 return i915_gem_obj_to_ggtt(obj) ? "g" : " "; 118 } 119 120 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 121 { 122 u64 size = 0; 123 struct i915_vma *vma; 124 125 list_for_each_entry(vma, &obj->vma_list, vma_link) { 126 if (i915_is_ggtt(vma->vm) && 127 drm_mm_node_allocated(&vma->node)) 128 size += vma->node.size; 129 } 130 131 return size; 132 } 133 134 static void 135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 136 { 137 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 138 struct intel_engine_cs *ring; 139 struct i915_vma *vma; 140 int pin_count = 0; 141 int i; 142 143 seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", 144 &obj->base, 145 obj->active ? "*" : " ", 146 get_pin_flag(obj), 147 get_tiling_flag(obj), 148 get_global_flag(obj), 149 obj->base.size / 1024, 150 obj->base.read_domains, 151 obj->base.write_domain); 152 for_each_ring(ring, dev_priv, i) 153 seq_printf(m, "%x ", 154 i915_gem_request_get_seqno(obj->last_read_req[i])); 155 seq_printf(m, "] %x %x%s%s%s", 156 i915_gem_request_get_seqno(obj->last_write_req), 157 i915_gem_request_get_seqno(obj->last_fenced_req), 158 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), 159 obj->dirty ? " dirty" : "", 160 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 161 if (obj->base.name) 162 seq_printf(m, " (name: %d)", obj->base.name); 163 list_for_each_entry(vma, &obj->vma_list, vma_link) { 164 if (vma->pin_count > 0) 165 pin_count++; 166 } 167 seq_printf(m, " (pinned x %d)", pin_count); 168 if (obj->pin_display) 169 seq_printf(m, " (display)"); 170 if (obj->fence_reg != I915_FENCE_REG_NONE) 171 seq_printf(m, " (fence: %d)", obj->fence_reg); 172 list_for_each_entry(vma, &obj->vma_list, vma_link) { 173 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 174 i915_is_ggtt(vma->vm) ? "g" : "pp", 175 vma->node.start, vma->node.size); 176 if (i915_is_ggtt(vma->vm)) 177 seq_printf(m, ", type: %u)", vma->ggtt_view.type); 178 else 179 seq_puts(m, ")"); 180 } 181 if (obj->stolen) 182 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 183 if (obj->pin_display || obj->fault_mappable) { 184 char s[3], *t = s; 185 if (obj->pin_display) 186 *t++ = 'p'; 187 if (obj->fault_mappable) 188 *t++ = 'f'; 189 *t = '\0'; 190 seq_printf(m, " (%s mappable)", s); 191 } 192 if (obj->last_write_req != NULL) 193 seq_printf(m, " (%s)", 194 i915_gem_request_get_ring(obj->last_write_req)->name); 195 if (obj->frontbuffer_bits) 196 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 197 } 198 199 static void describe_ctx(struct seq_file *m, struct intel_context *ctx) 200 { 201 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); 202 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 203 seq_putc(m, ' '); 204 } 205 206 static int i915_gem_object_list_info(struct seq_file *m, void *data) 207 { 208 struct drm_info_node *node = m->private; 209 uintptr_t list = (uintptr_t) node->info_ent->data; 210 struct list_head *head; 211 struct drm_device *dev = node->minor->dev; 212 struct drm_i915_private *dev_priv = dev->dev_private; 213 struct i915_address_space *vm = &dev_priv->gtt.base; 214 struct i915_vma *vma; 215 u64 total_obj_size, total_gtt_size; 216 int count, ret; 217 218 ret = mutex_lock_interruptible(&dev->struct_mutex); 219 if (ret) 220 return ret; 221 222 /* FIXME: the user of this interface might want more than just GGTT */ 223 switch (list) { 224 case ACTIVE_LIST: 225 seq_puts(m, "Active:\n"); 226 head = &vm->active_list; 227 break; 228 case INACTIVE_LIST: 229 seq_puts(m, "Inactive:\n"); 230 head = &vm->inactive_list; 231 break; 232 default: 233 mutex_unlock(&dev->struct_mutex); 234 return -EINVAL; 235 } 236 237 total_obj_size = total_gtt_size = count = 0; 238 list_for_each_entry(vma, head, mm_list) { 239 seq_printf(m, " "); 240 describe_obj(m, vma->obj); 241 seq_printf(m, "\n"); 242 total_obj_size += vma->obj->base.size; 243 total_gtt_size += vma->node.size; 244 count++; 245 } 246 mutex_unlock(&dev->struct_mutex); 247 248 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 249 count, total_obj_size, total_gtt_size); 250 return 0; 251 } 252 253 static int obj_rank_by_stolen(void *priv, 254 struct list_head *A, struct list_head *B) 255 { 256 struct drm_i915_gem_object *a = 257 container_of(A, struct drm_i915_gem_object, obj_exec_link); 258 struct drm_i915_gem_object *b = 259 container_of(B, struct drm_i915_gem_object, obj_exec_link); 260 261 return a->stolen->start - b->stolen->start; 262 } 263 264 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 265 { 266 struct drm_info_node *node = m->private; 267 struct drm_device *dev = node->minor->dev; 268 struct drm_i915_private *dev_priv = dev->dev_private; 269 struct drm_i915_gem_object *obj; 270 u64 total_obj_size, total_gtt_size; 271 LIST_HEAD(stolen); 272 int count, ret; 273 274 ret = mutex_lock_interruptible(&dev->struct_mutex); 275 if (ret) 276 return ret; 277 278 total_obj_size = total_gtt_size = count = 0; 279 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 280 if (obj->stolen == NULL) 281 continue; 282 283 list_add(&obj->obj_exec_link, &stolen); 284 285 total_obj_size += obj->base.size; 286 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 287 count++; 288 } 289 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 290 if (obj->stolen == NULL) 291 continue; 292 293 list_add(&obj->obj_exec_link, &stolen); 294 295 total_obj_size += obj->base.size; 296 count++; 297 } 298 list_sort(NULL, &stolen, obj_rank_by_stolen); 299 seq_puts(m, "Stolen:\n"); 300 while (!list_empty(&stolen)) { 301 obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); 302 seq_puts(m, " "); 303 describe_obj(m, obj); 304 seq_putc(m, '\n'); 305 list_del_init(&obj->obj_exec_link); 306 } 307 mutex_unlock(&dev->struct_mutex); 308 309 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 310 count, total_obj_size, total_gtt_size); 311 return 0; 312 } 313 314 #define count_objects(list, member) do { \ 315 list_for_each_entry(obj, list, member) { \ 316 size += i915_gem_obj_total_ggtt_size(obj); \ 317 ++count; \ 318 if (obj->map_and_fenceable) { \ 319 mappable_size += i915_gem_obj_ggtt_size(obj); \ 320 ++mappable_count; \ 321 } \ 322 } \ 323 } while (0) 324 325 struct file_stats { 326 struct drm_i915_file_private *file_priv; 327 unsigned long count; 328 u64 total, unbound; 329 u64 global, shared; 330 u64 active, inactive; 331 }; 332 333 static int per_file_stats(int id, void *ptr, void *data) 334 { 335 struct drm_i915_gem_object *obj = ptr; 336 struct file_stats *stats = data; 337 struct i915_vma *vma; 338 339 stats->count++; 340 stats->total += obj->base.size; 341 342 if (obj->base.name || obj->base.dma_buf) 343 stats->shared += obj->base.size; 344 345 if (USES_FULL_PPGTT(obj->base.dev)) { 346 list_for_each_entry(vma, &obj->vma_list, vma_link) { 347 struct i915_hw_ppgtt *ppgtt; 348 349 if (!drm_mm_node_allocated(&vma->node)) 350 continue; 351 352 if (i915_is_ggtt(vma->vm)) { 353 stats->global += obj->base.size; 354 continue; 355 } 356 357 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 358 if (ppgtt->file_priv != stats->file_priv) 359 continue; 360 361 if (obj->active) /* XXX per-vma statistic */ 362 stats->active += obj->base.size; 363 else 364 stats->inactive += obj->base.size; 365 366 return 0; 367 } 368 } else { 369 if (i915_gem_obj_ggtt_bound(obj)) { 370 stats->global += obj->base.size; 371 if (obj->active) 372 stats->active += obj->base.size; 373 else 374 stats->inactive += obj->base.size; 375 return 0; 376 } 377 } 378 379 if (!list_empty(&obj->global_list)) 380 stats->unbound += obj->base.size; 381 382 return 0; 383 } 384 385 #define print_file_stats(m, name, stats) do { \ 386 if (stats.count) \ 387 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ 388 name, \ 389 stats.count, \ 390 stats.total, \ 391 stats.active, \ 392 stats.inactive, \ 393 stats.global, \ 394 stats.shared, \ 395 stats.unbound); \ 396 } while (0) 397 398 static void print_batch_pool_stats(struct seq_file *m, 399 struct drm_i915_private *dev_priv) 400 { 401 struct drm_i915_gem_object *obj; 402 struct file_stats stats; 403 struct intel_engine_cs *ring; 404 int i, j; 405 406 memset(&stats, 0, sizeof(stats)); 407 408 for_each_ring(ring, dev_priv, i) { 409 for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { 410 list_for_each_entry(obj, 411 &ring->batch_pool.cache_list[j], 412 batch_pool_link) 413 per_file_stats(0, obj, &stats); 414 } 415 } 416 417 print_file_stats(m, "[k]batch pool", stats); 418 } 419 420 #define count_vmas(list, member) do { \ 421 list_for_each_entry(vma, list, member) { \ 422 size += i915_gem_obj_total_ggtt_size(vma->obj); \ 423 ++count; \ 424 if (vma->obj->map_and_fenceable) { \ 425 mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ 426 ++mappable_count; \ 427 } \ 428 } \ 429 } while (0) 430 431 static int i915_gem_object_info(struct seq_file *m, void* data) 432 { 433 struct drm_info_node *node = m->private; 434 struct drm_device *dev = node->minor->dev; 435 struct drm_i915_private *dev_priv = dev->dev_private; 436 u32 count, mappable_count, purgeable_count; 437 u64 size, mappable_size, purgeable_size; 438 struct drm_i915_gem_object *obj; 439 struct i915_address_space *vm = &dev_priv->gtt.base; 440 struct drm_file *file; 441 struct i915_vma *vma; 442 int ret; 443 444 ret = mutex_lock_interruptible(&dev->struct_mutex); 445 if (ret) 446 return ret; 447 448 seq_printf(m, "%u objects, %zu bytes\n", 449 dev_priv->mm.object_count, 450 dev_priv->mm.object_memory); 451 452 size = count = mappable_size = mappable_count = 0; 453 count_objects(&dev_priv->mm.bound_list, global_list); 454 seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n", 455 count, mappable_count, size, mappable_size); 456 457 size = count = mappable_size = mappable_count = 0; 458 count_vmas(&vm->active_list, mm_list); 459 seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", 460 count, mappable_count, size, mappable_size); 461 462 size = count = mappable_size = mappable_count = 0; 463 count_vmas(&vm->inactive_list, mm_list); 464 seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", 465 count, mappable_count, size, mappable_size); 466 467 size = count = purgeable_size = purgeable_count = 0; 468 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { 469 size += obj->base.size, ++count; 470 if (obj->madv == I915_MADV_DONTNEED) 471 purgeable_size += obj->base.size, ++purgeable_count; 472 } 473 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); 474 475 size = count = mappable_size = mappable_count = 0; 476 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 477 if (obj->fault_mappable) { 478 size += i915_gem_obj_ggtt_size(obj); 479 ++count; 480 } 481 if (obj->pin_display) { 482 mappable_size += i915_gem_obj_ggtt_size(obj); 483 ++mappable_count; 484 } 485 if (obj->madv == I915_MADV_DONTNEED) { 486 purgeable_size += obj->base.size; 487 ++purgeable_count; 488 } 489 } 490 seq_printf(m, "%u purgeable objects, %llu bytes\n", 491 purgeable_count, purgeable_size); 492 seq_printf(m, "%u pinned mappable objects, %llu bytes\n", 493 mappable_count, mappable_size); 494 seq_printf(m, "%u fault mappable objects, %llu bytes\n", 495 count, size); 496 497 seq_printf(m, "%llu [%llu] gtt total\n", 498 dev_priv->gtt.base.total, 499 (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); 500 501 seq_putc(m, '\n'); 502 print_batch_pool_stats(m, dev_priv); 503 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 504 struct file_stats stats; 505 struct task_struct *task; 506 507 memset(&stats, 0, sizeof(stats)); 508 stats.file_priv = file->driver_priv; 509 spin_lock(&file->table_lock); 510 idr_for_each(&file->object_idr, per_file_stats, &stats); 511 spin_unlock(&file->table_lock); 512 /* 513 * Although we have a valid reference on file->pid, that does 514 * not guarantee that the task_struct who called get_pid() is 515 * still alive (e.g. get_pid(current) => fork() => exit()). 516 * Therefore, we need to protect this ->comm access using RCU. 517 */ 518 rcu_read_lock(); 519 task = pid_task(file->pid, PIDTYPE_PID); 520 print_file_stats(m, task ? task->comm : "<unknown>", stats); 521 rcu_read_unlock(); 522 } 523 524 mutex_unlock(&dev->struct_mutex); 525 526 return 0; 527 } 528 529 static int i915_gem_gtt_info(struct seq_file *m, void *data) 530 { 531 struct drm_info_node *node = m->private; 532 struct drm_device *dev = node->minor->dev; 533 uintptr_t list = (uintptr_t) node->info_ent->data; 534 struct drm_i915_private *dev_priv = dev->dev_private; 535 struct drm_i915_gem_object *obj; 536 u64 total_obj_size, total_gtt_size; 537 int count, ret; 538 539 ret = mutex_lock_interruptible(&dev->struct_mutex); 540 if (ret) 541 return ret; 542 543 total_obj_size = total_gtt_size = count = 0; 544 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 545 if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) 546 continue; 547 548 seq_puts(m, " "); 549 describe_obj(m, obj); 550 seq_putc(m, '\n'); 551 total_obj_size += obj->base.size; 552 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 553 count++; 554 } 555 556 mutex_unlock(&dev->struct_mutex); 557 558 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 559 count, total_obj_size, total_gtt_size); 560 561 return 0; 562 } 563 564 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 565 { 566 struct drm_info_node *node = m->private; 567 struct drm_device *dev = node->minor->dev; 568 struct drm_i915_private *dev_priv = dev->dev_private; 569 struct intel_crtc *crtc; 570 int ret; 571 572 ret = mutex_lock_interruptible(&dev->struct_mutex); 573 if (ret) 574 return ret; 575 576 for_each_intel_crtc(dev, crtc) { 577 const char pipe = pipe_name(crtc->pipe); 578 const char plane = plane_name(crtc->plane); 579 struct intel_unpin_work *work; 580 581 spin_lock_irq(&dev->event_lock); 582 work = crtc->unpin_work; 583 if (work == NULL) { 584 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 585 pipe, plane); 586 } else { 587 u32 addr; 588 589 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 590 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 591 pipe, plane); 592 } else { 593 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 594 pipe, plane); 595 } 596 if (work->flip_queued_req) { 597 struct intel_engine_cs *ring = 598 i915_gem_request_get_ring(work->flip_queued_req); 599 600 seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n", 601 ring->name, 602 i915_gem_request_get_seqno(work->flip_queued_req), 603 dev_priv->next_seqno, 604 ring->get_seqno(ring, true), 605 i915_gem_request_completed(work->flip_queued_req, true)); 606 } else 607 seq_printf(m, "Flip not associated with any ring\n"); 608 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 609 work->flip_queued_vblank, 610 work->flip_ready_vblank, 611 drm_crtc_vblank_count(&crtc->base)); 612 if (work->enable_stall_check) 613 seq_puts(m, "Stall check enabled, "); 614 else 615 seq_puts(m, "Stall check waiting for page flip ioctl, "); 616 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 617 618 if (INTEL_INFO(dev)->gen >= 4) 619 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane))); 620 else 621 addr = I915_READ(DSPADDR(crtc->plane)); 622 seq_printf(m, "Current scanout address 0x%08x\n", addr); 623 624 if (work->pending_flip_obj) { 625 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset); 626 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset); 627 } 628 } 629 spin_unlock_irq(&dev->event_lock); 630 } 631 632 mutex_unlock(&dev->struct_mutex); 633 634 return 0; 635 } 636 637 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 638 { 639 struct drm_info_node *node = m->private; 640 struct drm_device *dev = node->minor->dev; 641 struct drm_i915_private *dev_priv = dev->dev_private; 642 struct drm_i915_gem_object *obj; 643 struct intel_engine_cs *ring; 644 int total = 0; 645 int ret, i, j; 646 647 ret = mutex_lock_interruptible(&dev->struct_mutex); 648 if (ret) 649 return ret; 650 651 for_each_ring(ring, dev_priv, i) { 652 for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { 653 int count; 654 655 count = 0; 656 list_for_each_entry(obj, 657 &ring->batch_pool.cache_list[j], 658 batch_pool_link) 659 count++; 660 seq_printf(m, "%s cache[%d]: %d objects\n", 661 ring->name, j, count); 662 663 list_for_each_entry(obj, 664 &ring->batch_pool.cache_list[j], 665 batch_pool_link) { 666 seq_puts(m, " "); 667 describe_obj(m, obj); 668 seq_putc(m, '\n'); 669 } 670 671 total += count; 672 } 673 } 674 675 seq_printf(m, "total: %d\n", total); 676 677 mutex_unlock(&dev->struct_mutex); 678 679 return 0; 680 } 681 682 static int i915_gem_request_info(struct seq_file *m, void *data) 683 { 684 struct drm_info_node *node = m->private; 685 struct drm_device *dev = node->minor->dev; 686 struct drm_i915_private *dev_priv = dev->dev_private; 687 struct intel_engine_cs *ring; 688 struct drm_i915_gem_request *req; 689 int ret, any, i; 690 691 ret = mutex_lock_interruptible(&dev->struct_mutex); 692 if (ret) 693 return ret; 694 695 any = 0; 696 for_each_ring(ring, dev_priv, i) { 697 int count; 698 699 count = 0; 700 list_for_each_entry(req, &ring->request_list, list) 701 count++; 702 if (count == 0) 703 continue; 704 705 seq_printf(m, "%s requests: %d\n", ring->name, count); 706 list_for_each_entry(req, &ring->request_list, list) { 707 struct task_struct *task; 708 709 rcu_read_lock(); 710 task = NULL; 711 if (req->pid) 712 task = pid_task(req->pid, PIDTYPE_PID); 713 seq_printf(m, " %x @ %d: %s [%d]\n", 714 req->seqno, 715 (int) (jiffies - req->emitted_jiffies), 716 task ? task->comm : "<unknown>", 717 task ? task->pid : -1); 718 rcu_read_unlock(); 719 } 720 721 any++; 722 } 723 mutex_unlock(&dev->struct_mutex); 724 725 if (any == 0) 726 seq_puts(m, "No requests\n"); 727 728 return 0; 729 } 730 731 static void i915_ring_seqno_info(struct seq_file *m, 732 struct intel_engine_cs *ring) 733 { 734 if (ring->get_seqno) { 735 seq_printf(m, "Current sequence (%s): %x\n", 736 ring->name, ring->get_seqno(ring, false)); 737 } 738 } 739 740 static int i915_gem_seqno_info(struct seq_file *m, void *data) 741 { 742 struct drm_info_node *node = m->private; 743 struct drm_device *dev = node->minor->dev; 744 struct drm_i915_private *dev_priv = dev->dev_private; 745 struct intel_engine_cs *ring; 746 int ret, i; 747 748 ret = mutex_lock_interruptible(&dev->struct_mutex); 749 if (ret) 750 return ret; 751 intel_runtime_pm_get(dev_priv); 752 753 for_each_ring(ring, dev_priv, i) 754 i915_ring_seqno_info(m, ring); 755 756 intel_runtime_pm_put(dev_priv); 757 mutex_unlock(&dev->struct_mutex); 758 759 return 0; 760 } 761 762 763 static int i915_interrupt_info(struct seq_file *m, void *data) 764 { 765 struct drm_info_node *node = m->private; 766 struct drm_device *dev = node->minor->dev; 767 struct drm_i915_private *dev_priv = dev->dev_private; 768 struct intel_engine_cs *ring; 769 int ret, i, pipe; 770 771 ret = mutex_lock_interruptible(&dev->struct_mutex); 772 if (ret) 773 return ret; 774 intel_runtime_pm_get(dev_priv); 775 776 if (IS_CHERRYVIEW(dev)) { 777 seq_printf(m, "Master Interrupt Control:\t%08x\n", 778 I915_READ(GEN8_MASTER_IRQ)); 779 780 seq_printf(m, "Display IER:\t%08x\n", 781 I915_READ(VLV_IER)); 782 seq_printf(m, "Display IIR:\t%08x\n", 783 I915_READ(VLV_IIR)); 784 seq_printf(m, "Display IIR_RW:\t%08x\n", 785 I915_READ(VLV_IIR_RW)); 786 seq_printf(m, "Display IMR:\t%08x\n", 787 I915_READ(VLV_IMR)); 788 for_each_pipe(dev_priv, pipe) 789 seq_printf(m, "Pipe %c stat:\t%08x\n", 790 pipe_name(pipe), 791 I915_READ(PIPESTAT(pipe))); 792 793 seq_printf(m, "Port hotplug:\t%08x\n", 794 I915_READ(PORT_HOTPLUG_EN)); 795 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 796 I915_READ(VLV_DPFLIPSTAT)); 797 seq_printf(m, "DPINVGTT:\t%08x\n", 798 I915_READ(DPINVGTT)); 799 800 for (i = 0; i < 4; i++) { 801 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 802 i, I915_READ(GEN8_GT_IMR(i))); 803 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 804 i, I915_READ(GEN8_GT_IIR(i))); 805 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 806 i, I915_READ(GEN8_GT_IER(i))); 807 } 808 809 seq_printf(m, "PCU interrupt mask:\t%08x\n", 810 I915_READ(GEN8_PCU_IMR)); 811 seq_printf(m, "PCU interrupt identity:\t%08x\n", 812 I915_READ(GEN8_PCU_IIR)); 813 seq_printf(m, "PCU interrupt enable:\t%08x\n", 814 I915_READ(GEN8_PCU_IER)); 815 } else if (INTEL_INFO(dev)->gen >= 8) { 816 seq_printf(m, "Master Interrupt Control:\t%08x\n", 817 I915_READ(GEN8_MASTER_IRQ)); 818 819 for (i = 0; i < 4; i++) { 820 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 821 i, I915_READ(GEN8_GT_IMR(i))); 822 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 823 i, I915_READ(GEN8_GT_IIR(i))); 824 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 825 i, I915_READ(GEN8_GT_IER(i))); 826 } 827 828 for_each_pipe(dev_priv, pipe) { 829 if (!intel_display_power_is_enabled(dev_priv, 830 POWER_DOMAIN_PIPE(pipe))) { 831 seq_printf(m, "Pipe %c power disabled\n", 832 pipe_name(pipe)); 833 continue; 834 } 835 seq_printf(m, "Pipe %c IMR:\t%08x\n", 836 pipe_name(pipe), 837 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 838 seq_printf(m, "Pipe %c IIR:\t%08x\n", 839 pipe_name(pipe), 840 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 841 seq_printf(m, "Pipe %c IER:\t%08x\n", 842 pipe_name(pipe), 843 I915_READ(GEN8_DE_PIPE_IER(pipe))); 844 } 845 846 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 847 I915_READ(GEN8_DE_PORT_IMR)); 848 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 849 I915_READ(GEN8_DE_PORT_IIR)); 850 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 851 I915_READ(GEN8_DE_PORT_IER)); 852 853 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 854 I915_READ(GEN8_DE_MISC_IMR)); 855 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 856 I915_READ(GEN8_DE_MISC_IIR)); 857 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 858 I915_READ(GEN8_DE_MISC_IER)); 859 860 seq_printf(m, "PCU interrupt mask:\t%08x\n", 861 I915_READ(GEN8_PCU_IMR)); 862 seq_printf(m, "PCU interrupt identity:\t%08x\n", 863 I915_READ(GEN8_PCU_IIR)); 864 seq_printf(m, "PCU interrupt enable:\t%08x\n", 865 I915_READ(GEN8_PCU_IER)); 866 } else if (IS_VALLEYVIEW(dev)) { 867 seq_printf(m, "Display IER:\t%08x\n", 868 I915_READ(VLV_IER)); 869 seq_printf(m, "Display IIR:\t%08x\n", 870 I915_READ(VLV_IIR)); 871 seq_printf(m, "Display IIR_RW:\t%08x\n", 872 I915_READ(VLV_IIR_RW)); 873 seq_printf(m, "Display IMR:\t%08x\n", 874 I915_READ(VLV_IMR)); 875 for_each_pipe(dev_priv, pipe) 876 seq_printf(m, "Pipe %c stat:\t%08x\n", 877 pipe_name(pipe), 878 I915_READ(PIPESTAT(pipe))); 879 880 seq_printf(m, "Master IER:\t%08x\n", 881 I915_READ(VLV_MASTER_IER)); 882 883 seq_printf(m, "Render IER:\t%08x\n", 884 I915_READ(GTIER)); 885 seq_printf(m, "Render IIR:\t%08x\n", 886 I915_READ(GTIIR)); 887 seq_printf(m, "Render IMR:\t%08x\n", 888 I915_READ(GTIMR)); 889 890 seq_printf(m, "PM IER:\t\t%08x\n", 891 I915_READ(GEN6_PMIER)); 892 seq_printf(m, "PM IIR:\t\t%08x\n", 893 I915_READ(GEN6_PMIIR)); 894 seq_printf(m, "PM IMR:\t\t%08x\n", 895 I915_READ(GEN6_PMIMR)); 896 897 seq_printf(m, "Port hotplug:\t%08x\n", 898 I915_READ(PORT_HOTPLUG_EN)); 899 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 900 I915_READ(VLV_DPFLIPSTAT)); 901 seq_printf(m, "DPINVGTT:\t%08x\n", 902 I915_READ(DPINVGTT)); 903 904 } else if (!HAS_PCH_SPLIT(dev)) { 905 seq_printf(m, "Interrupt enable: %08x\n", 906 I915_READ(IER)); 907 seq_printf(m, "Interrupt identity: %08x\n", 908 I915_READ(IIR)); 909 seq_printf(m, "Interrupt mask: %08x\n", 910 I915_READ(IMR)); 911 for_each_pipe(dev_priv, pipe) 912 seq_printf(m, "Pipe %c stat: %08x\n", 913 pipe_name(pipe), 914 I915_READ(PIPESTAT(pipe))); 915 } else { 916 seq_printf(m, "North Display Interrupt enable: %08x\n", 917 I915_READ(DEIER)); 918 seq_printf(m, "North Display Interrupt identity: %08x\n", 919 I915_READ(DEIIR)); 920 seq_printf(m, "North Display Interrupt mask: %08x\n", 921 I915_READ(DEIMR)); 922 seq_printf(m, "South Display Interrupt enable: %08x\n", 923 I915_READ(SDEIER)); 924 seq_printf(m, "South Display Interrupt identity: %08x\n", 925 I915_READ(SDEIIR)); 926 seq_printf(m, "South Display Interrupt mask: %08x\n", 927 I915_READ(SDEIMR)); 928 seq_printf(m, "Graphics Interrupt enable: %08x\n", 929 I915_READ(GTIER)); 930 seq_printf(m, "Graphics Interrupt identity: %08x\n", 931 I915_READ(GTIIR)); 932 seq_printf(m, "Graphics Interrupt mask: %08x\n", 933 I915_READ(GTIMR)); 934 } 935 for_each_ring(ring, dev_priv, i) { 936 if (INTEL_INFO(dev)->gen >= 6) { 937 seq_printf(m, 938 "Graphics Interrupt mask (%s): %08x\n", 939 ring->name, I915_READ_IMR(ring)); 940 } 941 i915_ring_seqno_info(m, ring); 942 } 943 intel_runtime_pm_put(dev_priv); 944 mutex_unlock(&dev->struct_mutex); 945 946 return 0; 947 } 948 949 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 950 { 951 struct drm_info_node *node = m->private; 952 struct drm_device *dev = node->minor->dev; 953 struct drm_i915_private *dev_priv = dev->dev_private; 954 int i, ret; 955 956 ret = mutex_lock_interruptible(&dev->struct_mutex); 957 if (ret) 958 return ret; 959 960 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 961 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 962 for (i = 0; i < dev_priv->num_fence_regs; i++) { 963 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 964 965 seq_printf(m, "Fence %d, pin count = %d, object = ", 966 i, dev_priv->fence_regs[i].pin_count); 967 if (obj == NULL) 968 seq_puts(m, "unused"); 969 else 970 describe_obj(m, obj); 971 seq_putc(m, '\n'); 972 } 973 974 mutex_unlock(&dev->struct_mutex); 975 return 0; 976 } 977 978 static int i915_hws_info(struct seq_file *m, void *data) 979 { 980 struct drm_info_node *node = m->private; 981 struct drm_device *dev = node->minor->dev; 982 struct drm_i915_private *dev_priv = dev->dev_private; 983 struct intel_engine_cs *ring; 984 const u32 *hws; 985 int i; 986 987 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 988 hws = ring->status_page.page_addr; 989 if (hws == NULL) 990 return 0; 991 992 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 993 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 994 i * 4, 995 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 996 } 997 return 0; 998 } 999 1000 static ssize_t 1001 i915_error_state_write(struct file *filp, 1002 const char __user *ubuf, 1003 size_t cnt, 1004 loff_t *ppos) 1005 { 1006 struct i915_error_state_file_priv *error_priv = filp->private_data; 1007 struct drm_device *dev = error_priv->dev; 1008 int ret; 1009 1010 DRM_DEBUG_DRIVER("Resetting error state\n"); 1011 1012 ret = mutex_lock_interruptible(&dev->struct_mutex); 1013 if (ret) 1014 return ret; 1015 1016 i915_destroy_error_state(dev); 1017 mutex_unlock(&dev->struct_mutex); 1018 1019 return cnt; 1020 } 1021 1022 static int i915_error_state_open(struct inode *inode, struct file *file) 1023 { 1024 struct drm_device *dev = inode->i_private; 1025 struct i915_error_state_file_priv *error_priv; 1026 1027 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 1028 if (!error_priv) 1029 return -ENOMEM; 1030 1031 error_priv->dev = dev; 1032 1033 i915_error_state_get(dev, error_priv); 1034 1035 file->private_data = error_priv; 1036 1037 return 0; 1038 } 1039 1040 static int i915_error_state_release(struct inode *inode, struct file *file) 1041 { 1042 struct i915_error_state_file_priv *error_priv = file->private_data; 1043 1044 i915_error_state_put(error_priv); 1045 kfree(error_priv); 1046 1047 return 0; 1048 } 1049 1050 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, 1051 size_t count, loff_t *pos) 1052 { 1053 struct i915_error_state_file_priv *error_priv = file->private_data; 1054 struct drm_i915_error_state_buf error_str; 1055 loff_t tmp_pos = 0; 1056 ssize_t ret_count = 0; 1057 int ret; 1058 1059 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos); 1060 if (ret) 1061 return ret; 1062 1063 ret = i915_error_state_to_str(&error_str, error_priv); 1064 if (ret) 1065 goto out; 1066 1067 ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, 1068 error_str.buf, 1069 error_str.bytes); 1070 1071 if (ret_count < 0) 1072 ret = ret_count; 1073 else 1074 *pos = error_str.start + ret_count; 1075 out: 1076 i915_error_state_buf_release(&error_str); 1077 return ret ?: ret_count; 1078 } 1079 1080 static const struct file_operations i915_error_state_fops = { 1081 .owner = THIS_MODULE, 1082 .open = i915_error_state_open, 1083 .read = i915_error_state_read, 1084 .write = i915_error_state_write, 1085 .llseek = default_llseek, 1086 .release = i915_error_state_release, 1087 }; 1088 1089 static int 1090 i915_next_seqno_get(void *data, u64 *val) 1091 { 1092 struct drm_device *dev = data; 1093 struct drm_i915_private *dev_priv = dev->dev_private; 1094 int ret; 1095 1096 ret = mutex_lock_interruptible(&dev->struct_mutex); 1097 if (ret) 1098 return ret; 1099 1100 *val = dev_priv->next_seqno; 1101 mutex_unlock(&dev->struct_mutex); 1102 1103 return 0; 1104 } 1105 1106 static int 1107 i915_next_seqno_set(void *data, u64 val) 1108 { 1109 struct drm_device *dev = data; 1110 int ret; 1111 1112 ret = mutex_lock_interruptible(&dev->struct_mutex); 1113 if (ret) 1114 return ret; 1115 1116 ret = i915_gem_set_seqno(dev, val); 1117 mutex_unlock(&dev->struct_mutex); 1118 1119 return ret; 1120 } 1121 1122 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1123 i915_next_seqno_get, i915_next_seqno_set, 1124 "0x%llx\n"); 1125 1126 static int i915_frequency_info(struct seq_file *m, void *unused) 1127 { 1128 struct drm_info_node *node = m->private; 1129 struct drm_device *dev = node->minor->dev; 1130 struct drm_i915_private *dev_priv = dev->dev_private; 1131 int ret = 0; 1132 1133 intel_runtime_pm_get(dev_priv); 1134 1135 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1136 1137 if (IS_GEN5(dev)) { 1138 u16 rgvswctl = I915_READ16(MEMSWCTL); 1139 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1140 1141 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1142 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1143 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1144 MEMSTAT_VID_SHIFT); 1145 seq_printf(m, "Current P-state: %d\n", 1146 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1147 } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || 1148 IS_BROADWELL(dev) || IS_GEN9(dev)) { 1149 u32 rp_state_limits; 1150 u32 gt_perf_status; 1151 u32 rp_state_cap; 1152 u32 rpmodectl, rpinclimit, rpdeclimit; 1153 u32 rpstat, cagf, reqf; 1154 u32 rpupei, rpcurup, rpprevup; 1155 u32 rpdownei, rpcurdown, rpprevdown; 1156 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1157 int max_freq; 1158 1159 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1160 if (IS_BROXTON(dev)) { 1161 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 1162 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 1163 } else { 1164 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1165 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1166 } 1167 1168 /* RPSTAT1 is in the GT power well */ 1169 ret = mutex_lock_interruptible(&dev->struct_mutex); 1170 if (ret) 1171 goto out; 1172 1173 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1174 1175 reqf = I915_READ(GEN6_RPNSWREQ); 1176 if (IS_GEN9(dev)) 1177 reqf >>= 23; 1178 else { 1179 reqf &= ~GEN6_TURBO_DISABLE; 1180 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1181 reqf >>= 24; 1182 else 1183 reqf >>= 25; 1184 } 1185 reqf = intel_gpu_freq(dev_priv, reqf); 1186 1187 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1188 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1189 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1190 1191 rpstat = I915_READ(GEN6_RPSTAT1); 1192 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 1193 rpcurup = I915_READ(GEN6_RP_CUR_UP); 1194 rpprevup = I915_READ(GEN6_RP_PREV_UP); 1195 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 1196 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 1197 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 1198 if (IS_GEN9(dev)) 1199 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1200 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1201 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1202 else 1203 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1204 cagf = intel_gpu_freq(dev_priv, cagf); 1205 1206 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1207 mutex_unlock(&dev->struct_mutex); 1208 1209 if (IS_GEN6(dev) || IS_GEN7(dev)) { 1210 pm_ier = I915_READ(GEN6_PMIER); 1211 pm_imr = I915_READ(GEN6_PMIMR); 1212 pm_isr = I915_READ(GEN6_PMISR); 1213 pm_iir = I915_READ(GEN6_PMIIR); 1214 pm_mask = I915_READ(GEN6_PMINTRMSK); 1215 } else { 1216 pm_ier = I915_READ(GEN8_GT_IER(2)); 1217 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1218 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1219 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1220 pm_mask = I915_READ(GEN6_PMINTRMSK); 1221 } 1222 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1223 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1224 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1225 seq_printf(m, "Render p-state ratio: %d\n", 1226 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); 1227 seq_printf(m, "Render p-state VID: %d\n", 1228 gt_perf_status & 0xff); 1229 seq_printf(m, "Render p-state limit: %d\n", 1230 rp_state_limits & 0xff); 1231 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1232 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1233 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1234 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1235 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1236 seq_printf(m, "CAGF: %dMHz\n", cagf); 1237 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1238 GEN6_CURICONT_MASK); 1239 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1240 GEN6_CURBSYTAVG_MASK); 1241 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 1242 GEN6_CURBSYTAVG_MASK); 1243 seq_printf(m, "Up threshold: %d%%\n", 1244 dev_priv->rps.up_threshold); 1245 1246 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 1247 GEN6_CURIAVG_MASK); 1248 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 1249 GEN6_CURBSYTAVG_MASK); 1250 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 1251 GEN6_CURBSYTAVG_MASK); 1252 seq_printf(m, "Down threshold: %d%%\n", 1253 dev_priv->rps.down_threshold); 1254 1255 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : 1256 rp_state_cap >> 16) & 0xff; 1257 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1258 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1259 intel_gpu_freq(dev_priv, max_freq)); 1260 1261 max_freq = (rp_state_cap & 0xff00) >> 8; 1262 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1263 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1264 intel_gpu_freq(dev_priv, max_freq)); 1265 1266 max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : 1267 rp_state_cap >> 0) & 0xff; 1268 max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); 1269 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1270 intel_gpu_freq(dev_priv, max_freq)); 1271 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1272 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1273 1274 seq_printf(m, "Current freq: %d MHz\n", 1275 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1276 seq_printf(m, "Actual freq: %d MHz\n", cagf); 1277 seq_printf(m, "Idle freq: %d MHz\n", 1278 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1279 seq_printf(m, "Min freq: %d MHz\n", 1280 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1281 seq_printf(m, "Max freq: %d MHz\n", 1282 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1283 seq_printf(m, 1284 "efficient (RPe) frequency: %d MHz\n", 1285 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1286 } else if (IS_VALLEYVIEW(dev)) { 1287 u32 freq_sts; 1288 1289 mutex_lock(&dev_priv->rps.hw_lock); 1290 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1291 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1292 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1293 1294 seq_printf(m, "actual GPU freq: %d MHz\n", 1295 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1296 1297 seq_printf(m, "current GPU freq: %d MHz\n", 1298 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1299 1300 seq_printf(m, "max GPU freq: %d MHz\n", 1301 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1302 1303 seq_printf(m, "min GPU freq: %d MHz\n", 1304 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1305 1306 seq_printf(m, "idle GPU freq: %d MHz\n", 1307 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1308 1309 seq_printf(m, 1310 "efficient (RPe) frequency: %d MHz\n", 1311 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1312 mutex_unlock(&dev_priv->rps.hw_lock); 1313 } else { 1314 seq_puts(m, "no P-state info available\n"); 1315 } 1316 1317 out: 1318 intel_runtime_pm_put(dev_priv); 1319 return ret; 1320 } 1321 1322 static int i915_hangcheck_info(struct seq_file *m, void *unused) 1323 { 1324 struct drm_info_node *node = m->private; 1325 struct drm_device *dev = node->minor->dev; 1326 struct drm_i915_private *dev_priv = dev->dev_private; 1327 struct intel_engine_cs *ring; 1328 u64 acthd[I915_NUM_RINGS]; 1329 u32 seqno[I915_NUM_RINGS]; 1330 int i; 1331 1332 if (!i915.enable_hangcheck) { 1333 seq_printf(m, "Hangcheck disabled\n"); 1334 return 0; 1335 } 1336 1337 intel_runtime_pm_get(dev_priv); 1338 1339 for_each_ring(ring, dev_priv, i) { 1340 seqno[i] = ring->get_seqno(ring, false); 1341 acthd[i] = intel_ring_get_active_head(ring); 1342 } 1343 1344 intel_runtime_pm_put(dev_priv); 1345 1346 if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) { 1347 seq_printf(m, "Hangcheck active, fires in %dms\n", 1348 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - 1349 jiffies)); 1350 } else 1351 seq_printf(m, "Hangcheck inactive\n"); 1352 1353 for_each_ring(ring, dev_priv, i) { 1354 seq_printf(m, "%s:\n", ring->name); 1355 seq_printf(m, "\tseqno = %x [current %x]\n", 1356 ring->hangcheck.seqno, seqno[i]); 1357 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1358 (long long)ring->hangcheck.acthd, 1359 (long long)acthd[i]); 1360 seq_printf(m, "\tmax ACTHD = 0x%08llx\n", 1361 (long long)ring->hangcheck.max_acthd); 1362 seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); 1363 seq_printf(m, "\taction = %d\n", ring->hangcheck.action); 1364 } 1365 1366 return 0; 1367 } 1368 1369 static int ironlake_drpc_info(struct seq_file *m) 1370 { 1371 struct drm_info_node *node = m->private; 1372 struct drm_device *dev = node->minor->dev; 1373 struct drm_i915_private *dev_priv = dev->dev_private; 1374 u32 rgvmodectl, rstdbyctl; 1375 u16 crstandvid; 1376 int ret; 1377 1378 ret = mutex_lock_interruptible(&dev->struct_mutex); 1379 if (ret) 1380 return ret; 1381 intel_runtime_pm_get(dev_priv); 1382 1383 rgvmodectl = I915_READ(MEMMODECTL); 1384 rstdbyctl = I915_READ(RSTDBYCTL); 1385 crstandvid = I915_READ16(CRSTANDVID); 1386 1387 intel_runtime_pm_put(dev_priv); 1388 mutex_unlock(&dev->struct_mutex); 1389 1390 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1391 "yes" : "no"); 1392 seq_printf(m, "Boost freq: %d\n", 1393 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1394 MEMMODE_BOOST_FREQ_SHIFT); 1395 seq_printf(m, "HW control enabled: %s\n", 1396 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1397 seq_printf(m, "SW control enabled: %s\n", 1398 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1399 seq_printf(m, "Gated voltage change: %s\n", 1400 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1401 seq_printf(m, "Starting frequency: P%d\n", 1402 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1403 seq_printf(m, "Max P-state: P%d\n", 1404 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1405 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1406 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1407 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1408 seq_printf(m, "Render standby enabled: %s\n", 1409 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1410 seq_puts(m, "Current RS state: "); 1411 switch (rstdbyctl & RSX_STATUS_MASK) { 1412 case RSX_STATUS_ON: 1413 seq_puts(m, "on\n"); 1414 break; 1415 case RSX_STATUS_RC1: 1416 seq_puts(m, "RC1\n"); 1417 break; 1418 case RSX_STATUS_RC1E: 1419 seq_puts(m, "RC1E\n"); 1420 break; 1421 case RSX_STATUS_RS1: 1422 seq_puts(m, "RS1\n"); 1423 break; 1424 case RSX_STATUS_RS2: 1425 seq_puts(m, "RS2 (RC6)\n"); 1426 break; 1427 case RSX_STATUS_RS3: 1428 seq_puts(m, "RC3 (RC6+)\n"); 1429 break; 1430 default: 1431 seq_puts(m, "unknown\n"); 1432 break; 1433 } 1434 1435 return 0; 1436 } 1437 1438 static int i915_forcewake_domains(struct seq_file *m, void *data) 1439 { 1440 struct drm_info_node *node = m->private; 1441 struct drm_device *dev = node->minor->dev; 1442 struct drm_i915_private *dev_priv = dev->dev_private; 1443 struct intel_uncore_forcewake_domain *fw_domain; 1444 int i; 1445 1446 spin_lock_irq(&dev_priv->uncore.lock); 1447 for_each_fw_domain(fw_domain, dev_priv, i) { 1448 seq_printf(m, "%s.wake_count = %u\n", 1449 intel_uncore_forcewake_domain_to_str(i), 1450 fw_domain->wake_count); 1451 } 1452 spin_unlock_irq(&dev_priv->uncore.lock); 1453 1454 return 0; 1455 } 1456 1457 static int vlv_drpc_info(struct seq_file *m) 1458 { 1459 struct drm_info_node *node = m->private; 1460 struct drm_device *dev = node->minor->dev; 1461 struct drm_i915_private *dev_priv = dev->dev_private; 1462 u32 rpmodectl1, rcctl1, pw_status; 1463 1464 intel_runtime_pm_get(dev_priv); 1465 1466 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1467 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1468 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1469 1470 intel_runtime_pm_put(dev_priv); 1471 1472 seq_printf(m, "Video Turbo Mode: %s\n", 1473 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1474 seq_printf(m, "Turbo enabled: %s\n", 1475 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1476 seq_printf(m, "HW control enabled: %s\n", 1477 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1478 seq_printf(m, "SW control enabled: %s\n", 1479 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1480 GEN6_RP_MEDIA_SW_MODE)); 1481 seq_printf(m, "RC6 Enabled: %s\n", 1482 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1483 GEN6_RC_CTL_EI_MODE(1)))); 1484 seq_printf(m, "Render Power Well: %s\n", 1485 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1486 seq_printf(m, "Media Power Well: %s\n", 1487 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1488 1489 seq_printf(m, "Render RC6 residency since boot: %u\n", 1490 I915_READ(VLV_GT_RENDER_RC6)); 1491 seq_printf(m, "Media RC6 residency since boot: %u\n", 1492 I915_READ(VLV_GT_MEDIA_RC6)); 1493 1494 return i915_forcewake_domains(m, NULL); 1495 } 1496 1497 static int gen6_drpc_info(struct seq_file *m) 1498 { 1499 struct drm_info_node *node = m->private; 1500 struct drm_device *dev = node->minor->dev; 1501 struct drm_i915_private *dev_priv = dev->dev_private; 1502 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1503 unsigned forcewake_count; 1504 int count = 0, ret; 1505 1506 ret = mutex_lock_interruptible(&dev->struct_mutex); 1507 if (ret) 1508 return ret; 1509 intel_runtime_pm_get(dev_priv); 1510 1511 spin_lock_irq(&dev_priv->uncore.lock); 1512 forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count; 1513 spin_unlock_irq(&dev_priv->uncore.lock); 1514 1515 if (forcewake_count) { 1516 seq_puts(m, "RC information inaccurate because somebody " 1517 "holds a forcewake reference \n"); 1518 } else { 1519 /* NB: we cannot use forcewake, else we read the wrong values */ 1520 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1521 udelay(10); 1522 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1523 } 1524 1525 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1526 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1527 1528 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1529 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1530 mutex_unlock(&dev->struct_mutex); 1531 mutex_lock(&dev_priv->rps.hw_lock); 1532 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1533 mutex_unlock(&dev_priv->rps.hw_lock); 1534 1535 intel_runtime_pm_put(dev_priv); 1536 1537 seq_printf(m, "Video Turbo Mode: %s\n", 1538 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1539 seq_printf(m, "HW control enabled: %s\n", 1540 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1541 seq_printf(m, "SW control enabled: %s\n", 1542 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1543 GEN6_RP_MEDIA_SW_MODE)); 1544 seq_printf(m, "RC1e Enabled: %s\n", 1545 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1546 seq_printf(m, "RC6 Enabled: %s\n", 1547 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1548 seq_printf(m, "Deep RC6 Enabled: %s\n", 1549 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1550 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1551 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1552 seq_puts(m, "Current RC state: "); 1553 switch (gt_core_status & GEN6_RCn_MASK) { 1554 case GEN6_RC0: 1555 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1556 seq_puts(m, "Core Power Down\n"); 1557 else 1558 seq_puts(m, "on\n"); 1559 break; 1560 case GEN6_RC3: 1561 seq_puts(m, "RC3\n"); 1562 break; 1563 case GEN6_RC6: 1564 seq_puts(m, "RC6\n"); 1565 break; 1566 case GEN6_RC7: 1567 seq_puts(m, "RC7\n"); 1568 break; 1569 default: 1570 seq_puts(m, "Unknown\n"); 1571 break; 1572 } 1573 1574 seq_printf(m, "Core Power Down: %s\n", 1575 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1576 1577 /* Not exactly sure what this is */ 1578 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1579 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1580 seq_printf(m, "RC6 residency since boot: %u\n", 1581 I915_READ(GEN6_GT_GFX_RC6)); 1582 seq_printf(m, "RC6+ residency since boot: %u\n", 1583 I915_READ(GEN6_GT_GFX_RC6p)); 1584 seq_printf(m, "RC6++ residency since boot: %u\n", 1585 I915_READ(GEN6_GT_GFX_RC6pp)); 1586 1587 seq_printf(m, "RC6 voltage: %dmV\n", 1588 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1589 seq_printf(m, "RC6+ voltage: %dmV\n", 1590 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1591 seq_printf(m, "RC6++ voltage: %dmV\n", 1592 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1593 return 0; 1594 } 1595 1596 static int i915_drpc_info(struct seq_file *m, void *unused) 1597 { 1598 struct drm_info_node *node = m->private; 1599 struct drm_device *dev = node->minor->dev; 1600 1601 if (IS_VALLEYVIEW(dev)) 1602 return vlv_drpc_info(m); 1603 else if (INTEL_INFO(dev)->gen >= 6) 1604 return gen6_drpc_info(m); 1605 else 1606 return ironlake_drpc_info(m); 1607 } 1608 1609 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1610 { 1611 struct drm_info_node *node = m->private; 1612 struct drm_device *dev = node->minor->dev; 1613 struct drm_i915_private *dev_priv = dev->dev_private; 1614 1615 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1616 dev_priv->fb_tracking.busy_bits); 1617 1618 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1619 dev_priv->fb_tracking.flip_bits); 1620 1621 return 0; 1622 } 1623 1624 static int i915_fbc_status(struct seq_file *m, void *unused) 1625 { 1626 struct drm_info_node *node = m->private; 1627 struct drm_device *dev = node->minor->dev; 1628 struct drm_i915_private *dev_priv = dev->dev_private; 1629 1630 if (!HAS_FBC(dev)) { 1631 seq_puts(m, "FBC unsupported on this chipset\n"); 1632 return 0; 1633 } 1634 1635 intel_runtime_pm_get(dev_priv); 1636 mutex_lock(&dev_priv->fbc.lock); 1637 1638 if (intel_fbc_enabled(dev_priv)) 1639 seq_puts(m, "FBC enabled\n"); 1640 else 1641 seq_printf(m, "FBC disabled: %s\n", 1642 intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason)); 1643 1644 if (INTEL_INFO(dev_priv)->gen >= 7) 1645 seq_printf(m, "Compressing: %s\n", 1646 yesno(I915_READ(FBC_STATUS2) & 1647 FBC_COMPRESSION_MASK)); 1648 1649 mutex_unlock(&dev_priv->fbc.lock); 1650 intel_runtime_pm_put(dev_priv); 1651 1652 return 0; 1653 } 1654 1655 static int i915_fbc_fc_get(void *data, u64 *val) 1656 { 1657 struct drm_device *dev = data; 1658 struct drm_i915_private *dev_priv = dev->dev_private; 1659 1660 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1661 return -ENODEV; 1662 1663 *val = dev_priv->fbc.false_color; 1664 1665 return 0; 1666 } 1667 1668 static int i915_fbc_fc_set(void *data, u64 val) 1669 { 1670 struct drm_device *dev = data; 1671 struct drm_i915_private *dev_priv = dev->dev_private; 1672 u32 reg; 1673 1674 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1675 return -ENODEV; 1676 1677 mutex_lock(&dev_priv->fbc.lock); 1678 1679 reg = I915_READ(ILK_DPFC_CONTROL); 1680 dev_priv->fbc.false_color = val; 1681 1682 I915_WRITE(ILK_DPFC_CONTROL, val ? 1683 (reg | FBC_CTL_FALSE_COLOR) : 1684 (reg & ~FBC_CTL_FALSE_COLOR)); 1685 1686 mutex_unlock(&dev_priv->fbc.lock); 1687 return 0; 1688 } 1689 1690 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops, 1691 i915_fbc_fc_get, i915_fbc_fc_set, 1692 "%llu\n"); 1693 1694 static int i915_ips_status(struct seq_file *m, void *unused) 1695 { 1696 struct drm_info_node *node = m->private; 1697 struct drm_device *dev = node->minor->dev; 1698 struct drm_i915_private *dev_priv = dev->dev_private; 1699 1700 if (!HAS_IPS(dev)) { 1701 seq_puts(m, "not supported\n"); 1702 return 0; 1703 } 1704 1705 intel_runtime_pm_get(dev_priv); 1706 1707 seq_printf(m, "Enabled by kernel parameter: %s\n", 1708 yesno(i915.enable_ips)); 1709 1710 if (INTEL_INFO(dev)->gen >= 8) { 1711 seq_puts(m, "Currently: unknown\n"); 1712 } else { 1713 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1714 seq_puts(m, "Currently: enabled\n"); 1715 else 1716 seq_puts(m, "Currently: disabled\n"); 1717 } 1718 1719 intel_runtime_pm_put(dev_priv); 1720 1721 return 0; 1722 } 1723 1724 static int i915_sr_status(struct seq_file *m, void *unused) 1725 { 1726 struct drm_info_node *node = m->private; 1727 struct drm_device *dev = node->minor->dev; 1728 struct drm_i915_private *dev_priv = dev->dev_private; 1729 bool sr_enabled = false; 1730 1731 intel_runtime_pm_get(dev_priv); 1732 1733 if (HAS_PCH_SPLIT(dev)) 1734 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1735 else if (IS_CRESTLINE(dev) || IS_G4X(dev) || 1736 IS_I945G(dev) || IS_I945GM(dev)) 1737 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1738 else if (IS_I915GM(dev)) 1739 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1740 else if (IS_PINEVIEW(dev)) 1741 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1742 else if (IS_VALLEYVIEW(dev)) 1743 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 1744 1745 intel_runtime_pm_put(dev_priv); 1746 1747 seq_printf(m, "self-refresh: %s\n", 1748 sr_enabled ? "enabled" : "disabled"); 1749 1750 return 0; 1751 } 1752 1753 static int i915_emon_status(struct seq_file *m, void *unused) 1754 { 1755 struct drm_info_node *node = m->private; 1756 struct drm_device *dev = node->minor->dev; 1757 struct drm_i915_private *dev_priv = dev->dev_private; 1758 unsigned long temp, chipset, gfx; 1759 int ret; 1760 1761 if (!IS_GEN5(dev)) 1762 return -ENODEV; 1763 1764 ret = mutex_lock_interruptible(&dev->struct_mutex); 1765 if (ret) 1766 return ret; 1767 1768 temp = i915_mch_val(dev_priv); 1769 chipset = i915_chipset_val(dev_priv); 1770 gfx = i915_gfx_val(dev_priv); 1771 mutex_unlock(&dev->struct_mutex); 1772 1773 seq_printf(m, "GMCH temp: %ld\n", temp); 1774 seq_printf(m, "Chipset power: %ld\n", chipset); 1775 seq_printf(m, "GFX power: %ld\n", gfx); 1776 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1777 1778 return 0; 1779 } 1780 1781 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1782 { 1783 struct drm_info_node *node = m->private; 1784 struct drm_device *dev = node->minor->dev; 1785 struct drm_i915_private *dev_priv = dev->dev_private; 1786 int ret = 0; 1787 int gpu_freq, ia_freq; 1788 unsigned int max_gpu_freq, min_gpu_freq; 1789 1790 if (!HAS_CORE_RING_FREQ(dev)) { 1791 seq_puts(m, "unsupported on this chipset\n"); 1792 return 0; 1793 } 1794 1795 intel_runtime_pm_get(dev_priv); 1796 1797 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 1798 1799 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1800 if (ret) 1801 goto out; 1802 1803 if (IS_SKYLAKE(dev)) { 1804 /* Convert GT frequency to 50 HZ units */ 1805 min_gpu_freq = 1806 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1807 max_gpu_freq = 1808 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; 1809 } else { 1810 min_gpu_freq = dev_priv->rps.min_freq_softlimit; 1811 max_gpu_freq = dev_priv->rps.max_freq_softlimit; 1812 } 1813 1814 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1815 1816 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1817 ia_freq = gpu_freq; 1818 sandybridge_pcode_read(dev_priv, 1819 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1820 &ia_freq); 1821 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1822 intel_gpu_freq(dev_priv, (gpu_freq * 1823 (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))), 1824 ((ia_freq >> 0) & 0xff) * 100, 1825 ((ia_freq >> 8) & 0xff) * 100); 1826 } 1827 1828 mutex_unlock(&dev_priv->rps.hw_lock); 1829 1830 out: 1831 intel_runtime_pm_put(dev_priv); 1832 return ret; 1833 } 1834 1835 static int i915_opregion(struct seq_file *m, void *unused) 1836 { 1837 struct drm_info_node *node = m->private; 1838 struct drm_device *dev = node->minor->dev; 1839 struct drm_i915_private *dev_priv = dev->dev_private; 1840 struct intel_opregion *opregion = &dev_priv->opregion; 1841 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1842 int ret; 1843 1844 if (data == NULL) 1845 return -ENOMEM; 1846 1847 ret = mutex_lock_interruptible(&dev->struct_mutex); 1848 if (ret) 1849 goto out; 1850 1851 if (opregion->header) { 1852 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1853 seq_write(m, data, OPREGION_SIZE); 1854 } 1855 1856 mutex_unlock(&dev->struct_mutex); 1857 1858 out: 1859 kfree(data); 1860 return 0; 1861 } 1862 1863 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1864 { 1865 struct drm_info_node *node = m->private; 1866 struct drm_device *dev = node->minor->dev; 1867 struct intel_fbdev *ifbdev = NULL; 1868 struct intel_framebuffer *fb; 1869 struct drm_framebuffer *drm_fb; 1870 1871 #ifdef CONFIG_DRM_FBDEV_EMULATION 1872 struct drm_i915_private *dev_priv = dev->dev_private; 1873 1874 ifbdev = dev_priv->fbdev; 1875 fb = to_intel_framebuffer(ifbdev->helper.fb); 1876 1877 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1878 fb->base.width, 1879 fb->base.height, 1880 fb->base.depth, 1881 fb->base.bits_per_pixel, 1882 fb->base.modifier[0], 1883 atomic_read(&fb->base.refcount.refcount)); 1884 describe_obj(m, fb->obj); 1885 seq_putc(m, '\n'); 1886 #endif 1887 1888 mutex_lock(&dev->mode_config.fb_lock); 1889 drm_for_each_fb(drm_fb, dev) { 1890 fb = to_intel_framebuffer(drm_fb); 1891 if (ifbdev && &fb->base == ifbdev->helper.fb) 1892 continue; 1893 1894 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1895 fb->base.width, 1896 fb->base.height, 1897 fb->base.depth, 1898 fb->base.bits_per_pixel, 1899 fb->base.modifier[0], 1900 atomic_read(&fb->base.refcount.refcount)); 1901 describe_obj(m, fb->obj); 1902 seq_putc(m, '\n'); 1903 } 1904 mutex_unlock(&dev->mode_config.fb_lock); 1905 1906 return 0; 1907 } 1908 1909 static void describe_ctx_ringbuf(struct seq_file *m, 1910 struct intel_ringbuffer *ringbuf) 1911 { 1912 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 1913 ringbuf->space, ringbuf->head, ringbuf->tail, 1914 ringbuf->last_retired_head); 1915 } 1916 1917 static int i915_context_status(struct seq_file *m, void *unused) 1918 { 1919 struct drm_info_node *node = m->private; 1920 struct drm_device *dev = node->minor->dev; 1921 struct drm_i915_private *dev_priv = dev->dev_private; 1922 struct intel_engine_cs *ring; 1923 struct intel_context *ctx; 1924 int ret, i; 1925 1926 ret = mutex_lock_interruptible(&dev->struct_mutex); 1927 if (ret) 1928 return ret; 1929 1930 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1931 if (!i915.enable_execlists && 1932 ctx->legacy_hw_ctx.rcs_state == NULL) 1933 continue; 1934 1935 seq_puts(m, "HW context "); 1936 describe_ctx(m, ctx); 1937 for_each_ring(ring, dev_priv, i) { 1938 if (ring->default_context == ctx) 1939 seq_printf(m, "(default context %s) ", 1940 ring->name); 1941 } 1942 1943 if (i915.enable_execlists) { 1944 seq_putc(m, '\n'); 1945 for_each_ring(ring, dev_priv, i) { 1946 struct drm_i915_gem_object *ctx_obj = 1947 ctx->engine[i].state; 1948 struct intel_ringbuffer *ringbuf = 1949 ctx->engine[i].ringbuf; 1950 1951 seq_printf(m, "%s: ", ring->name); 1952 if (ctx_obj) 1953 describe_obj(m, ctx_obj); 1954 if (ringbuf) 1955 describe_ctx_ringbuf(m, ringbuf); 1956 seq_putc(m, '\n'); 1957 } 1958 } else { 1959 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 1960 } 1961 1962 seq_putc(m, '\n'); 1963 } 1964 1965 mutex_unlock(&dev->struct_mutex); 1966 1967 return 0; 1968 } 1969 1970 static void i915_dump_lrc_obj(struct seq_file *m, 1971 struct intel_engine_cs *ring, 1972 struct drm_i915_gem_object *ctx_obj) 1973 { 1974 struct page *page; 1975 uint32_t *reg_state; 1976 int j; 1977 unsigned long ggtt_offset = 0; 1978 1979 if (ctx_obj == NULL) { 1980 seq_printf(m, "Context on %s with no gem object\n", 1981 ring->name); 1982 return; 1983 } 1984 1985 seq_printf(m, "CONTEXT: %s %u\n", ring->name, 1986 intel_execlists_ctx_id(ctx_obj)); 1987 1988 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 1989 seq_puts(m, "\tNot bound in GGTT\n"); 1990 else 1991 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); 1992 1993 if (i915_gem_object_get_pages(ctx_obj)) { 1994 seq_puts(m, "\tFailed to get pages for context object\n"); 1995 return; 1996 } 1997 1998 page = i915_gem_object_get_page(ctx_obj, 1); 1999 if (!WARN_ON(page == NULL)) { 2000 reg_state = kmap_atomic(page); 2001 2002 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 2003 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", 2004 ggtt_offset + 4096 + (j * 4), 2005 reg_state[j], reg_state[j + 1], 2006 reg_state[j + 2], reg_state[j + 3]); 2007 } 2008 kunmap_atomic(reg_state); 2009 } 2010 2011 seq_putc(m, '\n'); 2012 } 2013 2014 static int i915_dump_lrc(struct seq_file *m, void *unused) 2015 { 2016 struct drm_info_node *node = (struct drm_info_node *) m->private; 2017 struct drm_device *dev = node->minor->dev; 2018 struct drm_i915_private *dev_priv = dev->dev_private; 2019 struct intel_engine_cs *ring; 2020 struct intel_context *ctx; 2021 int ret, i; 2022 2023 if (!i915.enable_execlists) { 2024 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2025 return 0; 2026 } 2027 2028 ret = mutex_lock_interruptible(&dev->struct_mutex); 2029 if (ret) 2030 return ret; 2031 2032 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2033 for_each_ring(ring, dev_priv, i) { 2034 if (ring->default_context != ctx) 2035 i915_dump_lrc_obj(m, ring, 2036 ctx->engine[i].state); 2037 } 2038 } 2039 2040 mutex_unlock(&dev->struct_mutex); 2041 2042 return 0; 2043 } 2044 2045 static int i915_execlists(struct seq_file *m, void *data) 2046 { 2047 struct drm_info_node *node = (struct drm_info_node *)m->private; 2048 struct drm_device *dev = node->minor->dev; 2049 struct drm_i915_private *dev_priv = dev->dev_private; 2050 struct intel_engine_cs *ring; 2051 u32 status_pointer; 2052 u8 read_pointer; 2053 u8 write_pointer; 2054 u32 status; 2055 u32 ctx_id; 2056 struct list_head *cursor; 2057 int ring_id, i; 2058 int ret; 2059 2060 if (!i915.enable_execlists) { 2061 seq_puts(m, "Logical Ring Contexts are disabled\n"); 2062 return 0; 2063 } 2064 2065 ret = mutex_lock_interruptible(&dev->struct_mutex); 2066 if (ret) 2067 return ret; 2068 2069 intel_runtime_pm_get(dev_priv); 2070 2071 for_each_ring(ring, dev_priv, ring_id) { 2072 struct drm_i915_gem_request *head_req = NULL; 2073 int count = 0; 2074 unsigned long flags; 2075 2076 seq_printf(m, "%s\n", ring->name); 2077 2078 status = I915_READ(RING_EXECLIST_STATUS(ring)); 2079 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4); 2080 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", 2081 status, ctx_id); 2082 2083 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); 2084 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); 2085 2086 read_pointer = ring->next_context_status_buffer; 2087 write_pointer = status_pointer & 0x07; 2088 if (read_pointer > write_pointer) 2089 write_pointer += 6; 2090 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n", 2091 read_pointer, write_pointer); 2092 2093 for (i = 0; i < 6; i++) { 2094 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i); 2095 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4); 2096 2097 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", 2098 i, status, ctx_id); 2099 } 2100 2101 spin_lock_irqsave(&ring->execlist_lock, flags); 2102 list_for_each(cursor, &ring->execlist_queue) 2103 count++; 2104 head_req = list_first_entry_or_null(&ring->execlist_queue, 2105 struct drm_i915_gem_request, execlist_link); 2106 spin_unlock_irqrestore(&ring->execlist_lock, flags); 2107 2108 seq_printf(m, "\t%d requests in queue\n", count); 2109 if (head_req) { 2110 struct drm_i915_gem_object *ctx_obj; 2111 2112 ctx_obj = head_req->ctx->engine[ring_id].state; 2113 seq_printf(m, "\tHead request id: %u\n", 2114 intel_execlists_ctx_id(ctx_obj)); 2115 seq_printf(m, "\tHead request tail: %u\n", 2116 head_req->tail); 2117 } 2118 2119 seq_putc(m, '\n'); 2120 } 2121 2122 intel_runtime_pm_put(dev_priv); 2123 mutex_unlock(&dev->struct_mutex); 2124 2125 return 0; 2126 } 2127 2128 static const char *swizzle_string(unsigned swizzle) 2129 { 2130 switch (swizzle) { 2131 case I915_BIT_6_SWIZZLE_NONE: 2132 return "none"; 2133 case I915_BIT_6_SWIZZLE_9: 2134 return "bit9"; 2135 case I915_BIT_6_SWIZZLE_9_10: 2136 return "bit9/bit10"; 2137 case I915_BIT_6_SWIZZLE_9_11: 2138 return "bit9/bit11"; 2139 case I915_BIT_6_SWIZZLE_9_10_11: 2140 return "bit9/bit10/bit11"; 2141 case I915_BIT_6_SWIZZLE_9_17: 2142 return "bit9/bit17"; 2143 case I915_BIT_6_SWIZZLE_9_10_17: 2144 return "bit9/bit10/bit17"; 2145 case I915_BIT_6_SWIZZLE_UNKNOWN: 2146 return "unknown"; 2147 } 2148 2149 return "bug"; 2150 } 2151 2152 static int i915_swizzle_info(struct seq_file *m, void *data) 2153 { 2154 struct drm_info_node *node = m->private; 2155 struct drm_device *dev = node->minor->dev; 2156 struct drm_i915_private *dev_priv = dev->dev_private; 2157 int ret; 2158 2159 ret = mutex_lock_interruptible(&dev->struct_mutex); 2160 if (ret) 2161 return ret; 2162 intel_runtime_pm_get(dev_priv); 2163 2164 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2165 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2166 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2167 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2168 2169 if (IS_GEN3(dev) || IS_GEN4(dev)) { 2170 seq_printf(m, "DDC = 0x%08x\n", 2171 I915_READ(DCC)); 2172 seq_printf(m, "DDC2 = 0x%08x\n", 2173 I915_READ(DCC2)); 2174 seq_printf(m, "C0DRB3 = 0x%04x\n", 2175 I915_READ16(C0DRB3)); 2176 seq_printf(m, "C1DRB3 = 0x%04x\n", 2177 I915_READ16(C1DRB3)); 2178 } else if (INTEL_INFO(dev)->gen >= 6) { 2179 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2180 I915_READ(MAD_DIMM_C0)); 2181 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2182 I915_READ(MAD_DIMM_C1)); 2183 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2184 I915_READ(MAD_DIMM_C2)); 2185 seq_printf(m, "TILECTL = 0x%08x\n", 2186 I915_READ(TILECTL)); 2187 if (INTEL_INFO(dev)->gen >= 8) 2188 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2189 I915_READ(GAMTARBMODE)); 2190 else 2191 seq_printf(m, "ARB_MODE = 0x%08x\n", 2192 I915_READ(ARB_MODE)); 2193 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2194 I915_READ(DISP_ARB_CTL)); 2195 } 2196 2197 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2198 seq_puts(m, "L-shaped memory detected\n"); 2199 2200 intel_runtime_pm_put(dev_priv); 2201 mutex_unlock(&dev->struct_mutex); 2202 2203 return 0; 2204 } 2205 2206 static int per_file_ctx(int id, void *ptr, void *data) 2207 { 2208 struct intel_context *ctx = ptr; 2209 struct seq_file *m = data; 2210 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2211 2212 if (!ppgtt) { 2213 seq_printf(m, " no ppgtt for context %d\n", 2214 ctx->user_handle); 2215 return 0; 2216 } 2217 2218 if (i915_gem_context_is_default(ctx)) 2219 seq_puts(m, " default context:\n"); 2220 else 2221 seq_printf(m, " context %d:\n", ctx->user_handle); 2222 ppgtt->debug_dump(ppgtt, m); 2223 2224 return 0; 2225 } 2226 2227 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2228 { 2229 struct drm_i915_private *dev_priv = dev->dev_private; 2230 struct intel_engine_cs *ring; 2231 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2232 int unused, i; 2233 2234 if (!ppgtt) 2235 return; 2236 2237 for_each_ring(ring, dev_priv, unused) { 2238 seq_printf(m, "%s\n", ring->name); 2239 for (i = 0; i < 4; i++) { 2240 u32 offset = 0x270 + i * 8; 2241 u64 pdp = I915_READ(ring->mmio_base + offset + 4); 2242 pdp <<= 32; 2243 pdp |= I915_READ(ring->mmio_base + offset); 2244 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2245 } 2246 } 2247 } 2248 2249 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2250 { 2251 struct drm_i915_private *dev_priv = dev->dev_private; 2252 struct intel_engine_cs *ring; 2253 struct drm_file *file; 2254 int i; 2255 2256 if (INTEL_INFO(dev)->gen == 6) 2257 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2258 2259 for_each_ring(ring, dev_priv, i) { 2260 seq_printf(m, "%s\n", ring->name); 2261 if (INTEL_INFO(dev)->gen == 7) 2262 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 2263 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 2264 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 2265 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 2266 } 2267 if (dev_priv->mm.aliasing_ppgtt) { 2268 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2269 2270 seq_puts(m, "aliasing PPGTT:\n"); 2271 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2272 2273 ppgtt->debug_dump(ppgtt, m); 2274 } 2275 2276 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2277 struct drm_i915_file_private *file_priv = file->driver_priv; 2278 2279 seq_printf(m, "proc: %s\n", 2280 get_pid_task(file->pid, PIDTYPE_PID)->comm); 2281 idr_for_each(&file_priv->context_idr, per_file_ctx, m); 2282 } 2283 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2284 } 2285 2286 static int i915_ppgtt_info(struct seq_file *m, void *data) 2287 { 2288 struct drm_info_node *node = m->private; 2289 struct drm_device *dev = node->minor->dev; 2290 struct drm_i915_private *dev_priv = dev->dev_private; 2291 2292 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2293 if (ret) 2294 return ret; 2295 intel_runtime_pm_get(dev_priv); 2296 2297 if (INTEL_INFO(dev)->gen >= 8) 2298 gen8_ppgtt_info(m, dev); 2299 else if (INTEL_INFO(dev)->gen >= 6) 2300 gen6_ppgtt_info(m, dev); 2301 2302 intel_runtime_pm_put(dev_priv); 2303 mutex_unlock(&dev->struct_mutex); 2304 2305 return 0; 2306 } 2307 2308 static int count_irq_waiters(struct drm_i915_private *i915) 2309 { 2310 struct intel_engine_cs *ring; 2311 int count = 0; 2312 int i; 2313 2314 for_each_ring(ring, i915, i) 2315 count += ring->irq_refcount; 2316 2317 return count; 2318 } 2319 2320 static int i915_rps_boost_info(struct seq_file *m, void *data) 2321 { 2322 struct drm_info_node *node = m->private; 2323 struct drm_device *dev = node->minor->dev; 2324 struct drm_i915_private *dev_priv = dev->dev_private; 2325 struct drm_file *file; 2326 2327 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2328 seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); 2329 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2330 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2331 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 2332 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2333 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2334 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2335 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2336 spin_lock(&dev_priv->rps.client_lock); 2337 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2338 struct drm_i915_file_private *file_priv = file->driver_priv; 2339 struct task_struct *task; 2340 2341 rcu_read_lock(); 2342 task = pid_task(file->pid, PIDTYPE_PID); 2343 seq_printf(m, "%s [%d]: %d boosts%s\n", 2344 task ? task->comm : "<unknown>", 2345 task ? task->pid : -1, 2346 file_priv->rps.boosts, 2347 list_empty(&file_priv->rps.link) ? "" : ", active"); 2348 rcu_read_unlock(); 2349 } 2350 seq_printf(m, "Semaphore boosts: %d%s\n", 2351 dev_priv->rps.semaphores.boosts, 2352 list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active"); 2353 seq_printf(m, "MMIO flip boosts: %d%s\n", 2354 dev_priv->rps.mmioflips.boosts, 2355 list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); 2356 seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); 2357 spin_unlock(&dev_priv->rps.client_lock); 2358 2359 return 0; 2360 } 2361 2362 static int i915_llc(struct seq_file *m, void *data) 2363 { 2364 struct drm_info_node *node = m->private; 2365 struct drm_device *dev = node->minor->dev; 2366 struct drm_i915_private *dev_priv = dev->dev_private; 2367 2368 /* Size calculation for LLC is a bit of a pain. Ignore for now. */ 2369 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2370 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 2371 2372 return 0; 2373 } 2374 2375 static int i915_edp_psr_status(struct seq_file *m, void *data) 2376 { 2377 struct drm_info_node *node = m->private; 2378 struct drm_device *dev = node->minor->dev; 2379 struct drm_i915_private *dev_priv = dev->dev_private; 2380 u32 psrperf = 0; 2381 u32 stat[3]; 2382 enum pipe pipe; 2383 bool enabled = false; 2384 2385 if (!HAS_PSR(dev)) { 2386 seq_puts(m, "PSR not supported\n"); 2387 return 0; 2388 } 2389 2390 intel_runtime_pm_get(dev_priv); 2391 2392 mutex_lock(&dev_priv->psr.lock); 2393 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2394 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2395 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2396 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2397 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2398 dev_priv->psr.busy_frontbuffer_bits); 2399 seq_printf(m, "Re-enable work scheduled: %s\n", 2400 yesno(work_busy(&dev_priv->psr.work.work))); 2401 2402 if (HAS_DDI(dev)) 2403 enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 2404 else { 2405 for_each_pipe(dev_priv, pipe) { 2406 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2407 VLV_EDP_PSR_CURR_STATE_MASK; 2408 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2409 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2410 enabled = true; 2411 } 2412 } 2413 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2414 2415 if (!HAS_DDI(dev)) 2416 for_each_pipe(dev_priv, pipe) { 2417 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2418 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2419 seq_printf(m, " pipe %c", pipe_name(pipe)); 2420 } 2421 seq_puts(m, "\n"); 2422 2423 /* CHV PSR has no kind of performance counter */ 2424 if (HAS_DDI(dev)) { 2425 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 2426 EDP_PSR_PERF_CNT_MASK; 2427 2428 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2429 } 2430 mutex_unlock(&dev_priv->psr.lock); 2431 2432 intel_runtime_pm_put(dev_priv); 2433 return 0; 2434 } 2435 2436 static int i915_sink_crc(struct seq_file *m, void *data) 2437 { 2438 struct drm_info_node *node = m->private; 2439 struct drm_device *dev = node->minor->dev; 2440 struct intel_encoder *encoder; 2441 struct intel_connector *connector; 2442 struct intel_dp *intel_dp = NULL; 2443 int ret; 2444 u8 crc[6]; 2445 2446 drm_modeset_lock_all(dev); 2447 for_each_intel_connector(dev, connector) { 2448 2449 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2450 continue; 2451 2452 if (!connector->base.encoder) 2453 continue; 2454 2455 encoder = to_intel_encoder(connector->base.encoder); 2456 if (encoder->type != INTEL_OUTPUT_EDP) 2457 continue; 2458 2459 intel_dp = enc_to_intel_dp(&encoder->base); 2460 2461 ret = intel_dp_sink_crc(intel_dp, crc); 2462 if (ret) 2463 goto out; 2464 2465 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2466 crc[0], crc[1], crc[2], 2467 crc[3], crc[4], crc[5]); 2468 goto out; 2469 } 2470 ret = -ENODEV; 2471 out: 2472 drm_modeset_unlock_all(dev); 2473 return ret; 2474 } 2475 2476 static int i915_energy_uJ(struct seq_file *m, void *data) 2477 { 2478 struct drm_info_node *node = m->private; 2479 struct drm_device *dev = node->minor->dev; 2480 struct drm_i915_private *dev_priv = dev->dev_private; 2481 u64 power; 2482 u32 units; 2483 2484 if (INTEL_INFO(dev)->gen < 6) 2485 return -ENODEV; 2486 2487 intel_runtime_pm_get(dev_priv); 2488 2489 rdmsrl(MSR_RAPL_POWER_UNIT, power); 2490 power = (power & 0x1f00) >> 8; 2491 units = 1000000 / (1 << power); /* convert to uJ */ 2492 power = I915_READ(MCH_SECP_NRG_STTS); 2493 power *= units; 2494 2495 intel_runtime_pm_put(dev_priv); 2496 2497 seq_printf(m, "%llu", (long long unsigned)power); 2498 2499 return 0; 2500 } 2501 2502 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2503 { 2504 struct drm_info_node *node = m->private; 2505 struct drm_device *dev = node->minor->dev; 2506 struct drm_i915_private *dev_priv = dev->dev_private; 2507 2508 if (!HAS_RUNTIME_PM(dev)) { 2509 seq_puts(m, "not supported\n"); 2510 return 0; 2511 } 2512 2513 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2514 seq_printf(m, "IRQs disabled: %s\n", 2515 yesno(!intel_irqs_enabled(dev_priv))); 2516 #ifdef CONFIG_PM 2517 seq_printf(m, "Usage count: %d\n", 2518 atomic_read(&dev->dev->power.usage_count)); 2519 #else 2520 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2521 #endif 2522 2523 return 0; 2524 } 2525 2526 static const char *power_domain_str(enum intel_display_power_domain domain) 2527 { 2528 switch (domain) { 2529 case POWER_DOMAIN_PIPE_A: 2530 return "PIPE_A"; 2531 case POWER_DOMAIN_PIPE_B: 2532 return "PIPE_B"; 2533 case POWER_DOMAIN_PIPE_C: 2534 return "PIPE_C"; 2535 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 2536 return "PIPE_A_PANEL_FITTER"; 2537 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 2538 return "PIPE_B_PANEL_FITTER"; 2539 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 2540 return "PIPE_C_PANEL_FITTER"; 2541 case POWER_DOMAIN_TRANSCODER_A: 2542 return "TRANSCODER_A"; 2543 case POWER_DOMAIN_TRANSCODER_B: 2544 return "TRANSCODER_B"; 2545 case POWER_DOMAIN_TRANSCODER_C: 2546 return "TRANSCODER_C"; 2547 case POWER_DOMAIN_TRANSCODER_EDP: 2548 return "TRANSCODER_EDP"; 2549 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2550 return "PORT_DDI_A_2_LANES"; 2551 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2552 return "PORT_DDI_A_4_LANES"; 2553 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2554 return "PORT_DDI_B_2_LANES"; 2555 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2556 return "PORT_DDI_B_4_LANES"; 2557 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2558 return "PORT_DDI_C_2_LANES"; 2559 case POWER_DOMAIN_PORT_DDI_C_4_LANES: 2560 return "PORT_DDI_C_4_LANES"; 2561 case POWER_DOMAIN_PORT_DDI_D_2_LANES: 2562 return "PORT_DDI_D_2_LANES"; 2563 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2564 return "PORT_DDI_D_4_LANES"; 2565 case POWER_DOMAIN_PORT_DDI_E_2_LANES: 2566 return "PORT_DDI_E_2_LANES"; 2567 case POWER_DOMAIN_PORT_DSI: 2568 return "PORT_DSI"; 2569 case POWER_DOMAIN_PORT_CRT: 2570 return "PORT_CRT"; 2571 case POWER_DOMAIN_PORT_OTHER: 2572 return "PORT_OTHER"; 2573 case POWER_DOMAIN_VGA: 2574 return "VGA"; 2575 case POWER_DOMAIN_AUDIO: 2576 return "AUDIO"; 2577 case POWER_DOMAIN_PLLS: 2578 return "PLLS"; 2579 case POWER_DOMAIN_AUX_A: 2580 return "AUX_A"; 2581 case POWER_DOMAIN_AUX_B: 2582 return "AUX_B"; 2583 case POWER_DOMAIN_AUX_C: 2584 return "AUX_C"; 2585 case POWER_DOMAIN_AUX_D: 2586 return "AUX_D"; 2587 case POWER_DOMAIN_INIT: 2588 return "INIT"; 2589 default: 2590 MISSING_CASE(domain); 2591 return "?"; 2592 } 2593 } 2594 2595 static int i915_power_domain_info(struct seq_file *m, void *unused) 2596 { 2597 struct drm_info_node *node = m->private; 2598 struct drm_device *dev = node->minor->dev; 2599 struct drm_i915_private *dev_priv = dev->dev_private; 2600 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2601 int i; 2602 2603 mutex_lock(&power_domains->lock); 2604 2605 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2606 for (i = 0; i < power_domains->power_well_count; i++) { 2607 struct i915_power_well *power_well; 2608 enum intel_display_power_domain power_domain; 2609 2610 power_well = &power_domains->power_wells[i]; 2611 seq_printf(m, "%-25s %d\n", power_well->name, 2612 power_well->count); 2613 2614 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2615 power_domain++) { 2616 if (!(BIT(power_domain) & power_well->domains)) 2617 continue; 2618 2619 seq_printf(m, " %-23s %d\n", 2620 power_domain_str(power_domain), 2621 power_domains->domain_use_count[power_domain]); 2622 } 2623 } 2624 2625 mutex_unlock(&power_domains->lock); 2626 2627 return 0; 2628 } 2629 2630 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2631 struct drm_display_mode *mode) 2632 { 2633 int i; 2634 2635 for (i = 0; i < tabs; i++) 2636 seq_putc(m, '\t'); 2637 2638 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2639 mode->base.id, mode->name, 2640 mode->vrefresh, mode->clock, 2641 mode->hdisplay, mode->hsync_start, 2642 mode->hsync_end, mode->htotal, 2643 mode->vdisplay, mode->vsync_start, 2644 mode->vsync_end, mode->vtotal, 2645 mode->type, mode->flags); 2646 } 2647 2648 static void intel_encoder_info(struct seq_file *m, 2649 struct intel_crtc *intel_crtc, 2650 struct intel_encoder *intel_encoder) 2651 { 2652 struct drm_info_node *node = m->private; 2653 struct drm_device *dev = node->minor->dev; 2654 struct drm_crtc *crtc = &intel_crtc->base; 2655 struct intel_connector *intel_connector; 2656 struct drm_encoder *encoder; 2657 2658 encoder = &intel_encoder->base; 2659 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2660 encoder->base.id, encoder->name); 2661 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2662 struct drm_connector *connector = &intel_connector->base; 2663 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2664 connector->base.id, 2665 connector->name, 2666 drm_get_connector_status_name(connector->status)); 2667 if (connector->status == connector_status_connected) { 2668 struct drm_display_mode *mode = &crtc->mode; 2669 seq_printf(m, ", mode:\n"); 2670 intel_seq_print_mode(m, 2, mode); 2671 } else { 2672 seq_putc(m, '\n'); 2673 } 2674 } 2675 } 2676 2677 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2678 { 2679 struct drm_info_node *node = m->private; 2680 struct drm_device *dev = node->minor->dev; 2681 struct drm_crtc *crtc = &intel_crtc->base; 2682 struct intel_encoder *intel_encoder; 2683 2684 if (crtc->primary->fb) 2685 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2686 crtc->primary->fb->base.id, crtc->x, crtc->y, 2687 crtc->primary->fb->width, crtc->primary->fb->height); 2688 else 2689 seq_puts(m, "\tprimary plane disabled\n"); 2690 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2691 intel_encoder_info(m, intel_crtc, intel_encoder); 2692 } 2693 2694 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2695 { 2696 struct drm_display_mode *mode = panel->fixed_mode; 2697 2698 seq_printf(m, "\tfixed mode:\n"); 2699 intel_seq_print_mode(m, 2, mode); 2700 } 2701 2702 static void intel_dp_info(struct seq_file *m, 2703 struct intel_connector *intel_connector) 2704 { 2705 struct intel_encoder *intel_encoder = intel_connector->encoder; 2706 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2707 2708 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2709 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2710 "no"); 2711 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2712 intel_panel_info(m, &intel_connector->panel); 2713 } 2714 2715 static void intel_hdmi_info(struct seq_file *m, 2716 struct intel_connector *intel_connector) 2717 { 2718 struct intel_encoder *intel_encoder = intel_connector->encoder; 2719 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2720 2721 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" : 2722 "no"); 2723 } 2724 2725 static void intel_lvds_info(struct seq_file *m, 2726 struct intel_connector *intel_connector) 2727 { 2728 intel_panel_info(m, &intel_connector->panel); 2729 } 2730 2731 static void intel_connector_info(struct seq_file *m, 2732 struct drm_connector *connector) 2733 { 2734 struct intel_connector *intel_connector = to_intel_connector(connector); 2735 struct intel_encoder *intel_encoder = intel_connector->encoder; 2736 struct drm_display_mode *mode; 2737 2738 seq_printf(m, "connector %d: type %s, status: %s\n", 2739 connector->base.id, connector->name, 2740 drm_get_connector_status_name(connector->status)); 2741 if (connector->status == connector_status_connected) { 2742 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2743 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2744 connector->display_info.width_mm, 2745 connector->display_info.height_mm); 2746 seq_printf(m, "\tsubpixel order: %s\n", 2747 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2748 seq_printf(m, "\tCEA rev: %d\n", 2749 connector->display_info.cea_rev); 2750 } 2751 if (intel_encoder) { 2752 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2753 intel_encoder->type == INTEL_OUTPUT_EDP) 2754 intel_dp_info(m, intel_connector); 2755 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2756 intel_hdmi_info(m, intel_connector); 2757 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2758 intel_lvds_info(m, intel_connector); 2759 } 2760 2761 seq_printf(m, "\tmodes:\n"); 2762 list_for_each_entry(mode, &connector->modes, head) 2763 intel_seq_print_mode(m, 2, mode); 2764 } 2765 2766 static bool cursor_active(struct drm_device *dev, int pipe) 2767 { 2768 struct drm_i915_private *dev_priv = dev->dev_private; 2769 u32 state; 2770 2771 if (IS_845G(dev) || IS_I865G(dev)) 2772 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2773 else 2774 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2775 2776 return state; 2777 } 2778 2779 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2780 { 2781 struct drm_i915_private *dev_priv = dev->dev_private; 2782 u32 pos; 2783 2784 pos = I915_READ(CURPOS(pipe)); 2785 2786 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2787 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2788 *x = -*x; 2789 2790 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2791 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2792 *y = -*y; 2793 2794 return cursor_active(dev, pipe); 2795 } 2796 2797 static int i915_display_info(struct seq_file *m, void *unused) 2798 { 2799 struct drm_info_node *node = m->private; 2800 struct drm_device *dev = node->minor->dev; 2801 struct drm_i915_private *dev_priv = dev->dev_private; 2802 struct intel_crtc *crtc; 2803 struct drm_connector *connector; 2804 2805 intel_runtime_pm_get(dev_priv); 2806 drm_modeset_lock_all(dev); 2807 seq_printf(m, "CRTC info\n"); 2808 seq_printf(m, "---------\n"); 2809 for_each_intel_crtc(dev, crtc) { 2810 bool active; 2811 struct intel_crtc_state *pipe_config; 2812 int x, y; 2813 2814 pipe_config = to_intel_crtc_state(crtc->base.state); 2815 2816 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", 2817 crtc->base.base.id, pipe_name(crtc->pipe), 2818 yesno(pipe_config->base.active), 2819 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 2820 if (pipe_config->base.active) { 2821 intel_crtc_info(m, crtc); 2822 2823 active = cursor_position(dev, crtc->pipe, &x, &y); 2824 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n", 2825 yesno(crtc->cursor_base), 2826 x, y, crtc->base.cursor->state->crtc_w, 2827 crtc->base.cursor->state->crtc_h, 2828 crtc->cursor_addr, yesno(active)); 2829 } 2830 2831 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 2832 yesno(!crtc->cpu_fifo_underrun_disabled), 2833 yesno(!crtc->pch_fifo_underrun_disabled)); 2834 } 2835 2836 seq_printf(m, "\n"); 2837 seq_printf(m, "Connector info\n"); 2838 seq_printf(m, "--------------\n"); 2839 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2840 intel_connector_info(m, connector); 2841 } 2842 drm_modeset_unlock_all(dev); 2843 intel_runtime_pm_put(dev_priv); 2844 2845 return 0; 2846 } 2847 2848 static int i915_semaphore_status(struct seq_file *m, void *unused) 2849 { 2850 struct drm_info_node *node = (struct drm_info_node *) m->private; 2851 struct drm_device *dev = node->minor->dev; 2852 struct drm_i915_private *dev_priv = dev->dev_private; 2853 struct intel_engine_cs *ring; 2854 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 2855 int i, j, ret; 2856 2857 if (!i915_semaphore_is_enabled(dev)) { 2858 seq_puts(m, "Semaphores are disabled\n"); 2859 return 0; 2860 } 2861 2862 ret = mutex_lock_interruptible(&dev->struct_mutex); 2863 if (ret) 2864 return ret; 2865 intel_runtime_pm_get(dev_priv); 2866 2867 if (IS_BROADWELL(dev)) { 2868 struct page *page; 2869 uint64_t *seqno; 2870 2871 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 2872 2873 seqno = (uint64_t *)kmap_atomic(page); 2874 for_each_ring(ring, dev_priv, i) { 2875 uint64_t offset; 2876 2877 seq_printf(m, "%s\n", ring->name); 2878 2879 seq_puts(m, " Last signal:"); 2880 for (j = 0; j < num_rings; j++) { 2881 offset = i * I915_NUM_RINGS + j; 2882 seq_printf(m, "0x%08llx (0x%02llx) ", 2883 seqno[offset], offset * 8); 2884 } 2885 seq_putc(m, '\n'); 2886 2887 seq_puts(m, " Last wait: "); 2888 for (j = 0; j < num_rings; j++) { 2889 offset = i + (j * I915_NUM_RINGS); 2890 seq_printf(m, "0x%08llx (0x%02llx) ", 2891 seqno[offset], offset * 8); 2892 } 2893 seq_putc(m, '\n'); 2894 2895 } 2896 kunmap_atomic(seqno); 2897 } else { 2898 seq_puts(m, " Last signal:"); 2899 for_each_ring(ring, dev_priv, i) 2900 for (j = 0; j < num_rings; j++) 2901 seq_printf(m, "0x%08x\n", 2902 I915_READ(ring->semaphore.mbox.signal[j])); 2903 seq_putc(m, '\n'); 2904 } 2905 2906 seq_puts(m, "\nSync seqno:\n"); 2907 for_each_ring(ring, dev_priv, i) { 2908 for (j = 0; j < num_rings; j++) { 2909 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 2910 } 2911 seq_putc(m, '\n'); 2912 } 2913 seq_putc(m, '\n'); 2914 2915 intel_runtime_pm_put(dev_priv); 2916 mutex_unlock(&dev->struct_mutex); 2917 return 0; 2918 } 2919 2920 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 2921 { 2922 struct drm_info_node *node = (struct drm_info_node *) m->private; 2923 struct drm_device *dev = node->minor->dev; 2924 struct drm_i915_private *dev_priv = dev->dev_private; 2925 int i; 2926 2927 drm_modeset_lock_all(dev); 2928 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 2929 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 2930 2931 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 2932 seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", 2933 pll->config.crtc_mask, pll->active, yesno(pll->on)); 2934 seq_printf(m, " tracked hardware state:\n"); 2935 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); 2936 seq_printf(m, " dpll_md: 0x%08x\n", 2937 pll->config.hw_state.dpll_md); 2938 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0); 2939 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1); 2940 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll); 2941 } 2942 drm_modeset_unlock_all(dev); 2943 2944 return 0; 2945 } 2946 2947 static int i915_wa_registers(struct seq_file *m, void *unused) 2948 { 2949 int i; 2950 int ret; 2951 struct drm_info_node *node = (struct drm_info_node *) m->private; 2952 struct drm_device *dev = node->minor->dev; 2953 struct drm_i915_private *dev_priv = dev->dev_private; 2954 2955 ret = mutex_lock_interruptible(&dev->struct_mutex); 2956 if (ret) 2957 return ret; 2958 2959 intel_runtime_pm_get(dev_priv); 2960 2961 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 2962 for (i = 0; i < dev_priv->workarounds.count; ++i) { 2963 u32 addr, mask, value, read; 2964 bool ok; 2965 2966 addr = dev_priv->workarounds.reg[i].addr; 2967 mask = dev_priv->workarounds.reg[i].mask; 2968 value = dev_priv->workarounds.reg[i].value; 2969 read = I915_READ(addr); 2970 ok = (value & mask) == (read & mask); 2971 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 2972 addr, value, mask, read, ok ? "OK" : "FAIL"); 2973 } 2974 2975 intel_runtime_pm_put(dev_priv); 2976 mutex_unlock(&dev->struct_mutex); 2977 2978 return 0; 2979 } 2980 2981 static int i915_ddb_info(struct seq_file *m, void *unused) 2982 { 2983 struct drm_info_node *node = m->private; 2984 struct drm_device *dev = node->minor->dev; 2985 struct drm_i915_private *dev_priv = dev->dev_private; 2986 struct skl_ddb_allocation *ddb; 2987 struct skl_ddb_entry *entry; 2988 enum pipe pipe; 2989 int plane; 2990 2991 if (INTEL_INFO(dev)->gen < 9) 2992 return 0; 2993 2994 drm_modeset_lock_all(dev); 2995 2996 ddb = &dev_priv->wm.skl_hw.ddb; 2997 2998 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 2999 3000 for_each_pipe(dev_priv, pipe) { 3001 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3002 3003 for_each_plane(dev_priv, pipe, plane) { 3004 entry = &ddb->plane[pipe][plane]; 3005 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3006 entry->start, entry->end, 3007 skl_ddb_entry_size(entry)); 3008 } 3009 3010 entry = &ddb->cursor[pipe]; 3011 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3012 entry->end, skl_ddb_entry_size(entry)); 3013 } 3014 3015 drm_modeset_unlock_all(dev); 3016 3017 return 0; 3018 } 3019 3020 static void drrs_status_per_crtc(struct seq_file *m, 3021 struct drm_device *dev, struct intel_crtc *intel_crtc) 3022 { 3023 struct intel_encoder *intel_encoder; 3024 struct drm_i915_private *dev_priv = dev->dev_private; 3025 struct i915_drrs *drrs = &dev_priv->drrs; 3026 int vrefresh = 0; 3027 3028 for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { 3029 /* Encoder connected on this CRTC */ 3030 switch (intel_encoder->type) { 3031 case INTEL_OUTPUT_EDP: 3032 seq_puts(m, "eDP:\n"); 3033 break; 3034 case INTEL_OUTPUT_DSI: 3035 seq_puts(m, "DSI:\n"); 3036 break; 3037 case INTEL_OUTPUT_HDMI: 3038 seq_puts(m, "HDMI:\n"); 3039 break; 3040 case INTEL_OUTPUT_DISPLAYPORT: 3041 seq_puts(m, "DP:\n"); 3042 break; 3043 default: 3044 seq_printf(m, "Other encoder (id=%d).\n", 3045 intel_encoder->type); 3046 return; 3047 } 3048 } 3049 3050 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3051 seq_puts(m, "\tVBT: DRRS_type: Static"); 3052 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3053 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3054 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3055 seq_puts(m, "\tVBT: DRRS_type: None"); 3056 else 3057 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3058 3059 seq_puts(m, "\n\n"); 3060 3061 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3062 struct intel_panel *panel; 3063 3064 mutex_lock(&drrs->mutex); 3065 /* DRRS Supported */ 3066 seq_puts(m, "\tDRRS Supported: Yes\n"); 3067 3068 /* disable_drrs() will make drrs->dp NULL */ 3069 if (!drrs->dp) { 3070 seq_puts(m, "Idleness DRRS: Disabled"); 3071 mutex_unlock(&drrs->mutex); 3072 return; 3073 } 3074 3075 panel = &drrs->dp->attached_connector->panel; 3076 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3077 drrs->busy_frontbuffer_bits); 3078 3079 seq_puts(m, "\n\t\t"); 3080 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3081 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3082 vrefresh = panel->fixed_mode->vrefresh; 3083 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3084 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3085 vrefresh = panel->downclock_mode->vrefresh; 3086 } else { 3087 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3088 drrs->refresh_rate_type); 3089 mutex_unlock(&drrs->mutex); 3090 return; 3091 } 3092 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3093 3094 seq_puts(m, "\n\t\t"); 3095 mutex_unlock(&drrs->mutex); 3096 } else { 3097 /* DRRS not supported. Print the VBT parameter*/ 3098 seq_puts(m, "\tDRRS Supported : No"); 3099 } 3100 seq_puts(m, "\n"); 3101 } 3102 3103 static int i915_drrs_status(struct seq_file *m, void *unused) 3104 { 3105 struct drm_info_node *node = m->private; 3106 struct drm_device *dev = node->minor->dev; 3107 struct intel_crtc *intel_crtc; 3108 int active_crtc_cnt = 0; 3109 3110 for_each_intel_crtc(dev, intel_crtc) { 3111 drm_modeset_lock(&intel_crtc->base.mutex, NULL); 3112 3113 if (intel_crtc->base.state->active) { 3114 active_crtc_cnt++; 3115 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3116 3117 drrs_status_per_crtc(m, dev, intel_crtc); 3118 } 3119 3120 drm_modeset_unlock(&intel_crtc->base.mutex); 3121 } 3122 3123 if (!active_crtc_cnt) 3124 seq_puts(m, "No active crtc found\n"); 3125 3126 return 0; 3127 } 3128 3129 struct pipe_crc_info { 3130 const char *name; 3131 struct drm_device *dev; 3132 enum pipe pipe; 3133 }; 3134 3135 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3136 { 3137 struct drm_info_node *node = (struct drm_info_node *) m->private; 3138 struct drm_device *dev = node->minor->dev; 3139 struct drm_encoder *encoder; 3140 struct intel_encoder *intel_encoder; 3141 struct intel_digital_port *intel_dig_port; 3142 drm_modeset_lock_all(dev); 3143 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3144 intel_encoder = to_intel_encoder(encoder); 3145 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 3146 continue; 3147 intel_dig_port = enc_to_dig_port(encoder); 3148 if (!intel_dig_port->dp.can_mst) 3149 continue; 3150 3151 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3152 } 3153 drm_modeset_unlock_all(dev); 3154 return 0; 3155 } 3156 3157 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 3158 { 3159 struct pipe_crc_info *info = inode->i_private; 3160 struct drm_i915_private *dev_priv = info->dev->dev_private; 3161 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3162 3163 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 3164 return -ENODEV; 3165 3166 spin_lock_irq(&pipe_crc->lock); 3167 3168 if (pipe_crc->opened) { 3169 spin_unlock_irq(&pipe_crc->lock); 3170 return -EBUSY; /* already open */ 3171 } 3172 3173 pipe_crc->opened = true; 3174 filep->private_data = inode->i_private; 3175 3176 spin_unlock_irq(&pipe_crc->lock); 3177 3178 return 0; 3179 } 3180 3181 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 3182 { 3183 struct pipe_crc_info *info = inode->i_private; 3184 struct drm_i915_private *dev_priv = info->dev->dev_private; 3185 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3186 3187 spin_lock_irq(&pipe_crc->lock); 3188 pipe_crc->opened = false; 3189 spin_unlock_irq(&pipe_crc->lock); 3190 3191 return 0; 3192 } 3193 3194 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 3195 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 3196 /* account for \'0' */ 3197 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 3198 3199 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 3200 { 3201 assert_spin_locked(&pipe_crc->lock); 3202 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3203 INTEL_PIPE_CRC_ENTRIES_NR); 3204 } 3205 3206 static ssize_t 3207 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 3208 loff_t *pos) 3209 { 3210 struct pipe_crc_info *info = filep->private_data; 3211 struct drm_device *dev = info->dev; 3212 struct drm_i915_private *dev_priv = dev->dev_private; 3213 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3214 char buf[PIPE_CRC_BUFFER_LEN]; 3215 int n_entries; 3216 ssize_t bytes_read; 3217 3218 /* 3219 * Don't allow user space to provide buffers not big enough to hold 3220 * a line of data. 3221 */ 3222 if (count < PIPE_CRC_LINE_LEN) 3223 return -EINVAL; 3224 3225 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 3226 return 0; 3227 3228 /* nothing to read */ 3229 spin_lock_irq(&pipe_crc->lock); 3230 while (pipe_crc_data_count(pipe_crc) == 0) { 3231 int ret; 3232 3233 if (filep->f_flags & O_NONBLOCK) { 3234 spin_unlock_irq(&pipe_crc->lock); 3235 return -EAGAIN; 3236 } 3237 3238 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 3239 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 3240 if (ret) { 3241 spin_unlock_irq(&pipe_crc->lock); 3242 return ret; 3243 } 3244 } 3245 3246 /* We now have one or more entries to read */ 3247 n_entries = count / PIPE_CRC_LINE_LEN; 3248 3249 bytes_read = 0; 3250 while (n_entries > 0) { 3251 struct intel_pipe_crc_entry *entry = 3252 &pipe_crc->entries[pipe_crc->tail]; 3253 int ret; 3254 3255 if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, 3256 INTEL_PIPE_CRC_ENTRIES_NR) < 1) 3257 break; 3258 3259 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 3260 pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 3261 3262 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 3263 "%8u %8x %8x %8x %8x %8x\n", 3264 entry->frame, entry->crc[0], 3265 entry->crc[1], entry->crc[2], 3266 entry->crc[3], entry->crc[4]); 3267 3268 spin_unlock_irq(&pipe_crc->lock); 3269 3270 ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN); 3271 if (ret == PIPE_CRC_LINE_LEN) 3272 return -EFAULT; 3273 3274 user_buf += PIPE_CRC_LINE_LEN; 3275 n_entries--; 3276 3277 spin_lock_irq(&pipe_crc->lock); 3278 } 3279 3280 spin_unlock_irq(&pipe_crc->lock); 3281 3282 return bytes_read; 3283 } 3284 3285 static const struct file_operations i915_pipe_crc_fops = { 3286 .owner = THIS_MODULE, 3287 .open = i915_pipe_crc_open, 3288 .read = i915_pipe_crc_read, 3289 .release = i915_pipe_crc_release, 3290 }; 3291 3292 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 3293 { 3294 .name = "i915_pipe_A_crc", 3295 .pipe = PIPE_A, 3296 }, 3297 { 3298 .name = "i915_pipe_B_crc", 3299 .pipe = PIPE_B, 3300 }, 3301 { 3302 .name = "i915_pipe_C_crc", 3303 .pipe = PIPE_C, 3304 }, 3305 }; 3306 3307 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 3308 enum pipe pipe) 3309 { 3310 struct drm_device *dev = minor->dev; 3311 struct dentry *ent; 3312 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 3313 3314 info->dev = dev; 3315 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 3316 &i915_pipe_crc_fops); 3317 if (!ent) 3318 return -ENOMEM; 3319 3320 return drm_add_fake_info_node(minor, ent, info); 3321 } 3322 3323 static const char * const pipe_crc_sources[] = { 3324 "none", 3325 "plane1", 3326 "plane2", 3327 "pf", 3328 "pipe", 3329 "TV", 3330 "DP-B", 3331 "DP-C", 3332 "DP-D", 3333 "auto", 3334 }; 3335 3336 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 3337 { 3338 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 3339 return pipe_crc_sources[source]; 3340 } 3341 3342 static int display_crc_ctl_show(struct seq_file *m, void *data) 3343 { 3344 struct drm_device *dev = m->private; 3345 struct drm_i915_private *dev_priv = dev->dev_private; 3346 int i; 3347 3348 for (i = 0; i < I915_MAX_PIPES; i++) 3349 seq_printf(m, "%c %s\n", pipe_name(i), 3350 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 3351 3352 return 0; 3353 } 3354 3355 static int display_crc_ctl_open(struct inode *inode, struct file *file) 3356 { 3357 struct drm_device *dev = inode->i_private; 3358 3359 return single_open(file, display_crc_ctl_show, dev); 3360 } 3361 3362 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3363 uint32_t *val) 3364 { 3365 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3366 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3367 3368 switch (*source) { 3369 case INTEL_PIPE_CRC_SOURCE_PIPE: 3370 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 3371 break; 3372 case INTEL_PIPE_CRC_SOURCE_NONE: 3373 *val = 0; 3374 break; 3375 default: 3376 return -EINVAL; 3377 } 3378 3379 return 0; 3380 } 3381 3382 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 3383 enum intel_pipe_crc_source *source) 3384 { 3385 struct intel_encoder *encoder; 3386 struct intel_crtc *crtc; 3387 struct intel_digital_port *dig_port; 3388 int ret = 0; 3389 3390 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3391 3392 drm_modeset_lock_all(dev); 3393 for_each_intel_encoder(dev, encoder) { 3394 if (!encoder->base.crtc) 3395 continue; 3396 3397 crtc = to_intel_crtc(encoder->base.crtc); 3398 3399 if (crtc->pipe != pipe) 3400 continue; 3401 3402 switch (encoder->type) { 3403 case INTEL_OUTPUT_TVOUT: 3404 *source = INTEL_PIPE_CRC_SOURCE_TV; 3405 break; 3406 case INTEL_OUTPUT_DISPLAYPORT: 3407 case INTEL_OUTPUT_EDP: 3408 dig_port = enc_to_dig_port(&encoder->base); 3409 switch (dig_port->port) { 3410 case PORT_B: 3411 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 3412 break; 3413 case PORT_C: 3414 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 3415 break; 3416 case PORT_D: 3417 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 3418 break; 3419 default: 3420 WARN(1, "nonexisting DP port %c\n", 3421 port_name(dig_port->port)); 3422 break; 3423 } 3424 break; 3425 default: 3426 break; 3427 } 3428 } 3429 drm_modeset_unlock_all(dev); 3430 3431 return ret; 3432 } 3433 3434 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 3435 enum pipe pipe, 3436 enum intel_pipe_crc_source *source, 3437 uint32_t *val) 3438 { 3439 struct drm_i915_private *dev_priv = dev->dev_private; 3440 bool need_stable_symbols = false; 3441 3442 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3443 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3444 if (ret) 3445 return ret; 3446 } 3447 3448 switch (*source) { 3449 case INTEL_PIPE_CRC_SOURCE_PIPE: 3450 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 3451 break; 3452 case INTEL_PIPE_CRC_SOURCE_DP_B: 3453 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 3454 need_stable_symbols = true; 3455 break; 3456 case INTEL_PIPE_CRC_SOURCE_DP_C: 3457 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 3458 need_stable_symbols = true; 3459 break; 3460 case INTEL_PIPE_CRC_SOURCE_DP_D: 3461 if (!IS_CHERRYVIEW(dev)) 3462 return -EINVAL; 3463 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV; 3464 need_stable_symbols = true; 3465 break; 3466 case INTEL_PIPE_CRC_SOURCE_NONE: 3467 *val = 0; 3468 break; 3469 default: 3470 return -EINVAL; 3471 } 3472 3473 /* 3474 * When the pipe CRC tap point is after the transcoders we need 3475 * to tweak symbol-level features to produce a deterministic series of 3476 * symbols for a given frame. We need to reset those features only once 3477 * a frame (instead of every nth symbol): 3478 * - DC-balance: used to ensure a better clock recovery from the data 3479 * link (SDVO) 3480 * - DisplayPort scrambling: used for EMI reduction 3481 */ 3482 if (need_stable_symbols) { 3483 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3484 3485 tmp |= DC_BALANCE_RESET_VLV; 3486 switch (pipe) { 3487 case PIPE_A: 3488 tmp |= PIPE_A_SCRAMBLE_RESET; 3489 break; 3490 case PIPE_B: 3491 tmp |= PIPE_B_SCRAMBLE_RESET; 3492 break; 3493 case PIPE_C: 3494 tmp |= PIPE_C_SCRAMBLE_RESET; 3495 break; 3496 default: 3497 return -EINVAL; 3498 } 3499 I915_WRITE(PORT_DFT2_G4X, tmp); 3500 } 3501 3502 return 0; 3503 } 3504 3505 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 3506 enum pipe pipe, 3507 enum intel_pipe_crc_source *source, 3508 uint32_t *val) 3509 { 3510 struct drm_i915_private *dev_priv = dev->dev_private; 3511 bool need_stable_symbols = false; 3512 3513 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3514 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 3515 if (ret) 3516 return ret; 3517 } 3518 3519 switch (*source) { 3520 case INTEL_PIPE_CRC_SOURCE_PIPE: 3521 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 3522 break; 3523 case INTEL_PIPE_CRC_SOURCE_TV: 3524 if (!SUPPORTS_TV(dev)) 3525 return -EINVAL; 3526 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 3527 break; 3528 case INTEL_PIPE_CRC_SOURCE_DP_B: 3529 if (!IS_G4X(dev)) 3530 return -EINVAL; 3531 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 3532 need_stable_symbols = true; 3533 break; 3534 case INTEL_PIPE_CRC_SOURCE_DP_C: 3535 if (!IS_G4X(dev)) 3536 return -EINVAL; 3537 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 3538 need_stable_symbols = true; 3539 break; 3540 case INTEL_PIPE_CRC_SOURCE_DP_D: 3541 if (!IS_G4X(dev)) 3542 return -EINVAL; 3543 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 3544 need_stable_symbols = true; 3545 break; 3546 case INTEL_PIPE_CRC_SOURCE_NONE: 3547 *val = 0; 3548 break; 3549 default: 3550 return -EINVAL; 3551 } 3552 3553 /* 3554 * When the pipe CRC tap point is after the transcoders we need 3555 * to tweak symbol-level features to produce a deterministic series of 3556 * symbols for a given frame. We need to reset those features only once 3557 * a frame (instead of every nth symbol): 3558 * - DC-balance: used to ensure a better clock recovery from the data 3559 * link (SDVO) 3560 * - DisplayPort scrambling: used for EMI reduction 3561 */ 3562 if (need_stable_symbols) { 3563 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3564 3565 WARN_ON(!IS_G4X(dev)); 3566 3567 I915_WRITE(PORT_DFT_I9XX, 3568 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 3569 3570 if (pipe == PIPE_A) 3571 tmp |= PIPE_A_SCRAMBLE_RESET; 3572 else 3573 tmp |= PIPE_B_SCRAMBLE_RESET; 3574 3575 I915_WRITE(PORT_DFT2_G4X, tmp); 3576 } 3577 3578 return 0; 3579 } 3580 3581 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3582 enum pipe pipe) 3583 { 3584 struct drm_i915_private *dev_priv = dev->dev_private; 3585 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3586 3587 switch (pipe) { 3588 case PIPE_A: 3589 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3590 break; 3591 case PIPE_B: 3592 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3593 break; 3594 case PIPE_C: 3595 tmp &= ~PIPE_C_SCRAMBLE_RESET; 3596 break; 3597 default: 3598 return; 3599 } 3600 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 3601 tmp &= ~DC_BALANCE_RESET_VLV; 3602 I915_WRITE(PORT_DFT2_G4X, tmp); 3603 3604 } 3605 3606 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3607 enum pipe pipe) 3608 { 3609 struct drm_i915_private *dev_priv = dev->dev_private; 3610 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3611 3612 if (pipe == PIPE_A) 3613 tmp &= ~PIPE_A_SCRAMBLE_RESET; 3614 else 3615 tmp &= ~PIPE_B_SCRAMBLE_RESET; 3616 I915_WRITE(PORT_DFT2_G4X, tmp); 3617 3618 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 3619 I915_WRITE(PORT_DFT_I9XX, 3620 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 3621 } 3622 } 3623 3624 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 3625 uint32_t *val) 3626 { 3627 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3628 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 3629 3630 switch (*source) { 3631 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3632 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 3633 break; 3634 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3635 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 3636 break; 3637 case INTEL_PIPE_CRC_SOURCE_PIPE: 3638 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 3639 break; 3640 case INTEL_PIPE_CRC_SOURCE_NONE: 3641 *val = 0; 3642 break; 3643 default: 3644 return -EINVAL; 3645 } 3646 3647 return 0; 3648 } 3649 3650 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) 3651 { 3652 struct drm_i915_private *dev_priv = dev->dev_private; 3653 struct intel_crtc *crtc = 3654 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 3655 struct intel_crtc_state *pipe_config; 3656 struct drm_atomic_state *state; 3657 int ret = 0; 3658 3659 drm_modeset_lock_all(dev); 3660 state = drm_atomic_state_alloc(dev); 3661 if (!state) { 3662 ret = -ENOMEM; 3663 goto out; 3664 } 3665 3666 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base); 3667 pipe_config = intel_atomic_get_crtc_state(state, crtc); 3668 if (IS_ERR(pipe_config)) { 3669 ret = PTR_ERR(pipe_config); 3670 goto out; 3671 } 3672 3673 pipe_config->pch_pfit.force_thru = enable; 3674 if (pipe_config->cpu_transcoder == TRANSCODER_EDP && 3675 pipe_config->pch_pfit.enabled != enable) 3676 pipe_config->base.connectors_changed = true; 3677 3678 ret = drm_atomic_commit(state); 3679 out: 3680 drm_modeset_unlock_all(dev); 3681 WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); 3682 if (ret) 3683 drm_atomic_state_free(state); 3684 } 3685 3686 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 3687 enum pipe pipe, 3688 enum intel_pipe_crc_source *source, 3689 uint32_t *val) 3690 { 3691 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 3692 *source = INTEL_PIPE_CRC_SOURCE_PF; 3693 3694 switch (*source) { 3695 case INTEL_PIPE_CRC_SOURCE_PLANE1: 3696 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 3697 break; 3698 case INTEL_PIPE_CRC_SOURCE_PLANE2: 3699 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 3700 break; 3701 case INTEL_PIPE_CRC_SOURCE_PF: 3702 if (IS_HASWELL(dev) && pipe == PIPE_A) 3703 hsw_trans_edp_pipe_A_crc_wa(dev, true); 3704 3705 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 3706 break; 3707 case INTEL_PIPE_CRC_SOURCE_NONE: 3708 *val = 0; 3709 break; 3710 default: 3711 return -EINVAL; 3712 } 3713 3714 return 0; 3715 } 3716 3717 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 3718 enum intel_pipe_crc_source source) 3719 { 3720 struct drm_i915_private *dev_priv = dev->dev_private; 3721 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3722 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3723 pipe)); 3724 u32 val = 0; /* shut up gcc */ 3725 int ret; 3726 3727 if (pipe_crc->source == source) 3728 return 0; 3729 3730 /* forbid changing the source without going back to 'none' */ 3731 if (pipe_crc->source && source) 3732 return -EINVAL; 3733 3734 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 3735 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 3736 return -EIO; 3737 } 3738 3739 if (IS_GEN2(dev)) 3740 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 3741 else if (INTEL_INFO(dev)->gen < 5) 3742 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3743 else if (IS_VALLEYVIEW(dev)) 3744 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3745 else if (IS_GEN5(dev) || IS_GEN6(dev)) 3746 ret = ilk_pipe_crc_ctl_reg(&source, &val); 3747 else 3748 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3749 3750 if (ret != 0) 3751 return ret; 3752 3753 /* none -> real source transition */ 3754 if (source) { 3755 struct intel_pipe_crc_entry *entries; 3756 3757 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 3758 pipe_name(pipe), pipe_crc_source_name(source)); 3759 3760 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 3761 sizeof(pipe_crc->entries[0]), 3762 GFP_KERNEL); 3763 if (!entries) 3764 return -ENOMEM; 3765 3766 /* 3767 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 3768 * enabled and disabled dynamically based on package C states, 3769 * user space can't make reliable use of the CRCs, so let's just 3770 * completely disable it. 3771 */ 3772 hsw_disable_ips(crtc); 3773 3774 spin_lock_irq(&pipe_crc->lock); 3775 kfree(pipe_crc->entries); 3776 pipe_crc->entries = entries; 3777 pipe_crc->head = 0; 3778 pipe_crc->tail = 0; 3779 spin_unlock_irq(&pipe_crc->lock); 3780 } 3781 3782 pipe_crc->source = source; 3783 3784 I915_WRITE(PIPE_CRC_CTL(pipe), val); 3785 POSTING_READ(PIPE_CRC_CTL(pipe)); 3786 3787 /* real source -> none transition */ 3788 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 3789 struct intel_pipe_crc_entry *entries; 3790 struct intel_crtc *crtc = 3791 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 3792 3793 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 3794 pipe_name(pipe)); 3795 3796 drm_modeset_lock(&crtc->base.mutex, NULL); 3797 if (crtc->base.state->active) 3798 intel_wait_for_vblank(dev, pipe); 3799 drm_modeset_unlock(&crtc->base.mutex); 3800 3801 spin_lock_irq(&pipe_crc->lock); 3802 entries = pipe_crc->entries; 3803 pipe_crc->entries = NULL; 3804 pipe_crc->head = 0; 3805 pipe_crc->tail = 0; 3806 spin_unlock_irq(&pipe_crc->lock); 3807 3808 kfree(entries); 3809 3810 if (IS_G4X(dev)) 3811 g4x_undo_pipe_scramble_reset(dev, pipe); 3812 else if (IS_VALLEYVIEW(dev)) 3813 vlv_undo_pipe_scramble_reset(dev, pipe); 3814 else if (IS_HASWELL(dev) && pipe == PIPE_A) 3815 hsw_trans_edp_pipe_A_crc_wa(dev, false); 3816 3817 hsw_enable_ips(crtc); 3818 } 3819 3820 return 0; 3821 } 3822 3823 /* 3824 * Parse pipe CRC command strings: 3825 * command: wsp* object wsp+ name wsp+ source wsp* 3826 * object: 'pipe' 3827 * name: (A | B | C) 3828 * source: (none | plane1 | plane2 | pf) 3829 * wsp: (#0x20 | #0x9 | #0xA)+ 3830 * 3831 * eg.: 3832 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 3833 * "pipe A none" -> Stop CRC 3834 */ 3835 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 3836 { 3837 int n_words = 0; 3838 3839 while (*buf) { 3840 char *end; 3841 3842 /* skip leading white space */ 3843 buf = skip_spaces(buf); 3844 if (!*buf) 3845 break; /* end of buffer */ 3846 3847 /* find end of word */ 3848 for (end = buf; *end && !isspace(*end); end++) 3849 ; 3850 3851 if (n_words == max_words) { 3852 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 3853 max_words); 3854 return -EINVAL; /* ran out of words[] before bytes */ 3855 } 3856 3857 if (*end) 3858 *end++ = '\0'; 3859 words[n_words++] = buf; 3860 buf = end; 3861 } 3862 3863 return n_words; 3864 } 3865 3866 enum intel_pipe_crc_object { 3867 PIPE_CRC_OBJECT_PIPE, 3868 }; 3869 3870 static const char * const pipe_crc_objects[] = { 3871 "pipe", 3872 }; 3873 3874 static int 3875 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 3876 { 3877 int i; 3878 3879 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 3880 if (!strcmp(buf, pipe_crc_objects[i])) { 3881 *o = i; 3882 return 0; 3883 } 3884 3885 return -EINVAL; 3886 } 3887 3888 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 3889 { 3890 const char name = buf[0]; 3891 3892 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 3893 return -EINVAL; 3894 3895 *pipe = name - 'A'; 3896 3897 return 0; 3898 } 3899 3900 static int 3901 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 3902 { 3903 int i; 3904 3905 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 3906 if (!strcmp(buf, pipe_crc_sources[i])) { 3907 *s = i; 3908 return 0; 3909 } 3910 3911 return -EINVAL; 3912 } 3913 3914 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 3915 { 3916 #define N_WORDS 3 3917 int n_words; 3918 char *words[N_WORDS]; 3919 enum pipe pipe; 3920 enum intel_pipe_crc_object object; 3921 enum intel_pipe_crc_source source; 3922 3923 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 3924 if (n_words != N_WORDS) { 3925 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 3926 N_WORDS); 3927 return -EINVAL; 3928 } 3929 3930 if (display_crc_ctl_parse_object(words[0], &object) < 0) { 3931 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 3932 return -EINVAL; 3933 } 3934 3935 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 3936 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 3937 return -EINVAL; 3938 } 3939 3940 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 3941 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 3942 return -EINVAL; 3943 } 3944 3945 return pipe_crc_set_source(dev, pipe, source); 3946 } 3947 3948 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 3949 size_t len, loff_t *offp) 3950 { 3951 struct seq_file *m = file->private_data; 3952 struct drm_device *dev = m->private; 3953 char *tmpbuf; 3954 int ret; 3955 3956 if (len == 0) 3957 return 0; 3958 3959 if (len > PAGE_SIZE - 1) { 3960 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 3961 PAGE_SIZE); 3962 return -E2BIG; 3963 } 3964 3965 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 3966 if (!tmpbuf) 3967 return -ENOMEM; 3968 3969 if (copy_from_user(tmpbuf, ubuf, len)) { 3970 ret = -EFAULT; 3971 goto out; 3972 } 3973 tmpbuf[len] = '\0'; 3974 3975 ret = display_crc_ctl_parse(dev, tmpbuf, len); 3976 3977 out: 3978 kfree(tmpbuf); 3979 if (ret < 0) 3980 return ret; 3981 3982 *offp += len; 3983 return len; 3984 } 3985 3986 static const struct file_operations i915_display_crc_ctl_fops = { 3987 .owner = THIS_MODULE, 3988 .open = display_crc_ctl_open, 3989 .read = seq_read, 3990 .llseek = seq_lseek, 3991 .release = single_release, 3992 .write = display_crc_ctl_write 3993 }; 3994 3995 static ssize_t i915_displayport_test_active_write(struct file *file, 3996 const char __user *ubuf, 3997 size_t len, loff_t *offp) 3998 { 3999 char *input_buffer; 4000 int status = 0; 4001 struct drm_device *dev; 4002 struct drm_connector *connector; 4003 struct list_head *connector_list; 4004 struct intel_dp *intel_dp; 4005 int val = 0; 4006 4007 dev = ((struct seq_file *)file->private_data)->private; 4008 4009 connector_list = &dev->mode_config.connector_list; 4010 4011 if (len == 0) 4012 return 0; 4013 4014 input_buffer = kmalloc(len + 1, GFP_KERNEL); 4015 if (!input_buffer) 4016 return -ENOMEM; 4017 4018 if (copy_from_user(input_buffer, ubuf, len)) { 4019 status = -EFAULT; 4020 goto out; 4021 } 4022 4023 input_buffer[len] = '\0'; 4024 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 4025 4026 list_for_each_entry(connector, connector_list, head) { 4027 4028 if (connector->connector_type != 4029 DRM_MODE_CONNECTOR_DisplayPort) 4030 continue; 4031 4032 if (connector->status == connector_status_connected && 4033 connector->encoder != NULL) { 4034 intel_dp = enc_to_intel_dp(connector->encoder); 4035 status = kstrtoint(input_buffer, 10, &val); 4036 if (status < 0) 4037 goto out; 4038 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 4039 /* To prevent erroneous activation of the compliance 4040 * testing code, only accept an actual value of 1 here 4041 */ 4042 if (val == 1) 4043 intel_dp->compliance_test_active = 1; 4044 else 4045 intel_dp->compliance_test_active = 0; 4046 } 4047 } 4048 out: 4049 kfree(input_buffer); 4050 if (status < 0) 4051 return status; 4052 4053 *offp += len; 4054 return len; 4055 } 4056 4057 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 4058 { 4059 struct drm_device *dev = m->private; 4060 struct drm_connector *connector; 4061 struct list_head *connector_list = &dev->mode_config.connector_list; 4062 struct intel_dp *intel_dp; 4063 4064 list_for_each_entry(connector, connector_list, head) { 4065 4066 if (connector->connector_type != 4067 DRM_MODE_CONNECTOR_DisplayPort) 4068 continue; 4069 4070 if (connector->status == connector_status_connected && 4071 connector->encoder != NULL) { 4072 intel_dp = enc_to_intel_dp(connector->encoder); 4073 if (intel_dp->compliance_test_active) 4074 seq_puts(m, "1"); 4075 else 4076 seq_puts(m, "0"); 4077 } else 4078 seq_puts(m, "0"); 4079 } 4080 4081 return 0; 4082 } 4083 4084 static int i915_displayport_test_active_open(struct inode *inode, 4085 struct file *file) 4086 { 4087 struct drm_device *dev = inode->i_private; 4088 4089 return single_open(file, i915_displayport_test_active_show, dev); 4090 } 4091 4092 static const struct file_operations i915_displayport_test_active_fops = { 4093 .owner = THIS_MODULE, 4094 .open = i915_displayport_test_active_open, 4095 .read = seq_read, 4096 .llseek = seq_lseek, 4097 .release = single_release, 4098 .write = i915_displayport_test_active_write 4099 }; 4100 4101 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 4102 { 4103 struct drm_device *dev = m->private; 4104 struct drm_connector *connector; 4105 struct list_head *connector_list = &dev->mode_config.connector_list; 4106 struct intel_dp *intel_dp; 4107 4108 list_for_each_entry(connector, connector_list, head) { 4109 4110 if (connector->connector_type != 4111 DRM_MODE_CONNECTOR_DisplayPort) 4112 continue; 4113 4114 if (connector->status == connector_status_connected && 4115 connector->encoder != NULL) { 4116 intel_dp = enc_to_intel_dp(connector->encoder); 4117 seq_printf(m, "%lx", intel_dp->compliance_test_data); 4118 } else 4119 seq_puts(m, "0"); 4120 } 4121 4122 return 0; 4123 } 4124 static int i915_displayport_test_data_open(struct inode *inode, 4125 struct file *file) 4126 { 4127 struct drm_device *dev = inode->i_private; 4128 4129 return single_open(file, i915_displayport_test_data_show, dev); 4130 } 4131 4132 static const struct file_operations i915_displayport_test_data_fops = { 4133 .owner = THIS_MODULE, 4134 .open = i915_displayport_test_data_open, 4135 .read = seq_read, 4136 .llseek = seq_lseek, 4137 .release = single_release 4138 }; 4139 4140 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 4141 { 4142 struct drm_device *dev = m->private; 4143 struct drm_connector *connector; 4144 struct list_head *connector_list = &dev->mode_config.connector_list; 4145 struct intel_dp *intel_dp; 4146 4147 list_for_each_entry(connector, connector_list, head) { 4148 4149 if (connector->connector_type != 4150 DRM_MODE_CONNECTOR_DisplayPort) 4151 continue; 4152 4153 if (connector->status == connector_status_connected && 4154 connector->encoder != NULL) { 4155 intel_dp = enc_to_intel_dp(connector->encoder); 4156 seq_printf(m, "%02lx", intel_dp->compliance_test_type); 4157 } else 4158 seq_puts(m, "0"); 4159 } 4160 4161 return 0; 4162 } 4163 4164 static int i915_displayport_test_type_open(struct inode *inode, 4165 struct file *file) 4166 { 4167 struct drm_device *dev = inode->i_private; 4168 4169 return single_open(file, i915_displayport_test_type_show, dev); 4170 } 4171 4172 static const struct file_operations i915_displayport_test_type_fops = { 4173 .owner = THIS_MODULE, 4174 .open = i915_displayport_test_type_open, 4175 .read = seq_read, 4176 .llseek = seq_lseek, 4177 .release = single_release 4178 }; 4179 4180 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 4181 { 4182 struct drm_device *dev = m->private; 4183 int level; 4184 int num_levels; 4185 4186 if (IS_CHERRYVIEW(dev)) 4187 num_levels = 3; 4188 else if (IS_VALLEYVIEW(dev)) 4189 num_levels = 1; 4190 else 4191 num_levels = ilk_wm_max_level(dev) + 1; 4192 4193 drm_modeset_lock_all(dev); 4194 4195 for (level = 0; level < num_levels; level++) { 4196 unsigned int latency = wm[level]; 4197 4198 /* 4199 * - WM1+ latency values in 0.5us units 4200 * - latencies are in us on gen9/vlv/chv 4201 */ 4202 if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev)) 4203 latency *= 10; 4204 else if (level > 0) 4205 latency *= 5; 4206 4207 seq_printf(m, "WM%d %u (%u.%u usec)\n", 4208 level, wm[level], latency / 10, latency % 10); 4209 } 4210 4211 drm_modeset_unlock_all(dev); 4212 } 4213 4214 static int pri_wm_latency_show(struct seq_file *m, void *data) 4215 { 4216 struct drm_device *dev = m->private; 4217 struct drm_i915_private *dev_priv = dev->dev_private; 4218 const uint16_t *latencies; 4219 4220 if (INTEL_INFO(dev)->gen >= 9) 4221 latencies = dev_priv->wm.skl_latency; 4222 else 4223 latencies = to_i915(dev)->wm.pri_latency; 4224 4225 wm_latency_show(m, latencies); 4226 4227 return 0; 4228 } 4229 4230 static int spr_wm_latency_show(struct seq_file *m, void *data) 4231 { 4232 struct drm_device *dev = m->private; 4233 struct drm_i915_private *dev_priv = dev->dev_private; 4234 const uint16_t *latencies; 4235 4236 if (INTEL_INFO(dev)->gen >= 9) 4237 latencies = dev_priv->wm.skl_latency; 4238 else 4239 latencies = to_i915(dev)->wm.spr_latency; 4240 4241 wm_latency_show(m, latencies); 4242 4243 return 0; 4244 } 4245 4246 static int cur_wm_latency_show(struct seq_file *m, void *data) 4247 { 4248 struct drm_device *dev = m->private; 4249 struct drm_i915_private *dev_priv = dev->dev_private; 4250 const uint16_t *latencies; 4251 4252 if (INTEL_INFO(dev)->gen >= 9) 4253 latencies = dev_priv->wm.skl_latency; 4254 else 4255 latencies = to_i915(dev)->wm.cur_latency; 4256 4257 wm_latency_show(m, latencies); 4258 4259 return 0; 4260 } 4261 4262 static int pri_wm_latency_open(struct inode *inode, struct file *file) 4263 { 4264 struct drm_device *dev = inode->i_private; 4265 4266 if (INTEL_INFO(dev)->gen < 5) 4267 return -ENODEV; 4268 4269 return single_open(file, pri_wm_latency_show, dev); 4270 } 4271 4272 static int spr_wm_latency_open(struct inode *inode, struct file *file) 4273 { 4274 struct drm_device *dev = inode->i_private; 4275 4276 if (HAS_GMCH_DISPLAY(dev)) 4277 return -ENODEV; 4278 4279 return single_open(file, spr_wm_latency_show, dev); 4280 } 4281 4282 static int cur_wm_latency_open(struct inode *inode, struct file *file) 4283 { 4284 struct drm_device *dev = inode->i_private; 4285 4286 if (HAS_GMCH_DISPLAY(dev)) 4287 return -ENODEV; 4288 4289 return single_open(file, cur_wm_latency_show, dev); 4290 } 4291 4292 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 4293 size_t len, loff_t *offp, uint16_t wm[8]) 4294 { 4295 struct seq_file *m = file->private_data; 4296 struct drm_device *dev = m->private; 4297 uint16_t new[8] = { 0 }; 4298 int num_levels; 4299 int level; 4300 int ret; 4301 char tmp[32]; 4302 4303 if (IS_CHERRYVIEW(dev)) 4304 num_levels = 3; 4305 else if (IS_VALLEYVIEW(dev)) 4306 num_levels = 1; 4307 else 4308 num_levels = ilk_wm_max_level(dev) + 1; 4309 4310 if (len >= sizeof(tmp)) 4311 return -EINVAL; 4312 4313 if (copy_from_user(tmp, ubuf, len)) 4314 return -EFAULT; 4315 4316 tmp[len] = '\0'; 4317 4318 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4319 &new[0], &new[1], &new[2], &new[3], 4320 &new[4], &new[5], &new[6], &new[7]); 4321 if (ret != num_levels) 4322 return -EINVAL; 4323 4324 drm_modeset_lock_all(dev); 4325 4326 for (level = 0; level < num_levels; level++) 4327 wm[level] = new[level]; 4328 4329 drm_modeset_unlock_all(dev); 4330 4331 return len; 4332 } 4333 4334 4335 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4336 size_t len, loff_t *offp) 4337 { 4338 struct seq_file *m = file->private_data; 4339 struct drm_device *dev = m->private; 4340 struct drm_i915_private *dev_priv = dev->dev_private; 4341 uint16_t *latencies; 4342 4343 if (INTEL_INFO(dev)->gen >= 9) 4344 latencies = dev_priv->wm.skl_latency; 4345 else 4346 latencies = to_i915(dev)->wm.pri_latency; 4347 4348 return wm_latency_write(file, ubuf, len, offp, latencies); 4349 } 4350 4351 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4352 size_t len, loff_t *offp) 4353 { 4354 struct seq_file *m = file->private_data; 4355 struct drm_device *dev = m->private; 4356 struct drm_i915_private *dev_priv = dev->dev_private; 4357 uint16_t *latencies; 4358 4359 if (INTEL_INFO(dev)->gen >= 9) 4360 latencies = dev_priv->wm.skl_latency; 4361 else 4362 latencies = to_i915(dev)->wm.spr_latency; 4363 4364 return wm_latency_write(file, ubuf, len, offp, latencies); 4365 } 4366 4367 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4368 size_t len, loff_t *offp) 4369 { 4370 struct seq_file *m = file->private_data; 4371 struct drm_device *dev = m->private; 4372 struct drm_i915_private *dev_priv = dev->dev_private; 4373 uint16_t *latencies; 4374 4375 if (INTEL_INFO(dev)->gen >= 9) 4376 latencies = dev_priv->wm.skl_latency; 4377 else 4378 latencies = to_i915(dev)->wm.cur_latency; 4379 4380 return wm_latency_write(file, ubuf, len, offp, latencies); 4381 } 4382 4383 static const struct file_operations i915_pri_wm_latency_fops = { 4384 .owner = THIS_MODULE, 4385 .open = pri_wm_latency_open, 4386 .read = seq_read, 4387 .llseek = seq_lseek, 4388 .release = single_release, 4389 .write = pri_wm_latency_write 4390 }; 4391 4392 static const struct file_operations i915_spr_wm_latency_fops = { 4393 .owner = THIS_MODULE, 4394 .open = spr_wm_latency_open, 4395 .read = seq_read, 4396 .llseek = seq_lseek, 4397 .release = single_release, 4398 .write = spr_wm_latency_write 4399 }; 4400 4401 static const struct file_operations i915_cur_wm_latency_fops = { 4402 .owner = THIS_MODULE, 4403 .open = cur_wm_latency_open, 4404 .read = seq_read, 4405 .llseek = seq_lseek, 4406 .release = single_release, 4407 .write = cur_wm_latency_write 4408 }; 4409 4410 static int 4411 i915_wedged_get(void *data, u64 *val) 4412 { 4413 struct drm_device *dev = data; 4414 struct drm_i915_private *dev_priv = dev->dev_private; 4415 4416 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 4417 4418 return 0; 4419 } 4420 4421 static int 4422 i915_wedged_set(void *data, u64 val) 4423 { 4424 struct drm_device *dev = data; 4425 struct drm_i915_private *dev_priv = dev->dev_private; 4426 4427 /* 4428 * There is no safeguard against this debugfs entry colliding 4429 * with the hangcheck calling same i915_handle_error() in 4430 * parallel, causing an explosion. For now we assume that the 4431 * test harness is responsible enough not to inject gpu hangs 4432 * while it is writing to 'i915_wedged' 4433 */ 4434 4435 if (i915_reset_in_progress(&dev_priv->gpu_error)) 4436 return -EAGAIN; 4437 4438 intel_runtime_pm_get(dev_priv); 4439 4440 i915_handle_error(dev, val, 4441 "Manually setting wedged to %llu", val); 4442 4443 intel_runtime_pm_put(dev_priv); 4444 4445 return 0; 4446 } 4447 4448 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4449 i915_wedged_get, i915_wedged_set, 4450 "%llu\n"); 4451 4452 static int 4453 i915_ring_stop_get(void *data, u64 *val) 4454 { 4455 struct drm_device *dev = data; 4456 struct drm_i915_private *dev_priv = dev->dev_private; 4457 4458 *val = dev_priv->gpu_error.stop_rings; 4459 4460 return 0; 4461 } 4462 4463 static int 4464 i915_ring_stop_set(void *data, u64 val) 4465 { 4466 struct drm_device *dev = data; 4467 struct drm_i915_private *dev_priv = dev->dev_private; 4468 int ret; 4469 4470 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 4471 4472 ret = mutex_lock_interruptible(&dev->struct_mutex); 4473 if (ret) 4474 return ret; 4475 4476 dev_priv->gpu_error.stop_rings = val; 4477 mutex_unlock(&dev->struct_mutex); 4478 4479 return 0; 4480 } 4481 4482 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 4483 i915_ring_stop_get, i915_ring_stop_set, 4484 "0x%08llx\n"); 4485 4486 static int 4487 i915_ring_missed_irq_get(void *data, u64 *val) 4488 { 4489 struct drm_device *dev = data; 4490 struct drm_i915_private *dev_priv = dev->dev_private; 4491 4492 *val = dev_priv->gpu_error.missed_irq_rings; 4493 return 0; 4494 } 4495 4496 static int 4497 i915_ring_missed_irq_set(void *data, u64 val) 4498 { 4499 struct drm_device *dev = data; 4500 struct drm_i915_private *dev_priv = dev->dev_private; 4501 int ret; 4502 4503 /* Lock against concurrent debugfs callers */ 4504 ret = mutex_lock_interruptible(&dev->struct_mutex); 4505 if (ret) 4506 return ret; 4507 dev_priv->gpu_error.missed_irq_rings = val; 4508 mutex_unlock(&dev->struct_mutex); 4509 4510 return 0; 4511 } 4512 4513 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4514 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4515 "0x%08llx\n"); 4516 4517 static int 4518 i915_ring_test_irq_get(void *data, u64 *val) 4519 { 4520 struct drm_device *dev = data; 4521 struct drm_i915_private *dev_priv = dev->dev_private; 4522 4523 *val = dev_priv->gpu_error.test_irq_rings; 4524 4525 return 0; 4526 } 4527 4528 static int 4529 i915_ring_test_irq_set(void *data, u64 val) 4530 { 4531 struct drm_device *dev = data; 4532 struct drm_i915_private *dev_priv = dev->dev_private; 4533 int ret; 4534 4535 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4536 4537 /* Lock against concurrent debugfs callers */ 4538 ret = mutex_lock_interruptible(&dev->struct_mutex); 4539 if (ret) 4540 return ret; 4541 4542 dev_priv->gpu_error.test_irq_rings = val; 4543 mutex_unlock(&dev->struct_mutex); 4544 4545 return 0; 4546 } 4547 4548 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4549 i915_ring_test_irq_get, i915_ring_test_irq_set, 4550 "0x%08llx\n"); 4551 4552 #define DROP_UNBOUND 0x1 4553 #define DROP_BOUND 0x2 4554 #define DROP_RETIRE 0x4 4555 #define DROP_ACTIVE 0x8 4556 #define DROP_ALL (DROP_UNBOUND | \ 4557 DROP_BOUND | \ 4558 DROP_RETIRE | \ 4559 DROP_ACTIVE) 4560 static int 4561 i915_drop_caches_get(void *data, u64 *val) 4562 { 4563 *val = DROP_ALL; 4564 4565 return 0; 4566 } 4567 4568 static int 4569 i915_drop_caches_set(void *data, u64 val) 4570 { 4571 struct drm_device *dev = data; 4572 struct drm_i915_private *dev_priv = dev->dev_private; 4573 int ret; 4574 4575 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4576 4577 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4578 * on ioctls on -EAGAIN. */ 4579 ret = mutex_lock_interruptible(&dev->struct_mutex); 4580 if (ret) 4581 return ret; 4582 4583 if (val & DROP_ACTIVE) { 4584 ret = i915_gpu_idle(dev); 4585 if (ret) 4586 goto unlock; 4587 } 4588 4589 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4590 i915_gem_retire_requests(dev); 4591 4592 if (val & DROP_BOUND) 4593 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4594 4595 if (val & DROP_UNBOUND) 4596 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 4597 4598 unlock: 4599 mutex_unlock(&dev->struct_mutex); 4600 4601 return ret; 4602 } 4603 4604 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4605 i915_drop_caches_get, i915_drop_caches_set, 4606 "0x%08llx\n"); 4607 4608 static int 4609 i915_max_freq_get(void *data, u64 *val) 4610 { 4611 struct drm_device *dev = data; 4612 struct drm_i915_private *dev_priv = dev->dev_private; 4613 int ret; 4614 4615 if (INTEL_INFO(dev)->gen < 6) 4616 return -ENODEV; 4617 4618 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4619 4620 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4621 if (ret) 4622 return ret; 4623 4624 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4625 mutex_unlock(&dev_priv->rps.hw_lock); 4626 4627 return 0; 4628 } 4629 4630 static int 4631 i915_max_freq_set(void *data, u64 val) 4632 { 4633 struct drm_device *dev = data; 4634 struct drm_i915_private *dev_priv = dev->dev_private; 4635 u32 hw_max, hw_min; 4636 int ret; 4637 4638 if (INTEL_INFO(dev)->gen < 6) 4639 return -ENODEV; 4640 4641 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4642 4643 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4644 4645 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4646 if (ret) 4647 return ret; 4648 4649 /* 4650 * Turbo will still be enabled, but won't go above the set value. 4651 */ 4652 val = intel_freq_opcode(dev_priv, val); 4653 4654 hw_max = dev_priv->rps.max_freq; 4655 hw_min = dev_priv->rps.min_freq; 4656 4657 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4658 mutex_unlock(&dev_priv->rps.hw_lock); 4659 return -EINVAL; 4660 } 4661 4662 dev_priv->rps.max_freq_softlimit = val; 4663 4664 intel_set_rps(dev, val); 4665 4666 mutex_unlock(&dev_priv->rps.hw_lock); 4667 4668 return 0; 4669 } 4670 4671 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4672 i915_max_freq_get, i915_max_freq_set, 4673 "%llu\n"); 4674 4675 static int 4676 i915_min_freq_get(void *data, u64 *val) 4677 { 4678 struct drm_device *dev = data; 4679 struct drm_i915_private *dev_priv = dev->dev_private; 4680 int ret; 4681 4682 if (INTEL_INFO(dev)->gen < 6) 4683 return -ENODEV; 4684 4685 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4686 4687 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4688 if (ret) 4689 return ret; 4690 4691 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 4692 mutex_unlock(&dev_priv->rps.hw_lock); 4693 4694 return 0; 4695 } 4696 4697 static int 4698 i915_min_freq_set(void *data, u64 val) 4699 { 4700 struct drm_device *dev = data; 4701 struct drm_i915_private *dev_priv = dev->dev_private; 4702 u32 hw_max, hw_min; 4703 int ret; 4704 4705 if (INTEL_INFO(dev)->gen < 6) 4706 return -ENODEV; 4707 4708 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 4709 4710 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4711 4712 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4713 if (ret) 4714 return ret; 4715 4716 /* 4717 * Turbo will still be enabled, but won't go below the set value. 4718 */ 4719 val = intel_freq_opcode(dev_priv, val); 4720 4721 hw_max = dev_priv->rps.max_freq; 4722 hw_min = dev_priv->rps.min_freq; 4723 4724 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 4725 mutex_unlock(&dev_priv->rps.hw_lock); 4726 return -EINVAL; 4727 } 4728 4729 dev_priv->rps.min_freq_softlimit = val; 4730 4731 intel_set_rps(dev, val); 4732 4733 mutex_unlock(&dev_priv->rps.hw_lock); 4734 4735 return 0; 4736 } 4737 4738 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 4739 i915_min_freq_get, i915_min_freq_set, 4740 "%llu\n"); 4741 4742 static int 4743 i915_cache_sharing_get(void *data, u64 *val) 4744 { 4745 struct drm_device *dev = data; 4746 struct drm_i915_private *dev_priv = dev->dev_private; 4747 u32 snpcr; 4748 int ret; 4749 4750 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 4751 return -ENODEV; 4752 4753 ret = mutex_lock_interruptible(&dev->struct_mutex); 4754 if (ret) 4755 return ret; 4756 intel_runtime_pm_get(dev_priv); 4757 4758 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4759 4760 intel_runtime_pm_put(dev_priv); 4761 mutex_unlock(&dev_priv->dev->struct_mutex); 4762 4763 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 4764 4765 return 0; 4766 } 4767 4768 static int 4769 i915_cache_sharing_set(void *data, u64 val) 4770 { 4771 struct drm_device *dev = data; 4772 struct drm_i915_private *dev_priv = dev->dev_private; 4773 u32 snpcr; 4774 4775 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 4776 return -ENODEV; 4777 4778 if (val > 3) 4779 return -EINVAL; 4780 4781 intel_runtime_pm_get(dev_priv); 4782 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 4783 4784 /* Update the cache sharing policy here as well */ 4785 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4786 snpcr &= ~GEN6_MBC_SNPCR_MASK; 4787 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 4788 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 4789 4790 intel_runtime_pm_put(dev_priv); 4791 return 0; 4792 } 4793 4794 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 4795 i915_cache_sharing_get, i915_cache_sharing_set, 4796 "%llu\n"); 4797 4798 struct sseu_dev_status { 4799 unsigned int slice_total; 4800 unsigned int subslice_total; 4801 unsigned int subslice_per_slice; 4802 unsigned int eu_total; 4803 unsigned int eu_per_subslice; 4804 }; 4805 4806 static void cherryview_sseu_device_status(struct drm_device *dev, 4807 struct sseu_dev_status *stat) 4808 { 4809 struct drm_i915_private *dev_priv = dev->dev_private; 4810 const int ss_max = 2; 4811 int ss; 4812 u32 sig1[ss_max], sig2[ss_max]; 4813 4814 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 4815 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 4816 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 4817 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 4818 4819 for (ss = 0; ss < ss_max; ss++) { 4820 unsigned int eu_cnt; 4821 4822 if (sig1[ss] & CHV_SS_PG_ENABLE) 4823 /* skip disabled subslice */ 4824 continue; 4825 4826 stat->slice_total = 1; 4827 stat->subslice_per_slice++; 4828 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 4829 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 4830 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 4831 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 4832 stat->eu_total += eu_cnt; 4833 stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt); 4834 } 4835 stat->subslice_total = stat->subslice_per_slice; 4836 } 4837 4838 static void gen9_sseu_device_status(struct drm_device *dev, 4839 struct sseu_dev_status *stat) 4840 { 4841 struct drm_i915_private *dev_priv = dev->dev_private; 4842 int s_max = 3, ss_max = 4; 4843 int s, ss; 4844 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 4845 4846 /* BXT has a single slice and at most 3 subslices. */ 4847 if (IS_BROXTON(dev)) { 4848 s_max = 1; 4849 ss_max = 3; 4850 } 4851 4852 for (s = 0; s < s_max; s++) { 4853 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 4854 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 4855 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 4856 } 4857 4858 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4859 GEN9_PGCTL_SSA_EU19_ACK | 4860 GEN9_PGCTL_SSA_EU210_ACK | 4861 GEN9_PGCTL_SSA_EU311_ACK; 4862 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4863 GEN9_PGCTL_SSB_EU19_ACK | 4864 GEN9_PGCTL_SSB_EU210_ACK | 4865 GEN9_PGCTL_SSB_EU311_ACK; 4866 4867 for (s = 0; s < s_max; s++) { 4868 unsigned int ss_cnt = 0; 4869 4870 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4871 /* skip disabled slice */ 4872 continue; 4873 4874 stat->slice_total++; 4875 4876 if (IS_SKYLAKE(dev)) 4877 ss_cnt = INTEL_INFO(dev)->subslice_per_slice; 4878 4879 for (ss = 0; ss < ss_max; ss++) { 4880 unsigned int eu_cnt; 4881 4882 if (IS_BROXTON(dev) && 4883 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4884 /* skip disabled subslice */ 4885 continue; 4886 4887 if (IS_BROXTON(dev)) 4888 ss_cnt++; 4889 4890 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 4891 eu_mask[ss%2]); 4892 stat->eu_total += eu_cnt; 4893 stat->eu_per_subslice = max(stat->eu_per_subslice, 4894 eu_cnt); 4895 } 4896 4897 stat->subslice_total += ss_cnt; 4898 stat->subslice_per_slice = max(stat->subslice_per_slice, 4899 ss_cnt); 4900 } 4901 } 4902 4903 static int i915_sseu_status(struct seq_file *m, void *unused) 4904 { 4905 struct drm_info_node *node = (struct drm_info_node *) m->private; 4906 struct drm_device *dev = node->minor->dev; 4907 struct sseu_dev_status stat; 4908 4909 if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev)) 4910 return -ENODEV; 4911 4912 seq_puts(m, "SSEU Device Info\n"); 4913 seq_printf(m, " Available Slice Total: %u\n", 4914 INTEL_INFO(dev)->slice_total); 4915 seq_printf(m, " Available Subslice Total: %u\n", 4916 INTEL_INFO(dev)->subslice_total); 4917 seq_printf(m, " Available Subslice Per Slice: %u\n", 4918 INTEL_INFO(dev)->subslice_per_slice); 4919 seq_printf(m, " Available EU Total: %u\n", 4920 INTEL_INFO(dev)->eu_total); 4921 seq_printf(m, " Available EU Per Subslice: %u\n", 4922 INTEL_INFO(dev)->eu_per_subslice); 4923 seq_printf(m, " Has Slice Power Gating: %s\n", 4924 yesno(INTEL_INFO(dev)->has_slice_pg)); 4925 seq_printf(m, " Has Subslice Power Gating: %s\n", 4926 yesno(INTEL_INFO(dev)->has_subslice_pg)); 4927 seq_printf(m, " Has EU Power Gating: %s\n", 4928 yesno(INTEL_INFO(dev)->has_eu_pg)); 4929 4930 seq_puts(m, "SSEU Device Status\n"); 4931 memset(&stat, 0, sizeof(stat)); 4932 if (IS_CHERRYVIEW(dev)) { 4933 cherryview_sseu_device_status(dev, &stat); 4934 } else if (INTEL_INFO(dev)->gen >= 9) { 4935 gen9_sseu_device_status(dev, &stat); 4936 } 4937 seq_printf(m, " Enabled Slice Total: %u\n", 4938 stat.slice_total); 4939 seq_printf(m, " Enabled Subslice Total: %u\n", 4940 stat.subslice_total); 4941 seq_printf(m, " Enabled Subslice Per Slice: %u\n", 4942 stat.subslice_per_slice); 4943 seq_printf(m, " Enabled EU Total: %u\n", 4944 stat.eu_total); 4945 seq_printf(m, " Enabled EU Per Subslice: %u\n", 4946 stat.eu_per_subslice); 4947 4948 return 0; 4949 } 4950 4951 static int i915_forcewake_open(struct inode *inode, struct file *file) 4952 { 4953 struct drm_device *dev = inode->i_private; 4954 struct drm_i915_private *dev_priv = dev->dev_private; 4955 4956 if (INTEL_INFO(dev)->gen < 6) 4957 return 0; 4958 4959 intel_runtime_pm_get(dev_priv); 4960 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4961 4962 return 0; 4963 } 4964 4965 static int i915_forcewake_release(struct inode *inode, struct file *file) 4966 { 4967 struct drm_device *dev = inode->i_private; 4968 struct drm_i915_private *dev_priv = dev->dev_private; 4969 4970 if (INTEL_INFO(dev)->gen < 6) 4971 return 0; 4972 4973 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4974 intel_runtime_pm_put(dev_priv); 4975 4976 return 0; 4977 } 4978 4979 static const struct file_operations i915_forcewake_fops = { 4980 .owner = THIS_MODULE, 4981 .open = i915_forcewake_open, 4982 .release = i915_forcewake_release, 4983 }; 4984 4985 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 4986 { 4987 struct drm_device *dev = minor->dev; 4988 struct dentry *ent; 4989 4990 ent = debugfs_create_file("i915_forcewake_user", 4991 S_IRUSR, 4992 root, dev, 4993 &i915_forcewake_fops); 4994 if (!ent) 4995 return -ENOMEM; 4996 4997 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 4998 } 4999 5000 static int i915_debugfs_create(struct dentry *root, 5001 struct drm_minor *minor, 5002 const char *name, 5003 const struct file_operations *fops) 5004 { 5005 struct drm_device *dev = minor->dev; 5006 struct dentry *ent; 5007 5008 ent = debugfs_create_file(name, 5009 S_IRUGO | S_IWUSR, 5010 root, dev, 5011 fops); 5012 if (!ent) 5013 return -ENOMEM; 5014 5015 return drm_add_fake_info_node(minor, ent, fops); 5016 } 5017 5018 static const struct drm_info_list i915_debugfs_list[] = { 5019 {"i915_capabilities", i915_capabilities, 0}, 5020 {"i915_gem_objects", i915_gem_object_info, 0}, 5021 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 5022 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 5023 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 5024 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 5025 {"i915_gem_stolen", i915_gem_stolen_list_info }, 5026 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 5027 {"i915_gem_request", i915_gem_request_info, 0}, 5028 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 5029 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 5030 {"i915_gem_interrupt", i915_interrupt_info, 0}, 5031 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 5032 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 5033 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 5034 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 5035 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 5036 {"i915_frequency_info", i915_frequency_info, 0}, 5037 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 5038 {"i915_drpc_info", i915_drpc_info, 0}, 5039 {"i915_emon_status", i915_emon_status, 0}, 5040 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 5041 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 5042 {"i915_fbc_status", i915_fbc_status, 0}, 5043 {"i915_ips_status", i915_ips_status, 0}, 5044 {"i915_sr_status", i915_sr_status, 0}, 5045 {"i915_opregion", i915_opregion, 0}, 5046 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 5047 {"i915_context_status", i915_context_status, 0}, 5048 {"i915_dump_lrc", i915_dump_lrc, 0}, 5049 {"i915_execlists", i915_execlists, 0}, 5050 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 5051 {"i915_swizzle_info", i915_swizzle_info, 0}, 5052 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 5053 {"i915_llc", i915_llc, 0}, 5054 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 5055 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 5056 {"i915_energy_uJ", i915_energy_uJ, 0}, 5057 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 5058 {"i915_power_domain_info", i915_power_domain_info, 0}, 5059 {"i915_display_info", i915_display_info, 0}, 5060 {"i915_semaphore_status", i915_semaphore_status, 0}, 5061 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 5062 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 5063 {"i915_wa_registers", i915_wa_registers, 0}, 5064 {"i915_ddb_info", i915_ddb_info, 0}, 5065 {"i915_sseu_status", i915_sseu_status, 0}, 5066 {"i915_drrs_status", i915_drrs_status, 0}, 5067 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 5068 }; 5069 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 5070 5071 static const struct i915_debugfs_files { 5072 const char *name; 5073 const struct file_operations *fops; 5074 } i915_debugfs_files[] = { 5075 {"i915_wedged", &i915_wedged_fops}, 5076 {"i915_max_freq", &i915_max_freq_fops}, 5077 {"i915_min_freq", &i915_min_freq_fops}, 5078 {"i915_cache_sharing", &i915_cache_sharing_fops}, 5079 {"i915_ring_stop", &i915_ring_stop_fops}, 5080 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 5081 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 5082 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 5083 {"i915_error_state", &i915_error_state_fops}, 5084 {"i915_next_seqno", &i915_next_seqno_fops}, 5085 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 5086 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 5087 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 5088 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 5089 {"i915_fbc_false_color", &i915_fbc_fc_fops}, 5090 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 5091 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 5092 {"i915_dp_test_active", &i915_displayport_test_active_fops} 5093 }; 5094 5095 void intel_display_crc_init(struct drm_device *dev) 5096 { 5097 struct drm_i915_private *dev_priv = dev->dev_private; 5098 enum pipe pipe; 5099 5100 for_each_pipe(dev_priv, pipe) { 5101 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 5102 5103 pipe_crc->opened = false; 5104 spin_lock_init(&pipe_crc->lock); 5105 init_waitqueue_head(&pipe_crc->wq); 5106 } 5107 } 5108 5109 int i915_debugfs_init(struct drm_minor *minor) 5110 { 5111 int ret, i; 5112 5113 ret = i915_forcewake_create(minor->debugfs_root, minor); 5114 if (ret) 5115 return ret; 5116 5117 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5118 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 5119 if (ret) 5120 return ret; 5121 } 5122 5123 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5124 ret = i915_debugfs_create(minor->debugfs_root, minor, 5125 i915_debugfs_files[i].name, 5126 i915_debugfs_files[i].fops); 5127 if (ret) 5128 return ret; 5129 } 5130 5131 return drm_debugfs_create_files(i915_debugfs_list, 5132 I915_DEBUGFS_ENTRIES, 5133 minor->debugfs_root, minor); 5134 } 5135 5136 void i915_debugfs_cleanup(struct drm_minor *minor) 5137 { 5138 int i; 5139 5140 drm_debugfs_remove_files(i915_debugfs_list, 5141 I915_DEBUGFS_ENTRIES, minor); 5142 5143 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 5144 1, minor); 5145 5146 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 5147 struct drm_info_list *info_list = 5148 (struct drm_info_list *)&i915_pipe_crc_data[i]; 5149 5150 drm_debugfs_remove_files(info_list, 1, minor); 5151 } 5152 5153 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 5154 struct drm_info_list *info_list = 5155 (struct drm_info_list *) i915_debugfs_files[i].fops; 5156 5157 drm_debugfs_remove_files(info_list, 1, minor); 5158 } 5159 } 5160 5161 struct dpcd_block { 5162 /* DPCD dump start address. */ 5163 unsigned int offset; 5164 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 5165 unsigned int end; 5166 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 5167 size_t size; 5168 /* Only valid for eDP. */ 5169 bool edp; 5170 }; 5171 5172 static const struct dpcd_block i915_dpcd_debug[] = { 5173 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 5174 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 5175 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 5176 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 5177 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 5178 { .offset = DP_SET_POWER }, 5179 { .offset = DP_EDP_DPCD_REV }, 5180 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 5181 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 5182 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 5183 }; 5184 5185 static int i915_dpcd_show(struct seq_file *m, void *data) 5186 { 5187 struct drm_connector *connector = m->private; 5188 struct intel_dp *intel_dp = 5189 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 5190 uint8_t buf[16]; 5191 ssize_t err; 5192 int i; 5193 5194 if (connector->status != connector_status_connected) 5195 return -ENODEV; 5196 5197 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 5198 const struct dpcd_block *b = &i915_dpcd_debug[i]; 5199 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 5200 5201 if (b->edp && 5202 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 5203 continue; 5204 5205 /* low tech for now */ 5206 if (WARN_ON(size > sizeof(buf))) 5207 continue; 5208 5209 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 5210 if (err <= 0) { 5211 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 5212 size, b->offset, err); 5213 continue; 5214 } 5215 5216 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 5217 } 5218 5219 return 0; 5220 } 5221 5222 static int i915_dpcd_open(struct inode *inode, struct file *file) 5223 { 5224 return single_open(file, i915_dpcd_show, inode->i_private); 5225 } 5226 5227 static const struct file_operations i915_dpcd_fops = { 5228 .owner = THIS_MODULE, 5229 .open = i915_dpcd_open, 5230 .read = seq_read, 5231 .llseek = seq_lseek, 5232 .release = single_release, 5233 }; 5234 5235 /** 5236 * i915_debugfs_connector_add - add i915 specific connector debugfs files 5237 * @connector: pointer to a registered drm_connector 5238 * 5239 * Cleanup will be done by drm_connector_unregister() through a call to 5240 * drm_debugfs_connector_remove(). 5241 * 5242 * Returns 0 on success, negative error codes on error. 5243 */ 5244 int i915_debugfs_connector_add(struct drm_connector *connector) 5245 { 5246 struct dentry *root = connector->debugfs_entry; 5247 5248 /* The connector must have been registered beforehands. */ 5249 if (!root) 5250 return -ENODEV; 5251 5252 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5253 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5254 debugfs_create_file("i915_dpcd", S_IRUGO, root, connector, 5255 &i915_dpcd_fops); 5256 5257 return 0; 5258 } 5259