/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
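/*
 * All of the files implemented below are exposed through the DRM debugfs
 * directory of the device, typically /sys/kernel/debug/dri/<minor>/
 * (assuming debugfs is mounted at /sys/kernel/debug).
 */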
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return obj->has_global_gtt_mapping ? "g" : " ";
}

static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   i915_cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			pin_count++;
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_printf(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
			   vma->node.start, vma->node.size);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}

static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}
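/*
 * Dump every object on the chosen GGTT list (active or inactive), one
 * describe_obj() line per object, followed by a summary total.
 */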
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, vma->obj);
		seq_printf(m, "\n");
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)
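/*
 * Per-client accounting: per_file_stats() is run over each client's object
 * idr and buckets object sizes into total/active/inactive/global/shared/
 * unbound for the per-process summary printed by i915_gem_object_info().
 */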
struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
				continue;

			if (obj->ring) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->ring)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
			   task ? task->comm : "<unknown>",
			   stats.count,
			   stats.total,
			   stats.active,
			   stats.inactive,
			   stats.global,
			   stats.shared,
			   stats.unbound);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
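/*
 * Report the state of any page flip pending on each CRTC: whether it is
 * still queued or already waiting for vblank, plus the GTT offsets of the
 * old and new framebuffer objects involved.
 */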
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
						   i915_gem_obj_ggtt_offset(obj));
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
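/*
 * Dump the interrupt registers (IER/IIR/IMR and friends). The register set
 * read depends on the platform: Cherryview, gen8+, Valleyview, pre-PCH-split
 * and PCH-split devices each have their own layout.
 */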
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		int i;
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(pipe) {
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable: %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity: %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask: %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat: %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable: %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask: %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable: %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask: %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable: %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity: %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask: %08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
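/*
 * i915_error_state: reading returns the last captured GPU error state as
 * text; writing anything to the file clears the saved state.
 */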
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
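/*
 * i915_frequency_info: report current and allowed GPU frequencies.
 * Ironlake reads the MEMSWCTL/MEMSTAT registers, gen6+ the RPS registers,
 * and Valleyview queries the Punit over the sideband bus.
 */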
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			reqf >>= 24;
		else
			reqf >>= 25;
		reqf *= GT_FREQUENCY_MULTIPLIER;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   I915_READ(GEN6_PMIER),
			   I915_READ(GEN6_PMIMR),
			   I915_READ(GEN6_PMISR),
			   I915_READ(GEN6_PMIIR),
			   I915_READ(GEN6_PMINTRMSK));
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "max GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1;
	unsigned fw_rendercount = 0, fw_mediacount = 0;

	intel_runtime_pm_get(dev_priv);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (I915_READ(VLV_GTLC_PW_STATUS) &
		    VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	spin_lock_irq(&dev_priv->uncore.lock);
	fw_rendercount = dev_priv->uncore.fw_rendercount;
	fw_mediacount = dev_priv->uncore.fw_mediacount;
	spin_unlock_irq(&dev_priv->uncore.lock);

	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);

	return 0;
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}
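/*
 * Report whether panel self-refresh is currently enabled; the register
 * holding the enable bit differs per platform generation.
 */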
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_puts(m, "unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		goto out;

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
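/*
 * List all framebuffers: the fbcon framebuffer first (when fbdev support is
 * compiled in), then every user-created one, each with its backing object.
 */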
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct intel_fbdev *ifbdev = NULL;
	struct intel_framebuffer *fb;

#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_putc(m, '\n');
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (ifbdev && &fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_puts(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_putc(m, '\n');
	}

	if (dev_priv->ips.renderctx) {
		seq_puts(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_putc(m, '\n');
	}

	list_for_each_entry(ctx, &dev_priv->context_list, link) {
		if (ctx->legacy_hw_ctx.rcs_state == NULL)
			continue;

		seq_puts(m, "HW context ");
		describe_ctx(m, ctx);
		for_each_ring(ring, dev_priv, i)
			if (ring->default_context == ctx)
				seq_printf(m, "(default context %s) ",
					   ring->name);

		describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;

	spin_lock_irq(&dev_priv->uncore.lock);
	if (IS_VALLEYVIEW(dev)) {
		fw_rendercount = dev_priv->uncore.fw_rendercount;
		fw_mediacount = dev_priv->uncore.fw_mediacount;
	} else
		forcewake_count = dev_priv->uncore.forcewake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
	} else
		seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
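/*
 * i915_swizzle_info: print the bit6 swizzle pattern used for X and Y tiling,
 * together with the memory controller registers relevant to swizzling.
 */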
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (IS_GEN8(dev))
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int per_file_ctx(int id, void *ptr, void *data)
{
	struct intel_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
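/*
 * PPGTT state dumpers: on gen8+ print the four page-directory pointers of
 * each ring; on gen6/7 print the ring PP_DIR registers, the aliasing PPGTT
 * and any per-file contexts.
 */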
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int unused, i;

	if (!ppgtt)
		return;

	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
	for_each_ring(ring, dev_priv, unused) {
		seq_printf(m, "%s\n", ring->name);
		for (i = 0; i < 4; i++) {
			u32 offset = 0x270 + i * 8;
			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
			pdp <<= 32;
			pdp |= I915_READ(ring->mmio_base + offset);
			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
		}
	}
}

static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_file *file;
	int i;

	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);

		ppgtt->debug_dump(ppgtt, m);
	} else
		return;

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;

		seq_printf(m, "proc: %s\n",
			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	int ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_info(m, dev);
	else if (INTEL_INFO(dev)->gen >= 6)
		gen6_ppgtt_info(m, dev);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
*/ 1879 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 1880 seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); 1881 1882 return 0; 1883 } 1884 1885 static int i915_edp_psr_status(struct seq_file *m, void *data) 1886 { 1887 struct drm_info_node *node = m->private; 1888 struct drm_device *dev = node->minor->dev; 1889 struct drm_i915_private *dev_priv = dev->dev_private; 1890 u32 psrperf = 0; 1891 bool enabled = false; 1892 1893 intel_runtime_pm_get(dev_priv); 1894 1895 mutex_lock(&dev_priv->psr.lock); 1896 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 1897 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 1898 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 1899 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 1900 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 1901 dev_priv->psr.busy_frontbuffer_bits); 1902 seq_printf(m, "Re-enable work scheduled: %s\n", 1903 yesno(work_busy(&dev_priv->psr.work.work))); 1904 1905 enabled = HAS_PSR(dev) && 1906 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; 1907 seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); 1908 1909 if (HAS_PSR(dev)) 1910 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & 1911 EDP_PSR_PERF_CNT_MASK; 1912 seq_printf(m, "Performance_Counter: %u\n", psrperf); 1913 mutex_unlock(&dev_priv->psr.lock); 1914 1915 intel_runtime_pm_put(dev_priv); 1916 return 0; 1917 } 1918 1919 static int i915_sink_crc(struct seq_file *m, void *data) 1920 { 1921 struct drm_info_node *node = m->private; 1922 struct drm_device *dev = node->minor->dev; 1923 struct intel_encoder *encoder; 1924 struct intel_connector *connector; 1925 struct intel_dp *intel_dp = NULL; 1926 int ret; 1927 u8 crc[6]; 1928 1929 drm_modeset_lock_all(dev); 1930 list_for_each_entry(connector, &dev->mode_config.connector_list, 1931 base.head) { 1932 1933 if (connector->base.dpms != DRM_MODE_DPMS_ON) 1934 continue; 1935 1936 if (!connector->base.encoder) 1937 continue; 1938 1939 encoder = to_intel_encoder(connector->base.encoder); 1940 if (encoder->type != INTEL_OUTPUT_EDP) 1941 continue; 1942 1943 intel_dp = enc_to_intel_dp(&encoder->base); 1944 1945 ret = intel_dp_sink_crc(intel_dp, crc); 1946 if (ret) 1947 goto out; 1948 1949 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 1950 crc[0], crc[1], crc[2], 1951 crc[3], crc[4], crc[5]); 1952 goto out; 1953 } 1954 ret = -ENODEV; 1955 out: 1956 drm_modeset_unlock_all(dev); 1957 return ret; 1958 } 1959 1960 static int i915_energy_uJ(struct seq_file *m, void *data) 1961 { 1962 struct drm_info_node *node = m->private; 1963 struct drm_device *dev = node->minor->dev; 1964 struct drm_i915_private *dev_priv = dev->dev_private; 1965 u64 power; 1966 u32 units; 1967 1968 if (INTEL_INFO(dev)->gen < 6) 1969 return -ENODEV; 1970 1971 intel_runtime_pm_get(dev_priv); 1972 1973 rdmsrl(MSR_RAPL_POWER_UNIT, power); 1974 power = (power & 0x1f00) >> 8; 1975 units = 1000000 / (1 << power); /* convert to uJ */ 1976 power = I915_READ(MCH_SECP_NRG_STTS); 1977 power *= units; 1978 1979 intel_runtime_pm_put(dev_priv); 1980 1981 seq_printf(m, "%llu", (long long unsigned)power); 1982 1983 return 0; 1984 } 1985 1986 static int i915_pc8_status(struct seq_file *m, void *unused) 1987 { 1988 struct drm_info_node *node = m->private; 1989 struct drm_device *dev = node->minor->dev; 1990 struct drm_i915_private *dev_priv = dev->dev_private; 1991 1992 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { 1993 seq_puts(m, "not supported\n"); 1994 return 0; 1995 } 1996 1997 seq_printf(m, "GPU idle: %s\n", 
yesno(!dev_priv->mm.busy)); 1998 seq_printf(m, "IRQs disabled: %s\n", 1999 yesno(!intel_irqs_enabled(dev_priv))); 2000 2001 return 0; 2002 } 2003 2004 static const char *power_domain_str(enum intel_display_power_domain domain) 2005 { 2006 switch (domain) { 2007 case POWER_DOMAIN_PIPE_A: 2008 return "PIPE_A"; 2009 case POWER_DOMAIN_PIPE_B: 2010 return "PIPE_B"; 2011 case POWER_DOMAIN_PIPE_C: 2012 return "PIPE_C"; 2013 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 2014 return "PIPE_A_PANEL_FITTER"; 2015 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 2016 return "PIPE_B_PANEL_FITTER"; 2017 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 2018 return "PIPE_C_PANEL_FITTER"; 2019 case POWER_DOMAIN_TRANSCODER_A: 2020 return "TRANSCODER_A"; 2021 case POWER_DOMAIN_TRANSCODER_B: 2022 return "TRANSCODER_B"; 2023 case POWER_DOMAIN_TRANSCODER_C: 2024 return "TRANSCODER_C"; 2025 case POWER_DOMAIN_TRANSCODER_EDP: 2026 return "TRANSCODER_EDP"; 2027 case POWER_DOMAIN_PORT_DDI_A_2_LANES: 2028 return "PORT_DDI_A_2_LANES"; 2029 case POWER_DOMAIN_PORT_DDI_A_4_LANES: 2030 return "PORT_DDI_A_4_LANES"; 2031 case POWER_DOMAIN_PORT_DDI_B_2_LANES: 2032 return "PORT_DDI_B_2_LANES"; 2033 case POWER_DOMAIN_PORT_DDI_B_4_LANES: 2034 return "PORT_DDI_B_4_LANES"; 2035 case POWER_DOMAIN_PORT_DDI_C_2_LANES: 2036 return "PORT_DDI_C_2_LANES"; 2037 case POWER_DOMAIN_PORT_DDI_C_4_LANES: 2038 return "PORT_DDI_C_4_LANES"; 2039 case POWER_DOMAIN_PORT_DDI_D_2_LANES: 2040 return "PORT_DDI_D_2_LANES"; 2041 case POWER_DOMAIN_PORT_DDI_D_4_LANES: 2042 return "PORT_DDI_D_4_LANES"; 2043 case POWER_DOMAIN_PORT_DSI: 2044 return "PORT_DSI"; 2045 case POWER_DOMAIN_PORT_CRT: 2046 return "PORT_CRT"; 2047 case POWER_DOMAIN_PORT_OTHER: 2048 return "PORT_OTHER"; 2049 case POWER_DOMAIN_VGA: 2050 return "VGA"; 2051 case POWER_DOMAIN_AUDIO: 2052 return "AUDIO"; 2053 case POWER_DOMAIN_PLLS: 2054 return "PLLS"; 2055 case POWER_DOMAIN_INIT: 2056 return "INIT"; 2057 default: 2058 WARN_ON(1); 2059 return "?"; 2060 } 2061 } 2062 2063 static int i915_power_domain_info(struct seq_file *m, void *unused) 2064 { 2065 struct drm_info_node *node = m->private; 2066 struct drm_device *dev = node->minor->dev; 2067 struct drm_i915_private *dev_priv = dev->dev_private; 2068 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2069 int i; 2070 2071 mutex_lock(&power_domains->lock); 2072 2073 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2074 for (i = 0; i < power_domains->power_well_count; i++) { 2075 struct i915_power_well *power_well; 2076 enum intel_display_power_domain power_domain; 2077 2078 power_well = &power_domains->power_wells[i]; 2079 seq_printf(m, "%-25s %d\n", power_well->name, 2080 power_well->count); 2081 2082 for (power_domain = 0; power_domain < POWER_DOMAIN_NUM; 2083 power_domain++) { 2084 if (!(BIT(power_domain) & power_well->domains)) 2085 continue; 2086 2087 seq_printf(m, " %-23s %d\n", 2088 power_domain_str(power_domain), 2089 power_domains->domain_use_count[power_domain]); 2090 } 2091 } 2092 2093 mutex_unlock(&power_domains->lock); 2094 2095 return 0; 2096 } 2097 2098 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2099 struct drm_display_mode *mode) 2100 { 2101 int i; 2102 2103 for (i = 0; i < tabs; i++) 2104 seq_putc(m, '\t'); 2105 2106 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2107 mode->base.id, mode->name, 2108 mode->vrefresh, mode->clock, 2109 mode->hdisplay, mode->hsync_start, 2110 mode->hsync_end, mode->htotal, 2111 
mode->vdisplay, mode->vsync_start, 2112 mode->vsync_end, mode->vtotal, 2113 mode->type, mode->flags); 2114 } 2115 2116 static void intel_encoder_info(struct seq_file *m, 2117 struct intel_crtc *intel_crtc, 2118 struct intel_encoder *intel_encoder) 2119 { 2120 struct drm_info_node *node = m->private; 2121 struct drm_device *dev = node->minor->dev; 2122 struct drm_crtc *crtc = &intel_crtc->base; 2123 struct intel_connector *intel_connector; 2124 struct drm_encoder *encoder; 2125 2126 encoder = &intel_encoder->base; 2127 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2128 encoder->base.id, encoder->name); 2129 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2130 struct drm_connector *connector = &intel_connector->base; 2131 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2132 connector->base.id, 2133 connector->name, 2134 drm_get_connector_status_name(connector->status)); 2135 if (connector->status == connector_status_connected) { 2136 struct drm_display_mode *mode = &crtc->mode; 2137 seq_printf(m, ", mode:\n"); 2138 intel_seq_print_mode(m, 2, mode); 2139 } else { 2140 seq_putc(m, '\n'); 2141 } 2142 } 2143 } 2144 2145 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2146 { 2147 struct drm_info_node *node = m->private; 2148 struct drm_device *dev = node->minor->dev; 2149 struct drm_crtc *crtc = &intel_crtc->base; 2150 struct intel_encoder *intel_encoder; 2151 2152 if (crtc->primary->fb) 2153 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2154 crtc->primary->fb->base.id, crtc->x, crtc->y, 2155 crtc->primary->fb->width, crtc->primary->fb->height); 2156 else 2157 seq_puts(m, "\tprimary plane disabled\n"); 2158 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2159 intel_encoder_info(m, intel_crtc, intel_encoder); 2160 } 2161 2162 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2163 { 2164 struct drm_display_mode *mode = panel->fixed_mode; 2165 2166 seq_printf(m, "\tfixed mode:\n"); 2167 intel_seq_print_mode(m, 2, mode); 2168 } 2169 2170 static void intel_dp_info(struct seq_file *m, 2171 struct intel_connector *intel_connector) 2172 { 2173 struct intel_encoder *intel_encoder = intel_connector->encoder; 2174 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2175 2176 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2177 seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" : 2178 "no"); 2179 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2180 intel_panel_info(m, &intel_connector->panel); 2181 } 2182 2183 static void intel_hdmi_info(struct seq_file *m, 2184 struct intel_connector *intel_connector) 2185 { 2186 struct intel_encoder *intel_encoder = intel_connector->encoder; 2187 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2188 2189 seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? 
"yes" : 2190 "no"); 2191 } 2192 2193 static void intel_lvds_info(struct seq_file *m, 2194 struct intel_connector *intel_connector) 2195 { 2196 intel_panel_info(m, &intel_connector->panel); 2197 } 2198 2199 static void intel_connector_info(struct seq_file *m, 2200 struct drm_connector *connector) 2201 { 2202 struct intel_connector *intel_connector = to_intel_connector(connector); 2203 struct intel_encoder *intel_encoder = intel_connector->encoder; 2204 struct drm_display_mode *mode; 2205 2206 seq_printf(m, "connector %d: type %s, status: %s\n", 2207 connector->base.id, connector->name, 2208 drm_get_connector_status_name(connector->status)); 2209 if (connector->status == connector_status_connected) { 2210 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2211 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2212 connector->display_info.width_mm, 2213 connector->display_info.height_mm); 2214 seq_printf(m, "\tsubpixel order: %s\n", 2215 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2216 seq_printf(m, "\tCEA rev: %d\n", 2217 connector->display_info.cea_rev); 2218 } 2219 if (intel_encoder) { 2220 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 2221 intel_encoder->type == INTEL_OUTPUT_EDP) 2222 intel_dp_info(m, intel_connector); 2223 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 2224 intel_hdmi_info(m, intel_connector); 2225 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2226 intel_lvds_info(m, intel_connector); 2227 } 2228 2229 seq_printf(m, "\tmodes:\n"); 2230 list_for_each_entry(mode, &connector->modes, head) 2231 intel_seq_print_mode(m, 2, mode); 2232 } 2233 2234 static bool cursor_active(struct drm_device *dev, int pipe) 2235 { 2236 struct drm_i915_private *dev_priv = dev->dev_private; 2237 u32 state; 2238 2239 if (IS_845G(dev) || IS_I865G(dev)) 2240 state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2241 else 2242 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2243 2244 return state; 2245 } 2246 2247 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 2248 { 2249 struct drm_i915_private *dev_priv = dev->dev_private; 2250 u32 pos; 2251 2252 pos = I915_READ(CURPOS(pipe)); 2253 2254 *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK; 2255 if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT)) 2256 *x = -*x; 2257 2258 *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK; 2259 if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT)) 2260 *y = -*y; 2261 2262 return cursor_active(dev, pipe); 2263 } 2264 2265 static int i915_display_info(struct seq_file *m, void *unused) 2266 { 2267 struct drm_info_node *node = m->private; 2268 struct drm_device *dev = node->minor->dev; 2269 struct drm_i915_private *dev_priv = dev->dev_private; 2270 struct intel_crtc *crtc; 2271 struct drm_connector *connector; 2272 2273 intel_runtime_pm_get(dev_priv); 2274 drm_modeset_lock_all(dev); 2275 seq_printf(m, "CRTC info\n"); 2276 seq_printf(m, "---------\n"); 2277 for_each_intel_crtc(dev, crtc) { 2278 bool active; 2279 int x, y; 2280 2281 seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", 2282 crtc->base.base.id, pipe_name(crtc->pipe), 2283 yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h); 2284 if (crtc->active) { 2285 intel_crtc_info(m, crtc); 2286 2287 active = cursor_position(dev, crtc->pipe, &x, &y); 2288 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? 
%s\n", 2289 yesno(crtc->cursor_base), 2290 x, y, crtc->cursor_width, crtc->cursor_height, 2291 crtc->cursor_addr, yesno(active)); 2292 } 2293 2294 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 2295 yesno(!crtc->cpu_fifo_underrun_disabled), 2296 yesno(!crtc->pch_fifo_underrun_disabled)); 2297 } 2298 2299 seq_printf(m, "\n"); 2300 seq_printf(m, "Connector info\n"); 2301 seq_printf(m, "--------------\n"); 2302 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2303 intel_connector_info(m, connector); 2304 } 2305 drm_modeset_unlock_all(dev); 2306 intel_runtime_pm_put(dev_priv); 2307 2308 return 0; 2309 } 2310 2311 static int i915_semaphore_status(struct seq_file *m, void *unused) 2312 { 2313 struct drm_info_node *node = (struct drm_info_node *) m->private; 2314 struct drm_device *dev = node->minor->dev; 2315 struct drm_i915_private *dev_priv = dev->dev_private; 2316 struct intel_engine_cs *ring; 2317 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 2318 int i, j, ret; 2319 2320 if (!i915_semaphore_is_enabled(dev)) { 2321 seq_puts(m, "Semaphores are disabled\n"); 2322 return 0; 2323 } 2324 2325 ret = mutex_lock_interruptible(&dev->struct_mutex); 2326 if (ret) 2327 return ret; 2328 intel_runtime_pm_get(dev_priv); 2329 2330 if (IS_BROADWELL(dev)) { 2331 struct page *page; 2332 uint64_t *seqno; 2333 2334 page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); 2335 2336 seqno = (uint64_t *)kmap_atomic(page); 2337 for_each_ring(ring, dev_priv, i) { 2338 uint64_t offset; 2339 2340 seq_printf(m, "%s\n", ring->name); 2341 2342 seq_puts(m, " Last signal:"); 2343 for (j = 0; j < num_rings; j++) { 2344 offset = i * I915_NUM_RINGS + j; 2345 seq_printf(m, "0x%08llx (0x%02llx) ", 2346 seqno[offset], offset * 8); 2347 } 2348 seq_putc(m, '\n'); 2349 2350 seq_puts(m, " Last wait: "); 2351 for (j = 0; j < num_rings; j++) { 2352 offset = i + (j * I915_NUM_RINGS); 2353 seq_printf(m, "0x%08llx (0x%02llx) ", 2354 seqno[offset], offset * 8); 2355 } 2356 seq_putc(m, '\n'); 2357 2358 } 2359 kunmap_atomic(seqno); 2360 } else { 2361 seq_puts(m, " Last signal:"); 2362 for_each_ring(ring, dev_priv, i) 2363 for (j = 0; j < num_rings; j++) 2364 seq_printf(m, "0x%08x\n", 2365 I915_READ(ring->semaphore.mbox.signal[j])); 2366 seq_putc(m, '\n'); 2367 } 2368 2369 seq_puts(m, "\nSync seqno:\n"); 2370 for_each_ring(ring, dev_priv, i) { 2371 for (j = 0; j < num_rings; j++) { 2372 seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); 2373 } 2374 seq_putc(m, '\n'); 2375 } 2376 seq_putc(m, '\n'); 2377 2378 intel_runtime_pm_put(dev_priv); 2379 mutex_unlock(&dev->struct_mutex); 2380 return 0; 2381 } 2382 2383 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 2384 { 2385 struct drm_info_node *node = (struct drm_info_node *) m->private; 2386 struct drm_device *dev = node->minor->dev; 2387 struct drm_i915_private *dev_priv = dev->dev_private; 2388 int i; 2389 2390 drm_modeset_lock_all(dev); 2391 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 2392 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 2393 2394 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 2395 seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount, 2396 pll->active, yesno(pll->on)); 2397 seq_printf(m, " tracked hardware state:\n"); 2398 seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll); 2399 seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md); 2400 seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0); 2401 seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1); 2402 
seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll); 2403 } 2404 drm_modeset_unlock_all(dev); 2405 2406 return 0; 2407 } 2408 2409 struct pipe_crc_info { 2410 const char *name; 2411 struct drm_device *dev; 2412 enum pipe pipe; 2413 }; 2414 2415 static int i915_dp_mst_info(struct seq_file *m, void *unused) 2416 { 2417 struct drm_info_node *node = (struct drm_info_node *) m->private; 2418 struct drm_device *dev = node->minor->dev; 2419 struct drm_encoder *encoder; 2420 struct intel_encoder *intel_encoder; 2421 struct intel_digital_port *intel_dig_port; 2422 drm_modeset_lock_all(dev); 2423 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2424 intel_encoder = to_intel_encoder(encoder); 2425 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT) 2426 continue; 2427 intel_dig_port = enc_to_dig_port(encoder); 2428 if (!intel_dig_port->dp.can_mst) 2429 continue; 2430 2431 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 2432 } 2433 drm_modeset_unlock_all(dev); 2434 return 0; 2435 } 2436 2437 static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 2438 { 2439 struct pipe_crc_info *info = inode->i_private; 2440 struct drm_i915_private *dev_priv = info->dev->dev_private; 2441 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2442 2443 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 2444 return -ENODEV; 2445 2446 spin_lock_irq(&pipe_crc->lock); 2447 2448 if (pipe_crc->opened) { 2449 spin_unlock_irq(&pipe_crc->lock); 2450 return -EBUSY; /* already open */ 2451 } 2452 2453 pipe_crc->opened = true; 2454 filep->private_data = inode->i_private; 2455 2456 spin_unlock_irq(&pipe_crc->lock); 2457 2458 return 0; 2459 } 2460 2461 static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 2462 { 2463 struct pipe_crc_info *info = inode->i_private; 2464 struct drm_i915_private *dev_priv = info->dev->dev_private; 2465 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2466 2467 spin_lock_irq(&pipe_crc->lock); 2468 pipe_crc->opened = false; 2469 spin_unlock_irq(&pipe_crc->lock); 2470 2471 return 0; 2472 } 2473 2474 /* (6 fields, 8 chars each, space separated (5) + '\n') */ 2475 #define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) 2476 /* account for \'0' */ 2477 #define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) 2478 2479 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) 2480 { 2481 assert_spin_locked(&pipe_crc->lock); 2482 return CIRC_CNT(pipe_crc->head, pipe_crc->tail, 2483 INTEL_PIPE_CRC_ENTRIES_NR); 2484 } 2485 2486 static ssize_t 2487 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, 2488 loff_t *pos) 2489 { 2490 struct pipe_crc_info *info = filep->private_data; 2491 struct drm_device *dev = info->dev; 2492 struct drm_i915_private *dev_priv = dev->dev_private; 2493 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 2494 char buf[PIPE_CRC_BUFFER_LEN]; 2495 int head, tail, n_entries, n; 2496 ssize_t bytes_read; 2497 2498 /* 2499 * Don't allow user space to provide buffers not big enough to hold 2500 * a line of data. 
2501 */ 2502 if (count < PIPE_CRC_LINE_LEN) 2503 return -EINVAL; 2504 2505 if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) 2506 return 0; 2507 2508 /* nothing to read */ 2509 spin_lock_irq(&pipe_crc->lock); 2510 while (pipe_crc_data_count(pipe_crc) == 0) { 2511 int ret; 2512 2513 if (filep->f_flags & O_NONBLOCK) { 2514 spin_unlock_irq(&pipe_crc->lock); 2515 return -EAGAIN; 2516 } 2517 2518 ret = wait_event_interruptible_lock_irq(pipe_crc->wq, 2519 pipe_crc_data_count(pipe_crc), pipe_crc->lock); 2520 if (ret) { 2521 spin_unlock_irq(&pipe_crc->lock); 2522 return ret; 2523 } 2524 } 2525 2526 /* We now have one or more entries to read */ 2527 head = pipe_crc->head; 2528 tail = pipe_crc->tail; 2529 n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), 2530 count / PIPE_CRC_LINE_LEN); 2531 spin_unlock_irq(&pipe_crc->lock); 2532 2533 bytes_read = 0; 2534 n = 0; 2535 do { 2536 struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; 2537 int ret; 2538 2539 bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, 2540 "%8u %8x %8x %8x %8x %8x\n", 2541 entry->frame, entry->crc[0], 2542 entry->crc[1], entry->crc[2], 2543 entry->crc[3], entry->crc[4]); 2544 2545 ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, 2546 buf, PIPE_CRC_LINE_LEN); 2547 if (ret == PIPE_CRC_LINE_LEN) 2548 return -EFAULT; 2549 2550 BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); 2551 tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 2552 n++; 2553 } while (--n_entries); 2554 2555 spin_lock_irq(&pipe_crc->lock); 2556 pipe_crc->tail = tail; 2557 spin_unlock_irq(&pipe_crc->lock); 2558 2559 return bytes_read; 2560 } 2561 2562 static const struct file_operations i915_pipe_crc_fops = { 2563 .owner = THIS_MODULE, 2564 .open = i915_pipe_crc_open, 2565 .read = i915_pipe_crc_read, 2566 .release = i915_pipe_crc_release, 2567 }; 2568 2569 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { 2570 { 2571 .name = "i915_pipe_A_crc", 2572 .pipe = PIPE_A, 2573 }, 2574 { 2575 .name = "i915_pipe_B_crc", 2576 .pipe = PIPE_B, 2577 }, 2578 { 2579 .name = "i915_pipe_C_crc", 2580 .pipe = PIPE_C, 2581 }, 2582 }; 2583 2584 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, 2585 enum pipe pipe) 2586 { 2587 struct drm_device *dev = minor->dev; 2588 struct dentry *ent; 2589 struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; 2590 2591 info->dev = dev; 2592 ent = debugfs_create_file(info->name, S_IRUGO, root, info, 2593 &i915_pipe_crc_fops); 2594 if (!ent) 2595 return -ENOMEM; 2596 2597 return drm_add_fake_info_node(minor, ent, info); 2598 } 2599 2600 static const char * const pipe_crc_sources[] = { 2601 "none", 2602 "plane1", 2603 "plane2", 2604 "pf", 2605 "pipe", 2606 "TV", 2607 "DP-B", 2608 "DP-C", 2609 "DP-D", 2610 "auto", 2611 }; 2612 2613 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) 2614 { 2615 BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); 2616 return pipe_crc_sources[source]; 2617 } 2618 2619 static int display_crc_ctl_show(struct seq_file *m, void *data) 2620 { 2621 struct drm_device *dev = m->private; 2622 struct drm_i915_private *dev_priv = dev->dev_private; 2623 int i; 2624 2625 for (i = 0; i < I915_MAX_PIPES; i++) 2626 seq_printf(m, "%c %s\n", pipe_name(i), 2627 pipe_crc_source_name(dev_priv->pipe_crc[i].source)); 2628 2629 return 0; 2630 } 2631 2632 static int display_crc_ctl_open(struct inode *inode, struct file *file) 2633 { 2634 struct drm_device *dev = inode->i_private; 2635 2636 return single_open(file, 
display_crc_ctl_show, dev); 2637 } 2638 2639 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2640 uint32_t *val) 2641 { 2642 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2643 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2644 2645 switch (*source) { 2646 case INTEL_PIPE_CRC_SOURCE_PIPE: 2647 *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX; 2648 break; 2649 case INTEL_PIPE_CRC_SOURCE_NONE: 2650 *val = 0; 2651 break; 2652 default: 2653 return -EINVAL; 2654 } 2655 2656 return 0; 2657 } 2658 2659 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe, 2660 enum intel_pipe_crc_source *source) 2661 { 2662 struct intel_encoder *encoder; 2663 struct intel_crtc *crtc; 2664 struct intel_digital_port *dig_port; 2665 int ret = 0; 2666 2667 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2668 2669 drm_modeset_lock_all(dev); 2670 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2671 base.head) { 2672 if (!encoder->base.crtc) 2673 continue; 2674 2675 crtc = to_intel_crtc(encoder->base.crtc); 2676 2677 if (crtc->pipe != pipe) 2678 continue; 2679 2680 switch (encoder->type) { 2681 case INTEL_OUTPUT_TVOUT: 2682 *source = INTEL_PIPE_CRC_SOURCE_TV; 2683 break; 2684 case INTEL_OUTPUT_DISPLAYPORT: 2685 case INTEL_OUTPUT_EDP: 2686 dig_port = enc_to_dig_port(&encoder->base); 2687 switch (dig_port->port) { 2688 case PORT_B: 2689 *source = INTEL_PIPE_CRC_SOURCE_DP_B; 2690 break; 2691 case PORT_C: 2692 *source = INTEL_PIPE_CRC_SOURCE_DP_C; 2693 break; 2694 case PORT_D: 2695 *source = INTEL_PIPE_CRC_SOURCE_DP_D; 2696 break; 2697 default: 2698 WARN(1, "nonexisting DP port %c\n", 2699 port_name(dig_port->port)); 2700 break; 2701 } 2702 break; 2703 } 2704 } 2705 drm_modeset_unlock_all(dev); 2706 2707 return ret; 2708 } 2709 2710 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev, 2711 enum pipe pipe, 2712 enum intel_pipe_crc_source *source, 2713 uint32_t *val) 2714 { 2715 struct drm_i915_private *dev_priv = dev->dev_private; 2716 bool need_stable_symbols = false; 2717 2718 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2719 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2720 if (ret) 2721 return ret; 2722 } 2723 2724 switch (*source) { 2725 case INTEL_PIPE_CRC_SOURCE_PIPE: 2726 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV; 2727 break; 2728 case INTEL_PIPE_CRC_SOURCE_DP_B: 2729 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV; 2730 need_stable_symbols = true; 2731 break; 2732 case INTEL_PIPE_CRC_SOURCE_DP_C: 2733 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV; 2734 need_stable_symbols = true; 2735 break; 2736 case INTEL_PIPE_CRC_SOURCE_NONE: 2737 *val = 0; 2738 break; 2739 default: 2740 return -EINVAL; 2741 } 2742 2743 /* 2744 * When the pipe CRC tap point is after the transcoders we need 2745 * to tweak symbol-level features to produce a deterministic series of 2746 * symbols for a given frame. 
We need to reset those features only once 2747 * a frame (instead of every nth symbol): 2748 * - DC-balance: used to ensure a better clock recovery from the data 2749 * link (SDVO) 2750 * - DisplayPort scrambling: used for EMI reduction 2751 */ 2752 if (need_stable_symbols) { 2753 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2754 2755 tmp |= DC_BALANCE_RESET_VLV; 2756 if (pipe == PIPE_A) 2757 tmp |= PIPE_A_SCRAMBLE_RESET; 2758 else 2759 tmp |= PIPE_B_SCRAMBLE_RESET; 2760 2761 I915_WRITE(PORT_DFT2_G4X, tmp); 2762 } 2763 2764 return 0; 2765 } 2766 2767 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev, 2768 enum pipe pipe, 2769 enum intel_pipe_crc_source *source, 2770 uint32_t *val) 2771 { 2772 struct drm_i915_private *dev_priv = dev->dev_private; 2773 bool need_stable_symbols = false; 2774 2775 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 2776 int ret = i9xx_pipe_crc_auto_source(dev, pipe, source); 2777 if (ret) 2778 return ret; 2779 } 2780 2781 switch (*source) { 2782 case INTEL_PIPE_CRC_SOURCE_PIPE: 2783 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX; 2784 break; 2785 case INTEL_PIPE_CRC_SOURCE_TV: 2786 if (!SUPPORTS_TV(dev)) 2787 return -EINVAL; 2788 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; 2789 break; 2790 case INTEL_PIPE_CRC_SOURCE_DP_B: 2791 if (!IS_G4X(dev)) 2792 return -EINVAL; 2793 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; 2794 need_stable_symbols = true; 2795 break; 2796 case INTEL_PIPE_CRC_SOURCE_DP_C: 2797 if (!IS_G4X(dev)) 2798 return -EINVAL; 2799 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; 2800 need_stable_symbols = true; 2801 break; 2802 case INTEL_PIPE_CRC_SOURCE_DP_D: 2803 if (!IS_G4X(dev)) 2804 return -EINVAL; 2805 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; 2806 need_stable_symbols = true; 2807 break; 2808 case INTEL_PIPE_CRC_SOURCE_NONE: 2809 *val = 0; 2810 break; 2811 default: 2812 return -EINVAL; 2813 } 2814 2815 /* 2816 * When the pipe CRC tap point is after the transcoders we need 2817 * to tweak symbol-level features to produce a deterministic series of 2818 * symbols for a given frame. 
We need to reset those features only once 2819 * a frame (instead of every nth symbol): 2820 * - DC-balance: used to ensure a better clock recovery from the data 2821 * link (SDVO) 2822 * - DisplayPort scrambling: used for EMI reduction 2823 */ 2824 if (need_stable_symbols) { 2825 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2826 2827 WARN_ON(!IS_G4X(dev)); 2828 2829 I915_WRITE(PORT_DFT_I9XX, 2830 I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); 2831 2832 if (pipe == PIPE_A) 2833 tmp |= PIPE_A_SCRAMBLE_RESET; 2834 else 2835 tmp |= PIPE_B_SCRAMBLE_RESET; 2836 2837 I915_WRITE(PORT_DFT2_G4X, tmp); 2838 } 2839 2840 return 0; 2841 } 2842 2843 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 2844 enum pipe pipe) 2845 { 2846 struct drm_i915_private *dev_priv = dev->dev_private; 2847 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2848 2849 if (pipe == PIPE_A) 2850 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2851 else 2852 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2853 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) 2854 tmp &= ~DC_BALANCE_RESET_VLV; 2855 I915_WRITE(PORT_DFT2_G4X, tmp); 2856 2857 } 2858 2859 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 2860 enum pipe pipe) 2861 { 2862 struct drm_i915_private *dev_priv = dev->dev_private; 2863 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 2864 2865 if (pipe == PIPE_A) 2866 tmp &= ~PIPE_A_SCRAMBLE_RESET; 2867 else 2868 tmp &= ~PIPE_B_SCRAMBLE_RESET; 2869 I915_WRITE(PORT_DFT2_G4X, tmp); 2870 2871 if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { 2872 I915_WRITE(PORT_DFT_I9XX, 2873 I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); 2874 } 2875 } 2876 2877 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, 2878 uint32_t *val) 2879 { 2880 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2881 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2882 2883 switch (*source) { 2884 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2885 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; 2886 break; 2887 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2888 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; 2889 break; 2890 case INTEL_PIPE_CRC_SOURCE_PIPE: 2891 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; 2892 break; 2893 case INTEL_PIPE_CRC_SOURCE_NONE: 2894 *val = 0; 2895 break; 2896 default: 2897 return -EINVAL; 2898 } 2899 2900 return 0; 2901 } 2902 2903 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev) 2904 { 2905 struct drm_i915_private *dev_priv = dev->dev_private; 2906 struct intel_crtc *crtc = 2907 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 2908 2909 drm_modeset_lock_all(dev); 2910 /* 2911 * If we use the eDP transcoder we need to make sure that we don't 2912 * bypass the pfit, since otherwise the pipe CRC source won't work. Only 2913 * relevant on hsw with pipe A when using the always-on power well 2914 * routing. 
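Note that forcing the pfit through requires the full crtc disable/enable cycle below, so selecting the PF CRC source in this configuration is not free.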
2915 */ 2916 if (crtc->config.cpu_transcoder == TRANSCODER_EDP && 2917 !crtc->config.pch_pfit.enabled) { 2918 crtc->config.pch_pfit.force_thru = true; 2919 2920 intel_display_power_get(dev_priv, 2921 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); 2922 2923 dev_priv->display.crtc_disable(&crtc->base); 2924 dev_priv->display.crtc_enable(&crtc->base); 2925 } 2926 drm_modeset_unlock_all(dev); 2927 } 2928 2929 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev) 2930 { 2931 struct drm_i915_private *dev_priv = dev->dev_private; 2932 struct intel_crtc *crtc = 2933 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 2934 2935 drm_modeset_lock_all(dev); 2936 /* 2937 * If we use the eDP transcoder we need to make sure that we don't 2938 * bypass the pfit, since otherwise the pipe CRC source won't work. Only 2939 * relevant on hsw with pipe A when using the always-on power well 2940 * routing. 2941 */ 2942 if (crtc->config.pch_pfit.force_thru) { 2943 crtc->config.pch_pfit.force_thru = false; 2944 2945 dev_priv->display.crtc_disable(&crtc->base); 2946 dev_priv->display.crtc_enable(&crtc->base); 2947 2948 intel_display_power_put(dev_priv, 2949 POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A)); 2950 } 2951 drm_modeset_unlock_all(dev); 2952 } 2953 2954 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev, 2955 enum pipe pipe, 2956 enum intel_pipe_crc_source *source, 2957 uint32_t *val) 2958 { 2959 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) 2960 *source = INTEL_PIPE_CRC_SOURCE_PF; 2961 2962 switch (*source) { 2963 case INTEL_PIPE_CRC_SOURCE_PLANE1: 2964 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; 2965 break; 2966 case INTEL_PIPE_CRC_SOURCE_PLANE2: 2967 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; 2968 break; 2969 case INTEL_PIPE_CRC_SOURCE_PF: 2970 if (IS_HASWELL(dev) && pipe == PIPE_A) 2971 hsw_trans_edp_pipe_A_crc_wa(dev); 2972 2973 *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; 2974 break; 2975 case INTEL_PIPE_CRC_SOURCE_NONE: 2976 *val = 0; 2977 break; 2978 default: 2979 return -EINVAL; 2980 } 2981 2982 return 0; 2983 } 2984 2985 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 2986 enum intel_pipe_crc_source source) 2987 { 2988 struct drm_i915_private *dev_priv = dev->dev_private; 2989 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 2990 u32 val = 0; /* shut up gcc */ 2991 int ret; 2992 2993 if (pipe_crc->source == source) 2994 return 0; 2995 2996 /* forbid changing the source without going back to 'none' */ 2997 if (pipe_crc->source && source) 2998 return -EINVAL; 2999 3000 if (IS_GEN2(dev)) 3001 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 3002 else if (INTEL_INFO(dev)->gen < 5) 3003 ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3004 else if (IS_VALLEYVIEW(dev)) 3005 ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3006 else if (IS_GEN5(dev) || IS_GEN6(dev)) 3007 ret = ilk_pipe_crc_ctl_reg(&source, &val); 3008 else 3009 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 3010 3011 if (ret != 0) 3012 return ret; 3013 3014 /* none -> real source transition */ 3015 if (source) { 3016 DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", 3017 pipe_name(pipe), pipe_crc_source_name(source)); 3018 3019 pipe_crc->entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, sizeof(*pipe_crc->entries), GFP_KERNEL); 3022 if (!pipe_crc->entries) 3023 return -ENOMEM; 3024 3025 spin_lock_irq(&pipe_crc->lock); 3026 pipe_crc->head = 0; 3027 pipe_crc->tail = 0; 3028 spin_unlock_irq(&pipe_crc->lock); 3029 } 3030 3031 pipe_crc->source = source;
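/* Commit the new source; the POSTING_READ flushes the register write to the hardware before anything below relies on it. */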
3032 3033 I915_WRITE(PIPE_CRC_CTL(pipe), val); 3034 POSTING_READ(PIPE_CRC_CTL(pipe)); 3035 3036 /* real source -> none transition */ 3037 if (source == INTEL_PIPE_CRC_SOURCE_NONE) { 3038 struct intel_pipe_crc_entry *entries; 3039 struct intel_crtc *crtc = 3040 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 3041 3042 DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", 3043 pipe_name(pipe)); 3044 3045 drm_modeset_lock(&crtc->base.mutex, NULL); 3046 if (crtc->active) 3047 intel_wait_for_vblank(dev, pipe); 3048 drm_modeset_unlock(&crtc->base.mutex); 3049 3050 spin_lock_irq(&pipe_crc->lock); 3051 entries = pipe_crc->entries; 3052 pipe_crc->entries = NULL; 3053 spin_unlock_irq(&pipe_crc->lock); 3054 3055 kfree(entries); 3056 3057 if (IS_G4X(dev)) 3058 g4x_undo_pipe_scramble_reset(dev, pipe); 3059 else if (IS_VALLEYVIEW(dev)) 3060 vlv_undo_pipe_scramble_reset(dev, pipe); 3061 else if (IS_HASWELL(dev) && pipe == PIPE_A) 3062 hsw_undo_trans_edp_pipe_A_crc_wa(dev); 3063 } 3064 3065 return 0; 3066 } 3067 3068 /* 3069 * Parse pipe CRC command strings: 3070 * command: wsp* object wsp+ name wsp+ source wsp* 3071 * object: 'pipe' 3072 * name: (A | B | C) 3073 * source: (none | plane1 | plane2 | pf) 3074 * wsp: (#0x20 | #0x9 | #0xA)+ 3075 * 3076 * eg.: 3077 * "pipe A plane1" -> Start CRC computations on plane1 of pipe A 3078 * "pipe A none" -> Stop CRC 3079 */ 3080 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) 3081 { 3082 int n_words = 0; 3083 3084 while (*buf) { 3085 char *end; 3086 3087 /* skip leading white space */ 3088 buf = skip_spaces(buf); 3089 if (!*buf) 3090 break; /* end of buffer */ 3091 3092 /* find end of word */ 3093 for (end = buf; *end && !isspace(*end); end++) 3094 ; 3095 3096 if (n_words == max_words) { 3097 DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", 3098 max_words); 3099 return -EINVAL; /* ran out of words[] before bytes */ 3100 } 3101 3102 if (*end) 3103 *end++ = '\0'; 3104 words[n_words++] = buf; 3105 buf = end; 3106 } 3107 3108 return n_words; 3109 } 3110 3111 enum intel_pipe_crc_object { 3112 PIPE_CRC_OBJECT_PIPE, 3113 }; 3114 3115 static const char * const pipe_crc_objects[] = { 3116 "pipe", 3117 }; 3118 3119 static int 3120 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) 3121 { 3122 int i; 3123 3124 for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) 3125 if (!strcmp(buf, pipe_crc_objects[i])) { 3126 *o = i; 3127 return 0; 3128 } 3129 3130 return -EINVAL; 3131 } 3132 3133 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) 3134 { 3135 const char name = buf[0]; 3136 3137 if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) 3138 return -EINVAL; 3139 3140 *pipe = name - 'A'; 3141 3142 return 0; 3143 } 3144 3145 static int 3146 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) 3147 { 3148 int i; 3149 3150 for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) 3151 if (!strcmp(buf, pipe_crc_sources[i])) { 3152 *s = i; 3153 return 0; 3154 } 3155 3156 return -EINVAL; 3157 } 3158 3159 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) 3160 { 3161 #define N_WORDS 3 3162 int n_words; 3163 char *words[N_WORDS]; 3164 enum pipe pipe; 3165 enum intel_pipe_crc_object object; 3166 enum intel_pipe_crc_source source; 3167 3168 n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); 3169 if (n_words != N_WORDS) { 3170 DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", 3171 N_WORDS); 3172 return -EINVAL; 3173 } 3174 3175 if 
(display_crc_ctl_parse_object(words[0], &object) < 0) { 3176 DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); 3177 return -EINVAL; 3178 } 3179 3180 if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { 3181 DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); 3182 return -EINVAL; 3183 } 3184 3185 if (display_crc_ctl_parse_source(words[2], &source) < 0) { 3186 DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); 3187 return -EINVAL; 3188 } 3189 3190 return pipe_crc_set_source(dev, pipe, source); 3191 } 3192 3193 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, 3194 size_t len, loff_t *offp) 3195 { 3196 struct seq_file *m = file->private_data; 3197 struct drm_device *dev = m->private; 3198 char *tmpbuf; 3199 int ret; 3200 3201 if (len == 0) 3202 return 0; 3203 3204 if (len > PAGE_SIZE - 1) { 3205 DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", 3206 PAGE_SIZE); 3207 return -E2BIG; 3208 } 3209 3210 tmpbuf = kmalloc(len + 1, GFP_KERNEL); 3211 if (!tmpbuf) 3212 return -ENOMEM; 3213 3214 if (copy_from_user(tmpbuf, ubuf, len)) { 3215 ret = -EFAULT; 3216 goto out; 3217 } 3218 tmpbuf[len] = '\0'; 3219 3220 ret = display_crc_ctl_parse(dev, tmpbuf, len); 3221 3222 out: 3223 kfree(tmpbuf); 3224 if (ret < 0) 3225 return ret; 3226 3227 *offp += len; 3228 return len; 3229 } 3230 3231 static const struct file_operations i915_display_crc_ctl_fops = { 3232 .owner = THIS_MODULE, 3233 .open = display_crc_ctl_open, 3234 .read = seq_read, 3235 .llseek = seq_lseek, 3236 .release = single_release, 3237 .write = display_crc_ctl_write 3238 }; 3239 3240 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5]) 3241 { 3242 struct drm_device *dev = m->private; 3243 int num_levels = ilk_wm_max_level(dev) + 1; 3244 int level; 3245 3246 drm_modeset_lock_all(dev); 3247 3248 for (level = 0; level < num_levels; level++) { 3249 unsigned int latency = wm[level]; 3250 3251 /* WM1+ latency values in 0.5us units */ 3252 if (level > 0) 3253 latency *= 5; 3254 3255 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3256 level, wm[level], 3257 latency / 10, latency % 10); 3258 } 3259 3260 drm_modeset_unlock_all(dev); 3261 } 3262 3263 static int pri_wm_latency_show(struct seq_file *m, void *data) 3264 { 3265 struct drm_device *dev = m->private; 3266 3267 wm_latency_show(m, to_i915(dev)->wm.pri_latency); 3268 3269 return 0; 3270 } 3271 3272 static int spr_wm_latency_show(struct seq_file *m, void *data) 3273 { 3274 struct drm_device *dev = m->private; 3275 3276 wm_latency_show(m, to_i915(dev)->wm.spr_latency); 3277 3278 return 0; 3279 } 3280 3281 static int cur_wm_latency_show(struct seq_file *m, void *data) 3282 { 3283 struct drm_device *dev = m->private; 3284 3285 wm_latency_show(m, to_i915(dev)->wm.cur_latency); 3286 3287 return 0; 3288 } 3289 3290 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3291 { 3292 struct drm_device *dev = inode->i_private; 3293 3294 if (HAS_GMCH_DISPLAY(dev)) 3295 return -ENODEV; 3296 3297 return single_open(file, pri_wm_latency_show, dev); 3298 } 3299 3300 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3301 { 3302 struct drm_device *dev = inode->i_private; 3303 3304 if (HAS_GMCH_DISPLAY(dev)) 3305 return -ENODEV; 3306 3307 return single_open(file, spr_wm_latency_show, dev); 3308 } 3309 3310 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3311 { 3312 struct drm_device *dev = inode->i_private; 3313 3314 if (HAS_GMCH_DISPLAY(dev)) 3315 return -ENODEV; 3316 3317 return single_open(file, 
cur_wm_latency_show, dev); 3318 } 3319 3320 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3321 size_t len, loff_t *offp, uint16_t wm[5]) 3322 { 3323 struct seq_file *m = file->private_data; 3324 struct drm_device *dev = m->private; 3325 uint16_t new[5] = { 0 }; 3326 int num_levels = ilk_wm_max_level(dev) + 1; 3327 int level; 3328 int ret; 3329 char tmp[32]; 3330 3331 if (len >= sizeof(tmp)) 3332 return -EINVAL; 3333 3334 if (copy_from_user(tmp, ubuf, len)) 3335 return -EFAULT; 3336 3337 tmp[len] = '\0'; 3338 3339 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]); 3340 if (ret != num_levels) 3341 return -EINVAL; 3342 3343 drm_modeset_lock_all(dev); 3344 3345 for (level = 0; level < num_levels; level++) 3346 wm[level] = new[level]; 3347 3348 drm_modeset_unlock_all(dev); 3349 3350 return len; 3351 } 3352 3353 3354 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3355 size_t len, loff_t *offp) 3356 { 3357 struct seq_file *m = file->private_data; 3358 struct drm_device *dev = m->private; 3359 3360 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency); 3361 } 3362 3363 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3364 size_t len, loff_t *offp) 3365 { 3366 struct seq_file *m = file->private_data; 3367 struct drm_device *dev = m->private; 3368 3369 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency); 3370 } 3371 3372 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3373 size_t len, loff_t *offp) 3374 { 3375 struct seq_file *m = file->private_data; 3376 struct drm_device *dev = m->private; 3377 3378 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency); 3379 } 3380 3381 static const struct file_operations i915_pri_wm_latency_fops = { 3382 .owner = THIS_MODULE, 3383 .open = pri_wm_latency_open, 3384 .read = seq_read, 3385 .llseek = seq_lseek, 3386 .release = single_release, 3387 .write = pri_wm_latency_write 3388 }; 3389 3390 static const struct file_operations i915_spr_wm_latency_fops = { 3391 .owner = THIS_MODULE, 3392 .open = spr_wm_latency_open, 3393 .read = seq_read, 3394 .llseek = seq_lseek, 3395 .release = single_release, 3396 .write = spr_wm_latency_write 3397 }; 3398 3399 static const struct file_operations i915_cur_wm_latency_fops = { 3400 .owner = THIS_MODULE, 3401 .open = cur_wm_latency_open, 3402 .read = seq_read, 3403 .llseek = seq_lseek, 3404 .release = single_release, 3405 .write = cur_wm_latency_write 3406 }; 3407 3408 static int 3409 i915_wedged_get(void *data, u64 *val) 3410 { 3411 struct drm_device *dev = data; 3412 struct drm_i915_private *dev_priv = dev->dev_private; 3413 3414 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 3415 3416 return 0; 3417 } 3418 3419 static int 3420 i915_wedged_set(void *data, u64 val) 3421 { 3422 struct drm_device *dev = data; 3423 struct drm_i915_private *dev_priv = dev->dev_private; 3424 3425 intel_runtime_pm_get(dev_priv); 3426 3427 i915_handle_error(dev, val, 3428 "Manually setting wedged to %llu", val); 3429 3430 intel_runtime_pm_put(dev_priv); 3431 3432 return 0; 3433 } 3434 3435 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 3436 i915_wedged_get, i915_wedged_set, 3437 "%llu\n"); 3438 3439 static int 3440 i915_ring_stop_get(void *data, u64 *val) 3441 { 3442 struct drm_device *dev = data; 3443 struct drm_i915_private *dev_priv = dev->dev_private; 3444 3445 *val = dev_priv->gpu_error.stop_rings; 3446 3447 return 
0; 3448 } 3449 3450 static int 3451 i915_ring_stop_set(void *data, u64 val) 3452 { 3453 struct drm_device *dev = data; 3454 struct drm_i915_private *dev_priv = dev->dev_private; 3455 int ret; 3456 3457 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 3458 3459 ret = mutex_lock_interruptible(&dev->struct_mutex); 3460 if (ret) 3461 return ret; 3462 3463 dev_priv->gpu_error.stop_rings = val; 3464 mutex_unlock(&dev->struct_mutex); 3465 3466 return 0; 3467 } 3468 3469 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 3470 i915_ring_stop_get, i915_ring_stop_set, 3471 "0x%08llx\n"); 3472 3473 static int 3474 i915_ring_missed_irq_get(void *data, u64 *val) 3475 { 3476 struct drm_device *dev = data; 3477 struct drm_i915_private *dev_priv = dev->dev_private; 3478 3479 *val = dev_priv->gpu_error.missed_irq_rings; 3480 return 0; 3481 } 3482 3483 static int 3484 i915_ring_missed_irq_set(void *data, u64 val) 3485 { 3486 struct drm_device *dev = data; 3487 struct drm_i915_private *dev_priv = dev->dev_private; 3488 int ret; 3489 3490 /* Lock against concurrent debugfs callers */ 3491 ret = mutex_lock_interruptible(&dev->struct_mutex); 3492 if (ret) 3493 return ret; 3494 dev_priv->gpu_error.missed_irq_rings = val; 3495 mutex_unlock(&dev->struct_mutex); 3496 3497 return 0; 3498 } 3499 3500 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 3501 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 3502 "0x%08llx\n"); 3503 3504 static int 3505 i915_ring_test_irq_get(void *data, u64 *val) 3506 { 3507 struct drm_device *dev = data; 3508 struct drm_i915_private *dev_priv = dev->dev_private; 3509 3510 *val = dev_priv->gpu_error.test_irq_rings; 3511 3512 return 0; 3513 } 3514 3515 static int 3516 i915_ring_test_irq_set(void *data, u64 val) 3517 { 3518 struct drm_device *dev = data; 3519 struct drm_i915_private *dev_priv = dev->dev_private; 3520 int ret; 3521 3522 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 3523 3524 /* Lock against concurrent debugfs callers */ 3525 ret = mutex_lock_interruptible(&dev->struct_mutex); 3526 if (ret) 3527 return ret; 3528 3529 dev_priv->gpu_error.test_irq_rings = val; 3530 mutex_unlock(&dev->struct_mutex); 3531 3532 return 0; 3533 } 3534 3535 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 3536 i915_ring_test_irq_get, i915_ring_test_irq_set, 3537 "0x%08llx\n"); 3538 3539 #define DROP_UNBOUND 0x1 3540 #define DROP_BOUND 0x2 3541 #define DROP_RETIRE 0x4 3542 #define DROP_ACTIVE 0x8 3543 #define DROP_ALL (DROP_UNBOUND | \ 3544 DROP_BOUND | \ 3545 DROP_RETIRE | \ 3546 DROP_ACTIVE) 3547 static int 3548 i915_drop_caches_get(void *data, u64 *val) 3549 { 3550 *val = DROP_ALL; 3551 3552 return 0; 3553 } 3554 3555 static int 3556 i915_drop_caches_set(void *data, u64 val) 3557 { 3558 struct drm_device *dev = data; 3559 struct drm_i915_private *dev_priv = dev->dev_private; 3560 struct drm_i915_gem_object *obj, *next; 3561 struct i915_address_space *vm; 3562 struct i915_vma *vma, *x; 3563 int ret; 3564 3565 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3566 3567 /* No need to check and wait for gpu resets, only libdrm auto-restarts 3568 * on ioctls on -EAGAIN. 
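As an example, writing DROP_ALL (0xf) to i915_gem_drop_caches idles the GPU, retires outstanding requests and unbinds or drops the pages of everything that is not pinned.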
*/ 3569 ret = mutex_lock_interruptible(&dev->struct_mutex); 3570 if (ret) 3571 return ret; 3572 3573 if (val & DROP_ACTIVE) { 3574 ret = i915_gpu_idle(dev); 3575 if (ret) 3576 goto unlock; 3577 } 3578 3579 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3580 i915_gem_retire_requests(dev); 3581 3582 if (val & DROP_BOUND) { 3583 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3584 list_for_each_entry_safe(vma, x, &vm->inactive_list, 3585 mm_list) { 3586 if (vma->pin_count) 3587 continue; 3588 3589 ret = i915_vma_unbind(vma); 3590 if (ret) 3591 goto unlock; 3592 } 3593 } 3594 } 3595 3596 if (val & DROP_UNBOUND) { 3597 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, 3598 global_list) 3599 if (obj->pages_pin_count == 0) { 3600 ret = i915_gem_object_put_pages(obj); 3601 if (ret) 3602 goto unlock; 3603 } 3604 } 3605 3606 unlock: 3607 mutex_unlock(&dev->struct_mutex); 3608 3609 return ret; 3610 } 3611 3612 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 3613 i915_drop_caches_get, i915_drop_caches_set, 3614 "0x%08llx\n"); 3615 3616 static int 3617 i915_max_freq_get(void *data, u64 *val) 3618 { 3619 struct drm_device *dev = data; 3620 struct drm_i915_private *dev_priv = dev->dev_private; 3621 int ret; 3622 3623 if (INTEL_INFO(dev)->gen < 6) 3624 return -ENODEV; 3625 3626 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3627 3628 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3629 if (ret) 3630 return ret; 3631 3632 if (IS_VALLEYVIEW(dev)) 3633 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 3634 else 3635 *val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3636 mutex_unlock(&dev_priv->rps.hw_lock); 3637 3638 return 0; 3639 } 3640 3641 static int 3642 i915_max_freq_set(void *data, u64 val) 3643 { 3644 struct drm_device *dev = data; 3645 struct drm_i915_private *dev_priv = dev->dev_private; 3646 u32 rp_state_cap, hw_max, hw_min; 3647 int ret; 3648 3649 if (INTEL_INFO(dev)->gen < 6) 3650 return -ENODEV; 3651 3652 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3653 3654 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 3655 3656 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3657 if (ret) 3658 return ret; 3659 3660 /* 3661 * Turbo will still be enabled, but won't go above the set value. 
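The value is taken in MHz and converted to RPS units with GT_FREQUENCY_MULTIPLIER (or via vlv_freq_opcode() on Valleyview) before being range-checked.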
3662 */ 3663 if (IS_VALLEYVIEW(dev)) { 3664 val = vlv_freq_opcode(dev_priv, val); 3665 3666 hw_max = dev_priv->rps.max_freq; 3667 hw_min = dev_priv->rps.min_freq; 3668 } else { 3669 do_div(val, GT_FREQUENCY_MULTIPLIER); 3670 3671 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3672 hw_max = dev_priv->rps.max_freq; 3673 hw_min = (rp_state_cap >> 16) & 0xff; 3674 } 3675 3676 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 3677 mutex_unlock(&dev_priv->rps.hw_lock); 3678 return -EINVAL; 3679 } 3680 3681 dev_priv->rps.max_freq_softlimit = val; 3682 3683 if (IS_VALLEYVIEW(dev)) 3684 valleyview_set_rps(dev, val); 3685 else 3686 gen6_set_rps(dev, val); 3687 3688 mutex_unlock(&dev_priv->rps.hw_lock); 3689 3690 return 0; 3691 } 3692 3693 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 3694 i915_max_freq_get, i915_max_freq_set, 3695 "%llu\n"); 3696 3697 static int 3698 i915_min_freq_get(void *data, u64 *val) 3699 { 3700 struct drm_device *dev = data; 3701 struct drm_i915_private *dev_priv = dev->dev_private; 3702 int ret; 3703 3704 if (INTEL_INFO(dev)->gen < 6) 3705 return -ENODEV; 3706 3707 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3708 3709 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3710 if (ret) 3711 return ret; 3712 3713 if (IS_VALLEYVIEW(dev)) 3714 *val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 3715 else 3716 *val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER; 3717 mutex_unlock(&dev_priv->rps.hw_lock); 3718 3719 return 0; 3720 } 3721 3722 static int 3723 i915_min_freq_set(void *data, u64 val) 3724 { 3725 struct drm_device *dev = data; 3726 struct drm_i915_private *dev_priv = dev->dev_private; 3727 u32 rp_state_cap, hw_max, hw_min; 3728 int ret; 3729 3730 if (INTEL_INFO(dev)->gen < 6) 3731 return -ENODEV; 3732 3733 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 3734 3735 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 3736 3737 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 3738 if (ret) 3739 return ret; 3740 3741 /* 3742 * Turbo will still be enabled, but won't go below the set value. 
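Units are handled exactly as in i915_max_freq_set() above.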
3743 */ 3744 if (IS_VALLEYVIEW(dev)) { 3745 val = vlv_freq_opcode(dev_priv, val); 3746 3747 hw_max = dev_priv->rps.max_freq; 3748 hw_min = dev_priv->rps.min_freq; 3749 } else { 3750 do_div(val, GT_FREQUENCY_MULTIPLIER); 3751 3752 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3753 hw_max = dev_priv->rps.max_freq; 3754 hw_min = (rp_state_cap >> 16) & 0xff; 3755 } 3756 3757 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 3758 mutex_unlock(&dev_priv->rps.hw_lock); 3759 return -EINVAL; 3760 } 3761 3762 dev_priv->rps.min_freq_softlimit = val; 3763 3764 if (IS_VALLEYVIEW(dev)) 3765 valleyview_set_rps(dev, val); 3766 else 3767 gen6_set_rps(dev, val); 3768 3769 mutex_unlock(&dev_priv->rps.hw_lock); 3770 3771 return 0; 3772 } 3773 3774 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 3775 i915_min_freq_get, i915_min_freq_set, 3776 "%llu\n"); 3777 3778 static int 3779 i915_cache_sharing_get(void *data, u64 *val) 3780 { 3781 struct drm_device *dev = data; 3782 struct drm_i915_private *dev_priv = dev->dev_private; 3783 u32 snpcr; 3784 int ret; 3785 3786 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3787 return -ENODEV; 3788 3789 ret = mutex_lock_interruptible(&dev->struct_mutex); 3790 if (ret) 3791 return ret; 3792 intel_runtime_pm_get(dev_priv); 3793 3794 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 3795 3796 intel_runtime_pm_put(dev_priv); 3797 mutex_unlock(&dev->struct_mutex); 3798 3799 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 3800 3801 return 0; 3802 } 3803 3804 static int 3805 i915_cache_sharing_set(void *data, u64 val) 3806 { 3807 struct drm_device *dev = data; 3808 struct drm_i915_private *dev_priv = dev->dev_private; 3809 u32 snpcr; 3810 3811 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3812 return -ENODEV; 3813 3814 if (val > 3) 3815 return -EINVAL; 3816 3817 intel_runtime_pm_get(dev_priv); 3818 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 3819 3820 /* Update the cache sharing policy here as well */ 3821 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 3822 snpcr &= ~GEN6_MBC_SNPCR_MASK; 3823 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 3824 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3825 3826 intel_runtime_pm_put(dev_priv); 3827 return 0; 3828 } 3829 3830 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 3831 i915_cache_sharing_get, i915_cache_sharing_set, 3832 "%llu\n"); 3833 3834 static int i915_forcewake_open(struct inode *inode, struct file *file) 3835 { 3836 struct drm_device *dev = inode->i_private; 3837 struct drm_i915_private *dev_priv = dev->dev_private; 3838 3839 if (INTEL_INFO(dev)->gen < 6) 3840 return 0; 3841 3842 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3843 3844 return 0; 3845 } 3846 3847 static int i915_forcewake_release(struct inode *inode, struct file *file) 3848 { 3849 struct drm_device *dev = inode->i_private; 3850 struct drm_i915_private *dev_priv = dev->dev_private; 3851 3852 if (INTEL_INFO(dev)->gen < 6) 3853 return 0; 3854 3855 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3856 3857 return 0; 3858 } 3859 3860 static const struct file_operations i915_forcewake_fops = { 3861 .owner = THIS_MODULE, 3862 .open = i915_forcewake_open, 3863 .release = i915_forcewake_release, 3864 }; 3865 3866 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 3867 { 3868 struct drm_device *dev = minor->dev; 3869 struct dentry *ent; 3870 3871 ent = debugfs_create_file("i915_forcewake_user", 3872 S_IRUSR, 3873 root, dev, 3874 &i915_forcewake_fops); 3875 if (!ent) 3876 return -ENOMEM; 3877 3878 return
drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 3879 } 3880 3881 static int i915_debugfs_create(struct dentry *root, 3882 struct drm_minor *minor, 3883 const char *name, 3884 const struct file_operations *fops) 3885 { 3886 struct drm_device *dev = minor->dev; 3887 struct dentry *ent; 3888 3889 ent = debugfs_create_file(name, 3890 S_IRUGO | S_IWUSR, 3891 root, dev, 3892 fops); 3893 if (!ent) 3894 return -ENOMEM; 3895 3896 return drm_add_fake_info_node(minor, ent, fops); 3897 } 3898 3899 static const struct drm_info_list i915_debugfs_list[] = { 3900 {"i915_capabilities", i915_capabilities, 0}, 3901 {"i915_gem_objects", i915_gem_object_info, 0}, 3902 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 3903 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 3904 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 3905 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 3906 {"i915_gem_stolen", i915_gem_stolen_list_info }, 3907 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 3908 {"i915_gem_request", i915_gem_request_info, 0}, 3909 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 3910 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 3911 {"i915_gem_interrupt", i915_interrupt_info, 0}, 3912 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 3913 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 3914 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 3915 {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS}, 3916 {"i915_frequency_info", i915_frequency_info, 0}, 3917 {"i915_drpc_info", i915_drpc_info, 0}, 3918 {"i915_emon_status", i915_emon_status, 0}, 3919 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 3920 {"i915_fbc_status", i915_fbc_status, 0}, 3921 {"i915_ips_status", i915_ips_status, 0}, 3922 {"i915_sr_status", i915_sr_status, 0}, 3923 {"i915_opregion", i915_opregion, 0}, 3924 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 3925 {"i915_context_status", i915_context_status, 0}, 3926 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 3927 {"i915_swizzle_info", i915_swizzle_info, 0}, 3928 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 3929 {"i915_llc", i915_llc, 0}, 3930 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 3931 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 3932 {"i915_energy_uJ", i915_energy_uJ, 0}, 3933 {"i915_pc8_status", i915_pc8_status, 0}, 3934 {"i915_power_domain_info", i915_power_domain_info, 0}, 3935 {"i915_display_info", i915_display_info, 0}, 3936 {"i915_semaphore_status", i915_semaphore_status, 0}, 3937 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 3938 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 3939 }; 3940 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 3941 3942 static const struct i915_debugfs_files { 3943 const char *name; 3944 const struct file_operations *fops; 3945 } i915_debugfs_files[] = { 3946 {"i915_wedged", &i915_wedged_fops}, 3947 {"i915_max_freq", &i915_max_freq_fops}, 3948 {"i915_min_freq", &i915_min_freq_fops}, 3949 {"i915_cache_sharing", &i915_cache_sharing_fops}, 3950 {"i915_ring_stop", &i915_ring_stop_fops}, 3951 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 3952 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 3953 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 3954 {"i915_error_state", &i915_error_state_fops}, 3955 {"i915_next_seqno", &i915_next_seqno_fops}, 3956 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 3957 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 3958 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 3959 
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 3960 }; 3961 3962 void intel_display_crc_init(struct drm_device *dev) 3963 { 3964 struct drm_i915_private *dev_priv = dev->dev_private; 3965 enum pipe pipe; 3966 3967 for_each_pipe(pipe) { 3968 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3969 3970 pipe_crc->opened = false; 3971 spin_lock_init(&pipe_crc->lock); 3972 init_waitqueue_head(&pipe_crc->wq); 3973 } 3974 } 3975 3976 int i915_debugfs_init(struct drm_minor *minor) 3977 { 3978 int ret, i; 3979 3980 ret = i915_forcewake_create(minor->debugfs_root, minor); 3981 if (ret) 3982 return ret; 3983 3984 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 3985 ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); 3986 if (ret) 3987 return ret; 3988 } 3989 3990 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 3991 ret = i915_debugfs_create(minor->debugfs_root, minor, 3992 i915_debugfs_files[i].name, 3993 i915_debugfs_files[i].fops); 3994 if (ret) 3995 return ret; 3996 } 3997 3998 return drm_debugfs_create_files(i915_debugfs_list, 3999 I915_DEBUGFS_ENTRIES, 4000 minor->debugfs_root, minor); 4001 } 4002 4003 void i915_debugfs_cleanup(struct drm_minor *minor) 4004 { 4005 int i; 4006 4007 drm_debugfs_remove_files(i915_debugfs_list, 4008 I915_DEBUGFS_ENTRIES, minor); 4009 4010 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 4011 1, minor); 4012 4013 for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { 4014 struct drm_info_list *info_list = 4015 (struct drm_info_list *)&i915_pipe_crc_data[i]; 4016 4017 drm_debugfs_remove_files(info_list, 1, minor); 4018 } 4019 4020 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4021 struct drm_info_list *info_list = 4022 (struct drm_info_list *) i915_debugfs_files[i].fops; 4023 4024 drm_debugfs_remove_files(info_list, 1, minor); 4025 } 4026 } 4027