/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1


#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	FLUSHING_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
	DEFERRED_FREE_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
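
/*
 * Print the device generation, PCH type, and every boolean feature flag
 * from the static intel_device_info table for this device.
 */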
"yes" : "no"; 56 } 57 58 static int i915_capabilities(struct seq_file *m, void *data) 59 { 60 struct drm_info_node *node = (struct drm_info_node *) m->private; 61 struct drm_device *dev = node->minor->dev; 62 const struct intel_device_info *info = INTEL_INFO(dev); 63 64 seq_printf(m, "gen: %d\n", info->gen); 65 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 66 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 67 B(is_mobile); 68 B(is_i85x); 69 B(is_i915g); 70 B(is_i945gm); 71 B(is_g33); 72 B(need_gfx_hws); 73 B(is_g4x); 74 B(is_pineview); 75 B(is_broadwater); 76 B(is_crestline); 77 B(has_fbc); 78 B(has_pipe_cxsr); 79 B(has_hotplug); 80 B(cursor_needs_physical); 81 B(has_overlay); 82 B(overlay_needs_physical); 83 B(supports_tv); 84 B(has_bsd_ring); 85 B(has_blt_ring); 86 #undef B 87 88 return 0; 89 } 90 91 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 92 { 93 if (obj->user_pin_count > 0) 94 return "P"; 95 else if (obj->pin_count > 0) 96 return "p"; 97 else 98 return " "; 99 } 100 101 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 102 { 103 switch (obj->tiling_mode) { 104 default: 105 case I915_TILING_NONE: return " "; 106 case I915_TILING_X: return "X"; 107 case I915_TILING_Y: return "Y"; 108 } 109 } 110 111 static const char *cache_level_str(int type) 112 { 113 switch (type) { 114 case I915_CACHE_NONE: return " uncached"; 115 case I915_CACHE_LLC: return " snooped (LLC)"; 116 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 117 default: return ""; 118 } 119 } 120 121 static void 122 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 123 { 124 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", 125 &obj->base, 126 get_pin_flag(obj), 127 get_tiling_flag(obj), 128 obj->base.size / 1024, 129 obj->base.read_domains, 130 obj->base.write_domain, 131 obj->last_rendering_seqno, 132 obj->last_fenced_seqno, 133 cache_level_str(obj->cache_level), 134 obj->dirty ? " dirty" : "", 135 obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); 136 if (obj->base.name) 137 seq_printf(m, " (name: %d)", obj->base.name); 138 if (obj->fence_reg != I915_FENCE_REG_NONE) 139 seq_printf(m, " (fence: %d)", obj->fence_reg); 140 if (obj->gtt_space != NULL) 141 seq_printf(m, " (gtt offset: %08x, size: %08x)", 142 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 143 if (obj->pin_mappable || obj->fault_mappable) { 144 char s[3], *t = s; 145 if (obj->pin_mappable) 146 *t++ = 'p'; 147 if (obj->fault_mappable) 148 *t++ = 'f'; 149 *t = '\0'; 150 seq_printf(m, " (%s mappable)", s); 151 } 152 if (obj->ring != NULL) 153 seq_printf(m, " (%s)", obj->ring->name); 154 } 155 156 static int i915_gem_object_list_info(struct seq_file *m, void *data) 157 { 158 struct drm_info_node *node = (struct drm_info_node *) m->private; 159 uintptr_t list = (uintptr_t) node->info_ent->data; 160 struct list_head *head; 161 struct drm_device *dev = node->minor->dev; 162 drm_i915_private_t *dev_priv = dev->dev_private; 163 struct drm_i915_gem_object *obj; 164 size_t total_obj_size, total_gtt_size; 165 int count, ret; 166 167 ret = mutex_lock_interruptible(&dev->struct_mutex); 168 if (ret) 169 return ret; 170 171 switch (list) { 172 case ACTIVE_LIST: 173 seq_printf(m, "Active:\n"); 174 head = &dev_priv->mm.active_list; 175 break; 176 case INACTIVE_LIST: 177 seq_printf(m, "Inactive:\n"); 178 head = &dev_priv->mm.inactive_list; 179 break; 180 case PINNED_LIST: 181 seq_printf(m, "Pinned:\n"); 182 head = &dev_priv->mm.pinned_list; 183 break; 184 case FLUSHING_LIST: 185 seq_printf(m, "Flushing:\n"); 186 head = &dev_priv->mm.flushing_list; 187 break; 188 case DEFERRED_FREE_LIST: 189 seq_printf(m, "Deferred free:\n"); 190 head = &dev_priv->mm.deferred_free_list; 191 break; 192 default: 193 mutex_unlock(&dev->struct_mutex); 194 return -EINVAL; 195 } 196 197 total_obj_size = total_gtt_size = count = 0; 198 list_for_each_entry(obj, head, mm_list) { 199 seq_printf(m, " "); 200 describe_obj(m, obj); 201 seq_printf(m, "\n"); 202 total_obj_size += obj->base.size; 203 total_gtt_size += obj->gtt_space->size; 204 count++; 205 } 206 mutex_unlock(&dev->struct_mutex); 207 208 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 209 count, total_obj_size, total_gtt_size); 210 return 0; 211 } 212 213 #define count_objects(list, member) do { \ 214 list_for_each_entry(obj, list, member) { \ 215 size += obj->gtt_space->size; \ 216 ++count; \ 217 if (obj->map_and_fenceable) { \ 218 mappable_size += obj->gtt_space->size; \ 219 ++mappable_count; \ 220 } \ 221 } \ 222 } while (0) 223 224 static int i915_gem_object_info(struct seq_file *m, void* data) 225 { 226 struct drm_info_node *node = (struct drm_info_node *) m->private; 227 struct drm_device *dev = node->minor->dev; 228 struct drm_i915_private *dev_priv = dev->dev_private; 229 u32 count, mappable_count; 230 size_t size, mappable_size; 231 struct drm_i915_gem_object *obj; 232 int ret; 233 234 ret = mutex_lock_interruptible(&dev->struct_mutex); 235 if (ret) 236 return ret; 237 238 seq_printf(m, "%u objects, %zu bytes\n", 239 dev_priv->mm.object_count, 240 dev_priv->mm.object_memory); 241 242 size = count = mappable_size = mappable_count = 0; 243 count_objects(&dev_priv->mm.gtt_list, gtt_list); 244 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 245 count, mappable_count, size, mappable_size); 246 247 size = count = mappable_size = mappable_count = 0; 248 count_objects(&dev_priv->mm.active_list, mm_list); 249 count_objects(&dev_priv->mm.flushing_list, mm_list); 250 seq_printf(m, " %u [%u] active objects, %zu 
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count;
	size_t size, mappable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.gtt_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	count_objects(&dev_priv->mm.flushing_list, mm_list);
	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.pinned_list, mm_list);
	seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
	seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
	}
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%zu] gtt total\n",
		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		seq_printf(m, " ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
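
/*
 * Report the page-flip state of every CRTC: whether a flip is queued or
 * pending on vblank, the stall-check state, and the GTT offsets of the
 * old and new framebuffer objects.
 */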
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (!work->pending) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", work->pending);

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
	int ret, count;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
		seq_printf(m, "Render requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[RCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
		seq_printf(m, "BSD requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[VCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
		seq_printf(m, "BLT requests:\n");
		list_for_each_entry(gem_request,
				    &dev_priv->ring[BCS].request_list,
				    list) {
			seq_printf(m, " %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %d\n",
			   ring->name, ring->get_seqno(ring));
		seq_printf(m, "Waiter sequence (%s): %d\n",
			   ring->name, ring->waiting_seqno);
		seq_printf(m, "IRQ sequence (%s): %d\n",
			   ring->name, ring->irq_seqno);
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_RINGS; i++)
		i915_ring_seqno_info(m, &dev_priv->ring[i]);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
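
/*
 * Dump the interrupt enable/identity/mask registers, using the split
 * north/south display register layout on PCH platforms, followed by the
 * per-ring interrupt masks and current sequence numbers.
 */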
"North Display Interrupt identity: %08x\n", 485 I915_READ(DEIIR)); 486 seq_printf(m, "North Display Interrupt mask: %08x\n", 487 I915_READ(DEIMR)); 488 seq_printf(m, "South Display Interrupt enable: %08x\n", 489 I915_READ(SDEIER)); 490 seq_printf(m, "South Display Interrupt identity: %08x\n", 491 I915_READ(SDEIIR)); 492 seq_printf(m, "South Display Interrupt mask: %08x\n", 493 I915_READ(SDEIMR)); 494 seq_printf(m, "Graphics Interrupt enable: %08x\n", 495 I915_READ(GTIER)); 496 seq_printf(m, "Graphics Interrupt identity: %08x\n", 497 I915_READ(GTIIR)); 498 seq_printf(m, "Graphics Interrupt mask: %08x\n", 499 I915_READ(GTIMR)); 500 } 501 seq_printf(m, "Interrupts received: %d\n", 502 atomic_read(&dev_priv->irq_received)); 503 for (i = 0; i < I915_NUM_RINGS; i++) { 504 if (IS_GEN6(dev) || IS_GEN7(dev)) { 505 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 506 dev_priv->ring[i].name, 507 I915_READ_IMR(&dev_priv->ring[i])); 508 } 509 i915_ring_seqno_info(m, &dev_priv->ring[i]); 510 } 511 mutex_unlock(&dev->struct_mutex); 512 513 return 0; 514 } 515 516 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 517 { 518 struct drm_info_node *node = (struct drm_info_node *) m->private; 519 struct drm_device *dev = node->minor->dev; 520 drm_i915_private_t *dev_priv = dev->dev_private; 521 int i, ret; 522 523 ret = mutex_lock_interruptible(&dev->struct_mutex); 524 if (ret) 525 return ret; 526 527 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 528 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 529 for (i = 0; i < dev_priv->num_fence_regs; i++) { 530 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 531 532 seq_printf(m, "Fenced object[%2d] = ", i); 533 if (obj == NULL) 534 seq_printf(m, "unused"); 535 else 536 describe_obj(m, obj); 537 seq_printf(m, "\n"); 538 } 539 540 mutex_unlock(&dev->struct_mutex); 541 return 0; 542 } 543 544 static int i915_hws_info(struct seq_file *m, void *data) 545 { 546 struct drm_info_node *node = (struct drm_info_node *) m->private; 547 struct drm_device *dev = node->minor->dev; 548 drm_i915_private_t *dev_priv = dev->dev_private; 549 struct intel_ring_buffer *ring; 550 const volatile u32 __iomem *hws; 551 int i; 552 553 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 554 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 555 if (hws == NULL) 556 return 0; 557 558 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 559 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 560 i * 4, 561 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 562 } 563 return 0; 564 } 565 566 static void i915_dump_object(struct seq_file *m, 567 struct io_mapping *mapping, 568 struct drm_i915_gem_object *obj) 569 { 570 int page, page_count, i; 571 572 page_count = obj->base.size / PAGE_SIZE; 573 for (page = 0; page < page_count; page++) { 574 u32 *mem = io_mapping_map_wc(mapping, 575 obj->gtt_offset + page * PAGE_SIZE); 576 for (i = 0; i < PAGE_SIZE; i += 4) 577 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 578 io_mapping_unmap(mem); 579 } 580 } 581 582 static int i915_batchbuffer_info(struct seq_file *m, void *data) 583 { 584 struct drm_info_node *node = (struct drm_info_node *) m->private; 585 struct drm_device *dev = node->minor->dev; 586 drm_i915_private_t *dev_priv = dev->dev_private; 587 struct drm_i915_gem_object *obj; 588 int ret; 589 590 ret = mutex_lock_interruptible(&dev->struct_mutex); 591 if (ret) 592 return ret; 593 594 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 595 if 
static int i915_batchbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_ringbuffer_data(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (!ring->obj) {
		seq_printf(m, "No ringbuffer setup\n");
	} else {
		const u8 __iomem *virt = ring->virtual_start;
		uint32_t off;

		for (off = 0; off < ring->size; off += 4) {
			uint32_t *ptr = (uint32_t *)(virt + off);
			seq_printf(m, "%08x : %08x\n", off, *ptr);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ringbuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	if (ring->size == 0)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Ring %s:\n", ring->name);
	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
	seq_printf(m, " Size : %08x\n", ring->size);
	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
	}
	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RING_RENDER: return " render";
	case RING_BSD: return " bsd";
	case RING_BLT: return " blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
" purgeable" : ""; 706 } 707 708 static void print_error_buffers(struct seq_file *m, 709 const char *name, 710 struct drm_i915_error_buffer *err, 711 int count) 712 { 713 seq_printf(m, "%s [%d]:\n", name, count); 714 715 while (count--) { 716 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", 717 err->gtt_offset, 718 err->size, 719 err->read_domains, 720 err->write_domain, 721 err->seqno, 722 pin_flag(err->pinned), 723 tiling_flag(err->tiling), 724 dirty_flag(err->dirty), 725 purgeable_flag(err->purgeable), 726 ring_str(err->ring), 727 cache_level_str(err->cache_level)); 728 729 if (err->name) 730 seq_printf(m, " (name: %d)", err->name); 731 if (err->fence_reg != I915_FENCE_REG_NONE) 732 seq_printf(m, " (fence: %d)", err->fence_reg); 733 734 seq_printf(m, "\n"); 735 err++; 736 } 737 } 738 739 static int i915_error_state(struct seq_file *m, void *unused) 740 { 741 struct drm_info_node *node = (struct drm_info_node *) m->private; 742 struct drm_device *dev = node->minor->dev; 743 drm_i915_private_t *dev_priv = dev->dev_private; 744 struct drm_i915_error_state *error; 745 unsigned long flags; 746 int i, page, offset, elt; 747 748 spin_lock_irqsave(&dev_priv->error_lock, flags); 749 if (!dev_priv->first_error) { 750 seq_printf(m, "no error state collected\n"); 751 goto out; 752 } 753 754 error = dev_priv->first_error; 755 756 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 757 error->time.tv_usec); 758 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 759 seq_printf(m, "EIR: 0x%08x\n", error->eir); 760 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 761 if (INTEL_INFO(dev)->gen >= 6) { 762 seq_printf(m, "ERROR: 0x%08x\n", error->error); 763 seq_printf(m, "Blitter command stream:\n"); 764 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); 765 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); 766 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); 767 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); 768 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); 769 seq_printf(m, "Video (BSD) command stream:\n"); 770 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); 771 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); 772 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); 773 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); 774 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); 775 } 776 seq_printf(m, "Render command stream:\n"); 777 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); 778 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 779 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 780 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 781 if (INTEL_INFO(dev)->gen >= 4) { 782 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 783 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 784 } 785 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 786 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 787 788 for (i = 0; i < dev_priv->num_fence_regs; i++) 789 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 790 791 if (error->active_bo) 792 print_error_buffers(m, "Active", 793 error->active_bo, 794 error->active_bo_count); 795 796 if (error->pinned_bo) 797 print_error_buffers(m, "Pinned", 798 error->pinned_bo, 799 error->pinned_bo_count); 800 801 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 802 if (error->batchbuffer[i]) { 803 struct drm_i915_error_object *obj = error->batchbuffer[i]; 804 805 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 806 dev_priv->ring[i].name, 807 obj->gtt_offset); 808 offset = 0; 809 for (page = 0; 
	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
		if (error->batchbuffer[i]) {
			struct drm_i915_error_object *obj = error->batchbuffer[i];

			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
		if (error->ringbuffer[i]) {
			struct drm_i915_error_object *obj = error->ringbuffer[i];
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x : %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	return 0;
}

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
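
/*
 * Report the current GPU frequency/voltage state: the MEMSTAT registers
 * on Ironlake, or the RPS (render P-state) registers on Gen6/Gen7, where
 * each ratio step corresponds to 50MHz.
 */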
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
						GEN6_CAGF_SHIFT) * 50);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * 50);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * 50);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
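
/*
 * Report render-standby (RC) power state: the Ironlake MEMMODE/RSTDBY
 * registers here, or the Gen6+ RC6 control and core status registers
 * in gen6_drpc_info() below.
 */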
"yes" : "no"); 1034 seq_printf(m, "Starting frequency: P%d\n", 1035 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1036 seq_printf(m, "Max P-state: P%d\n", 1037 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1038 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1039 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1040 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1041 seq_printf(m, "Render standby enabled: %s\n", 1042 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1043 seq_printf(m, "Current RS state: "); 1044 switch (rstdbyctl & RSX_STATUS_MASK) { 1045 case RSX_STATUS_ON: 1046 seq_printf(m, "on\n"); 1047 break; 1048 case RSX_STATUS_RC1: 1049 seq_printf(m, "RC1\n"); 1050 break; 1051 case RSX_STATUS_RC1E: 1052 seq_printf(m, "RC1E\n"); 1053 break; 1054 case RSX_STATUS_RS1: 1055 seq_printf(m, "RS1\n"); 1056 break; 1057 case RSX_STATUS_RS2: 1058 seq_printf(m, "RS2 (RC6)\n"); 1059 break; 1060 case RSX_STATUS_RS3: 1061 seq_printf(m, "RC3 (RC6+)\n"); 1062 break; 1063 default: 1064 seq_printf(m, "unknown\n"); 1065 break; 1066 } 1067 1068 return 0; 1069 } 1070 1071 static int gen6_drpc_info(struct seq_file *m) 1072 { 1073 1074 struct drm_info_node *node = (struct drm_info_node *) m->private; 1075 struct drm_device *dev = node->minor->dev; 1076 struct drm_i915_private *dev_priv = dev->dev_private; 1077 u32 rpmodectl1, gt_core_status, rcctl1; 1078 unsigned forcewake_count; 1079 int count=0, ret; 1080 1081 1082 ret = mutex_lock_interruptible(&dev->struct_mutex); 1083 if (ret) 1084 return ret; 1085 1086 spin_lock_irq(&dev_priv->gt_lock); 1087 forcewake_count = dev_priv->forcewake_count; 1088 spin_unlock_irq(&dev_priv->gt_lock); 1089 1090 if (forcewake_count) { 1091 seq_printf(m, "RC information inaccurate because somebody " 1092 "holds a forcewake reference \n"); 1093 } else { 1094 /* NB: we cannot use forcewake, else we read the wrong values */ 1095 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1096 udelay(10); 1097 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1098 } 1099 1100 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1101 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1102 1103 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1104 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1105 mutex_unlock(&dev->struct_mutex); 1106 1107 seq_printf(m, "Video Turbo Mode: %s\n", 1108 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1109 seq_printf(m, "HW control enabled: %s\n", 1110 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1111 seq_printf(m, "SW control enabled: %s\n", 1112 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1113 GEN6_RP_MEDIA_SW_MODE)); 1114 seq_printf(m, "RC1e Enabled: %s\n", 1115 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1116 seq_printf(m, "RC6 Enabled: %s\n", 1117 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1118 seq_printf(m, "Deep RC6 Enabled: %s\n", 1119 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1120 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1121 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1122 seq_printf(m, "Current RC state: "); 1123 switch (gt_core_status & GEN6_RCn_MASK) { 1124 case GEN6_RC0: 1125 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1126 seq_printf(m, "Core Power Down\n"); 1127 else 1128 seq_printf(m, "on\n"); 1129 break; 1130 case GEN6_RC3: 1131 seq_printf(m, "RC3\n"); 1132 break; 1133 case GEN6_RC6: 1134 seq_printf(m, "RC6\n"); 1135 break; 1136 case GEN6_RC7: 1137 seq_printf(m, "RC7\n"); 1138 break; 1139 default: 1140 seq_printf(m, "Unknown\n"); 1141 break; 1142 } 1143 1144 
seq_printf(m, "Core Power Down: %s\n", 1145 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1146 return 0; 1147 } 1148 1149 static int i915_drpc_info(struct seq_file *m, void *unused) 1150 { 1151 struct drm_info_node *node = (struct drm_info_node *) m->private; 1152 struct drm_device *dev = node->minor->dev; 1153 1154 if (IS_GEN6(dev) || IS_GEN7(dev)) 1155 return gen6_drpc_info(m); 1156 else 1157 return ironlake_drpc_info(m); 1158 } 1159 1160 static int i915_fbc_status(struct seq_file *m, void *unused) 1161 { 1162 struct drm_info_node *node = (struct drm_info_node *) m->private; 1163 struct drm_device *dev = node->minor->dev; 1164 drm_i915_private_t *dev_priv = dev->dev_private; 1165 1166 if (!I915_HAS_FBC(dev)) { 1167 seq_printf(m, "FBC unsupported on this chipset\n"); 1168 return 0; 1169 } 1170 1171 if (intel_fbc_enabled(dev)) { 1172 seq_printf(m, "FBC enabled\n"); 1173 } else { 1174 seq_printf(m, "FBC disabled: "); 1175 switch (dev_priv->no_fbc_reason) { 1176 case FBC_NO_OUTPUT: 1177 seq_printf(m, "no outputs"); 1178 break; 1179 case FBC_STOLEN_TOO_SMALL: 1180 seq_printf(m, "not enough stolen memory"); 1181 break; 1182 case FBC_UNSUPPORTED_MODE: 1183 seq_printf(m, "mode not supported"); 1184 break; 1185 case FBC_MODE_TOO_LARGE: 1186 seq_printf(m, "mode too large"); 1187 break; 1188 case FBC_BAD_PLANE: 1189 seq_printf(m, "FBC unsupported on plane"); 1190 break; 1191 case FBC_NOT_TILED: 1192 seq_printf(m, "scanout buffer not tiled"); 1193 break; 1194 case FBC_MULTIPLE_PIPES: 1195 seq_printf(m, "multiple pipes are enabled"); 1196 break; 1197 case FBC_MODULE_PARAM: 1198 seq_printf(m, "disabled per module param (default off)"); 1199 break; 1200 default: 1201 seq_printf(m, "unknown reason"); 1202 } 1203 seq_printf(m, "\n"); 1204 } 1205 return 0; 1206 } 1207 1208 static int i915_sr_status(struct seq_file *m, void *unused) 1209 { 1210 struct drm_info_node *node = (struct drm_info_node *) m->private; 1211 struct drm_device *dev = node->minor->dev; 1212 drm_i915_private_t *dev_priv = dev->dev_private; 1213 bool sr_enabled = false; 1214 1215 if (HAS_PCH_SPLIT(dev)) 1216 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1217 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1218 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1219 else if (IS_I915GM(dev)) 1220 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1221 else if (IS_PINEVIEW(dev)) 1222 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1223 1224 seq_printf(m, "self-refresh: %s\n", 1225 sr_enabled ? 
"enabled" : "disabled"); 1226 1227 return 0; 1228 } 1229 1230 static int i915_emon_status(struct seq_file *m, void *unused) 1231 { 1232 struct drm_info_node *node = (struct drm_info_node *) m->private; 1233 struct drm_device *dev = node->minor->dev; 1234 drm_i915_private_t *dev_priv = dev->dev_private; 1235 unsigned long temp, chipset, gfx; 1236 int ret; 1237 1238 ret = mutex_lock_interruptible(&dev->struct_mutex); 1239 if (ret) 1240 return ret; 1241 1242 temp = i915_mch_val(dev_priv); 1243 chipset = i915_chipset_val(dev_priv); 1244 gfx = i915_gfx_val(dev_priv); 1245 mutex_unlock(&dev->struct_mutex); 1246 1247 seq_printf(m, "GMCH temp: %ld\n", temp); 1248 seq_printf(m, "Chipset power: %ld\n", chipset); 1249 seq_printf(m, "GFX power: %ld\n", gfx); 1250 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1251 1252 return 0; 1253 } 1254 1255 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1256 { 1257 struct drm_info_node *node = (struct drm_info_node *) m->private; 1258 struct drm_device *dev = node->minor->dev; 1259 drm_i915_private_t *dev_priv = dev->dev_private; 1260 int ret; 1261 int gpu_freq, ia_freq; 1262 1263 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1264 seq_printf(m, "unsupported on this chipset\n"); 1265 return 0; 1266 } 1267 1268 ret = mutex_lock_interruptible(&dev->struct_mutex); 1269 if (ret) 1270 return ret; 1271 1272 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1273 1274 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1275 gpu_freq++) { 1276 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1277 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1278 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1279 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 1280 GEN6_PCODE_READY) == 0, 10)) { 1281 DRM_ERROR("pcode read of freq table timed out\n"); 1282 continue; 1283 } 1284 ia_freq = I915_READ(GEN6_PCODE_DATA); 1285 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1286 } 1287 1288 mutex_unlock(&dev->struct_mutex); 1289 1290 return 0; 1291 } 1292 1293 static int i915_gfxec(struct seq_file *m, void *unused) 1294 { 1295 struct drm_info_node *node = (struct drm_info_node *) m->private; 1296 struct drm_device *dev = node->minor->dev; 1297 drm_i915_private_t *dev_priv = dev->dev_private; 1298 int ret; 1299 1300 ret = mutex_lock_interruptible(&dev->struct_mutex); 1301 if (ret) 1302 return ret; 1303 1304 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1305 1306 mutex_unlock(&dev->struct_mutex); 1307 1308 return 0; 1309 } 1310 1311 static int i915_opregion(struct seq_file *m, void *unused) 1312 { 1313 struct drm_info_node *node = (struct drm_info_node *) m->private; 1314 struct drm_device *dev = node->minor->dev; 1315 drm_i915_private_t *dev_priv = dev->dev_private; 1316 struct intel_opregion *opregion = &dev_priv->opregion; 1317 int ret; 1318 1319 ret = mutex_lock_interruptible(&dev->struct_mutex); 1320 if (ret) 1321 return ret; 1322 1323 if (opregion->header) 1324 seq_write(m, opregion->header, OPREGION_SIZE); 1325 1326 mutex_unlock(&dev->struct_mutex); 1327 1328 return 0; 1329 } 1330 1331 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1332 { 1333 struct drm_info_node *node = (struct drm_info_node *) m->private; 1334 struct drm_device *dev = node->minor->dev; 1335 drm_i915_private_t *dev_priv = dev->dev_private; 1336 struct intel_fbdev *ifbdev; 1337 struct intel_framebuffer *fb; 1338 int ret; 1339 1340 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1341 if (ret) 1342 return ret; 1343 1344 ifbdev = 
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel);
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");

	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel);
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->renderctx);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "wedged : %d\n",
		       atomic_read(&dev_priv->mm.wedged));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_wedged_write(struct file *filp,
		  const char __user *ubuf,
		  size_t cnt,
		  loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_INFO("Manually setting wedged to %d\n", val);
	i915_handle_error(dev, val);

	return cnt;
}

static const struct file_operations i915_wedged_fops = {
	.owner = THIS_MODULE,
	.open = i915_wedged_open,
	.read = i915_wedged_read,
	.write = i915_wedged_write,
	.llseek = default_llseek,
};
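
/*
 * i915_max_freq: read or override the maximum RPS frequency in MHz.
 * Writes lower the turbo ceiling; turbo stays enabled below the new limit.
 */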
static int
i915_max_freq_open(struct inode *inode,
		   struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_max_freq_read(struct file *filp,
		   char __user *ubuf,
		   size_t max,
		   loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "max freq: %d\n", dev_priv->max_delay * 50);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_max_freq_write(struct file *filp,
		    const char __user *ubuf,
		    size_t cnt,
		    loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	dev_priv->max_delay = val / 50;

	gen6_set_rps(dev, val / 50);

	return cnt;
}

static const struct file_operations i915_max_freq_fops = {
	.owner = THIS_MODULE,
	.open = i915_max_freq_open,
	.read = i915_max_freq_read,
	.write = i915_max_freq_write,
	.llseek = default_llseek,
};

static int
i915_cache_sharing_open(struct inode *inode,
			struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t
i915_cache_sharing_read(struct file *filp,
			char __user *ubuf,
			size_t max,
			loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	u32 snpcr;
	int len;

	mutex_lock(&dev_priv->dev->struct_mutex);
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	len = snprintf(buf, sizeof(buf),
		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
		       GEN6_MBC_SNPCR_SHIFT);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

static ssize_t
i915_cache_sharing_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	char buf[20];
	u32 snpcr;
	int val = 1;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		val = simple_strtoul(buf, NULL, 0);
	}

	if (val < 0 || val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return cnt;
}

static const struct file_operations i915_cache_sharing_fops = {
	.owner = THIS_MODULE,
	.open = i915_cache_sharing_open,
	.read = i915_cache_sharing_read,
	.write = i915_cache_sharing_write,
	.llseek = default_llseek,
};

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_wedged",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_wedged_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	gen6_gt_force_wake_get(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	/*
	 * It's bad that we can potentially hang userspace if struct_mutex gets
	 * forever stuck. However, if we cannot acquire this lock it means that
	 * almost certainly the driver has hung and is not unload-able.
	 * Therefore hanging here is probably a minor inconvenience that almost
	 * no user will ever see.
	 */
	mutex_lock(&dev->struct_mutex);
	gen6_gt_force_wake_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_max_freq",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_max_freq_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
}

static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_cache_sharing",
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  &i915_cache_sharing_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
}

static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
	{"i915_batchbuffers", i915_batchbuffer_info, 0},
	{"i915_error_state", i915_error_state, 0},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_wedged_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;
	ret = i915_max_freq_create(minor->debugfs_root, minor);
	if (ret)
		return ret;
	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */