1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include <linux/export.h> 33 #include "drmP.h" 34 #include "drm.h" 35 #include "intel_drv.h" 36 #include "intel_ringbuffer.h" 37 #include "i915_drm.h" 38 #include "i915_drv.h" 39 40 #define DRM_I915_RING_DEBUG 1 41 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 enum { 46 ACTIVE_LIST, 47 FLUSHING_LIST, 48 INACTIVE_LIST, 49 PINNED_LIST, 50 DEFERRED_FREE_LIST, 51 }; 52 53 static const char *yesno(int v) 54 { 55 return v ? "yes" : "no"; 56 } 57 58 static int i915_capabilities(struct seq_file *m, void *data) 59 { 60 struct drm_info_node *node = (struct drm_info_node *) m->private; 61 struct drm_device *dev = node->minor->dev; 62 const struct intel_device_info *info = INTEL_INFO(dev); 63 64 seq_printf(m, "gen: %d\n", info->gen); 65 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 66 B(is_mobile); 67 B(is_i85x); 68 B(is_i915g); 69 B(is_i945gm); 70 B(is_g33); 71 B(need_gfx_hws); 72 B(is_g4x); 73 B(is_pineview); 74 B(is_broadwater); 75 B(is_crestline); 76 B(has_fbc); 77 B(has_pipe_cxsr); 78 B(has_hotplug); 79 B(cursor_needs_physical); 80 B(has_overlay); 81 B(overlay_needs_physical); 82 B(supports_tv); 83 B(has_bsd_ring); 84 B(has_blt_ring); 85 #undef B 86 87 return 0; 88 } 89 90 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 91 { 92 if (obj->user_pin_count > 0) 93 return "P"; 94 else if (obj->pin_count > 0) 95 return "p"; 96 else 97 return " "; 98 } 99 100 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 101 { 102 switch (obj->tiling_mode) { 103 default: 104 case I915_TILING_NONE: return " "; 105 case I915_TILING_X: return "X"; 106 case I915_TILING_Y: return "Y"; 107 } 108 } 109 110 static const char *cache_level_str(int type) 111 { 112 switch (type) { 113 case I915_CACHE_NONE: return " uncached"; 114 case I915_CACHE_LLC: return " snooped (LLC)"; 115 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 116 default: return ""; 117 } 118 } 119 120 static void 121 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 122 { 123 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", 124 &obj->base, 125 get_pin_flag(obj), 126 get_tiling_flag(obj), 127 obj->base.size, 128 obj->base.read_domains, 129 obj->base.write_domain, 130 obj->last_rendering_seqno, 131 obj->last_fenced_seqno, 132 cache_level_str(obj->cache_level), 133 obj->dirty ? " dirty" : "", 134 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 135 if (obj->base.name) 136 seq_printf(m, " (name: %d)", obj->base.name); 137 if (obj->fence_reg != I915_FENCE_REG_NONE) 138 seq_printf(m, " (fence: %d)", obj->fence_reg); 139 if (obj->gtt_space != NULL) 140 seq_printf(m, " (gtt offset: %08x, size: %08x)", 141 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 142 if (obj->pin_mappable || obj->fault_mappable) { 143 char s[3], *t = s; 144 if (obj->pin_mappable) 145 *t++ = 'p'; 146 if (obj->fault_mappable) 147 *t++ = 'f'; 148 *t = '\0'; 149 seq_printf(m, " (%s mappable)", s); 150 } 151 if (obj->ring != NULL) 152 seq_printf(m, " (%s)", obj->ring->name); 153 } 154 155 static int i915_gem_object_list_info(struct seq_file *m, void *data) 156 { 157 struct drm_info_node *node = (struct drm_info_node *) m->private; 158 uintptr_t list = (uintptr_t) node->info_ent->data; 159 struct list_head *head; 160 struct drm_device *dev = node->minor->dev; 161 drm_i915_private_t *dev_priv = dev->dev_private; 162 struct drm_i915_gem_object *obj; 163 size_t total_obj_size, total_gtt_size; 164 int count, ret; 165 166 ret = mutex_lock_interruptible(&dev->struct_mutex); 167 if (ret) 168 return ret; 169 170 switch (list) { 171 case ACTIVE_LIST: 172 seq_printf(m, "Active:\n"); 173 head = &dev_priv->mm.active_list; 174 break; 175 case INACTIVE_LIST: 176 seq_printf(m, "Inactive:\n"); 177 head = &dev_priv->mm.inactive_list; 178 break; 179 case PINNED_LIST: 180 seq_printf(m, "Pinned:\n"); 181 head = &dev_priv->mm.pinned_list; 182 break; 183 case FLUSHING_LIST: 184 seq_printf(m, "Flushing:\n"); 185 head = &dev_priv->mm.flushing_list; 186 break; 187 case DEFERRED_FREE_LIST: 188 seq_printf(m, "Deferred free:\n"); 189 head = &dev_priv->mm.deferred_free_list; 190 break; 191 default: 192 mutex_unlock(&dev->struct_mutex); 193 return -EINVAL; 194 } 195 196 total_obj_size = total_gtt_size = count = 0; 197 list_for_each_entry(obj, head, mm_list) { 198 seq_printf(m, " "); 199 describe_obj(m, obj); 200 seq_printf(m, "\n"); 201 total_obj_size += obj->base.size; 202 total_gtt_size += obj->gtt_space->size; 203 count++; 204 } 205 mutex_unlock(&dev->struct_mutex); 206 207 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 208 count, total_obj_size, total_gtt_size); 209 return 0; 210 } 211 212 #define count_objects(list, member) do { \ 213 list_for_each_entry(obj, list, member) { \ 214 size += obj->gtt_space->size; \ 215 ++count; \ 216 if (obj->map_and_fenceable) { \ 217 mappable_size += obj->gtt_space->size; \ 218 ++mappable_count; \ 219 } \ 220 } \ 221 } while (0) 222 223 static int i915_gem_object_info(struct seq_file *m, void* data) 224 { 225 struct drm_info_node *node = (struct drm_info_node *) m->private; 226 struct drm_device *dev = node->minor->dev; 227 struct drm_i915_private *dev_priv = dev->dev_private; 228 u32 count, mappable_count; 229 size_t size, mappable_size; 230 struct drm_i915_gem_object *obj; 231 int ret; 232 233 ret = mutex_lock_interruptible(&dev->struct_mutex); 234 if (ret) 235 return ret; 236 237 seq_printf(m, "%u objects, %zu bytes\n", 238 dev_priv->mm.object_count, 239 dev_priv->mm.object_memory); 240 241 size = count = mappable_size = mappable_count = 0; 242 count_objects(&dev_priv->mm.gtt_list, gtt_list); 243 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 244 count, mappable_count, size, mappable_size); 245 246 size = count = mappable_size = mappable_count = 0; 247 count_objects(&dev_priv->mm.active_list, mm_list); 248 count_objects(&dev_priv->mm.flushing_list, mm_list); 249 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 250 count, mappable_count, size, mappable_size); 251 252 size = count = mappable_size = mappable_count = 0; 253 count_objects(&dev_priv->mm.pinned_list, mm_list); 254 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", 255 count, mappable_count, size, mappable_size); 256 257 size = count = mappable_size = mappable_count = 0; 258 count_objects(&dev_priv->mm.inactive_list, mm_list); 259 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 260 count, mappable_count, size, mappable_size); 261 262 size = count = mappable_size = mappable_count = 0; 263 count_objects(&dev_priv->mm.deferred_free_list, mm_list); 264 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", 265 count, mappable_count, size, mappable_size); 266 267 size = count = mappable_size = mappable_count = 0; 268 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 269 if (obj->fault_mappable) { 270 size += obj->gtt_space->size; 271 ++count; 272 } 273 if (obj->pin_mappable) { 274 mappable_size += obj->gtt_space->size; 275 ++mappable_count; 276 } 277 } 278 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 279 mappable_count, mappable_size); 280 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 281 count, size); 282 283 seq_printf(m, "%zu [%zu] gtt total\n", 284 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 285 286 mutex_unlock(&dev->struct_mutex); 287 288 return 0; 289 } 290 291 static int i915_gem_gtt_info(struct seq_file *m, void* data) 292 { 293 struct drm_info_node *node = (struct drm_info_node *) m->private; 294 struct drm_device *dev = node->minor->dev; 295 struct drm_i915_private *dev_priv = dev->dev_private; 296 struct drm_i915_gem_object *obj; 297 size_t total_obj_size, total_gtt_size; 298 int count, ret; 299 300 ret = mutex_lock_interruptible(&dev->struct_mutex); 301 if (ret) 302 return ret; 303 304 total_obj_size = total_gtt_size = count = 0; 305 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 306 seq_printf(m, " "); 307 describe_obj(m, obj); 308 seq_printf(m, "\n"); 309 total_obj_size += obj->base.size; 310 total_gtt_size += obj->gtt_space->size; 311 count++; 312 } 313 314 mutex_unlock(&dev->struct_mutex); 315 316 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 317 count, total_obj_size, total_gtt_size); 318 319 return 0; 320 } 321 322 323 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 324 { 325 struct drm_info_node *node = (struct drm_info_node *) m->private; 326 struct drm_device *dev = node->minor->dev; 327 unsigned long flags; 328 struct intel_crtc *crtc; 329 330 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 331 const char pipe = pipe_name(crtc->pipe); 332 const char plane = plane_name(crtc->plane); 333 struct intel_unpin_work *work; 334 335 spin_lock_irqsave(&dev->event_lock, flags); 336 work = crtc->unpin_work; 337 if (work == NULL) { 338 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 339 pipe, plane); 340 } else { 341 if (!work->pending) { 342 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 343 pipe, plane); 344 } else { 345 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 346 pipe, plane); 347 } 348 if (work->enable_stall_check) 349 seq_printf(m, "Stall check enabled, "); 350 else 351 seq_printf(m, "Stall check waiting for page flip ioctl, "); 352 seq_printf(m, "%d prepares\n", work->pending); 353 354 if (work->old_fb_obj) { 355 struct drm_i915_gem_object *obj = work->old_fb_obj; 356 if (obj) 357 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 358 } 359 if (work->pending_flip_obj) { 360 struct drm_i915_gem_object *obj = work->pending_flip_obj; 361 if (obj) 362 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 363 } 364 } 365 spin_unlock_irqrestore(&dev->event_lock, flags); 366 } 367 368 return 0; 369 } 370 371 static int i915_gem_request_info(struct seq_file *m, void *data) 372 { 373 struct drm_info_node *node = (struct drm_info_node *) m->private; 374 struct drm_device *dev = node->minor->dev; 375 drm_i915_private_t *dev_priv = dev->dev_private; 376 struct drm_i915_gem_request *gem_request; 377 int ret, count; 378 379 ret = mutex_lock_interruptible(&dev->struct_mutex); 380 if (ret) 381 return ret; 382 383 count = 0; 384 if (!list_empty(&dev_priv->ring[RCS].request_list)) { 385 seq_printf(m, "Render requests:\n"); 386 list_for_each_entry(gem_request, 387 &dev_priv->ring[RCS].request_list, 388 list) { 389 seq_printf(m, " %d @ %d\n", 390 gem_request->seqno, 391 (int) (jiffies - gem_request->emitted_jiffies)); 392 } 393 count++; 394 } 395 if (!list_empty(&dev_priv->ring[VCS].request_list)) { 396 seq_printf(m, "BSD requests:\n"); 397 list_for_each_entry(gem_request, 398 &dev_priv->ring[VCS].request_list, 399 list) { 400 seq_printf(m, " %d @ %d\n", 401 gem_request->seqno, 402 (int) (jiffies - gem_request->emitted_jiffies)); 403 } 404 count++; 405 } 406 if (!list_empty(&dev_priv->ring[BCS].request_list)) { 407 seq_printf(m, "BLT requests:\n"); 408 list_for_each_entry(gem_request, 409 &dev_priv->ring[BCS].request_list, 410 list) { 411 seq_printf(m, " %d @ %d\n", 412 gem_request->seqno, 413 (int) (jiffies - gem_request->emitted_jiffies)); 414 } 415 count++; 416 } 417 mutex_unlock(&dev->struct_mutex); 418 419 if (count == 0) 420 seq_printf(m, "No requests\n"); 421 422 return 0; 423 } 424 425 static void i915_ring_seqno_info(struct seq_file *m, 426 struct intel_ring_buffer *ring) 427 { 428 if (ring->get_seqno) { 429 seq_printf(m, "Current sequence (%s): %d\n", 430 ring->name, ring->get_seqno(ring)); 431 seq_printf(m, "Waiter sequence (%s): %d\n", 432 ring->name, ring->waiting_seqno); 433 seq_printf(m, "IRQ sequence (%s): %d\n", 434 ring->name, ring->irq_seqno); 435 } 436 } 437 438 static int i915_gem_seqno_info(struct seq_file *m, void *data) 439 { 440 struct drm_info_node *node = (struct drm_info_node *) m->private; 441 struct drm_device *dev = node->minor->dev; 442 drm_i915_private_t *dev_priv = dev->dev_private; 443 int ret, i; 444 445 ret = mutex_lock_interruptible(&dev->struct_mutex); 446 if (ret) 447 return ret; 448 449 for (i = 0; i < I915_NUM_RINGS; i++) 450 i915_ring_seqno_info(m, &dev_priv->ring[i]); 451 452 mutex_unlock(&dev->struct_mutex); 453 454 return 0; 455 } 456 457 458 static int i915_interrupt_info(struct seq_file *m, void *data) 459 { 460 struct drm_info_node *node = (struct drm_info_node *) m->private; 461 struct drm_device *dev = node->minor->dev; 462 drm_i915_private_t *dev_priv = dev->dev_private; 463 int ret, i, pipe; 464 465 ret = mutex_lock_interruptible(&dev->struct_mutex); 466 if (ret) 467 return ret; 468 469 if (!HAS_PCH_SPLIT(dev)) { 470 seq_printf(m, "Interrupt enable: %08x\n", 471 I915_READ(IER)); 472 seq_printf(m, "Interrupt identity: %08x\n", 473 I915_READ(IIR)); 474 seq_printf(m, "Interrupt mask: %08x\n", 475 I915_READ(IMR)); 476 for_each_pipe(pipe) 477 seq_printf(m, "Pipe %c stat: %08x\n", 478 pipe_name(pipe), 479 I915_READ(PIPESTAT(pipe))); 480 } else { 481 seq_printf(m, "North Display Interrupt enable: %08x\n", 482 I915_READ(DEIER)); 483 seq_printf(m, "North Display Interrupt identity: %08x\n", 484 I915_READ(DEIIR)); 485 seq_printf(m, "North Display Interrupt mask: %08x\n", 486 I915_READ(DEIMR)); 487 seq_printf(m, "South Display Interrupt enable: %08x\n", 488 I915_READ(SDEIER)); 489 seq_printf(m, "South Display Interrupt identity: %08x\n", 490 I915_READ(SDEIIR)); 491 seq_printf(m, "South Display Interrupt mask: %08x\n", 492 I915_READ(SDEIMR)); 493 seq_printf(m, "Graphics Interrupt enable: %08x\n", 494 I915_READ(GTIER)); 495 seq_printf(m, "Graphics Interrupt identity: %08x\n", 496 I915_READ(GTIIR)); 497 seq_printf(m, "Graphics Interrupt mask: %08x\n", 498 I915_READ(GTIMR)); 499 } 500 seq_printf(m, "Interrupts received: %d\n", 501 atomic_read(&dev_priv->irq_received)); 502 for (i = 0; i < I915_NUM_RINGS; i++) { 503 if (IS_GEN6(dev) || IS_GEN7(dev)) { 504 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 505 dev_priv->ring[i].name, 506 I915_READ_IMR(&dev_priv->ring[i])); 507 } 508 i915_ring_seqno_info(m, &dev_priv->ring[i]); 509 } 510 mutex_unlock(&dev->struct_mutex); 511 512 return 0; 513 } 514 515 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 516 { 517 struct drm_info_node *node = (struct drm_info_node *) m->private; 518 struct drm_device *dev = node->minor->dev; 519 drm_i915_private_t *dev_priv = dev->dev_private; 520 int i, ret; 521 522 ret = mutex_lock_interruptible(&dev->struct_mutex); 523 if (ret) 524 return ret; 525 526 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 527 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 528 for (i = 0; i < dev_priv->num_fence_regs; i++) { 529 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 530 531 seq_printf(m, "Fenced object[%2d] = ", i); 532 if (obj == NULL) 533 seq_printf(m, "unused"); 534 else 535 describe_obj(m, obj); 536 seq_printf(m, "\n"); 537 } 538 539 mutex_unlock(&dev->struct_mutex); 540 return 0; 541 } 542 543 static int i915_hws_info(struct seq_file *m, void *data) 544 { 545 struct drm_info_node *node = (struct drm_info_node *) m->private; 546 struct drm_device *dev = node->minor->dev; 547 drm_i915_private_t *dev_priv = dev->dev_private; 548 struct intel_ring_buffer *ring; 549 const volatile u32 __iomem *hws; 550 int i; 551 552 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 553 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 554 if (hws == NULL) 555 return 0; 556 557 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 558 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 559 i * 4, 560 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 561 } 562 return 0; 563 } 564 565 static void i915_dump_object(struct seq_file *m, 566 struct io_mapping *mapping, 567 struct drm_i915_gem_object *obj) 568 { 569 int page, page_count, i; 570 571 page_count = obj->base.size / PAGE_SIZE; 572 for (page = 0; page < page_count; page++) { 573 u32 *mem = io_mapping_map_wc(mapping, 574 obj->gtt_offset + page * PAGE_SIZE); 575 for (i = 0; i < PAGE_SIZE; i += 4) 576 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 577 io_mapping_unmap(mem); 578 } 579 } 580 581 static int i915_batchbuffer_info(struct seq_file *m, void *data) 582 { 583 struct drm_info_node *node = (struct drm_info_node *) m->private; 584 struct drm_device *dev = node->minor->dev; 585 drm_i915_private_t *dev_priv = dev->dev_private; 586 struct drm_i915_gem_object *obj; 587 int ret; 588 589 ret = mutex_lock_interruptible(&dev->struct_mutex); 590 if (ret) 591 return ret; 592 593 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 594 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { 595 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); 596 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); 597 } 598 } 599 600 mutex_unlock(&dev->struct_mutex); 601 return 0; 602 } 603 604 static int i915_ringbuffer_data(struct seq_file *m, void *data) 605 { 606 struct drm_info_node *node = (struct drm_info_node *) m->private; 607 struct drm_device *dev = node->minor->dev; 608 drm_i915_private_t *dev_priv = dev->dev_private; 609 struct intel_ring_buffer *ring; 610 int ret; 611 612 ret = mutex_lock_interruptible(&dev->struct_mutex); 613 if (ret) 614 return ret; 615 616 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 617 if (!ring->obj) { 618 seq_printf(m, "No ringbuffer setup\n"); 619 } else { 620 const u8 __iomem *virt = ring->virtual_start; 621 uint32_t off; 622 623 for (off = 0; off < ring->size; off += 4) { 624 uint32_t *ptr = (uint32_t *)(virt + off); 625 seq_printf(m, "%08x : %08x\n", off, *ptr); 626 } 627 } 628 mutex_unlock(&dev->struct_mutex); 629 630 return 0; 631 } 632 633 static int i915_ringbuffer_info(struct seq_file *m, void *data) 634 { 635 struct drm_info_node *node = (struct drm_info_node *) m->private; 636 struct drm_device *dev = node->minor->dev; 637 drm_i915_private_t *dev_priv = dev->dev_private; 638 struct intel_ring_buffer *ring; 639 640 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 641 if (ring->size == 0) 642 return 0; 643 644 seq_printf(m, "Ring %s:\n", ring->name); 645 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 646 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 647 seq_printf(m, " Size : %08x\n", ring->size); 648 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); 649 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); 650 if (IS_GEN6(dev)) { 651 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); 652 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); 653 } 654 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 655 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 656 657 return 0; 658 } 659 660 static const char *ring_str(int ring) 661 { 662 switch (ring) { 663 case RING_RENDER: return " render"; 664 case RING_BSD: return " bsd"; 665 case RING_BLT: return " blt"; 666 default: return ""; 667 } 668 } 669 670 static const char *pin_flag(int pinned) 671 { 672 if (pinned > 0) 673 return " P"; 674 else if (pinned < 0) 675 return " p"; 676 else 677 return ""; 678 } 679 680 static const char *tiling_flag(int tiling) 681 { 682 switch (tiling) { 683 default: 684 case I915_TILING_NONE: return ""; 685 case I915_TILING_X: return " X"; 686 case I915_TILING_Y: return " Y"; 687 } 688 } 689 690 static const char *dirty_flag(int dirty) 691 { 692 return dirty ? " dirty" : ""; 693 } 694 695 static const char *purgeable_flag(int purgeable) 696 { 697 return purgeable ? " purgeable" : ""; 698 } 699 700 static void print_error_buffers(struct seq_file *m, 701 const char *name, 702 struct drm_i915_error_buffer *err, 703 int count) 704 { 705 seq_printf(m, "%s [%d]:\n", name, count); 706 707 while (count--) { 708 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", 709 err->gtt_offset, 710 err->size, 711 err->read_domains, 712 err->write_domain, 713 err->seqno, 714 pin_flag(err->pinned), 715 tiling_flag(err->tiling), 716 dirty_flag(err->dirty), 717 purgeable_flag(err->purgeable), 718 ring_str(err->ring), 719 cache_level_str(err->cache_level)); 720 721 if (err->name) 722 seq_printf(m, " (name: %d)", err->name); 723 if (err->fence_reg != I915_FENCE_REG_NONE) 724 seq_printf(m, " (fence: %d)", err->fence_reg); 725 726 seq_printf(m, "\n"); 727 err++; 728 } 729 } 730 731 static int i915_error_state(struct seq_file *m, void *unused) 732 { 733 struct drm_info_node *node = (struct drm_info_node *) m->private; 734 struct drm_device *dev = node->minor->dev; 735 drm_i915_private_t *dev_priv = dev->dev_private; 736 struct drm_i915_error_state *error; 737 unsigned long flags; 738 int i, page, offset, elt; 739 740 spin_lock_irqsave(&dev_priv->error_lock, flags); 741 if (!dev_priv->first_error) { 742 seq_printf(m, "no error state collected\n"); 743 goto out; 744 } 745 746 error = dev_priv->first_error; 747 748 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 749 error->time.tv_usec); 750 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 751 seq_printf(m, "EIR: 0x%08x\n", error->eir); 752 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 753 if (INTEL_INFO(dev)->gen >= 6) { 754 seq_printf(m, "ERROR: 0x%08x\n", error->error); 755 seq_printf(m, "Blitter command stream:\n"); 756 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); 757 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); 758 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); 759 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); 760 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); 761 seq_printf(m, "Video (BSD) command stream:\n"); 762 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); 763 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); 764 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); 765 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); 766 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); 767 } 768 seq_printf(m, "Render command stream:\n"); 769 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); 770 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 771 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 772 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 773 if (INTEL_INFO(dev)->gen >= 4) { 774 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 775 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 776 } 777 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 778 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 779 780 for (i = 0; i < dev_priv->num_fence_regs; i++) 781 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 782 783 if (error->active_bo) 784 print_error_buffers(m, "Active", 785 error->active_bo, 786 error->active_bo_count); 787 788 if (error->pinned_bo) 789 print_error_buffers(m, "Pinned", 790 error->pinned_bo, 791 error->pinned_bo_count); 792 793 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 794 if (error->batchbuffer[i]) { 795 struct drm_i915_error_object *obj = error->batchbuffer[i]; 796 797 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 798 dev_priv->ring[i].name, 799 obj->gtt_offset); 800 offset = 0; 801 for (page = 0; page < obj->page_count; page++) { 802 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 803 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 804 offset += 4; 805 } 806 } 807 } 808 } 809 810 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { 811 if (error->ringbuffer[i]) { 812 struct drm_i915_error_object *obj = error->ringbuffer[i]; 813 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 814 dev_priv->ring[i].name, 815 obj->gtt_offset); 816 offset = 0; 817 for (page = 0; page < obj->page_count; page++) { 818 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 819 seq_printf(m, "%08x : %08x\n", 820 offset, 821 obj->pages[page][elt]); 822 offset += 4; 823 } 824 } 825 } 826 } 827 828 if (error->overlay) 829 intel_overlay_print_error_state(m, error->overlay); 830 831 if (error->display) 832 intel_display_print_error_state(m, dev, error->display); 833 834 out: 835 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 836 837 return 0; 838 } 839 840 static int i915_rstdby_delays(struct seq_file *m, void *unused) 841 { 842 struct drm_info_node *node = (struct drm_info_node *) m->private; 843 struct drm_device *dev = node->minor->dev; 844 drm_i915_private_t *dev_priv = dev->dev_private; 845 u16 crstanddelay = I915_READ16(CRSTANDVID); 846 847 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 848 849 return 0; 850 } 851 852 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 853 { 854 struct drm_info_node *node = (struct drm_info_node *) m->private; 855 struct drm_device *dev = node->minor->dev; 856 drm_i915_private_t *dev_priv = dev->dev_private; 857 int ret; 858 859 if (IS_GEN5(dev)) { 860 u16 rgvswctl = I915_READ16(MEMSWCTL); 861 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 862 863 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 864 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 865 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 866 MEMSTAT_VID_SHIFT); 867 seq_printf(m, "Current P-state: %d\n", 868 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 869 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 870 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 871 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 872 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 873 u32 rpstat; 874 u32 rpupei, rpcurup, rpprevup; 875 u32 rpdownei, rpcurdown, rpprevdown; 876 int max_freq; 877 878 /* RPSTAT1 is in the GT power well */ 879 ret = mutex_lock_interruptible(&dev->struct_mutex); 880 if (ret) 881 return ret; 882 883 gen6_gt_force_wake_get(dev_priv); 884 885 rpstat = I915_READ(GEN6_RPSTAT1); 886 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 887 rpcurup = I915_READ(GEN6_RP_CUR_UP); 888 rpprevup = I915_READ(GEN6_RP_PREV_UP); 889 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 890 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 891 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 892 893 gen6_gt_force_wake_put(dev_priv); 894 mutex_unlock(&dev->struct_mutex); 895 896 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 897 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 898 seq_printf(m, "Render p-state ratio: %d\n", 899 (gt_perf_status & 0xff00) >> 8); 900 seq_printf(m, "Render p-state VID: %d\n", 901 gt_perf_status & 0xff); 902 seq_printf(m, "Render p-state limit: %d\n", 903 rp_state_limits & 0xff); 904 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 905 GEN6_CAGF_SHIFT) * 50); 906 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 907 GEN6_CURICONT_MASK); 908 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 909 GEN6_CURBSYTAVG_MASK); 910 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 911 GEN6_CURBSYTAVG_MASK); 912 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 913 GEN6_CURIAVG_MASK); 914 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 915 GEN6_CURBSYTAVG_MASK); 916 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 917 GEN6_CURBSYTAVG_MASK); 918 919 max_freq = (rp_state_cap & 0xff0000) >> 16; 920 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 921 max_freq * 50); 922 923 max_freq = (rp_state_cap & 0xff00) >> 8; 924 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 925 max_freq * 50); 926 927 max_freq = rp_state_cap & 0xff; 928 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 929 max_freq * 50); 930 } else { 931 seq_printf(m, "no P-state info available\n"); 932 } 933 934 return 0; 935 } 936 937 static int i915_delayfreq_table(struct seq_file *m, void *unused) 938 { 939 struct drm_info_node *node = (struct drm_info_node *) m->private; 940 struct drm_device *dev = node->minor->dev; 941 drm_i915_private_t *dev_priv = dev->dev_private; 942 u32 delayfreq; 943 int i; 944 945 for (i = 0; i < 16; i++) { 946 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 947 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 948 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 949 } 950 951 return 0; 952 } 953 954 static inline int MAP_TO_MV(int map) 955 { 956 return 1250 - (map * 25); 957 } 958 959 static int i915_inttoext_table(struct seq_file *m, void *unused) 960 { 961 struct drm_info_node *node = (struct drm_info_node *) m->private; 962 struct drm_device *dev = node->minor->dev; 963 drm_i915_private_t *dev_priv = dev->dev_private; 964 u32 inttoext; 965 int i; 966 967 for (i = 1; i <= 32; i++) { 968 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 969 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 970 } 971 972 return 0; 973 } 974 975 static int i915_drpc_info(struct seq_file *m, void *unused) 976 { 977 struct drm_info_node *node = (struct drm_info_node *) m->private; 978 struct drm_device *dev = node->minor->dev; 979 drm_i915_private_t *dev_priv = dev->dev_private; 980 u32 rgvmodectl = I915_READ(MEMMODECTL); 981 u32 rstdbyctl = I915_READ(RSTDBYCTL); 982 u16 crstandvid = I915_READ16(CRSTANDVID); 983 984 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 985 "yes" : "no"); 986 seq_printf(m, "Boost freq: %d\n", 987 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 988 MEMMODE_BOOST_FREQ_SHIFT); 989 seq_printf(m, "HW control enabled: %s\n", 990 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 991 seq_printf(m, "SW control enabled: %s\n", 992 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 993 seq_printf(m, "Gated voltage change: %s\n", 994 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 995 seq_printf(m, "Starting frequency: P%d\n", 996 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 997 seq_printf(m, "Max P-state: P%d\n", 998 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 999 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1000 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1001 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1002 seq_printf(m, "Render standby enabled: %s\n", 1003 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1004 seq_printf(m, "Current RS state: "); 1005 switch (rstdbyctl & RSX_STATUS_MASK) { 1006 case RSX_STATUS_ON: 1007 seq_printf(m, "on\n"); 1008 break; 1009 case RSX_STATUS_RC1: 1010 seq_printf(m, "RC1\n"); 1011 break; 1012 case RSX_STATUS_RC1E: 1013 seq_printf(m, "RC1E\n"); 1014 break; 1015 case RSX_STATUS_RS1: 1016 seq_printf(m, "RS1\n"); 1017 break; 1018 case RSX_STATUS_RS2: 1019 seq_printf(m, "RS2 (RC6)\n"); 1020 break; 1021 case RSX_STATUS_RS3: 1022 seq_printf(m, "RC3 (RC6+)\n"); 1023 break; 1024 default: 1025 seq_printf(m, "unknown\n"); 1026 break; 1027 } 1028 1029 return 0; 1030 } 1031 1032 static int i915_fbc_status(struct seq_file *m, void *unused) 1033 { 1034 struct drm_info_node *node = (struct drm_info_node *) m->private; 1035 struct drm_device *dev = node->minor->dev; 1036 drm_i915_private_t *dev_priv = dev->dev_private; 1037 1038 if (!I915_HAS_FBC(dev)) { 1039 seq_printf(m, "FBC unsupported on this chipset\n"); 1040 return 0; 1041 } 1042 1043 if (intel_fbc_enabled(dev)) { 1044 seq_printf(m, "FBC enabled\n"); 1045 } else { 1046 seq_printf(m, "FBC disabled: "); 1047 switch (dev_priv->no_fbc_reason) { 1048 case FBC_NO_OUTPUT: 1049 seq_printf(m, "no outputs"); 1050 break; 1051 case FBC_STOLEN_TOO_SMALL: 1052 seq_printf(m, "not enough stolen memory"); 1053 break; 1054 case FBC_UNSUPPORTED_MODE: 1055 seq_printf(m, "mode not supported"); 1056 break; 1057 case FBC_MODE_TOO_LARGE: 1058 seq_printf(m, "mode too large"); 1059 break; 1060 case FBC_BAD_PLANE: 1061 seq_printf(m, "FBC unsupported on plane"); 1062 break; 1063 case FBC_NOT_TILED: 1064 seq_printf(m, "scanout buffer not tiled"); 1065 break; 1066 case FBC_MULTIPLE_PIPES: 1067 seq_printf(m, "multiple pipes are enabled"); 1068 break; 1069 case FBC_MODULE_PARAM: 1070 seq_printf(m, "disabled per module param (default off)"); 1071 break; 1072 default: 1073 seq_printf(m, "unknown reason"); 1074 } 1075 seq_printf(m, "\n"); 1076 } 1077 return 0; 1078 } 1079 1080 static int i915_sr_status(struct seq_file *m, void *unused) 1081 { 1082 struct drm_info_node *node = (struct drm_info_node *) m->private; 1083 struct drm_device *dev = node->minor->dev; 1084 drm_i915_private_t *dev_priv = dev->dev_private; 1085 bool sr_enabled = false; 1086 1087 if (HAS_PCH_SPLIT(dev)) 1088 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1089 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1090 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1091 else if (IS_I915GM(dev)) 1092 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1093 else if (IS_PINEVIEW(dev)) 1094 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1095 1096 seq_printf(m, "self-refresh: %s\n", 1097 sr_enabled ? "enabled" : "disabled"); 1098 1099 return 0; 1100 } 1101 1102 static int i915_emon_status(struct seq_file *m, void *unused) 1103 { 1104 struct drm_info_node *node = (struct drm_info_node *) m->private; 1105 struct drm_device *dev = node->minor->dev; 1106 drm_i915_private_t *dev_priv = dev->dev_private; 1107 unsigned long temp, chipset, gfx; 1108 int ret; 1109 1110 ret = mutex_lock_interruptible(&dev->struct_mutex); 1111 if (ret) 1112 return ret; 1113 1114 temp = i915_mch_val(dev_priv); 1115 chipset = i915_chipset_val(dev_priv); 1116 gfx = i915_gfx_val(dev_priv); 1117 mutex_unlock(&dev->struct_mutex); 1118 1119 seq_printf(m, "GMCH temp: %ld\n", temp); 1120 seq_printf(m, "Chipset power: %ld\n", chipset); 1121 seq_printf(m, "GFX power: %ld\n", gfx); 1122 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1123 1124 return 0; 1125 } 1126 1127 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1128 { 1129 struct drm_info_node *node = (struct drm_info_node *) m->private; 1130 struct drm_device *dev = node->minor->dev; 1131 drm_i915_private_t *dev_priv = dev->dev_private; 1132 int ret; 1133 int gpu_freq, ia_freq; 1134 1135 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1136 seq_printf(m, "unsupported on this chipset\n"); 1137 return 0; 1138 } 1139 1140 ret = mutex_lock_interruptible(&dev->struct_mutex); 1141 if (ret) 1142 return ret; 1143 1144 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1145 1146 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1147 gpu_freq++) { 1148 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1149 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1150 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1151 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 1152 GEN6_PCODE_READY) == 0, 10)) { 1153 DRM_ERROR("pcode read of freq table timed out\n"); 1154 continue; 1155 } 1156 ia_freq = I915_READ(GEN6_PCODE_DATA); 1157 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1158 } 1159 1160 mutex_unlock(&dev->struct_mutex); 1161 1162 return 0; 1163 } 1164 1165 static int i915_gfxec(struct seq_file *m, void *unused) 1166 { 1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1168 struct drm_device *dev = node->minor->dev; 1169 drm_i915_private_t *dev_priv = dev->dev_private; 1170 1171 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1172 1173 return 0; 1174 } 1175 1176 static int i915_opregion(struct seq_file *m, void *unused) 1177 { 1178 struct drm_info_node *node = (struct drm_info_node *) m->private; 1179 struct drm_device *dev = node->minor->dev; 1180 drm_i915_private_t *dev_priv = dev->dev_private; 1181 struct intel_opregion *opregion = &dev_priv->opregion; 1182 int ret; 1183 1184 ret = mutex_lock_interruptible(&dev->struct_mutex); 1185 if (ret) 1186 return ret; 1187 1188 if (opregion->header) 1189 seq_write(m, opregion->header, OPREGION_SIZE); 1190 1191 mutex_unlock(&dev->struct_mutex); 1192 1193 return 0; 1194 } 1195 1196 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1197 { 1198 struct drm_info_node *node = (struct drm_info_node *) m->private; 1199 struct drm_device *dev = node->minor->dev; 1200 drm_i915_private_t *dev_priv = dev->dev_private; 1201 struct intel_fbdev *ifbdev; 1202 struct intel_framebuffer *fb; 1203 int ret; 1204 1205 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1206 if (ret) 1207 return ret; 1208 1209 ifbdev = dev_priv->fbdev; 1210 fb = to_intel_framebuffer(ifbdev->helper.fb); 1211 1212 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1213 fb->base.width, 1214 fb->base.height, 1215 fb->base.depth, 1216 fb->base.bits_per_pixel); 1217 describe_obj(m, fb->obj); 1218 seq_printf(m, "\n"); 1219 1220 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1221 if (&fb->base == ifbdev->helper.fb) 1222 continue; 1223 1224 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1225 fb->base.width, 1226 fb->base.height, 1227 fb->base.depth, 1228 fb->base.bits_per_pixel); 1229 describe_obj(m, fb->obj); 1230 seq_printf(m, "\n"); 1231 } 1232 1233 mutex_unlock(&dev->mode_config.mutex); 1234 1235 return 0; 1236 } 1237 1238 static int i915_context_status(struct seq_file *m, void *unused) 1239 { 1240 struct drm_info_node *node = (struct drm_info_node *) m->private; 1241 struct drm_device *dev = node->minor->dev; 1242 drm_i915_private_t *dev_priv = dev->dev_private; 1243 int ret; 1244 1245 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1246 if (ret) 1247 return ret; 1248 1249 if (dev_priv->pwrctx) { 1250 seq_printf(m, "power context "); 1251 describe_obj(m, dev_priv->pwrctx); 1252 seq_printf(m, "\n"); 1253 } 1254 1255 if (dev_priv->renderctx) { 1256 seq_printf(m, "render context "); 1257 describe_obj(m, dev_priv->renderctx); 1258 seq_printf(m, "\n"); 1259 } 1260 1261 mutex_unlock(&dev->mode_config.mutex); 1262 1263 return 0; 1264 } 1265 1266 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1267 { 1268 struct drm_info_node *node = (struct drm_info_node *) m->private; 1269 struct drm_device *dev = node->minor->dev; 1270 struct drm_i915_private *dev_priv = dev->dev_private; 1271 1272 seq_printf(m, "forcewake count = %d\n", 1273 atomic_read(&dev_priv->forcewake_count)); 1274 1275 return 0; 1276 } 1277 1278 static int 1279 i915_wedged_open(struct inode *inode, 1280 struct file *filp) 1281 { 1282 filp->private_data = inode->i_private; 1283 return 0; 1284 } 1285 1286 static ssize_t 1287 i915_wedged_read(struct file *filp, 1288 char __user *ubuf, 1289 size_t max, 1290 loff_t *ppos) 1291 { 1292 struct drm_device *dev = filp->private_data; 1293 drm_i915_private_t *dev_priv = dev->dev_private; 1294 char buf[80]; 1295 int len; 1296 1297 len = snprintf(buf, sizeof(buf), 1298 "wedged : %d\n", 1299 atomic_read(&dev_priv->mm.wedged)); 1300 1301 if (len > sizeof(buf)) 1302 len = sizeof(buf); 1303 1304 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1305 } 1306 1307 static ssize_t 1308 i915_wedged_write(struct file *filp, 1309 const char __user *ubuf, 1310 size_t cnt, 1311 loff_t *ppos) 1312 { 1313 struct drm_device *dev = filp->private_data; 1314 char buf[20]; 1315 int val = 1; 1316 1317 if (cnt > 0) { 1318 if (cnt > sizeof(buf) - 1) 1319 return -EINVAL; 1320 1321 if (copy_from_user(buf, ubuf, cnt)) 1322 return -EFAULT; 1323 buf[cnt] = 0; 1324 1325 val = simple_strtoul(buf, NULL, 0); 1326 } 1327 1328 DRM_INFO("Manually setting wedged to %d\n", val); 1329 i915_handle_error(dev, val); 1330 1331 return cnt; 1332 } 1333 1334 static const struct file_operations i915_wedged_fops = { 1335 .owner = THIS_MODULE, 1336 .open = i915_wedged_open, 1337 .read = i915_wedged_read, 1338 .write = i915_wedged_write, 1339 .llseek = default_llseek, 1340 }; 1341 1342 static int 1343 i915_max_freq_open(struct inode *inode, 1344 struct file *filp) 1345 { 1346 filp->private_data = inode->i_private; 1347 return 0; 1348 } 1349 1350 static ssize_t 1351 i915_max_freq_read(struct file *filp, 1352 char __user *ubuf, 1353 size_t max, 1354 loff_t *ppos) 1355 { 1356 struct drm_device *dev = filp->private_data; 1357 drm_i915_private_t *dev_priv = dev->dev_private; 1358 char buf[80]; 1359 int len; 1360 1361 len = snprintf(buf, sizeof(buf), 1362 "max freq: %d\n", dev_priv->max_delay * 50); 1363 1364 if (len > sizeof(buf)) 1365 len = sizeof(buf); 1366 1367 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1368 } 1369 1370 static ssize_t 1371 i915_max_freq_write(struct file *filp, 1372 const char __user *ubuf, 1373 size_t cnt, 1374 loff_t *ppos) 1375 { 1376 struct drm_device *dev = filp->private_data; 1377 struct drm_i915_private *dev_priv = dev->dev_private; 1378 char buf[20]; 1379 int val = 1; 1380 1381 if (cnt > 0) { 1382 if (cnt > sizeof(buf) - 1) 1383 return -EINVAL; 1384 1385 if (copy_from_user(buf, ubuf, cnt)) 1386 return -EFAULT; 1387 buf[cnt] = 0; 1388 1389 val = simple_strtoul(buf, NULL, 0); 1390 } 1391 1392 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1393 1394 /* 1395 * Turbo will still be enabled, but won't go above the set value. 1396 */ 1397 dev_priv->max_delay = val / 50; 1398 1399 gen6_set_rps(dev, val / 50); 1400 1401 return cnt; 1402 } 1403 1404 static const struct file_operations i915_max_freq_fops = { 1405 .owner = THIS_MODULE, 1406 .open = i915_max_freq_open, 1407 .read = i915_max_freq_read, 1408 .write = i915_max_freq_write, 1409 .llseek = default_llseek, 1410 }; 1411 1412 static int 1413 i915_cache_sharing_open(struct inode *inode, 1414 struct file *filp) 1415 { 1416 filp->private_data = inode->i_private; 1417 return 0; 1418 } 1419 1420 static ssize_t 1421 i915_cache_sharing_read(struct file *filp, 1422 char __user *ubuf, 1423 size_t max, 1424 loff_t *ppos) 1425 { 1426 struct drm_device *dev = filp->private_data; 1427 drm_i915_private_t *dev_priv = dev->dev_private; 1428 char buf[80]; 1429 u32 snpcr; 1430 int len; 1431 1432 mutex_lock(&dev_priv->dev->struct_mutex); 1433 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1434 mutex_unlock(&dev_priv->dev->struct_mutex); 1435 1436 len = snprintf(buf, sizeof(buf), 1437 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1438 GEN6_MBC_SNPCR_SHIFT); 1439 1440 if (len > sizeof(buf)) 1441 len = sizeof(buf); 1442 1443 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1444 } 1445 1446 static ssize_t 1447 i915_cache_sharing_write(struct file *filp, 1448 const char __user *ubuf, 1449 size_t cnt, 1450 loff_t *ppos) 1451 { 1452 struct drm_device *dev = filp->private_data; 1453 struct drm_i915_private *dev_priv = dev->dev_private; 1454 char buf[20]; 1455 u32 snpcr; 1456 int val = 1; 1457 1458 if (cnt > 0) { 1459 if (cnt > sizeof(buf) - 1) 1460 return -EINVAL; 1461 1462 if (copy_from_user(buf, ubuf, cnt)) 1463 return -EFAULT; 1464 buf[cnt] = 0; 1465 1466 val = simple_strtoul(buf, NULL, 0); 1467 } 1468 1469 if (val < 0 || val > 3) 1470 return -EINVAL; 1471 1472 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 1473 1474 /* Update the cache sharing policy here as well */ 1475 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1476 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1477 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1478 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1479 1480 return cnt; 1481 } 1482 1483 static const struct file_operations i915_cache_sharing_fops = { 1484 .owner = THIS_MODULE, 1485 .open = i915_cache_sharing_open, 1486 .read = i915_cache_sharing_read, 1487 .write = i915_cache_sharing_write, 1488 .llseek = default_llseek, 1489 }; 1490 1491 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1492 * allocated we need to hook into the minor for release. */ 1493 static int 1494 drm_add_fake_info_node(struct drm_minor *minor, 1495 struct dentry *ent, 1496 const void *key) 1497 { 1498 struct drm_info_node *node; 1499 1500 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1501 if (node == NULL) { 1502 debugfs_remove(ent); 1503 return -ENOMEM; 1504 } 1505 1506 node->minor = minor; 1507 node->dent = ent; 1508 node->info_ent = (void *) key; 1509 1510 mutex_lock(&minor->debugfs_lock); 1511 list_add(&node->list, &minor->debugfs_list); 1512 mutex_unlock(&minor->debugfs_lock); 1513 1514 return 0; 1515 } 1516 1517 static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) 1518 { 1519 struct drm_device *dev = minor->dev; 1520 struct dentry *ent; 1521 1522 ent = debugfs_create_file("i915_wedged", 1523 S_IRUGO | S_IWUSR, 1524 root, dev, 1525 &i915_wedged_fops); 1526 if (IS_ERR(ent)) 1527 return PTR_ERR(ent); 1528 1529 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); 1530 } 1531 1532 static int i915_forcewake_open(struct inode *inode, struct file *file) 1533 { 1534 struct drm_device *dev = inode->i_private; 1535 struct drm_i915_private *dev_priv = dev->dev_private; 1536 int ret; 1537 1538 if (!IS_GEN6(dev)) 1539 return 0; 1540 1541 ret = mutex_lock_interruptible(&dev->struct_mutex); 1542 if (ret) 1543 return ret; 1544 gen6_gt_force_wake_get(dev_priv); 1545 mutex_unlock(&dev->struct_mutex); 1546 1547 return 0; 1548 } 1549 1550 int i915_forcewake_release(struct inode *inode, struct file *file) 1551 { 1552 struct drm_device *dev = inode->i_private; 1553 struct drm_i915_private *dev_priv = dev->dev_private; 1554 1555 if (!IS_GEN6(dev)) 1556 return 0; 1557 1558 /* 1559 * It's bad that we can potentially hang userspace if struct_mutex gets 1560 * forever stuck. However, if we cannot acquire this lock it means that 1561 * almost certainly the driver has hung, is not unload-able. Therefore 1562 * hanging here is probably a minor inconvenience not to be seen my 1563 * almost every user. 1564 */ 1565 mutex_lock(&dev->struct_mutex); 1566 gen6_gt_force_wake_put(dev_priv); 1567 mutex_unlock(&dev->struct_mutex); 1568 1569 return 0; 1570 } 1571 1572 static const struct file_operations i915_forcewake_fops = { 1573 .owner = THIS_MODULE, 1574 .open = i915_forcewake_open, 1575 .release = i915_forcewake_release, 1576 }; 1577 1578 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 1579 { 1580 struct drm_device *dev = minor->dev; 1581 struct dentry *ent; 1582 1583 ent = debugfs_create_file("i915_forcewake_user", 1584 S_IRUSR, 1585 root, dev, 1586 &i915_forcewake_fops); 1587 if (IS_ERR(ent)) 1588 return PTR_ERR(ent); 1589 1590 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1591 } 1592 1593 static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor) 1594 { 1595 struct drm_device *dev = minor->dev; 1596 struct dentry *ent; 1597 1598 ent = debugfs_create_file("i915_max_freq", 1599 S_IRUGO | S_IWUSR, 1600 root, dev, 1601 &i915_max_freq_fops); 1602 if (IS_ERR(ent)) 1603 return PTR_ERR(ent); 1604 1605 return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops); 1606 } 1607 1608 static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor) 1609 { 1610 struct drm_device *dev = minor->dev; 1611 struct dentry *ent; 1612 1613 ent = debugfs_create_file("i915_cache_sharing", 1614 S_IRUGO | S_IWUSR, 1615 root, dev, 1616 &i915_cache_sharing_fops); 1617 if (IS_ERR(ent)) 1618 return PTR_ERR(ent); 1619 1620 return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops); 1621 } 1622 1623 static struct drm_info_list i915_debugfs_list[] = { 1624 {"i915_capabilities", i915_capabilities, 0}, 1625 {"i915_gem_objects", i915_gem_object_info, 0}, 1626 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1627 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1628 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 1629 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 1630 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, 1631 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, 1632 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1633 {"i915_gem_request", i915_gem_request_info, 0}, 1634 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1635 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1636 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1637 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1638 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1639 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1640 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, 1641 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, 1642 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, 1643 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, 1644 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, 1645 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, 1646 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 1647 {"i915_error_state", i915_error_state, 0}, 1648 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1649 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1650 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1651 {"i915_inttoext_table", i915_inttoext_table, 0}, 1652 {"i915_drpc_info", i915_drpc_info, 0}, 1653 {"i915_emon_status", i915_emon_status, 0}, 1654 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 1655 {"i915_gfxec", i915_gfxec, 0}, 1656 {"i915_fbc_status", i915_fbc_status, 0}, 1657 {"i915_sr_status", i915_sr_status, 0}, 1658 {"i915_opregion", i915_opregion, 0}, 1659 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1660 {"i915_context_status", i915_context_status, 0}, 1661 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1662 }; 1663 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1664 1665 int i915_debugfs_init(struct drm_minor *minor) 1666 { 1667 int ret; 1668 1669 ret = i915_wedged_create(minor->debugfs_root, minor); 1670 if (ret) 1671 return ret; 1672 1673 ret = i915_forcewake_create(minor->debugfs_root, minor); 1674 if (ret) 1675 return ret; 1676 ret = i915_max_freq_create(minor->debugfs_root, minor); 1677 if (ret) 1678 return ret; 1679 ret = i915_cache_sharing_create(minor->debugfs_root, minor); 1680 if (ret) 1681 return ret; 1682 1683 return drm_debugfs_create_files(i915_debugfs_list, 1684 I915_DEBUGFS_ENTRIES, 1685 minor->debugfs_root, minor); 1686 } 1687 1688 void i915_debugfs_cleanup(struct drm_minor *minor) 1689 { 1690 drm_debugfs_remove_files(i915_debugfs_list, 1691 I915_DEBUGFS_ENTRIES, minor); 1692 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1693 1, minor); 1694 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1695 1, minor); 1696 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 1697 1, minor); 1698 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 1699 1, minor); 1700 } 1701 1702 #endif /* CONFIG_DEBUG_FS */ 1703