1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include <linux/export.h> 33 #include "drmP.h" 34 #include "drm.h" 35 #include "intel_drv.h" 36 #include "intel_ringbuffer.h" 37 #include "i915_drm.h" 38 #include "i915_drv.h" 39 40 #define DRM_I915_RING_DEBUG 1 41 42 43 #if defined(CONFIG_DEBUG_FS) 44 45 enum { 46 ACTIVE_LIST, 47 FLUSHING_LIST, 48 INACTIVE_LIST, 49 PINNED_LIST, 50 DEFERRED_FREE_LIST, 51 }; 52 53 static const char *yesno(int v) 54 { 55 return v ? "yes" : "no"; 56 } 57 58 static int i915_capabilities(struct seq_file *m, void *data) 59 { 60 struct drm_info_node *node = (struct drm_info_node *) m->private; 61 struct drm_device *dev = node->minor->dev; 62 const struct intel_device_info *info = INTEL_INFO(dev); 63 64 seq_printf(m, "gen: %d\n", info->gen); 65 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 66 B(is_mobile); 67 B(is_i85x); 68 B(is_i915g); 69 B(is_i945gm); 70 B(is_g33); 71 B(need_gfx_hws); 72 B(is_g4x); 73 B(is_pineview); 74 B(is_broadwater); 75 B(is_crestline); 76 B(has_fbc); 77 B(has_pipe_cxsr); 78 B(has_hotplug); 79 B(cursor_needs_physical); 80 B(has_overlay); 81 B(overlay_needs_physical); 82 B(supports_tv); 83 B(has_bsd_ring); 84 B(has_blt_ring); 85 #undef B 86 87 return 0; 88 } 89 90 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 91 { 92 if (obj->user_pin_count > 0) 93 return "P"; 94 else if (obj->pin_count > 0) 95 return "p"; 96 else 97 return " "; 98 } 99 100 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 101 { 102 switch (obj->tiling_mode) { 103 default: 104 case I915_TILING_NONE: return " "; 105 case I915_TILING_X: return "X"; 106 case I915_TILING_Y: return "Y"; 107 } 108 } 109 110 static const char *cache_level_str(int type) 111 { 112 switch (type) { 113 case I915_CACHE_NONE: return " uncached"; 114 case I915_CACHE_LLC: return " snooped (LLC)"; 115 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 116 default: return ""; 117 } 118 } 119 120 static void 121 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 122 { 123 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s", 124 &obj->base, 125 get_pin_flag(obj), 126 get_tiling_flag(obj), 127 obj->base.size, 128 obj->base.read_domains, 129 obj->base.write_domain, 130 obj->last_rendering_seqno, 131 obj->last_fenced_seqno, 132 cache_level_str(obj->cache_level), 133 obj->dirty ? " dirty" : "", 134 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 135 if (obj->base.name) 136 seq_printf(m, " (name: %d)", obj->base.name); 137 if (obj->fence_reg != I915_FENCE_REG_NONE) 138 seq_printf(m, " (fence: %d)", obj->fence_reg); 139 if (obj->gtt_space != NULL) 140 seq_printf(m, " (gtt offset: %08x, size: %08x)", 141 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 142 if (obj->pin_mappable || obj->fault_mappable) { 143 char s[3], *t = s; 144 if (obj->pin_mappable) 145 *t++ = 'p'; 146 if (obj->fault_mappable) 147 *t++ = 'f'; 148 *t = '\0'; 149 seq_printf(m, " (%s mappable)", s); 150 } 151 if (obj->ring != NULL) 152 seq_printf(m, " (%s)", obj->ring->name); 153 } 154 155 static int i915_gem_object_list_info(struct seq_file *m, void *data) 156 { 157 struct drm_info_node *node = (struct drm_info_node *) m->private; 158 uintptr_t list = (uintptr_t) node->info_ent->data; 159 struct list_head *head; 160 struct drm_device *dev = node->minor->dev; 161 drm_i915_private_t *dev_priv = dev->dev_private; 162 struct drm_i915_gem_object *obj; 163 size_t total_obj_size, total_gtt_size; 164 int count, ret; 165 166 ret = mutex_lock_interruptible(&dev->struct_mutex); 167 if (ret) 168 return ret; 169 170 switch (list) { 171 case ACTIVE_LIST: 172 seq_printf(m, "Active:\n"); 173 head = &dev_priv->mm.active_list; 174 break; 175 case INACTIVE_LIST: 176 seq_printf(m, "Inactive:\n"); 177 head = &dev_priv->mm.inactive_list; 178 break; 179 case PINNED_LIST: 180 seq_printf(m, "Pinned:\n"); 181 head = &dev_priv->mm.pinned_list; 182 break; 183 case FLUSHING_LIST: 184 seq_printf(m, "Flushing:\n"); 185 head = &dev_priv->mm.flushing_list; 186 break; 187 case DEFERRED_FREE_LIST: 188 seq_printf(m, "Deferred free:\n"); 189 head = &dev_priv->mm.deferred_free_list; 190 break; 191 default: 192 mutex_unlock(&dev->struct_mutex); 193 return -EINVAL; 194 } 195 196 total_obj_size = total_gtt_size = count = 0; 197 list_for_each_entry(obj, head, mm_list) { 198 seq_printf(m, " "); 199 describe_obj(m, obj); 200 seq_printf(m, "\n"); 201 total_obj_size += obj->base.size; 202 total_gtt_size += obj->gtt_space->size; 203 count++; 204 } 205 mutex_unlock(&dev->struct_mutex); 206 207 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 208 count, total_obj_size, total_gtt_size); 209 return 0; 210 } 211 212 #define count_objects(list, member) do { \ 213 list_for_each_entry(obj, list, member) { \ 214 size += obj->gtt_space->size; \ 215 ++count; \ 216 if (obj->map_and_fenceable) { \ 217 mappable_size += obj->gtt_space->size; \ 218 ++mappable_count; \ 219 } \ 220 } \ 221 } while (0) 222 223 static int i915_gem_object_info(struct seq_file *m, void* data) 224 { 225 struct drm_info_node *node = (struct drm_info_node *) m->private; 226 struct drm_device *dev = node->minor->dev; 227 struct drm_i915_private *dev_priv = dev->dev_private; 228 u32 count, mappable_count; 229 size_t size, mappable_size; 230 struct drm_i915_gem_object *obj; 231 int ret; 232 233 ret = mutex_lock_interruptible(&dev->struct_mutex); 234 if (ret) 235 return ret; 236 237 seq_printf(m, "%u objects, %zu bytes\n", 238 dev_priv->mm.object_count, 239 dev_priv->mm.object_memory); 240 241 size = count = mappable_size = mappable_count = 0; 242 count_objects(&dev_priv->mm.gtt_list, gtt_list); 243 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 244 count, mappable_count, size, mappable_size); 245 246 size = count = mappable_size = mappable_count = 0; 247 count_objects(&dev_priv->mm.active_list, mm_list); 248 count_objects(&dev_priv->mm.flushing_list, mm_list); 249 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 250 count, mappable_count, size, mappable_size); 251 252 size = count = mappable_size = mappable_count = 0; 253 count_objects(&dev_priv->mm.pinned_list, mm_list); 254 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", 255 count, mappable_count, size, mappable_size); 256 257 size = count = mappable_size = mappable_count = 0; 258 count_objects(&dev_priv->mm.inactive_list, mm_list); 259 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 260 count, mappable_count, size, mappable_size); 261 262 size = count = mappable_size = mappable_count = 0; 263 count_objects(&dev_priv->mm.deferred_free_list, mm_list); 264 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", 265 count, mappable_count, size, mappable_size); 266 267 size = count = mappable_size = mappable_count = 0; 268 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 269 if (obj->fault_mappable) { 270 size += obj->gtt_space->size; 271 ++count; 272 } 273 if (obj->pin_mappable) { 274 mappable_size += obj->gtt_space->size; 275 ++mappable_count; 276 } 277 } 278 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 279 mappable_count, mappable_size); 280 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 281 count, size); 282 283 seq_printf(m, "%zu [%zu] gtt total\n", 284 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 285 286 mutex_unlock(&dev->struct_mutex); 287 288 return 0; 289 } 290 291 static int i915_gem_gtt_info(struct seq_file *m, void* data) 292 { 293 struct drm_info_node *node = (struct drm_info_node *) m->private; 294 struct drm_device *dev = node->minor->dev; 295 struct drm_i915_private *dev_priv = dev->dev_private; 296 struct drm_i915_gem_object *obj; 297 size_t total_obj_size, total_gtt_size; 298 int count, ret; 299 300 ret = mutex_lock_interruptible(&dev->struct_mutex); 301 if (ret) 302 return ret; 303 304 total_obj_size = total_gtt_size = count = 0; 305 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 306 seq_printf(m, " "); 307 describe_obj(m, obj); 308 seq_printf(m, "\n"); 309 total_obj_size += obj->base.size; 310 total_gtt_size += obj->gtt_space->size; 311 count++; 312 } 313 314 mutex_unlock(&dev->struct_mutex); 315 316 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 317 count, total_obj_size, total_gtt_size); 318 319 return 0; 320 } 321 322 323 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 324 { 325 struct drm_info_node *node = (struct drm_info_node *) m->private; 326 struct drm_device *dev = node->minor->dev; 327 unsigned long flags; 328 struct intel_crtc *crtc; 329 330 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 331 const char pipe = pipe_name(crtc->pipe); 332 const char plane = plane_name(crtc->plane); 333 struct intel_unpin_work *work; 334 335 spin_lock_irqsave(&dev->event_lock, flags); 336 work = crtc->unpin_work; 337 if (work == NULL) { 338 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 339 pipe, plane); 340 } else { 341 if (!work->pending) { 342 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 343 pipe, plane); 344 } else { 345 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 346 pipe, plane); 347 } 348 if (work->enable_stall_check) 349 seq_printf(m, "Stall check enabled, "); 350 else 351 seq_printf(m, "Stall check waiting for page flip ioctl, "); 352 seq_printf(m, "%d prepares\n", work->pending); 353 354 if (work->old_fb_obj) { 355 struct drm_i915_gem_object *obj = work->old_fb_obj; 356 if (obj) 357 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 358 } 359 if (work->pending_flip_obj) { 360 struct drm_i915_gem_object *obj = work->pending_flip_obj; 361 if (obj) 362 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 363 } 364 } 365 spin_unlock_irqrestore(&dev->event_lock, flags); 366 } 367 368 return 0; 369 } 370 371 static int i915_gem_request_info(struct seq_file *m, void *data) 372 { 373 struct drm_info_node *node = (struct drm_info_node *) m->private; 374 struct drm_device *dev = node->minor->dev; 375 drm_i915_private_t *dev_priv = dev->dev_private; 376 struct drm_i915_gem_request *gem_request; 377 int ret, count; 378 379 ret = mutex_lock_interruptible(&dev->struct_mutex); 380 if (ret) 381 return ret; 382 383 count = 0; 384 if (!list_empty(&dev_priv->ring[RCS].request_list)) { 385 seq_printf(m, "Render requests:\n"); 386 list_for_each_entry(gem_request, 387 &dev_priv->ring[RCS].request_list, 388 list) { 389 seq_printf(m, " %d @ %d\n", 390 gem_request->seqno, 391 (int) (jiffies - gem_request->emitted_jiffies)); 392 } 393 count++; 394 } 395 if (!list_empty(&dev_priv->ring[VCS].request_list)) { 396 seq_printf(m, "BSD requests:\n"); 397 list_for_each_entry(gem_request, 398 &dev_priv->ring[VCS].request_list, 399 list) { 400 seq_printf(m, " %d @ %d\n", 401 gem_request->seqno, 402 (int) (jiffies - gem_request->emitted_jiffies)); 403 } 404 count++; 405 } 406 if (!list_empty(&dev_priv->ring[BCS].request_list)) { 407 seq_printf(m, "BLT requests:\n"); 408 list_for_each_entry(gem_request, 409 &dev_priv->ring[BCS].request_list, 410 list) { 411 seq_printf(m, " %d @ %d\n", 412 gem_request->seqno, 413 (int) (jiffies - gem_request->emitted_jiffies)); 414 } 415 count++; 416 } 417 mutex_unlock(&dev->struct_mutex); 418 419 if (count == 0) 420 seq_printf(m, "No requests\n"); 421 422 return 0; 423 } 424 425 static void i915_ring_seqno_info(struct seq_file *m, 426 struct intel_ring_buffer *ring) 427 { 428 if (ring->get_seqno) { 429 seq_printf(m, "Current sequence (%s): %d\n", 430 ring->name, ring->get_seqno(ring)); 431 seq_printf(m, "Waiter sequence (%s): %d\n", 432 ring->name, ring->waiting_seqno); 433 seq_printf(m, "IRQ sequence (%s): %d\n", 434 ring->name, ring->irq_seqno); 435 } 436 } 437 438 static int i915_gem_seqno_info(struct seq_file *m, void *data) 439 { 440 struct drm_info_node *node = (struct drm_info_node *) m->private; 441 struct drm_device *dev = node->minor->dev; 442 drm_i915_private_t *dev_priv = dev->dev_private; 443 int ret, i; 444 445 ret = mutex_lock_interruptible(&dev->struct_mutex); 446 if (ret) 447 return ret; 448 449 for (i = 0; i < I915_NUM_RINGS; i++) 450 i915_ring_seqno_info(m, &dev_priv->ring[i]); 451 452 mutex_unlock(&dev->struct_mutex); 453 454 return 0; 455 } 456 457 458 static int i915_interrupt_info(struct seq_file *m, void *data) 459 { 460 struct drm_info_node *node = (struct drm_info_node *) m->private; 461 struct drm_device *dev = node->minor->dev; 462 drm_i915_private_t *dev_priv = dev->dev_private; 463 int ret, i, pipe; 464 465 ret = mutex_lock_interruptible(&dev->struct_mutex); 466 if (ret) 467 return ret; 468 469 if (!HAS_PCH_SPLIT(dev)) { 470 seq_printf(m, "Interrupt enable: %08x\n", 471 I915_READ(IER)); 472 seq_printf(m, "Interrupt identity: %08x\n", 473 I915_READ(IIR)); 474 seq_printf(m, "Interrupt mask: %08x\n", 475 I915_READ(IMR)); 476 for_each_pipe(pipe) 477 seq_printf(m, "Pipe %c stat: %08x\n", 478 pipe_name(pipe), 479 I915_READ(PIPESTAT(pipe))); 480 } else { 481 seq_printf(m, "North Display Interrupt enable: %08x\n", 482 I915_READ(DEIER)); 483 seq_printf(m, "North Display Interrupt identity: %08x\n", 484 I915_READ(DEIIR)); 485 seq_printf(m, "North Display Interrupt mask: %08x\n", 486 I915_READ(DEIMR)); 487 seq_printf(m, "South Display Interrupt enable: %08x\n", 488 I915_READ(SDEIER)); 489 seq_printf(m, "South Display Interrupt identity: %08x\n", 490 I915_READ(SDEIIR)); 491 seq_printf(m, "South Display Interrupt mask: %08x\n", 492 I915_READ(SDEIMR)); 493 seq_printf(m, "Graphics Interrupt enable: %08x\n", 494 I915_READ(GTIER)); 495 seq_printf(m, "Graphics Interrupt identity: %08x\n", 496 I915_READ(GTIIR)); 497 seq_printf(m, "Graphics Interrupt mask: %08x\n", 498 I915_READ(GTIMR)); 499 } 500 seq_printf(m, "Interrupts received: %d\n", 501 atomic_read(&dev_priv->irq_received)); 502 for (i = 0; i < I915_NUM_RINGS; i++) { 503 if (IS_GEN6(dev) || IS_GEN7(dev)) { 504 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 505 dev_priv->ring[i].name, 506 I915_READ_IMR(&dev_priv->ring[i])); 507 } 508 i915_ring_seqno_info(m, &dev_priv->ring[i]); 509 } 510 mutex_unlock(&dev->struct_mutex); 511 512 return 0; 513 } 514 515 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 516 { 517 struct drm_info_node *node = (struct drm_info_node *) m->private; 518 struct drm_device *dev = node->minor->dev; 519 drm_i915_private_t *dev_priv = dev->dev_private; 520 int i, ret; 521 522 ret = mutex_lock_interruptible(&dev->struct_mutex); 523 if (ret) 524 return ret; 525 526 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 527 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 528 for (i = 0; i < dev_priv->num_fence_regs; i++) { 529 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 530 531 seq_printf(m, "Fenced object[%2d] = ", i); 532 if (obj == NULL) 533 seq_printf(m, "unused"); 534 else 535 describe_obj(m, obj); 536 seq_printf(m, "\n"); 537 } 538 539 mutex_unlock(&dev->struct_mutex); 540 return 0; 541 } 542 543 static int i915_hws_info(struct seq_file *m, void *data) 544 { 545 struct drm_info_node *node = (struct drm_info_node *) m->private; 546 struct drm_device *dev = node->minor->dev; 547 drm_i915_private_t *dev_priv = dev->dev_private; 548 struct intel_ring_buffer *ring; 549 const volatile u32 __iomem *hws; 550 int i; 551 552 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 553 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 554 if (hws == NULL) 555 return 0; 556 557 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 558 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 559 i * 4, 560 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 561 } 562 return 0; 563 } 564 565 static void i915_dump_object(struct seq_file *m, 566 struct io_mapping *mapping, 567 struct drm_i915_gem_object *obj) 568 { 569 int page, page_count, i; 570 571 page_count = obj->base.size / PAGE_SIZE; 572 for (page = 0; page < page_count; page++) { 573 u32 *mem = io_mapping_map_wc(mapping, 574 obj->gtt_offset + page * PAGE_SIZE); 575 for (i = 0; i < PAGE_SIZE; i += 4) 576 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 577 io_mapping_unmap(mem); 578 } 579 } 580 581 static int i915_batchbuffer_info(struct seq_file *m, void *data) 582 { 583 struct drm_info_node *node = (struct drm_info_node *) m->private; 584 struct drm_device *dev = node->minor->dev; 585 drm_i915_private_t *dev_priv = dev->dev_private; 586 struct drm_i915_gem_object *obj; 587 int ret; 588 589 ret = mutex_lock_interruptible(&dev->struct_mutex); 590 if (ret) 591 return ret; 592 593 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 594 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) { 595 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); 596 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); 597 } 598 } 599 600 mutex_unlock(&dev->struct_mutex); 601 return 0; 602 } 603 604 static int i915_ringbuffer_data(struct seq_file *m, void *data) 605 { 606 struct drm_info_node *node = (struct drm_info_node *) m->private; 607 struct drm_device *dev = node->minor->dev; 608 drm_i915_private_t *dev_priv = dev->dev_private; 609 struct intel_ring_buffer *ring; 610 int ret; 611 612 ret = mutex_lock_interruptible(&dev->struct_mutex); 613 if (ret) 614 return ret; 615 616 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 617 if (!ring->obj) { 618 seq_printf(m, "No ringbuffer setup\n"); 619 } else { 620 const u8 __iomem *virt = ring->virtual_start; 621 uint32_t off; 622 623 for (off = 0; off < ring->size; off += 4) { 624 uint32_t *ptr = (uint32_t *)(virt + off); 625 seq_printf(m, "%08x : %08x\n", off, *ptr); 626 } 627 } 628 mutex_unlock(&dev->struct_mutex); 629 630 return 0; 631 } 632 633 static int i915_ringbuffer_info(struct seq_file *m, void *data) 634 { 635 struct drm_info_node *node = (struct drm_info_node *) m->private; 636 struct drm_device *dev = node->minor->dev; 637 drm_i915_private_t *dev_priv = dev->dev_private; 638 struct intel_ring_buffer *ring; 639 int ret; 640 641 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 642 if (ring->size == 0) 643 return 0; 644 645 ret = mutex_lock_interruptible(&dev->struct_mutex); 646 if (ret) 647 return ret; 648 649 seq_printf(m, "Ring %s:\n", ring->name); 650 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 651 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 652 seq_printf(m, " Size : %08x\n", ring->size); 653 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); 654 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); 655 if (IS_GEN6(dev)) { 656 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); 657 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); 658 } 659 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 660 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 661 662 mutex_unlock(&dev->struct_mutex); 663 664 return 0; 665 } 666 667 static const char *ring_str(int ring) 668 { 669 switch (ring) { 670 case RING_RENDER: return " render"; 671 case RING_BSD: return " bsd"; 672 case RING_BLT: return " blt"; 673 default: return ""; 674 } 675 } 676 677 static const char *pin_flag(int pinned) 678 { 679 if (pinned > 0) 680 return " P"; 681 else if (pinned < 0) 682 return " p"; 683 else 684 return ""; 685 } 686 687 static const char *tiling_flag(int tiling) 688 { 689 switch (tiling) { 690 default: 691 case I915_TILING_NONE: return ""; 692 case I915_TILING_X: return " X"; 693 case I915_TILING_Y: return " Y"; 694 } 695 } 696 697 static const char *dirty_flag(int dirty) 698 { 699 return dirty ? " dirty" : ""; 700 } 701 702 static const char *purgeable_flag(int purgeable) 703 { 704 return purgeable ? " purgeable" : ""; 705 } 706 707 static void print_error_buffers(struct seq_file *m, 708 const char *name, 709 struct drm_i915_error_buffer *err, 710 int count) 711 { 712 seq_printf(m, "%s [%d]:\n", name, count); 713 714 while (count--) { 715 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s", 716 err->gtt_offset, 717 err->size, 718 err->read_domains, 719 err->write_domain, 720 err->seqno, 721 pin_flag(err->pinned), 722 tiling_flag(err->tiling), 723 dirty_flag(err->dirty), 724 purgeable_flag(err->purgeable), 725 ring_str(err->ring), 726 cache_level_str(err->cache_level)); 727 728 if (err->name) 729 seq_printf(m, " (name: %d)", err->name); 730 if (err->fence_reg != I915_FENCE_REG_NONE) 731 seq_printf(m, " (fence: %d)", err->fence_reg); 732 733 seq_printf(m, "\n"); 734 err++; 735 } 736 } 737 738 static int i915_error_state(struct seq_file *m, void *unused) 739 { 740 struct drm_info_node *node = (struct drm_info_node *) m->private; 741 struct drm_device *dev = node->minor->dev; 742 drm_i915_private_t *dev_priv = dev->dev_private; 743 struct drm_i915_error_state *error; 744 unsigned long flags; 745 int i, page, offset, elt; 746 747 spin_lock_irqsave(&dev_priv->error_lock, flags); 748 if (!dev_priv->first_error) { 749 seq_printf(m, "no error state collected\n"); 750 goto out; 751 } 752 753 error = dev_priv->first_error; 754 755 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 756 error->time.tv_usec); 757 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 758 seq_printf(m, "EIR: 0x%08x\n", error->eir); 759 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 760 if (INTEL_INFO(dev)->gen >= 6) { 761 seq_printf(m, "ERROR: 0x%08x\n", error->error); 762 seq_printf(m, "Blitter command stream:\n"); 763 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); 764 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); 765 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); 766 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); 767 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); 768 seq_printf(m, "Video (BSD) command stream:\n"); 769 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); 770 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); 771 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); 772 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); 773 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); 774 } 775 seq_printf(m, "Render command stream:\n"); 776 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); 777 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 778 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 779 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 780 if (INTEL_INFO(dev)->gen >= 4) { 781 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 782 seq_printf(m, " INSTPS: 0x%08x\n", error->instps); 783 } 784 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 785 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 786 787 for (i = 0; i < dev_priv->num_fence_regs; i++) 788 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 789 790 if (error->active_bo) 791 print_error_buffers(m, "Active", 792 error->active_bo, 793 error->active_bo_count); 794 795 if (error->pinned_bo) 796 print_error_buffers(m, "Pinned", 797 error->pinned_bo, 798 error->pinned_bo_count); 799 800 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 801 if (error->batchbuffer[i]) { 802 struct drm_i915_error_object *obj = error->batchbuffer[i]; 803 804 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 805 dev_priv->ring[i].name, 806 obj->gtt_offset); 807 offset = 0; 808 for (page = 0; page < obj->page_count; page++) { 809 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 810 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 811 offset += 4; 812 } 813 } 814 } 815 } 816 817 for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) { 818 if (error->ringbuffer[i]) { 819 struct drm_i915_error_object *obj = error->ringbuffer[i]; 820 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 821 dev_priv->ring[i].name, 822 obj->gtt_offset); 823 offset = 0; 824 for (page = 0; page < obj->page_count; page++) { 825 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 826 seq_printf(m, "%08x : %08x\n", 827 offset, 828 obj->pages[page][elt]); 829 offset += 4; 830 } 831 } 832 } 833 } 834 835 if (error->overlay) 836 intel_overlay_print_error_state(m, error->overlay); 837 838 if (error->display) 839 intel_display_print_error_state(m, dev, error->display); 840 841 out: 842 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 843 844 return 0; 845 } 846 847 static int i915_rstdby_delays(struct seq_file *m, void *unused) 848 { 849 struct drm_info_node *node = (struct drm_info_node *) m->private; 850 struct drm_device *dev = node->minor->dev; 851 drm_i915_private_t *dev_priv = dev->dev_private; 852 u16 crstanddelay; 853 int ret; 854 855 ret = mutex_lock_interruptible(&dev->struct_mutex); 856 if (ret) 857 return ret; 858 859 crstanddelay = I915_READ16(CRSTANDVID); 860 861 mutex_unlock(&dev->struct_mutex); 862 863 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 864 865 return 0; 866 } 867 868 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 869 { 870 struct drm_info_node *node = (struct drm_info_node *) m->private; 871 struct drm_device *dev = node->minor->dev; 872 drm_i915_private_t *dev_priv = dev->dev_private; 873 int ret; 874 875 if (IS_GEN5(dev)) { 876 u16 rgvswctl = I915_READ16(MEMSWCTL); 877 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 878 879 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 880 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 881 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 882 MEMSTAT_VID_SHIFT); 883 seq_printf(m, "Current P-state: %d\n", 884 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 885 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 886 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 887 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 888 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 889 u32 rpstat; 890 u32 rpupei, rpcurup, rpprevup; 891 u32 rpdownei, rpcurdown, rpprevdown; 892 int max_freq; 893 894 /* RPSTAT1 is in the GT power well */ 895 ret = mutex_lock_interruptible(&dev->struct_mutex); 896 if (ret) 897 return ret; 898 899 gen6_gt_force_wake_get(dev_priv); 900 901 rpstat = I915_READ(GEN6_RPSTAT1); 902 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 903 rpcurup = I915_READ(GEN6_RP_CUR_UP); 904 rpprevup = I915_READ(GEN6_RP_PREV_UP); 905 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 906 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 907 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 908 909 gen6_gt_force_wake_put(dev_priv); 910 mutex_unlock(&dev->struct_mutex); 911 912 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 913 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 914 seq_printf(m, "Render p-state ratio: %d\n", 915 (gt_perf_status & 0xff00) >> 8); 916 seq_printf(m, "Render p-state VID: %d\n", 917 gt_perf_status & 0xff); 918 seq_printf(m, "Render p-state limit: %d\n", 919 rp_state_limits & 0xff); 920 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 921 GEN6_CAGF_SHIFT) * 50); 922 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 923 GEN6_CURICONT_MASK); 924 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 925 GEN6_CURBSYTAVG_MASK); 926 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 927 GEN6_CURBSYTAVG_MASK); 928 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 929 GEN6_CURIAVG_MASK); 930 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 931 GEN6_CURBSYTAVG_MASK); 932 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 933 GEN6_CURBSYTAVG_MASK); 934 935 max_freq = (rp_state_cap & 0xff0000) >> 16; 936 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 937 max_freq * 50); 938 939 max_freq = (rp_state_cap & 0xff00) >> 8; 940 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 941 max_freq * 50); 942 943 max_freq = rp_state_cap & 0xff; 944 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 945 max_freq * 50); 946 } else { 947 seq_printf(m, "no P-state info available\n"); 948 } 949 950 return 0; 951 } 952 953 static int i915_delayfreq_table(struct seq_file *m, void *unused) 954 { 955 struct drm_info_node *node = (struct drm_info_node *) m->private; 956 struct drm_device *dev = node->minor->dev; 957 drm_i915_private_t *dev_priv = dev->dev_private; 958 u32 delayfreq; 959 int ret, i; 960 961 ret = mutex_lock_interruptible(&dev->struct_mutex); 962 if (ret) 963 return ret; 964 965 for (i = 0; i < 16; i++) { 966 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 967 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 968 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 969 } 970 971 mutex_unlock(&dev->struct_mutex); 972 973 return 0; 974 } 975 976 static inline int MAP_TO_MV(int map) 977 { 978 return 1250 - (map * 25); 979 } 980 981 static int i915_inttoext_table(struct seq_file *m, void *unused) 982 { 983 struct drm_info_node *node = (struct drm_info_node *) m->private; 984 struct drm_device *dev = node->minor->dev; 985 drm_i915_private_t *dev_priv = dev->dev_private; 986 u32 inttoext; 987 int ret, i; 988 989 ret = mutex_lock_interruptible(&dev->struct_mutex); 990 if (ret) 991 return ret; 992 993 for (i = 1; i <= 32; i++) { 994 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 995 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 996 } 997 998 mutex_unlock(&dev->struct_mutex); 999 1000 return 0; 1001 } 1002 1003 static int i915_drpc_info(struct seq_file *m, void *unused) 1004 { 1005 struct drm_info_node *node = (struct drm_info_node *) m->private; 1006 struct drm_device *dev = node->minor->dev; 1007 drm_i915_private_t *dev_priv = dev->dev_private; 1008 u32 rgvmodectl, rstdbyctl; 1009 u16 crstandvid; 1010 int ret; 1011 1012 ret = mutex_lock_interruptible(&dev->struct_mutex); 1013 if (ret) 1014 return ret; 1015 1016 rgvmodectl = I915_READ(MEMMODECTL); 1017 rstdbyctl = I915_READ(RSTDBYCTL); 1018 crstandvid = I915_READ16(CRSTANDVID); 1019 1020 mutex_unlock(&dev->struct_mutex); 1021 1022 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1023 "yes" : "no"); 1024 seq_printf(m, "Boost freq: %d\n", 1025 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1026 MEMMODE_BOOST_FREQ_SHIFT); 1027 seq_printf(m, "HW control enabled: %s\n", 1028 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1029 seq_printf(m, "SW control enabled: %s\n", 1030 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1031 seq_printf(m, "Gated voltage change: %s\n", 1032 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1033 seq_printf(m, "Starting frequency: P%d\n", 1034 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1035 seq_printf(m, "Max P-state: P%d\n", 1036 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1037 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1038 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1039 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1040 seq_printf(m, "Render standby enabled: %s\n", 1041 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1042 seq_printf(m, "Current RS state: "); 1043 switch (rstdbyctl & RSX_STATUS_MASK) { 1044 case RSX_STATUS_ON: 1045 seq_printf(m, "on\n"); 1046 break; 1047 case RSX_STATUS_RC1: 1048 seq_printf(m, "RC1\n"); 1049 break; 1050 case RSX_STATUS_RC1E: 1051 seq_printf(m, "RC1E\n"); 1052 break; 1053 case RSX_STATUS_RS1: 1054 seq_printf(m, "RS1\n"); 1055 break; 1056 case RSX_STATUS_RS2: 1057 seq_printf(m, "RS2 (RC6)\n"); 1058 break; 1059 case RSX_STATUS_RS3: 1060 seq_printf(m, "RC3 (RC6+)\n"); 1061 break; 1062 default: 1063 seq_printf(m, "unknown\n"); 1064 break; 1065 } 1066 1067 return 0; 1068 } 1069 1070 static int i915_fbc_status(struct seq_file *m, void *unused) 1071 { 1072 struct drm_info_node *node = (struct drm_info_node *) m->private; 1073 struct drm_device *dev = node->minor->dev; 1074 drm_i915_private_t *dev_priv = dev->dev_private; 1075 1076 if (!I915_HAS_FBC(dev)) { 1077 seq_printf(m, "FBC unsupported on this chipset\n"); 1078 return 0; 1079 } 1080 1081 if (intel_fbc_enabled(dev)) { 1082 seq_printf(m, "FBC enabled\n"); 1083 } else { 1084 seq_printf(m, "FBC disabled: "); 1085 switch (dev_priv->no_fbc_reason) { 1086 case FBC_NO_OUTPUT: 1087 seq_printf(m, "no outputs"); 1088 break; 1089 case FBC_STOLEN_TOO_SMALL: 1090 seq_printf(m, "not enough stolen memory"); 1091 break; 1092 case FBC_UNSUPPORTED_MODE: 1093 seq_printf(m, "mode not supported"); 1094 break; 1095 case FBC_MODE_TOO_LARGE: 1096 seq_printf(m, "mode too large"); 1097 break; 1098 case FBC_BAD_PLANE: 1099 seq_printf(m, "FBC unsupported on plane"); 1100 break; 1101 case FBC_NOT_TILED: 1102 seq_printf(m, "scanout buffer not tiled"); 1103 break; 1104 case FBC_MULTIPLE_PIPES: 1105 seq_printf(m, "multiple pipes are enabled"); 1106 break; 1107 case FBC_MODULE_PARAM: 1108 seq_printf(m, "disabled per module param (default off)"); 1109 break; 1110 default: 1111 seq_printf(m, "unknown reason"); 1112 } 1113 seq_printf(m, "\n"); 1114 } 1115 return 0; 1116 } 1117 1118 static int i915_sr_status(struct seq_file *m, void *unused) 1119 { 1120 struct drm_info_node *node = (struct drm_info_node *) m->private; 1121 struct drm_device *dev = node->minor->dev; 1122 drm_i915_private_t *dev_priv = dev->dev_private; 1123 bool sr_enabled = false; 1124 1125 if (HAS_PCH_SPLIT(dev)) 1126 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1127 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1128 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1129 else if (IS_I915GM(dev)) 1130 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1131 else if (IS_PINEVIEW(dev)) 1132 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1133 1134 seq_printf(m, "self-refresh: %s\n", 1135 sr_enabled ? "enabled" : "disabled"); 1136 1137 return 0; 1138 } 1139 1140 static int i915_emon_status(struct seq_file *m, void *unused) 1141 { 1142 struct drm_info_node *node = (struct drm_info_node *) m->private; 1143 struct drm_device *dev = node->minor->dev; 1144 drm_i915_private_t *dev_priv = dev->dev_private; 1145 unsigned long temp, chipset, gfx; 1146 int ret; 1147 1148 ret = mutex_lock_interruptible(&dev->struct_mutex); 1149 if (ret) 1150 return ret; 1151 1152 temp = i915_mch_val(dev_priv); 1153 chipset = i915_chipset_val(dev_priv); 1154 gfx = i915_gfx_val(dev_priv); 1155 mutex_unlock(&dev->struct_mutex); 1156 1157 seq_printf(m, "GMCH temp: %ld\n", temp); 1158 seq_printf(m, "Chipset power: %ld\n", chipset); 1159 seq_printf(m, "GFX power: %ld\n", gfx); 1160 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1161 1162 return 0; 1163 } 1164 1165 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1166 { 1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1168 struct drm_device *dev = node->minor->dev; 1169 drm_i915_private_t *dev_priv = dev->dev_private; 1170 int ret; 1171 int gpu_freq, ia_freq; 1172 1173 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1174 seq_printf(m, "unsupported on this chipset\n"); 1175 return 0; 1176 } 1177 1178 ret = mutex_lock_interruptible(&dev->struct_mutex); 1179 if (ret) 1180 return ret; 1181 1182 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1183 1184 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1185 gpu_freq++) { 1186 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1187 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1188 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1189 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 1190 GEN6_PCODE_READY) == 0, 10)) { 1191 DRM_ERROR("pcode read of freq table timed out\n"); 1192 continue; 1193 } 1194 ia_freq = I915_READ(GEN6_PCODE_DATA); 1195 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1196 } 1197 1198 mutex_unlock(&dev->struct_mutex); 1199 1200 return 0; 1201 } 1202 1203 static int i915_gfxec(struct seq_file *m, void *unused) 1204 { 1205 struct drm_info_node *node = (struct drm_info_node *) m->private; 1206 struct drm_device *dev = node->minor->dev; 1207 drm_i915_private_t *dev_priv = dev->dev_private; 1208 int ret; 1209 1210 ret = mutex_lock_interruptible(&dev->struct_mutex); 1211 if (ret) 1212 return ret; 1213 1214 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1215 1216 mutex_unlock(&dev->struct_mutex); 1217 1218 return 0; 1219 } 1220 1221 static int i915_opregion(struct seq_file *m, void *unused) 1222 { 1223 struct drm_info_node *node = (struct drm_info_node *) m->private; 1224 struct drm_device *dev = node->minor->dev; 1225 drm_i915_private_t *dev_priv = dev->dev_private; 1226 struct intel_opregion *opregion = &dev_priv->opregion; 1227 int ret; 1228 1229 ret = mutex_lock_interruptible(&dev->struct_mutex); 1230 if (ret) 1231 return ret; 1232 1233 if (opregion->header) 1234 seq_write(m, opregion->header, OPREGION_SIZE); 1235 1236 mutex_unlock(&dev->struct_mutex); 1237 1238 return 0; 1239 } 1240 1241 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1242 { 1243 struct drm_info_node *node = (struct drm_info_node *) m->private; 1244 struct drm_device *dev = node->minor->dev; 1245 drm_i915_private_t *dev_priv = dev->dev_private; 1246 struct intel_fbdev *ifbdev; 1247 struct intel_framebuffer *fb; 1248 int ret; 1249 1250 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1251 if (ret) 1252 return ret; 1253 1254 ifbdev = dev_priv->fbdev; 1255 fb = to_intel_framebuffer(ifbdev->helper.fb); 1256 1257 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1258 fb->base.width, 1259 fb->base.height, 1260 fb->base.depth, 1261 fb->base.bits_per_pixel); 1262 describe_obj(m, fb->obj); 1263 seq_printf(m, "\n"); 1264 1265 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1266 if (&fb->base == ifbdev->helper.fb) 1267 continue; 1268 1269 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1270 fb->base.width, 1271 fb->base.height, 1272 fb->base.depth, 1273 fb->base.bits_per_pixel); 1274 describe_obj(m, fb->obj); 1275 seq_printf(m, "\n"); 1276 } 1277 1278 mutex_unlock(&dev->mode_config.mutex); 1279 1280 return 0; 1281 } 1282 1283 static int i915_context_status(struct seq_file *m, void *unused) 1284 { 1285 struct drm_info_node *node = (struct drm_info_node *) m->private; 1286 struct drm_device *dev = node->minor->dev; 1287 drm_i915_private_t *dev_priv = dev->dev_private; 1288 int ret; 1289 1290 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1291 if (ret) 1292 return ret; 1293 1294 if (dev_priv->pwrctx) { 1295 seq_printf(m, "power context "); 1296 describe_obj(m, dev_priv->pwrctx); 1297 seq_printf(m, "\n"); 1298 } 1299 1300 if (dev_priv->renderctx) { 1301 seq_printf(m, "render context "); 1302 describe_obj(m, dev_priv->renderctx); 1303 seq_printf(m, "\n"); 1304 } 1305 1306 mutex_unlock(&dev->mode_config.mutex); 1307 1308 return 0; 1309 } 1310 1311 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1312 { 1313 struct drm_info_node *node = (struct drm_info_node *) m->private; 1314 struct drm_device *dev = node->minor->dev; 1315 struct drm_i915_private *dev_priv = dev->dev_private; 1316 1317 seq_printf(m, "forcewake count = %d\n", 1318 atomic_read(&dev_priv->forcewake_count)); 1319 1320 return 0; 1321 } 1322 1323 static int 1324 i915_wedged_open(struct inode *inode, 1325 struct file *filp) 1326 { 1327 filp->private_data = inode->i_private; 1328 return 0; 1329 } 1330 1331 static ssize_t 1332 i915_wedged_read(struct file *filp, 1333 char __user *ubuf, 1334 size_t max, 1335 loff_t *ppos) 1336 { 1337 struct drm_device *dev = filp->private_data; 1338 drm_i915_private_t *dev_priv = dev->dev_private; 1339 char buf[80]; 1340 int len; 1341 1342 len = snprintf(buf, sizeof(buf), 1343 "wedged : %d\n", 1344 atomic_read(&dev_priv->mm.wedged)); 1345 1346 if (len > sizeof(buf)) 1347 len = sizeof(buf); 1348 1349 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1350 } 1351 1352 static ssize_t 1353 i915_wedged_write(struct file *filp, 1354 const char __user *ubuf, 1355 size_t cnt, 1356 loff_t *ppos) 1357 { 1358 struct drm_device *dev = filp->private_data; 1359 char buf[20]; 1360 int val = 1; 1361 1362 if (cnt > 0) { 1363 if (cnt > sizeof(buf) - 1) 1364 return -EINVAL; 1365 1366 if (copy_from_user(buf, ubuf, cnt)) 1367 return -EFAULT; 1368 buf[cnt] = 0; 1369 1370 val = simple_strtoul(buf, NULL, 0); 1371 } 1372 1373 DRM_INFO("Manually setting wedged to %d\n", val); 1374 i915_handle_error(dev, val); 1375 1376 return cnt; 1377 } 1378 1379 static const struct file_operations i915_wedged_fops = { 1380 .owner = THIS_MODULE, 1381 .open = i915_wedged_open, 1382 .read = i915_wedged_read, 1383 .write = i915_wedged_write, 1384 .llseek = default_llseek, 1385 }; 1386 1387 static int 1388 i915_max_freq_open(struct inode *inode, 1389 struct file *filp) 1390 { 1391 filp->private_data = inode->i_private; 1392 return 0; 1393 } 1394 1395 static ssize_t 1396 i915_max_freq_read(struct file *filp, 1397 char __user *ubuf, 1398 size_t max, 1399 loff_t *ppos) 1400 { 1401 struct drm_device *dev = filp->private_data; 1402 drm_i915_private_t *dev_priv = dev->dev_private; 1403 char buf[80]; 1404 int len; 1405 1406 len = snprintf(buf, sizeof(buf), 1407 "max freq: %d\n", dev_priv->max_delay * 50); 1408 1409 if (len > sizeof(buf)) 1410 len = sizeof(buf); 1411 1412 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1413 } 1414 1415 static ssize_t 1416 i915_max_freq_write(struct file *filp, 1417 const char __user *ubuf, 1418 size_t cnt, 1419 loff_t *ppos) 1420 { 1421 struct drm_device *dev = filp->private_data; 1422 struct drm_i915_private *dev_priv = dev->dev_private; 1423 char buf[20]; 1424 int val = 1; 1425 1426 if (cnt > 0) { 1427 if (cnt > sizeof(buf) - 1) 1428 return -EINVAL; 1429 1430 if (copy_from_user(buf, ubuf, cnt)) 1431 return -EFAULT; 1432 buf[cnt] = 0; 1433 1434 val = simple_strtoul(buf, NULL, 0); 1435 } 1436 1437 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1438 1439 /* 1440 * Turbo will still be enabled, but won't go above the set value. 1441 */ 1442 dev_priv->max_delay = val / 50; 1443 1444 gen6_set_rps(dev, val / 50); 1445 1446 return cnt; 1447 } 1448 1449 static const struct file_operations i915_max_freq_fops = { 1450 .owner = THIS_MODULE, 1451 .open = i915_max_freq_open, 1452 .read = i915_max_freq_read, 1453 .write = i915_max_freq_write, 1454 .llseek = default_llseek, 1455 }; 1456 1457 static int 1458 i915_cache_sharing_open(struct inode *inode, 1459 struct file *filp) 1460 { 1461 filp->private_data = inode->i_private; 1462 return 0; 1463 } 1464 1465 static ssize_t 1466 i915_cache_sharing_read(struct file *filp, 1467 char __user *ubuf, 1468 size_t max, 1469 loff_t *ppos) 1470 { 1471 struct drm_device *dev = filp->private_data; 1472 drm_i915_private_t *dev_priv = dev->dev_private; 1473 char buf[80]; 1474 u32 snpcr; 1475 int len; 1476 1477 mutex_lock(&dev_priv->dev->struct_mutex); 1478 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1479 mutex_unlock(&dev_priv->dev->struct_mutex); 1480 1481 len = snprintf(buf, sizeof(buf), 1482 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1483 GEN6_MBC_SNPCR_SHIFT); 1484 1485 if (len > sizeof(buf)) 1486 len = sizeof(buf); 1487 1488 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1489 } 1490 1491 static ssize_t 1492 i915_cache_sharing_write(struct file *filp, 1493 const char __user *ubuf, 1494 size_t cnt, 1495 loff_t *ppos) 1496 { 1497 struct drm_device *dev = filp->private_data; 1498 struct drm_i915_private *dev_priv = dev->dev_private; 1499 char buf[20]; 1500 u32 snpcr; 1501 int val = 1; 1502 1503 if (cnt > 0) { 1504 if (cnt > sizeof(buf) - 1) 1505 return -EINVAL; 1506 1507 if (copy_from_user(buf, ubuf, cnt)) 1508 return -EFAULT; 1509 buf[cnt] = 0; 1510 1511 val = simple_strtoul(buf, NULL, 0); 1512 } 1513 1514 if (val < 0 || val > 3) 1515 return -EINVAL; 1516 1517 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 1518 1519 /* Update the cache sharing policy here as well */ 1520 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1521 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1522 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1523 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1524 1525 return cnt; 1526 } 1527 1528 static const struct file_operations i915_cache_sharing_fops = { 1529 .owner = THIS_MODULE, 1530 .open = i915_cache_sharing_open, 1531 .read = i915_cache_sharing_read, 1532 .write = i915_cache_sharing_write, 1533 .llseek = default_llseek, 1534 }; 1535 1536 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1537 * allocated we need to hook into the minor for release. */ 1538 static int 1539 drm_add_fake_info_node(struct drm_minor *minor, 1540 struct dentry *ent, 1541 const void *key) 1542 { 1543 struct drm_info_node *node; 1544 1545 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1546 if (node == NULL) { 1547 debugfs_remove(ent); 1548 return -ENOMEM; 1549 } 1550 1551 node->minor = minor; 1552 node->dent = ent; 1553 node->info_ent = (void *) key; 1554 1555 mutex_lock(&minor->debugfs_lock); 1556 list_add(&node->list, &minor->debugfs_list); 1557 mutex_unlock(&minor->debugfs_lock); 1558 1559 return 0; 1560 } 1561 1562 static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) 1563 { 1564 struct drm_device *dev = minor->dev; 1565 struct dentry *ent; 1566 1567 ent = debugfs_create_file("i915_wedged", 1568 S_IRUGO | S_IWUSR, 1569 root, dev, 1570 &i915_wedged_fops); 1571 if (IS_ERR(ent)) 1572 return PTR_ERR(ent); 1573 1574 return drm_add_fake_info_node(minor, ent, &i915_wedged_fops); 1575 } 1576 1577 static int i915_forcewake_open(struct inode *inode, struct file *file) 1578 { 1579 struct drm_device *dev = inode->i_private; 1580 struct drm_i915_private *dev_priv = dev->dev_private; 1581 int ret; 1582 1583 if (!IS_GEN6(dev)) 1584 return 0; 1585 1586 ret = mutex_lock_interruptible(&dev->struct_mutex); 1587 if (ret) 1588 return ret; 1589 gen6_gt_force_wake_get(dev_priv); 1590 mutex_unlock(&dev->struct_mutex); 1591 1592 return 0; 1593 } 1594 1595 int i915_forcewake_release(struct inode *inode, struct file *file) 1596 { 1597 struct drm_device *dev = inode->i_private; 1598 struct drm_i915_private *dev_priv = dev->dev_private; 1599 1600 if (!IS_GEN6(dev)) 1601 return 0; 1602 1603 /* 1604 * It's bad that we can potentially hang userspace if struct_mutex gets 1605 * forever stuck. However, if we cannot acquire this lock it means that 1606 * almost certainly the driver has hung, is not unload-able. Therefore 1607 * hanging here is probably a minor inconvenience not to be seen my 1608 * almost every user. 1609 */ 1610 mutex_lock(&dev->struct_mutex); 1611 gen6_gt_force_wake_put(dev_priv); 1612 mutex_unlock(&dev->struct_mutex); 1613 1614 return 0; 1615 } 1616 1617 static const struct file_operations i915_forcewake_fops = { 1618 .owner = THIS_MODULE, 1619 .open = i915_forcewake_open, 1620 .release = i915_forcewake_release, 1621 }; 1622 1623 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 1624 { 1625 struct drm_device *dev = minor->dev; 1626 struct dentry *ent; 1627 1628 ent = debugfs_create_file("i915_forcewake_user", 1629 S_IRUSR, 1630 root, dev, 1631 &i915_forcewake_fops); 1632 if (IS_ERR(ent)) 1633 return PTR_ERR(ent); 1634 1635 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 1636 } 1637 1638 static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor) 1639 { 1640 struct drm_device *dev = minor->dev; 1641 struct dentry *ent; 1642 1643 ent = debugfs_create_file("i915_max_freq", 1644 S_IRUGO | S_IWUSR, 1645 root, dev, 1646 &i915_max_freq_fops); 1647 if (IS_ERR(ent)) 1648 return PTR_ERR(ent); 1649 1650 return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops); 1651 } 1652 1653 static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor) 1654 { 1655 struct drm_device *dev = minor->dev; 1656 struct dentry *ent; 1657 1658 ent = debugfs_create_file("i915_cache_sharing", 1659 S_IRUGO | S_IWUSR, 1660 root, dev, 1661 &i915_cache_sharing_fops); 1662 if (IS_ERR(ent)) 1663 return PTR_ERR(ent); 1664 1665 return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops); 1666 } 1667 1668 static struct drm_info_list i915_debugfs_list[] = { 1669 {"i915_capabilities", i915_capabilities, 0}, 1670 {"i915_gem_objects", i915_gem_object_info, 0}, 1671 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 1672 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 1673 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, 1674 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 1675 {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, 1676 {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST}, 1677 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 1678 {"i915_gem_request", i915_gem_request_info, 0}, 1679 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1680 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1681 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1682 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 1683 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 1684 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 1685 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, 1686 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, 1687 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, 1688 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, 1689 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS}, 1690 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, 1691 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 1692 {"i915_error_state", i915_error_state, 0}, 1693 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1694 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 1695 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 1696 {"i915_inttoext_table", i915_inttoext_table, 0}, 1697 {"i915_drpc_info", i915_drpc_info, 0}, 1698 {"i915_emon_status", i915_emon_status, 0}, 1699 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 1700 {"i915_gfxec", i915_gfxec, 0}, 1701 {"i915_fbc_status", i915_fbc_status, 0}, 1702 {"i915_sr_status", i915_sr_status, 0}, 1703 {"i915_opregion", i915_opregion, 0}, 1704 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1705 {"i915_context_status", i915_context_status, 0}, 1706 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1707 }; 1708 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1709 1710 int i915_debugfs_init(struct drm_minor *minor) 1711 { 1712 int ret; 1713 1714 ret = i915_wedged_create(minor->debugfs_root, minor); 1715 if (ret) 1716 return ret; 1717 1718 ret = i915_forcewake_create(minor->debugfs_root, minor); 1719 if (ret) 1720 return ret; 1721 ret = i915_max_freq_create(minor->debugfs_root, minor); 1722 if (ret) 1723 return ret; 1724 ret = i915_cache_sharing_create(minor->debugfs_root, minor); 1725 if (ret) 1726 return ret; 1727 1728 return drm_debugfs_create_files(i915_debugfs_list, 1729 I915_DEBUGFS_ENTRIES, 1730 minor->debugfs_root, minor); 1731 } 1732 1733 void i915_debugfs_cleanup(struct drm_minor *minor) 1734 { 1735 drm_debugfs_remove_files(i915_debugfs_list, 1736 I915_DEBUGFS_ENTRIES, minor); 1737 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1738 1, minor); 1739 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1740 1, minor); 1741 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 1742 1, minor); 1743 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 1744 1, minor); 1745 } 1746 1747 #endif /* CONFIG_DEBUG_FS */ 1748