1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/seq_file.h> 30 #include <linux/debugfs.h> 31 #include <linux/slab.h> 32 #include <linux/export.h> 33 #include <drm/drmP.h> 34 #include "intel_drv.h" 35 #include "intel_ringbuffer.h" 36 #include <drm/i915_drm.h> 37 #include "i915_drv.h" 38 39 #define DRM_I915_RING_DEBUG 1 40 41 42 #if defined(CONFIG_DEBUG_FS) 43 44 enum { 45 ACTIVE_LIST, 46 INACTIVE_LIST, 47 PINNED_LIST, 48 }; 49 50 static const char *yesno(int v) 51 { 52 return v ? "yes" : "no"; 53 } 54 55 static int i915_capabilities(struct seq_file *m, void *data) 56 { 57 struct drm_info_node *node = (struct drm_info_node *) m->private; 58 struct drm_device *dev = node->minor->dev; 59 const struct intel_device_info *info = INTEL_INFO(dev); 60 61 seq_printf(m, "gen: %d\n", info->gen); 62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 63 #define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 64 #define DEV_INFO_SEP ; 65 DEV_INFO_FLAGS; 66 #undef DEV_INFO_FLAG 67 #undef DEV_INFO_SEP 68 69 return 0; 70 } 71 72 static const char *get_pin_flag(struct drm_i915_gem_object *obj) 73 { 74 if (obj->user_pin_count > 0) 75 return "P"; 76 else if (obj->pin_count > 0) 77 return "p"; 78 else 79 return " "; 80 } 81 82 static const char *get_tiling_flag(struct drm_i915_gem_object *obj) 83 { 84 switch (obj->tiling_mode) { 85 default: 86 case I915_TILING_NONE: return " "; 87 case I915_TILING_X: return "X"; 88 case I915_TILING_Y: return "Y"; 89 } 90 } 91 92 static const char *cache_level_str(int type) 93 { 94 switch (type) { 95 case I915_CACHE_NONE: return " uncached"; 96 case I915_CACHE_LLC: return " snooped (LLC)"; 97 case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; 98 default: return ""; 99 } 100 } 101 102 static void 103 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 104 { 105 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", 106 &obj->base, 107 get_pin_flag(obj), 108 get_tiling_flag(obj), 109 obj->base.size / 1024, 110 obj->base.read_domains, 111 obj->base.write_domain, 112 obj->last_read_seqno, 113 obj->last_write_seqno, 114 obj->last_fenced_seqno, 115 cache_level_str(obj->cache_level), 116 obj->dirty ? " dirty" : "", 117 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 118 if (obj->base.name) 119 seq_printf(m, " (name: %d)", obj->base.name); 120 if (obj->pin_count) 121 seq_printf(m, " (pinned x %d)", obj->pin_count); 122 if (obj->fence_reg != I915_FENCE_REG_NONE) 123 seq_printf(m, " (fence: %d)", obj->fence_reg); 124 if (obj->gtt_space != NULL) 125 seq_printf(m, " (gtt offset: %08x, size: %08x)", 126 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 127 if (obj->pin_mappable || obj->fault_mappable) { 128 char s[3], *t = s; 129 if (obj->pin_mappable) 130 *t++ = 'p'; 131 if (obj->fault_mappable) 132 *t++ = 'f'; 133 *t = '\0'; 134 seq_printf(m, " (%s mappable)", s); 135 } 136 if (obj->ring != NULL) 137 seq_printf(m, " (%s)", obj->ring->name); 138 } 139 140 static int i915_gem_object_list_info(struct seq_file *m, void *data) 141 { 142 struct drm_info_node *node = (struct drm_info_node *) m->private; 143 uintptr_t list = (uintptr_t) node->info_ent->data; 144 struct list_head *head; 145 struct drm_device *dev = node->minor->dev; 146 drm_i915_private_t *dev_priv = dev->dev_private; 147 struct drm_i915_gem_object *obj; 148 size_t total_obj_size, total_gtt_size; 149 int count, ret; 150 151 ret = mutex_lock_interruptible(&dev->struct_mutex); 152 if (ret) 153 return ret; 154 155 switch (list) { 156 case ACTIVE_LIST: 157 seq_printf(m, "Active:\n"); 158 head = &dev_priv->mm.active_list; 159 break; 160 case INACTIVE_LIST: 161 seq_printf(m, "Inactive:\n"); 162 head = &dev_priv->mm.inactive_list; 163 break; 164 default: 165 mutex_unlock(&dev->struct_mutex); 166 return -EINVAL; 167 } 168 169 total_obj_size = total_gtt_size = count = 0; 170 list_for_each_entry(obj, head, mm_list) { 171 seq_printf(m, " "); 172 describe_obj(m, obj); 173 seq_printf(m, "\n"); 174 total_obj_size += obj->base.size; 175 total_gtt_size += obj->gtt_space->size; 176 count++; 177 } 178 mutex_unlock(&dev->struct_mutex); 179 180 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 181 count, total_obj_size, total_gtt_size); 182 return 0; 183 } 184 185 #define count_objects(list, member) do { \ 186 list_for_each_entry(obj, list, member) { \ 187 size += obj->gtt_space->size; \ 188 ++count; \ 189 if (obj->map_and_fenceable) { \ 190 mappable_size += obj->gtt_space->size; \ 191 ++mappable_count; \ 192 } \ 193 } \ 194 } while (0) 195 196 static int i915_gem_object_info(struct seq_file *m, void* data) 197 { 198 struct drm_info_node *node = (struct drm_info_node *) m->private; 199 struct drm_device *dev = node->minor->dev; 200 struct drm_i915_private *dev_priv = dev->dev_private; 201 u32 count, mappable_count, purgeable_count; 202 size_t size, mappable_size, purgeable_size; 203 struct drm_i915_gem_object *obj; 204 int ret; 205 206 ret = mutex_lock_interruptible(&dev->struct_mutex); 207 if (ret) 208 return ret; 209 210 seq_printf(m, "%u objects, %zu bytes\n", 211 dev_priv->mm.object_count, 212 dev_priv->mm.object_memory); 213 214 size = count = mappable_size = mappable_count = 0; 215 count_objects(&dev_priv->mm.bound_list, gtt_list); 216 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 217 count, mappable_count, size, mappable_size); 218 219 size = count = mappable_size = mappable_count = 0; 220 count_objects(&dev_priv->mm.active_list, mm_list); 221 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 222 count, mappable_count, size, mappable_size); 223 224 size = count = mappable_size = mappable_count = 0; 225 count_objects(&dev_priv->mm.inactive_list, mm_list); 226 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 227 count, mappable_count, size, mappable_size); 228 229 size = count = purgeable_size = purgeable_count = 0; 230 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { 231 size += obj->base.size, ++count; 232 if (obj->madv == I915_MADV_DONTNEED) 233 purgeable_size += obj->base.size, ++purgeable_count; 234 } 235 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); 236 237 size = count = mappable_size = mappable_count = 0; 238 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 239 if (obj->fault_mappable) { 240 size += obj->gtt_space->size; 241 ++count; 242 } 243 if (obj->pin_mappable) { 244 mappable_size += obj->gtt_space->size; 245 ++mappable_count; 246 } 247 if (obj->madv == I915_MADV_DONTNEED) { 248 purgeable_size += obj->base.size; 249 ++purgeable_count; 250 } 251 } 252 seq_printf(m, "%u purgeable objects, %zu bytes\n", 253 purgeable_count, purgeable_size); 254 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 255 mappable_count, mappable_size); 256 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 257 count, size); 258 259 seq_printf(m, "%zu [%zu] gtt total\n", 260 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 261 262 mutex_unlock(&dev->struct_mutex); 263 264 return 0; 265 } 266 267 static int i915_gem_gtt_info(struct seq_file *m, void* data) 268 { 269 struct drm_info_node *node = (struct drm_info_node *) m->private; 270 struct drm_device *dev = node->minor->dev; 271 uintptr_t list = (uintptr_t) node->info_ent->data; 272 struct drm_i915_private *dev_priv = dev->dev_private; 273 struct drm_i915_gem_object *obj; 274 size_t total_obj_size, total_gtt_size; 275 int count, ret; 276 277 ret = mutex_lock_interruptible(&dev->struct_mutex); 278 if (ret) 279 return ret; 280 281 total_obj_size = total_gtt_size = count = 0; 282 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 283 if (list == PINNED_LIST && obj->pin_count == 0) 284 continue; 285 286 seq_printf(m, " "); 287 describe_obj(m, obj); 288 seq_printf(m, "\n"); 289 total_obj_size += obj->base.size; 290 total_gtt_size += obj->gtt_space->size; 291 count++; 292 } 293 294 mutex_unlock(&dev->struct_mutex); 295 296 seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", 297 count, total_obj_size, total_gtt_size); 298 299 return 0; 300 } 301 302 static int i915_gem_pageflip_info(struct seq_file *m, void *data) 303 { 304 struct drm_info_node *node = (struct drm_info_node *) m->private; 305 struct drm_device *dev = node->minor->dev; 306 unsigned long flags; 307 struct intel_crtc *crtc; 308 309 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 310 const char pipe = pipe_name(crtc->pipe); 311 const char plane = plane_name(crtc->plane); 312 struct intel_unpin_work *work; 313 314 spin_lock_irqsave(&dev->event_lock, flags); 315 work = crtc->unpin_work; 316 if (work == NULL) { 317 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 318 pipe, plane); 319 } else { 320 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 321 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 322 pipe, plane); 323 } else { 324 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 325 pipe, plane); 326 } 327 if (work->enable_stall_check) 328 seq_printf(m, "Stall check enabled, "); 329 else 330 seq_printf(m, "Stall check waiting for page flip ioctl, "); 331 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 332 333 if (work->old_fb_obj) { 334 struct drm_i915_gem_object *obj = work->old_fb_obj; 335 if (obj) 336 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 337 } 338 if (work->pending_flip_obj) { 339 struct drm_i915_gem_object *obj = work->pending_flip_obj; 340 if (obj) 341 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); 342 } 343 } 344 spin_unlock_irqrestore(&dev->event_lock, flags); 345 } 346 347 return 0; 348 } 349 350 static int i915_gem_request_info(struct seq_file *m, void *data) 351 { 352 struct drm_info_node *node = (struct drm_info_node *) m->private; 353 struct drm_device *dev = node->minor->dev; 354 drm_i915_private_t *dev_priv = dev->dev_private; 355 struct intel_ring_buffer *ring; 356 struct drm_i915_gem_request *gem_request; 357 int ret, count, i; 358 359 ret = mutex_lock_interruptible(&dev->struct_mutex); 360 if (ret) 361 return ret; 362 363 count = 0; 364 for_each_ring(ring, dev_priv, i) { 365 if (list_empty(&ring->request_list)) 366 continue; 367 368 seq_printf(m, "%s requests:\n", ring->name); 369 list_for_each_entry(gem_request, 370 &ring->request_list, 371 list) { 372 seq_printf(m, " %d @ %d\n", 373 gem_request->seqno, 374 (int) (jiffies - gem_request->emitted_jiffies)); 375 } 376 count++; 377 } 378 mutex_unlock(&dev->struct_mutex); 379 380 if (count == 0) 381 seq_printf(m, "No requests\n"); 382 383 return 0; 384 } 385 386 static void i915_ring_seqno_info(struct seq_file *m, 387 struct intel_ring_buffer *ring) 388 { 389 if (ring->get_seqno) { 390 seq_printf(m, "Current sequence (%s): %d\n", 391 ring->name, ring->get_seqno(ring, false)); 392 } 393 } 394 395 static int i915_gem_seqno_info(struct seq_file *m, void *data) 396 { 397 struct drm_info_node *node = (struct drm_info_node *) m->private; 398 struct drm_device *dev = node->minor->dev; 399 drm_i915_private_t *dev_priv = dev->dev_private; 400 struct intel_ring_buffer *ring; 401 int ret, i; 402 403 ret = mutex_lock_interruptible(&dev->struct_mutex); 404 if (ret) 405 return ret; 406 407 for_each_ring(ring, dev_priv, i) 408 i915_ring_seqno_info(m, ring); 409 410 mutex_unlock(&dev->struct_mutex); 411 412 return 0; 413 } 414 415 416 static int i915_interrupt_info(struct seq_file *m, void *data) 417 { 418 struct drm_info_node *node = (struct drm_info_node *) m->private; 419 struct drm_device *dev = node->minor->dev; 420 drm_i915_private_t *dev_priv = dev->dev_private; 421 struct intel_ring_buffer *ring; 422 int ret, i, pipe; 423 424 ret = mutex_lock_interruptible(&dev->struct_mutex); 425 if (ret) 426 return ret; 427 428 if (IS_VALLEYVIEW(dev)) { 429 seq_printf(m, "Display IER:\t%08x\n", 430 I915_READ(VLV_IER)); 431 seq_printf(m, "Display IIR:\t%08x\n", 432 I915_READ(VLV_IIR)); 433 seq_printf(m, "Display IIR_RW:\t%08x\n", 434 I915_READ(VLV_IIR_RW)); 435 seq_printf(m, "Display IMR:\t%08x\n", 436 I915_READ(VLV_IMR)); 437 for_each_pipe(pipe) 438 seq_printf(m, "Pipe %c stat:\t%08x\n", 439 pipe_name(pipe), 440 I915_READ(PIPESTAT(pipe))); 441 442 seq_printf(m, "Master IER:\t%08x\n", 443 I915_READ(VLV_MASTER_IER)); 444 445 seq_printf(m, "Render IER:\t%08x\n", 446 I915_READ(GTIER)); 447 seq_printf(m, "Render IIR:\t%08x\n", 448 I915_READ(GTIIR)); 449 seq_printf(m, "Render IMR:\t%08x\n", 450 I915_READ(GTIMR)); 451 452 seq_printf(m, "PM IER:\t\t%08x\n", 453 I915_READ(GEN6_PMIER)); 454 seq_printf(m, "PM IIR:\t\t%08x\n", 455 I915_READ(GEN6_PMIIR)); 456 seq_printf(m, "PM IMR:\t\t%08x\n", 457 I915_READ(GEN6_PMIMR)); 458 459 seq_printf(m, "Port hotplug:\t%08x\n", 460 I915_READ(PORT_HOTPLUG_EN)); 461 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 462 I915_READ(VLV_DPFLIPSTAT)); 463 seq_printf(m, "DPINVGTT:\t%08x\n", 464 I915_READ(DPINVGTT)); 465 466 } else if (!HAS_PCH_SPLIT(dev)) { 467 seq_printf(m, "Interrupt enable: %08x\n", 468 I915_READ(IER)); 469 seq_printf(m, "Interrupt identity: %08x\n", 470 I915_READ(IIR)); 471 seq_printf(m, "Interrupt mask: %08x\n", 472 I915_READ(IMR)); 473 for_each_pipe(pipe) 474 seq_printf(m, "Pipe %c stat: %08x\n", 475 pipe_name(pipe), 476 I915_READ(PIPESTAT(pipe))); 477 } else { 478 seq_printf(m, "North Display Interrupt enable: %08x\n", 479 I915_READ(DEIER)); 480 seq_printf(m, "North Display Interrupt identity: %08x\n", 481 I915_READ(DEIIR)); 482 seq_printf(m, "North Display Interrupt mask: %08x\n", 483 I915_READ(DEIMR)); 484 seq_printf(m, "South Display Interrupt enable: %08x\n", 485 I915_READ(SDEIER)); 486 seq_printf(m, "South Display Interrupt identity: %08x\n", 487 I915_READ(SDEIIR)); 488 seq_printf(m, "South Display Interrupt mask: %08x\n", 489 I915_READ(SDEIMR)); 490 seq_printf(m, "Graphics Interrupt enable: %08x\n", 491 I915_READ(GTIER)); 492 seq_printf(m, "Graphics Interrupt identity: %08x\n", 493 I915_READ(GTIIR)); 494 seq_printf(m, "Graphics Interrupt mask: %08x\n", 495 I915_READ(GTIMR)); 496 } 497 seq_printf(m, "Interrupts received: %d\n", 498 atomic_read(&dev_priv->irq_received)); 499 for_each_ring(ring, dev_priv, i) { 500 if (IS_GEN6(dev) || IS_GEN7(dev)) { 501 seq_printf(m, 502 "Graphics Interrupt mask (%s): %08x\n", 503 ring->name, I915_READ_IMR(ring)); 504 } 505 i915_ring_seqno_info(m, ring); 506 } 507 mutex_unlock(&dev->struct_mutex); 508 509 return 0; 510 } 511 512 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 513 { 514 struct drm_info_node *node = (struct drm_info_node *) m->private; 515 struct drm_device *dev = node->minor->dev; 516 drm_i915_private_t *dev_priv = dev->dev_private; 517 int i, ret; 518 519 ret = mutex_lock_interruptible(&dev->struct_mutex); 520 if (ret) 521 return ret; 522 523 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 524 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 525 for (i = 0; i < dev_priv->num_fence_regs; i++) { 526 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 527 528 seq_printf(m, "Fence %d, pin count = %d, object = ", 529 i, dev_priv->fence_regs[i].pin_count); 530 if (obj == NULL) 531 seq_printf(m, "unused"); 532 else 533 describe_obj(m, obj); 534 seq_printf(m, "\n"); 535 } 536 537 mutex_unlock(&dev->struct_mutex); 538 return 0; 539 } 540 541 static int i915_hws_info(struct seq_file *m, void *data) 542 { 543 struct drm_info_node *node = (struct drm_info_node *) m->private; 544 struct drm_device *dev = node->minor->dev; 545 drm_i915_private_t *dev_priv = dev->dev_private; 546 struct intel_ring_buffer *ring; 547 const volatile u32 __iomem *hws; 548 int i; 549 550 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 551 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 552 if (hws == NULL) 553 return 0; 554 555 for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) { 556 seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 557 i * 4, 558 hws[i], hws[i + 1], hws[i + 2], hws[i + 3]); 559 } 560 return 0; 561 } 562 563 static const char *ring_str(int ring) 564 { 565 switch (ring) { 566 case RCS: return "render"; 567 case VCS: return "bsd"; 568 case BCS: return "blt"; 569 default: return ""; 570 } 571 } 572 573 static const char *pin_flag(int pinned) 574 { 575 if (pinned > 0) 576 return " P"; 577 else if (pinned < 0) 578 return " p"; 579 else 580 return ""; 581 } 582 583 static const char *tiling_flag(int tiling) 584 { 585 switch (tiling) { 586 default: 587 case I915_TILING_NONE: return ""; 588 case I915_TILING_X: return " X"; 589 case I915_TILING_Y: return " Y"; 590 } 591 } 592 593 static const char *dirty_flag(int dirty) 594 { 595 return dirty ? " dirty" : ""; 596 } 597 598 static const char *purgeable_flag(int purgeable) 599 { 600 return purgeable ? " purgeable" : ""; 601 } 602 603 static void print_error_buffers(struct seq_file *m, 604 const char *name, 605 struct drm_i915_error_buffer *err, 606 int count) 607 { 608 seq_printf(m, "%s [%d]:\n", name, count); 609 610 while (count--) { 611 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", 612 err->gtt_offset, 613 err->size, 614 err->read_domains, 615 err->write_domain, 616 err->rseqno, err->wseqno, 617 pin_flag(err->pinned), 618 tiling_flag(err->tiling), 619 dirty_flag(err->dirty), 620 purgeable_flag(err->purgeable), 621 err->ring != -1 ? " " : "", 622 ring_str(err->ring), 623 cache_level_str(err->cache_level)); 624 625 if (err->name) 626 seq_printf(m, " (name: %d)", err->name); 627 if (err->fence_reg != I915_FENCE_REG_NONE) 628 seq_printf(m, " (fence: %d)", err->fence_reg); 629 630 seq_printf(m, "\n"); 631 err++; 632 } 633 } 634 635 static void i915_ring_error_state(struct seq_file *m, 636 struct drm_device *dev, 637 struct drm_i915_error_state *error, 638 unsigned ring) 639 { 640 BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 641 seq_printf(m, "%s command stream:\n", ring_str(ring)); 642 seq_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 643 seq_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 644 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 645 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 646 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 647 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 648 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) 649 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 650 651 if (INTEL_INFO(dev)->gen >= 4) 652 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 653 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 654 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 655 if (INTEL_INFO(dev)->gen >= 6) { 656 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 657 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 658 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 659 error->semaphore_mboxes[ring][0], 660 error->semaphore_seqno[ring][0]); 661 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 662 error->semaphore_mboxes[ring][1], 663 error->semaphore_seqno[ring][1]); 664 } 665 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 666 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 667 seq_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 668 seq_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 669 } 670 671 struct i915_error_state_file_priv { 672 struct drm_device *dev; 673 struct drm_i915_error_state *error; 674 }; 675 676 static int i915_error_state(struct seq_file *m, void *unused) 677 { 678 struct i915_error_state_file_priv *error_priv = m->private; 679 struct drm_device *dev = error_priv->dev; 680 drm_i915_private_t *dev_priv = dev->dev_private; 681 struct drm_i915_error_state *error = error_priv->error; 682 struct intel_ring_buffer *ring; 683 int i, j, page, offset, elt; 684 685 if (!error) { 686 seq_printf(m, "no error state collected\n"); 687 return 0; 688 } 689 690 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 691 error->time.tv_usec); 692 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 693 seq_printf(m, "EIR: 0x%08x\n", error->eir); 694 seq_printf(m, "IER: 0x%08x\n", error->ier); 695 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); 696 seq_printf(m, "CCID: 0x%08x\n", error->ccid); 697 698 for (i = 0; i < dev_priv->num_fence_regs; i++) 699 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 700 701 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) 702 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]); 703 704 if (INTEL_INFO(dev)->gen >= 6) { 705 seq_printf(m, "ERROR: 0x%08x\n", error->error); 706 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 707 } 708 709 if (INTEL_INFO(dev)->gen == 7) 710 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 711 712 for_each_ring(ring, dev_priv, i) 713 i915_ring_error_state(m, dev, error, i); 714 715 if (error->active_bo) 716 print_error_buffers(m, "Active", 717 error->active_bo, 718 error->active_bo_count); 719 720 if (error->pinned_bo) 721 print_error_buffers(m, "Pinned", 722 error->pinned_bo, 723 error->pinned_bo_count); 724 725 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 726 struct drm_i915_error_object *obj; 727 728 if ((obj = error->ring[i].batchbuffer)) { 729 seq_printf(m, "%s --- gtt_offset = 0x%08x\n", 730 dev_priv->ring[i].name, 731 obj->gtt_offset); 732 offset = 0; 733 for (page = 0; page < obj->page_count; page++) { 734 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 735 seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); 736 offset += 4; 737 } 738 } 739 } 740 741 if (error->ring[i].num_requests) { 742 seq_printf(m, "%s --- %d requests\n", 743 dev_priv->ring[i].name, 744 error->ring[i].num_requests); 745 for (j = 0; j < error->ring[i].num_requests; j++) { 746 seq_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", 747 error->ring[i].requests[j].seqno, 748 error->ring[i].requests[j].jiffies, 749 error->ring[i].requests[j].tail); 750 } 751 } 752 753 if ((obj = error->ring[i].ringbuffer)) { 754 seq_printf(m, "%s --- ringbuffer = 0x%08x\n", 755 dev_priv->ring[i].name, 756 obj->gtt_offset); 757 offset = 0; 758 for (page = 0; page < obj->page_count; page++) { 759 for (elt = 0; elt < PAGE_SIZE/4; elt++) { 760 seq_printf(m, "%08x : %08x\n", 761 offset, 762 obj->pages[page][elt]); 763 offset += 4; 764 } 765 } 766 } 767 } 768 769 if (error->overlay) 770 intel_overlay_print_error_state(m, error->overlay); 771 772 if (error->display) 773 intel_display_print_error_state(m, dev, error->display); 774 775 return 0; 776 } 777 778 static ssize_t 779 i915_error_state_write(struct file *filp, 780 const char __user *ubuf, 781 size_t cnt, 782 loff_t *ppos) 783 { 784 struct seq_file *m = filp->private_data; 785 struct i915_error_state_file_priv *error_priv = m->private; 786 struct drm_device *dev = error_priv->dev; 787 int ret; 788 789 DRM_DEBUG_DRIVER("Resetting error state\n"); 790 791 ret = mutex_lock_interruptible(&dev->struct_mutex); 792 if (ret) 793 return ret; 794 795 i915_destroy_error_state(dev); 796 mutex_unlock(&dev->struct_mutex); 797 798 return cnt; 799 } 800 801 static int i915_error_state_open(struct inode *inode, struct file *file) 802 { 803 struct drm_device *dev = inode->i_private; 804 drm_i915_private_t *dev_priv = dev->dev_private; 805 struct i915_error_state_file_priv *error_priv; 806 unsigned long flags; 807 808 error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); 809 if (!error_priv) 810 return -ENOMEM; 811 812 error_priv->dev = dev; 813 814 spin_lock_irqsave(&dev_priv->error_lock, flags); 815 error_priv->error = dev_priv->first_error; 816 if (error_priv->error) 817 kref_get(&error_priv->error->ref); 818 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 819 820 return single_open(file, i915_error_state, error_priv); 821 } 822 823 static int i915_error_state_release(struct inode *inode, struct file *file) 824 { 825 struct seq_file *m = file->private_data; 826 struct i915_error_state_file_priv *error_priv = m->private; 827 828 if (error_priv->error) 829 kref_put(&error_priv->error->ref, i915_error_state_free); 830 kfree(error_priv); 831 832 return single_release(inode, file); 833 } 834 835 static const struct file_operations i915_error_state_fops = { 836 .owner = THIS_MODULE, 837 .open = i915_error_state_open, 838 .read = seq_read, 839 .write = i915_error_state_write, 840 .llseek = default_llseek, 841 .release = i915_error_state_release, 842 }; 843 844 static int i915_rstdby_delays(struct seq_file *m, void *unused) 845 { 846 struct drm_info_node *node = (struct drm_info_node *) m->private; 847 struct drm_device *dev = node->minor->dev; 848 drm_i915_private_t *dev_priv = dev->dev_private; 849 u16 crstanddelay; 850 int ret; 851 852 ret = mutex_lock_interruptible(&dev->struct_mutex); 853 if (ret) 854 return ret; 855 856 crstanddelay = I915_READ16(CRSTANDVID); 857 858 mutex_unlock(&dev->struct_mutex); 859 860 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 861 862 return 0; 863 } 864 865 static int i915_cur_delayinfo(struct seq_file *m, void *unused) 866 { 867 struct drm_info_node *node = (struct drm_info_node *) m->private; 868 struct drm_device *dev = node->minor->dev; 869 drm_i915_private_t *dev_priv = dev->dev_private; 870 int ret; 871 872 if (IS_GEN5(dev)) { 873 u16 rgvswctl = I915_READ16(MEMSWCTL); 874 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 875 876 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 877 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 878 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 879 MEMSTAT_VID_SHIFT); 880 seq_printf(m, "Current P-state: %d\n", 881 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 882 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 883 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 884 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 885 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 886 u32 rpstat; 887 u32 rpupei, rpcurup, rpprevup; 888 u32 rpdownei, rpcurdown, rpprevdown; 889 int max_freq; 890 891 /* RPSTAT1 is in the GT power well */ 892 ret = mutex_lock_interruptible(&dev->struct_mutex); 893 if (ret) 894 return ret; 895 896 gen6_gt_force_wake_get(dev_priv); 897 898 rpstat = I915_READ(GEN6_RPSTAT1); 899 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); 900 rpcurup = I915_READ(GEN6_RP_CUR_UP); 901 rpprevup = I915_READ(GEN6_RP_PREV_UP); 902 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 903 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 904 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 905 906 gen6_gt_force_wake_put(dev_priv); 907 mutex_unlock(&dev->struct_mutex); 908 909 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 910 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 911 seq_printf(m, "Render p-state ratio: %d\n", 912 (gt_perf_status & 0xff00) >> 8); 913 seq_printf(m, "Render p-state VID: %d\n", 914 gt_perf_status & 0xff); 915 seq_printf(m, "Render p-state limit: %d\n", 916 rp_state_limits & 0xff); 917 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 918 GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER); 919 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 920 GEN6_CURICONT_MASK); 921 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 922 GEN6_CURBSYTAVG_MASK); 923 seq_printf(m, "RP PREV UP: %dus\n", rpprevup & 924 GEN6_CURBSYTAVG_MASK); 925 seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei & 926 GEN6_CURIAVG_MASK); 927 seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown & 928 GEN6_CURBSYTAVG_MASK); 929 seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown & 930 GEN6_CURBSYTAVG_MASK); 931 932 max_freq = (rp_state_cap & 0xff0000) >> 16; 933 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 934 max_freq * GT_FREQUENCY_MULTIPLIER); 935 936 max_freq = (rp_state_cap & 0xff00) >> 8; 937 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 938 max_freq * GT_FREQUENCY_MULTIPLIER); 939 940 max_freq = rp_state_cap & 0xff; 941 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 942 max_freq * GT_FREQUENCY_MULTIPLIER); 943 } else { 944 seq_printf(m, "no P-state info available\n"); 945 } 946 947 return 0; 948 } 949 950 static int i915_delayfreq_table(struct seq_file *m, void *unused) 951 { 952 struct drm_info_node *node = (struct drm_info_node *) m->private; 953 struct drm_device *dev = node->minor->dev; 954 drm_i915_private_t *dev_priv = dev->dev_private; 955 u32 delayfreq; 956 int ret, i; 957 958 ret = mutex_lock_interruptible(&dev->struct_mutex); 959 if (ret) 960 return ret; 961 962 for (i = 0; i < 16; i++) { 963 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 964 seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq, 965 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 966 } 967 968 mutex_unlock(&dev->struct_mutex); 969 970 return 0; 971 } 972 973 static inline int MAP_TO_MV(int map) 974 { 975 return 1250 - (map * 25); 976 } 977 978 static int i915_inttoext_table(struct seq_file *m, void *unused) 979 { 980 struct drm_info_node *node = (struct drm_info_node *) m->private; 981 struct drm_device *dev = node->minor->dev; 982 drm_i915_private_t *dev_priv = dev->dev_private; 983 u32 inttoext; 984 int ret, i; 985 986 ret = mutex_lock_interruptible(&dev->struct_mutex); 987 if (ret) 988 return ret; 989 990 for (i = 1; i <= 32; i++) { 991 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 992 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 993 } 994 995 mutex_unlock(&dev->struct_mutex); 996 997 return 0; 998 } 999 1000 static int ironlake_drpc_info(struct seq_file *m) 1001 { 1002 struct drm_info_node *node = (struct drm_info_node *) m->private; 1003 struct drm_device *dev = node->minor->dev; 1004 drm_i915_private_t *dev_priv = dev->dev_private; 1005 u32 rgvmodectl, rstdbyctl; 1006 u16 crstandvid; 1007 int ret; 1008 1009 ret = mutex_lock_interruptible(&dev->struct_mutex); 1010 if (ret) 1011 return ret; 1012 1013 rgvmodectl = I915_READ(MEMMODECTL); 1014 rstdbyctl = I915_READ(RSTDBYCTL); 1015 crstandvid = I915_READ16(CRSTANDVID); 1016 1017 mutex_unlock(&dev->struct_mutex); 1018 1019 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1020 "yes" : "no"); 1021 seq_printf(m, "Boost freq: %d\n", 1022 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1023 MEMMODE_BOOST_FREQ_SHIFT); 1024 seq_printf(m, "HW control enabled: %s\n", 1025 rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); 1026 seq_printf(m, "SW control enabled: %s\n", 1027 rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); 1028 seq_printf(m, "Gated voltage change: %s\n", 1029 rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); 1030 seq_printf(m, "Starting frequency: P%d\n", 1031 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1032 seq_printf(m, "Max P-state: P%d\n", 1033 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1034 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1035 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1036 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1037 seq_printf(m, "Render standby enabled: %s\n", 1038 (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); 1039 seq_printf(m, "Current RS state: "); 1040 switch (rstdbyctl & RSX_STATUS_MASK) { 1041 case RSX_STATUS_ON: 1042 seq_printf(m, "on\n"); 1043 break; 1044 case RSX_STATUS_RC1: 1045 seq_printf(m, "RC1\n"); 1046 break; 1047 case RSX_STATUS_RC1E: 1048 seq_printf(m, "RC1E\n"); 1049 break; 1050 case RSX_STATUS_RS1: 1051 seq_printf(m, "RS1\n"); 1052 break; 1053 case RSX_STATUS_RS2: 1054 seq_printf(m, "RS2 (RC6)\n"); 1055 break; 1056 case RSX_STATUS_RS3: 1057 seq_printf(m, "RC3 (RC6+)\n"); 1058 break; 1059 default: 1060 seq_printf(m, "unknown\n"); 1061 break; 1062 } 1063 1064 return 0; 1065 } 1066 1067 static int gen6_drpc_info(struct seq_file *m) 1068 { 1069 1070 struct drm_info_node *node = (struct drm_info_node *) m->private; 1071 struct drm_device *dev = node->minor->dev; 1072 struct drm_i915_private *dev_priv = dev->dev_private; 1073 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1074 unsigned forcewake_count; 1075 int count=0, ret; 1076 1077 1078 ret = mutex_lock_interruptible(&dev->struct_mutex); 1079 if (ret) 1080 return ret; 1081 1082 spin_lock_irq(&dev_priv->gt_lock); 1083 forcewake_count = dev_priv->forcewake_count; 1084 spin_unlock_irq(&dev_priv->gt_lock); 1085 1086 if (forcewake_count) { 1087 seq_printf(m, "RC information inaccurate because somebody " 1088 "holds a forcewake reference \n"); 1089 } else { 1090 /* NB: we cannot use forcewake, else we read the wrong values */ 1091 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1092 udelay(10); 1093 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1094 } 1095 1096 gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); 1097 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); 1098 1099 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1100 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1101 mutex_unlock(&dev->struct_mutex); 1102 mutex_lock(&dev_priv->rps.hw_lock); 1103 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1104 mutex_unlock(&dev_priv->rps.hw_lock); 1105 1106 seq_printf(m, "Video Turbo Mode: %s\n", 1107 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1108 seq_printf(m, "HW control enabled: %s\n", 1109 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1110 seq_printf(m, "SW control enabled: %s\n", 1111 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1112 GEN6_RP_MEDIA_SW_MODE)); 1113 seq_printf(m, "RC1e Enabled: %s\n", 1114 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1115 seq_printf(m, "RC6 Enabled: %s\n", 1116 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1117 seq_printf(m, "Deep RC6 Enabled: %s\n", 1118 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1119 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1120 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1121 seq_printf(m, "Current RC state: "); 1122 switch (gt_core_status & GEN6_RCn_MASK) { 1123 case GEN6_RC0: 1124 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1125 seq_printf(m, "Core Power Down\n"); 1126 else 1127 seq_printf(m, "on\n"); 1128 break; 1129 case GEN6_RC3: 1130 seq_printf(m, "RC3\n"); 1131 break; 1132 case GEN6_RC6: 1133 seq_printf(m, "RC6\n"); 1134 break; 1135 case GEN6_RC7: 1136 seq_printf(m, "RC7\n"); 1137 break; 1138 default: 1139 seq_printf(m, "Unknown\n"); 1140 break; 1141 } 1142 1143 seq_printf(m, "Core Power Down: %s\n", 1144 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1145 1146 /* Not exactly sure what this is */ 1147 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", 1148 I915_READ(GEN6_GT_GFX_RC6_LOCKED)); 1149 seq_printf(m, "RC6 residency since boot: %u\n", 1150 I915_READ(GEN6_GT_GFX_RC6)); 1151 seq_printf(m, "RC6+ residency since boot: %u\n", 1152 I915_READ(GEN6_GT_GFX_RC6p)); 1153 seq_printf(m, "RC6++ residency since boot: %u\n", 1154 I915_READ(GEN6_GT_GFX_RC6pp)); 1155 1156 seq_printf(m, "RC6 voltage: %dmV\n", 1157 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1158 seq_printf(m, "RC6+ voltage: %dmV\n", 1159 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1160 seq_printf(m, "RC6++ voltage: %dmV\n", 1161 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1162 return 0; 1163 } 1164 1165 static int i915_drpc_info(struct seq_file *m, void *unused) 1166 { 1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1168 struct drm_device *dev = node->minor->dev; 1169 1170 if (IS_GEN6(dev) || IS_GEN7(dev)) 1171 return gen6_drpc_info(m); 1172 else 1173 return ironlake_drpc_info(m); 1174 } 1175 1176 static int i915_fbc_status(struct seq_file *m, void *unused) 1177 { 1178 struct drm_info_node *node = (struct drm_info_node *) m->private; 1179 struct drm_device *dev = node->minor->dev; 1180 drm_i915_private_t *dev_priv = dev->dev_private; 1181 1182 if (!I915_HAS_FBC(dev)) { 1183 seq_printf(m, "FBC unsupported on this chipset\n"); 1184 return 0; 1185 } 1186 1187 if (intel_fbc_enabled(dev)) { 1188 seq_printf(m, "FBC enabled\n"); 1189 } else { 1190 seq_printf(m, "FBC disabled: "); 1191 switch (dev_priv->no_fbc_reason) { 1192 case FBC_NO_OUTPUT: 1193 seq_printf(m, "no outputs"); 1194 break; 1195 case FBC_STOLEN_TOO_SMALL: 1196 seq_printf(m, "not enough stolen memory"); 1197 break; 1198 case FBC_UNSUPPORTED_MODE: 1199 seq_printf(m, "mode not supported"); 1200 break; 1201 case FBC_MODE_TOO_LARGE: 1202 seq_printf(m, "mode too large"); 1203 break; 1204 case FBC_BAD_PLANE: 1205 seq_printf(m, "FBC unsupported on plane"); 1206 break; 1207 case FBC_NOT_TILED: 1208 seq_printf(m, "scanout buffer not tiled"); 1209 break; 1210 case FBC_MULTIPLE_PIPES: 1211 seq_printf(m, "multiple pipes are enabled"); 1212 break; 1213 case FBC_MODULE_PARAM: 1214 seq_printf(m, "disabled per module param (default off)"); 1215 break; 1216 default: 1217 seq_printf(m, "unknown reason"); 1218 } 1219 seq_printf(m, "\n"); 1220 } 1221 return 0; 1222 } 1223 1224 static int i915_sr_status(struct seq_file *m, void *unused) 1225 { 1226 struct drm_info_node *node = (struct drm_info_node *) m->private; 1227 struct drm_device *dev = node->minor->dev; 1228 drm_i915_private_t *dev_priv = dev->dev_private; 1229 bool sr_enabled = false; 1230 1231 if (HAS_PCH_SPLIT(dev)) 1232 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1233 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 1234 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1235 else if (IS_I915GM(dev)) 1236 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1237 else if (IS_PINEVIEW(dev)) 1238 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1239 1240 seq_printf(m, "self-refresh: %s\n", 1241 sr_enabled ? "enabled" : "disabled"); 1242 1243 return 0; 1244 } 1245 1246 static int i915_emon_status(struct seq_file *m, void *unused) 1247 { 1248 struct drm_info_node *node = (struct drm_info_node *) m->private; 1249 struct drm_device *dev = node->minor->dev; 1250 drm_i915_private_t *dev_priv = dev->dev_private; 1251 unsigned long temp, chipset, gfx; 1252 int ret; 1253 1254 if (!IS_GEN5(dev)) 1255 return -ENODEV; 1256 1257 ret = mutex_lock_interruptible(&dev->struct_mutex); 1258 if (ret) 1259 return ret; 1260 1261 temp = i915_mch_val(dev_priv); 1262 chipset = i915_chipset_val(dev_priv); 1263 gfx = i915_gfx_val(dev_priv); 1264 mutex_unlock(&dev->struct_mutex); 1265 1266 seq_printf(m, "GMCH temp: %ld\n", temp); 1267 seq_printf(m, "Chipset power: %ld\n", chipset); 1268 seq_printf(m, "GFX power: %ld\n", gfx); 1269 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1270 1271 return 0; 1272 } 1273 1274 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1275 { 1276 struct drm_info_node *node = (struct drm_info_node *) m->private; 1277 struct drm_device *dev = node->minor->dev; 1278 drm_i915_private_t *dev_priv = dev->dev_private; 1279 int ret; 1280 int gpu_freq, ia_freq; 1281 1282 if (!(IS_GEN6(dev) || IS_GEN7(dev))) { 1283 seq_printf(m, "unsupported on this chipset\n"); 1284 return 0; 1285 } 1286 1287 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1288 if (ret) 1289 return ret; 1290 1291 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1292 1293 for (gpu_freq = dev_priv->rps.min_delay; 1294 gpu_freq <= dev_priv->rps.max_delay; 1295 gpu_freq++) { 1296 ia_freq = gpu_freq; 1297 sandybridge_pcode_read(dev_priv, 1298 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1299 &ia_freq); 1300 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); 1301 } 1302 1303 mutex_unlock(&dev_priv->rps.hw_lock); 1304 1305 return 0; 1306 } 1307 1308 static int i915_gfxec(struct seq_file *m, void *unused) 1309 { 1310 struct drm_info_node *node = (struct drm_info_node *) m->private; 1311 struct drm_device *dev = node->minor->dev; 1312 drm_i915_private_t *dev_priv = dev->dev_private; 1313 int ret; 1314 1315 ret = mutex_lock_interruptible(&dev->struct_mutex); 1316 if (ret) 1317 return ret; 1318 1319 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1320 1321 mutex_unlock(&dev->struct_mutex); 1322 1323 return 0; 1324 } 1325 1326 static int i915_opregion(struct seq_file *m, void *unused) 1327 { 1328 struct drm_info_node *node = (struct drm_info_node *) m->private; 1329 struct drm_device *dev = node->minor->dev; 1330 drm_i915_private_t *dev_priv = dev->dev_private; 1331 struct intel_opregion *opregion = &dev_priv->opregion; 1332 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1333 int ret; 1334 1335 if (data == NULL) 1336 return -ENOMEM; 1337 1338 ret = mutex_lock_interruptible(&dev->struct_mutex); 1339 if (ret) 1340 goto out; 1341 1342 if (opregion->header) { 1343 memcpy_fromio(data, opregion->header, OPREGION_SIZE); 1344 seq_write(m, data, OPREGION_SIZE); 1345 } 1346 1347 mutex_unlock(&dev->struct_mutex); 1348 1349 out: 1350 kfree(data); 1351 return 0; 1352 } 1353 1354 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1355 { 1356 struct drm_info_node *node = (struct drm_info_node *) m->private; 1357 struct drm_device *dev = node->minor->dev; 1358 drm_i915_private_t *dev_priv = dev->dev_private; 1359 struct intel_fbdev *ifbdev; 1360 struct intel_framebuffer *fb; 1361 int ret; 1362 1363 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1364 if (ret) 1365 return ret; 1366 1367 ifbdev = dev_priv->fbdev; 1368 fb = to_intel_framebuffer(ifbdev->helper.fb); 1369 1370 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1371 fb->base.width, 1372 fb->base.height, 1373 fb->base.depth, 1374 fb->base.bits_per_pixel); 1375 describe_obj(m, fb->obj); 1376 seq_printf(m, "\n"); 1377 1378 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1379 if (&fb->base == ifbdev->helper.fb) 1380 continue; 1381 1382 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1383 fb->base.width, 1384 fb->base.height, 1385 fb->base.depth, 1386 fb->base.bits_per_pixel); 1387 describe_obj(m, fb->obj); 1388 seq_printf(m, "\n"); 1389 } 1390 1391 mutex_unlock(&dev->mode_config.mutex); 1392 1393 return 0; 1394 } 1395 1396 static int i915_context_status(struct seq_file *m, void *unused) 1397 { 1398 struct drm_info_node *node = (struct drm_info_node *) m->private; 1399 struct drm_device *dev = node->minor->dev; 1400 drm_i915_private_t *dev_priv = dev->dev_private; 1401 int ret; 1402 1403 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1404 if (ret) 1405 return ret; 1406 1407 if (dev_priv->ips.pwrctx) { 1408 seq_printf(m, "power context "); 1409 describe_obj(m, dev_priv->ips.pwrctx); 1410 seq_printf(m, "\n"); 1411 } 1412 1413 if (dev_priv->ips.renderctx) { 1414 seq_printf(m, "render context "); 1415 describe_obj(m, dev_priv->ips.renderctx); 1416 seq_printf(m, "\n"); 1417 } 1418 1419 mutex_unlock(&dev->mode_config.mutex); 1420 1421 return 0; 1422 } 1423 1424 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) 1425 { 1426 struct drm_info_node *node = (struct drm_info_node *) m->private; 1427 struct drm_device *dev = node->minor->dev; 1428 struct drm_i915_private *dev_priv = dev->dev_private; 1429 unsigned forcewake_count; 1430 1431 spin_lock_irq(&dev_priv->gt_lock); 1432 forcewake_count = dev_priv->forcewake_count; 1433 spin_unlock_irq(&dev_priv->gt_lock); 1434 1435 seq_printf(m, "forcewake count = %u\n", forcewake_count); 1436 1437 return 0; 1438 } 1439 1440 static const char *swizzle_string(unsigned swizzle) 1441 { 1442 switch(swizzle) { 1443 case I915_BIT_6_SWIZZLE_NONE: 1444 return "none"; 1445 case I915_BIT_6_SWIZZLE_9: 1446 return "bit9"; 1447 case I915_BIT_6_SWIZZLE_9_10: 1448 return "bit9/bit10"; 1449 case I915_BIT_6_SWIZZLE_9_11: 1450 return "bit9/bit11"; 1451 case I915_BIT_6_SWIZZLE_9_10_11: 1452 return "bit9/bit10/bit11"; 1453 case I915_BIT_6_SWIZZLE_9_17: 1454 return "bit9/bit17"; 1455 case I915_BIT_6_SWIZZLE_9_10_17: 1456 return "bit9/bit10/bit17"; 1457 case I915_BIT_6_SWIZZLE_UNKNOWN: 1458 return "unkown"; 1459 } 1460 1461 return "bug"; 1462 } 1463 1464 static int i915_swizzle_info(struct seq_file *m, void *data) 1465 { 1466 struct drm_info_node *node = (struct drm_info_node *) m->private; 1467 struct drm_device *dev = node->minor->dev; 1468 struct drm_i915_private *dev_priv = dev->dev_private; 1469 int ret; 1470 1471 ret = mutex_lock_interruptible(&dev->struct_mutex); 1472 if (ret) 1473 return ret; 1474 1475 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1476 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1477 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1478 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1479 1480 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1481 seq_printf(m, "DDC = 0x%08x\n", 1482 I915_READ(DCC)); 1483 seq_printf(m, "C0DRB3 = 0x%04x\n", 1484 I915_READ16(C0DRB3)); 1485 seq_printf(m, "C1DRB3 = 0x%04x\n", 1486 I915_READ16(C1DRB3)); 1487 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 1488 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1489 I915_READ(MAD_DIMM_C0)); 1490 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1491 I915_READ(MAD_DIMM_C1)); 1492 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1493 I915_READ(MAD_DIMM_C2)); 1494 seq_printf(m, "TILECTL = 0x%08x\n", 1495 I915_READ(TILECTL)); 1496 seq_printf(m, "ARB_MODE = 0x%08x\n", 1497 I915_READ(ARB_MODE)); 1498 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1499 I915_READ(DISP_ARB_CTL)); 1500 } 1501 mutex_unlock(&dev->struct_mutex); 1502 1503 return 0; 1504 } 1505 1506 static int i915_ppgtt_info(struct seq_file *m, void *data) 1507 { 1508 struct drm_info_node *node = (struct drm_info_node *) m->private; 1509 struct drm_device *dev = node->minor->dev; 1510 struct drm_i915_private *dev_priv = dev->dev_private; 1511 struct intel_ring_buffer *ring; 1512 int i, ret; 1513 1514 1515 ret = mutex_lock_interruptible(&dev->struct_mutex); 1516 if (ret) 1517 return ret; 1518 if (INTEL_INFO(dev)->gen == 6) 1519 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1520 1521 for_each_ring(ring, dev_priv, i) { 1522 seq_printf(m, "%s\n", ring->name); 1523 if (INTEL_INFO(dev)->gen == 7) 1524 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1525 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); 1526 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); 1527 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); 1528 } 1529 if (dev_priv->mm.aliasing_ppgtt) { 1530 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1531 1532 seq_printf(m, "aliasing PPGTT:\n"); 1533 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 1534 } 1535 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 1536 mutex_unlock(&dev->struct_mutex); 1537 1538 return 0; 1539 } 1540 1541 static int i915_dpio_info(struct seq_file *m, void *data) 1542 { 1543 struct drm_info_node *node = (struct drm_info_node *) m->private; 1544 struct drm_device *dev = node->minor->dev; 1545 struct drm_i915_private *dev_priv = dev->dev_private; 1546 int ret; 1547 1548 1549 if (!IS_VALLEYVIEW(dev)) { 1550 seq_printf(m, "unsupported\n"); 1551 return 0; 1552 } 1553 1554 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1555 if (ret) 1556 return ret; 1557 1558 seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); 1559 1560 seq_printf(m, "DPIO_DIV_A: 0x%08x\n", 1561 intel_dpio_read(dev_priv, _DPIO_DIV_A)); 1562 seq_printf(m, "DPIO_DIV_B: 0x%08x\n", 1563 intel_dpio_read(dev_priv, _DPIO_DIV_B)); 1564 1565 seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", 1566 intel_dpio_read(dev_priv, _DPIO_REFSFR_A)); 1567 seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", 1568 intel_dpio_read(dev_priv, _DPIO_REFSFR_B)); 1569 1570 seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", 1571 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); 1572 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1573 intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1574 1575 seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1576 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1577 seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1578 intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1579 1580 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1581 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1582 1583 mutex_unlock(&dev->mode_config.mutex); 1584 1585 return 0; 1586 } 1587 1588 static ssize_t 1589 i915_wedged_read(struct file *filp, 1590 char __user *ubuf, 1591 size_t max, 1592 loff_t *ppos) 1593 { 1594 struct drm_device *dev = filp->private_data; 1595 drm_i915_private_t *dev_priv = dev->dev_private; 1596 char buf[80]; 1597 int len; 1598 1599 len = snprintf(buf, sizeof(buf), 1600 "wedged : %d\n", 1601 atomic_read(&dev_priv->mm.wedged)); 1602 1603 if (len > sizeof(buf)) 1604 len = sizeof(buf); 1605 1606 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1607 } 1608 1609 static ssize_t 1610 i915_wedged_write(struct file *filp, 1611 const char __user *ubuf, 1612 size_t cnt, 1613 loff_t *ppos) 1614 { 1615 struct drm_device *dev = filp->private_data; 1616 char buf[20]; 1617 int val = 1; 1618 1619 if (cnt > 0) { 1620 if (cnt > sizeof(buf) - 1) 1621 return -EINVAL; 1622 1623 if (copy_from_user(buf, ubuf, cnt)) 1624 return -EFAULT; 1625 buf[cnt] = 0; 1626 1627 val = simple_strtoul(buf, NULL, 0); 1628 } 1629 1630 DRM_INFO("Manually setting wedged to %d\n", val); 1631 i915_handle_error(dev, val); 1632 1633 return cnt; 1634 } 1635 1636 static const struct file_operations i915_wedged_fops = { 1637 .owner = THIS_MODULE, 1638 .open = simple_open, 1639 .read = i915_wedged_read, 1640 .write = i915_wedged_write, 1641 .llseek = default_llseek, 1642 }; 1643 1644 static ssize_t 1645 i915_ring_stop_read(struct file *filp, 1646 char __user *ubuf, 1647 size_t max, 1648 loff_t *ppos) 1649 { 1650 struct drm_device *dev = filp->private_data; 1651 drm_i915_private_t *dev_priv = dev->dev_private; 1652 char buf[20]; 1653 int len; 1654 1655 len = snprintf(buf, sizeof(buf), 1656 "0x%08x\n", dev_priv->stop_rings); 1657 1658 if (len > sizeof(buf)) 1659 len = sizeof(buf); 1660 1661 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1662 } 1663 1664 static ssize_t 1665 i915_ring_stop_write(struct file *filp, 1666 const char __user *ubuf, 1667 size_t cnt, 1668 loff_t *ppos) 1669 { 1670 struct drm_device *dev = filp->private_data; 1671 struct drm_i915_private *dev_priv = dev->dev_private; 1672 char buf[20]; 1673 int val = 0, ret; 1674 1675 if (cnt > 0) { 1676 if (cnt > sizeof(buf) - 1) 1677 return -EINVAL; 1678 1679 if (copy_from_user(buf, ubuf, cnt)) 1680 return -EFAULT; 1681 buf[cnt] = 0; 1682 1683 val = simple_strtoul(buf, NULL, 0); 1684 } 1685 1686 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); 1687 1688 ret = mutex_lock_interruptible(&dev->struct_mutex); 1689 if (ret) 1690 return ret; 1691 1692 dev_priv->stop_rings = val; 1693 mutex_unlock(&dev->struct_mutex); 1694 1695 return cnt; 1696 } 1697 1698 static const struct file_operations i915_ring_stop_fops = { 1699 .owner = THIS_MODULE, 1700 .open = simple_open, 1701 .read = i915_ring_stop_read, 1702 .write = i915_ring_stop_write, 1703 .llseek = default_llseek, 1704 }; 1705 1706 static ssize_t 1707 i915_max_freq_read(struct file *filp, 1708 char __user *ubuf, 1709 size_t max, 1710 loff_t *ppos) 1711 { 1712 struct drm_device *dev = filp->private_data; 1713 drm_i915_private_t *dev_priv = dev->dev_private; 1714 char buf[80]; 1715 int len, ret; 1716 1717 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1718 return -ENODEV; 1719 1720 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1721 if (ret) 1722 return ret; 1723 1724 len = snprintf(buf, sizeof(buf), 1725 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); 1726 mutex_unlock(&dev_priv->rps.hw_lock); 1727 1728 if (len > sizeof(buf)) 1729 len = sizeof(buf); 1730 1731 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1732 } 1733 1734 static ssize_t 1735 i915_max_freq_write(struct file *filp, 1736 const char __user *ubuf, 1737 size_t cnt, 1738 loff_t *ppos) 1739 { 1740 struct drm_device *dev = filp->private_data; 1741 struct drm_i915_private *dev_priv = dev->dev_private; 1742 char buf[20]; 1743 int val = 1, ret; 1744 1745 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1746 return -ENODEV; 1747 1748 if (cnt > 0) { 1749 if (cnt > sizeof(buf) - 1) 1750 return -EINVAL; 1751 1752 if (copy_from_user(buf, ubuf, cnt)) 1753 return -EFAULT; 1754 buf[cnt] = 0; 1755 1756 val = simple_strtoul(buf, NULL, 0); 1757 } 1758 1759 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1760 1761 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1762 if (ret) 1763 return ret; 1764 1765 /* 1766 * Turbo will still be enabled, but won't go above the set value. 1767 */ 1768 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1769 1770 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1771 mutex_unlock(&dev_priv->rps.hw_lock); 1772 1773 return cnt; 1774 } 1775 1776 static const struct file_operations i915_max_freq_fops = { 1777 .owner = THIS_MODULE, 1778 .open = simple_open, 1779 .read = i915_max_freq_read, 1780 .write = i915_max_freq_write, 1781 .llseek = default_llseek, 1782 }; 1783 1784 static ssize_t 1785 i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, 1786 loff_t *ppos) 1787 { 1788 struct drm_device *dev = filp->private_data; 1789 drm_i915_private_t *dev_priv = dev->dev_private; 1790 char buf[80]; 1791 int len, ret; 1792 1793 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1794 return -ENODEV; 1795 1796 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1797 if (ret) 1798 return ret; 1799 1800 len = snprintf(buf, sizeof(buf), 1801 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); 1802 mutex_unlock(&dev_priv->rps.hw_lock); 1803 1804 if (len > sizeof(buf)) 1805 len = sizeof(buf); 1806 1807 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1808 } 1809 1810 static ssize_t 1811 i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, 1812 loff_t *ppos) 1813 { 1814 struct drm_device *dev = filp->private_data; 1815 struct drm_i915_private *dev_priv = dev->dev_private; 1816 char buf[20]; 1817 int val = 1, ret; 1818 1819 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1820 return -ENODEV; 1821 1822 if (cnt > 0) { 1823 if (cnt > sizeof(buf) - 1) 1824 return -EINVAL; 1825 1826 if (copy_from_user(buf, ubuf, cnt)) 1827 return -EFAULT; 1828 buf[cnt] = 0; 1829 1830 val = simple_strtoul(buf, NULL, 0); 1831 } 1832 1833 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 1834 1835 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1836 if (ret) 1837 return ret; 1838 1839 /* 1840 * Turbo will still be enabled, but won't go below the set value. 1841 */ 1842 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; 1843 1844 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1845 mutex_unlock(&dev_priv->rps.hw_lock); 1846 1847 return cnt; 1848 } 1849 1850 static const struct file_operations i915_min_freq_fops = { 1851 .owner = THIS_MODULE, 1852 .open = simple_open, 1853 .read = i915_min_freq_read, 1854 .write = i915_min_freq_write, 1855 .llseek = default_llseek, 1856 }; 1857 1858 static ssize_t 1859 i915_cache_sharing_read(struct file *filp, 1860 char __user *ubuf, 1861 size_t max, 1862 loff_t *ppos) 1863 { 1864 struct drm_device *dev = filp->private_data; 1865 drm_i915_private_t *dev_priv = dev->dev_private; 1866 char buf[80]; 1867 u32 snpcr; 1868 int len, ret; 1869 1870 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1871 return -ENODEV; 1872 1873 ret = mutex_lock_interruptible(&dev->struct_mutex); 1874 if (ret) 1875 return ret; 1876 1877 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1878 mutex_unlock(&dev_priv->dev->struct_mutex); 1879 1880 len = snprintf(buf, sizeof(buf), 1881 "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1882 GEN6_MBC_SNPCR_SHIFT); 1883 1884 if (len > sizeof(buf)) 1885 len = sizeof(buf); 1886 1887 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1888 } 1889 1890 static ssize_t 1891 i915_cache_sharing_write(struct file *filp, 1892 const char __user *ubuf, 1893 size_t cnt, 1894 loff_t *ppos) 1895 { 1896 struct drm_device *dev = filp->private_data; 1897 struct drm_i915_private *dev_priv = dev->dev_private; 1898 char buf[20]; 1899 u32 snpcr; 1900 int val = 1; 1901 1902 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1903 return -ENODEV; 1904 1905 if (cnt > 0) { 1906 if (cnt > sizeof(buf) - 1) 1907 return -EINVAL; 1908 1909 if (copy_from_user(buf, ubuf, cnt)) 1910 return -EFAULT; 1911 buf[cnt] = 0; 1912 1913 val = simple_strtoul(buf, NULL, 0); 1914 } 1915 1916 if (val < 0 || val > 3) 1917 return -EINVAL; 1918 1919 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 1920 1921 /* Update the cache sharing policy here as well */ 1922 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1923 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1924 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1925 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1926 1927 return cnt; 1928 } 1929 1930 static const struct file_operations i915_cache_sharing_fops = { 1931 .owner = THIS_MODULE, 1932 .open = simple_open, 1933 .read = i915_cache_sharing_read, 1934 .write = i915_cache_sharing_write, 1935 .llseek = default_llseek, 1936 }; 1937 1938 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1939 * allocated we need to hook into the minor for release. */ 1940 static int 1941 drm_add_fake_info_node(struct drm_minor *minor, 1942 struct dentry *ent, 1943 const void *key) 1944 { 1945 struct drm_info_node *node; 1946 1947 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 1948 if (node == NULL) { 1949 debugfs_remove(ent); 1950 return -ENOMEM; 1951 } 1952 1953 node->minor = minor; 1954 node->dent = ent; 1955 node->info_ent = (void *) key; 1956 1957 mutex_lock(&minor->debugfs_lock); 1958 list_add(&node->list, &minor->debugfs_list); 1959 mutex_unlock(&minor->debugfs_lock); 1960 1961 return 0; 1962 } 1963 1964 static int i915_forcewake_open(struct inode *inode, struct file *file) 1965 { 1966 struct drm_device *dev = inode->i_private; 1967 struct drm_i915_private *dev_priv = dev->dev_private; 1968 1969 if (INTEL_INFO(dev)->gen < 6) 1970 return 0; 1971 1972 gen6_gt_force_wake_get(dev_priv); 1973 1974 return 0; 1975 } 1976 1977 static int i915_forcewake_release(struct inode *inode, struct file *file) 1978 { 1979 struct drm_device *dev = inode->i_private; 1980 struct drm_i915_private *dev_priv = dev->dev_private; 1981 1982 if (INTEL_INFO(dev)->gen < 6) 1983 return 0; 1984 1985 gen6_gt_force_wake_put(dev_priv); 1986 1987 return 0; 1988 } 1989 1990 static const struct file_operations i915_forcewake_fops = { 1991 .owner = THIS_MODULE, 1992 .open = i915_forcewake_open, 1993 .release = i915_forcewake_release, 1994 }; 1995 1996 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) 1997 { 1998 struct drm_device *dev = minor->dev; 1999 struct dentry *ent; 2000 2001 ent = debugfs_create_file("i915_forcewake_user", 2002 S_IRUSR, 2003 root, dev, 2004 &i915_forcewake_fops); 2005 if (IS_ERR(ent)) 2006 return PTR_ERR(ent); 2007 2008 return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); 2009 } 2010 2011 static int i915_debugfs_create(struct dentry *root, 2012 struct drm_minor *minor, 2013 const char *name, 2014 const struct file_operations *fops) 2015 { 2016 struct drm_device *dev = minor->dev; 2017 struct dentry *ent; 2018 2019 ent = debugfs_create_file(name, 2020 S_IRUGO | S_IWUSR, 2021 root, dev, 2022 fops); 2023 if (IS_ERR(ent)) 2024 return PTR_ERR(ent); 2025 2026 return drm_add_fake_info_node(minor, ent, fops); 2027 } 2028 2029 static struct drm_info_list i915_debugfs_list[] = { 2030 {"i915_capabilities", i915_capabilities, 0}, 2031 {"i915_gem_objects", i915_gem_object_info, 0}, 2032 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 2033 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 2034 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 2035 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 2036 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 2037 {"i915_gem_request", i915_gem_request_info, 0}, 2038 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 2039 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 2040 {"i915_gem_interrupt", i915_interrupt_info, 0}, 2041 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, 2042 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, 2043 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, 2044 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 2045 {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, 2046 {"i915_delayfreq_table", i915_delayfreq_table, 0}, 2047 {"i915_inttoext_table", i915_inttoext_table, 0}, 2048 {"i915_drpc_info", i915_drpc_info, 0}, 2049 {"i915_emon_status", i915_emon_status, 0}, 2050 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 2051 {"i915_gfxec", i915_gfxec, 0}, 2052 {"i915_fbc_status", i915_fbc_status, 0}, 2053 {"i915_sr_status", i915_sr_status, 0}, 2054 {"i915_opregion", i915_opregion, 0}, 2055 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 2056 {"i915_context_status", i915_context_status, 0}, 2057 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 2058 {"i915_swizzle_info", i915_swizzle_info, 0}, 2059 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 2060 {"i915_dpio", i915_dpio_info, 0}, 2061 }; 2062 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 2063 2064 int i915_debugfs_init(struct drm_minor *minor) 2065 { 2066 int ret; 2067 2068 ret = i915_debugfs_create(minor->debugfs_root, minor, 2069 "i915_wedged", 2070 &i915_wedged_fops); 2071 if (ret) 2072 return ret; 2073 2074 ret = i915_forcewake_create(minor->debugfs_root, minor); 2075 if (ret) 2076 return ret; 2077 2078 ret = i915_debugfs_create(minor->debugfs_root, minor, 2079 "i915_max_freq", 2080 &i915_max_freq_fops); 2081 if (ret) 2082 return ret; 2083 2084 ret = i915_debugfs_create(minor->debugfs_root, minor, 2085 "i915_min_freq", 2086 &i915_min_freq_fops); 2087 if (ret) 2088 return ret; 2089 2090 ret = i915_debugfs_create(minor->debugfs_root, minor, 2091 "i915_cache_sharing", 2092 &i915_cache_sharing_fops); 2093 if (ret) 2094 return ret; 2095 2096 ret = i915_debugfs_create(minor->debugfs_root, minor, 2097 "i915_ring_stop", 2098 &i915_ring_stop_fops); 2099 if (ret) 2100 return ret; 2101 2102 ret = i915_debugfs_create(minor->debugfs_root, minor, 2103 "i915_error_state", 2104 &i915_error_state_fops); 2105 if (ret) 2106 return ret; 2107 2108 return drm_debugfs_create_files(i915_debugfs_list, 2109 I915_DEBUGFS_ENTRIES, 2110 minor->debugfs_root, minor); 2111 } 2112 2113 void i915_debugfs_cleanup(struct drm_minor *minor) 2114 { 2115 drm_debugfs_remove_files(i915_debugfs_list, 2116 I915_DEBUGFS_ENTRIES, minor); 2117 drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 2118 1, minor); 2119 drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 2120 1, minor); 2121 drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, 2122 1, minor); 2123 drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, 2124 1, minor); 2125 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 2126 1, minor); 2127 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, 2128 1, minor); 2129 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, 2130 1, minor); 2131 } 2132 2133 #endif /* CONFIG_DEBUG_FS */ 2134