/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_gt_info_print(&i915->gt.info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915->params, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

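/*
 * Decode a mask of I915_GTT_PAGE_SIZE_* flags into a human-readable string.
 * A single size returns a static string; a mixed mask is formatted into the
 * caller-provided buffer (pass buf == NULL to get the summary marker "M").
 */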
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0';

		return buf;
	}
}

void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].src_stride,
					   vma->ggtt_view.rotated.plane[0].dst_stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].src_stride,
					   vma->ggtt_view.rotated.plane[1].dst_stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].src_stride,
					   vma->ggtt_view.remapped.plane[0].dst_stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].src_stride,
					   vma->ggtt_view.remapped.plane[1].dst_stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (i915_gem_object_is_stolen(obj))
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}

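/*
 * i915_gem_objects: a one-line summary of shrinkable/freed object counts,
 * followed by total/available bytes for each memory region.
 */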
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

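/*
 * i915_frequency_info: dump the GPU frequency (RPS) state. The register
 * layout differs per platform: ILK uses MEMSWCTL/MEMSTAT, VLV/CHV go
 * through the Punit, and GEN6+ read the RP* registers directly.
 */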
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
			gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
			gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

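		/*
		 * Sample the RP thresholds and the up/down busyness counters
		 * in one go while forcewake is still held.
		 */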
		rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
		rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
		rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);

		rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
		rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
			pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
			pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
			pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
		} else {
			pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
			pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
			pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
			pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		}
		pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);

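		/* Everything below merely formats the values sampled above. */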
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
			   rpupei,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
		seq_printf(m, "RP CUR UP: %d (%lldns)\n",
			   rpcurup,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%lldns)\n",
			   rpprevup,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
			   rpdownei,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
						      rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
			   rpcurdown,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
						      rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
			   rpprevdown,
			   intel_gt_pm_interval_to_ns(&dev_priv->gt,
						      rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

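		/*
		 * RP_STATE_CAP packs the RPN/RP1/RP0 ratios one byte each;
		 * the byte order is reversed on GEN9 LP parts, and on
		 * GEN9_BC/GEN10+ the ratio is multiplied by GEN9_FREQ_SCALER
		 * to match the units intel_gpu_freq() expects.
		 */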
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return 0;
}

static const char *swizzle_string(unsigned int swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

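/*
 * i915_swizzle_info: report the bit-6 swizzle pattern used by the GGTT for
 * X/Y tiling, plus the raw DRAM configuration registers it was detected
 * from (only meaningful before BDW; see detect_bit_6_swizzle()).
 */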
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3_BW));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3_BW));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

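/*
 * i915_rps_boost_info: a quick summary of the RPS state machine and any
 * outstanding waitboost activity.
 */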
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.init_wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

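/*
 * i915_engine_info: dump the state of every user-visible engine, plus the
 * active timelines, for debugging hangs and scheduling issues.
 */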
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "GT awake? %s [%d], %llums\n",
		   yesno(i915->gt.awake),
		   atomic_read(&i915->gt.wakeref.count),
		   ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
	seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   i915->gt.clock_frequency,
		   i915->gt.clock_period_ns);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, i915)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_putc(m, '\n');
	}

	return 0;
}

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

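/*
 * i915_perf_noa_delay: tunable for the NOA programming delay used by the
 * perf/OA code; capped so the delay fits within the 32bit CS timestamp.
 */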
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");

#define DROP_UNBOUND		BIT(0)
#define DROP_BOUND		BIT(1)
#define DROP_RETIRE		BIT(2)
#define DROP_ACTIVE		BIT(3)
#define DROP_FREED		BIT(4)
#define DROP_SHRINK_ALL		BIT(5)
#define DROP_IDLE		BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU		BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;

	return intel_sseu_status(m, gt);
}

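/*
 * i915_forcewake_user: while this file is held open the GT is kept awake
 * and (on GEN6+) all forcewake domains are held, so registers can be poked
 * from userspace without racing against runtime suspend.
 */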
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(&i915->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};

void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}