/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_pm_debugfs.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_rc6.h"
#include "gt/intel_reset.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
#include "intel_pm.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	i915_print_iommu_status(i915, &p);
	intel_gt_info_print(&to_gt(i915)->info, &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915->params, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

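/*
 * Illustrative usage sketch (the paths are assumptions, not part of the
 * driver: they presume debugfs mounted at /sys/kernel/debug and the card
 * being DRM minor 0):
 *
 *   $ cat /sys/kernel/debug/dri/0/i915_capabilities
 *
 * The file is read-only and dumps static device info, runtime info, IOMMU
 * status, GT info, driver caps and the module parameters in a single pass.
 */
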
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0'; /* trim the trailing ", " */

		return buf;
	}
}

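/*
 * For masks with more than one bit set, the default branch above composes
 * the names largest-first into the caller's buffer; a sketch derived from
 * the code: I915_GTT_PAGE_SIZE_2M | I915_GTT_PAGE_SIZE_4K yields "2M, 4K".
 * Without a scratch buffer it degrades to the single "M" (mixed) marker.
 */
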
static const char *stringify_vma_type(const struct i915_vma *vma)
{
	if (i915_vma_is_ggtt(vma))
		return "ggtt";

	if (i915_vma_is_dpt(vma))
		return "dpt";

	return "ppgtt";
}

void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
			   stringify_vma_type(vma),
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].src_stride,
					   vma->ggtt_view.rotated.plane[0].dst_stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].src_stride,
					   vma->ggtt_view.rotated.plane[1].dst_stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].src_stride,
					   vma->ggtt_view.remapped.plane[0].dst_stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].src_stride,
					   vma->ggtt_view.remapped.plane[1].dst_stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, ", fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (i915_gem_object_is_stolen(obj))
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_puts(m, " (fb)");
}

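/*
 * Reading guide for the line emitted above (a summary of the format
 * strings, not additional output): kernel pointer, tiling/global-fault/
 * pin-mapped flags, size in KiB, read domains, write domain, cache level,
 * then one "(type offset, size, pages, view[, fence])" block per bound
 * VMA, the aggregate pin count, and optional stolen/framebuffer notes.
 */
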
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		intel_memory_region_debug(mr, &p);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	drm_dbg(&error->i915->drm, "Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
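
/*
 * Sketch of the error-capture interface wired up above (paths assume the
 * usual debugfs mount and DRM minor 0, as elsewhere in this file):
 *
 *   $ cat /sys/kernel/debug/dri/0/i915_error_state      # first captured hang
 *   $ echo 1 > /sys/kernel/debug/dri/0/i915_error_state # clear it
 *
 * i915_gpu_info differs in that each open() captures a fresh coredump of
 * the current GPU state instead of returning the stored error.
 */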
#endif

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = to_gt(i915);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_gt_pm_frequency_dump(gt, &p);

	return 0;
}

static const char *swizzle_string(unsigned int swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
	if (GRAPHICS_VER(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv))
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GRAPHICS_VER(dev_priv, 3, 4)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DCC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3_BW));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3_BW));
	} else if (GRAPHICS_VER(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (GRAPHICS_VER(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &to_gt(dev_priv)->rps;

	seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s\n", yesno(to_gt(dev_priv)->awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	return 0;
}

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.init_wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!to_gt(dev_priv)->awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "GT awake? %s [%d], %llums\n",
		   yesno(to_gt(i915)->awake),
		   atomic_read(&to_gt(i915)->wakeref.count),
		   ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
	seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
		   to_gt(i915)->clock_frequency,
		   to_gt(i915)->clock_period_ns);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, i915)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_putc(m, '\n');
	}

	return 0;
}

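/*
 * Each workaround line above reads: MMIO register offset, the bits the
 * driver sets (wa->set), and the mask of bits it clears (wa->clr). This is
 * a descriptive note on the format string, not additional output.
 */
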
static int i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	return intel_gt_debugfs_reset_show(to_gt(i915), val);
}

static int i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return intel_gt_debugfs_reset_store(to_gt(i915), val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

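/*
 * Usage sketch (path assumptions as elsewhere in this file); the precise
 * reset semantics are delegated to intel_gt_debugfs_reset_store():
 *
 *   $ echo 1 > /sys/kernel/debug/dri/0/i915_wedged   # request a GPU reset
 */
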
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");

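/*
 * Note (inferred from the conversion above): the NOA programming delay is
 * given in nanoseconds, and any value whose CS-clock equivalent would not
 * fit in 32 bits is rejected, matching the 32-bit timestamp arithmetic
 * done on the command streamer.
 */
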
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL | \
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)

static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	if (val & DROP_FREED)
		intel_gt_flush_buffer_pool(gt);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	unsigned int flags;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(to_gt(i915), val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	flags = memalloc_noreclaim_save();
	if (val & DROP_BOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	memalloc_noreclaim_restore(flags);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

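/*
 * Usage sketch (paths as assumed elsewhere in this file): reading reports
 * the full DROP_ALL mask, so a typical round-trip is
 *
 *   $ cat /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *   0x000003ff
 *   $ echo 0x3ff > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * Any subset of the DROP_* bits may be written to drop caches selectively.
 */
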
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = to_gt(i915);

	return intel_sseu_status(m, gt);
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

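/*
 * i915_forcewake_user holds forcewake (and with it a runtime-pm wakeref)
 * for as long as the file stays open; the release hook drops it. A sketch
 * of keeping the GT awake from a shell (path assumptions as above):
 *
 *   $ exec 3</sys/kernel/debug/dri/0/i915_forcewake_user   # hold
 *   $ exec 3<&-                                            # release
 */
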
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};

void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", 0400, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    0644,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
}
781