xref: /linux/drivers/gpu/drm/i915/i915_debugfs.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"

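/* Map a debugfs node back to the i915 device that registered it. */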
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

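/*
 * Decode a mask of GTT page sizes. Single sizes map to static strings;
 * a mixed mask is rendered into the caller's buffer, clipping the
 * trailing ", " separator.
 */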
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x - 2] = '\0';

		return buf;
	}
}

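/*
 * Print a one-line summary of a GEM object: flag characters, size,
 * cache domains and every VMA currently bound for it. The obj->vma.lock
 * is dropped and retaken around each print.
 */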
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, ", fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_puts(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}

struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 active, inactive;
	u64 closed;
};

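/*
 * Accumulator used with idr_for_each(): charge the object's size and
 * GTT residency to the file_stats at @data. Without a target vm, all
 * GGTT VMAs are counted; otherwise the object's vma rbtree is searched
 * for the single VMA belonging to that vm.
 */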
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

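/*
 * Walk the global context list, charging pinned context state and ring
 * buffers to a kernel-wide total and per-client objects to the owning
 * task's name. Context references are taken so the list lock can be
 * dropped while printing.
 */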
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);
	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}

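/* Display interrupt registers shared by the gen8 and gen11 paths below. */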
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
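/* Common read() for the error_state and gpu_info files. */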
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

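/*
 * Current and limit GPU frequencies. The RPS register layout differs
 * per platform, hence the ILK/VLV/gen6+ branches.
 */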
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

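/* Ironlake render-standby (DRPC) state, decoded from MEMMODECTL/RSTDBYCTL. */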
static int ilk_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		seq_printf(m, "%s %u (%llu us)\n", title,
			   intel_uncore_read(&i915->uncore, reg),
			   intel_rc6_residency_us(&i915->gt.rc6, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

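/* gen6+ RC6 state; gen9 adds the render/media power-gating status. */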
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ilk_drpc_info(m);
	}

	return err;
}

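/*
 * On LLC platforms pcode maintains a table mapping GPU frequency bins
 * to effective CPU and ring frequencies; print one row per GPU bin.
 */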
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					i915_debugfs_describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}

static const char *swizzle_string(unsigned int swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

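/* Report the bit6 swizzle mode and the DRAM configuration behind it. */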
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DCC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

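/* RPS boost bookkeeping plus, while awake, the current autotuning window. */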
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return "ISR";
	case GUC_DPC_LOG_BUFFER:
		return "DPC";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}

static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		seq_puts(m, "GuC log relay not created\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc *uc = &dev_priv->gt.uc;

	if (!intel_uc_uses_guc(uc))
		return -ENODEV;

	i915_guc_log_info(m, &uc->guc.log);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc *uc = &dev_priv->gt.uc;
	struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
	int index;

	if (!intel_uc_uses_guc_submission(uc))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}

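/* Raw hexdump of the GuC log, or of the load-error log if the node's data says so. */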
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
1649 		return PTR_ERR(log);
1650 	}
1651 
1652 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1653 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1654 			   *(log + i), *(log + i + 1),
1655 			   *(log + i + 2), *(log + i + 3));
1656 
1657 	seq_putc(m, '\n');
1658 
1659 	i915_gem_object_unpin_map(obj);
1660 
1661 	return 0;
1662 }
1663 
1664 static int i915_guc_log_level_get(void *data, u64 *val)
1665 {
1666 	struct drm_i915_private *dev_priv = data;
1667 	struct intel_uc *uc = &dev_priv->gt.uc;
1668 
1669 	if (!intel_uc_uses_guc(uc))
1670 		return -ENODEV;
1671 
1672 	*val = intel_guc_log_get_level(&uc->guc.log);
1673 
1674 	return 0;
1675 }
1676 
1677 static int i915_guc_log_level_set(void *data, u64 val)
1678 {
1679 	struct drm_i915_private *dev_priv = data;
1680 	struct intel_uc *uc = &dev_priv->gt.uc;
1681 
1682 	if (!intel_uc_uses_guc(uc))
1683 		return -ENODEV;
1684 
1685 	return intel_guc_log_set_level(&uc->guc.log, val);
1686 }
1687 
1688 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
1689 			i915_guc_log_level_get, i915_guc_log_level_set,
1690 			"%lld\n");
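/*
 * Example usage of the log level attribute above; the debugfs path is an
 * assumption (debugfs mounted at /sys/kernel/debug, DRM minor 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_guc_log_level      # current level
 *   # echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_level
 *
 * Writes are passed to intel_guc_log_set_level(), which is responsible for
 * range checking; both paths return -ENODEV when the GuC is not in use.
 */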
1691 
1692 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1693 {
1694 	struct drm_i915_private *i915 = inode->i_private;
1695 	struct intel_guc *guc = &i915->gt.uc.guc;
1696 	struct intel_guc_log *log = &guc->log;
1697 
1698 	if (!intel_guc_is_ready(guc))
1699 		return -ENODEV;
1700 
1701 	file->private_data = log;
1702 
1703 	return intel_guc_log_relay_open(log);
1704 }
1705 
1706 static ssize_t
1707 i915_guc_log_relay_write(struct file *filp,
1708 			 const char __user *ubuf,
1709 			 size_t cnt,
1710 			 loff_t *ppos)
1711 {
1712 	struct intel_guc_log *log = filp->private_data;
1713 	int val;
1714 	int ret;
1715 
1716 	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1717 	if (ret < 0)
1718 		return ret;
1719 
1720 	/*
1721 	 * A value of 1 enables and starts the GuC log relay;
1722 	 * any other value flushes it.
1723 	 */
1724 	if (val == 1)
1725 		ret = intel_guc_log_relay_start(log);
1726 	else
1727 		intel_guc_log_relay_flush(log);
1728 
1729 	return ret ?: cnt;
1730 }
1731 
1732 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1733 {
1734 	struct drm_i915_private *i915 = inode->i_private;
1735 	struct intel_guc *guc = &i915->gt.uc.guc;
1736 
1737 	intel_guc_log_relay_close(&guc->log);
1738 	return 0;
1739 }
1740 
1741 static const struct file_operations i915_guc_log_relay_fops = {
1742 	.owner = THIS_MODULE,
1743 	.open = i915_guc_log_relay_open,
1744 	.write = i915_guc_log_relay_write,
1745 	.release = i915_guc_log_relay_release,
1746 };
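/*
 * Sketch of how the relay interface above is driven from userspace (paths
 * assume debugfs at /sys/kernel/debug and DRM minor 0). Keeping one fd open
 * spans start/flush, and closing it tears the relay down:
 *
 *   # exec 3> /sys/kernel/debug/dri/0/i915_guc_log_relay  # -ENODEV if GuC not ready
 *   # echo 1 >&3    # start the relay
 *   # echo 0 >&3    # any value other than 1 only flushes
 *   # exec 3>&-     # close -> intel_guc_log_relay_close()
 */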
1747 
1748 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
1749 {
1750 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1751 	struct pci_dev *pdev = dev_priv->drm.pdev;
1752 
1753 	if (!HAS_RUNTIME_PM(dev_priv))
1754 		seq_puts(m, "Runtime power management not supported\n");
1755 
1756 	seq_printf(m, "Runtime power status: %s\n",
1757 		   enableddisabled(!dev_priv->power_domains.wakeref));
1758 
1759 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
1760 	seq_printf(m, "IRQs disabled: %s\n",
1761 		   yesno(!intel_irqs_enabled(dev_priv)));
1762 #ifdef CONFIG_PM
1763 	seq_printf(m, "Usage count: %d\n",
1764 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
1765 #else
1766 	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
1767 #endif
1768 	seq_printf(m, "PCI device power state: %s [%d]\n",
1769 		   pci_power_name(pdev->current_state),
1770 		   pdev->current_state);
1771 
1772 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
1773 		struct drm_printer p = drm_seq_file_printer(m);
1774 
1775 		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
1776 	}
1777 
1778 	return 0;
1779 }
1780 
1781 static int i915_engine_info(struct seq_file *m, void *unused)
1782 {
1783 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 	struct intel_engine_cs *engine;
1785 	intel_wakeref_t wakeref;
1786 	struct drm_printer p;
1787 
1788 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1789 
1790 	seq_printf(m, "GT awake? %s [%d]\n",
1791 		   yesno(dev_priv->gt.awake),
1792 		   atomic_read(&dev_priv->gt.wakeref.count));
1793 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
1794 		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
1795 
1796 	p = drm_seq_file_printer(m);
1797 	for_each_uabi_engine(engine, dev_priv)
1798 		intel_engine_dump(engine, &p, "%s\n", engine->name);
1799 
1800 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1801 
1802 	return 0;
1803 }
1804 
1805 static int i915_rcs_topology(struct seq_file *m, void *unused)
1806 {
1807 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1808 	struct drm_printer p = drm_seq_file_printer(m);
1809 
1810 	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
1811 
1812 	return 0;
1813 }
1814 
1815 static int i915_shrinker_info(struct seq_file *m, void *unused)
1816 {
1817 	struct drm_i915_private *i915 = node_to_i915(m->private);
1818 
1819 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
1820 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
1821 
1822 	return 0;
1823 }
1824 
1825 static int i915_wa_registers(struct seq_file *m, void *unused)
1826 {
1827 	struct drm_i915_private *i915 = node_to_i915(m->private);
1828 	struct intel_engine_cs *engine;
1829 
1830 	for_each_uabi_engine(engine, i915) {
1831 		const struct i915_wa_list *wal = &engine->ctx_wa_list;
1832 		const struct i915_wa *wa;
1833 		unsigned int count;
1834 
1835 		count = wal->count;
1836 		if (!count)
1837 			continue;
1838 
1839 		seq_printf(m, "%s: Workarounds applied: %u\n",
1840 			   engine->name, count);
1841 
1842 		for (wa = wal->list; count--; wa++)
1843 			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
1844 				   i915_mmio_reg_offset(wa->reg),
1845 				   wa->set, wa->clr);
1846 
1847 		seq_putc(m, '\n');
1848 	}
1849 
1850 	return 0;
1851 }
1852 
1853 static int
1854 i915_wedged_get(void *data, u64 *val)
1855 {
1856 	struct drm_i915_private *i915 = data;
1857 	int ret = intel_gt_terminally_wedged(&i915->gt);
1858 
1859 	switch (ret) {
1860 	case -EIO:
1861 		*val = 1;
1862 		return 0;
1863 	case 0:
1864 		*val = 0;
1865 		return 0;
1866 	default:
1867 		return ret;
1868 	}
1869 }
1870 
1871 static int
1872 i915_wedged_set(void *data, u64 val)
1873 {
1874 	struct drm_i915_private *i915 = data;
1875 
1876 	/* Flush any previous reset before applying for a new one */
1877 	wait_event(i915->gt.reset.queue,
1878 		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
1879 
1880 	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
1881 			      "Manually set wedged engine mask = %llx", val);
1882 	return 0;
1883 }
1884 
1885 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
1886 			i915_wedged_get, i915_wedged_set,
1887 			"%llu\n");
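/*
 * Example interaction with the wedged attribute above (path assumed as in
 * the other debugfs examples here):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_wedged              # 1 if terminally wedged
 *   # echo 0xffffffff > /sys/kernel/debug/dri/0/i915_wedged
 *
 * The written value is treated as an engine mask for intel_gt_handle_error(),
 * so single engines can be targeted by writing only their bits.
 */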
1888 
1889 static int
1890 i915_perf_noa_delay_set(void *data, u64 val)
1891 {
1892 	struct drm_i915_private *i915 = data;
1893 	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
1894 
1895 	/*
1896 	 * Larger values would lead to infinite waits, as we compute
1897 	 * timestamp differences on the CS with only 32 bits.
1898 	 */
1899 	if (val > mul_u32_u32(U32_MAX, clk))
1900 		return -EINVAL;
1901 
1902 	atomic64_set(&i915->perf.noa_programming_delay, val);
1903 	return 0;
1904 }
1905 
1906 static int
1907 i915_perf_noa_delay_get(void *data, u64 *val)
1908 {
1909 	struct drm_i915_private *i915 = data;
1910 
1911 	*val = atomic64_read(&i915->perf.noa_programming_delay);
1912 	return 0;
1913 }
1914 
1915 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
1916 			i915_perf_noa_delay_get,
1917 			i915_perf_noa_delay_set,
1918 			"%llu\n");
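/*
 * Sketch (path assumed): the NOA delay is a plain u64, and values whose CS
 * timestamp delta would not fit in 32 bits are rejected with -EINVAL by the
 * setter above:
 *
 *   # echo 500 > /sys/kernel/debug/dri/0/i915_perf_noa_delay
 *   # cat /sys/kernel/debug/dri/0/i915_perf_noa_delay
 */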
1919 
1920 #define DROP_UNBOUND	BIT(0)
1921 #define DROP_BOUND	BIT(1)
1922 #define DROP_RETIRE	BIT(2)
1923 #define DROP_ACTIVE	BIT(3)
1924 #define DROP_FREED	BIT(4)
1925 #define DROP_SHRINK_ALL	BIT(5)
1926 #define DROP_IDLE	BIT(6)
1927 #define DROP_RESET_ACTIVE	BIT(7)
1928 #define DROP_RESET_SEQNO	BIT(8)
1929 #define DROP_RCU	BIT(9)
1930 #define DROP_ALL (DROP_UNBOUND	| \
1931 		  DROP_BOUND	| \
1932 		  DROP_RETIRE	| \
1933 		  DROP_ACTIVE	| \
1934 		  DROP_FREED	| \
1935 		  DROP_SHRINK_ALL | \
1936 		  DROP_IDLE	| \
1937 		  DROP_RESET_ACTIVE | \
1938 		  DROP_RESET_SEQNO | \
1939 		  DROP_RCU)
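/*
 * Each DROP_* bit selects one action in i915_drop_caches_set() below, and
 * they may be combined; e.g. (path assumed, as elsewhere):
 *
 *   # echo 0x3ff > /sys/kernel/debug/dri/0/i915_gem_drop_caches  # DROP_ALL
 *   # echo 0x2 > /sys/kernel/debug/dri/0/i915_gem_drop_caches    # DROP_BOUND only
 */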
1940 static int
1941 i915_drop_caches_get(void *data, u64 *val)
1942 {
1943 	*val = DROP_ALL;
1944 
1945 	return 0;
1946 }
1947 static int
1948 gt_drop_caches(struct intel_gt *gt, u64 val)
1949 {
1950 	int ret;
1951 
1952 	if (val & DROP_RESET_ACTIVE &&
1953 	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
1954 		intel_gt_set_wedged(gt);
1955 
1956 	if (val & DROP_RETIRE)
1957 		intel_gt_retire_requests(gt);
1958 
1959 	if (val & (DROP_IDLE | DROP_ACTIVE)) {
1960 		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1961 		if (ret)
1962 			return ret;
1963 	}
1964 
1965 	if (val & DROP_IDLE) {
1966 		ret = intel_gt_pm_wait_for_idle(gt);
1967 		if (ret)
1968 			return ret;
1969 	}
1970 
1971 	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
1972 		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
1973 
1974 	return 0;
1975 }
1976 
1977 static int
1978 i915_drop_caches_set(void *data, u64 val)
1979 {
1980 	struct drm_i915_private *i915 = data;
1981 	int ret;
1982 
1983 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
1984 		  val, val & DROP_ALL);
1985 
1986 	ret = gt_drop_caches(&i915->gt, val);
1987 	if (ret)
1988 		return ret;
1989 
1990 	fs_reclaim_acquire(GFP_KERNEL);
1991 	if (val & DROP_BOUND)
1992 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
1993 
1994 	if (val & DROP_UNBOUND)
1995 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
1996 
1997 	if (val & DROP_SHRINK_ALL)
1998 		i915_gem_shrink_all(i915);
1999 	fs_reclaim_release(GFP_KERNEL);
2000 
2001 	if (val & DROP_RCU)
2002 		rcu_barrier();
2003 
2004 	if (val & DROP_FREED)
2005 		i915_gem_drain_freed_objects(i915);
2006 
2007 	return 0;
2008 }
2009 
2010 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
2011 			i915_drop_caches_get, i915_drop_caches_set,
2012 			"0x%08llx\n");
2013 
2014 static int
2015 i915_cache_sharing_get(void *data, u64 *val)
2016 {
2017 	struct drm_i915_private *dev_priv = data;
2018 	intel_wakeref_t wakeref;
2019 	u32 snpcr = 0;
2020 
2021 	if (!IS_GEN_RANGE(dev_priv, 6, 7))
2022 		return -ENODEV;
2023 
2024 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2025 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2026 
2027 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
2028 
2029 	return 0;
2030 }
2031 
2032 static int
2033 i915_cache_sharing_set(void *data, u64 val)
2034 {
2035 	struct drm_i915_private *dev_priv = data;
2036 	intel_wakeref_t wakeref;
2037 
2038 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
2039 	if (!IS_GEN_RANGE(dev_priv, 6, 7))
2040 
2041 	if (val > 3)
2042 		return -EINVAL;
2043 
2044 	drm_dbg(&dev_priv->drm,
2045 		"Manually setting cache sharing to %llu\n", val);
2046 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2047 		u32 snpcr;
2048 
2049 		/* Update the cache sharing policy here as well */
2050 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2051 		snpcr &= ~GEN6_MBC_SNPCR_MASK;
2052 		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
2053 		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
2054 	}
2055 
2056 	return 0;
2057 }
2058 
2059 static void
2060 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
2061 			  u8 *to_mask)
2062 {
2063 	int offset = slice * sseu->ss_stride;
2064 
2065 	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
2066 }
2067 
2068 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
2069 			i915_cache_sharing_get, i915_cache_sharing_set,
2070 			"%llu\n");
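/*
 * Example for the cache sharing attribute above (path assumed): legal values
 * are 0-3 and program the GEN6_MBC_SNPCR field; larger values return -EINVAL,
 * and hardware outside gen6/gen7 returns -ENODEV:
 *
 *   # echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */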
2071 
2072 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
2073 					  struct sseu_dev_info *sseu)
2074 {
2075 #define SS_MAX 2
2076 	const int ss_max = SS_MAX;
2077 	u32 sig1[SS_MAX], sig2[SS_MAX];
2078 	int ss;
2079 
2080 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
2081 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
2082 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
2083 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
2084 
2085 	for (ss = 0; ss < ss_max; ss++) {
2086 		unsigned int eu_cnt;
2087 
2088 		if (sig1[ss] & CHV_SS_PG_ENABLE)
2089 			/* skip disabled subslice */
2090 			continue;
2091 
2092 		sseu->slice_mask = BIT(0);
2093 		sseu->subslice_mask[0] |= BIT(ss);
2094 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
2095 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
2096 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
2097 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
2098 		sseu->eu_total += eu_cnt;
2099 		sseu->eu_per_subslice = max_t(unsigned int,
2100 					      sseu->eu_per_subslice, eu_cnt);
2101 	}
2102 #undef SS_MAX
2103 }
2104 
2105 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
2106 				     struct sseu_dev_info *sseu)
2107 {
2108 #define SS_MAX 6
2109 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2110 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
2111 	int s, ss;
2112 
2113 	for (s = 0; s < info->sseu.max_slices; s++) {
2114 		/*
2115 		 * FIXME: The valid SS mask respects the spec and reads
2116 		 * only the valid bits for those registers, excluding the
2117 		 * reserved ones, although this seems wrong because it
2118 		 * would leave many subslices without an ACK.
2119 		 */
2120 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
2121 			GEN10_PGCTL_VALID_SS_MASK(s);
2122 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
2123 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
2124 	}
2125 
2126 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
2127 		     GEN9_PGCTL_SSA_EU19_ACK |
2128 		     GEN9_PGCTL_SSA_EU210_ACK |
2129 		     GEN9_PGCTL_SSA_EU311_ACK;
2130 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
2131 		     GEN9_PGCTL_SSB_EU19_ACK |
2132 		     GEN9_PGCTL_SSB_EU210_ACK |
2133 		     GEN9_PGCTL_SSB_EU311_ACK;
2134 
2135 	for (s = 0; s < info->sseu.max_slices; s++) {
2136 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
2137 			/* skip disabled slice */
2138 			continue;
2139 
2140 		sseu->slice_mask |= BIT(s);
2141 		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
2142 
2143 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
2144 			unsigned int eu_cnt;
2145 
2146 			if (info->sseu.has_subslice_pg &&
2147 			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
2148 				/* skip disabled subslice */
2149 				continue;
2150 
2151 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
2152 					       eu_mask[ss % 2]);
2153 			sseu->eu_total += eu_cnt;
2154 			sseu->eu_per_subslice = max_t(unsigned int,
2155 						      sseu->eu_per_subslice,
2156 						      eu_cnt);
2157 		}
2158 	}
2159 #undef SS_MAX
2160 }
2161 
2162 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
2163 				    struct sseu_dev_info *sseu)
2164 {
2165 #define SS_MAX 3
2166 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2167 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
2168 	int s, ss;
2169 
2170 	for (s = 0; s < info->sseu.max_slices; s++) {
2171 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
2172 		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
2173 		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
2174 	}
2175 
2176 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
2177 		     GEN9_PGCTL_SSA_EU19_ACK |
2178 		     GEN9_PGCTL_SSA_EU210_ACK |
2179 		     GEN9_PGCTL_SSA_EU311_ACK;
2180 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
2181 		     GEN9_PGCTL_SSB_EU19_ACK |
2182 		     GEN9_PGCTL_SSB_EU210_ACK |
2183 		     GEN9_PGCTL_SSB_EU311_ACK;
2184 
2185 	for (s = 0; s < info->sseu.max_slices; s++) {
2186 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
2187 			/* skip disabled slice */
2188 			continue;
2189 
2190 		sseu->slice_mask |= BIT(s);
2191 
2192 		if (IS_GEN9_BC(dev_priv))
2193 			intel_sseu_copy_subslices(&info->sseu, s,
2194 						  sseu->subslice_mask);
2195 
2196 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
2197 			unsigned int eu_cnt;
2198 			u8 ss_idx = s * info->sseu.ss_stride +
2199 				    ss / BITS_PER_BYTE;
2200 
2201 			if (IS_GEN9_LP(dev_priv)) {
2202 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
2203 					/* skip disabled subslice */
2204 					continue;
2205 
2206 				sseu->subslice_mask[ss_idx] |=
2207 					BIT(ss % BITS_PER_BYTE);
2208 			}
2209 
2210 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
2211 					       eu_mask[ss % 2]);
2212 			sseu->eu_total += eu_cnt;
2213 			sseu->eu_per_subslice = max_t(unsigned int,
2214 						      sseu->eu_per_subslice,
2215 						      eu_cnt);
2216 		}
2217 	}
2218 #undef SS_MAX
2219 }
2220 
2221 static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
2222 				   struct sseu_dev_info *sseu)
2223 {
2224 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2225 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
2226 	int s;
2227 
2228 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
2229 
2230 	if (sseu->slice_mask) {
2231 		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
2232 		for (s = 0; s < fls(sseu->slice_mask); s++)
2233 			intel_sseu_copy_subslices(&info->sseu, s,
2234 						  sseu->subslice_mask);
2235 		sseu->eu_total = sseu->eu_per_subslice *
2236 				 intel_sseu_subslice_total(sseu);
2237 
2238 		/* subtract fused off EU(s) from enabled slice(s) */
2239 		for (s = 0; s < fls(sseu->slice_mask); s++) {
2240 			u8 subslice_7eu = info->sseu.subslice_7eu[s];
2241 
2242 			sseu->eu_total -= hweight8(subslice_7eu);
2243 		}
2244 	}
2245 }
2246 
2247 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
2248 				 const struct sseu_dev_info *sseu)
2249 {
2250 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2251 	const char *type = is_available_info ? "Available" : "Enabled";
2252 	int s;
2253 
2254 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
2255 		   sseu->slice_mask);
2256 	seq_printf(m, "  %s Slice Total: %u\n", type,
2257 		   hweight8(sseu->slice_mask));
2258 	seq_printf(m, "  %s Subslice Total: %u\n", type,
2259 		   intel_sseu_subslice_total(sseu));
2260 	for (s = 0; s < fls(sseu->slice_mask); s++) {
2261 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
2262 			   s, intel_sseu_subslices_per_slice(sseu, s));
2263 	}
2264 	seq_printf(m, "  %s EU Total: %u\n", type,
2265 		   sseu->eu_total);
2266 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
2267 		   sseu->eu_per_subslice);
2268 
2269 	if (!is_available_info)
2270 		return;
2271 
2272 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
2273 	if (HAS_POOLED_EU(dev_priv))
2274 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
2275 
2276 	seq_printf(m, "  Has Slice Power Gating: %s\n",
2277 		   yesno(sseu->has_slice_pg));
2278 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
2279 		   yesno(sseu->has_subslice_pg));
2280 	seq_printf(m, "  Has EU Power Gating: %s\n",
2281 		   yesno(sseu->has_eu_pg));
2282 }
2283 
2284 static int i915_sseu_status(struct seq_file *m, void *unused)
2285 {
2286 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2287 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
2288 	struct sseu_dev_info sseu;
2289 	intel_wakeref_t wakeref;
2290 
2291 	if (INTEL_GEN(dev_priv) < 8)
2292 		return -ENODEV;
2293 
2294 	seq_puts(m, "SSEU Device Info\n");
2295 	i915_print_sseu_info(m, true, &info->sseu);
2296 
2297 	seq_puts(m, "SSEU Device Status\n");
2298 	memset(&sseu, 0, sizeof(sseu));
2299 	intel_sseu_set_info(&sseu, info->sseu.max_slices,
2300 			    info->sseu.max_subslices,
2301 			    info->sseu.max_eus_per_subslice);
2302 
2303 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2304 		if (IS_CHERRYVIEW(dev_priv))
2305 			cherryview_sseu_device_status(dev_priv, &sseu);
2306 		else if (IS_BROADWELL(dev_priv))
2307 			bdw_sseu_device_status(dev_priv, &sseu);
2308 		else if (IS_GEN(dev_priv, 9))
2309 			gen9_sseu_device_status(dev_priv, &sseu);
2310 		else if (INTEL_GEN(dev_priv) >= 10)
2311 			gen10_sseu_device_status(dev_priv, &sseu);
2312 	}
2313 
2314 	i915_print_sseu_info(m, false, &sseu);
2315 
2316 	return 0;
2317 }
2318 
2319 static int i915_forcewake_open(struct inode *inode, struct file *file)
2320 {
2321 	struct drm_i915_private *i915 = inode->i_private;
2322 	struct intel_gt *gt = &i915->gt;
2323 
2324 	atomic_inc(&gt->user_wakeref);
2325 	intel_gt_pm_get(gt);
2326 	if (INTEL_GEN(i915) >= 6)
2327 		intel_uncore_forcewake_user_get(gt->uncore);
2328 
2329 	return 0;
2330 }
2331 
2332 static int i915_forcewake_release(struct inode *inode, struct file *file)
2333 {
2334 	struct drm_i915_private *i915 = inode->i_private;
2335 	struct intel_gt *gt = &i915->gt;
2336 
2337 	if (INTEL_GEN(i915) >= 6)
2338 		intel_uncore_forcewake_user_put(gt->uncore);
2339 	intel_gt_pm_put(gt);
2340 	atomic_dec(&gt->user_wakeref);
2341 
2342 	return 0;
2343 }
2344 
2345 static const struct file_operations i915_forcewake_fops = {
2346 	.owner = THIS_MODULE,
2347 	.open = i915_forcewake_open,
2348 	.release = i915_forcewake_release,
2349 };
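/*
 * Holding i915_forcewake_user open pins the GT awake (and, on gen6+, holds
 * userspace forcewake), which is useful while poking registers externally.
 * A shell sketch (the fd trick and path are illustrative assumptions):
 *
 *   # exec 4< /sys/kernel/debug/dri/0/i915_forcewake_user   # acquire
 *   ... inspect hardware ...
 *   # exec 4<&-                                             # release
 */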
2350 
2351 static const struct drm_info_list i915_debugfs_list[] = {
2352 	{"i915_capabilities", i915_capabilities, 0},
2353 	{"i915_gem_objects", i915_gem_object_info, 0},
2354 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2355 	{"i915_gem_interrupt", i915_interrupt_info, 0},
2356 	{"i915_guc_info", i915_guc_info, 0},
2357 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
2358 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
2359 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
2360 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
2361 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
2362 	{"i915_frequency_info", i915_frequency_info, 0},
2363 	{"i915_drpc_info", i915_drpc_info, 0},
2364 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
2365 	{"i915_context_status", i915_context_status, 0},
2366 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
2367 	{"i915_swizzle_info", i915_swizzle_info, 0},
2368 	{"i915_llc", i915_llc, 0},
2369 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
2370 	{"i915_engine_info", i915_engine_info, 0},
2371 	{"i915_rcs_topology", i915_rcs_topology, 0},
2372 	{"i915_shrinker_info", i915_shrinker_info, 0},
2373 	{"i915_wa_registers", i915_wa_registers, 0},
2374 	{"i915_sseu_status", i915_sseu_status, 0},
2375 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
2376 };
2377 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2378 
2379 static const struct i915_debugfs_files {
2380 	const char *name;
2381 	const struct file_operations *fops;
2382 } i915_debugfs_files[] = {
2383 	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
2384 	{"i915_wedged", &i915_wedged_fops},
2385 	{"i915_cache_sharing", &i915_cache_sharing_fops},
2386 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
2387 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
2388 	{"i915_error_state", &i915_error_state_fops},
2389 	{"i915_gpu_info", &i915_gpu_info_fops},
2390 #endif
2391 	{"i915_guc_log_level", &i915_guc_log_level_fops},
2392 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
2393 };
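/*
 * Both tables end up under <debugfs>/dri/<minor>/ via i915_debugfs_register()
 * below: the drm_info_list entries are read-only seq_file shows, while the
 * i915_debugfs_files entries carry their own file_operations and so may be
 * writable.
 */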
2394 
2395 int i915_debugfs_register(struct drm_i915_private *dev_priv)
2396 {
2397 	struct drm_minor *minor = dev_priv->drm.primary;
2398 	int i;
2399 
2400 	i915_debugfs_params(dev_priv);
2401 
2402 	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
2403 			    to_i915(minor->dev), &i915_forcewake_fops);
2404 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
2405 		debugfs_create_file(i915_debugfs_files[i].name,
2406 				    S_IRUGO | S_IWUSR,
2407 				    minor->debugfs_root,
2408 				    to_i915(minor->dev),
2409 				    i915_debugfs_files[i].fops);
2410 	}
2411 
2412 	return drm_debugfs_create_files(i915_debugfs_list,
2413 					I915_DEBUGFS_ENTRIES,
2414 					minor->debugfs_root, minor);
2415 }
2416